diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 91e20112ae18b42606d81207ef601f9e17f433a2..1b3dcee30288f218316d2d5f7f40e7cae250d31b 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -35,9 +35,9 @@ import (
 	mmap "github.com/edsrzf/mmap-go"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/hashicorp/golang-lru/simplelru"
-	metrics "github.com/rcrowley/go-metrics"
 )
 
 var ErrInvalidDumpMagic = errors.New("invalid dump magic")
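
For context, a minimal sketch (not part of the patch) of how call sites consume the forked package after this import swap; the path is the only user-visible difference from the upstream rcrowley API:

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics" // was github.com/rcrowley/go-metrics
)

func main() {
	// Metrics are registered by name; a nil registry selects metrics.DefaultRegistry.
	m := metrics.GetOrRegisterMeter("example/in", nil)
	m.Mark(1)

	t := metrics.GetOrRegisterTimer("example/req", nil)
	start := time.Now()
	// ... do some work ...
	t.UpdateSince(start)
}
```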
diff --git a/core/blockchain.go b/core/blockchain.go
index 4ae0e4f4ec32a1c0e6c1c818a0597d84a9dbce26..644df123c52165aa1553a0abb8208fcb57b104e7 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -46,7 +46,7 @@ import (
 )
 
 var (
-	blockInsertTimer = metrics.NewTimer("chain/inserts")
+	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
 
 	ErrNoGenesis = errors.New("Genesis not found in chain")
 )
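
The one-argument `metrics.NewTimer(name)` helper from the old go-ethereum wrapper is gone; upstream reserves `NewTimer()` for an unregistered instance. A hedged sketch of what the replacement constructor does:

```go
package main

import "github.com/ethereum/go-ethereum/metrics"

var blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

// Rough hand-rolled equivalent of the constructor above (ignoring the
// duplicate-registration error): a nil registry falls back to the default.
func newRegisteredTimerSketch(name string) metrics.Timer {
	t := metrics.NewTimer() // a no-op NilTimer when metrics.Enabled is false
	metrics.DefaultRegistry.Register(name, t)
	return t
}
```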
diff --git a/core/database_util.go b/core/database_util.go
index c6b125dae9d7d5c0f208f45d2e7c35a144a8922d..61ab7013496b26e42472f4592071996174ff0a88 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -70,8 +70,8 @@ var (
 
 	ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
 
-	preimageCounter    = metrics.NewCounter("db/preimage/total")
-	preimageHitCounter = metrics.NewCounter("db/preimage/hits")
+	preimageCounter    = metrics.NewRegisteredCounter("db/preimage/total", nil)
+	preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
 )
 
 // TxLookupEntry is a positional metadata to help looking up the data content of
diff --git a/core/tx_pool.go b/core/tx_pool.go
index dc3ddc4232498bd52f2e5fc301f87cfd8d6e75b4..0534fe57a1d40648612e56f3053e7d2ebdd27399 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -87,20 +87,20 @@ var (
 
 var (
 	// Metrics for the pending pool
-	pendingDiscardCounter   = metrics.NewCounter("txpool/pending/discard")
-	pendingReplaceCounter   = metrics.NewCounter("txpool/pending/replace")
-	pendingRateLimitCounter = metrics.NewCounter("txpool/pending/ratelimit") // Dropped due to rate limiting
-	pendingNofundsCounter   = metrics.NewCounter("txpool/pending/nofunds")   // Dropped due to out-of-funds
+	pendingDiscardCounter   = metrics.NewRegisteredCounter("txpool/pending/discard", nil)
+	pendingReplaceCounter   = metrics.NewRegisteredCounter("txpool/pending/replace", nil)
+	pendingRateLimitCounter = metrics.NewRegisteredCounter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
+	pendingNofundsCounter   = metrics.NewRegisteredCounter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
 
 	// Metrics for the queued pool
-	queuedDiscardCounter   = metrics.NewCounter("txpool/queued/discard")
-	queuedReplaceCounter   = metrics.NewCounter("txpool/queued/replace")
-	queuedRateLimitCounter = metrics.NewCounter("txpool/queued/ratelimit") // Dropped due to rate limiting
-	queuedNofundsCounter   = metrics.NewCounter("txpool/queued/nofunds")   // Dropped due to out-of-funds
+	queuedDiscardCounter   = metrics.NewRegisteredCounter("txpool/queued/discard", nil)
+	queuedReplaceCounter   = metrics.NewRegisteredCounter("txpool/queued/replace", nil)
+	queuedRateLimitCounter = metrics.NewRegisteredCounter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
+	queuedNofundsCounter   = metrics.NewRegisteredCounter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
 
 	// General tx metrics
-	invalidTxCounter     = metrics.NewCounter("txpool/invalid")
-	underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
+	invalidTxCounter     = metrics.NewRegisteredCounter("txpool/invalid", nil)
+	underpricedTxCounter = metrics.NewRegisteredCounter("txpool/underpriced", nil)
 )
 
 // TxStatus is the current status of a transaction as seen by the pool.
diff --git a/dashboard/dashboard.go b/dashboard/dashboard.go
index 09038638edffa9ea32d747d40b082dcd8a3b9923..2ca795187f4057780436fee8e613816a238b2e80 100644
--- a/dashboard/dashboard.go
+++ b/dashboard/dashboard.go
@@ -36,10 +36,10 @@ import (
 
 	"github.com/elastic/gosigar"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/rcrowley/go-metrics"
 	"golang.org/x/net/websocket"
 )
 
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 7ede530a94ca8099cdc37c17693d0c733bb6489e..d13247766a7830763e65f3287d06118ee253e9d9 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -31,8 +31,8 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/rcrowley/go-metrics"
 )
 
 var (
diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go
index 58764ccf0630845589d91c255b78a551a4a9094d..d4eb33794628467716f60073da21932abf069c67 100644
--- a/eth/downloader/metrics.go
+++ b/eth/downloader/metrics.go
@@ -23,21 +23,21 @@ import (
 )
 
 var (
-	headerInMeter      = metrics.NewMeter("eth/downloader/headers/in")
-	headerReqTimer     = metrics.NewTimer("eth/downloader/headers/req")
-	headerDropMeter    = metrics.NewMeter("eth/downloader/headers/drop")
-	headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout")
+	headerInMeter      = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
+	headerReqTimer     = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
+	headerDropMeter    = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
+	headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)
 
-	bodyInMeter      = metrics.NewMeter("eth/downloader/bodies/in")
-	bodyReqTimer     = metrics.NewTimer("eth/downloader/bodies/req")
-	bodyDropMeter    = metrics.NewMeter("eth/downloader/bodies/drop")
-	bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
+	bodyInMeter      = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
+	bodyReqTimer     = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
+	bodyDropMeter    = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
+	bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)
 
-	receiptInMeter      = metrics.NewMeter("eth/downloader/receipts/in")
-	receiptReqTimer     = metrics.NewTimer("eth/downloader/receipts/req")
-	receiptDropMeter    = metrics.NewMeter("eth/downloader/receipts/drop")
-	receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout")
+	receiptInMeter      = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil)
+	receiptReqTimer     = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil)
+	receiptDropMeter    = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
+	receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)
 
-	stateInMeter   = metrics.NewMeter("eth/downloader/states/in")
-	stateDropMeter = metrics.NewMeter("eth/downloader/states/drop")
+	stateInMeter   = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
+	stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)
 )
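
A hedged sketch (illustrative names, not actual downloader code) of how a request path typically drives these meter/timer triples:

```go
package downloader

import (
	"time"

	"github.com/ethereum/go-ethereum/core/types"
)

// meterHeaderDelivery shows the usual update pattern for the metrics above.
func meterHeaderDelivery(requestedAt time.Time, headers []*types.Header, timedOut bool) {
	if timedOut {
		headerTimeoutMeter.Mark(1) // count expired requests
		return
	}
	headerReqTimer.UpdateSince(requestedAt) // round-trip latency sample
	headerInMeter.Mark(int64(len(headers))) // delivered item throughput
}
```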
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index a1a70e46ea9f8918323f9ee2dfed6cf8de477877..359cce54b5c6cadb8f210d2ae5a6868def49f5a2 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -28,7 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/rcrowley/go-metrics"
+	"github.com/ethereum/go-ethereum/metrics"
 	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )
 
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
index 1ed8075bf3911c064ca0241b2096be565f67b5de..d68d12f000f71a44b724657f28b4efb69b828815 100644
--- a/eth/fetcher/metrics.go
+++ b/eth/fetcher/metrics.go
@@ -23,21 +23,21 @@ import (
 )
 
 var (
-	propAnnounceInMeter   = metrics.NewMeter("eth/fetcher/prop/announces/in")
-	propAnnounceOutTimer  = metrics.NewTimer("eth/fetcher/prop/announces/out")
-	propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop")
-	propAnnounceDOSMeter  = metrics.NewMeter("eth/fetcher/prop/announces/dos")
+	propAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
+	propAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
+	propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
+	propAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)
 
-	propBroadcastInMeter   = metrics.NewMeter("eth/fetcher/prop/broadcasts/in")
-	propBroadcastOutTimer  = metrics.NewTimer("eth/fetcher/prop/broadcasts/out")
-	propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
-	propBroadcastDOSMeter  = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")
+	propBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
+	propBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
+	propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
+	propBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)
 
-	headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
-	bodyFetchMeter   = metrics.NewMeter("eth/fetcher/fetch/bodies")
+	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
+	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)
 
-	headerFilterInMeter  = metrics.NewMeter("eth/fetcher/filter/headers/in")
-	headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
-	bodyFilterInMeter    = metrics.NewMeter("eth/fetcher/filter/bodies/in")
-	bodyFilterOutMeter   = metrics.NewMeter("eth/fetcher/filter/bodies/out")
+	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
+	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
+	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
+	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
 )
diff --git a/eth/metrics.go b/eth/metrics.go
index 5fa2597d4d12bfc86bf74281b4f93d9313bc9270..0533a2a8757c5a5e4015b7344e1b25e0cb2606dd 100644
--- a/eth/metrics.go
+++ b/eth/metrics.go
@@ -22,38 +22,38 @@ import (
 )
 
 var (
-	propTxnInPacketsMeter     = metrics.NewMeter("eth/prop/txns/in/packets")
-	propTxnInTrafficMeter     = metrics.NewMeter("eth/prop/txns/in/traffic")
-	propTxnOutPacketsMeter    = metrics.NewMeter("eth/prop/txns/out/packets")
-	propTxnOutTrafficMeter    = metrics.NewMeter("eth/prop/txns/out/traffic")
-	propHashInPacketsMeter    = metrics.NewMeter("eth/prop/hashes/in/packets")
-	propHashInTrafficMeter    = metrics.NewMeter("eth/prop/hashes/in/traffic")
-	propHashOutPacketsMeter   = metrics.NewMeter("eth/prop/hashes/out/packets")
-	propHashOutTrafficMeter   = metrics.NewMeter("eth/prop/hashes/out/traffic")
-	propBlockInPacketsMeter   = metrics.NewMeter("eth/prop/blocks/in/packets")
-	propBlockInTrafficMeter   = metrics.NewMeter("eth/prop/blocks/in/traffic")
-	propBlockOutPacketsMeter  = metrics.NewMeter("eth/prop/blocks/out/packets")
-	propBlockOutTrafficMeter  = metrics.NewMeter("eth/prop/blocks/out/traffic")
-	reqHeaderInPacketsMeter   = metrics.NewMeter("eth/req/headers/in/packets")
-	reqHeaderInTrafficMeter   = metrics.NewMeter("eth/req/headers/in/traffic")
-	reqHeaderOutPacketsMeter  = metrics.NewMeter("eth/req/headers/out/packets")
-	reqHeaderOutTrafficMeter  = metrics.NewMeter("eth/req/headers/out/traffic")
-	reqBodyInPacketsMeter     = metrics.NewMeter("eth/req/bodies/in/packets")
-	reqBodyInTrafficMeter     = metrics.NewMeter("eth/req/bodies/in/traffic")
-	reqBodyOutPacketsMeter    = metrics.NewMeter("eth/req/bodies/out/packets")
-	reqBodyOutTrafficMeter    = metrics.NewMeter("eth/req/bodies/out/traffic")
-	reqStateInPacketsMeter    = metrics.NewMeter("eth/req/states/in/packets")
-	reqStateInTrafficMeter    = metrics.NewMeter("eth/req/states/in/traffic")
-	reqStateOutPacketsMeter   = metrics.NewMeter("eth/req/states/out/packets")
-	reqStateOutTrafficMeter   = metrics.NewMeter("eth/req/states/out/traffic")
-	reqReceiptInPacketsMeter  = metrics.NewMeter("eth/req/receipts/in/packets")
-	reqReceiptInTrafficMeter  = metrics.NewMeter("eth/req/receipts/in/traffic")
-	reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
-	reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")
-	miscInPacketsMeter        = metrics.NewMeter("eth/misc/in/packets")
-	miscInTrafficMeter        = metrics.NewMeter("eth/misc/in/traffic")
-	miscOutPacketsMeter       = metrics.NewMeter("eth/misc/out/packets")
-	miscOutTrafficMeter       = metrics.NewMeter("eth/misc/out/traffic")
+	propTxnInPacketsMeter     = metrics.NewRegisteredMeter("eth/prop/txns/in/packets", nil)
+	propTxnInTrafficMeter     = metrics.NewRegisteredMeter("eth/prop/txns/in/traffic", nil)
+	propTxnOutPacketsMeter    = metrics.NewRegisteredMeter("eth/prop/txns/out/packets", nil)
+	propTxnOutTrafficMeter    = metrics.NewRegisteredMeter("eth/prop/txns/out/traffic", nil)
+	propHashInPacketsMeter    = metrics.NewRegisteredMeter("eth/prop/hashes/in/packets", nil)
+	propHashInTrafficMeter    = metrics.NewRegisteredMeter("eth/prop/hashes/in/traffic", nil)
+	propHashOutPacketsMeter   = metrics.NewRegisteredMeter("eth/prop/hashes/out/packets", nil)
+	propHashOutTrafficMeter   = metrics.NewRegisteredMeter("eth/prop/hashes/out/traffic", nil)
+	propBlockInPacketsMeter   = metrics.NewRegisteredMeter("eth/prop/blocks/in/packets", nil)
+	propBlockInTrafficMeter   = metrics.NewRegisteredMeter("eth/prop/blocks/in/traffic", nil)
+	propBlockOutPacketsMeter  = metrics.NewRegisteredMeter("eth/prop/blocks/out/packets", nil)
+	propBlockOutTrafficMeter  = metrics.NewRegisteredMeter("eth/prop/blocks/out/traffic", nil)
+	reqHeaderInPacketsMeter   = metrics.NewRegisteredMeter("eth/req/headers/in/packets", nil)
+	reqHeaderInTrafficMeter   = metrics.NewRegisteredMeter("eth/req/headers/in/traffic", nil)
+	reqHeaderOutPacketsMeter  = metrics.NewRegisteredMeter("eth/req/headers/out/packets", nil)
+	reqHeaderOutTrafficMeter  = metrics.NewRegisteredMeter("eth/req/headers/out/traffic", nil)
+	reqBodyInPacketsMeter     = metrics.NewRegisteredMeter("eth/req/bodies/in/packets", nil)
+	reqBodyInTrafficMeter     = metrics.NewRegisteredMeter("eth/req/bodies/in/traffic", nil)
+	reqBodyOutPacketsMeter    = metrics.NewRegisteredMeter("eth/req/bodies/out/packets", nil)
+	reqBodyOutTrafficMeter    = metrics.NewRegisteredMeter("eth/req/bodies/out/traffic", nil)
+	reqStateInPacketsMeter    = metrics.NewRegisteredMeter("eth/req/states/in/packets", nil)
+	reqStateInTrafficMeter    = metrics.NewRegisteredMeter("eth/req/states/in/traffic", nil)
+	reqStateOutPacketsMeter   = metrics.NewRegisteredMeter("eth/req/states/out/packets", nil)
+	reqStateOutTrafficMeter   = metrics.NewRegisteredMeter("eth/req/states/out/traffic", nil)
+	reqReceiptInPacketsMeter  = metrics.NewRegisteredMeter("eth/req/receipts/in/packets", nil)
+	reqReceiptInTrafficMeter  = metrics.NewRegisteredMeter("eth/req/receipts/in/traffic", nil)
+	reqReceiptOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/packets", nil)
+	reqReceiptOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/traffic", nil)
+	miscInPacketsMeter        = metrics.NewRegisteredMeter("eth/misc/in/packets", nil)
+	miscInTrafficMeter        = metrics.NewRegisteredMeter("eth/misc/in/traffic", nil)
+	miscOutPacketsMeter       = metrics.NewRegisteredMeter("eth/misc/out/packets", nil)
+	miscOutTrafficMeter       = metrics.NewRegisteredMeter("eth/misc/out/traffic", nil)
 )
 
 // meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
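
These meters come in packet/traffic pairs; a hedged sketch of the pattern the wrapper applies to every message (the selection by message code is elided):

```go
package eth

import "github.com/ethereum/go-ethereum/p2p"

// meterInboundSketch is illustrative only: bump the packet meter by one and
// the traffic meter by the payload size for each delivered message.
func meterInboundSketch(msg p2p.Msg) {
	packets, traffic := miscInPacketsMeter, miscInTrafficMeter
	// ... a switch on msg.Code would pick e.g. propTxnInPacketsMeter/propTxnInTrafficMeter ...
	packets.Mark(1)
	traffic.Mark(int64(msg.Size))
}
```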
diff --git a/ethdb/database.go b/ethdb/database.go
index d86585f0795bdf97566ce563249c9798eadad7da..57d38f7f5fec336e46ee71ebd891bdb6bfc2f42c 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -29,8 +29,6 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
-
-	gometrics "github.com/rcrowley/go-metrics"
 )
 
 var OpenFileLimit = 64
@@ -39,15 +37,15 @@ type LDBDatabase struct {
 	fn string      // filename for reporting
 	db *leveldb.DB // LevelDB instance
 
-	getTimer       gometrics.Timer // Timer for measuring the database get request counts and latencies
-	putTimer       gometrics.Timer // Timer for measuring the database put request counts and latencies
-	delTimer       gometrics.Timer // Timer for measuring the database delete request counts and latencies
-	missMeter      gometrics.Meter // Meter for measuring the missed database get requests
-	readMeter      gometrics.Meter // Meter for measuring the database get request data usage
-	writeMeter     gometrics.Meter // Meter for measuring the database put request data usage
-	compTimeMeter  gometrics.Meter // Meter for measuring the total time spent in database compaction
-	compReadMeter  gometrics.Meter // Meter for measuring the data read during compaction
-	compWriteMeter gometrics.Meter // Meter for measuring the data written during compaction
+	getTimer       metrics.Timer // Timer for measuring the database get request counts and latencies
+	putTimer       metrics.Timer // Timer for measuring the database put request counts and latencies
+	delTimer       metrics.Timer // Timer for measuring the database delete request counts and latencies
+	missMeter      metrics.Meter // Meter for measuring the missed database get requests
+	readMeter      metrics.Meter // Meter for measuring the database get request data usage
+	writeMeter     metrics.Meter // Meter for measuring the database put request data usage
+	compTimeMeter  metrics.Meter // Meter for measuring the total time spent in database compaction
+	compReadMeter  metrics.Meter // Meter for measuring the data read during compaction
+	compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
 
 	quitLock sync.Mutex      // Mutex protecting the quit channel access
 	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
@@ -180,15 +178,15 @@ func (db *LDBDatabase) Meter(prefix string) {
 		return
 	}
 	// Initialize all the metrics collector at the requested prefix
-	db.getTimer = metrics.NewTimer(prefix + "user/gets")
-	db.putTimer = metrics.NewTimer(prefix + "user/puts")
-	db.delTimer = metrics.NewTimer(prefix + "user/dels")
-	db.missMeter = metrics.NewMeter(prefix + "user/misses")
-	db.readMeter = metrics.NewMeter(prefix + "user/reads")
-	db.writeMeter = metrics.NewMeter(prefix + "user/writes")
-	db.compTimeMeter = metrics.NewMeter(prefix + "compact/time")
-	db.compReadMeter = metrics.NewMeter(prefix + "compact/input")
-	db.compWriteMeter = metrics.NewMeter(prefix + "compact/output")
+	db.getTimer = metrics.NewRegisteredTimer(prefix+"user/gets", nil)
+	db.putTimer = metrics.NewRegisteredTimer(prefix+"user/puts", nil)
+	db.delTimer = metrics.NewRegisteredTimer(prefix+"user/dels", nil)
+	db.missMeter = metrics.NewRegisteredMeter(prefix+"user/misses", nil)
+	db.readMeter = metrics.NewRegisteredMeter(prefix+"user/reads", nil)
+	db.writeMeter = metrics.NewRegisteredMeter(prefix+"user/writes", nil)
+	db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
+	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
+	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
 
 	// Create a quit channel for the periodic collector and run it
 	db.quitLock.Lock()
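
For context, a hedged sketch of how a caller wires up the metered database (path, cache and handle counts are illustrative):

```go
package main

import "github.com/ethereum/go-ethereum/ethdb"

func openMeteredDB() (*ethdb.LDBDatabase, error) {
	db, err := ethdb.NewLDBDatabase("/tmp/chaindata", 128, 1024)
	if err != nil {
		return nil, err
	}
	// Registers the timers/meters declared above under the prefix and
	// starts the periodic compaction-stats collector.
	db.Meter("eth/db/chaindata/")
	return db, nil
}
```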
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 6247cc7dc02f25c9fe569a4f02f83af9859801ef..1f181bf8b0f08253a880b7e8649cfd174f19aa06 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -26,6 +26,8 @@ import (
 
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/log/term"
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/metrics/exp"
 	colorable "github.com/mattn/go-colorable"
 	"gopkg.in/urfave/cli.v1"
 )
@@ -127,6 +129,10 @@ func Setup(ctx *cli.Context) error {
 
 	// pprof server
 	if ctx.GlobalBool(pprofFlag.Name) {
+		// Hook go-metrics into expvar on any /debug/metrics request, load all vars
+		// from the registry into expvar, and execute the regular expvar handler.
+		exp.Exp(metrics.DefaultRegistry)
+
 		address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
 		go func() {
 			log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
diff --git a/les/metrics.go b/les/metrics.go
index 0162a1d1ad5b9539a7745923b5b1f03b42328546..c282a62a1aed75de0938f079655ebfef901fcbce 100644
--- a/les/metrics.go
+++ b/les/metrics.go
@@ -58,10 +58,10 @@ var (
 		reqReceiptInTrafficMeter  = metrics.NewMeter("eth/req/receipts/in/traffic")
 		reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
 		reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")*/
-	miscInPacketsMeter  = metrics.NewMeter("les/misc/in/packets")
-	miscInTrafficMeter  = metrics.NewMeter("les/misc/in/traffic")
-	miscOutPacketsMeter = metrics.NewMeter("les/misc/out/packets")
-	miscOutTrafficMeter = metrics.NewMeter("les/misc/out/traffic")
+	miscInPacketsMeter  = metrics.NewRegisteredMeter("les/misc/in/packets", nil)
+	miscInTrafficMeter  = metrics.NewRegisteredMeter("les/misc/in/traffic", nil)
+	miscOutPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets", nil)
+	miscOutTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic", nil)
 )
 
 // meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
diff --git a/metrics/FORK.md b/metrics/FORK.md
new file mode 100644
index 0000000000000000000000000000000000000000..b19985bf56e97aa6f95bd49eabb0b2a61d5889f3
--- /dev/null
+++ b/metrics/FORK.md
@@ -0,0 +1 @@
+This repo has been forked from https://github.com/rcrowley/go-metrics at commit e181e09
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/metrics/LICENSE
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/LICENSE
rename to metrics/LICENSE
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/metrics/README.md
similarity index 81%
rename from vendor/github.com/rcrowley/go-metrics/README.md
rename to metrics/README.md
index 2d1a6dcfa445d5c76796fe60e29e056f9f905b22..bc2a45a8382dabc540d9ee02eab075bedc19faa4 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/metrics/README.md
@@ -42,12 +42,22 @@ t.Update(47)
 Register() is not threadsafe. For threadsafe metric registration use
 GetOrRegister:
 
-```
+```go
 t := metrics.GetOrRegisterTimer("account.create.latency", nil)
 t.Time(func() {})
 t.Update(47)
 ```
 
+**NOTE:** Be sure to unregister short-lived meters and timers; otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
 Periodically log every metric in human-readable form to standard error:
 
 ```go
@@ -81,12 +91,13 @@ issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
 ```go
 import "github.com/vrischmann/go-metrics-influxdb"
 
-go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
-    Host:     "127.0.0.1:8086",
-    Database: "metrics",
-    Username: "test",
-    Password: "test",
-})
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+  10e9,
+  "127.0.0.1:8086",
+  "database-name",
+  "username",
+  "password",
+)
 ```
 
 Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
@@ -146,8 +157,10 @@ Publishing Metrics
 
 Clients are available for the following destinations:
 
-* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
-* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
-* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
-* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
-* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
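
A minimal sketch of the register/unregister lifecycle the NOTE above describes, against the forked package (names illustrative):

```go
package main

import "github.com/ethereum/go-ethereum/metrics"

func main() {
	metrics.Enabled = true // constructors return no-op metrics otherwise

	m := metrics.GetOrRegisterMeter("quux", nil) // registers on DefaultRegistry
	m.Mark(1)

	// Meters keep a background ticker alive; Unregister stops it so the
	// meter can actually be garbage collected instead of leaking.
	metrics.Unregister("quux")
}
```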
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/metrics/counter.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/counter.go
rename to metrics/counter.go
index bb7b039cb5725de5af2fd8a9b2096418e2cbe225..c7f2b4bd3aa3ceff55e9dda365d43c52f03d850b 100644
--- a/vendor/github.com/rcrowley/go-metrics/counter.go
+++ b/metrics/counter.go
@@ -22,7 +22,7 @@ func GetOrRegisterCounter(name string, r Registry) Counter {
 
 // NewCounter constructs a new StandardCounter.
 func NewCounter() Counter {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilCounter{}
 	}
 	return &StandardCounter{0}
diff --git a/metrics/counter_test.go b/metrics/counter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfb03b4e8845b795a492f2ab9761e27e074beb7f
--- /dev/null
+++ b/metrics/counter_test.go
@@ -0,0 +1,77 @@
+package metrics
+
+import "testing"
+
+func BenchmarkCounter(b *testing.B) {
+	c := NewCounter()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		c.Inc(1)
+	}
+}
+
+func TestCounterClear(t *testing.T) {
+	c := NewCounter()
+	c.Inc(1)
+	c.Clear()
+	if count := c.Count(); 0 != count {
+		t.Errorf("c.Count(): 0 != %v\n", count)
+	}
+}
+
+func TestCounterDec1(t *testing.T) {
+	c := NewCounter()
+	c.Dec(1)
+	if count := c.Count(); -1 != count {
+		t.Errorf("c.Count(): -1 != %v\n", count)
+	}
+}
+
+func TestCounterDec2(t *testing.T) {
+	c := NewCounter()
+	c.Dec(2)
+	if count := c.Count(); -2 != count {
+		t.Errorf("c.Count(): -2 != %v\n", count)
+	}
+}
+
+func TestCounterInc1(t *testing.T) {
+	c := NewCounter()
+	c.Inc(1)
+	if count := c.Count(); 1 != count {
+		t.Errorf("c.Count(): 1 != %v\n", count)
+	}
+}
+
+func TestCounterInc2(t *testing.T) {
+	c := NewCounter()
+	c.Inc(2)
+	if count := c.Count(); 2 != count {
+		t.Errorf("c.Count(): 2 != %v\n", count)
+	}
+}
+
+func TestCounterSnapshot(t *testing.T) {
+	c := NewCounter()
+	c.Inc(1)
+	snapshot := c.Snapshot()
+	c.Inc(1)
+	if count := snapshot.Count(); 1 != count {
+		t.Errorf("c.Count(): 1 != %v\n", count)
+	}
+}
+
+func TestCounterZero(t *testing.T) {
+	c := NewCounter()
+	if count := c.Count(); 0 != count {
+		t.Errorf("c.Count(): 0 != %v\n", count)
+	}
+}
+
+func TestGetOrRegisterCounter(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredCounter("foo", r).Inc(47)
+	if c := GetOrRegisterCounter("foo", r); 47 != c.Count() {
+		t.Fatal(c)
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/metrics/debug.go
similarity index 94%
rename from vendor/github.com/rcrowley/go-metrics/debug.go
rename to metrics/debug.go
index 043ccefab612df20a24bfa80968cd886dfa7c4ca..de4a2739fe08fbe10815c86ac4b7c8ff9ff17d60 100644
--- a/vendor/github.com/rcrowley/go-metrics/debug.go
+++ b/metrics/debug.go
@@ -22,7 +22,7 @@ var (
 // Capture new values for the Go garbage collector statistics exported in
 // debug.GCStats.  This is designed to be called as a goroutine.
 func CaptureDebugGCStats(r Registry, d time.Duration) {
-	for _ = range time.Tick(d) {
+	for range time.Tick(d) {
 		CaptureDebugGCStatsOnce(r)
 	}
 }
@@ -41,8 +41,8 @@ func CaptureDebugGCStatsOnce(r Registry) {
 	debug.ReadGCStats(&gcStats)
 	debugMetrics.ReadGCStats.UpdateSince(t)
 
-	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
-	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+	debugMetrics.GCStats.LastGC.Update(gcStats.LastGC.UnixNano())
+	debugMetrics.GCStats.NumGC.Update(gcStats.NumGC)
 	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
 		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
 	}
diff --git a/metrics/debug_test.go b/metrics/debug_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..07eb867841603bb076b940d42760a837fd2de6ff
--- /dev/null
+++ b/metrics/debug_test.go
@@ -0,0 +1,48 @@
+package metrics
+
+import (
+	"runtime"
+	"runtime/debug"
+	"testing"
+	"time"
+)
+
+func BenchmarkDebugGCStats(b *testing.B) {
+	r := NewRegistry()
+	RegisterDebugGCStats(r)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		CaptureDebugGCStatsOnce(r)
+	}
+}
+
+func TestDebugGCStatsBlocking(t *testing.T) {
+	if g := runtime.GOMAXPROCS(0); g < 2 {
+		t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g)
+		return
+	}
+	ch := make(chan int)
+	go testDebugGCStatsBlocking(ch)
+	var gcStats debug.GCStats
+	t0 := time.Now()
+	debug.ReadGCStats(&gcStats)
+	t1 := time.Now()
+	t.Log("i++ during debug.ReadGCStats:", <-ch)
+	go testDebugGCStatsBlocking(ch)
+	d := t1.Sub(t0)
+	t.Log(d)
+	time.Sleep(d)
+	t.Log("i++ during time.Sleep:", <-ch)
+}
+
+func testDebugGCStatsBlocking(ch chan int) {
+	i := 0
+	for {
+		select {
+		case ch <- i:
+			return
+		default:
+			i++
+		}
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/metrics/ewma.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/ewma.go
rename to metrics/ewma.go
index 694a1d03307a4d46931dd4e5a4d220efe99e9029..3aecd4fa35a1795a0c2391eb64be966f5eda6c05 100644
--- a/vendor/github.com/rcrowley/go-metrics/ewma.go
+++ b/metrics/ewma.go
@@ -17,7 +17,7 @@ type EWMA interface {
 
 // NewEWMA constructs a new EWMA with the given alpha.
 func NewEWMA(alpha float64) EWMA {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilEWMA{}
 	}
 	return &StandardEWMA{alpha: alpha}
diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0430fbd24725296ad5b7cfc71a00e151d8846b62
--- /dev/null
+++ b/metrics/ewma_test.go
@@ -0,0 +1,225 @@
+package metrics
+
+import "testing"
+
+func BenchmarkEWMA(b *testing.B) {
+	a := NewEWMA1()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		a.Update(1)
+		a.Tick()
+	}
+}
+
+func TestEWMA1(t *testing.T) {
+	a := NewEWMA1()
+	a.Update(3)
+	a.Tick()
+	if rate := a.Rate(); 0.6 != rate {
+		t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.22072766470286553 != rate {
+		t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.08120116994196772 != rate {
+		t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.029872241020718428 != rate {
+		t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.01098938333324054 != rate {
+		t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.004042768199451294 != rate {
+		t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.0014872513059998212 != rate {
+		t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.0005471291793327122 != rate {
+		t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.00020127757674150815 != rate {
+		t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 7.404588245200814e-05 != rate {
+		t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 2.7239957857491083e-05 != rate {
+		t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 1.0021020474147462e-05 != rate {
+		t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 3.6865274119969525e-06 != rate {
+		t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 1.3561976441886433e-06 != rate {
+		t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 4.989172314621449e-07 != rate {
+		t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 1.8354139230109722e-07 != rate {
+		t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
+	}
+}
+
+func TestEWMA5(t *testing.T) {
+	a := NewEWMA5()
+	a.Update(3)
+	a.Tick()
+	if rate := a.Rate(); 0.6 != rate {
+		t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.49123845184678905 != rate {
+		t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.4021920276213837 != rate {
+		t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.32928698165641596 != rate {
+		t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.269597378470333 != rate {
+		t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.2207276647028654 != rate {
+		t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.18071652714732128 != rate {
+		t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.14795817836496392 != rate {
+		t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.12113791079679326 != rate {
+		t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.09917933293295193 != rate {
+		t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.08120116994196763 != rate {
+		t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.06648189501740036 != rate {
+		t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.05443077197364752 != rate {
+		t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.04456414692860035 != rate {
+		t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.03648603757513079 != rate {
+		t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
+		t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
+	}
+}
+
+func TestEWMA15(t *testing.T) {
+	a := NewEWMA15()
+	a.Update(3)
+	a.Tick()
+	if rate := a.Rate(); 0.6 != rate {
+		t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.5613041910189706 != rate {
+		t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.5251039914257684 != rate {
+		t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.4912384518467888184678905 != rate {
+		t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.459557003018789 != rate {
+		t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.4299187863442732 != rate {
+		t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.4021920276213831 != rate {
+		t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.37625345116383313 != rate {
+		t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.3519877317060185 != rate {
+		t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.3292869816564153165641596 != rate {
+		t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.3080502714195546 != rate {
+		t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.2881831806538789 != rate {
+		t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.26959737847033216 != rate {
+		t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.2522102307052083 != rate {
+		t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.23594443252115815 != rate {
+		t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
+	}
+	elapseMinute(a)
+	if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
+		t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
+	}
+}
+
+func elapseMinute(a EWMA) {
+	for i := 0; i < 12; i++ {
+		a.Tick()
+	}
+}
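
The expected values in these tests follow from the EWMA decay: each Tick covers 5 seconds, so elapseMinute's 12 ticks advance one minute, and a 1-minute EWMA decays an idle rate by e^(-1) per minute. A quick check of the first two TestEWMA1 expectations:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// 3 events in the first 5-second tick => 3/5 = 0.6 events/sec.
	rate := 3.0 / 5.0
	fmt.Println(rate) // 0.6

	// One idle minute later the 1-minute EWMA has decayed by e^(-1):
	fmt.Println(rate * math.Exp(-1)) // ≈ 0.22072766470286553
}
```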
diff --git a/vendor/github.com/rcrowley/go-metrics/exp/exp.go b/metrics/exp/exp.go
similarity index 73%
rename from vendor/github.com/rcrowley/go-metrics/exp/exp.go
rename to metrics/exp/exp.go
index 11dd3f898a4268642bdeff0342b130ffa2c0ce49..c19d00a94d6e804060722c5eb7568f40ca112970 100644
--- a/vendor/github.com/rcrowley/go-metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -8,7 +8,7 @@ import (
 	"net/http"
 	"sync"
 
-	"github.com/rcrowley/go-metrics"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 type exp struct {
@@ -97,22 +97,22 @@ func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
 	exp.getInt(name + ".count").Set(h.Count())
 	exp.getFloat(name + ".min").Set(float64(h.Min()))
 	exp.getFloat(name + ".max").Set(float64(h.Max()))
-	exp.getFloat(name + ".mean").Set(float64(h.Mean()))
-	exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
-	exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
-	exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
-	exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
-	exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
-	exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
+	exp.getFloat(name + ".mean").Set(h.Mean())
+	exp.getFloat(name + ".std-dev").Set(h.StdDev())
+	exp.getFloat(name + ".50-percentile").Set(ps[0])
+	exp.getFloat(name + ".75-percentile").Set(ps[1])
+	exp.getFloat(name + ".95-percentile").Set(ps[2])
+	exp.getFloat(name + ".99-percentile").Set(ps[3])
+	exp.getFloat(name + ".999-percentile").Set(ps[4])
 }
 
 func (exp *exp) publishMeter(name string, metric metrics.Meter) {
 	m := metric.Snapshot()
 	exp.getInt(name + ".count").Set(m.Count())
-	exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
-	exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
-	exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
-	exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
+	exp.getFloat(name + ".one-minute").Set(m.Rate1())
+	exp.getFloat(name + ".five-minute").Set(m.Rate5())
+	exp.getFloat(name + ".fifteen-minute").Set((m.Rate15()))
+	exp.getFloat(name + ".mean").Set(m.RateMean())
 }
 
 func (exp *exp) publishTimer(name string, metric metrics.Timer) {
@@ -121,17 +121,17 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) {
 	exp.getInt(name + ".count").Set(t.Count())
 	exp.getFloat(name + ".min").Set(float64(t.Min()))
 	exp.getFloat(name + ".max").Set(float64(t.Max()))
-	exp.getFloat(name + ".mean").Set(float64(t.Mean()))
-	exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
-	exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
-	exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
-	exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
-	exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
-	exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
-	exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
-	exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
-	exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
-	exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
+	exp.getFloat(name + ".mean").Set(t.Mean())
+	exp.getFloat(name + ".std-dev").Set(t.StdDev())
+	exp.getFloat(name + ".50-percentile").Set(ps[0])
+	exp.getFloat(name + ".75-percentile").Set(ps[1])
+	exp.getFloat(name + ".95-percentile").Set(ps[2])
+	exp.getFloat(name + ".99-percentile").Set(ps[3])
+	exp.getFloat(name + ".999-percentile").Set(ps[4])
+	exp.getFloat(name + ".one-minute").Set(t.Rate1())
+	exp.getFloat(name + ".five-minute").Set(t.Rate5())
+	exp.getFloat(name + ".fifteen-minute").Set(t.Rate15())
+	exp.getFloat(name + ".mean-rate").Set(t.RateMean())
 }
 
 func (exp *exp) syncToExpvar() {
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/metrics/gauge.go
similarity index 98%
rename from vendor/github.com/rcrowley/go-metrics/gauge.go
rename to metrics/gauge.go
index cb57a93889fc23b01222be3181510a169b856e76..0fbfdb86033b03c725130faed2043bc37a564b2a 100644
--- a/vendor/github.com/rcrowley/go-metrics/gauge.go
+++ b/metrics/gauge.go
@@ -20,7 +20,7 @@ func GetOrRegisterGauge(name string, r Registry) Gauge {
 
 // NewGauge constructs a new StandardGauge.
 func NewGauge() Gauge {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilGauge{}
 	}
 	return &StandardGauge{0}
@@ -38,7 +38,7 @@ func NewRegisteredGauge(name string, r Registry) Gauge {
 
 // NewFunctionalGauge constructs a new FunctionalGauge.
 func NewFunctionalGauge(f func() int64) Gauge {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilGauge{}
 	}
 	return &FunctionalGauge{value: f}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/metrics/gauge_float64.go
similarity index 98%
rename from vendor/github.com/rcrowley/go-metrics/gauge_float64.go
rename to metrics/gauge_float64.go
index 6f93920b2c00b743cb9891f4dcd609d813eaa2ab..66819c957774703e520c43df501939c1625c7e4f 100644
--- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
+++ b/metrics/gauge_float64.go
@@ -20,7 +20,7 @@ func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
 
 // NewGaugeFloat64 constructs a new StandardGaugeFloat64.
 func NewGaugeFloat64() GaugeFloat64 {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilGaugeFloat64{}
 	}
 	return &StandardGaugeFloat64{
@@ -40,7 +40,7 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
 
 // NewFunctionalGauge constructs a new FunctionalGauge.
 func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilGaugeFloat64{}
 	}
 	return &FunctionalGaugeFloat64{value: f}
diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..99e62a40302f67b089889c60571f0e07c5c3a8d1
--- /dev/null
+++ b/metrics/gauge_float64_test.go
@@ -0,0 +1,59 @@
+package metrics
+
+import "testing"
+
+func BenchmarkGaugeFloat64(b *testing.B) {
+	g := NewGaugeFloat64()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		g.Update(float64(i))
+	}
+}
+
+func TestGaugeFloat64(t *testing.T) {
+	g := NewGaugeFloat64()
+	g.Update(float64(47.0))
+	if v := g.Value(); float64(47.0) != v {
+		t.Errorf("g.Value(): 47.0 != %v\n", v)
+	}
+}
+
+func TestGaugeFloat64Snapshot(t *testing.T) {
+	g := NewGaugeFloat64()
+	g.Update(float64(47.0))
+	snapshot := g.Snapshot()
+	g.Update(float64(0))
+	if v := snapshot.Value(); float64(47.0) != v {
+		t.Errorf("g.Value(): 47.0 != %v\n", v)
+	}
+}
+
+func TestGetOrRegisterGaugeFloat64(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0))
+	t.Logf("registry: %v", r)
+	if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() {
+		t.Fatal(g)
+	}
+}
+
+func TestFunctionalGaugeFloat64(t *testing.T) {
+	var counter float64
+	fg := NewFunctionalGaugeFloat64(func() float64 {
+		counter++
+		return counter
+	})
+	fg.Value()
+	fg.Value()
+	if counter != 2 {
+		t.Error("counter != 2")
+	}
+}
+
+func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 })
+	if g := GetOrRegisterGaugeFloat64("foo", r); 47 != g.Value() {
+		t.Fatal(g)
+	}
+}
diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f2603d33985c2e6bddcd07662c79168e499c11c
--- /dev/null
+++ b/metrics/gauge_test.go
@@ -0,0 +1,68 @@
+package metrics
+
+import (
+	"fmt"
+	"testing"
+)
+
+func BenchmarkGauge(b *testing.B) {
+	g := NewGauge()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		g.Update(int64(i))
+	}
+}
+
+func TestGauge(t *testing.T) {
+	g := NewGauge()
+	g.Update(int64(47))
+	if v := g.Value(); 47 != v {
+		t.Errorf("g.Value(): 47 != %v\n", v)
+	}
+}
+
+func TestGaugeSnapshot(t *testing.T) {
+	g := NewGauge()
+	g.Update(int64(47))
+	snapshot := g.Snapshot()
+	g.Update(int64(0))
+	if v := snapshot.Value(); 47 != v {
+		t.Errorf("g.Value(): 47 != %v\n", v)
+	}
+}
+
+func TestGetOrRegisterGauge(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredGauge("foo", r).Update(47)
+	if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
+		t.Fatal(g)
+	}
+}
+
+func TestFunctionalGauge(t *testing.T) {
+	var counter int64
+	fg := NewFunctionalGauge(func() int64 {
+		counter++
+		return counter
+	})
+	fg.Value()
+	fg.Value()
+	if counter != 2 {
+		t.Error("counter != 2")
+	}
+}
+
+func TestGetOrRegisterFunctionalGauge(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredFunctionalGauge("foo", r, func() int64 { return 47 })
+	if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
+		t.Fatal(g)
+	}
+}
+
+func ExampleGetOrRegisterGauge() {
+	m := "server.bytes_sent"
+	g := GetOrRegisterGauge(m, nil)
+	g.Update(47)
+	fmt.Println(g.Value()) // Output: 47
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/metrics/graphite.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/graphite.go
rename to metrics/graphite.go
index abd0a7d2918bcf6500dfa5c0e2639a24ede495bd..142eec86beb4a927590fa02326083ab18323d9e7 100644
--- a/vendor/github.com/rcrowley/go-metrics/graphite.go
+++ b/metrics/graphite.go
@@ -39,7 +39,7 @@ func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
 // but it takes a GraphiteConfig instead.
 func GraphiteWithConfig(c GraphiteConfig) {
 	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
-	for _ = range time.Tick(c.FlushInterval) {
+	for range time.Tick(c.FlushInterval) {
 		if err := graphite(&c); nil != err {
 			log.Println(err)
 		}
diff --git a/metrics/graphite_test.go b/metrics/graphite_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c797c781df6fe25b6a03067288252a44c93a49ad
--- /dev/null
+++ b/metrics/graphite_test.go
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+	"net"
+	"time"
+)
+
+func ExampleGraphite() {
+	addr, _ := net.ResolveTCPAddr("net", ":2003")
+	go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
+}
+
+func ExampleGraphiteWithConfig() {
+	addr, _ := net.ResolveTCPAddr("net", ":2003")
+	go GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      DefaultRegistry,
+		FlushInterval: 1 * time.Second,
+		DurationUnit:  time.Millisecond,
+		Percentiles:   []float64{0.5, 0.75, 0.99, 0.999},
+	})
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/metrics/healthcheck.go
similarity index 98%
rename from vendor/github.com/rcrowley/go-metrics/healthcheck.go
rename to metrics/healthcheck.go
index 445131caee596ff3fe323c720557c4075455a982..f1ae31e34aee4ba993f28892c39b4261dcfb6473 100644
--- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go
+++ b/metrics/healthcheck.go
@@ -11,7 +11,7 @@ type Healthcheck interface {
 // NewHealthcheck constructs a new Healthcheck which will use the given
 // function to update its status.
 func NewHealthcheck(f func(Healthcheck)) Healthcheck {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilHealthcheck{}
 	}
 	return &StandardHealthcheck{nil, f}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/metrics/histogram.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/histogram.go
rename to metrics/histogram.go
index dbc837fe4d9541f5a3d93f1a4aab00a433edf163..46f3bbd2f138bf09c44d6d1f3c034d94ad568acf 100644
--- a/vendor/github.com/rcrowley/go-metrics/histogram.go
+++ b/metrics/histogram.go
@@ -28,7 +28,7 @@ func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
 
 // NewHistogram constructs a new StandardHistogram from a Sample.
 func NewHistogram(s Sample) Histogram {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilHistogram{}
 	}
 	return &StandardHistogram{sample: s}
diff --git a/metrics/histogram_test.go b/metrics/histogram_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7f4f0171cf0904ded7466c3dca75b7581670fb9
--- /dev/null
+++ b/metrics/histogram_test.go
@@ -0,0 +1,95 @@
+package metrics
+
+import "testing"
+
+func BenchmarkHistogram(b *testing.B) {
+	h := NewHistogram(NewUniformSample(100))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		h.Update(int64(i))
+	}
+}
+
+func TestGetOrRegisterHistogram(t *testing.T) {
+	r := NewRegistry()
+	s := NewUniformSample(100)
+	NewRegisteredHistogram("foo", r, s).Update(47)
+	if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() {
+		t.Fatal(h)
+	}
+}
+
+func TestHistogram10000(t *testing.T) {
+	h := NewHistogram(NewUniformSample(100000))
+	for i := 1; i <= 10000; i++ {
+		h.Update(int64(i))
+	}
+	testHistogram10000(t, h)
+}
+
+func TestHistogramEmpty(t *testing.T) {
+	h := NewHistogram(NewUniformSample(100))
+	if count := h.Count(); 0 != count {
+		t.Errorf("h.Count(): 0 != %v\n", count)
+	}
+	if min := h.Min(); 0 != min {
+		t.Errorf("h.Min(): 0 != %v\n", min)
+	}
+	if max := h.Max(); 0 != max {
+		t.Errorf("h.Max(): 0 != %v\n", max)
+	}
+	if mean := h.Mean(); 0.0 != mean {
+		t.Errorf("h.Mean(): 0.0 != %v\n", mean)
+	}
+	if stdDev := h.StdDev(); 0.0 != stdDev {
+		t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
+	}
+	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 0.0 != ps[0] {
+		t.Errorf("median: 0.0 != %v\n", ps[0])
+	}
+	if 0.0 != ps[1] {
+		t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
+	}
+	if 0.0 != ps[2] {
+		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
+	}
+}
+
+func TestHistogramSnapshot(t *testing.T) {
+	h := NewHistogram(NewUniformSample(100000))
+	for i := 1; i <= 10000; i++ {
+		h.Update(int64(i))
+	}
+	snapshot := h.Snapshot()
+	h.Update(0)
+	testHistogram10000(t, snapshot)
+}
+
+func testHistogram10000(t *testing.T, h Histogram) {
+	if count := h.Count(); 10000 != count {
+		t.Errorf("h.Count(): 10000 != %v\n", count)
+	}
+	if min := h.Min(); 1 != min {
+		t.Errorf("h.Min(): 1 != %v\n", min)
+	}
+	if max := h.Max(); 10000 != max {
+		t.Errorf("h.Max(): 10000 != %v\n", max)
+	}
+	if mean := h.Mean(); 5000.5 != mean {
+		t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
+	}
+	if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
+		t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
+	}
+	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 5000.5 != ps[0] {
+		t.Errorf("median: 5000.5 != %v\n", ps[0])
+	}
+	if 7500.75 != ps[1] {
+		t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
+	}
+	if 9900.99 != ps[2] {
+		t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
+	}
+}
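
The percentile expectations come from the linear interpolation go-metrics uses over a sorted sample: the p-th percentile sits at position x = p·(n+1), interpolated between the neighbouring values. A sketch of that interpolation, specialised to the 1..10000 sample where values[int(x)-1] = int(x) and consecutive values differ by 1:

```go
package main

import "fmt"

// percentile interpolates position x = p*(n+1) over the sorted values 1..n.
func percentile(n int, p float64) float64 {
	x := p * float64(n+1)
	lower := float64(int(x))
	return lower + (x - float64(int(x))) // lower + fraction*(upper-lower)
}

func main() {
	fmt.Println(percentile(10000, 0.5))  // 5000.5
	fmt.Println(percentile(10000, 0.75)) // 7500.75
	fmt.Println(percentile(10000, 0.99)) // 9900.99
}
```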
diff --git a/metrics/influxdb/LICENSE b/metrics/influxdb/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e5bf20cdb5677a760d588f37a82fefab5305b91d
--- /dev/null
+++ b/metrics/influxdb/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Vincent Rischmann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/metrics/influxdb/README.md b/metrics/influxdb/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b76b1a3f99b60108c00bfec4e909404444b4b35c
--- /dev/null
+++ b/metrics/influxdb/README.md
@@ -0,0 +1,30 @@
+go-metrics-influxdb
+===================
+
+This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics) library which will post the metrics to [InfluxDB](https://influxdb.com/).
+
+Note
+----
+
+This is only compatible with InfluxDB 0.9+.
+
+Usage
+-----
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(
+    metrics.DefaultRegistry, // metrics registry
+    time.Second * 10,        // interval
+    "http://localhost:8086", // the InfluxDB url
+    "mydb",                  // your InfluxDB database
+    "myuser",                // your InfluxDB user
+    "mypassword",            // your InfluxDB password
+)
+```
+
+License
+-------
+
+go-metrics-influxdb is licensed under the MIT license. See the LICENSE file for details.
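
One design point worth noting in the reporter that follows: counters are posted to InfluxDB as deltas since the previous flush, not absolutes, via a per-name cache. A hedged sketch of that logic in isolation:

```go
package main

import "fmt"

// deltaCache mirrors the reporter's counter handling: report the increment
// since the last flush, then remember the new absolute count.
type deltaCache map[string]int64

func (c deltaCache) delta(name string, count int64) int64 {
	d := count - c[name]
	c[name] = count
	return d
}

func main() {
	cache := deltaCache{}
	fmt.Println(cache.delta("txpool/invalid", 5)) // 5
	fmt.Println(cache.delta("txpool/invalid", 8)) // 3
}
```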
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5cb4da66fadb6ed31350710971447bce29c272e
--- /dev/null
+++ b/metrics/influxdb/influxdb.go
@@ -0,0 +1,227 @@
+package influxdb
+
+import (
+	"fmt"
+	"log"
+	uurl "net/url"
+	"time"
+
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/influxdata/influxdb/client"
+)
+
+type reporter struct {
+	reg      metrics.Registry
+	interval time.Duration
+
+	url       uurl.URL
+	database  string
+	username  string
+	password  string
+	namespace string
+	tags      map[string]string
+
+	client *client.Client
+
+	cache map[string]int64
+}
+
+// InfluxDB starts an InfluxDB reporter which will post the metrics from the given metrics.Registry at each d interval.
+func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
+	InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
+}
+
+// InfluxDBWithTags starts an InfluxDB reporter which will post the metrics from the given metrics.Registry at each d interval with the specified tags.
+func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
+	u, err := uurl.Parse(url)
+	if err != nil {
+		log.Printf("unable to parse InfluxDB url %s. err=%v", url, err)
+		return
+	}
+
+	rep := &reporter{
+		reg:       r,
+		interval:  d,
+		url:       *u,
+		database:  database,
+		username:  username,
+		password:  password,
+		namespace: namespace,
+		tags:      tags,
+		cache:     make(map[string]int64),
+	}
+	if err := rep.makeClient(); err != nil {
+		log.Printf("unable to make InfluxDB client. err=%v", err)
+		return
+	}
+
+	rep.run()
+}
+
+func (r *reporter) makeClient() (err error) {
+	r.client, err = client.NewClient(client.Config{
+		URL:      r.url,
+		Username: r.username,
+		Password: r.password,
+	})
+
+	return
+}
+
+func (r *reporter) run() {
+	intervalTicker := time.Tick(r.interval)
+	pingTicker := time.Tick(time.Second * 5)
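+	// Note: time.Tick leaks the underlying tickers, which is acceptable here
+	// since the reporter is expected to run for the lifetime of the process.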
+
+	for {
+		select {
+		case <-intervalTicker:
+			if err := r.send(); err != nil {
+				log.Printf("unable to send to InfluxDB. err=%v", err)
+			}
+		case <-pingTicker:
+			_, _, err := r.client.Ping()
+			if err != nil {
+				log.Printf("got error while sending a ping to InfluxDB, trying to recreate client. err=%v", err)
+
+				if err = r.makeClient(); err != nil {
+					log.Printf("unable to make InfluxDB client. err=%v", err)
+				}
+			}
+		}
+	}
+}
+
+func (r *reporter) send() error {
+	var pts []client.Point
+
+	r.reg.Each(func(name string, i interface{}) {
+		now := time.Now()
+		namespace := r.namespace
+
+		switch metric := i.(type) {
+		case metrics.Counter:
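+			// Counters are cumulative; report the delta against the cached
+			// value so each point carries only the per-interval increase.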
+			v := metric.Count()
+			l := r.cache[name]
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.count", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"value": v - l,
+				},
+				Time: now,
+			})
+			r.cache[name] = v
+		case metrics.Gauge:
+			ms := metric.Snapshot()
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"value": ms.Value(),
+				},
+				Time: now,
+			})
+		case metrics.GaugeFloat64:
+			ms := metric.Snapshot()
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"value": ms.Value(),
+				},
+				Time: now,
+			})
+		case metrics.Histogram:
+			ms := metric.Snapshot()
+			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"count":    ms.Count(),
+					"max":      ms.Max(),
+					"mean":     ms.Mean(),
+					"min":      ms.Min(),
+					"stddev":   ms.StdDev(),
+					"variance": ms.Variance(),
+					"p50":      ps[0],
+					"p75":      ps[1],
+					"p95":      ps[2],
+					"p99":      ps[3],
+					"p999":     ps[4],
+					"p9999":    ps[5],
+				},
+				Time: now,
+			})
+		case metrics.Meter:
+			ms := metric.Snapshot()
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"count": ms.Count(),
+					"m1":    ms.Rate1(),
+					"m5":    ms.Rate5(),
+					"m15":   ms.Rate15(),
+					"mean":  ms.RateMean(),
+				},
+				Time: now,
+			})
+		case metrics.Timer:
+			ms := metric.Snapshot()
+			ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+			pts = append(pts, client.Point{
+				Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
+				Tags:        r.tags,
+				Fields: map[string]interface{}{
+					"count":    ms.Count(),
+					"max":      ms.Max(),
+					"mean":     ms.Mean(),
+					"min":      ms.Min(),
+					"stddev":   ms.StdDev(),
+					"variance": ms.Variance(),
+					"p50":      ps[0],
+					"p75":      ps[1],
+					"p95":      ps[2],
+					"p99":      ps[3],
+					"p999":     ps[4],
+					"p9999":    ps[5],
+					"m1":       ms.Rate1(),
+					"m5":       ms.Rate5(),
+					"m15":      ms.Rate15(),
+					"meanrate": ms.RateMean(),
+				},
+				Time: now,
+			})
+		case metrics.ResettingTimer:
+			t := metric.Snapshot()
+
+			if len(t.Values()) > 0 {
+				ps := t.Percentiles([]float64{50, 95, 99})
+				val := t.Values()
+				pts = append(pts, client.Point{
+					Measurement: fmt.Sprintf("%s%s.span", namespace, name),
+					Tags:        r.tags,
+					Fields: map[string]interface{}{
+						"count": len(val),
+						"max":   val[len(val)-1],
+						"mean":  t.Mean(),
+						"min":   val[0],
+						"p50":   ps[0],
+						"p95":   ps[1],
+						"p99":   ps[2],
+					},
+					Time: now,
+				})
+			}
+		}
+	})
+
+	bps := client.BatchPoints{
+		Points:   pts,
+		Database: r.database,
+	}
+
+	_, err := r.client.Write(bps)
+	return err
+}
diff --git a/metrics/init_test.go b/metrics/init_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..43401e833c345ce2462fe456f1e832e08a56afa7
--- /dev/null
+++ b/metrics/init_test.go
@@ -0,0 +1,5 @@
+package metrics
+
+func init() {
+	Enabled = true
+}
diff --git a/metrics/json.go b/metrics/json.go
new file mode 100644
index 0000000000000000000000000000000000000000..2087d8211eb1f84ee2c073deb509fd0ce1c71c4c
--- /dev/null
+++ b/metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(r.GetAll())
+}
+
+// WriteJSON periodically writes metrics from the given registry to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteJSONOnce(r, w)
+	}
+}
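+
+// For example (hypothetical writer and interval):
+//
+//	go WriteJSON(DefaultRegistry, 5*time.Second, os.Stderr)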
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+	json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.GetAll())
+}
diff --git a/metrics/json_test.go b/metrics/json_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf70051f7a2c05c8cd414b89110e4220b98b7793
--- /dev/null
+++ b/metrics/json_test.go
@@ -0,0 +1,28 @@
+package metrics
+
+import (
+	"bytes"
+	"encoding/json"
+	"testing"
+)
+
+func TestRegistryMarshallJSON(t *testing.T) {
+	b := &bytes.Buffer{}
+	enc := json.NewEncoder(b)
+	r := NewRegistry()
+	r.Register("counter", NewCounter())
+	enc.Encode(r)
+	if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s {
+		t.Fatal(s)
+	}
+}
+
+func TestRegistryWriteJSONOnce(t *testing.T) {
+	r := NewRegistry()
+	r.Register("counter", NewCounter())
+	b := &bytes.Buffer{}
+	WriteJSONOnce(r, b)
+	if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
+		t.Fail()
+	}
+}
diff --git a/metrics/librato/client.go b/metrics/librato/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c0c850e3860b2c5d570e7a74f4b75eb249bbafe
--- /dev/null
+++ b/metrics/librato/client.go
@@ -0,0 +1,102 @@
+package librato
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+const Operations = "operations"
+const OperationsShort = "ops"
+
+type LibratoClient struct {
+	Email, Token string
+}
+
+// property strings
+const (
+	// display attributes
+	Color             = "color"
+	DisplayMax        = "display_max"
+	DisplayMin        = "display_min"
+	DisplayUnitsLong  = "display_units_long"
+	DisplayUnitsShort = "display_units_short"
+	DisplayStacked    = "display_stacked"
+	DisplayTransform  = "display_transform"
+	// special gauge display attributes
+	SummarizeFunction = "summarize_function"
+	Aggregate         = "aggregate"
+
+	// metric keys
+	Name        = "name"
+	Period      = "period"
+	Description = "description"
+	DisplayName = "display_name"
+	Attributes  = "attributes"
+
+	// measurement keys
+	MeasureTime = "measure_time"
+	Source      = "source"
+	Value       = "value"
+
+	// special gauge keys
+	Count      = "count"
+	Sum        = "sum"
+	Max        = "max"
+	Min        = "min"
+	SumSquares = "sum_squares"
+
+	// batch keys
+	Counters = "counters"
+	Gauges   = "gauges"
+
+	MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
+)
+
+type Measurement map[string]interface{}
+type Metric map[string]interface{}
+
+type Batch struct {
+	Gauges      []Measurement `json:"gauges,omitempty"`
+	Counters    []Measurement `json:"counters,omitempty"`
+	MeasureTime int64         `json:"measure_time"`
+	Source      string        `json:"source"`
+}
+
+func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
+	var (
+		js   []byte
+		req  *http.Request
+		resp *http.Response
+	)
+
+	if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
+		return nil
+	}
+
+	if js, err = json.Marshal(batch); err != nil {
+		return
+	}
+
+	if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
+		return
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.SetBasicAuth(self.Email, self.Token)
+
+	if resp, err = http.DefaultClient.Do(req); err != nil {
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		var body []byte
+		if body, err = ioutil.ReadAll(resp.Body); err != nil {
+			body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
+		}
+		err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
+	}
+	return
+}
diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8c8c9ecb7a05d1c66c9dfe6ab723591982951ad
--- /dev/null
+++ b/metrics/librato/librato.go
@@ -0,0 +1,235 @@
+package librato
+
+import (
+	"fmt"
+	"log"
+	"math"
+	"regexp"
+	"time"
+
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+// unitRegexp extracts the unit suffix from the output of time.Duration.String.
+var unitRegexp = regexp.MustCompile(`[^\d]+$`)
+
+// a helper that turns a time.Duration into librato display attributes for timer metrics
+func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
+	attrs = make(map[string]interface{})
+	attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
+	attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
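+	// e.g. d = time.Millisecond yields DisplayTransform "x/1000000" and
+	// DisplayUnitsShort "ms".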
+	return
+}
+
+type Reporter struct {
+	Email, Token    string
+	Namespace       string
+	Source          string
+	Interval        time.Duration
+	Registry        metrics.Registry
+	Percentiles     []float64              // percentiles to report on histogram metrics
+	TimerAttributes map[string]interface{} // units in which timers will be displayed
+	intervalSec     int64
+}
+
+func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
+	return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
+}
+
+func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
+	NewReporter(r, d, e, t, s, p, u).Run()
+}
+
+func (self *Reporter) Run() {
+	log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
+	ticker := time.Tick(self.Interval)
+	metricsApi := &LibratoClient{self.Email, self.Token}
+	for now := range ticker {
+		var metrics Batch
+		var err error
+		if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
+			log.Printf("ERROR constructing librato request body %s", err)
+			continue
+		}
+		if err := metricsApi.PostMetrics(metrics); err != nil {
+			log.Printf("ERROR sending metrics to librato %s", err)
+			continue
+		}
+	}
+}
+
+// calculate sum of squares from data provided by metrics.Histogram
+// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
+func sumSquares(s metrics.Sample) float64 {
+	count := float64(s.Count())
+	sumSquared := math.Pow(count*s.Mean(), 2)
+	sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
+	if math.IsNaN(sumSquares) {
+		return 0.0
+	}
+	return sumSquares
+}
+func sumSquaresTimer(t metrics.Timer) float64 {
+	count := float64(t.Count())
+	sumSquared := math.Pow(count*t.Mean(), 2)
+	sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
+	if math.IsNaN(sumSquares) {
+		return 0.0
+	}
+	return sumSquares
+}
+
+func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
+	snapshot = Batch{
+		// coerce timestamps to a stepping fn so that they line up in Librato graphs
+		MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
+		Source:      self.Source,
+	}
+	snapshot.Gauges = make([]Measurement, 0)
+	snapshot.Counters = make([]Measurement, 0)
+	histogramGaugeCount := 1 + len(self.Percentiles)
+	r.Each(func(name string, metric interface{}) {
+		if self.Namespace != "" {
+			name = fmt.Sprintf("%s.%s", self.Namespace, name)
+		}
+		measurement := Measurement{}
+		measurement[Period] = self.Interval.Seconds()
+		switch m := metric.(type) {
+		case metrics.Counter:
+			if m.Count() > 0 {
+				measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
+				measurement[Value] = float64(m.Count())
+				measurement[Attributes] = map[string]interface{}{
+					DisplayUnitsLong:  Operations,
+					DisplayUnitsShort: OperationsShort,
+					DisplayMin:        "0",
+				}
+				snapshot.Counters = append(snapshot.Counters, measurement)
+			}
+		case metrics.Gauge:
+			measurement[Name] = name
+			measurement[Value] = float64(m.Value())
+			snapshot.Gauges = append(snapshot.Gauges, measurement)
+		case metrics.GaugeFloat64:
+			measurement[Name] = name
+			measurement[Value] = m.Value()
+			snapshot.Gauges = append(snapshot.Gauges, measurement)
+		case metrics.Histogram:
+			if m.Count() > 0 {
+				gauges := make([]Measurement, histogramGaugeCount)
+				s := m.Sample()
+				measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
+				measurement[Count] = uint64(s.Count())
+				measurement[Max] = float64(s.Max())
+				measurement[Min] = float64(s.Min())
+				measurement[Sum] = float64(s.Sum())
+				measurement[SumSquares] = sumSquares(s)
+				gauges[0] = measurement
+				for i, p := range self.Percentiles {
+					gauges[i+1] = Measurement{
+						Name:   fmt.Sprintf("%s.%.2f", measurement[Name], p),
+						Value:  s.Percentile(p),
+						Period: measurement[Period],
+					}
+				}
+				snapshot.Gauges = append(snapshot.Gauges, gauges...)
+			}
+		case metrics.Meter:
+			measurement[Name] = name
+			measurement[Value] = float64(m.Count())
+			snapshot.Counters = append(snapshot.Counters, measurement)
+			snapshot.Gauges = append(snapshot.Gauges,
+				Measurement{
+					Name:   fmt.Sprintf("%s.%s", name, "1min"),
+					Value:  m.Rate1(),
+					Period: int64(self.Interval.Seconds()),
+					Attributes: map[string]interface{}{
+						DisplayUnitsLong:  Operations,
+						DisplayUnitsShort: OperationsShort,
+						DisplayMin:        "0",
+					},
+				},
+				Measurement{
+					Name:   fmt.Sprintf("%s.%s", name, "5min"),
+					Value:  m.Rate5(),
+					Period: int64(self.Interval.Seconds()),
+					Attributes: map[string]interface{}{
+						DisplayUnitsLong:  Operations,
+						DisplayUnitsShort: OperationsShort,
+						DisplayMin:        "0",
+					},
+				},
+				Measurement{
+					Name:   fmt.Sprintf("%s.%s", name, "15min"),
+					Value:  m.Rate15(),
+					Period: int64(self.Interval.Seconds()),
+					Attributes: map[string]interface{}{
+						DisplayUnitsLong:  Operations,
+						DisplayUnitsShort: OperationsShort,
+						DisplayMin:        "0",
+					},
+				},
+			)
+		case metrics.Timer:
+			measurement[Name] = name
+			measurement[Value] = float64(m.Count())
+			snapshot.Counters = append(snapshot.Counters, measurement)
+			if m.Count() > 0 {
+				libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
+				gauges := make([]Measurement, histogramGaugeCount)
+				gauges[0] = Measurement{
+					Name:       libratoName,
+					Count:      uint64(m.Count()),
+					Sum:        m.Mean() * float64(m.Count()),
+					Max:        float64(m.Max()),
+					Min:        float64(m.Min()),
+					SumSquares: sumSquaresTimer(m),
+					Period:     int64(self.Interval.Seconds()),
+					Attributes: self.TimerAttributes,
+				}
+				for i, p := range self.Percentiles {
+					gauges[i+1] = Measurement{
+						Name:       fmt.Sprintf("%s.timer.%2.0f", name, p*100),
+						Value:      m.Percentile(p),
+						Period:     int64(self.Interval.Seconds()),
+						Attributes: self.TimerAttributes,
+					}
+				}
+				snapshot.Gauges = append(snapshot.Gauges, gauges...)
+				snapshot.Gauges = append(snapshot.Gauges,
+					Measurement{
+						Name:   fmt.Sprintf("%s.%s", name, "rate.1min"),
+						Value:  m.Rate1(),
+						Period: int64(self.Interval.Seconds()),
+						Attributes: map[string]interface{}{
+							DisplayUnitsLong:  Operations,
+							DisplayUnitsShort: OperationsShort,
+							DisplayMin:        "0",
+						},
+					},
+					Measurement{
+						Name:   fmt.Sprintf("%s.%s", name, "rate.5min"),
+						Value:  m.Rate5(),
+						Period: int64(self.Interval.Seconds()),
+						Attributes: map[string]interface{}{
+							DisplayUnitsLong:  Operations,
+							DisplayUnitsShort: OperationsShort,
+							DisplayMin:        "0",
+						},
+					},
+					Measurement{
+						Name:   fmt.Sprintf("%s.%s", name, "rate.15min"),
+						Value:  m.Rate15(),
+						Period: int64(self.Interval.Seconds()),
+						Attributes: map[string]interface{}{
+							DisplayUnitsLong:  Operations,
+							DisplayUnitsShort: OperationsShort,
+							DisplayMin:        "0",
+						},
+					},
+				)
+			}
+		}
+	})
+	return
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/metrics/log.go
similarity index 98%
rename from vendor/github.com/rcrowley/go-metrics/log.go
rename to metrics/log.go
index f8074c04576823b117d07ce726249d53b7d8a61b..0c8ea7c97123f1e885bbd5150ad5594e8c45806b 100644
--- a/vendor/github.com/rcrowley/go-metrics/log.go
+++ b/metrics/log.go
@@ -18,7 +18,7 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
 	du := float64(scale)
 	duSuffix := scale.String()[1:]
 
-	for _ = range time.Tick(freq) {
+	for range time.Tick(freq) {
 		r.Each(func(name string, i interface{}) {
 			switch metric := i.(type) {
 			case Counter:
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/metrics/memory.md
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/memory.md
rename to metrics/memory.md
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/metrics/meter.go
similarity index 81%
rename from vendor/github.com/rcrowley/go-metrics/meter.go
rename to metrics/meter.go
index 0389ab0b8f6682c2eac1bc1eab2102fe6543e4bf..82b2141a624bff0e8eb6fd18cb5152a74b5428fd 100644
--- a/vendor/github.com/rcrowley/go-metrics/meter.go
+++ b/metrics/meter.go
@@ -15,10 +15,13 @@ type Meter interface {
 	Rate15() float64
 	RateMean() float64
 	Snapshot() Meter
+	Stop()
 }
 
 // GetOrRegisterMeter returns an existing Meter or constructs and registers a
 // new StandardMeter.
+// Be sure to unregister the meter from the registry once it is no longer
+// needed, so that it can be garbage collected.
 func GetOrRegisterMeter(name string, r Registry) Meter {
 	if nil == r {
 		r = DefaultRegistry
@@ -27,14 +30,15 @@ func GetOrRegisterMeter(name string, r Registry) Meter {
 }
 
 // NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is no longer needed, so that it can
+// be garbage collected.
 func NewMeter() Meter {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilMeter{}
 	}
 	m := newStandardMeter()
 	arbiter.Lock()
 	defer arbiter.Unlock()
-	arbiter.meters = append(arbiter.meters, m)
+	arbiter.meters[m] = struct{}{}
 	if !arbiter.started {
 		arbiter.started = true
 		go arbiter.tick()
@@ -44,6 +48,8 @@ func NewMeter() Meter {
 
 // NewMeter constructs and registers a new StandardMeter and launches a
 // goroutine.
+// Be sure to unregister the meter from the registry once it is no longer
+// needed, so that it can be garbage collected.
 func NewRegisteredMeter(name string, r Registry) Meter {
 	c := NewMeter()
 	if nil == r {
@@ -86,6 +92,9 @@ func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
 // Snapshot returns the snapshot.
 func (m *MeterSnapshot) Snapshot() Meter { return m }
 
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
 // NilMeter is a no-op Meter.
 type NilMeter struct{}
 
@@ -110,12 +119,16 @@ func (NilMeter) RateMean() float64 { return 0.0 }
 // Snapshot is a no-op.
 func (NilMeter) Snapshot() Meter { return NilMeter{} }
 
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
 // StandardMeter is the standard implementation of a Meter.
 type StandardMeter struct {
 	lock        sync.RWMutex
 	snapshot    *MeterSnapshot
 	a1, a5, a15 EWMA
 	startTime   time.Time
+	stopped     bool
 }
 
 func newStandardMeter() *StandardMeter {
@@ -128,6 +141,19 @@ func newStandardMeter() *StandardMeter {
 	}
 }
 
+// Stop stops the meter; Mark() becomes a no-op on a stopped meter.
+func (m *StandardMeter) Stop() {
+	m.lock.Lock()
+	stopped := m.stopped
+	m.stopped = true
+	m.lock.Unlock()
+	if !stopped {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
 // Count returns the number of events recorded.
 func (m *StandardMeter) Count() int64 {
 	m.lock.RLock()
@@ -136,10 +162,13 @@ func (m *StandardMeter) Count() int64 {
 	return count
 }
 
-// Mark records the occurance of n events.
+// Mark records the occurrence of n events.
 func (m *StandardMeter) Mark(n int64) {
 	m.lock.Lock()
 	defer m.lock.Unlock()
+	if m.stopped {
+		return
+	}
 	m.snapshot.count += n
 	m.a1.Update(n)
 	m.a5.Update(n)
@@ -205,29 +234,28 @@ func (m *StandardMeter) tick() {
 	m.updateSnapshot()
 }
 
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are kept in a set so they can be removed when stopped.
 type meterArbiter struct {
 	sync.RWMutex
 	started bool
-	meters  []*StandardMeter
+	meters  map[*StandardMeter]struct{}
 	ticker  *time.Ticker
 }
 
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
+var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})}
 
 // Ticks meters on the scheduled interval
 func (ma *meterArbiter) tick() {
-	for {
-		select {
-		case <-ma.ticker.C:
-			ma.tickMeters()
-		}
+	for range ma.ticker.C {
+		ma.tickMeters()
 	}
 }
 
 func (ma *meterArbiter) tickMeters() {
 	ma.RLock()
 	defer ma.RUnlock()
-	for _, meter := range ma.meters {
+	for meter := range ma.meters {
 		meter.tick()
 	}
 }
diff --git a/metrics/meter_test.go b/metrics/meter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e88922260145ebb422b468c508537a94f2e52766
--- /dev/null
+++ b/metrics/meter_test.go
@@ -0,0 +1,73 @@
+package metrics
+
+import (
+	"testing"
+	"time"
+)
+
+func BenchmarkMeter(b *testing.B) {
+	m := NewMeter()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		m.Mark(1)
+	}
+}
+
+func TestGetOrRegisterMeter(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredMeter("foo", r).Mark(47)
+	if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
+		t.Fatal(m)
+	}
+}
+
+func TestMeterDecay(t *testing.T) {
+	ma := meterArbiter{
+		ticker: time.NewTicker(time.Millisecond),
+		meters: make(map[*StandardMeter]struct{}),
+	}
+	m := newStandardMeter()
+	ma.meters[m] = struct{}{}
+	go ma.tick()
+	m.Mark(1)
+	rateMean := m.RateMean()
+	time.Sleep(100 * time.Millisecond)
+	if m.RateMean() >= rateMean {
+		t.Error("m.RateMean() didn't decrease")
+	}
+}
+
+func TestMeterNonzero(t *testing.T) {
+	m := NewMeter()
+	m.Mark(3)
+	if count := m.Count(); 3 != count {
+		t.Errorf("m.Count(): 3 != %v\n", count)
+	}
+}
+
+func TestMeterStop(t *testing.T) {
+	l := len(arbiter.meters)
+	m := NewMeter()
+	if len(arbiter.meters) != l+1 {
+		t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
+	}
+	m.Stop()
+	if len(arbiter.meters) != l {
+		t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
+	}
+}
+
+func TestMeterSnapshot(t *testing.T) {
+	m := NewMeter()
+	m.Mark(1)
+	if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
+		t.Fatal(snapshot)
+	}
+}
+
+func TestMeterZero(t *testing.T) {
+	m := NewMeter()
+	if count := m.Count(); 0 != count {
+		t.Errorf("m.Count(): 0 != %v\n", count)
+	}
+}
diff --git a/metrics/metrics.go b/metrics/metrics.go
index c82661d8025d79264e5f6ec3c7d24b27189e1342..4e4e3ecb2b5b4584ce1005f571a7fd563b009b25 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -1,20 +1,8 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
+// Go port of Coda Hale's Metrics library
 //
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
+// <https://github.com/rcrowley/go-metrics>
 //
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package metrics provides general system and process level metrics collection.
+// Coda Hale's original work: <https://github.com/codahale/metrics>
 package metrics
 
 import (
@@ -24,17 +12,19 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/rcrowley/go-metrics"
-	"github.com/rcrowley/go-metrics/exp"
 )
 
+// Enabled is checked by the constructor functions for all of the
+// standard metrics. If it is false, the metric returned is a no-op stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
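+//
+// For example, NewMeter returns the no-op NilMeter{} while Enabled is false.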
+var Enabled bool = false
+
 // MetricsEnabledFlag is the CLI flag name to use to enable metrics collections.
 const MetricsEnabledFlag = "metrics"
 const DashboardEnabledFlag = "dashboard"
 
-// Enabled is the flag specifying if metrics are enable or not.
-var Enabled = false
-
 // Init enables or disables the metrics system. Since we need this to run before
 // any other code gets to create meters and timers, we'll actually do an ugly hack
 // and peek into the command line args for the metrics flag.
@@ -45,34 +35,7 @@ func init() {
 			Enabled = true
 		}
 	}
-	exp.Exp(metrics.DefaultRegistry)
-}
-
-// NewCounter create a new metrics Counter, either a real one of a NOP stub depending
-// on the metrics flag.
-func NewCounter(name string) metrics.Counter {
-	if !Enabled {
-		return new(metrics.NilCounter)
-	}
-	return metrics.GetOrRegisterCounter(name, metrics.DefaultRegistry)
-}
-
-// NewMeter create a new metrics Meter, either a real one of a NOP stub depending
-// on the metrics flag.
-func NewMeter(name string) metrics.Meter {
-	if !Enabled {
-		return new(metrics.NilMeter)
-	}
-	return metrics.GetOrRegisterMeter(name, metrics.DefaultRegistry)
-}
-
-// NewTimer create a new metrics Timer, either a real one of a NOP stub depending
-// on the metrics flag.
-func NewTimer(name string) metrics.Timer {
-	if !Enabled {
-		return new(metrics.NilTimer)
-	}
-	return metrics.GetOrRegisterTimer(name, metrics.DefaultRegistry)
+	//exp.Exp(DefaultRegistry)
 }
 
 // CollectProcessMetrics periodically collects various metrics about the running
@@ -90,17 +53,17 @@ func CollectProcessMetrics(refresh time.Duration) {
 		diskstats[i] = new(DiskStats)
 	}
 	// Define the various metrics to collect
-	memAllocs := metrics.GetOrRegisterMeter("system/memory/allocs", metrics.DefaultRegistry)
-	memFrees := metrics.GetOrRegisterMeter("system/memory/frees", metrics.DefaultRegistry)
-	memInuse := metrics.GetOrRegisterMeter("system/memory/inuse", metrics.DefaultRegistry)
-	memPauses := metrics.GetOrRegisterMeter("system/memory/pauses", metrics.DefaultRegistry)
+	memAllocs := GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
+	memFrees := GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
+	memInuse := GetOrRegisterMeter("system/memory/inuse", DefaultRegistry)
+	memPauses := GetOrRegisterMeter("system/memory/pauses", DefaultRegistry)
 
-	var diskReads, diskReadBytes, diskWrites, diskWriteBytes metrics.Meter
+	var diskReads, diskReadBytes, diskWrites, diskWriteBytes Meter
 	if err := ReadDiskStats(diskstats[0]); err == nil {
-		diskReads = metrics.GetOrRegisterMeter("system/disk/readcount", metrics.DefaultRegistry)
-		diskReadBytes = metrics.GetOrRegisterMeter("system/disk/readdata", metrics.DefaultRegistry)
-		diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
-		diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
+		diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
+		diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
+		diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
+		diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
 	} else {
 		log.Debug("Failed to read disk metrics", "err", err)
 	}
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..726fba34758af3a7aad3906aee0332bc5896c8ee
--- /dev/null
+++ b/metrics/metrics_test.go
@@ -0,0 +1,124 @@
+package metrics
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"sync"
+	"testing"
+)
+
+const FANOUT = 128
+
+// Stop the compiler from complaining during debugging.
+var (
+	_ = ioutil.Discard
+	_ = log.LstdFlags
+)
+
+func BenchmarkMetrics(b *testing.B) {
+	r := NewRegistry()
+	c := NewRegisteredCounter("counter", r)
+	g := NewRegisteredGauge("gauge", r)
+	gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
+	h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
+	m := NewRegisteredMeter("meter", r)
+	t := NewRegisteredTimer("timer", r)
+	RegisterDebugGCStats(r)
+	RegisterRuntimeMemStats(r)
+	b.ResetTimer()
+	ch := make(chan bool)
+
+	wgD := &sync.WaitGroup{}
+	/*
+		wgD.Add(1)
+		go func() {
+			defer wgD.Done()
+			//log.Println("go CaptureDebugGCStats")
+			for {
+				select {
+				case <-ch:
+					//log.Println("done CaptureDebugGCStats")
+					return
+				default:
+					CaptureDebugGCStatsOnce(r)
+				}
+			}
+		}()
+	//*/
+
+	wgR := &sync.WaitGroup{}
+	//*
+	wgR.Add(1)
+	go func() {
+		defer wgR.Done()
+		//log.Println("go CaptureRuntimeMemStats")
+		for {
+			select {
+			case <-ch:
+				//log.Println("done CaptureRuntimeMemStats")
+				return
+			default:
+				CaptureRuntimeMemStatsOnce(r)
+			}
+		}
+	}()
+	//*/
+
+	wgW := &sync.WaitGroup{}
+	/*
+		wgW.Add(1)
+		go func() {
+			defer wgW.Done()
+			//log.Println("go Write")
+			for {
+				select {
+				case <-ch:
+					//log.Println("done Write")
+					return
+				default:
+					WriteOnce(r, ioutil.Discard)
+				}
+			}
+		}()
+	//*/
+
+	wg := &sync.WaitGroup{}
+	wg.Add(FANOUT)
+	for i := 0; i < FANOUT; i++ {
+		go func(i int) {
+			defer wg.Done()
+			//log.Println("go", i)
+			for i := 0; i < b.N; i++ {
+				c.Inc(1)
+				g.Update(int64(i))
+				gf.Update(float64(i))
+				h.Update(int64(i))
+				m.Mark(1)
+				t.Update(1)
+			}
+			//log.Println("done", i)
+		}(i)
+	}
+	wg.Wait()
+	close(ch)
+	wgD.Wait()
+	wgR.Wait()
+	wgW.Wait()
+}
+
+func Example() {
+	c := NewCounter()
+	Register("money", c)
+	c.Inc(17)
+
+	// Threadsafe registration
+	t := GetOrRegisterTimer("db.get.latency", nil)
+	t.Time(func() {})
+	t.Update(1)
+
+	fmt.Println(c.Count())
+	fmt.Println(t.Min())
+	// Output: 17
+	// 1
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/metrics/opentsdb.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/opentsdb.go
rename to metrics/opentsdb.go
index 266b6c93d21def6b339d6f14a686095b2c736d12..df7f152ed2ebf7ed66b556c77dd331613e6ba914 100644
--- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go
+++ b/metrics/opentsdb.go
@@ -38,7 +38,7 @@ func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
 // OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
 // but it takes a OpenTSDBConfig instead.
 func OpenTSDBWithConfig(c OpenTSDBConfig) {
-	for _ = range time.Tick(c.FlushInterval) {
+	for range time.Tick(c.FlushInterval) {
 		if err := openTSDB(&c); nil != err {
 			log.Println(err)
 		}
diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c43728960ed5cd3d7f5292c725c214ca7a8e3866
--- /dev/null
+++ b/metrics/opentsdb_test.go
@@ -0,0 +1,21 @@
+package metrics
+
+import (
+	"net"
+	"time"
+)
+
+func ExampleOpenTSDB() {
+	addr, _ := net.ResolveTCPAddr("net", ":2003")
+	go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr)
+}
+
+func ExampleOpenTSDBWithConfig() {
+	addr, _ := net.ResolveTCPAddr("net", ":2003")
+	go OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      DefaultRegistry,
+		FlushInterval: 1 * time.Second,
+		DurationUnit:  time.Millisecond,
+	})
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/metrics/registry.go
similarity index 76%
rename from vendor/github.com/rcrowley/go-metrics/registry.go
rename to metrics/registry.go
index 2bb7a1e7d0fdc8d140c65cbbe609f1d49d57e2c0..cc34c9dfd2c80154f94441887497622f3c90d0ac 100644
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ b/metrics/registry.go
@@ -29,6 +29,9 @@ type Registry interface {
 	// Get the metric by the given name or nil if none is registered.
 	Get(string) interface{}
 
+	// GetAll returns all metrics in the Registry, keyed by name.
+	GetAll() map[string]map[string]interface{}
+
 	// Gets an existing metric or registers the given one.
 	// The interface can be the metric to register if not found in registry,
 	// or a function returning the metric for lazy instantiation.
@@ -109,10 +112,72 @@ func (r *StandardRegistry) RunHealthchecks() {
 	}
 }
 
+// GetAll returns every registered metric flattened into a map of field name
+// to value; MarshalJSON serializes this structure.
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = metric.Error().Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
 // Unregister the metric with the given name.
 func (r *StandardRegistry) Unregister(name string) {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
+	r.stop(name)
 	delete(r.metrics, name)
 }
 
@@ -120,7 +185,8 @@ func (r *StandardRegistry) Unregister(name string) {
 func (r *StandardRegistry) UnregisterAll() {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
-	for name, _ := range r.metrics {
+	for name := range r.metrics {
+		r.stop(name)
 		delete(r.metrics, name)
 	}
 }
@@ -130,7 +196,7 @@ func (r *StandardRegistry) register(name string, i interface{}) error {
 		return DuplicateMetric(name)
 	}
 	switch i.(type) {
-	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
 		r.metrics[name] = i
 	}
 	return nil
@@ -146,6 +212,19 @@ func (r *StandardRegistry) registered() map[string]interface{} {
 	return metrics
 }
 
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable is implemented by metrics that must be stopped when unregistered.
+type Stoppable interface {
+	Stop()
+}
+
 type PrefixedRegistry struct {
 	underlying Registry
 	prefix     string
@@ -216,6 +295,11 @@ func (r *PrefixedRegistry) RunHealthchecks() {
 	r.underlying.RunHealthchecks()
 }
 
+// GetAll returns all metrics from the underlying registry.
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
 // Unregister the metric with the given name. The name will be prefixed.
 func (r *PrefixedRegistry) Unregister(name string) {
 	realName := r.prefix + name
diff --git a/metrics/registry_test.go b/metrics/registry_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a63e485fe986dfb45114fb64e6746e2144f2c5ee
--- /dev/null
+++ b/metrics/registry_test.go
@@ -0,0 +1,305 @@
+package metrics
+
+import (
+	"testing"
+)
+
+func BenchmarkRegistry(b *testing.B) {
+	r := NewRegistry()
+	r.Register("foo", NewCounter())
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		r.Each(func(string, interface{}) {})
+	}
+}
+
+func TestRegistry(t *testing.T) {
+	r := NewRegistry()
+	r.Register("foo", NewCounter())
+	i := 0
+	r.Each(func(name string, iface interface{}) {
+		i++
+		if "foo" != name {
+			t.Fatal(name)
+		}
+		if _, ok := iface.(Counter); !ok {
+			t.Fatal(iface)
+		}
+	})
+	if 1 != i {
+		t.Fatal(i)
+	}
+	r.Unregister("foo")
+	i = 0
+	r.Each(func(string, interface{}) { i++ })
+	if 0 != i {
+		t.Fatal(i)
+	}
+}
+
+func TestRegistryDuplicate(t *testing.T) {
+	r := NewRegistry()
+	if err := r.Register("foo", NewCounter()); nil != err {
+		t.Fatal(err)
+	}
+	if err := r.Register("foo", NewGauge()); nil == err {
+		t.Fatal(err)
+	}
+	i := 0
+	r.Each(func(name string, iface interface{}) {
+		i++
+		if _, ok := iface.(Counter); !ok {
+			t.Fatal(iface)
+		}
+	})
+	if 1 != i {
+		t.Fatal(i)
+	}
+}
+
+func TestRegistryGet(t *testing.T) {
+	r := NewRegistry()
+	r.Register("foo", NewCounter())
+	if count := r.Get("foo").(Counter).Count(); 0 != count {
+		t.Fatal(count)
+	}
+	r.Get("foo").(Counter).Inc(1)
+	if count := r.Get("foo").(Counter).Count(); 1 != count {
+		t.Fatal(count)
+	}
+}
+
+func TestRegistryGetOrRegister(t *testing.T) {
+	r := NewRegistry()
+
+	// First metric wins with GetOrRegister
+	_ = r.GetOrRegister("foo", NewCounter())
+	m := r.GetOrRegister("foo", NewGauge())
+	if _, ok := m.(Counter); !ok {
+		t.Fatal(m)
+	}
+
+	i := 0
+	r.Each(func(name string, iface interface{}) {
+		i++
+		if name != "foo" {
+			t.Fatal(name)
+		}
+		if _, ok := iface.(Counter); !ok {
+			t.Fatal(iface)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
+	r := NewRegistry()
+
+	// First metric wins with GetOrRegister
+	_ = r.GetOrRegister("foo", NewCounter)
+	m := r.GetOrRegister("foo", NewGauge)
+	if _, ok := m.(Counter); !ok {
+		t.Fatal(m)
+	}
+
+	i := 0
+	r.Each(func(name string, iface interface{}) {
+		i++
+		if name != "foo" {
+			t.Fatal(name)
+		}
+		if _, ok := iface.(Counter); !ok {
+			t.Fatal(iface)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestRegistryUnregister(t *testing.T) {
+	l := len(arbiter.meters)
+	r := NewRegistry()
+	r.Register("foo", NewCounter())
+	r.Register("bar", NewMeter())
+	r.Register("baz", NewTimer())
+	if len(arbiter.meters) != l+2 {
+		t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters))
+	}
+	r.Unregister("foo")
+	r.Unregister("bar")
+	r.Unregister("baz")
+	if len(arbiter.meters) != l {
+		t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters))
+	}
+}
+
+func TestPrefixedChildRegistryGetOrRegister(t *testing.T) {
+	r := NewRegistry()
+	pr := NewPrefixedChildRegistry(r, "prefix.")
+
+	_ = pr.GetOrRegister("foo", NewCounter())
+
+	i := 0
+	r.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.foo" {
+			t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestPrefixedRegistryGetOrRegister(t *testing.T) {
+	r := NewPrefixedRegistry("prefix.")
+
+	_ = r.GetOrRegister("foo", NewCounter())
+
+	i := 0
+	r.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.foo" {
+			t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestPrefixedRegistryRegister(t *testing.T) {
+	r := NewPrefixedRegistry("prefix.")
+	err := r.Register("foo", NewCounter())
+	c := NewCounter()
+	Register("bar", c)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	i := 0
+	r.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.foo" {
+			t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestPrefixedRegistryUnregister(t *testing.T) {
+	r := NewPrefixedRegistry("prefix.")
+
+	_ = r.Register("foo", NewCounter())
+
+	i := 0
+	r.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.foo" {
+			t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+
+	r.Unregister("foo")
+
+	i = 0
+	r.Each(func(name string, m interface{}) {
+		i++
+	})
+
+	if i != 0 {
+		t.Fatal(i)
+	}
+}
+
+func TestPrefixedRegistryGet(t *testing.T) {
+	pr := NewPrefixedRegistry("prefix.")
+	name := "foo"
+	pr.Register(name, NewCounter())
+
+	fooCounter := pr.Get(name)
+	if fooCounter == nil {
+		t.Fatal(name)
+	}
+}
+
+func TestPrefixedChildRegistryGet(t *testing.T) {
+	r := NewRegistry()
+	pr := NewPrefixedChildRegistry(r, "prefix.")
+	name := "foo"
+	pr.Register(name, NewCounter())
+	fooCounter := pr.Get(name)
+	if fooCounter == nil {
+		t.Fatal(name)
+	}
+}
+
+func TestChildPrefixedRegistryRegister(t *testing.T) {
+	r := NewPrefixedChildRegistry(DefaultRegistry, "prefix.")
+	err := r.Register("foo", NewCounter())
+	c := NewCounter()
+	Register("bar", c)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	i := 0
+	r.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.foo" {
+			t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestChildPrefixedRegistryOfChildRegister(t *testing.T) {
+	r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
+	r2 := NewPrefixedChildRegistry(r, "prefix2.")
+	err := r.Register("foo2", NewCounter())
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	err = r2.Register("baz", NewCounter())
+	c := NewCounter()
+	Register("bars", c)
+
+	i := 0
+	r2.Each(func(name string, m interface{}) {
+		i++
+		if name != "prefix.prefix2.baz" {
+			//t.Fatal(name)
+		}
+	})
+	if i != 1 {
+		t.Fatal(i)
+	}
+}
+
+func TestWalkRegistries(t *testing.T) {
+	r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
+	r2 := NewPrefixedChildRegistry(r, "prefix2.")
+	err := r.Register("foo2", NewCounter())
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	err = r2.Register("baz", NewCounter())
+	c := NewCounter()
+	Register("bars", c)
+
+	_, prefix := findPrefix(r2, "")
+	if "prefix.prefix2." != prefix {
+		t.Fatal(prefix)
+	}
+
+}
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..57bcb31343fd3ca29d42295eabd317a105f42fea
--- /dev/null
+++ b/metrics/resetting_timer.go
@@ -0,0 +1,237 @@
+package metrics
+
+import (
+	"math"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Initial slice capacity for the values stored in a ResettingTimer
+const InitialResettingTimerSliceCap = 10
+
+// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
+type ResettingTimer interface {
+	Values() []int64
+	Snapshot() ResettingTimer
+	Percentiles([]float64) []int64
+	Mean() float64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+}
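+
+// A typical (hypothetical) use from a flushing reporter:
+//
+//	t := GetOrRegisterResettingTimer("chain/insert", nil)
+//	t.Update(42 * time.Millisecond)
+//	snap := t.Snapshot() // resets the live timer
+//	ps := snap.Percentiles([]float64{50, 95, 99})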
+
+// GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a
+// new StandardResettingTimer.
+func GetOrRegisterResettingTimer(name string, r Registry) ResettingTimer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewResettingTimer).(ResettingTimer)
+}
+
+// NewRegisteredResettingTimer constructs and registers a new StandardResettingTimer.
+func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer {
+	c := NewResettingTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewResettingTimer constructs a new StandardResettingTimer
+func NewResettingTimer() ResettingTimer {
+	if !Enabled {
+		return NilResettingTimer{}
+	}
+	return &StandardResettingTimer{
+		values: make([]int64, 0, InitialResettingTimerSliceCap),
+	}
+}
+
+// NilResettingTimer is a no-op ResettingTimer.
+type NilResettingTimer struct{}
+
+// Values is a no-op.
+func (NilResettingTimer) Values() []int64 { return nil }
+
+// Snapshot is a no-op.
+func (NilResettingTimer) Snapshot() ResettingTimer { return NilResettingTimer{} }
+
+// Time is a no-op.
+func (NilResettingTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilResettingTimer) Update(time.Duration) {}
+
+// Percentiles panics.
+func (NilResettingTimer) Percentiles([]float64) []int64 {
+	panic("Percentiles called on a NilResettingTimer")
+}
+
+// Mean panics.
+func (NilResettingTimer) Mean() float64 {
+	panic("Mean called on a NilResettingTimer")
+}
+
+// UpdateSince is a no-op.
+func (NilResettingTimer) UpdateSince(time.Time) {}
+
+// StandardResettingTimer is the standard implementation of a ResettingTimer.
+type StandardResettingTimer struct {
+	values []int64
+	mutex  sync.Mutex
+}
+
+// Values returns the live, unsynchronized slice of measurements; prefer
+// Snapshot for a consistent view.
+func (t *StandardResettingTimer) Values() []int64 {
+	return t.values
+}
+
+// Snapshot resets the timer and returns a read-only copy of its contents.
+func (t *StandardResettingTimer) Snapshot() ResettingTimer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	currentValues := t.values
+	t.values = make([]int64, 0, InitialResettingTimerSliceCap)
+
+	return &ResettingTimerSnapshot{
+		values: currentValues,
+	}
+}
+
+// Percentiles panics.
+func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
+	panic("Percentiles called on a StandardResettingTimer")
+}
+
+// Mean panics.
+func (t *StandardResettingTimer) Mean() float64 {
+	panic("Mean called on a StandardResettingTimer")
+}
+
+// Time records the duration of the execution of the given function.
+func (t *StandardResettingTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Update records the duration of an event.
+func (t *StandardResettingTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.values = append(t.values, int64(d))
+}
+
+// UpdateSince records the duration of an event that started at ts and ends now.
+func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.values = append(t.values, int64(time.Since(ts)))
+}
+
+// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
+type ResettingTimerSnapshot struct {
+	values              []int64
+	mean                float64
+	thresholdBoundaries []int64
+	calculated          bool
+}
+
+// Snapshot returns the snapshot.
+func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
+
+// Time panics.
+func (*ResettingTimerSnapshot) Time(func()) {
+	panic("Time called on a ResettingTimerSnapshot")
+}
+
+// Update panics.
+func (*ResettingTimerSnapshot) Update(time.Duration) {
+	panic("Update called on a ResettingTimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a ResettingTimerSnapshot")
+}
+
+// Values returns all values from snapshot.
+func (t *ResettingTimerSnapshot) Values() []int64 {
+	return t.values
+}
+
+// Percentiles returns the boundaries for the input percentiles, expressed in
+// the range 0-100; a negative percentile p is interpreted as 100+p.
+func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 {
+	t.calc(percentiles)
+
+	return t.thresholdBoundaries
+}
+
+// Mean returns the mean of the snapshotted values.
+func (t *ResettingTimerSnapshot) Mean() float64 {
+	if !t.calculated {
+		t.calc([]float64{})
+	}
+
+	return t.mean
+}
+
+func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
+	sort.Sort(Int64Slice(t.values))
+
+	count := len(t.values)
+	if count > 0 {
+		min := t.values[0]
+		max := t.values[count-1]
+
+		cumulativeValues := make([]int64, count)
+		cumulativeValues[0] = min
+		for i := 1; i < count; i++ {
+			cumulativeValues[i] = t.values[i] + cumulativeValues[i-1]
+		}
+
+		t.thresholdBoundaries = make([]int64, len(percentiles))
+
+		thresholdBoundary := max
+
+		for i, pct := range percentiles {
+			if count > 1 {
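+				// Percentiles arrive as 0-100; a negative value p is treated
+				// as 100+p (e.g. -5 is effectively p95) and skips the 0-based
+				// index adjustment below.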
+				var abs float64
+				if pct >= 0 {
+					abs = pct
+				} else {
+					abs = 100 + pct
+				}
+				// poor man's math.Round(x):
+				// math.Floor(x + 0.5)
+				indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5))
+				if pct >= 0 {
+					indexOfPerc-- // convert to a 0-based index
+				}
+				thresholdBoundary = t.values[indexOfPerc]
+			}
+
+			t.thresholdBoundaries[i] = thresholdBoundary
+		}
+
+		sum := cumulativeValues[count-1]
+		t.mean = float64(sum) / float64(count)
+	} else {
+		t.thresholdBoundaries = make([]int64, len(percentiles))
+		t.mean = 0
+	}
+
+	t.calculated = true
+}
+
+// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
+type Int64Slice []int64
+
+func (s Int64Slice) Len() int           { return len(s) }
+func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s Int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..58fd47f35245af95518a31f9f88352a4087a3277
--- /dev/null
+++ b/metrics/resetting_timer_test.go
@@ -0,0 +1,106 @@
+package metrics
+
+import (
+	"testing"
+	"time"
+)
+
+func TestResettingTimer(t *testing.T) {
+	tests := []struct {
+		values   []int64
+		start    int
+		end      int
+		wantP50  int64
+		wantP95  int64
+		wantP99  int64
+		wantMean float64
+		wantMin  int64
+		wantMax  int64
+	}{
+		{
+			values:  []int64{},
+			start:   1,
+			end:     11,
+			wantP50: 5, wantP95: 10, wantP99: 10,
+			wantMin: 1, wantMax: 10, wantMean: 5.5,
+		},
+		{
+			values:  []int64{},
+			start:   1,
+			end:     101,
+			wantP50: 50, wantP95: 95, wantP99: 99,
+			wantMin: 1, wantMax: 100, wantMean: 50.5,
+		},
+		{
+			values:  []int64{1},
+			start:   0,
+			end:     0,
+			wantP50: 1, wantP95: 1, wantP99: 1,
+			wantMin: 1, wantMax: 1, wantMean: 1,
+		},
+		{
+			values:  []int64{0},
+			start:   0,
+			end:     0,
+			wantP50: 0, wantP95: 0, wantP99: 0,
+			wantMin: 0, wantMax: 0, wantMean: 0,
+		},
+		{
+			values:  []int64{},
+			start:   0,
+			end:     0,
+			wantP50: 0, wantP95: 0, wantP99: 0,
+			wantMin: 0, wantMax: 0, wantMean: 0,
+		},
+		{
+			values:  []int64{1, 10},
+			start:   0,
+			end:     0,
+			wantP50: 1, wantP95: 10, wantP99: 10,
+			wantMin: 1, wantMax: 10, wantMean: 5.5,
+		},
+	}
+	for ind, tt := range tests {
+		timer := NewResettingTimer()
+
+		for i := tt.start; i < tt.end; i++ {
+			tt.values = append(tt.values, int64(i))
+		}
+
+		for _, v := range tt.values {
+			timer.Update(time.Duration(v))
+		}
+
+		snap := timer.Snapshot()
+
+		ps := snap.Percentiles([]float64{50, 95, 99})
+
+		val := snap.Values()
+
+		if len(val) > 0 {
+			if tt.wantMin != val[0] {
+				t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
+			}
+
+			if tt.wantMax != val[len(val)-1] {
+				t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
+			}
+		}
+
+		if tt.wantMean != snap.Mean() {
+			t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
+		}
+
+		if tt.wantP50 != ps[0] {
+			t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50)
+		}
+
+		if tt.wantP95 != ps[1] {
+			t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95)
+		}
+
+		if tt.wantP99 != ps[2] {
+			t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99)
+		}
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/metrics/runtime.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/runtime.go
rename to metrics/runtime.go
index 11c6b785a0f8fb0490675db5107c7383aa42ad41..9450c479bad708b6933ca4ebc76ce558255fe07d 100644
--- a/vendor/github.com/rcrowley/go-metrics/runtime.go
+++ b/metrics/runtime.go
@@ -55,7 +55,7 @@ var (
 // Capture new values for the Go runtime statistics exported in
 // runtime.MemStats.  This is designed to be called as a goroutine.
 func CaptureRuntimeMemStats(r Registry, d time.Duration) {
-	for _ = range time.Tick(d) {
+	for range time.Tick(d) {
 		CaptureRuntimeMemStatsOnce(r)
 	}
 }
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/metrics/runtime_cgo.go
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
rename to metrics/runtime_cgo.go
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/metrics/runtime_gccpufraction.go
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
rename to metrics/runtime_gccpufraction.go
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/metrics/runtime_no_cgo.go
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
rename to metrics/runtime_no_cgo.go
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/metrics/runtime_no_gccpufraction.go
similarity index 100%
rename from vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
rename to metrics/runtime_no_gccpufraction.go
diff --git a/metrics/runtime_test.go b/metrics/runtime_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ebbfd501a25d9df8dfb183b77cd25925a1626f58
--- /dev/null
+++ b/metrics/runtime_test.go
@@ -0,0 +1,88 @@
+package metrics
+
+import (
+	"runtime"
+	"testing"
+	"time"
+)
+
+func BenchmarkRuntimeMemStats(b *testing.B) {
+	r := NewRegistry()
+	RegisterRuntimeMemStats(r)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		CaptureRuntimeMemStatsOnce(r)
+	}
+}
+
+func TestRuntimeMemStats(t *testing.T) {
+	r := NewRegistry()
+	RegisterRuntimeMemStats(r)
+	CaptureRuntimeMemStatsOnce(r)
+	zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
+	runtime.GC()
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
+		t.Fatal(count - zero)
+	}
+	runtime.GC()
+	runtime.GC()
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
+		t.Fatal(count - zero)
+	}
+	for i := 0; i < 256; i++ {
+		runtime.GC()
+	}
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
+		t.Fatal(count - zero)
+	}
+	for i := 0; i < 257; i++ {
+		runtime.GC()
+	}
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
+		t.Fatal(count - zero)
+	}
+}
+
+func TestRuntimeMemStatsNumThread(t *testing.T) {
+	r := NewRegistry()
+	RegisterRuntimeMemStats(r)
+	CaptureRuntimeMemStatsOnce(r)
+
+	if value := runtimeMetrics.NumThread.Value(); value < 1 {
+		t.Fatalf("got NumThread: %d, wanted at least 1", value)
+	}
+}
+
+func TestRuntimeMemStatsBlocking(t *testing.T) {
+	if g := runtime.GOMAXPROCS(0); g < 2 {
+		t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
+	}
+	ch := make(chan int)
+	go testRuntimeMemStatsBlocking(ch)
+	var memStats runtime.MemStats
+	t0 := time.Now()
+	runtime.ReadMemStats(&memStats)
+	t1 := time.Now()
+	t.Log("i++ during runtime.ReadMemStats:", <-ch)
+	go testRuntimeMemStatsBlocking(ch)
+	d := t1.Sub(t0)
+	t.Log(d)
+	time.Sleep(d)
+	t.Log("i++ during time.Sleep:", <-ch)
+}
+
+func testRuntimeMemStatsBlocking(ch chan int) {
+	i := 0
+	for {
+		select {
+		case ch <- i:
+			return
+		default:
+			i++
+		}
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/metrics/sample.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/sample.go
rename to metrics/sample.go
index fecee5ef68ba78000a53a6aee4c9a5c10137fe4e..5c4845a4f8411d804d3777f61bbfe22b692ba872 100644
--- a/vendor/github.com/rcrowley/go-metrics/sample.go
+++ b/metrics/sample.go
@@ -46,7 +46,7 @@ type ExpDecaySample struct {
 // NewExpDecaySample constructs a new exponentially-decaying sample with the
 // given reservoir size and alpha.
 func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilSample{}
 	}
 	s := &ExpDecaySample{
@@ -407,7 +407,7 @@ type UniformSample struct {
 // NewUniformSample constructs a new uniform sample with the given reservoir
 // size.
 func NewUniformSample(reservoirSize int) Sample {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilSample{}
 	}
 	return &UniformSample{
diff --git a/metrics/sample_test.go b/metrics/sample_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d60e99c5bba7accae97a5c8861b4e41f44842035
--- /dev/null
+++ b/metrics/sample_test.go
@@ -0,0 +1,363 @@
+package metrics
+
+import (
+	"math/rand"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
+// expensive computations like Variance, the cost of copying the Sample, as
+// approximated by a make and copy, is much greater than the cost of the
+// computation for small samples and only slightly less for large samples.
+func BenchmarkCompute1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCompute1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCopy1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+func BenchmarkCopy1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+
+func BenchmarkExpDecaySample257(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(257, 0.015))
+}
+
+func BenchmarkExpDecaySample514(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(514, 0.015))
+}
+
+func BenchmarkExpDecaySample1028(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
+}
+
+func BenchmarkUniformSample257(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(257))
+}
+
+func BenchmarkUniformSample514(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(514))
+}
+
+func BenchmarkUniformSample1028(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(1028))
+}
+
+func TestExpDecaySample10(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 10; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 10 != size {
+		t.Errorf("s.Count(): 10 != %v\n", size)
+	}
+	if size := s.Size(); 10 != size {
+		t.Errorf("s.Size(): 10 != %v\n", size)
+	}
+	if l := len(s.Values()); 10 != l {
+		t.Errorf("len(s.Values()): 10 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 10 || v < 0 {
+			t.Errorf("out of range [0, 10): %v\n", v)
+		}
+	}
+}
+
+func TestExpDecaySample100(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(1000, 0.01)
+	for i := 0; i < 100; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 100 != size {
+		t.Errorf("s.Count(): 100 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 100 || v < 0 {
+			t.Errorf("out of range [0, 100): %v\n", v)
+		}
+	}
+}
+
+func TestExpDecaySample1000(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 1000; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 1000 || v < 0 {
+			t.Errorf("out of range [0, 1000): %v\n", v)
+		}
+	}
+}
+
+// This test makes sure that the sample's priority is not amplified by using
+// nanosecond duration since start rather than second duration since start.
+// The priority becomes +Inf quickly after starting if this is done,
+// effectively freezing the set of samples until a rescale step happens.
+func TestExpDecaySampleNanosecondRegression(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 100; i++ {
+		s.Update(10)
+	}
+	time.Sleep(1 * time.Millisecond)
+	for i := 0; i < 100; i++ {
+		s.Update(20)
+	}
+	v := s.Values()
+	avg := float64(0)
+	for i := 0; i < len(v); i++ {
+		avg += float64(v[i])
+	}
+	avg /= float64(len(v))
+	if avg > 16 || avg < 14 {
+		t.Errorf("out of range [14, 16]: %v\n", avg)
+	}
+}
+
+func TestExpDecaySampleRescale(t *testing.T) {
+	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
+	s.update(time.Now(), 1)
+	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
+	for _, v := range s.values.Values() {
+		if v.k == 0.0 {
+			t.Fatal("v.k == 0.0")
+		}
+	}
+}
+
+func TestExpDecaySampleSnapshot(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testExpDecaySampleStatistics(t, snapshot)
+}
+
+func TestExpDecaySampleStatistics(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	testExpDecaySampleStatistics(t, s)
+}
+
+func TestUniformSample(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	for i := 0; i < 1000; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 1000 || v < 0 {
+			t.Errorf("out of range [0, 100): %v\n", v)
+		}
+	}
+}
+
+func TestUniformSampleIncludesTail(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	max := 100
+	for i := 0; i < max; i++ {
+		s.Update(int64(i))
+	}
+	v := s.Values()
+	sum := 0
+	exp := (max - 1) * max / 2
+	for i := 0; i < len(v); i++ {
+		sum += int(v[i])
+	}
+	if exp != sum {
+		t.Errorf("sum: %v != %v\n", exp, sum)
+	}
+}
+
+func TestUniformSampleSnapshot(t *testing.T) {
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testUniformSampleStatistics(t, snapshot)
+}
+
+func TestUniformSampleStatistics(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	testUniformSampleStatistics(t, s)
+}
+
+func benchmarkSample(b *testing.B, s Sample) {
+	var memStats runtime.MemStats
+	runtime.ReadMemStats(&memStats)
+	pauseTotalNs := memStats.PauseTotalNs
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Update(1)
+	}
+	b.StopTimer()
+	runtime.GC()
+	runtime.ReadMemStats(&memStats)
+	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
+}
+
+func testExpDecaySampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 107 != min {
+		t.Errorf("s.Min(): 107 != %v\n", min)
+	}
+	if max := s.Max(); 10000 != max {
+		t.Errorf("s.Max(): 10000 != %v\n", max)
+	}
+	if mean := s.Mean(); 4965.98 != mean {
+		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
+		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 4615 != ps[0] {
+		t.Errorf("median: 4615 != %v\n", ps[0])
+	}
+	if 7672 != ps[1] {
+		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
+	}
+	if 9998.99 != ps[2] {
+		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
+	}
+}
+
+func testUniformSampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 37 != min {
+		t.Errorf("s.Min(): 37 != %v\n", min)
+	}
+	if max := s.Max(); 9989 != max {
+		t.Errorf("s.Max(): 9989 != %v\n", max)
+	}
+	if mean := s.Mean(); 4748.14 != mean {
+		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
+		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 4599 != ps[0] {
+		t.Errorf("median: 4599 != %v\n", ps[0])
+	}
+	if 7380.5 != ps[1] {
+		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
+	}
+	if 9986.429999999998 != ps[2] {
+		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
+	}
+}
+
+// TestUniformSampleConcurrentUpdateCount would expose data races between
+// concurrent Update and Count calls on a Sample when the test is run with
+// the -race flag.
+func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in short mode")
+	}
+	s := NewUniformSample(100)
+	for i := 0; i < 100; i++ {
+		s.Update(int64(i))
+	}
+	quit := make(chan struct{})
+	go func() {
+		t := time.NewTicker(10 * time.Millisecond)
+		for {
+			select {
+			case <-t.C:
+				s.Update(rand.Int63())
+			case <-quit:
+				t.Stop()
+				return
+			}
+		}
+	}()
+	for i := 0; i < 1000; i++ {
+		s.Count()
+		time.Sleep(5 * time.Millisecond)
+	}
+	quit <- struct{}{}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/metrics/syslog.go
similarity index 98%
rename from vendor/github.com/rcrowley/go-metrics/syslog.go
rename to metrics/syslog.go
index 693f190855c1e7775c4dee0bcf8aef9c5c08d039..a0ed4b1b2364910231e42a4b5141fef68de2919f 100644
--- a/vendor/github.com/rcrowley/go-metrics/syslog.go
+++ b/metrics/syslog.go
@@ -11,7 +11,7 @@ import (
 // Output each metric in the given registry to syslog periodically using
 // the given syslogger.
 func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
-	for _ = range time.Tick(d) {
+	for range time.Tick(d) {
 		r.Each(func(name string, i interface{}) {
 			switch metric := i.(type) {
 			case Counter:
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/metrics/timer.go
similarity index 93%
rename from vendor/github.com/rcrowley/go-metrics/timer.go
rename to metrics/timer.go
index 17db8f8d202ea5e3ffc67b57bc15267617d923d6..89e22208fde0fa2757ee0f9f4ccd59b2b59934cd 100644
--- a/vendor/github.com/rcrowley/go-metrics/timer.go
+++ b/metrics/timer.go
@@ -19,6 +19,7 @@ type Timer interface {
 	RateMean() float64
 	Snapshot() Timer
 	StdDev() float64
+	Stop()
 	Sum() int64
 	Time(func())
 	Update(time.Duration)
@@ -28,6 +29,8 @@ type Timer interface {
 
 // GetOrRegisterTimer returns an existing Timer or constructs and registers a
 // new StandardTimer.
+// Be sure to unregister the timer from the registry once it is no longer
+// needed to allow for garbage collection.
 func GetOrRegisterTimer(name string, r Registry) Timer {
 	if nil == r {
 		r = DefaultRegistry
@@ -36,8 +39,9 @@ func GetOrRegisterTimer(name string, r Registry) Timer {
 }
 
 // NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is no longer needed to allow for garbage collection.
 func NewCustomTimer(h Histogram, m Meter) Timer {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilTimer{}
 	}
 	return &StandardTimer{
@@ -47,6 +51,8 @@ func NewCustomTimer(h Histogram, m Meter) Timer {
 }
 
 // NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is no longer
+// needed to allow for garbage collection.
 func NewRegisteredTimer(name string, r Registry) Timer {
 	c := NewTimer()
 	if nil == r {
@@ -58,8 +64,9 @@ func NewRegisteredTimer(name string, r Registry) Timer {
 
 // NewTimer constructs a new StandardTimer using an exponentially-decaying
 // sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is no longer needed to allow for garbage collection.
 func NewTimer() Timer {
-	if UseNilMetrics {
+	if !Enabled {
 		return NilTimer{}
 	}
 	return &StandardTimer{
@@ -112,6 +119,9 @@ func (NilTimer) Snapshot() Timer { return NilTimer{} }
 // StdDev is a no-op.
 func (NilTimer) StdDev() float64 { return 0.0 }
 
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
 // Sum is a no-op.
 func (NilTimer) Sum() int64 { return 0 }
 
@@ -201,6 +211,11 @@ func (t *StandardTimer) StdDev() float64 {
 	return t.histogram.StdDev()
 }
 
+// Stop stops the timer's underlying meter.
+func (t *StandardTimer) Stop() {
+	t.meter.Stop()
+}
+
 // Sum returns the sum in the sample.
 func (t *StandardTimer) Sum() int64 {
 	return t.histogram.Sum()
@@ -288,6 +303,9 @@ func (t *TimerSnapshot) Snapshot() Timer { return t }
 // was taken.
 func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
 
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
 // Sum returns the sum at the time the snapshot was taken.
 func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
 
diff --git a/metrics/timer_test.go b/metrics/timer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f85c9b803d3410a1a1f596939e4c4f6d484b4416
--- /dev/null
+++ b/metrics/timer_test.go
@@ -0,0 +1,111 @@
+package metrics
+
+import (
+	"fmt"
+	"math"
+	"testing"
+	"time"
+)
+
+func BenchmarkTimer(b *testing.B) {
+	tm := NewTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		tm.Update(1)
+	}
+}
+
+func TestGetOrRegisterTimer(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredTimer("foo", r).Update(47)
+	if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
+		t.Fatal(tm)
+	}
+}
+
+func TestTimerExtremes(t *testing.T) {
+	tm := NewTimer()
+	tm.Update(math.MaxInt64)
+	tm.Update(0)
+	if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
+		t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
+	}
+}
+
+func TestTimerStop(t *testing.T) {
+	l := len(arbiter.meters)
+	tm := NewTimer()
+	if len(arbiter.meters) != l+1 {
+		t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
+	}
+	tm.Stop()
+	if len(arbiter.meters) != l {
+		t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
+	}
+}
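+
+// ExampleNewTimer_stop is a minimal sketch of the lifecycle exercised above:
+// a StandardTimer keeps its meter registered with the arbiter until Stop is
+// called, so stop timers once they are no longer needed to allow for garbage
+// collection.
+func ExampleNewTimer_stop() {
+	tm := NewTimer()
+	defer tm.Stop() // releases the underlying meter
+	tm.Update(47 * time.Millisecond)
+}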
+
+func TestTimerFunc(t *testing.T) {
+	tm := NewTimer()
+	tm.Time(func() { time.Sleep(50e6) })
+	if max := tm.Max(); 45e6 > max || max > 55e6 {
+		t.Errorf("tm.Max(): 45e6 > %v || %v > 55e6\n", max, max)
+	}
+}
+
+func TestTimerZero(t *testing.T) {
+	tm := NewTimer()
+	if count := tm.Count(); 0 != count {
+		t.Errorf("tm.Count(): 0 != %v\n", count)
+	}
+	if min := tm.Min(); 0 != min {
+		t.Errorf("tm.Min(): 0 != %v\n", min)
+	}
+	if max := tm.Max(); 0 != max {
+		t.Errorf("tm.Max(): 0 != %v\n", max)
+	}
+	if mean := tm.Mean(); 0.0 != mean {
+		t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
+	}
+	if stdDev := tm.StdDev(); 0.0 != stdDev {
+		t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
+	}
+	ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 0.0 != ps[0] {
+		t.Errorf("median: 0.0 != %v\n", ps[0])
+	}
+	if 0.0 != ps[1] {
+		t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
+	}
+	if 0.0 != ps[2] {
+		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
+	}
+	if rate1 := tm.Rate1(); 0.0 != rate1 {
+		t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
+	}
+	if rate5 := tm.Rate5(); 0.0 != rate5 {
+		t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
+	}
+	if rate15 := tm.Rate15(); 0.0 != rate15 {
+		t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
+	}
+	if rateMean := tm.RateMean(); 0.0 != rateMean {
+		t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
+	}
+}
+
+func ExampleGetOrRegisterTimer() {
+	m := "account.create.latency"
+	t := GetOrRegisterTimer(m, nil)
+	t.Update(47)
+	fmt.Println(t.Max()) // Output: 47
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/metrics/validate.sh
similarity index 93%
rename from vendor/github.com/rcrowley/go-metrics/validate.sh
rename to metrics/validate.sh
index f6499982e58652f09bbab46e3ed1243a381fdc11..c4ae91e642d67f338c8bd850595a747178bd5ed2 100755
--- a/vendor/github.com/rcrowley/go-metrics/validate.sh
+++ b/metrics/validate.sh
@@ -7,4 +7,4 @@ GOFMT_LINES=`gofmt -l . | wc -l | xargs`
 test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
 
 # run the tests for the root package
-go test .
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/metrics/writer.go
similarity index 99%
rename from vendor/github.com/rcrowley/go-metrics/writer.go
rename to metrics/writer.go
index 091e971d2e6faecdce68004485bec2ee30442d72..88521a80d9d7a22dc243e346ba86b0cc77a3cd75 100644
--- a/vendor/github.com/rcrowley/go-metrics/writer.go
+++ b/metrics/writer.go
@@ -10,7 +10,7 @@ import (
 // Write sorts writes each metric in the given registry periodically to the
 // given io.Writer.
 func Write(r Registry, d time.Duration, w io.Writer) {
-	for _ = range time.Tick(d) {
+	for range time.Tick(d) {
 		WriteOnce(r, w)
 	}
 }
diff --git a/metrics/writer_test.go b/metrics/writer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1aacc287121b4855a5913ead850f7687314fefdf
--- /dev/null
+++ b/metrics/writer_test.go
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+	"sort"
+	"testing"
+)
+
+func TestMetricsSorting(t *testing.T) {
+	var namedMetrics = namedMetricSlice{
+		{name: "zzz"},
+		{name: "bbb"},
+		{name: "fff"},
+		{name: "ggg"},
+	}
+
+	sort.Sort(namedMetrics)
+	for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
+		if namedMetrics[i].name != name {
+			t.Fail()
+		}
+	}
+}
diff --git a/node/api.go b/node/api.go
index 4e9b1edc4719e0a9a65680224159aeb110e5761e..992a7c4166b02b398095f948fb7674836a729a7c 100644
--- a/node/api.go
+++ b/node/api.go
@@ -24,10 +24,10 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/rcrowley/go-metrics"
 )
 
 // PrivateAdminAPI is the collection of administrative API methods exposed only
diff --git a/p2p/metrics.go b/p2p/metrics.go
index 98b61901d53d22d4a7395232d71354594498748f..4cbff90aca67b44ec4c1fa5911fbaba398ac87b4 100644
--- a/p2p/metrics.go
+++ b/p2p/metrics.go
@@ -25,10 +25,10 @@ import (
 )
 
 var (
-	ingressConnectMeter = metrics.NewMeter("p2p/InboundConnects")
-	ingressTrafficMeter = metrics.NewMeter("p2p/InboundTraffic")
-	egressConnectMeter  = metrics.NewMeter("p2p/OutboundConnects")
-	egressTrafficMeter  = metrics.NewMeter("p2p/OutboundTraffic")
+	ingressConnectMeter = metrics.NewRegisteredMeter("p2p/InboundConnects", nil)
+	ingressTrafficMeter = metrics.NewRegisteredMeter("p2p/InboundTraffic", nil)
+	egressConnectMeter  = metrics.NewRegisteredMeter("p2p/OutboundConnects", nil)
+	egressTrafficMeter  = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil)
 )
 
 // meteredConn is a wrapper around a network TCP connection that meters both the
diff --git a/trie/trie.go b/trie/trie.go
index e37a1ae109f28c5ba35db61d3d27e1c78c55be01..31a404e3a08f84c7c7042df99209b5090e33071e 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -24,7 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/rcrowley/go-metrics"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 var (
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..63cef79ba6f647c960e53808c351f834392f8400
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea6fc69f30d1ab66cfa665ee0729af364d804cba
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,48 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE)
+- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
+- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
+- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE)
+- github.com/influxdata/yarpc [MIT LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE)
+- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE)
+- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
+- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
+- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
+- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE)
+- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE)
+- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
+- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
+- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
+- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt)
+- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE)
+- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE)
+- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE)
+- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)
diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..773a1112289ac71a170b777b4366b1bac8aab800
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/README.md
@@ -0,0 +1,335 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD:
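+
+```shell
+export INFLUX_USER=bubba
+export INFLUX_PWD=bumblebeetuna
+```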
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+	MyDB = "square_holes"
+	username = "bubba"
+	password = "bumblebeetuna"
+)
+
+
+func main() {
+	// Create a new HTTPClient
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr:     "http://localhost:8086",
+		Username: username,
+		Password: password,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a new point batch
+	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  MyDB,
+		Precision: "s",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		log.Fatal(err)
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch
+	if err := c.Write(bp); err != nil {
+		log.Fatal(err)
+	}
+}
+
+```
+
+### Inserting Data
+
+Time series data aka *points* are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch aka
+*batch points* and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time, two
+field values (idle and busy) and three tags (cpu, host and region). We write these
+points to a database called _systemstats_ using a measurement named _cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database's _default_ retention policy.
+
+```go
+
+func writePoints(clnt client.Client) {
+	sampleSize := 1000
+
+	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	rand.Seed(time.Now().UnixNano())
+	for i := 0; i < sampleSize; i++ {
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
+
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		pt, err := client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		)
+		if err != nil {
+			log.Fatal(err)
+		}
+		bp.AddPoint(pt)
+	}
+
+	if err := clnt.Write(bp); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+#### Uint64 Support
+
+The `uint64` data type is supported if your server is version `1.4.0` or
+greater. To write a data point as an unsigned integer, you must insert
+the point as `uint64`. You cannot use `uint` or any of the other
+derivatives because previous versions of the client have supported
+writing those types as an integer.
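+
+For illustration, a point carrying an unsigned field might be built like this
+(the field name here is arbitrary):
+
+```go
+fields := map[string]interface{}{
+	"counter": uint64(42), // must be uint64, not uint
+}
+pt, err := client.NewPoint("cpu_usage", nil, fields, time.Now())
+```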
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using
+familiar SQL constructs. In this example we create a convenience function to query
+the database as follows:
+
+```go
+// queryDB is a convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+	q := client.Query{
+		Command:  cmd,
+		Database: MyDB,
+	}
+	if response, err := clnt.Query(q); err == nil {
+		if response.Error() != nil {
+			return res, response.Error()
+		}
+		res = response.Results
+	} else {
+		return res, err
+	}
+	return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10)
+res, err = queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+	t, err := time.Parse(time.RFC3339, row[0].(string))
+	if err != nil {
+		log.Fatal(err)
+	}
+	val := row[1].(string)
+	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+	// Make client
+	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Create a new point batch
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		panic(err.Error())
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch
+	c.Write(bp)
+}
+```
+
+### Point Splitting
+
+The UDP client now supports splitting single points that exceed the configured
+payload size. The logic for processing each point is listed here, starting with
+an empty payload.
+
+1. If adding the point to the current (non-empty) payload would exceed the
+   configured size, send the current payload. Otherwise, add it to the current
+   payload.
+1. If the point is smaller than the configured size, add it to the payload.
+1. If the point has no timestamp, just try to send the entire point as a single
+   UDP payload, and process the next point.
+1. Since the point has a timestamp, re-use the existing measurement name,
+   tagset, and timestamp and create multiple new points by splitting up the
+   fields. The per-point length will be kept close to the configured size,
+   staying under it if possible. This does mean that one large field, maybe a
+   long string, could be sent as a larger-than-configured payload.
+
+The above logic attempts to respect configured payload sizes, but not sacrifice
+any data integrity. Points without a timestamp can't be split, as that may
+cause fields to have differing timestamps when processed by the server.
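+
+As a sketch, assuming the v2 client's `UDPConfig`, the payload size that the
+splitting logic works against can be set when constructing the client:
+
+```go
+c, err := client.NewUDPClient(client.UDPConfig{
+	Addr:        "localhost:8089",
+	PayloadSize: 512, // maximum UDP payload, in bytes, before points are split
+})
+if err != nil {
+	panic(err.Error())
+}
+defer c.Close()
+```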
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go
new file mode 100644
index 0000000000000000000000000000000000000000..98d362d50ac1c74a3d94cd4356f84361435a1fe0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go
@@ -0,0 +1,840 @@
+// Package client implements a now-deprecated client for InfluxDB;
+// use github.com/influxdata/influxdb/client/v2 instead.
+package client // import "github.com/influxdata/influxdb/client"
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/models"
+)
+
+const (
+	// DefaultHost is the default host used to connect to an InfluxDB instance
+	DefaultHost = "localhost"
+
+	// DefaultPort is the default port used to connect to an InfluxDB instance
+	DefaultPort = 8086
+
+	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+	DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+	Command  string
+	Database string
+
+	// Chunked tells the server to send back chunked responses. This places
+	// less load on the server by sending back chunks of the response rather
+	// than waiting for the entire response all at once.
+	Chunked bool
+
+	// ChunkSize sets the maximum number of rows that will be returned per
+	// chunk. Chunks are either divided based on their series or if they hit
+	// the chunk size limit.
+	//
+	// Chunked must be set to true for this option to be used.
+	ChunkSize int
+}
+
+// ParseConnectionString will parse a string to create a valid connection URL
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+	var host string
+	var port int
+
+	h, p, err := net.SplitHostPort(path)
+	if err != nil {
+		if path == "" {
+			host = DefaultHost
+		} else {
+			host = path
+		}
+		// If they didn't specify a port, always use the default port
+		port = DefaultPort
+	} else {
+		host = h
+		port, err = strconv.Atoi(p)
+		if err != nil {
+			return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
+		}
+	}
+
+	u := url.URL{
+		Scheme: "http",
+	}
+	if ssl {
+		u.Scheme = "https"
+	}
+
+	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+	return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: The URL of the server to connect to.
+// Username/Password are optional. They will be passed via basic auth if provided.
+// UserAgent: If not provided, will default to "InfluxDBClient".
+// Timeout: If not provided, will default to 0 (no timeout).
+type Config struct {
+	URL              url.URL
+	UnixSocket       string
+	Username         string
+	Password         string
+	UserAgent        string
+	Timeout          time.Duration
+	Precision        string
+	WriteConsistency string
+	UnsafeSsl        bool
+}
+
+// NewConfig will create a config to be used in connecting to the server
+func NewConfig() Config {
+	return Config{
+		Timeout: DefaultTimeout,
+	}
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+	url        url.URL
+	unixSocket string
+	username   string
+	password   string
+	httpClient *http.Client
+	userAgent  string
+	precision  string
+}
+
+const (
+	// ConsistencyOne requires at least one data node acknowledged a write.
+	ConsistencyOne = "one"
+
+	// ConsistencyAll requires all data nodes to acknowledge a write.
+	ConsistencyAll = "all"
+
+	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+	ConsistencyQuorum = "quorum"
+
+	// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
+	ConsistencyAny = "any"
+)
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: c.UnsafeSsl,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+	}
+
+	if c.UnixSocket != "" {
+		// No need for compression in local communications.
+		tr.DisableCompression = true
+
+		tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+			return net.Dial("unix", c.UnixSocket)
+		}
+	}
+
+	client := Client{
+		url:        c.URL,
+		unixSocket: c.UnixSocket,
+		username:   c.Username,
+		password:   c.Password,
+		httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
+		userAgent:  c.UserAgent,
+		precision:  c.Precision,
+	}
+	if client.userAgent == "" {
+		client.userAgent = "InfluxDBClient"
+	}
+	return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+	c.username = u
+	c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+	c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+	return c.QueryContext(context.Background(), q)
+}
+
+// QueryContext sends a command to the server and returns the Response
+// It uses a context that can be cancelled by the command line client
+func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
+	u := c.url
+
+	u.Path = "query"
+	values := u.Query()
+	values.Set("q", q.Command)
+	values.Set("db", q.Database)
+	if q.Chunked {
+		values.Set("chunked", "true")
+		if q.ChunkSize > 0 {
+			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+		}
+	}
+	if c.precision != "" {
+		values.Set("epoch", c.precision)
+	}
+	u.RawQuery = values.Encode()
+
+	req, err := http.NewRequest("POST", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	req = req.WithContext(ctx)
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	if q.Chunked {
+		cr := NewChunkedResponse(resp.Body)
+		for {
+			r, err := cr.NextResponse()
+			if err != nil {
+				// If we got an error while decoding the response, send that back.
+				return nil, err
+			}
+
+			if r == nil {
+				break
+			}
+
+			response.Results = append(response.Results, r.Results...)
+			if r.Err != nil {
+				response.Err = r.Err
+				break
+			}
+		}
+	} else {
+		dec := json.NewDecoder(resp.Body)
+		dec.UseNumber()
+		if err := dec.Decode(&response); err != nil {
+			// Ignore EOF errors if we got an invalid status code.
+			if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
+				return nil, err
+			}
+		}
+	}
+
+	// If we don't have an error in our json response, and didn't get StatusOK,
+	// then send back an error.
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+	}
+	return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	var b bytes.Buffer
+	for _, p := range bp.Points {
+		err := checkPointTypes(p)
+		if err != nil {
+			return nil, err
+		}
+		if p.Raw != "" {
+			if _, err := b.WriteString(p.Raw); err != nil {
+				return nil, err
+			}
+		} else {
+			for k, v := range bp.Tags {
+				if p.Tags == nil {
+					p.Tags = make(map[string]string, len(bp.Tags))
+				}
+				p.Tags[k] = v
+			}
+
+			if _, err := b.WriteString(p.MarshalString()); err != nil {
+				return nil, err
+			}
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	precision := bp.Precision
+	if precision == "" {
+		precision = "ns"
+	}
+
+	params := req.URL.Query()
+	params.Set("db", bp.Database)
+	params.Set("rp", bp.RetentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", bp.WriteConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// WriteLineProtocol takes a string with line returns to delimit each write
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	r := strings.NewReader(data)
+
+	req, err := http.NewRequest("POST", u.String(), r)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+	params := req.URL.Query()
+	params.Set("db", database)
+	params.Set("rp", retentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", writeConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		err := fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+	now := time.Now()
+	u := c.url
+	u.Path = "ping"
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return 0, "", err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return 0, "", err
+	}
+	defer resp.Body.Close()
+
+	version := resp.Header.Get("X-Influxdb-Version")
+	return time.Since(now), version, nil
+}
+
+// Structs
+
+// Message represents a user message.
+type Message struct {
+	Level string `json:"level,omitempty"`
+	Text  string `json:"text,omitempty"`
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+	Series   []models.Row
+	Messages []*Message
+	Err      error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Series = r.Series
+	o.Messages = r.Messages
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Series = o.Series
+	r.Messages = o.Messages
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+	Results []Result
+	Err     error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Results = r.Results
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Results = o.Results
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+	if r.Err != nil {
+		return r.Err
+	}
+	for _, result := range r.Results {
+		if result.Err != nil {
+			return result.Err
+		}
+	}
+	return nil
+}
+
+// duplexReader reads responses and writes them to another writer while
+// satisfying the reader interface.
+type duplexReader struct {
+	r io.Reader
+	w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+	n, err = r.r.Read(p)
+	if err == nil {
+		r.w.Write(p[:n])
+	}
+	return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+	dec    *json.Decoder
+	duplex *duplexReader
+	buf    bytes.Buffer
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+	resp := &ChunkedResponse{}
+	resp.duplex = &duplexReader{r: r, w: &resp.buf}
+	resp.dec = json.NewDecoder(resp.duplex)
+	resp.dec.UseNumber()
+	return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+	var response Response
+	if err := r.dec.Decode(&response); err != nil {
+		if err == io.EOF {
+			return nil, nil
+		}
+		// A decoding error happened. This probably means the server crashed
+		// and sent a last-ditch error message to us. Ensure we have read the
+		// entirety of the connection to get any remaining error text.
+		io.Copy(ioutil.Discard, r.duplex)
+		return nil, errors.New(strings.TrimSpace(r.buf.String()))
+	}
+	r.buf.Reset()
+	return &response, nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+	Measurement string
+	Tags        map[string]string
+	Time        time.Time
+	Fields      map[string]interface{}
+	Precision   string
+	Raw         string
+}
+
+// MarshalJSON will format the time in RFC3339Nano
+// Precision is also ignored as it is only used for writing, not reading
+// Or another way to say it is we always send back in nanosecond precision
+func (p *Point) MarshalJSON() ([]byte, error) {
+	point := struct {
+		Measurement string                 `json:"measurement,omitempty"`
+		Tags        map[string]string      `json:"tags,omitempty"`
+		Time        string                 `json:"time,omitempty"`
+		Fields      map[string]interface{} `json:"fields,omitempty"`
+		Precision   string                 `json:"precision,omitempty"`
+	}{
+		Measurement: p.Measurement,
+		Tags:        p.Tags,
+		Fields:      p.Fields,
+		Precision:   p.Precision,
+	}
+	// Let it omit empty if it's really zero
+	if !p.Time.IsZero() {
+		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+	}
+	return json.Marshal(&point)
+}
+
+// MarshalString renders string representation of a Point with specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+	pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
+	if err != nil {
+		return "# ERROR: " + err.Error() + " " + p.Measurement
+	}
+	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+		return pt.String()
+	}
+	return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        time.Time              `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+	var epoch struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        *int64                 `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+
+	if err := func() error {
+		var err error
+		dec := json.NewDecoder(bytes.NewBuffer(b))
+		dec.UseNumber()
+		if err = dec.Decode(&epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time, but only if Time
+		// was actually set.
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		p.Measurement = epoch.Measurement
+		p.Tags = epoch.Tags
+		p.Time = ts
+		p.Precision = epoch.Precision
+		p.Fields = normalizeFields(epoch.Fields)
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	if err := dec.Decode(&normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	p.Measurement = normal.Measurement
+	p.Tags = normal.Tags
+	p.Time = normal.Time
+	p.Precision = normal.Precision
+	p.Fields = normalizeFields(normal.Fields)
+
+	return nil
+}
+
+// Remove any notion of json.Number
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+	newFields := map[string]interface{}{}
+
+	for k, v := range fields {
+		switch v := v.(type) {
+		case json.Number:
+			jv, e := v.Float64()
+			if e != nil {
+				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+			}
+			newFields[k] = jv
+		default:
+			newFields[k] = v
+		}
+	}
+	return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required
+// If no retention policy is specified, it will use the database's default retention policy.
+// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type BatchPoints struct {
+	Points           []Point           `json:"points,omitempty"`
+	Database         string            `json:"database,omitempty"`
+	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
+	Tags             map[string]string `json:"tags,omitempty"`
+	Time             time.Time         `json:"time,omitempty"`
+	Precision        string            `json:"precision,omitempty"`
+	WriteConsistency string            `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            time.Time         `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+	var epoch struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            *int64            `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+
+	if err := func() error {
+		var err error
+		if err = json.Unmarshal(b, &epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		bp.Points = epoch.Points
+		bp.Database = epoch.Database
+		bp.RetentionPolicy = epoch.RetentionPolicy
+		bp.Tags = epoch.Tags
+		bp.Time = ts
+		bp.Precision = epoch.Precision
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	bp.Points = normal.Points
+	bp.Database = normal.Database
+	bp.RetentionPolicy = normal.RetentionPolicy
+	bp.Tags = normal.Tags
+	bp.Time = normal.Time
+	bp.Precision = normal.Precision
+
+	return nil
+}
+
+// utility functions
+
+// Addr provides the address of the server the client is connected to, as a string.
+func (c *Client) Addr() string {
+	if c.unixSocket != "" {
+		return c.unixSocket
+	}
+	return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
+func checkPointTypes(p Point) error {
+	for _, v := range p.Fields {
+		switch v.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil:
+			// supported type; keep checking the remaining fields
+		default:
+			return fmt.Errorf("unsupported point type: %T", v)
+		}
+	}
+	return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return a time.Time.
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+	if precision == "" {
+		precision = "s"
+	}
+	var t time.Time
+	switch precision {
+	case "h":
+		t = time.Unix(0, epoch*int64(time.Hour))
+	case "m":
+		t = time.Unix(0, epoch*int64(time.Minute))
+	case "s":
+		t = time.Unix(0, epoch*int64(time.Second))
+	case "ms":
+		t = time.Unix(0, epoch*int64(time.Millisecond))
+	case "u":
+		t = time.Unix(0, epoch*int64(time.Microsecond))
+	case "n":
+		t = time.Unix(0, epoch)
+	default:
+		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+	}
+	return t, nil
+}
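+
+// Illustrative example (not part of the upstream source):
+// EpochToTime(1257894000, "s") yields the same instant as
+// time.Unix(1257894000, 0), and EpochToTime(1257894000000, "ms")
+// yields that same instant expressed in milliseconds.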
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+	switch precision {
+	case "n":
+	case "u":
+		return t.Round(time.Microsecond)
+	case "ms":
+		return t.Round(time.Millisecond)
+	case "s":
+		return t.Round(time.Second)
+	case "m":
+		return t.Round(time.Minute)
+	case "h":
+		return t.Round(time.Hour)
+	}
+	return t
+}
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..77d44f2b34fbc2364a7abf32232c454fa7a5462c
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go
@@ -0,0 +1,635 @@
+// Package client (v2) is the current official Go client for InfluxDB.
+package client // import "github.com/influxdata/influxdb/client/v2"
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/models"
+)
+
+// HTTPConfig is the config data needed to create an HTTP Client.
+type HTTPConfig struct {
+	// Addr should be of the form "http://host:port"
+	// or "http://[ipv6-host%zone]:port".
+	Addr string
+
+	// Username is the influxdb username, optional.
+	Username string
+
+	// Password is the influxdb password, optional.
+	Password string
+
+	// UserAgent is the http User Agent, defaults to "InfluxDBClient".
+	UserAgent string
+
+	// Timeout for influxdb writes, defaults to no timeout.
+	Timeout time.Duration
+
+	// InsecureSkipVerify gets passed to the http client; if true, it will
+	// skip https certificate verification. Defaults to false.
+	InsecureSkipVerify bool
+
+	// TLSConfig allows the user to set their own TLS config for the HTTP
+	// Client. If set, this option overrides InsecureSkipVerify.
+	TLSConfig *tls.Config
+}
+
+// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
+type BatchPointsConfig struct {
+	// Precision is the write precision of the points, defaults to "ns".
+	Precision string
+
+	// Database is the database to write points to.
+	Database string
+
+	// RetentionPolicy is the retention policy of the points.
+	RetentionPolicy string
+
+	// WriteConsistency is the number of servers required to confirm the write.
+	WriteConsistency string
+}
+
+// Client is a client interface for writing & querying the database.
+type Client interface {
+	// Ping checks the status of the cluster, and will always return 0 time
+	// and no error for UDP clients.
+	Ping(timeout time.Duration) (time.Duration, string, error)
+
+	// Write takes a BatchPoints object and writes all Points to InfluxDB.
+	Write(bp BatchPoints) error
+
+	// Query makes an InfluxDB Query on the database. This will fail if using
+	// the UDP client.
+	Query(q Query) (*Response, error)
+
+	// Close releases any resources a Client may be using.
+	Close() error
+}
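+
+// A minimal usage sketch (illustrative, not part of the upstream source;
+// assumes an InfluxDB instance at http://localhost:8086 and a database
+// named "mydb"):
+//
+//	c, err := NewHTTPClient(HTTPConfig{Addr: "http://localhost:8086"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//
+//	bp, _ := NewBatchPoints(BatchPointsConfig{Database: "mydb", Precision: "s"})
+//	pt, _ := NewPoint("cpu", map[string]string{"host": "a"},
+//		map[string]interface{}{"value": 0.64}, time.Now())
+//	bp.AddPoint(pt)
+//	if err := c.Write(bp); err != nil {
+//		// handle error
+//	}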
+
+// NewHTTPClient returns a new Client from the provided config.
+// Client is safe for concurrent use by multiple goroutines.
+func NewHTTPClient(conf HTTPConfig) (Client, error) {
+	if conf.UserAgent == "" {
+		conf.UserAgent = "InfluxDBClient"
+	}
+
+	u, err := url.Parse(conf.Addr)
+	if err != nil {
+		return nil, err
+	} else if u.Scheme != "http" && u.Scheme != "https" {
+		m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
+			" must start with http:// or https://", u.Scheme)
+		return nil, errors.New(m)
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: conf.InsecureSkipVerify,
+		},
+	}
+	if conf.TLSConfig != nil {
+		tr.TLSClientConfig = conf.TLSConfig
+	}
+	return &client{
+		url:       *u,
+		username:  conf.Username,
+		password:  conf.Password,
+		useragent: conf.UserAgent,
+		httpClient: &http.Client{
+			Timeout:   conf.Timeout,
+			Transport: tr,
+		},
+		transport: tr,
+	}, nil
+}
+
+// Ping will check to see if the server is up with an optional timeout on waiting for leader.
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
+	now := time.Now()
+	u := c.url
+	u.Path = "ping"
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return 0, "", err
+	}
+
+	req.Header.Set("User-Agent", c.useragent)
+
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	if timeout > 0 {
+		params := req.URL.Query()
+		params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
+		req.URL.RawQuery = params.Encode()
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return 0, "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return 0, "", err
+	}
+
+	if resp.StatusCode != http.StatusNoContent {
+		return 0, "", errors.New(string(body))
+	}
+
+	version := resp.Header.Get("X-Influxdb-Version")
+	return time.Since(now), version, nil
+}
+
+// Close releases the client's resources.
+func (c *client) Close() error {
+	c.transport.CloseIdleConnections()
+	return nil
+}
+
+// client is safe for concurrent use as the fields are all read-only
+// once the client is instantiated.
+type client struct {
+	// N.B - if url.UserInfo is accessed in future modifications to the
+	// methods on client, you will need to synchronise access to url.
+	url        url.URL
+	username   string
+	password   string
+	useragent  string
+	httpClient *http.Client
+	transport  *http.Transport
+}
+
+// BatchPoints is an interface into a batched grouping of points to write into
+// InfluxDB together. BatchPoints is NOT thread-safe; you must create a separate
+// batch for each goroutine.
+type BatchPoints interface {
+	// AddPoint adds the given point to the Batch of points.
+	AddPoint(p *Point)
+	// AddPoints adds the given points to the Batch of points.
+	AddPoints(ps []*Point)
+	// Points lists the points in the Batch.
+	Points() []*Point
+
+	// Precision returns the currently set precision of this Batch.
+	Precision() string
+	// SetPrecision sets the precision of this batch.
+	SetPrecision(s string) error
+
+	// Database returns the currently set database of this Batch.
+	Database() string
+	// SetDatabase sets the database of this Batch.
+	SetDatabase(s string)
+
+	// WriteConsistency returns the currently set write consistency of this Batch.
+	WriteConsistency() string
+	// SetWriteConsistency sets the write consistency of this Batch.
+	SetWriteConsistency(s string)
+
+	// RetentionPolicy returns the currently set retention policy of this Batch.
+	RetentionPolicy() string
+	// SetRetentionPolicy sets the retention policy of this Batch.
+	SetRetentionPolicy(s string)
+}
+
+// NewBatchPoints returns a BatchPoints interface based on the given config.
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
+	if conf.Precision == "" {
+		conf.Precision = "ns"
+	}
+	if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
+		return nil, err
+	}
+	bp := &batchpoints{
+		database:         conf.Database,
+		precision:        conf.Precision,
+		retentionPolicy:  conf.RetentionPolicy,
+		writeConsistency: conf.WriteConsistency,
+	}
+	return bp, nil
+}
+
+type batchpoints struct {
+	points           []*Point
+	database         string
+	precision        string
+	retentionPolicy  string
+	writeConsistency string
+}
+
+func (bp *batchpoints) AddPoint(p *Point) {
+	bp.points = append(bp.points, p)
+}
+
+func (bp *batchpoints) AddPoints(ps []*Point) {
+	bp.points = append(bp.points, ps...)
+}
+
+func (bp *batchpoints) Points() []*Point {
+	return bp.points
+}
+
+func (bp *batchpoints) Precision() string {
+	return bp.precision
+}
+
+func (bp *batchpoints) Database() string {
+	return bp.database
+}
+
+func (bp *batchpoints) WriteConsistency() string {
+	return bp.writeConsistency
+}
+
+func (bp *batchpoints) RetentionPolicy() string {
+	return bp.retentionPolicy
+}
+
+func (bp *batchpoints) SetPrecision(p string) error {
+	if _, err := time.ParseDuration("1" + p); err != nil {
+		return err
+	}
+	bp.precision = p
+	return nil
+}
+
+func (bp *batchpoints) SetDatabase(db string) {
+	bp.database = db
+}
+
+func (bp *batchpoints) SetWriteConsistency(wc string) {
+	bp.writeConsistency = wc
+}
+
+func (bp *batchpoints) SetRetentionPolicy(rp string) {
+	bp.retentionPolicy = rp
+}
+
+// Point represents a single data point.
+type Point struct {
+	pt models.Point
+}
+
+// NewPoint returns a point with the given timestamp. If a timestamp is not
+// given, then data is sent to the database without a timestamp, in which case
+// the server will assign local time upon reception. NOTE: it is recommended to
+// send data with a timestamp.
+func NewPoint(
+	name string,
+	tags map[string]string,
+	fields map[string]interface{},
+	t ...time.Time,
+) (*Point, error) {
+	var T time.Time
+	if len(t) > 0 {
+		T = t[0]
+	}
+
+	pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
+	if err != nil {
+		return nil, err
+	}
+	return &Point{
+		pt: pt,
+	}, nil
+}
+
+// String returns a line-protocol string of the Point.
+func (p *Point) String() string {
+	return p.pt.String()
+}
+
+// PrecisionString returns a line-protocol string of the Point,
+// with the timestamp formatted for the given precision.
+func (p *Point) PrecisionString(precision string) string {
+	return p.pt.PrecisionString(precision)
+}
+
+// Name returns the measurement name of the point.
+func (p *Point) Name() string {
+	return string(p.pt.Name())
+}
+
+// Tags returns the tags associated with the point.
+func (p *Point) Tags() map[string]string {
+	return p.pt.Tags().Map()
+}
+
+// Time returns the timestamp for the point.
+func (p *Point) Time() time.Time {
+	return p.pt.Time()
+}
+
+// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.
+func (p *Point) UnixNano() int64 {
+	return p.pt.UnixNano()
+}
+
+// Fields returns the fields for the point.
+func (p *Point) Fields() (map[string]interface{}, error) {
+	return p.pt.Fields()
+}
+
+// NewPointFrom returns a point from the provided models.Point.
+func NewPointFrom(pt models.Point) *Point {
+	return &Point{pt: pt}
+}
+
+func (c *client) Write(bp BatchPoints) error {
+	var b bytes.Buffer
+
+	for _, p := range bp.Points() {
+		if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
+			return err
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	u := c.url
+	u.Path = "write"
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.useragent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	params := req.URL.Query()
+	params.Set("db", bp.Database())
+	params.Set("rp", bp.RetentionPolicy())
+	params.Set("precision", bp.Precision())
+	params.Set("consistency", bp.WriteConsistency())
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		return errors.New(string(body))
+	}
+
+	return nil
+}
+
+// Query defines a query to send to the server.
+type Query struct {
+	Command    string
+	Database   string
+	Precision  string
+	Chunked    bool
+	ChunkSize  int
+	Parameters map[string]interface{}
+}
+
+// NewQuery returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+func NewQuery(command, database, precision string) Query {
+	return Query{
+		Command:    command,
+		Database:   database,
+		Precision:  precision,
+		Parameters: make(map[string]interface{}),
+	}
+}
+
+// NewQueryWithParameters returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+// parameters is a map of the parameter names used in the command to their values.
+func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
+	return Query{
+		Command:    command,
+		Database:   database,
+		Precision:  precision,
+		Parameters: parameters,
+	}
+}
+
+// Response represents a list of statement results.
+type Response struct {
+	Results []Result
+	Err     string `json:"error,omitempty"`
+}
+
+// Error returns the first error from any statement.
+// It returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+	if r.Err != "" {
+		return errors.New(r.Err)
+	}
+	for _, result := range r.Results {
+		if result.Err != "" {
+			return errors.New(result.Err)
+		}
+	}
+	return nil
+}
+
+// Message represents a user message.
+type Message struct {
+	Level string
+	Text  string
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+	Series   []models.Row
+	Messages []*Message
+	Err      string `json:"error,omitempty"`
+}
+
+// Query sends a command to the server and returns the Response.
+func (c *client) Query(q Query) (*Response, error) {
+	u := c.url
+	u.Path = "query"
+
+	jsonParameters, err := json.Marshal(q.Parameters)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.useragent)
+
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	params := req.URL.Query()
+	params.Set("q", q.Command)
+	params.Set("db", q.Database)
+	params.Set("params", string(jsonParameters))
+	if q.Chunked {
+		params.Set("chunked", "true")
+		if q.ChunkSize > 0 {
+			params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+		}
+	}
+
+	if q.Precision != "" {
+		params.Set("epoch", q.Precision)
+	}
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// If we lack an X-Influxdb-Version header, then we didn't get a response from influxdb
+	// but instead some other service. If the error code is also a 500+ code, then some
+	// downstream loadbalancer/proxy/etc had an issue and we should report that.
+	if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil || len(body) == 0 {
+			return nil, fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
+		}
+
+		return nil, fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
+	}
+
+	// If we get an unexpected content type, then the response likewise did not come from
+	// influxdb directly, and we want to know what we received and what status code was
+	// returned for debugging purposes.
+	if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
+		// Read up to 1kb of the body to help identify downstream errors and limit the impact of things
+		// like downstream serving a large file
+		body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
+		if err != nil || len(body) == 0 {
+			return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
+		}
+
+		return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
+	}
+
+	var response Response
+	if q.Chunked {
+		cr := NewChunkedResponse(resp.Body)
+		for {
+			r, err := cr.NextResponse()
+			if err != nil {
+				// If we got an error while decoding the response, send that back.
+				return nil, err
+			}
+
+			if r == nil {
+				break
+			}
+
+			response.Results = append(response.Results, r.Results...)
+			if r.Err != "" {
+				response.Err = r.Err
+				break
+			}
+		}
+	} else {
+		dec := json.NewDecoder(resp.Body)
+		dec.UseNumber()
+		decErr := dec.Decode(&response)
+
+		// ignore this error if we got an invalid status code
+		if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+			decErr = nil
+		}
+		// If we got a valid decode error, send that back
+		if decErr != nil {
+			return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
+		}
+	}
+
+	// If we don't have an error in our json response, and didn't get statusOK
+	// then send back an error
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+	}
+	return &response, nil
+}
+
+// duplexReader reads responses and writes them to another writer while
+// satisfying the reader interface.
+type duplexReader struct {
+	r io.Reader
+	w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+	n, err = r.r.Read(p)
+	if err == nil {
+		r.w.Write(p[:n])
+	}
+	return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+	dec    *json.Decoder
+	duplex *duplexReader
+	buf    bytes.Buffer
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+	resp := &ChunkedResponse{}
+	resp.duplex = &duplexReader{r: r, w: &resp.buf}
+	resp.dec = json.NewDecoder(resp.duplex)
+	resp.dec.UseNumber()
+	return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+	var response Response
+
+	if err := r.dec.Decode(&response); err != nil {
+		if err == io.EOF {
+			return nil, nil
+		}
+		// A decoding error happened. This probably means the server crashed
+		// and sent a last-ditch error message to us. Ensure we have read the
+		// entirety of the connection to get any remaining error text.
+		io.Copy(ioutil.Discard, r.duplex)
+		return nil, errors.New(strings.TrimSpace(r.buf.String()))
+	}
+
+	r.buf.Reset()
+	return &response, nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
new file mode 100644
index 0000000000000000000000000000000000000000..779a28b33f3478c5f8e6cb389dbc963d8b72bd87
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
@@ -0,0 +1,112 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"time"
+)
+
+const (
+	// UDPPayloadSize is a reasonable default payload size for UDP packets that
+	// could be travelling over the internet.
+	UDPPayloadSize = 512
+)
+
+// UDPConfig is the config data needed to create a UDP Client.
+type UDPConfig struct {
+	// Addr should be of the form "host:port"
+	// or "[ipv6-host%zone]:port".
+	Addr string
+
+	// PayloadSize is the maximum size of a UDP client message, optional.
+	// Tune this based on your network. Defaults to UDPPayloadSize.
+	PayloadSize int
+}
+
+// NewUDPClient returns a client interface for writing to an InfluxDB UDP
+// service from the given config.
+func NewUDPClient(conf UDPConfig) (Client, error) {
+	udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := net.DialUDP("udp", nil, udpAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	payloadSize := conf.PayloadSize
+	if payloadSize == 0 {
+		payloadSize = UDPPayloadSize
+	}
+
+	return &udpclient{
+		conn:        conn,
+		payloadSize: payloadSize,
+	}, nil
+}
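+
+// A minimal usage sketch (illustrative, not part of the upstream source;
+// assumes an InfluxDB UDP listener on localhost:8089):
+//
+//	c, err := NewUDPClient(UDPConfig{Addr: "localhost:8089"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()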
+
+// Close releases the udpclient's resources.
+func (uc *udpclient) Close() error {
+	return uc.conn.Close()
+}
+
+type udpclient struct {
+	conn        io.WriteCloser
+	payloadSize int
+}
+
+func (uc *udpclient) Write(bp BatchPoints) error {
+	var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
+	var d, _ = time.ParseDuration("1" + bp.Precision())
+
+	var delayedError error
+
+	var checkBuffer = func(n int) {
+		if len(b) > 0 && len(b)+n > uc.payloadSize {
+			if _, err := uc.conn.Write(b); err != nil {
+				delayedError = err
+			}
+			b = b[:0]
+		}
+	}
+
+	for _, p := range bp.Points() {
+		p.pt.Round(d)
+		pointSize := p.pt.StringSize() + 1 // include newline in size
+
+		checkBuffer(pointSize)
+
+		if p.Time().IsZero() || pointSize <= uc.payloadSize {
+			b = p.pt.AppendString(b)
+			b = append(b, '\n')
+			continue
+		}
+
+		points := p.pt.Split(uc.payloadSize - 1) // account for newline character
+		for _, sp := range points {
+			checkBuffer(sp.StringSize() + 1)
+			b = sp.AppendString(b)
+			b = append(b, '\n')
+		}
+	}
+
+	if len(b) > 0 {
+		if _, err := uc.conn.Write(b); err != nil {
+			return err
+		}
+	}
+	return delayedError
+}
+
+func (uc *udpclient) Query(q Query) (*Response, error) {
+	return nil, fmt.Errorf("Querying via UDP is not supported")
+}
+
+func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
+	return 0, "", nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a3269bca11b8d5d8f2a73a4701506311cb9693b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/consistency.go
@@ -0,0 +1,48 @@
+package models
+
+import (
+	"errors"
+	"strings"
+)
+
+// ConsistencyLevel represents a required replication criterion before a write can
+// be returned as successful.
+//
+// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
+type ConsistencyLevel int
+
+const (
+	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
+	ConsistencyLevelAny ConsistencyLevel = iota
+
+	// ConsistencyLevelOne requires at least one data node acknowledged a write.
+	ConsistencyLevelOne
+
+	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
+	ConsistencyLevelQuorum
+
+	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
+	ConsistencyLevelAll
+)
+
+var (
+	// ErrInvalidConsistencyLevel is returned when parsing the string version
+	// of a consistency level.
+	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
+)
+
+// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
+	switch strings.ToLower(level) {
+	case "any":
+		return ConsistencyLevelAny, nil
+	case "one":
+		return ConsistencyLevelOne, nil
+	case "quorum":
+		return ConsistencyLevelQuorum, nil
+	case "all":
+		return ConsistencyLevelAll, nil
+	default:
+		return 0, ErrInvalidConsistencyLevel
+	}
+}
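+
+// Illustrative example (not part of the upstream source): matching is
+// case-insensitive, so ParseConsistencyLevel("QUORUM") returns
+// ConsistencyLevelQuorum, while an unrecognized string returns
+// ErrInvalidConsistencyLevel.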
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
new file mode 100644
index 0000000000000000000000000000000000000000..eec1ae8b01317cc1f5c73ba5f57fa4e6c573387d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
@@ -0,0 +1,32 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+// from stdlib hash/fnv/fnv.go
+const (
+	prime64  = 1099511628211
+	offset64 = 14695981039346656037
+)
+
+// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
+// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
+type InlineFNV64a uint64
+
+// NewInlineFNV64a returns a new instance of InlineFNV64a.
+func NewInlineFNV64a() InlineFNV64a {
+	return offset64
+}
+
+// Write adds data to the running hash.
+func (s *InlineFNV64a) Write(data []byte) (int, error) {
+	hash := uint64(*s)
+	for _, c := range data {
+		hash ^= uint64(c)
+		hash *= prime64
+	}
+	*s = InlineFNV64a(hash)
+	return len(data), nil
+}
+
+// Sum64 returns the uint64 of the current resulting hash.
+func (s *InlineFNV64a) Sum64() uint64 {
+	return uint64(*s)
+}
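+
+// A short usage sketch (illustrative, not part of the upstream source):
+// hashing a series key without a heap allocation:
+//
+//	h := NewInlineFNV64a()
+//	h.Write([]byte("cpu,host=a"))
+//	sum := h.Sum64()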
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..8db4837384aaa4863fcd6bda5be4a3389b9c2ade
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
@@ -0,0 +1,44 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
+func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
+	s := unsafeBytesToString(b)
+	return strconv.ParseInt(s, base, bitSize)
+}
+
+// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
+func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
+	s := unsafeBytesToString(b)
+	return strconv.ParseUint(s, base, bitSize)
+}
+
+// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
+func parseFloatBytes(b []byte, bitSize int) (float64, error) {
+	s := unsafeBytesToString(b)
+	return strconv.ParseFloat(s, bitSize)
+}
+
+// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
+func parseBoolBytes(b []byte) (bool, error) {
+	return strconv.ParseBool(unsafeBytesToString(b))
+}
+
+// unsafeBytesToString converts a []byte to a string without a heap allocation.
+//
+// It is unsafe, and is intended to prepare input to short-lived functions
+// that require strings.
+func unsafeBytesToString(in []byte) string {
+	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
+	dst := reflect.StringHeader{
+		Data: src.Data,
+		Len:  src.Len,
+	}
+	s := *(*string)(unsafe.Pointer(&dst))
+	return s
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
new file mode 100644
index 0000000000000000000000000000000000000000..ad80a816bf1b7ff87df0c2d462f57b4af8bc4631
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -0,0 +1,2337 @@
+// Package models implements basic objects used throughout the TICK stack.
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+	measurementEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+	}
+
+	tagEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+	ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+	// ErrInvalidNumber is returned when a number is expected but not provided.
+	ErrInvalidNumber = errors.New("invalid number")
+
+	// ErrInvalidPoint is returned when a point cannot be parsed correctly.
+	ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+	// MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+	MaxKeyLength = 65535
+)
+
+// enableUint64Support will enable uint64 support if set to true.
+var enableUint64Support = false
+
+// EnableUintSupport manually enables uint support for the point parser.
+// This function will be removed in the future and only exists for unit tests during the
+// transition.
+func EnableUintSupport() {
+	enableUint64Support = true
+}
+
+// Point defines the values that will be written to the database.
+type Point interface {
+	// Name returns the measurement name for the point.
+	Name() []byte
+
+	// SetName updates the measurement name for the point.
+	SetName(string)
+
+	// Tags returns the tag set for the point.
+	Tags() Tags
+
+	// AddTag adds or replaces a tag value for a point.
+	AddTag(key, value string)
+
+	// SetTags replaces the tags for the point.
+	SetTags(tags Tags)
+
+	// HasTag returns true if the tag exists for the point.
+	HasTag(tag []byte) bool
+
+	// Fields returns the fields for the point.
+	Fields() (Fields, error)
+
+	// Time returns the timestamp for the point.
+	Time() time.Time
+
+	// SetTime updates the timestamp for the point.
+	SetTime(t time.Time)
+
+	// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+	UnixNano() int64
+
+	// HashID returns a non-cryptographic checksum of the point's key.
+	HashID() uint64
+
+	// Key returns the key (measurement joined with tags) of the point.
+	Key() []byte
+
+	// String returns a string representation of the point. If there is a
+	// timestamp associated with the point then it will be specified with the default
+	// precision of nanoseconds.
+	String() string
+
+	// MarshalBinary returns a binary representation of the point.
+	MarshalBinary() ([]byte, error)
+
+	// PrecisionString returns a string representation of the point. If there
+	// is a timestamp associated with the point then it will be specified in the
+	// given unit.
+	PrecisionString(precision string) string
+
+	// RoundedString returns a string representation of the point. If there
+	// is a timestamp associated with the point, then it will be rounded to the
+	// given duration.
+	RoundedString(d time.Duration) string
+
+	// Split will attempt to return multiple points with the same timestamp whose
+	// string representations are no longer than size. Points with a single field
+	// or without a timestamp may exceed the requested size.
+	Split(size int) []Point
+
+	// Round will round the timestamp of the point to the given duration.
+	Round(d time.Duration)
+
+	// StringSize returns the length of the string that would be returned by String().
+	StringSize() int
+
+	// AppendString appends the result of String() to the provided buffer and returns
+	// the result, potentially reducing string allocations.
+	AppendString(buf []byte) []byte
+
+	// FieldIterator returns a FieldIterator that can be used to traverse the
+	// fields of a point without constructing the in-memory map.
+	FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+	// Integer indicates the field's type is integer.
+	Integer FieldType = iota
+
+	// Float indicates the field's type is float.
+	Float
+
+	// Boolean indicates the field's type is boolean.
+	Boolean
+
+	// String indicates the field's type is string.
+	String
+
+	// Empty is used to indicate that there is no field.
+	Empty
+
+	// Unsigned indicates the field's type is an unsigned integer.
+	Unsigned
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+	// Next indicates whether there are any fields remaining.
+	Next() bool
+
+	// FieldKey returns the key of the current field.
+	FieldKey() []byte
+
+	// Type returns the FieldType of the current field.
+	Type() FieldType
+
+	// StringValue returns the string value of the current field.
+	StringValue() string
+
+	// IntegerValue returns the integer value of the current field.
+	IntegerValue() (int64, error)
+
+	// UnsignedValue returns the unsigned value of the current field.
+	UnsignedValue() (uint64, error)
+
+	// BooleanValue returns the boolean value of the current field.
+	BooleanValue() (bool, error)
+
+	// FloatValue returns the float value of the current field.
+	FloatValue() (float64, error)
+
+	// Reset resets the iterator to its initial state.
+	Reset()
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+	time time.Time
+
+	// text encoding of measurement and tags
+	// key must always be stored sorted by tags; if the original line was not
+	// sorted, we need to re-sort it
+	key []byte
+
+	// text encoding of field data
+	fields []byte
+
+	// text encoding of timestamp
+	ts []byte
+
+	// cached version of parsed fields from data
+	cachedFields map[string]interface{}
+
+	// cached version of parsed name from key
+	cachedName string
+
+	// cached version of parsed tags
+	cachedTags Tags
+
+	it fieldIterator
+}
+
+// type assertions
+var (
+	_ Point         = (*point)(nil)
+	_ FieldIterator = (*point)(nil)
+)
+
+const (
+	// the number of characters for the largest possible int64 (9223372036854775807)
+	maxInt64Digits = 19
+
+	// the number of characters for the smallest possible int64 (-9223372036854775808)
+	minInt64Digits = 20
+
+	// the number of characters for the largest possible uint64 (18446744073709551615)
+	maxUint64Digits = 20
+
+	// the number of characters required for the largest float64 before a range check
+	// would occur during parsing
+	maxFloat64Digits = 25
+
+	// the number of characters required for the smallest float64 before a range check
+	// would occur during parsing
+	minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines.  If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+	return ParsePoints([]byte(buf))
+}
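+
+// Illustrative example (not part of the upstream source): a single
+// line-protocol entry with one tag and one field parses into one point:
+//
+//	pts, err := ParsePointsString(`cpu,host=a value=0.64 1257894000000000000`)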
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParseKey(buf []byte) (string, Tags) {
+	meas, tags := ParseKeyBytes(buf)
+	return string(meas), tags
+}
+
+func ParseKeyBytes(buf []byte) ([]byte, Tags) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+
+	var tags Tags
+	if state == tagKeyState {
+		tags = parseTags(buf)
+		// scanMeasurement returns the location of the comma if there are tags, strip that off
+		return buf[:i-1], tags
+	}
+	return buf[:i], tags
+}
+
+func ParseTags(buf []byte) Tags {
+	return parseTags(buf)
+}
+
+func ParseName(buf []byte) ([]byte, error) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+	if state == tagKeyState {
+		return buf[:i-1], nil
+	}
+	return buf[:i], nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+	points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+	var (
+		pos    int
+		block  []byte
+		failed []string
+	)
+	for pos < len(buf) {
+		pos, block = scanLine(buf, pos)
+		pos++
+
+		if len(block) == 0 {
+			continue
+		}
+
+		// lines which start with '#' are comments
+		start := skipWhitespace(block, 0)
+
+		// If line is all whitespace, just skip it
+		if start >= len(block) {
+			continue
+		}
+
+		if block[start] == '#' {
+			continue
+		}
+
+		// strip the newline if one is present
+		if block[len(block)-1] == '\n' {
+			block = block[:len(block)-1]
+		}
+
+		pt, err := parsePoint(block[start:], defaultTime, precision)
+		if err != nil {
+			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+		} else {
+			points = append(points, pt)
+		}
+
+	}
+	if len(failed) > 0 {
+		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+	}
+	return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
+	pos, key, err := scanKey(buf, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// measurement name is required
+	if len(key) == 0 {
+		return nil, fmt.Errorf("missing measurement")
+	}
+
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	// scan the second block which is field1=value1[,field2=value2,...]
+	pos, fields, err := scanFields(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	// at least one field is required
+	if len(fields) == 0 {
+		return nil, fmt.Errorf("missing fields")
+	}
+
+	var maxKeyErr error
+	walkFields(fields, func(k, v []byte) bool {
+		if sz := seriesKeySize(key, k); sz > MaxKeyLength {
+			maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+			return false
+		}
+		return true
+	})
+
+	if maxKeyErr != nil {
+		return nil, maxKeyErr
+	}
+
+	// scan the last block which is an optional integer timestamp
+	pos, ts, err := scanTime(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	pt := &point{
+		key:    key,
+		fields: fields,
+		ts:     ts,
+	}
+
+	if len(ts) == 0 {
+		pt.time = defaultTime
+		pt.SetPrecision(precision)
+	} else {
+		ts, err := parseIntBytes(ts, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		pt.time, err = SafeCalcTime(ts, precision)
+		if err != nil {
+			return nil, err
+		}
+
+		// Determine if there are illegal non-whitespace characters after the
+		// timestamp block.
+		for pos < len(buf) {
+			if buf[pos] != ' ' {
+				return nil, ErrInvalidPoint
+			}
+			pos++
+		}
+	}
+	return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
+func GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
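+
+// Illustrative example (not part of the upstream source):
+// GetPrecisionMultiplier("ms") returns 1000000 (nanoseconds per
+// millisecond), and any unrecognized precision falls back to the
+// nanosecond multiplier of 1.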
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf.  If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Determines whether the tags are sorted; assume they are.
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag.  For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20]
+	// which indicates that the first tag starts at buf[4], the second at buf[11],
+	// and the last at buf[20].
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen so we know how many of the values in
+	// indices are in use. Since indices is an arbitrarily large slice,
+	// we need to know how many of its entries are valid.
+	commas := 0
+
+	// First scan the Point's measurement.
+	state, i, err := scanMeasurement(buf, i)
+	if err != nil {
+		return i, buf[start:i], err
+	}
+
+	// Optionally scan tags if needed.
+	if state == tagKeyState {
+		i, commas, indices, err = scanTags(buf, i, indices)
+		if err != nil {
+			return i, buf[start:i], err
+		}
+	}
+
+	// Now that we know where the key region is within buf, and the location of the
+	// tags, we need to determine whether duplicate tags exist and whether the tags
+	// are sorted. This iterates over the list, comparing each tag with the next one
+	// in the sequence.
+	for j := 0; j < commas-1; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+		// If left is greater than right, the tags are not sorted. We do not have to
+		// continue because the short path no longer works.
+		// If the tags are equal, then there are duplicate tags, and we should abort.
+		// If the tags are not sorted, this pass may not find duplicate tags and we
+		// need to do a more exhaustive search later.
+		if cmp := bytes.Compare(left, right); cmp > 0 {
+			sorted = false
+			break
+		} else if cmp == 0 {
+			return i, buf[start:i], fmt.Errorf("duplicate tags")
+		}
+	}
+
+	// If the tags are not sorted, then sort them.  This sort is inline and
+	// uses the tag indices we created earlier.  The actual buffer is not sorted; the
+	// indices are sorted using the buffer for value comparison.  After the indices are sorted,
+	// the buffer is reconstructed from the sorted indices.
+	if !sorted && commas > 0 {
+		// Get the measurement name for later
+		measurement := buf[start : indices[0]-1]
+
+		// Sort the indices
+		indices := indices[:commas]
+		insertionSort(0, commas, buf, indices)
+
+		// Create a new key using the measurement and sorted indices
+		b := make([]byte, len(buf[start:i]))
+		pos := copy(b, measurement)
+		for _, i := range indices {
+			b[pos] = ','
+			pos++
+			_, v := scanToSpaceOr(buf, i, ',')
+			pos += copy(b[pos:], v)
+		}
+
+		// Check again for duplicate tags now that the tags are sorted.
+		for j := 0; j < commas-1; j++ {
+			// get the left and right tags
+			_, left := scanTo(buf[indices[j]:], 0, '=')
+			_, right := scanTo(buf[indices[j+1]:], 0, '=')
+
+			// If the tags are equal, then there are duplicate tags, and we should abort.
+			// If the tags are not sorted, this pass may not find duplicate tags and we
+			// need to do a more exhaustive search later.
+			if bytes.Equal(left, right) {
+				return i, b, fmt.Errorf("duplicate tags")
+			}
+		}
+
+		return i, b, nil
+	}
+
+	return i, buf[start:i], nil
+}
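+
+// Illustrative example (not part of the upstream source): scanKey rewrites
+// the unsorted key `cpu,zone=c,host=a` to `cpu,host=a,zone=c`, so that
+// equivalent series always produce identical key bytes.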
+
+// The following constants allow us to specify which state to move to
+// next, when scanning sections of a Point.
+const (
+	tagKeyState = iota
+	tagValueState
+	fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+	// Check first byte of measurement, anything except a comma is fine.
+	// It can't be a space, since whitespace is stripped prior to this
+	// function call.
+	if i >= len(buf) || buf[i] == ',' {
+		return -1, i, fmt.Errorf("missing measurement")
+	}
+
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		if buf[i-1] == '\\' {
+			// Skip character (it's escaped).
+			continue
+		}
+
+		// Unescaped comma; move onto scanning the tags.
+		if buf[i] == ',' {
+			return tagKeyState, i + 1, nil
+		}
+
+		// Unescaped space; move onto scanning the fields.
+		if buf[i] == ' ' {
+			// cpu value=1.0
+			return fieldsState, i, nil
+		}
+	}
+}
+
+// scanTags examines all the tags in a Point, keeping track of and
+// returning the updated indices slice, number of commas and location
+// in buf where to start examining the Point fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+	var (
+		err    error
+		commas int
+		state  = tagKeyState
+	)
+
+	for {
+		switch state {
+		case tagKeyState:
+			// Grow our indices slice if we have too many tags.
+			if commas >= len(indices) {
+				newIndices := make([]int, cap(indices)*2)
+				copy(newIndices, indices)
+				indices = newIndices
+			}
+			indices[commas] = i
+			commas++
+
+			i, err = scanTagsKey(buf, i)
+			state = tagValueState // tag value always follows a tag key
+		case tagValueState:
+			state, i, err = scanTagsValue(buf, i)
+		case fieldsState:
+			indices[commas] = i + 1
+			return i, commas, indices, nil
+		}
+
+		if err != nil {
+			return i, commas, indices, err
+		}
+	}
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+	// First character of the key.
+	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+		// cpu,{'', ' ', ',', '='}
+		return i, fmt.Errorf("missing tag key")
+	}
+
+	// Examine each character in the tag key until we hit an unescaped
+	// equals (the tag value), or we hit an error (i.e., unescaped
+	// space or comma).
+	for {
+		i++
+
+		// Either we reached the end of the buffer or we hit an
+		// unescaped comma or space.
+		if i >= len(buf) ||
+			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+			// cpu,tag{'', ' ', ','}
+			return i, fmt.Errorf("missing tag value")
+		}
+
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag=
+			return i + 1, nil
+		}
+	}
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+	// Tag value cannot be empty.
+	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+		// cpu,tag={',', ' '}
+		return -1, i, fmt.Errorf("missing tag value")
+	}
+
+	// Examine each character in the tag value until we hit an unescaped
+	// comma (move onto next tag key), an unescaped space (move onto
+	// fields), or we error out.
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu,tag=value
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		// An unescaped equals sign is an invalid tag value.
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag={'=', 'fo=o'}
+			return -1, i, fmt.Errorf("invalid tag format")
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			// cpu,tag=foo,
+			return tagKeyState, i + 1, nil
+		}
+
+		// cpu,tag=foo value=1.0
+		// cpu, tag=foo\= value=1.0
+		if buf[i] == ' ' && buf[i-1] != '\\' {
+			return fieldsState, i, nil
+		}
+	}
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+	for i := l + 1; i < r; i++ {
+		for j := i; j > l && less(buf, indices, j, j-1); j-- {
+			indices[j], indices[j-1] = indices[j-1], indices[j]
+		}
+	}
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+	// This grabs the tag names for i & j; it ignores the values.
+	_, a := scanTo(buf, indices[i], '=')
+	_, b := scanTo(buf, indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+// scanFields scans buf, starting at i for the fields section of a point.  It returns
+// the ending position and the byte slice of the fields within buf.
+func scanFields(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	quoted := false
+
+	// tracks how many '=' we've seen
+	equals := 0
+
+	// tracks how many commas we've seen
+	commas := 0
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// escaped characters?
+		if buf[i] == '\\' && i+1 < len(buf) {
+			i += 2
+			continue
+		}
+
+		// If the value is quoted, scan until we get to the end quote
+		// Only track quotes in the field value, since quotes are not significant
+		// in the field key
+		if buf[i] == '"' && equals > commas {
+			quoted = !quoted
+			i++
+			continue
+		}
+
+		// If we see an =, ensure that there is at least one char before and after it
+		if buf[i] == '=' && !quoted {
+			equals++
+
+			// check for "... =123" but allow "a\ =123"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "...a=123,=456" but allow "a=123,a\,=456"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "... value="
+			if i+1 >= len(buf) {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			// check for "... value=,value2=..."
+			if buf[i+1] == ',' || buf[i+1] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+				var err error
+				i, err = scanNumber(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+			// If next byte is not a double-quote, the value must be a boolean
+			if buf[i+1] != '"' {
+				var err error
+				i, _, err = scanBoolean(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+		}
+
+		if buf[i] == ',' && !quoted {
+			commas++
+		}
+
+		// reached end of block?
+		if buf[i] == ' ' && !quoted {
+			break
+		}
+		i++
+	}
+
+	if quoted {
+		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+	}
+
+	// check that all field sections had keys and values (e.g. prevent "a=1,b")
+	if equals == 0 || commas != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid field format")
+	}
+
+	return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point. It
+// returns the ending position and the byte slice of the timestamp within buf
+// and an error if the timestamp is not in the correct numeric format.
+func scanTime(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached end of block or trailing whitespace?
+		if buf[i] == '\n' || buf[i] == ' ' {
+			break
+		}
+
+		// Handle negative timestamps
+		if i == start && buf[i] == '-' {
+			i++
+			continue
+		}
+
+		// Timestamps should be integers; verify that here so we don't need
+		// to actually parse the timestamp until it is needed.
+		if buf[i] < '0' || buf[i] > '9' {
+			return i, buf[start:i], fmt.Errorf("bad timestamp")
+		}
+		i++
+	}
+	return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+	return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float.  It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+	start := i
+	var isInt, isUnsigned bool
+
+	// Is negative number?
+	if i < len(buf) && buf[i] == '-' {
+		i++
+		// There must be more characters now, as just '-' is illegal.
+		if i == len(buf) {
+			return i, ErrInvalidNumber
+		}
+	}
+
+	// tracks whether we've seen a decimal point
+	decimal := false
+
+	// indicates the number is a float in scientific notation
+	scientific := false
+
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+
+		if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
+			isInt = true
+			i++
+			continue
+		} else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
+			isUnsigned = true
+			i++
+			continue
+		}
+
+		if buf[i] == '.' {
+			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+			if decimal {
+				return i, ErrInvalidNumber
+			}
+			decimal = true
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+			scientific = true
+			i++
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+			i++
+			continue
+		}
+
+		// NaN is an unsupported value
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			return i, ErrInvalidNumber
+		}
+
+		if !isNumeric(buf[i]) {
+			return i, ErrInvalidNumber
+		}
+		i++
+	}
+
+	if (isInt || isUnsigned) && (decimal || scientific) {
+		return i, ErrInvalidNumber
+	}
+
+	numericDigits := i - start
+	if isInt {
+		numericDigits--
+	}
+	if decimal {
+		numericDigits--
+	}
+	if buf[start] == '-' {
+		numericDigits--
+	}
+
+	if numericDigits == 0 {
+		return i, ErrInvalidNumber
+	}
+
+	// It's more common that numbers will be within min/max range for their type but we need to prevent
+	// out-of-range numbers from being parsed successfully.  This uses some simple heuristics to decide
+	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
+	// extra allocations and we end up converting the type again when writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the int to check bounds if the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `i` from our tests
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else if isUnsigned {
+		// Return an error if uint64 support has not been enabled.
+		if !enableUint64Support {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the last char is a 'u' for unsigned
+		if buf[i-1] != 'u' {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the first char is not a '-' for unsigned
+		if buf[start] == '-' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the uint to check bounds if the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `u` from our tests
+		if len(buf[start:i-1]) >= maxUint64Digits {
+			if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+			if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, start at i after
+// scanning over buf for boolean. Valid values for a boolean are
+// t, T, true, TRUE, f, F, false, FALSE.  It returns an error if an invalid boolean
+// is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+	start := i
+
+	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	i++
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+		i++
+	}
+
+	// Single char bool (t, T, f, F) is ok
+	if i-start == 1 {
+		return i, buf[start:i], nil
+	}
+
+	// length must be 4 for true or TRUE
+	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// length must be 5 for false or FALSE
+	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// Otherwise
+	valid := false
+	switch buf[start] {
+	case 't':
+		valid = bytes.Equal(buf[start:i], []byte("true"))
+	case 'f':
+		valid = bytes.Equal(buf[start:i], []byte("false"))
+	case 'T':
+		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+	case 'F':
+		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+	}
+
+	if !valid {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i after
+// scanning over spaces, tabs, and NUL bytes.
+func skipWhitespace(buf []byte, i int) int {
+	for i < len(buf) {
+		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+			break
+		}
+		i++
+	}
+	return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	fields := false
+
+	// tracks how many '=' and commas we've seen
+	// this duplicates some of the functionality in scanFields
+	equals := 0
+	commas := 0
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// skip past escaped characters
+		if buf[i] == '\\' && i+2 < len(buf) {
+			i += 2
+			continue
+		}
+
+		if buf[i] == ' ' {
+			fields = true
+		}
+
+		// If we see a double quote, make sure it is not escaped
+		if fields {
+			if !quoted && buf[i] == '=' {
+				i++
+				equals++
+				continue
+			} else if !quoted && buf[i] == ',' {
+				i++
+				commas++
+				continue
+			} else if buf[i] == '"' && equals > commas {
+				i++
+				quoted = !quoted
+				continue
+			}
+		}
+
+		if buf[i] == '\n' && !quoted {
+			break
+		}
+
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte, where stop byte
+// has not been escaped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached unescaped stop value?
+		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+			break
+		}
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive
+// block of bytes, starting from i and ending with the stop byte or a
+// space, whichever comes first.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	if buf[i] == stop || buf[i] == ' ' {
+		return i, buf[start:i]
+	}
+
+	for {
+		i++
+		if buf[i-1] == '\\' {
+			continue
+		}
+
+		// reached the end of buf?
+		if i >= len(buf) {
+			return i, buf[start:i]
+		}
+
+		// reached end of block?
+		if buf[i] == stop || buf[i] == ' ' {
+			return i, buf[start:i]
+		}
+	}
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+	start := i
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			break
+		}
+		i++
+	}
+	if i > len(buf) {
+		return i, nil
+	}
+	return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	for i < len(buf) {
+		// The only escape characters for a field value are double quote and backslash
+		if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
+			i += 2
+			continue
+		}
+
+		// Quoted value? (e.g. string)
+		if buf[i] == '"' {
+			i++
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == ',' && !quoted {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func EscapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, esc, []byte{b}, -1)
+	}
+	return in
+}
+
+func escapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, []byte{b}, esc, -1)
+		}
+	}
+	return in
+}
+
+func unescapeTag(in []byte) []byte {
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, esc, []byte{b}, -1)
+		}
+	}
+	return in
+}
+
+// escapeStringFieldReplacer replaces double quotes and backslashes
+// with the same character preceded by a backslash.
+// As of Go 1.7 this benchmarked better in allocations and CPU time
+// compared to iterating through a string byte-by-byte and appending to a new byte slice,
+// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.
+var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
+
+// EscapeStringField returns a copy of in with any double quotes or
+// backslashes with escaped values.
+func EscapeStringField(in string) string {
+	return escapeStringFieldReplacer.Replace(in)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped.
+func unescapeStringField(in string) string {
+	if strings.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// unescape backslashes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+			out = append(out, '\\')
+			i += 2
+			continue
+		}
+		// unescape double-quotes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+			out = append(out, '"')
+			i += 2
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
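+
+// As an illustrative round trip, EscapeStringField and unescapeStringField
+// should invert each other for string field values:
+//
+//	s := EscapeStringField(`say "hi"`) // say \"hi\"
+//	_ = unescapeStringField(s)         // say "hi"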
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
+	key, err := pointKey(name, tags, fields, t)
+	if err != nil {
+		return nil, err
+	}
+
+	return &point{
+		key:    key,
+		time:   t,
+		fields: fields.MarshalBinary(),
+	}, nil
+}
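+
+// As a quick illustration (the exact field encoding is shown by appendField
+// further below), constructing and printing a point might look like:
+//
+//	pt, err := NewPoint("cpu",
+//		NewTags(map[string]string{"host": "serverA"}),
+//		Fields{"value": 42.0},
+//		time.Unix(0, 0).UTC())
+//	if err == nil {
+//		fmt.Println(pt) // cpu,host=serverA value=42 0
+//	}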
+
+// pointKey checks some basic requirements for valid points, and returns the
+// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
+	if len(fields) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+
+	if !t.IsZero() {
+		if err := CheckTime(t); err != nil {
+			return nil, err
+		}
+	}
+
+	for key, value := range fields {
+		switch value := value.(type) {
+		case float64:
+			// Ensure the caller validates and handles invalid field values
+			if math.IsNaN(value) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		case float32:
+			// Ensure the caller validates and handles invalid field values
+			if math.IsNaN(float64(value)) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		}
+		if len(key) == 0 {
+			return nil, fmt.Errorf("all fields must have non-empty names")
+		}
+	}
+
+	key := MakeKey([]byte(measurement), tags)
+	for field := range fields {
+		sz := seriesKeySize(key, []byte(field))
+		if sz > MaxKeyLength {
+			return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+		}
+	}
+
+	return key, nil
+}
+
+func seriesKeySize(key, field []byte) int {
+	// 4 is the length of the tsm1.fieldKeySeparator constant.  It's inlined here to avoid a circular
+	// dependency.
+	return len(key) + 4 + len(field)
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+	p := &point{}
+	if err := p.UnmarshalBinary(b); err != nil {
+		return nil, err
+	}
+
+	// This does some basic validation to ensure there are fields and they
+	// can be unmarshalled as well.
+	iter := p.FieldIterator()
+	var hasField bool
+	for iter.Next() {
+		if len(iter.FieldKey()) == 0 {
+			continue
+		}
+		hasField = true
+		switch iter.Type() {
+		case Float:
+			_, err := iter.FloatValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+		case Integer:
+			_, err := iter.IntegerValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+		case Unsigned:
+			_, err := iter.UnsignedValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+		case String:
+			// Skip since this won't return an error
+		case Boolean:
+			_, err := iter.BooleanValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+		}
+	}
+
+	if !hasField {
+		return nil, ErrPointMustHaveAField
+	}
+
+	return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+	pt, err := NewPoint(name, tags, fields, time)
+	if err != nil {
+		panic(err.Error())
+	}
+	return pt
+}
+
+// Key returns the key (measurement joined with tags) of the point.
+func (p *point) Key() []byte {
+	return p.key
+}
+
+func (p *point) name() []byte {
+	_, name := scanTo(p.key, 0, ',')
+	return name
+}
+
+func (p *point) Name() []byte {
+	return escape.Unescape(p.name())
+}
+
+// SetName updates the measurement name for the point.
+func (p *point) SetName(name string) {
+	p.cachedName = ""
+	p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point.
+func (p *point) Time() time.Time {
+	return p.time
+}
+
+// SetTime updates the timestamp for the point.
+func (p *point) SetTime(t time.Time) {
+	p.time = t
+}
+
+// Round will round the timestamp of the point to the given duration.
+func (p *point) Round(d time.Duration) {
+	p.time = p.time.Round(d)
+}
+
+// Tags returns the tag set for the point.
+func (p *point) Tags() Tags {
+	if p.cachedTags != nil {
+		return p.cachedTags
+	}
+	p.cachedTags = parseTags(p.key)
+	return p.cachedTags
+}
+
+func (p *point) HasTag(tag []byte) bool {
+	if len(p.key) == 0 {
+		return false
+	}
+
+	var exists bool
+	walkTags(p.key, func(key, value []byte) bool {
+		if bytes.Equal(tag, key) {
+			exists = true
+			return false
+		}
+		return true
+	})
+
+	return exists
+}
+
+func walkTags(buf []byte, fn func(key, value []byte) bool) {
+	if len(buf) == 0 {
+		return
+	}
+
+	pos, name := scanTo(buf, 0, ',')
+
+	// it's an empty key, so there are no tags
+	if len(name) == 0 {
+		return
+	}
+
+	hasEscape := bytes.IndexByte(buf, '\\') != -1
+	i := pos + 1
+	var key, value []byte
+	for {
+		if i >= len(buf) {
+			break
+		}
+		i, key = scanTo(buf, i, '=')
+		i, value = scanTagValue(buf, i+1)
+
+		if len(value) == 0 {
+			continue
+		}
+
+		if hasEscape {
+			if !fn(unescapeTag(key), unescapeTag(value)) {
+				return
+			}
+		} else {
+			if !fn(key, value) {
+				return
+			}
+		}
+
+		i++
+	}
+}
+
+// walkFields walks each field key and value via fn.  If fn returns false, the iteration
+// is stopped.  The values are the raw byte slices and not the converted types.
+func walkFields(buf []byte, fn func(key, value []byte) bool) {
+	var i int
+	var key, val []byte
+	for len(buf) > 0 {
+		i, key = scanTo(buf, 0, '=')
+		buf = buf[i+1:]
+		i, val = scanFieldValue(buf, 0)
+		buf = buf[i:]
+		if !fn(key, val) {
+			break
+		}
+
+		// slice off comma
+		if len(buf) > 0 {
+			buf = buf[1:]
+		}
+	}
+}
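+
+// A minimal sketch of iterating raw field bytes with walkFields:
+//
+//	walkFields([]byte(`a=1i,b="two"`), func(k, v []byte) bool {
+//		fmt.Printf("%s -> %s\n", k, v) // a -> 1i, then b -> "two"
+//		return true
+//	})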
+
+func parseTags(buf []byte) Tags {
+	if len(buf) == 0 {
+		return nil
+	}
+
+	tags := make(Tags, bytes.Count(buf, []byte(",")))
+	p := 0
+	walkTags(buf, func(key, value []byte) bool {
+		tags[p].Key = key
+		tags[p].Value = value
+		p++
+		return true
+	})
+	return tags
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+	// unescape the name and then re-escape it to avoid double escaping.
+	// The key should always be stored in escaped form.
+	return append(EscapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
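+
+// Since Tags.HashKey (defined below) serializes a tag set as ",k1=v1,k2=v2",
+// a call such as the following should yield the familiar series-key form:
+//
+//	key := MakeKey([]byte("cpu"), NewTags(map[string]string{"host": "a"}))
+//	// string(key) == "cpu,host=a"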
+
+// SetTags replaces the tags for the point.
+func (p *point) SetTags(tags Tags) {
+	p.key = MakeKey(p.Name(), tags)
+	p.cachedTags = tags
+}
+
+// AddTag adds or replaces a tag value for a point.
+func (p *point) AddTag(key, value string) {
+	tags := p.Tags()
+	tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
+	sort.Sort(tags)
+	p.cachedTags = tags
+	p.key = MakeKey(p.Name(), tags)
+}
+
+// Fields returns the fields for the point.
+func (p *point) Fields() (Fields, error) {
+	if p.cachedFields != nil {
+		return p.cachedFields, nil
+	}
+	cf, err := p.unmarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	p.cachedFields = cf
+	return p.cachedFields, nil
+}
+
+// SetPrecision will round a time to the specified precision.
+func (p *point) SetPrecision(precision string) {
+	switch precision {
+	case "n":
+	case "u":
+		p.SetTime(p.Time().Truncate(time.Microsecond))
+	case "ms":
+		p.SetTime(p.Time().Truncate(time.Millisecond))
+	case "s":
+		p.SetTime(p.Time().Truncate(time.Second))
+	case "m":
+		p.SetTime(p.Time().Truncate(time.Minute))
+	case "h":
+		p.SetTime(p.Time().Truncate(time.Hour))
+	}
+}
+
+// String returns the string representation of the point.
+func (p *point) String() string {
+	if p.Time().IsZero() {
+		return string(p.Key()) + " " + string(p.fields)
+	}
+	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+// AppendString appends the string representation of the point to buf.
+func (p *point) AppendString(buf []byte) []byte {
+	buf = append(buf, p.key...)
+	buf = append(buf, ' ')
+	buf = append(buf, p.fields...)
+
+	if !p.time.IsZero() {
+		buf = append(buf, ' ')
+		buf = strconv.AppendInt(buf, p.UnixNano(), 10)
+	}
+
+	return buf
+}
+
+// StringSize returns the length of the string that would be returned by String().
+func (p *point) StringSize() int {
+	size := len(p.key) + len(p.fields) + 1
+
+	if !p.time.IsZero() {
+		digits := 1 // even "0" has one digit
+		t := p.UnixNano()
+		if t < 0 {
+			// account for negative sign, then negate
+			digits++
+			t = -t
+		}
+		for t > 9 { // already accounted for one digit
+			digits++
+			t /= 10
+		}
+		size += digits + 1 // digits and a space
+	}
+
+	return size
+}
+
+// MarshalBinary returns a binary representation of the point.
+func (p *point) MarshalBinary() ([]byte, error) {
+	if len(p.fields) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+
+	tb, err := p.time.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
+	i := 0
+
+	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
+	i += 4
+
+	i += copy(b[i:], p.key)
+
+	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
+	i += 4
+
+	i += copy(b[i:], p.fields)
+
+	copy(b[i:], tb)
+	return b, nil
+}
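+
+// The encoding above is, in order: a 4-byte big-endian key length, the key
+// bytes, a 4-byte big-endian fields length, the fields bytes, and finally
+// the time.Time binary encoding. UnmarshalBinary reads back the same layout.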
+
+// UnmarshalBinary decodes a binary representation of the point into a point struct.
+func (p *point) UnmarshalBinary(b []byte) error {
+	var n int
+
+	// Read key length.
+	if len(b) < 4 {
+		return io.ErrShortBuffer
+	}
+	n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+	// Read key.
+	if len(b) < n {
+		return io.ErrShortBuffer
+	}
+	p.key, b = b[:n], b[n:]
+
+	// Read fields length.
+	if len(b) < 4 {
+		return io.ErrShortBuffer
+	}
+	n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+	// Read fields.
+	if len(b) < n {
+		return io.ErrShortBuffer
+	}
+	p.fields, b = b[:n], b[n:]
+
+	// Read timestamp.
+	if err := p.time.UnmarshalBinary(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// PrecisionString returns a string representation of the point. If there
+// is a timestamp associated with the point then it will be specified in the
+// given unit.
+func (p *point) PrecisionString(precision string) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.UnixNano()/GetPrecisionMultiplier(precision))
+}
+
+// RoundedString returns a string representation of the point. If there
+// is a timestamp associated with the point, then it will be rounded to the
+// given duration.
+func (p *point) RoundedString(d time.Duration) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() (Fields, error) {
+	iter := p.FieldIterator()
+	fields := make(Fields, 8)
+	for iter.Next() {
+		if len(iter.FieldKey()) == 0 {
+			continue
+		}
+		switch iter.Type() {
+		case Float:
+			v, err := iter.FloatValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case Integer:
+			v, err := iter.IntegerValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case Unsigned:
+			v, err := iter.UnsignedValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case String:
+			fields[string(iter.FieldKey())] = iter.StringValue()
+		case Boolean:
+			v, err := iter.BooleanValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		}
+	}
+	return fields, nil
+}
+
+// HashID returns a non-cryptographic checksum of the point's key.
+func (p *point) HashID() uint64 {
+	h := NewInlineFNV64a()
+	h.Write(p.key)
+	sum := h.Sum64()
+	return sum
+}
+
+// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+func (p *point) UnixNano() int64 {
+	return p.Time().UnixNano()
+}
+
+// Split will attempt to return multiple points with the same timestamp whose
+// string representations are no longer than size. Points with a single field or
+// a point without a timestamp may exceed the requested size.
+func (p *point) Split(size int) []Point {
+	if p.time.IsZero() || p.StringSize() <= size {
+		return []Point{p}
+	}
+
+	// key string, timestamp string, spaces
+	size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2
+
+	var points []Point
+	var start, cur int
+
+	for cur < len(p.fields) {
+		end, _ := scanTo(p.fields, cur, '=')
+		end, _ = scanFieldValue(p.fields, end+1)
+
+		if cur > start && end-start > size {
+			points = append(points, &point{
+				key:    p.key,
+				time:   p.time,
+				fields: p.fields[start : cur-1],
+			})
+			start = cur
+		}
+
+		cur = end + 1
+	}
+
+	points = append(points, &point{
+		key:    p.key,
+		time:   p.time,
+		fields: p.fields[start:],
+	})
+
+	return points
+}
+
+// Tag represents a single key/value tag pair.
+type Tag struct {
+	Key   []byte
+	Value []byte
+}
+
+// NewTag returns a new Tag.
+func NewTag(key, value []byte) Tag {
+	return Tag{
+		Key:   key,
+		Value: value,
+	}
+}
+
+// Size returns the size of the key and value.
+func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
+
+// Clone returns a shallow copy of Tag.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (t Tag) Clone() Tag {
+	other := Tag{
+		Key:   make([]byte, len(t.Key)),
+		Value: make([]byte, len(t.Value)),
+	}
+
+	copy(other.Key, t.Key)
+	copy(other.Value, t.Value)
+
+	return other
+}
+
+// String returns the string representation of the tag.
+func (t *Tag) String() string {
+	var buf bytes.Buffer
+	buf.WriteByte('{')
+	buf.WriteString(string(t.Key))
+	buf.WriteByte(' ')
+	buf.WriteString(string(t.Value))
+	buf.WriteByte('}')
+	return buf.String()
+}
+
+// Tags represents a sorted list of tags.
+type Tags []Tag
+
+// NewTags returns a new Tags from a map.
+func NewTags(m map[string]string) Tags {
+	if len(m) == 0 {
+		return nil
+	}
+	a := make(Tags, 0, len(m))
+	for k, v := range m {
+		a = append(a, NewTag([]byte(k), []byte(v)))
+	}
+	sort.Sort(a)
+	return a
+}
+
+// Keys returns the list of keys for a tag set.
+func (a Tags) Keys() []string {
+	if len(a) == 0 {
+		return nil
+	}
+	keys := make([]string, len(a))
+	for i, tag := range a {
+		keys[i] = string(tag.Key)
+	}
+	return keys
+}
+
+// Values returns the list of values for a tag set.
+func (a Tags) Values() []string {
+	if len(a) == 0 {
+		return nil
+	}
+	values := make([]string, len(a))
+	for i, tag := range a {
+		values[i] = string(tag.Value)
+	}
+	return values
+}
+
+// String returns the string representation of the tags.
+func (a Tags) String() string {
+	var buf bytes.Buffer
+	buf.WriteByte('[')
+	for i := range a {
+		buf.WriteString(a[i].String())
+		if i < len(a)-1 {
+			buf.WriteByte(' ')
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// Size returns the number of bytes needed to store all tags. Note that this
+// counts only the keys and values and does not account for data structures
+// or delimiters.
+func (a Tags) Size() int {
+	var total int
+	for _, t := range a {
+		total += t.Size()
+	}
+	return total
+}
+
+// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (a Tags) Clone() Tags {
+	if len(a) == 0 {
+		return nil
+	}
+
+	others := make(Tags, len(a))
+	for i := range a {
+		others[i] = a[i].Clone()
+	}
+
+	return others
+}
+
+func (a Tags) Len() int           { return len(a) }
+func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }
+func (a Tags) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// Equal returns true if a equals other.
+func (a Tags) Equal(other Tags) bool {
+	if len(a) != len(other) {
+		return false
+	}
+	for i := range a {
+		if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) {
+			return false
+		}
+	}
+	return true
+}
+
+// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b.
+func CompareTags(a, b Tags) int {
+	// Compare each key & value until a mismatch.
+	for i := 0; i < len(a) && i < len(b); i++ {
+		if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 {
+			return cmp
+		}
+		if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 {
+			return cmp
+		}
+	}
+
+	// If all tags are equal up to this point then return shorter tagset.
+	if len(a) < len(b) {
+		return -1
+	} else if len(a) > len(b) {
+		return 1
+	}
+
+	// All tags are equal.
+	return 0
+}
+
+// Get returns the value for a key.
+func (a Tags) Get(key []byte) []byte {
+	// OPTIMIZE: Use sort.Search if tagset is large.
+
+	for _, t := range a {
+		if bytes.Equal(t.Key, key) {
+			return t.Value
+		}
+	}
+	return nil
+}
+
+// GetString returns the string value for a string key.
+func (a Tags) GetString(key string) string {
+	return string(a.Get([]byte(key)))
+}
+
+// Set sets the value for a key.
+func (a *Tags) Set(key, value []byte) {
+	for i, t := range *a {
+		if bytes.Equal(t.Key, key) {
+			(*a)[i].Value = value
+			return
+		}
+	}
+	*a = append(*a, Tag{Key: key, Value: value})
+	sort.Sort(*a)
+}
+
+// SetString sets the string value for a string key.
+func (a *Tags) SetString(key, value string) {
+	a.Set([]byte(key), []byte(value))
+}
+
+// Delete removes a tag by key.
+func (a *Tags) Delete(key []byte) {
+	for i, t := range *a {
+		if bytes.Equal(t.Key, key) {
+			copy((*a)[i:], (*a)[i+1:])
+			(*a)[len(*a)-1] = Tag{}
+			*a = (*a)[:len(*a)-1]
+			return
+		}
+	}
+}
+
+// Map returns a map representation of the tags.
+func (a Tags) Map() map[string]string {
+	m := make(map[string]string, len(a))
+	for _, t := range a {
+		m[string(t.Key)] = string(t.Value)
+	}
+	return m
+}
+
+// Merge merges the tags, combining the two sets. If both define a tag with
+// the same key, the value from other wins.
+// A new Tags set is returned.
+func (a Tags) Merge(other map[string]string) Tags {
+	merged := make(map[string]string, len(a)+len(other))
+	for _, t := range a {
+		merged[string(t.Key)] = string(t.Value)
+	}
+	for k, v := range other {
+		merged[k] = v
+	}
+	return NewTags(merged)
+}
+
+// HashKey returns the escaped tag set serialized in ",key=value" form, as
+// used for the tag portion of a series key.
+func (a Tags) HashKey() []byte {
+	// Empty tag sets marshal to empty bytes.
+	if len(a) == 0 {
+		return nil
+	}
+
+	// Type invariant: Tags are sorted
+
+	escaped := make(Tags, 0, len(a))
+	sz := 0
+	for _, t := range a {
+		ek := escapeTag(t.Key)
+		ev := escapeTag(t.Value)
+
+		if len(ev) > 0 {
+			escaped = append(escaped, Tag{Key: ek, Value: ev})
+			sz += len(ek) + len(ev)
+		}
+	}
+
+	sz += len(escaped) + (len(escaped) * 2) // separators
+
+	// Generate marshaled bytes.
+	b := make([]byte, sz)
+	buf := b
+	idx := 0
+	for _, k := range escaped {
+		buf[idx] = ','
+		idx++
+		copy(buf[idx:idx+len(k.Key)], k.Key)
+		idx += len(k.Key)
+		buf[idx] = '='
+		idx++
+		copy(buf[idx:idx+len(k.Value)], k.Value)
+		idx += len(k.Value)
+	}
+	return b[:idx]
+}
+
+// CopyTags returns a shallow copy of tags.
+func CopyTags(a Tags) Tags {
+	other := make(Tags, len(a))
+	copy(other, a)
+	return other
+}
+
+// DeepCopyTags returns a deep copy of tags.
+func DeepCopyTags(a Tags) Tags {
+	// Calculate size of keys/values in bytes.
+	var n int
+	for _, t := range a {
+		n += len(t.Key) + len(t.Value)
+	}
+
+	// Build single allocation for all key/values.
+	buf := make([]byte, n)
+
+	// Copy tags to new set.
+	other := make(Tags, len(a))
+	for i, t := range a {
+		copy(buf, t.Key)
+		other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
+
+		copy(buf, t.Value)
+		other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
+	}
+
+	return other
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
+func (p *point) FieldIterator() FieldIterator {
+	p.Reset()
+	return p
+}
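+
+// A typical traversal, sketched (pt is any Point from this package):
+//
+//	iter := pt.FieldIterator()
+//	for iter.Next() {
+//		fmt.Printf("%s has type %v\n", iter.FieldKey(), iter.Type())
+//	}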
+
+type fieldIterator struct {
+	start, end  int
+	key, keybuf []byte
+	valueBuf    []byte
+	fieldType   FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+	p.it.start = p.it.end
+	if p.it.start >= len(p.fields) {
+		return false
+	}
+
+	p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+	if escape.IsEscaped(p.it.key) {
+		p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+		p.it.key = p.it.keybuf
+	}
+
+	p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+	p.it.end++
+
+	if len(p.it.valueBuf) == 0 {
+		p.it.fieldType = Empty
+		return true
+	}
+
+	c := p.it.valueBuf[0]
+
+	if c == '"' {
+		p.it.fieldType = String
+		return true
+	}
+
+	if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
+		if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+			p.it.fieldType = Integer
+			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+		} else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
+			p.it.fieldType = Unsigned
+			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+		} else {
+			p.it.fieldType = Float
+		}
+		return true
+	}
+
+	// to keep the same behavior that currently exists, default to boolean
+	p.it.fieldType = Boolean
+	return true
+}
+
+// FieldKey returns the key of the current field.
+func (p *point) FieldKey() []byte {
+	return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+	return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+	return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+	n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+	}
+	return n, nil
+}
+
+// UnsignedValue returns the unsigned value of the current field.
+func (p *point) UnsignedValue() (uint64, error) {
+	n, err := parseUintBytes(p.it.valueBuf, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
+	}
+	return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+	b, err := parseBoolBytes(p.it.valueBuf)
+	if err != nil {
+		return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+	}
+	return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+	f, err := parseFloatBytes(p.it.valueBuf, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+	}
+	return f, nil
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() {
+	p.it.fieldType = Empty
+	p.it.key = nil
+	p.it.valueBuf = nil
+	p.it.start = 0
+	p.it.end = 0
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation.
+// NOTE: uint64 values are written with a 'u' suffix; decoding them again
+// requires uint64 support to be enabled (see EnableUintSupport).
+// NOTE2: uint is accepted even though it may be 64 bits wide; it is written
+// as a signed integer for backwards compatibility.
+func (p Fields) MarshalBinary() []byte {
+	var b []byte
+	keys := make([]string, 0, len(p))
+
+	for k := range p {
+		keys = append(keys, k)
+	}
+
+	// Not really necessary, can probably be removed.
+	sort.Strings(keys)
+
+	for i, k := range keys {
+		if i > 0 {
+			b = append(b, ',')
+		}
+		b = appendField(b, k, p[k])
+	}
+
+	return b
+}
+
+func appendField(b []byte, k string, v interface{}) []byte {
+	b = append(b, []byte(escape.String(k))...)
+	b = append(b, '=')
+
+	// check popular types first
+	switch v := v.(type) {
+	case float64:
+		b = strconv.AppendFloat(b, v, 'f', -1, 64)
+	case int64:
+		b = strconv.AppendInt(b, v, 10)
+		b = append(b, 'i')
+	case string:
+		b = append(b, '"')
+		b = append(b, []byte(EscapeStringField(v))...)
+		b = append(b, '"')
+	case bool:
+		b = strconv.AppendBool(b, v)
+	case int32:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int16:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int8:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint64:
+		b = strconv.AppendUint(b, v, 10)
+		b = append(b, 'u')
+	case uint32:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint16:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint8:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint:
+		// TODO: 'uint' should be converted to writing as an unsigned integer,
+		// but we cannot since that would break backwards compatibility.
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case float32:
+		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+	case []byte:
+		b = append(b, v...)
+	case nil:
+		// skip
+	default:
+		// Can't determine the type, so convert to string
+		b = append(b, '"')
+		b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
+		b = append(b, '"')
+
+	}
+
+	return b
+}
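+
+// A few representative encodings from the cases above:
+//
+//	appendField(nil, "f", 1.5)       // f=1.5
+//	appendField(nil, "n", int64(7))  // n=7i
+//	appendField(nil, "u", uint64(7)) // u=7u
+//	appendField(nil, "s", "hi")      // s="hi"
+//	appendField(nil, "b", true)      // b=true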
+
+type byteSlices [][]byte
+
+func (a byteSlices) Len() int           { return len(a) }
+func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
+func (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go
new file mode 100644
index 0000000000000000000000000000000000000000..c087a4882d0d2d57da01a8d142db7091d6eebda3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/rows.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+	"sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+	Name    string            `json:"name,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+	Columns []string          `json:"columns,omitempty"`
+	Values  [][]interface{}   `json:"values,omitempty"`
+	Partial bool              `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+	h := NewInlineFNV64a()
+	keys := r.tagsKeys()
+	for _, k := range keys {
+		h.Write([]byte(k))
+		h.Write([]byte(r.Tags[k]))
+	}
+	return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+	a := make([]string, 0, len(r.Tags))
+	for k := range r.Tags {
+		a = append(a, k)
+	}
+	sort.Strings(a)
+	return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+	// Sort by name first.
+	if p[i].Name != p[j].Name {
+		return p[i].Name < p[j].Name
+	}
+
+	// Sort by tag set hash. Tags don't have a meaningful sort order so we
+	// just compute a hash and sort by that instead. This allows the tests
+	// to receive rows in a predictable order every time.
+	return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 0000000000000000000000000000000000000000..553e9d09fb64ee317371705421df717ca8144196
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+	Name   string                 `json:"name"`
+	Tags   map[string]string      `json:"tags"`
+	Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+	return Statistic{
+		Name:   name,
+		Tags:   make(map[string]string),
+		Values: make(map[string]interface{}),
+	}
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+	// Add everything in tags to the result.
+	out := make(map[string]string, len(tags))
+	for k, v := range tags {
+		out[k] = v
+	}
+
+	// Only add values from t that don't appear in tags.
+	for k, v := range t {
+		if _, ok := tags[k]; !ok {
+			out[k] = v
+		}
+	}
+	return out
+}
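+
+// For instance, merging favors the argument on conflicting keys:
+//
+//	t := StatisticTags{"engine": "tsm1", "path": "/a"}
+//	m := t.Merge(map[string]string{"path": "/b"})
+//	// m == map[string]string{"engine": "tsm1", "path": "/b"}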
diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go
new file mode 100644
index 0000000000000000000000000000000000000000..e98f2cb3363505728645893c0d7b8a0d2d23b7ef
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/time.go
@@ -0,0 +1,74 @@
+package models
+
+// Helper time methods since parsing time can easily overflow and we only support a
+// specific time range.
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+const (
+	// MinNanoTime is the minimum time that can be represented.
+	//
+	// 1677-09-21 00:12:43.145224194 +0000 UTC
+	//
+	// The two lowest minimum integers are used as sentinel values.  The
+	// minimum value needs to be used as a value lower than any other value for
+	// comparisons and another separate value is needed to act as a sentinel
+	// default value that is unusable by the user, but usable internally.
+	// Because these two values need to be used for a special purpose, we do
+	// not allow users to write points at these two times.
+	MinNanoTime = int64(math.MinInt64) + 2
+
+	// MaxNanoTime is the maximum time that can be represented.
+	//
+	// 2262-04-11 23:47:16.854775806 +0000 UTC
+	//
+	// The highest time represented by a nanosecond needs to be used for an
+	// exclusive range in the shard group, so the maximum time needs to be one
+	// less than the possible maximum number of nanoseconds representable by an
+	// int64 so that we don't lose a point at that one time.
+	MaxNanoTime = int64(math.MaxInt64) - 1
+)
+
+var (
+	minNanoTime = time.Unix(0, MinNanoTime).UTC()
+	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
+
+	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. It returns an error if the
+// time is outside the supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+	mult := GetPrecisionMultiplier(precision)
+	if t, ok := safeSignedMult(timestamp, mult); ok {
+		tme := time.Unix(0, t).UTC()
+		return tme, CheckTime(tme)
+	}
+
+	return time.Time{}, ErrTimeOutOfRange
+}
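+
+// Assuming GetPrecisionMultiplier (defined elsewhere in this package) maps
+// "s" to the number of nanoseconds in a second, a sketch of the conversion:
+//
+//	t, err := SafeCalcTime(5, "s")
+//	// t == time.Unix(5, 0).UTC(), err == nil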
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+	if t.Before(minNanoTime) || t.After(maxNanoTime) {
+		return ErrTimeOutOfRange
+	}
+	return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 || a == 1 || b == 1 {
+		return a * b, true
+	}
+	if a == MinNanoTime || b == MaxNanoTime {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
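+
+// The c/b == a check detects wraparound: signed overflow wraps in Go, so
+// dividing a wrapped product by one factor no longer recovers the other.
+// The early returns keep b non-zero for that division and reject the
+// extreme sentinel operands outright.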
diff --git a/vendor/github.com/influxdata/influxdb/models/uint_support.go b/vendor/github.com/influxdata/influxdb/models/uint_support.go
new file mode 100644
index 0000000000000000000000000000000000000000..18d1ca06e2dcffe42155f1947d5d652ffdbdf74b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/uint_support.go
@@ -0,0 +1,7 @@
+// +build uint uint64
+
+package models
+
+func init() {
+	EnableUintSupport()
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3b31f42d36841df4afd74bfaa8fe9b52440e235
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
@@ -0,0 +1,115 @@
+// Package escape contains utilities for escaping parts of InfluxQL
+// and InfluxDB line protocol.
+package escape // import "github.com/influxdata/influxdb/pkg/escape"
+
+import (
+	"bytes"
+	"strings"
+)
+
+// Codes is a map of bytes to be escaped.
+var Codes = map[byte][]byte{
+	',': []byte(`\,`),
+	'"': []byte(`\"`),
+	' ': []byte(`\ `),
+	'=': []byte(`\=`),
+}
+
+// Bytes escapes characters on the input slice, as defined by Codes.
+func Bytes(in []byte) []byte {
+	for b, esc := range Codes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+const escapeChars = `," =`
+
+// IsEscaped returns whether b has any escaped characters,
+// i.e. whether b seems to have been processed by Bytes.
+func IsEscaped(b []byte) bool {
+	for len(b) > 0 {
+		i := bytes.IndexByte(b, '\\')
+		if i < 0 {
+			return false
+		}
+
+		if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
+			return true
+		}
+		b = b[i+1:]
+	}
+	return false
+}
+
+// AppendUnescaped appends the unescaped version of src to dst
+// and returns the resulting slice.
+func AppendUnescaped(dst, src []byte) []byte {
+	var pos int
+	for len(src) > 0 {
+		next := bytes.IndexByte(src[pos:], '\\')
+		if next < 0 || pos+next+1 >= len(src) {
+			return append(dst, src...)
+		}
+
+		if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
+			if pos+next > 0 {
+				dst = append(dst, src[:pos+next]...)
+			}
+			src = src[pos+next+1:]
+			pos = 0
+		} else {
+			pos += next + 1
+		}
+	}
+
+	return dst
+}
+
+// Unescape returns a new slice containing the unescaped version of in.
+func Unescape(in []byte) []byte {
+	if len(in) == 0 {
+		return nil
+	}
+
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	i := 0
+	inLen := len(in)
+
+	// The output size will be no more than inLen. Preallocating the
+	// capacity of the output is faster and uses less memory than
+	// letting append() do its own (over)allocation.
+	out := make([]byte, 0, inLen)
+
+	for {
+		if i >= inLen {
+			break
+		}
+		if in[i] == '\\' && i+1 < inLen {
+			switch in[i+1] {
+			case ',':
+				out = append(out, ',')
+				i += 2
+				continue
+			case '"':
+				out = append(out, '"')
+				i += 2
+				continue
+			case ' ':
+				out = append(out, ' ')
+				i += 2
+				continue
+			case '=':
+				out = append(out, '=')
+				i += 2
+				continue
+			}
+		}
+		out = append(out, in[i])
+		i++
+	}
+	return out
+}
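+
+// A small round trip through the byte-level helpers:
+//
+//	b := Bytes([]byte("cpu load,host=a")) // cpu\ load\,host\=a
+//	_ = Unescape(b)                       // cpu load,host=a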
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
new file mode 100644
index 0000000000000000000000000000000000000000..db98033b0d7a2b4174ba29825b81f8c5eb044cdb
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
@@ -0,0 +1,21 @@
+package escape
+
+import "strings"
+
+var (
+	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
+	unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
+)
+
+// UnescapeString returns unescaped version of in.
+func UnescapeString(in string) string {
+	if strings.IndexByte(in, '\\') == -1 {
+		return in
+	}
+	return unescaper.Replace(in)
+}
+
+// String returns the escaped version of in.
+func String(in string) string {
+	return escaper.Replace(in)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
deleted file mode 100644
index 2fdcbcfbf1da9afccc796297881e35596aa3c25b..0000000000000000000000000000000000000000
--- a/vendor/github.com/rcrowley/go-metrics/json.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package metrics
-
-import (
-	"encoding/json"
-	"io"
-	"time"
-)
-
-// MarshalJSON returns a byte slice containing a JSON representation of all
-// the metrics in the Registry.
-func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
-	data := make(map[string]map[string]interface{})
-	r.Each(func(name string, i interface{}) {
-		values := make(map[string]interface{})
-		switch metric := i.(type) {
-		case Counter:
-			values["count"] = metric.Count()
-		case Gauge:
-			values["value"] = metric.Value()
-		case GaugeFloat64:
-			values["value"] = metric.Value()
-		case Healthcheck:
-			values["error"] = nil
-			metric.Check()
-			if err := metric.Error(); nil != err {
-				values["error"] = metric.Error().Error()
-			}
-		case Histogram:
-			h := metric.Snapshot()
-			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			values["count"] = h.Count()
-			values["min"] = h.Min()
-			values["max"] = h.Max()
-			values["mean"] = h.Mean()
-			values["stddev"] = h.StdDev()
-			values["median"] = ps[0]
-			values["75%"] = ps[1]
-			values["95%"] = ps[2]
-			values["99%"] = ps[3]
-			values["99.9%"] = ps[4]
-		case Meter:
-			m := metric.Snapshot()
-			values["count"] = m.Count()
-			values["1m.rate"] = m.Rate1()
-			values["5m.rate"] = m.Rate5()
-			values["15m.rate"] = m.Rate15()
-			values["mean.rate"] = m.RateMean()
-		case Timer:
-			t := metric.Snapshot()
-			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			values["count"] = t.Count()
-			values["min"] = t.Min()
-			values["max"] = t.Max()
-			values["mean"] = t.Mean()
-			values["stddev"] = t.StdDev()
-			values["median"] = ps[0]
-			values["75%"] = ps[1]
-			values["95%"] = ps[2]
-			values["99%"] = ps[3]
-			values["99.9%"] = ps[4]
-			values["1m.rate"] = t.Rate1()
-			values["5m.rate"] = t.Rate5()
-			values["15m.rate"] = t.Rate15()
-			values["mean.rate"] = t.RateMean()
-		}
-		data[name] = values
-	})
-	return json.Marshal(data)
-}
-
-// WriteJSON writes metrics from the given registry  periodically to the
-// specified io.Writer as JSON.
-func WriteJSON(r Registry, d time.Duration, w io.Writer) {
-	for _ = range time.Tick(d) {
-		WriteJSONOnce(r, w)
-	}
-}
-
-// WriteJSONOnce writes metrics from the given registry to the specified
-// io.Writer as JSON.
-func WriteJSONOnce(r Registry, w io.Writer) {
-	json.NewEncoder(w).Encode(r)
-}
-
-func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
-	return json.Marshal(p.underlying)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
deleted file mode 100644
index b97a49ed123ee7011e381cadea9b7104200297fc..0000000000000000000000000000000000000000
--- a/vendor/github.com/rcrowley/go-metrics/metrics.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-// <https://github.com/rcrowley/go-metrics>
-//
-// Coda Hale's original work: <https://github.com/codahale/metrics>
-package metrics
-
-// UseNilMetrics is checked by the constructor functions for all of the
-// standard metrics.  If it is true, the metric returned is a stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var UseNilMetrics bool = false
diff --git a/vendor/vendor.json b/vendor/vendor.json
index e938ce5e645d4a2474d2c362f93c57d9e4a515c1..13415899524304f2b947288332ffa5d0002763bc 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -206,6 +206,30 @@
 			"revision": "679507af18f3c7ba2bcc7905392ce23e148661c3",
 			"revisionTime": "2016-12-24T10:41:01Z"
 		},
+		{
+			"checksumSHA1": "6tNwbL5tUS0dxYzADKVZtI2d/lE=",
+			"path": "github.com/influxdata/influxdb/client",
+			"revision": "a55dd0f50edd14c9c798d3564189eb4f53914309",
+			"revisionTime": "2017-10-09T17:24:46Z"
+		},
+		{
+			"checksumSHA1": "O4XpbSNeUhSIMD2FWtQximJiFIs=",
+			"path": "github.com/influxdata/influxdb/client/v2",
+			"revision": "b36b9f109f2da91c8941679caf5356e08eee0b2b",
+			"revisionTime": "2018-01-17T01:42:09Z"
+		},
+		{
+			"checksumSHA1": "cfumoC9gHEUROd+fA8qK3WLFAZQ=",
+			"path": "github.com/influxdata/influxdb/models",
+			"revision": "b36b9f109f2da91c8941679caf5356e08eee0b2b",
+			"revisionTime": "2018-01-17T01:42:09Z"
+		},
+		{
+			"checksumSHA1": "Z0Bb5PWa5WL/j5Dm2KJCLGn1l7U=",
+			"path": "github.com/influxdata/influxdb/pkg/escape",
+			"revision": "01288bdb0883a01cac999326bd34421b29acaec8",
+			"revisionTime": "2018-02-21T22:33:40Z"
+		},
 		{
 			"checksumSHA1": "vTGKMIfiMwz43y5bsgx9PrL+AVw=",
 			"path": "github.com/jackpal/go-nat-pmp",
@@ -309,18 +333,6 @@
 			"revision": "3101606756c53221ed58ba94ecba6b26adf89dcc",
 			"revisionTime": "2017-08-14T17:01:13Z"
 		},
-		{
-			"checksumSHA1": "KAzbLjI9MzW2tjfcAsK75lVRp6I=",
-			"path": "github.com/rcrowley/go-metrics",
-			"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
-			"revisionTime": "2016-11-28T21:05:44Z"
-		},
-		{
-			"checksumSHA1": "q/d9nXRQYKEJ/EWn+5y6jL8rPGs=",
-			"path": "github.com/rcrowley/go-metrics/exp",
-			"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
-			"revisionTime": "2016-11-28T21:05:44Z"
-		},
 		{
 			"checksumSHA1": "28UVHMmHx0iqO0XiJsjx+fwILyI=",
 			"path": "github.com/rjeczalik/notify",