diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go
index bbb6d6a75bce200aa8e7028e8b6a3ed90e3036d0..88234688e456915d61762fd5329b4ed4714e2de5 100644
--- a/accounts/abi/bind/util.go
+++ b/accounts/abi/bind/util.go
@@ -22,8 +22,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/net/context"
 )
 
@@ -39,9 +38,9 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
 			return receipt, nil
 		}
 		if err != nil {
-			glog.V(logger.Detail).Infof("tx %x error: %v", loghash, err)
+			log.Trace(fmt.Sprintf("tx %x error: %v", loghash, err))
 		} else {
-			glog.V(logger.Detail).Infof("tx %x not yet mined...", loghash)
+			log.Trace(fmt.Sprintf("tx %x not yet mined...", loghash))
 		}
 		// Wait for the next round.
 		select {
diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go
index 3fae3ef5b6460ff933d8ea61bafd74a34c4db262..11100ebc1dd3d867afe0b5310be7270832d44bcf 100644
--- a/accounts/keystore/account_cache.go
+++ b/accounts/keystore/account_cache.go
@@ -30,8 +30,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // Minimum amount of time between cache reloads. This limit applies if the platform does
@@ -210,8 +209,8 @@ func (ac *accountCache) close() {
 // Callers must hold ac.mu.
 func (ac *accountCache) reload() {
 	accounts, err := ac.scan()
-	if err != nil && glog.V(logger.Debug) {
-		glog.Errorf("can't load keys: %v", err)
+	if err != nil {
+		log.Debug(fmt.Sprintf("can't load keys: %v", err))
 	}
 	ac.all = accounts
 	sort.Sort(ac.all)
@@ -225,7 +224,7 @@ func (ac *accountCache) reload() {
 	case ac.notify <- struct{}{}:
 	default:
 	}
-	glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all))
+	log.Debug(fmt.Sprintf("reloaded keys, cache has %d accounts", len(ac.all)))
 }
 
 func (ac *accountCache) scan() ([]accounts.Account, error) {
@@ -244,12 +243,12 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
 	for _, fi := range files {
 		path := filepath.Join(ac.keydir, fi.Name())
 		if skipKeyFile(fi) {
-			glog.V(logger.Detail).Infof("ignoring file %s", path)
+			log.Trace(fmt.Sprintf("ignoring file %s", path))
 			continue
 		}
 		fd, err := os.Open(path)
 		if err != nil {
-			glog.V(logger.Detail).Infoln(err)
+			log.Trace(fmt.Sprint(err))
 			continue
 		}
 		buf.Reset(fd)
@@ -259,9 +258,9 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
 		addr := common.HexToAddress(keyJSON.Address)
 		switch {
 		case err != nil:
-			glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err)
+			log.Debug(fmt.Sprintf("can't decode key %s: %v", path, err))
 		case (addr == common.Address{}):
-			glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path)
+			log.Debug(fmt.Sprintf("can't decode key %s: missing or zero address", path))
 		default:
 			addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
 		}
diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go
index 0b44012554560e4af35abddf8fd67742f1f4b023..ff95a7cdcee4b4920d53fb4c68909c92bfca234c 100644
--- a/accounts/keystore/watch.go
+++ b/accounts/keystore/watch.go
@@ -19,10 +19,10 @@
 package keystore
 
 import (
+	"fmt"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/rjeczalik/notify"
 )
 
@@ -67,12 +67,12 @@ func (w *watcher) loop() {
 
 	err := notify.Watch(w.ac.keydir, w.ev, notify.All)
 	if err != nil {
-		glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err)
+		log.Trace(fmt.Sprintf("can't watch %s: %v", w.ac.keydir, err))
 		return
 	}
 	defer notify.Stop(w.ev)
-	glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir)
-	defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir)
+	log.Trace(fmt.Sprintf("now watching %s", w.ac.keydir))
+	defer log.Trace(fmt.Sprintf("no longer watching %s", w.ac.keydir))
 
 	w.ac.mu.Lock()
 	w.running = true
diff --git a/accounts/usbwallet/ledger_wallet.go b/accounts/usbwallet/ledger_wallet.go
index 235086d1ea16575654856f3c1989ea2aee5582df..0dc53daf53ee36f47f71bf5d54318ea8c2a64f3e 100644
--- a/accounts/usbwallet/ledger_wallet.go
+++ b/accounts/usbwallet/ledger_wallet.go
@@ -34,8 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/karalabe/hid"
 	"golang.org/x/net/context"
@@ -220,8 +219,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
 //  - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
 //  - communication timeout on the Ledger requires a device power cycle to fix
 func (w *ledgerWallet) heartbeat() {
-	glog.V(logger.Debug).Infof("%s health-check started", w.url.String())
-	defer glog.V(logger.Debug).Infof("%s health-check stopped", w.url.String())
+	log.Debug(fmt.Sprintf("%s health-check started", w.url.String()))
+	defer log.Debug(fmt.Sprintf("%s health-check stopped", w.url.String()))
 
 	// Execute heartbeat checks until termination or error
 	var (
@@ -260,7 +259,7 @@ func (w *ledgerWallet) heartbeat() {
 	}
 	// In case of error, wait for termination
 	if err != nil {
-		glog.V(logger.Debug).Infof("%s health-check failed: %v", w.url.String(), err)
+		log.Debug(fmt.Sprintf("%s health-check failed: %v", w.url.String(), err))
 		errc = <-w.healthQuit
 	}
 	errc <- err
@@ -348,8 +347,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
 // selfDerive is an account derivation loop that upon request attempts to find
 // new non-zero accounts.
 func (w *ledgerWallet) selfDerive() {
-	glog.V(logger.Debug).Infof("%s self-derivation started", w.url.String())
-	defer glog.V(logger.Debug).Infof("%s self-derivation stopped", w.url.String())
+	log.Debug(fmt.Sprintf("%s self-derivation started", w.url.String()))
+	defer log.Debug(fmt.Sprintf("%s self-derivation stopped", w.url.String()))
 
 	// Execute self-derivations until termination or error
 	var (
@@ -394,7 +393,7 @@ func (w *ledgerWallet) selfDerive() {
 			// Retrieve the next derived Ethereum account
 			if nextAddr == (common.Address{}) {
 				if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
-					glog.V(logger.Warn).Infof("%s self-derivation failed: %v", w.url.String(), err)
+					log.Warn(fmt.Sprintf("%s self-derivation failed: %v", w.url.String(), err))
 					break
 				}
 			}
@@ -405,12 +404,12 @@ func (w *ledgerWallet) selfDerive() {
 			)
 			balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
 			if err != nil {
-				glog.V(logger.Warn).Infof("%s self-derivation balance retrieval failed: %v", w.url.String(), err)
+				log.Warn(fmt.Sprintf("%s self-derivation balance retrieval failed: %v", w.url.String(), err))
 				break
 			}
 			nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
 			if err != nil {
-				glog.V(logger.Warn).Infof("%s self-derivation nonce retrieval failed: %v", w.url.String(), err)
+				log.Warn(fmt.Sprintf("%s self-derivation nonce retrieval failed: %v", w.url.String(), err))
 				break
 			}
 			// If the next account is empty, stop self-derivation, but add it nonetheless
@@ -430,7 +429,7 @@ func (w *ledgerWallet) selfDerive() {
 
 			// Display a log message to the user for new (or previously empty accounts)
 			if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
-				glog.V(logger.Info).Infof("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path)
+				log.Info(fmt.Sprintf("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path))
 			}
 			// Fetch the next potential account
 			if !empty {
@@ -469,7 +468,7 @@ func (w *ledgerWallet) selfDerive() {
 	}
 	// In case of error, wait for termination
 	if err != nil {
-		glog.V(logger.Debug).Infof("%s self-derivation failed: %s", w.url.String(), err)
+		log.Debug(fmt.Sprintf("%s self-derivation failed: %s", w.url.String(), err))
 		errc = <-w.deriveQuit
 	}
 	errc <- err
@@ -849,9 +848,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
 			apdu = nil
 		}
 		// Send over to the device
-		if glog.V(logger.Detail) {
-			glog.Infof("-> %s: %x", w.device.Path, chunk)
-		}
+		log.Trace("", "msg", log.Lazy{Fn: func() string { return fmt.Sprintf("-> %s: %x", w.device.Path, chunk) }})
 		if _, err := w.device.Write(chunk); err != nil {
 			return nil, err
 		}
@@ -864,9 +861,8 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
 		if _, err := io.ReadFull(w.device, chunk); err != nil {
 			return nil, err
 		}
-		if glog.V(logger.Detail) {
-			glog.Infof("<- %s: %x", w.device.Path, chunk)
-		}
+		log.Trace("", "msg", log.Lazy{Fn: func() string { return fmt.Sprintf("<- %s: %x", w.device.Path, chunk) }})
+
 		// Make sure the transport header matches
 		if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
 			return nil, errReplyInvalidHeader
diff --git a/build/update-license.go b/build/update-license.go
index cc713aa6c30d841d2f5da9670c80635619639142..ee5281410e84a749a67d4dac6423a739179e01c2 100644
--- a/build/update-license.go
+++ b/build/update-license.go
@@ -47,7 +47,7 @@ var (
 		// boring stuff
 		"vendor/", "tests/files/", "build/",
 		// don't relicense vendored sources
-		"crypto/sha3/", "crypto/ecies/", "logger/glog/",
+		"crypto/sha3/", "crypto/ecies/", "log/",
 		"crypto/secp256k1/curve.go",
 		// don't license generated files
 		"contracts/chequebook/contract/",
diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go
index 9b5ba19368ed586a82e7cbc04217c1e4dffe1f74..f8cc77f8366ce701eb74fdc13ab28f1bf03f21ea 100644
--- a/cmd/bootnode/main.go
+++ b/cmd/bootnode/main.go
@@ -23,9 +23,8 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/nat"
@@ -42,39 +41,43 @@ func main() {
 		natdesc     = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
 		netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
 		runv5       = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
+		verbosity   = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
+		vmodule     = flag.String("vmodule", "", "log verbosity pattern")
 
 		nodeKey *ecdsa.PrivateKey
 		err     error
 	)
-	flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
-	flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
-	glog.SetToStderr(true)
 	flag.Parse()
 
+	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
+	glogger.Verbosity(log.Lvl(*verbosity))
+	glogger.Vmodule(*vmodule)
+	log.Root().SetHandler(glogger)
+
 	natm, err := nat.Parse(*natdesc)
 	if err != nil {
-		utils.Fatalf("-nat: %v", err)
+		log.Crit(fmt.Sprintf("-nat: %v", err))
 	}
 	switch {
 	case *genKey != "":
 		nodeKey, err = crypto.GenerateKey()
 		if err != nil {
-			utils.Fatalf("could not generate key: %v", err)
+			log.Crit(fmt.Sprintf("could not generate key: %v", err))
 		}
 		if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
-			utils.Fatalf("%v", err)
+			log.Crit(fmt.Sprintf("%v", err))
 		}
 	case *nodeKeyFile == "" && *nodeKeyHex == "":
-		utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
+		log.Crit("Use -nodekey or -nodekeyhex to specify a private key")
 	case *nodeKeyFile != "" && *nodeKeyHex != "":
-		utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
+		log.Crit("Options -nodekey and -nodekeyhex are mutually exclusive")
 	case *nodeKeyFile != "":
 		if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
-			utils.Fatalf("-nodekey: %v", err)
+			log.Crit(fmt.Sprintf("-nodekey: %v", err))
 		}
 	case *nodeKeyHex != "":
 		if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
-			utils.Fatalf("-nodekeyhex: %v", err)
+			log.Crit(fmt.Sprintf("-nodekeyhex: %v", err))
 		}
 	}
 
@@ -87,17 +90,17 @@ func main() {
 	if *netrestrict != "" {
 		restrictList, err = netutil.ParseNetlist(*netrestrict)
 		if err != nil {
-			utils.Fatalf("-netrestrict: %v", err)
+			log.Crit(fmt.Sprintf("-netrestrict: %v", err))
 		}
 	}
 
 	if *runv5 {
 		if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
-			utils.Fatalf("%v", err)
+			log.Crit(fmt.Sprintf("%v", err))
 		}
 	} else {
 		if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
-			utils.Fatalf("%v", err)
+			log.Crit(fmt.Sprintf("%v", err))
 		}
 	}
 
diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go
index 14b8395798209472619e8e4ad874a5136c95225d..a107c701fbfcedf298877a86acfe95738b749962 100644
--- a/cmd/ethtest/main.go
+++ b/cmd/ethtest/main.go
@@ -25,7 +25,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/tests"
 	"gopkg.in/urfave/cli.v1"
@@ -70,7 +70,7 @@ var (
 )
 
 func runTestWithReader(test string, r io.Reader) error {
-	glog.Infoln("runTest", test)
+	log.Info(fmt.Sprintf("runTest %s", test))
 	var err error
 	switch strings.ToLower(test) {
 	case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests":
@@ -92,7 +92,7 @@ func runTestWithReader(test string, r io.Reader) error {
 }
 
 func getFiles(path string) ([]string, error) {
-	glog.Infoln("getFiles", path)
+	log.Info(fmt.Sprintf("getFiles %s", path))
 	var files []string
 	f, err := os.Open(path)
 	if err != nil {
@@ -113,7 +113,7 @@ func getFiles(path string) ([]string, error) {
 			// only go 1 depth and leave directory entires blank
 			if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension {
 				files[i] = filepath.Join(path, v.Name())
-				glog.Infoln("Found file", files[i])
+				log.Info(fmt.Sprintf("Found file %s", files[i]))
 			}
 		}
 	case mode.IsRegular():
@@ -134,7 +134,7 @@ func runSuite(test, file string) {
 	}
 
 	for _, curTest := range tests {
-		glog.Infoln("runSuite", curTest, file)
+		log.Info(fmt.Sprintf("runSuite %s %s", curTest, file))
 		var err error
 		var files []string
 		if test == defaultTest {
@@ -149,11 +149,11 @@ func runSuite(test, file string) {
 			files, err = getFiles(file)
 		}
 		if err != nil {
-			glog.Fatalln(err)
+			log.Crit(fmt.Sprint(err))
 		}
 
 		if len(files) == 0 {
-			glog.Warningln("No files matched path")
+			log.Warn("No files matched path")
 		}
 		for _, curFile := range files {
 			// Skip blank entries
@@ -163,16 +163,16 @@ func runSuite(test, file string) {
 
 			r, err := os.Open(curFile)
 			if err != nil {
-				glog.Fatalln(err)
+				log.Crit(fmt.Sprint(err))
 			}
 			defer r.Close()
 
 			err = runTestWithReader(curTest, r)
 			if err != nil {
 				if continueOnError {
-					glog.Errorln(err)
+					log.Error(fmt.Sprint(err))
 				} else {
-					glog.Fatalln(err)
+					log.Crit(fmt.Sprint(err))
 				}
 			}
 		}
@@ -190,14 +190,14 @@ func setupApp(c *cli.Context) error {
 		runSuite(flagTest, flagFile)
 	} else {
 		if err := runTestWithReader(flagTest, os.Stdin); err != nil {
-			glog.Fatalln(err)
+			log.Crit(fmt.Sprint(err))
 		}
 	}
 	return nil
 }
 
 func main() {
-	glog.SetToStderr(true)
+	log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
 
 	app := cli.NewApp()
 	app.Name = "ethtest"
@@ -216,7 +216,7 @@ func main() {
 	}
 
 	if err := app.Run(os.Args); err != nil {
-		glog.Fatalln(err)
+		log.Crit(fmt.Sprint(err))
 	}
 
 }
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index 0693d7cd3252c6d6031a8710582129643a1cf979..86e2493ca1c113c84874677fff2d8e3de2a5bac6 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/core/vm/runtime"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -111,8 +111,9 @@ func init() {
 }
 
 func run(ctx *cli.Context) error {
-	glog.SetToStderr(true)
-	glog.SetV(ctx.GlobalInt(VerbosityFlag.Name))
+	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
+	glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
+	log.Root().SetHandler(glogger)
 
 	var (
 		db, _      = ethdb.NewMemDatabase()
diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go
index cd398eadb1405daec3472ae4f623d3ec3e4fe490..b7c411e8254b9f7b96537353e9b03819a21cf788 100644
--- a/cmd/geth/accountcmd.go
+++ b/cmd/geth/accountcmd.go
@@ -19,14 +19,14 @@ package main
 import (
 	"fmt"
 	"io/ioutil"
+	"os"
 
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -196,18 +196,19 @@ func accountList(ctx *cli.Context) error {
 func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i int, passwords []string) (accounts.Account, string) {
 	account, err := utils.MakeAddress(ks, address)
 	if err != nil {
-		utils.Fatalf("Could not list accounts: %v", err)
+		fmt.Printf("Fatal: Could not list accounts: %v\n", err)
+		os.Exit(1)
 	}
 	for trials := 0; trials < 3; trials++ {
 		prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
 		password := getPassPhrase(prompt, false, i, passwords)
 		err = ks.Unlock(account, password)
 		if err == nil {
-			glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
+			log.Info(fmt.Sprintf("Unlocked account %x", account.Address))
 			return account, password
 		}
 		if err, ok := err.(*keystore.AmbiguousAddrError); ok {
-			glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
+			log.Info(fmt.Sprintf("Unlocked account %x", account.Address))
 			return ambiguousAddrRecovery(ks, err, password), password
 		}
 		if err != keystore.ErrDecrypt {
@@ -216,7 +217,9 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
 		}
 	}
 	// All trials expended to unlock account, bail out
-	utils.Fatalf("Failed to unlock account %s (%v)", address, err)
+	fmt.Printf("Fatal: Failed to unlock account %s (%v)\n", address, err)
+	os.Exit(1)
+
 	return accounts.Account{}, ""
 }
 
@@ -236,15 +239,18 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string)
 	}
 	password, err := console.Stdin.PromptPassword("Passphrase: ")
 	if err != nil {
-		utils.Fatalf("Failed to read passphrase: %v", err)
+		fmt.Printf("Fatal: Failed to read passphrase: %v\n", err)
+		os.Exit(1)
 	}
 	if confirmation {
 		confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
 		if err != nil {
-			utils.Fatalf("Failed to read passphrase confirmation: %v", err)
+			fmt.Printf("Fatal: Failed to read passphrase confirmation: %v\n", err)
+			os.Exit(1)
 		}
 		if password != confirm {
-			utils.Fatalf("Passphrases do not match")
+			fmt.Printf("Fatal: Passphrases do not match\n")
+			os.Exit(1)
 		}
 	}
 	return password
@@ -264,7 +270,8 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
 		}
 	}
 	if match == nil {
-		utils.Fatalf("None of the listed files could be unlocked.")
+		fmt.Printf("Fatal: None of the listed files could be unlocked.\n")
+		os.Exit(1)
 	}
 	fmt.Printf("Your passphrase unlocked %s\n", match.URL)
 	fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
@@ -284,7 +291,8 @@ func accountCreate(ctx *cli.Context) error {
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
 	account, err := ks.NewAccount(password)
 	if err != nil {
-		utils.Fatalf("Failed to create account: %v", err)
+		fmt.Printf("Fatal: Failed to create account: %v\n", err)
+		os.Exit(1)
 	}
 	fmt.Printf("Address: {%x}\n", account.Address)
 	return nil
@@ -294,7 +302,8 @@ func accountCreate(ctx *cli.Context) error {
 // one, also providing the possibility to change the pass-phrase.
 func accountUpdate(ctx *cli.Context) error {
 	if len(ctx.Args()) == 0 {
-		utils.Fatalf("No accounts specified to update")
+		fmt.Printf("Fatal: No accounts specified to update\n")
+		os.Exit(1)
 	}
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@@ -302,7 +311,8 @@ func accountUpdate(ctx *cli.Context) error {
 	account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil)
 	newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
 	if err := ks.Update(account, oldPassword, newPassword); err != nil {
-		utils.Fatalf("Could not update the account: %v", err)
+		fmt.Printf("Fatal: Could not update the account: %v\n", err)
+		os.Exit(1)
 	}
 	return nil
 }
@@ -310,11 +320,13 @@ func accountUpdate(ctx *cli.Context) error {
 func importWallet(ctx *cli.Context) error {
 	keyfile := ctx.Args().First()
 	if len(keyfile) == 0 {
-		utils.Fatalf("keyfile must be given as argument")
+		fmt.Printf("Fatal: keyfile must be given as argument\n")
+		os.Exit(1)
 	}
 	keyJson, err := ioutil.ReadFile(keyfile)
 	if err != nil {
-		utils.Fatalf("Could not read wallet file: %v", err)
+		fmt.Printf("Fatal: Could not read wallet file: %v\n", err)
+		os.Exit(1)
 	}
 
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
@@ -323,7 +335,8 @@ func importWallet(ctx *cli.Context) error {
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
 	acct, err := ks.ImportPreSaleKey(keyJson, passphrase)
 	if err != nil {
-		utils.Fatalf("%v", err)
+		fmt.Printf("Fatal: %v\n", err)
+		os.Exit(1)
 	}
 	fmt.Printf("Address: {%x}\n", acct.Address)
 	return nil
@@ -332,11 +345,13 @@ func importWallet(ctx *cli.Context) error {
 func accountImport(ctx *cli.Context) error {
 	keyfile := ctx.Args().First()
 	if len(keyfile) == 0 {
-		utils.Fatalf("keyfile must be given as argument")
+		fmt.Printf("Fatal: keyfile must be given as argument\n")
+		os.Exit(1)
 	}
 	key, err := crypto.LoadECDSA(keyfile)
 	if err != nil {
-		utils.Fatalf("Failed to load the private key: %v", err)
+		fmt.Printf("Fatal: Failed to load the private key: %v\n", err)
+		os.Exit(1)
 	}
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
 	passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
@@ -344,7 +359,8 @@ func accountImport(ctx *cli.Context) error {
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
 	acct, err := ks.ImportECDSA(key, passphrase)
 	if err != nil {
-		utils.Fatalf("Could not create the account: %v", err)
+		fmt.Printf("Fatal: Could not create the account: %v\n", err)
+		os.Exit(1)
 	}
 	fmt.Printf("Address: {%x}\n", acct.Address)
 	return nil
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index f38ee046f18a9e57e8088132b59bb6446297b79a..1127a1090054d5a4247140301ad10c55bfba9290 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -32,8 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/syndtr/goleveldb/leveldb/util"
 	"gopkg.in/urfave/cli.v1"
@@ -113,7 +112,7 @@ Use "ethereum dump 0" to dump the genesis block.
 func initGenesis(ctx *cli.Context) error {
 	genesisPath := ctx.Args().First()
 	if len(genesisPath) == 0 {
-		utils.Fatalf("must supply path to genesis JSON file")
+		log.Crit("must supply path to genesis JSON file")
 	}
 
 	stack := makeFullNode(ctx)
@@ -121,21 +120,21 @@ func initGenesis(ctx *cli.Context) error {
 
 	genesisFile, err := os.Open(genesisPath)
 	if err != nil {
-		utils.Fatalf("failed to read genesis file: %v", err)
+		log.Crit(fmt.Sprintf("failed to read genesis file: %v", err))
 	}
 	defer genesisFile.Close()
 
 	block, err := core.WriteGenesisBlock(chaindb, genesisFile)
 	if err != nil {
-		utils.Fatalf("failed to write genesis block: %v", err)
+		log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
 	}
-	glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash())
+	log.Info(fmt.Sprintf("successfully wrote genesis block and/or chain rule set: %x", block.Hash()))
 	return nil
 }
 
 func importChain(ctx *cli.Context) error {
 	if len(ctx.Args()) != 1 {
-		utils.Fatalf("This command requires an argument.")
+		log.Crit("This command requires an argument.")
 	}
 	stack := makeFullNode(ctx)
 	chain, chainDb := utils.MakeChain(ctx, stack)
@@ -159,7 +158,7 @@ func importChain(ctx *cli.Context) error {
 	// Import the chain
 	start := time.Now()
 	if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
-		utils.Fatalf("Import error: %v", err)
+		log.Crit(fmt.Sprintf("Import error: %v", err))
 	}
 	fmt.Printf("Import done in %v.\n\n", time.Since(start))
 
@@ -168,7 +167,7 @@ func importChain(ctx *cli.Context) error {
 
 	stats, err := db.LDB().GetProperty("leveldb.stats")
 	if err != nil {
-		utils.Fatalf("Failed to read database stats: %v", err)
+		log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
 	}
 	fmt.Println(stats)
 	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
@@ -187,13 +186,13 @@ func importChain(ctx *cli.Context) error {
 	start = time.Now()
 	fmt.Println("Compacting entire database...")
 	if err = db.LDB().CompactRange(util.Range{}); err != nil {
-		utils.Fatalf("Compaction failed: %v", err)
+		log.Crit(fmt.Sprintf("Compaction failed: %v", err))
 	}
 	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
 
 	stats, err = db.LDB().GetProperty("leveldb.stats")
 	if err != nil {
-		utils.Fatalf("Failed to read database stats: %v", err)
+		log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
 	}
 	fmt.Println(stats)
 
@@ -202,7 +201,7 @@ func importChain(ctx *cli.Context) error {
 
 func exportChain(ctx *cli.Context) error {
 	if len(ctx.Args()) < 1 {
-		utils.Fatalf("This command requires an argument.")
+		log.Crit("This command requires an argument.")
 	}
 	stack := makeFullNode(ctx)
 	chain, _ := utils.MakeChain(ctx, stack)
@@ -217,16 +216,16 @@ func exportChain(ctx *cli.Context) error {
 		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
 		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
 		if ferr != nil || lerr != nil {
-			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
+			log.Crit("Export error in parsing parameters: block number not an integer\n")
 		}
 		if first < 0 || last < 0 {
-			utils.Fatalf("Export error: block number must be greater than 0\n")
+			log.Crit("Export error: block number must be greater than 0\n")
 		}
 		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
 	}
 
 	if err != nil {
-		utils.Fatalf("Export error: %v\n", err)
+		log.Crit(fmt.Sprintf("Export error: %v\n", err))
 	}
 	fmt.Printf("Export done in %v", time.Since(start))
 	return nil
@@ -244,7 +243,7 @@ func removeDB(ctx *cli.Context) error {
 	confirm, err := console.Stdin.PromptConfirm("Remove this database?")
 	switch {
 	case err != nil:
-		utils.Fatalf("%v", err)
+		log.Crit(fmt.Sprintf("%v", err))
 	case !confirm:
 		fmt.Println("Operation aborted")
 	default:
@@ -257,7 +256,7 @@ func removeDB(ctx *cli.Context) error {
 }
 
 func upgradeDB(ctx *cli.Context) error {
-	glog.Infoln("Upgrading blockchain database")
+	log.Info("Upgrading blockchain database")
 
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
 	chain, chainDb := utils.MakeChain(ctx, stack)
@@ -270,7 +269,7 @@ func upgradeDB(ctx *cli.Context) error {
 	filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405"))
 	exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename)
 	if err := utils.ExportChain(chain, exportFile); err != nil {
-		utils.Fatalf("Unable to export chain for reimport %s", err)
+		log.Crit(fmt.Sprintf("Unable to export chain for reimport %s", err))
 	}
 	chainDb.Close()
 	if dir := dbDirectory(chainDb); dir != "" {
@@ -283,10 +282,10 @@ func upgradeDB(ctx *cli.Context) error {
 	err := utils.ImportChain(chain, exportFile)
 	chainDb.Close()
 	if err != nil {
-		utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
+		log.Crit(fmt.Sprintf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile))
 	} else {
 		os.Remove(exportFile)
-		glog.Infoln("Import finished")
+		log.Info("Import finished")
 	}
 	return nil
 }
@@ -312,11 +311,11 @@ func dump(ctx *cli.Context) error {
 		}
 		if block == nil {
 			fmt.Println("{}")
-			utils.Fatalf("block not found")
+			log.Crit("block not found")
 		} else {
 			state, err := state.New(block.Root(), chainDb)
 			if err != nil {
-				utils.Fatalf("could not create new state: %v", err)
+				log.Crit(fmt.Sprintf("could not create new state: %v", err))
 			}
 			fmt.Printf("%s\n", state.Dump())
 		}
diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go
index b1c435e00f2ac2314c64a2cc182ad7e30ace4e30..4009e3e334d2e44c514d9fe7f69ff9306ad59780 100644
--- a/cmd/geth/consolecmd.go
+++ b/cmd/geth/consolecmd.go
@@ -17,12 +17,14 @@
 package main
 
 import (
+	"fmt"
 	"os"
 	"os/signal"
 	"strings"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/console"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
 	"gopkg.in/urfave/cli.v1"
@@ -78,7 +80,7 @@ func localConsole(ctx *cli.Context) error {
 	// Attach to the newly started node and start the JavaScript console
 	client, err := node.Attach()
 	if err != nil {
-		utils.Fatalf("Failed to attach to the inproc geth: %v", err)
+		log.Crit(fmt.Sprintf("Failed to attach to the inproc geth: %v", err))
 	}
 	config := console.Config{
 		DataDir: node.DataDir(),
@@ -88,7 +90,7 @@ func localConsole(ctx *cli.Context) error {
 	}
 	console, err := console.New(config)
 	if err != nil {
-		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+		log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
 	}
 	defer console.Stop(false)
 
@@ -110,7 +112,7 @@ func remoteConsole(ctx *cli.Context) error {
 	// Attach to a remotely running geth instance and start the JavaScript console
 	client, err := dialRPC(ctx.Args().First())
 	if err != nil {
-		utils.Fatalf("Unable to attach to remote geth: %v", err)
+		log.Crit(fmt.Sprintf("Unable to attach to remote geth: %v", err))
 	}
 	config := console.Config{
 		DataDir: utils.MakeDataDir(ctx),
@@ -120,7 +122,7 @@ func remoteConsole(ctx *cli.Context) error {
 	}
 	console, err := console.New(config)
 	if err != nil {
-		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+		log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
 	}
 	defer console.Stop(false)
 
@@ -162,7 +164,7 @@ func ephemeralConsole(ctx *cli.Context) error {
 	// Attach to the newly started node and start the JavaScript console
 	client, err := node.Attach()
 	if err != nil {
-		utils.Fatalf("Failed to attach to the inproc geth: %v", err)
+		log.Crit(fmt.Sprintf("Failed to attach to the inproc geth: %v", err))
 	}
 	config := console.Config{
 		DataDir: node.DataDir(),
@@ -172,14 +174,14 @@ func ephemeralConsole(ctx *cli.Context) error {
 	}
 	console, err := console.New(config)
 	if err != nil {
-		utils.Fatalf("Failed to start the JavaScript console: %v", err)
+		log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
 	}
 	defer console.Stop(false)
 
 	// Evaluate each of the specified JavaScript files
 	for _, file := range ctx.Args() {
 		if err = console.Execute(file); err != nil {
-			utils.Fatalf("Failed to execute %s: %v", file, err)
+			log.Crit(fmt.Sprintf("Failed to execute %s: %v", file, err))
 		}
 	}
 	// Wait for pending callbacks, but stop for Ctrl-C.
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 7d98f6bb2b196454847f7e75264595d447ccf173..2bebccac722f5f4242b8bf388db69e5cc47e161a 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -34,8 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/internal/debug"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
@@ -204,11 +203,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
 	}{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
 	extra, err := rlp.EncodeToBytes(clientInfo)
 	if err != nil {
-		glog.V(logger.Warn).Infoln("error setting canonical miner information:", err)
+		log.Warn(fmt.Sprint("error setting canonical miner information:", err))
 	}
 	if uint64(len(extra)) > params.MaximumExtraDataSize {
-		glog.V(logger.Warn).Infoln("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize)
-		glog.V(logger.Debug).Infof("extra: %x\n", extra)
+		log.Warn(fmt.Sprint("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize))
+		log.Debug(fmt.Sprintf("extra: %x\n", extra))
 		extra = nil
 	}
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
@@ -236,7 +235,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
 		copy(config.Commit[:], commit)
 		return release.NewReleaseService(ctx, config)
 	}); err != nil {
-		utils.Fatalf("Failed to register the Geth release oracle service: %v", err)
+		log.Crit(fmt.Sprintf("Failed to register the Geth release oracle service: %v", err))
 	}
 	return stack
 }
@@ -266,14 +265,14 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 		// Create an chain state reader for self-derivation
 		rpcClient, err := stack.Attach()
 		if err != nil {
-			utils.Fatalf("Failed to attach to self: %v", err)
+			log.Crit(fmt.Sprintf("Failed to attach to self: %v", err))
 		}
 		stateReader := ethclient.NewClient(rpcClient)
 
 		// Open and self derive any wallets already attached
 		for _, wallet := range stack.AccountManager().Wallets() {
 			if err := wallet.Open(""); err != nil {
-				glog.V(logger.Warn).Infof("Failed to open wallet %s: %v", wallet.URL(), err)
+				log.Warn(fmt.Sprintf("Failed to open wallet %s: %v", wallet.URL(), err))
 			} else {
 				wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
 			}
@@ -282,13 +281,13 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 		for event := range events {
 			if event.Arrive {
 				if err := event.Wallet.Open(""); err != nil {
-					glog.V(logger.Info).Infof("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err)
+					log.Info(fmt.Sprintf("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err))
 				} else {
-					glog.V(logger.Info).Infof("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status())
+					log.Info(fmt.Sprintf("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status()))
 					event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
 				}
 			} else {
-				glog.V(logger.Info).Infof("Old wallet dropped:  %s", event.Wallet.URL())
+				log.Info(fmt.Sprintf("Old wallet dropped:  %s", event.Wallet.URL()))
 				event.Wallet.Close()
 			}
 		}
@@ -297,10 +296,10 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 	if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
 		var ethereum *eth.Ethereum
 		if err := stack.Service(&ethereum); err != nil {
-			utils.Fatalf("ethereum service not running: %v", err)
+			log.Crit(fmt.Sprintf("ethereum service not running: %v", err))
 		}
 		if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil {
-			utils.Fatalf("Failed to start mining: %v", err)
+			log.Crit(fmt.Sprintf("Failed to start mining: %v", err))
 		}
 	}
 }
diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go
index 077f1ad11ed4112001156bff9f2261ddb1cecbd4..9cb161e1aebbfd319f32cc2c9da888dd31eeca6f 100644
--- a/cmd/geth/misccmd.go
+++ b/cmd/geth/misccmd.go
@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/ethash"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"gopkg.in/urfave/cli.v1"
 )
@@ -68,7 +69,7 @@ The output of this command is supposed to be machine-readable.
 func makedag(ctx *cli.Context) error {
 	args := ctx.Args()
 	wrongArgs := func() {
-		utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
+		log.Crit("Usage: geth makedag <block number> <outputdir>")
 	}
 	switch {
 	case len(args) == 2:
@@ -84,7 +85,7 @@ func makedag(ctx *cli.Context) error {
 			}
 			_, err = ioutil.ReadDir(dir)
 			if err != nil {
-				utils.Fatalf("Can't find dir")
+				log.Crit("Can't find dir")
 			}
 			fmt.Println("making DAG, this could take awhile...")
 			ethash.MakeDAG(blockNum, dir)
diff --git a/cmd/geth/monitorcmd.go b/cmd/geth/monitorcmd.go
index c63542f13a2ad06ac1f6cd7808cf5783c35ab0b3..129a801811fc6b39516939be50377be9a070dbc3 100644
--- a/cmd/geth/monitorcmd.go
+++ b/cmd/geth/monitorcmd.go
@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/gizak/termui"
@@ -76,14 +77,14 @@ func monitor(ctx *cli.Context) error {
 	// Attach to an Ethereum node over IPC or RPC
 	endpoint := ctx.String(monitorCommandAttachFlag.Name)
 	if client, err = dialRPC(endpoint); err != nil {
-		utils.Fatalf("Unable to attach to geth node: %v", err)
+		log.Crit(fmt.Sprintf("Unable to attach to geth node: %v", err))
 	}
 	defer client.Close()
 
 	// Retrieve all the available metrics and resolve the user pattens
 	metrics, err := retrieveMetrics(client)
 	if err != nil {
-		utils.Fatalf("Failed to retrieve system metrics: %v", err)
+		log.Crit(fmt.Sprintf("Failed to retrieve system metrics: %v", err))
 	}
 	monitored := resolveMetrics(metrics, ctx.Args())
 	if len(monitored) == 0 {
@@ -91,18 +92,18 @@ func monitor(ctx *cli.Context) error {
 		sort.Strings(list)
 
 		if len(list) > 0 {
-			utils.Fatalf("No metrics specified.\n\nAvailable:\n - %s", strings.Join(list, "\n - "))
+			log.Crit(fmt.Sprintf("No metrics specified.\n\nAvailable:\n - %s", strings.Join(list, "\n - ")))
 		} else {
-			utils.Fatalf("No metrics collected by geth (--%s).\n", utils.MetricsEnabledFlag.Name)
+			log.Crit(fmt.Sprintf("No metrics collected by geth (--%s).\n", utils.MetricsEnabledFlag.Name))
 		}
 	}
 	sort.Strings(monitored)
 	if cols := len(monitored) / ctx.Int(monitorCommandRowsFlag.Name); cols > 6 {
-		utils.Fatalf("Requested metrics (%d) spans more that 6 columns:\n - %s", len(monitored), strings.Join(monitored, "\n - "))
+		log.Crit(fmt.Sprintf("Requested metrics (%d) spans more than 6 columns:\n - %s", len(monitored), strings.Join(monitored, "\n - ")))
 	}
 	// Create and configure the chart UI defaults
 	if err := termui.Init(); err != nil {
-		utils.Fatalf("Unable to initialize terminal UI: %v", err)
+		log.Crit(fmt.Sprintf("Unable to initialize terminal UI: %v", err))
 	}
 	defer termui.Close()
 
@@ -186,7 +187,7 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
 	if len(parts) > 1 {
 		for _, variation := range strings.Split(parts[0], ",") {
 			if submetrics, ok := metrics[variation].(map[string]interface{}); !ok {
-				utils.Fatalf("Failed to retrieve system metrics: %s", path+variation)
+				log.Crit(fmt.Sprintf("Failed to retrieve system metrics: %s", path+variation))
 				return nil
 			} else {
 				results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
@@ -205,7 +206,7 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
 			results = append(results, expandMetrics(metric, path+variation+"/")...)
 
 		default:
-			utils.Fatalf("Metric pattern resolved to unexpected type: %v", reflect.TypeOf(metric))
+			log.Crit(fmt.Sprintf("Metric pattern resolved to unexpected type: %v", reflect.TypeOf(metric)))
 			return nil
 		}
 	}
@@ -227,7 +228,7 @@ func expandMetrics(metrics map[string]interface{}, path string) []string {
 			list = append(list, expandMetrics(metric, path+name+"/")...)
 
 		default:
-			utils.Fatalf("Metric pattern %s resolved to unexpected type: %v", path+name, reflect.TypeOf(metric))
+			log.Crit(fmt.Sprintf("Metric pattern %s resolved to unexpected type: %v", path+name, reflect.TypeOf(metric)))
 			return nil
 		}
 	}
diff --git a/cmd/gethrpctest/main.go b/cmd/gethrpctest/main.go
index 9e80ad05d2bbe5a06733003f5268147df38a5f22..69a6074e705e69c7ee27623f8eb663e06ca9c5a4 100644
--- a/cmd/gethrpctest/main.go
+++ b/cmd/gethrpctest/main.go
@@ -19,7 +19,7 @@ package main
 
 import (
 	"flag"
-	"log"
+	"fmt"
 	"os"
 	"os/signal"
 
@@ -27,7 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/tests"
@@ -46,35 +46,34 @@ func main() {
 	flag.Parse()
 
 	// Enable logging errors, we really do want to see those
-	glog.SetV(2)
-	glog.SetToStderr(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 
 	// Load the test suite to run the RPC against
 	tests, err := tests.LoadBlockTests(*testFile)
 	if err != nil {
-		log.Fatalf("Failed to load test suite: %v", err)
+		log.Crit(fmt.Sprintf("Failed to load test suite: %v", err))
 	}
 	test, found := tests[*testName]
 	if !found {
-		log.Fatalf("Requested test (%s) not found within suite", *testName)
+		log.Crit(fmt.Sprintf("Requested test (%s) not found within suite", *testName))
 	}
 
 	stack, err := MakeSystemNode(*testKey, test)
 	if err != nil {
-		log.Fatalf("Failed to assemble test stack: %v", err)
+		log.Crit(fmt.Sprintf("Failed to assemble test stack: %v", err))
 	}
 	if err := stack.Start(); err != nil {
-		log.Fatalf("Failed to start test node: %v", err)
+		log.Crit(fmt.Sprintf("Failed to start test node: %v", err))
 	}
 	defer stack.Stop()
 
-	log.Println("Test node started...")
+	log.Info("Test node started...")
 
 	// Make sure the tests contained within the suite pass
 	if err := RunTest(stack, test); err != nil {
-		log.Fatalf("Failed to run the pre-configured test: %v", err)
+		log.Crit(fmt.Sprintf("Failed to run the pre-configured test: %v", err))
 	}
-	log.Println("Initial test suite passed...")
+	log.Info("Initial test suite passed...")
 
 	quit := make(chan os.Signal, 1)
 	signal.Notify(quit, os.Interrupt)
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 5661b3f6ea11f09d5909204328b9f9e9cefb6d94..84af69d7abc79655fd76f48e78c592a61666a459 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -35,8 +35,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/internal/debug"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -278,7 +277,7 @@ func bzzd(ctx *cli.Context) error {
 		signal.Notify(sigc, syscall.SIGTERM)
 		defer signal.Stop(sigc)
 		<-sigc
-		glog.V(logger.Info).Infoln("Got sigterm, shutting down...")
+		log.Info("Got sigterm, shutting down...")
 		stack.Stop()
 	}()
 	networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name)
@@ -308,7 +307,7 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
 
 	bzzconfig, err := bzzapi.NewConfig(bzzdir, chbookaddr, prvkey, ctx.GlobalUint64(SwarmNetworkIdFlag.Name))
 	if err != nil {
-		utils.Fatalf("unable to configure swarm: %v", err)
+		log.Crit(fmt.Sprintf("unable to configure swarm: %v", err))
 	}
 	bzzport := ctx.GlobalString(SwarmPortFlag.Name)
 	if len(bzzport) > 0 {
@@ -325,13 +324,13 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
 		if len(ethapi) > 0 {
 			client, err = ethclient.Dial(ethapi)
 			if err != nil {
-				utils.Fatalf("Can't connect: %v", err)
+				log.Crit(fmt.Sprintf("Can't connect: %v", err))
 			}
 		}
 		return swarm.NewSwarm(ctx, client, bzzconfig, swapEnabled, syncEnabled, cors)
 	}
 	if err := stack.Register(boot); err != nil {
-		utils.Fatalf("Failed to register the Swarm service: %v", err)
+		log.Crit(fmt.Sprintf("Failed to register the Swarm service: %v", err))
 	}
 }
 
@@ -339,11 +338,11 @@ func getAccount(ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
 	keyid := ctx.GlobalString(SwarmAccountFlag.Name)
 
 	if keyid == "" {
-		utils.Fatalf("Option %q is required", SwarmAccountFlag.Name)
+		log.Crit(fmt.Sprintf("Option %q is required", SwarmAccountFlag.Name))
 	}
 	// Try to load the arg as a hex key file.
 	if key, err := crypto.LoadECDSA(keyid); err == nil {
-		glog.V(logger.Info).Infof("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey))
+		log.Info(fmt.Sprintf("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey)))
 		return key
 	}
 	// Otherwise try getting it from the keystore.
@@ -365,14 +364,14 @@ func decryptStoreAccount(ks *keystore.KeyStore, account string) *ecdsa.PrivateKe
 			err = fmt.Errorf("index %d higher than number of accounts %d", ix, len(accounts))
 		}
 	} else {
-		utils.Fatalf("Can't find swarm account key %s", account)
+		log.Crit(fmt.Sprintf("Can't find swarm account key %s", account))
 	}
 	if err != nil {
-		utils.Fatalf("Can't find swarm account key: %v", err)
+		log.Crit(fmt.Sprintf("Can't find swarm account key: %v", err))
 	}
 	keyjson, err := ioutil.ReadFile(a.URL.Path)
 	if err != nil {
-		utils.Fatalf("Can't load swarm account key: %v", err)
+		log.Crit(fmt.Sprintf("Can't load swarm account key: %v", err))
 	}
 	for i := 1; i <= 3; i++ {
 		passphrase := promptPassphrase(fmt.Sprintf("Unlocking swarm account %s [%d/3]", a.Address.Hex(), i))
@@ -381,7 +380,7 @@ func decryptStoreAccount(ks *keystore.KeyStore, account string) *ecdsa.PrivateKe
 			return key.PrivateKey
 		}
 	}
-	utils.Fatalf("Can't decrypt swarm account key")
+	log.Crit("Can't decrypt swarm account key")
 	return nil
 }
 
@@ -391,7 +390,7 @@ func promptPassphrase(prompt string) string {
 	}
 	password, err := console.Stdin.PromptPassword("Passphrase: ")
 	if err != nil {
-		utils.Fatalf("Failed to read passphrase: %v", err)
+		log.Crit(fmt.Sprintf("Failed to read passphrase: %v", err))
 	}
 	return password
 }
@@ -400,7 +399,7 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
 	for _, url := range nodes {
 		n, err := discover.ParseNode(url)
 		if err != nil {
-			glog.Errorf("invalid bootnode %q", err)
+			log.Error(fmt.Sprintf("invalid bootnode %q", err))
 			continue
 		}
 		srv.AddPeer(n)
diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go
index 0de0d69bbfea3ab22febc0bbc7391ac41d93ac91..f647926899154ad7f6698b740bcc16fa1ff68e29 100644
--- a/cmd/swarm/manifest.go
+++ b/cmd/swarm/manifest.go
@@ -18,13 +18,14 @@
 package main
 
 import (
-	"gopkg.in/urfave/cli.v1"
+	"encoding/json"
+	"fmt"
 	"log"
 	"mime"
 	"path/filepath"
 	"strings"
-	"fmt"
-	"encoding/json"
+
+	"gopkg.in/urfave/cli.v1"
 )
 
 func add(ctx *cli.Context) {
@@ -35,23 +36,22 @@ func add(ctx *cli.Context) {
 	}
 
 	var (
-		mhash  = args[0]
-		path   = args[1]
-		hash   = args[2]
+		mhash = args[0]
+		path  = args[1]
+		hash  = args[2]
 
-		ctype  string
+		ctype        string
 		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot  manifest
+		mroot        manifest
 	)
 
-
 	if len(args) > 3 {
 		ctype = args[3]
 	} else {
 		ctype = mime.TypeByExtension(filepath.Ext(path))
 	}
 
-	newManifest := addEntryToManifest (ctx, mhash, path, hash, ctype)
+	newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
 	fmt.Println(newManifest)
 
 	if !wantManifest {
@@ -70,13 +70,13 @@ func update(ctx *cli.Context) {
 	}
 
 	var (
-		mhash  = args[0]
-		path   = args[1]
-		hash   = args[2]
+		mhash = args[0]
+		path  = args[1]
+		hash  = args[2]
 
-		ctype  string
+		ctype        string
 		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot  manifest
+		mroot        manifest
 	)
 	if len(args) > 3 {
 		ctype = args[3]
@@ -84,7 +84,7 @@ func update(ctx *cli.Context) {
 		ctype = mime.TypeByExtension(filepath.Ext(path))
 	}
 
-	newManifest := updateEntryInManifest (ctx, mhash, path, hash, ctype)
+	newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
 	fmt.Println(newManifest)
 
 	if !wantManifest {
@@ -102,14 +102,14 @@ func remove(ctx *cli.Context) {
 	}
 
 	var (
-		mhash  = args[0]
-		path   = args[1]
+		mhash = args[0]
+		path  = args[1]
 
 		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot  manifest
+		mroot        manifest
 	)
 
-	newManifest := removeEntryFromManifest (ctx, mhash, path)
+	newManifest := removeEntryFromManifest(ctx, mhash, path)
 	fmt.Println(newManifest)
 
 	if !wantManifest {
@@ -120,15 +120,15 @@ func remove(ctx *cli.Context) {
 	}
 }
 
-func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string)  string {
+func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
 
 	var (
-		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client = &client{api: bzzapi}
+		bzzapi           = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client           = &client{api: bzzapi}
 		longestPathEntry = manifestEntry{
 			Path:        "",
 			Hash:        "",
-			ContentType:  "",
+			ContentType: "",
 		}
 	)
 
@@ -143,12 +143,11 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string)  st
 		log.Fatalln("hash to add is not present:", err)
 	}
 
-
 	// See if we path is in this Manifest or do we have to dig deeper
 	for _, entry := range mroot.Entries {
 		if path == entry.Path {
 			log.Fatal(path, "Already present, not adding anything")
-		}else {
+		} else {
 			if entry.ContentType == "application/bzz-manifest+json" {
 				prfxlen := strings.HasPrefix(path, entry.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
@@ -161,7 +160,7 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string)  st
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest add the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := addEntryToManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype)
+		newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
 
 		// Replace the hash for parent Manifests
 		newMRoot := manifest{}
@@ -182,31 +181,28 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string)  st
 		mroot.Entries = append(mroot.Entries, newEntry)
 	}
 
-
 	newManifestHash, err := client.uploadManifest(mroot)
 	if err != nil {
 		log.Fatalln("manifest upload failed:", err)
 	}
 	return newManifestHash
 
-
-
 }
 
-func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string) string {
+func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
 
 	var (
-		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client = &client{api: bzzapi}
+		bzzapi   = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client   = &client{api: bzzapi}
 		newEntry = manifestEntry{
 			Path:        "",
 			Hash:        "",
-			ContentType:  "",
+			ContentType: "",
 		}
 		longestPathEntry = manifestEntry{
 			Path:        "",
 			Hash:        "",
-			ContentType:  "",
+			ContentType: "",
 		}
 	)
 
@@ -217,12 +213,11 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
 
 	//TODO: check if the "hash" with which to update is valid and present in swarm
 
-
 	// See if we path is in this Manifest or do we have to dig deeper
 	for _, entry := range mroot.Entries {
 		if path == entry.Path {
 			newEntry = entry
-		}else {
+		} else {
 			if entry.ContentType == "application/bzz-manifest+json" {
 				prfxlen := strings.HasPrefix(path, entry.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
@@ -239,7 +234,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest add the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := updateEntryInManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype)
+		newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
 
 		// Replace the hash for parent Manifests
 		newMRoot := manifest{}
@@ -271,7 +266,6 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
 		mroot = newMRoot
 	}
 
-
 	newManifestHash, err := client.uploadManifest(mroot)
 	if err != nil {
 		log.Fatalln("manifest upload failed:", err)
@@ -279,20 +273,20 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
 	return newManifestHash
 }
 
-func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
+func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
 
 	var (
-		bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client = &client{api: bzzapi}
+		bzzapi        = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+		client        = &client{api: bzzapi}
 		entryToRemove = manifestEntry{
 			Path:        "",
 			Hash:        "",
-			ContentType:  "",
+			ContentType: "",
 		}
 		longestPathEntry = manifestEntry{
 			Path:        "",
 			Hash:        "",
-			ContentType:  "",
+			ContentType: "",
 		}
 	)
 
@@ -301,13 +295,11 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
 		log.Fatalln("manifest download failed:", err)
 	}
 
-
-
 	// See if we path is in this Manifest or do we have to dig deeper
 	for _, entry := range mroot.Entries {
 		if path == entry.Path {
 			entryToRemove = entry
-		}else {
+		} else {
 			if entry.ContentType == "application/bzz-manifest+json" {
 				prfxlen := strings.HasPrefix(path, entry.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
@@ -324,7 +316,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest remove the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := removeEntryFromManifest (ctx, longestPathEntry.Hash, newPath)
+		newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
 
 		// Replace the hash for parent Manifests
 		newMRoot := manifest{}
@@ -348,13 +340,10 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
 		mroot = newMRoot
 	}
 
-
 	newManifestHash, err := client.uploadManifest(mroot)
 	if err != nil {
 		log.Fatalln("manifest upload failed:", err)
 	}
 	return newManifestHash
 
-
 }
-
diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go
index 871713b2da1beef0e4d1e7d14dad368499147df3..9f3a2abe02561558b6d9ae6f345eed9943680e04 100644
--- a/cmd/swarm/upload.go
+++ b/cmd/swarm/upload.go
@@ -233,7 +233,7 @@ func (c *client) postRaw(mimetype string, size int64, body io.ReadCloser) (strin
 func (c *client) downloadManifest(mhash string) (manifest, error) {
 
 	mroot := manifest{}
-	req, err := http.NewRequest("GET", c.api + "/bzzr:/" + mhash, nil)
+	req, err := http.NewRequest("GET", c.api+"/bzzr:/"+mhash, nil)
 	if err != nil {
 		return mroot, err
 	}
@@ -254,4 +254,4 @@ func (c *client) downloadManifest(mhash string) (manifest, error) {
 		return mroot, fmt.Errorf("Manifest %v is malformed: %v", mhash, err)
 	}
 	return mroot, err
-}
\ No newline at end of file
+}
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 8666f37756d39178a0d9925a9805a7635537b18a..e288f8bd2a519925ef6d6e20230e31a61fecf6aa 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -31,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/internal/debug"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -72,19 +71,19 @@ func Fatalf(format string, args ...interface{}) {
 
 func StartNode(stack *node.Node) {
 	if err := stack.Start(); err != nil {
-		Fatalf("Error starting protocol stack: %v", err)
+		log.Crit(fmt.Sprintf("Error starting protocol stack: %v", err))
 	}
 	go func() {
 		sigc := make(chan os.Signal, 1)
 		signal.Notify(sigc, os.Interrupt)
 		defer signal.Stop(sigc)
 		<-sigc
-		glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
+		log.Info("Got interrupt, shutting down...")
 		go stack.Stop()
 		for i := 10; i > 0; i-- {
 			<-sigc
 			if i > 1 {
-				glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
+				log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
 			}
 		}
 		debug.Exit() // ensure trace and CPU profile data is flushed.
@@ -115,7 +114,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 	defer close(interrupt)
 	go func() {
 		if _, ok := <-interrupt; ok {
-			glog.Info("caught interrupt during import, will stop at next batch")
+			log.Info("caught interrupt during import, will stop at next batch")
 		}
 		close(stop)
 	}()
@@ -128,7 +127,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 		}
 	}
 
-	glog.Infoln("Importing blockchain ", fn)
+	log.Info(fmt.Sprint("Importing blockchain ", fn))
 	fh, err := os.Open(fn)
 	if err != nil {
 		return err
@@ -176,8 +175,8 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 			return fmt.Errorf("interrupted")
 		}
 		if hasAllBlocks(chain, blocks[:i]) {
-			glog.Infof("skipping batch %d, all blocks present [%x / %x]",
-				batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4])
+			log.Info(fmt.Sprintf("skipping batch %d, all blocks present [%x / %x]",
+				batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4]))
 			continue
 		}
 
@@ -198,7 +197,7 @@ func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
 }
 
 func ExportChain(blockchain *core.BlockChain, fn string) error {
-	glog.Infoln("Exporting blockchain to ", fn)
+	log.Info(fmt.Sprint("Exporting blockchain to ", fn))
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
 	if err != nil {
 		return err
@@ -214,13 +213,13 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
 	if err := blockchain.Export(writer); err != nil {
 		return err
 	}
-	glog.Infoln("Exported blockchain to ", fn)
+	log.Info(fmt.Sprint("Exported blockchain to ", fn))
 
 	return nil
 }
 
 func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
-	glog.Infoln("Exporting blockchain to ", fn)
+	log.Info(fmt.Sprint("Exporting blockchain to ", fn))
 	// TODO verify mode perms
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
 	if err != nil {
@@ -237,6 +236,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 	if err := blockchain.ExportN(writer, first, last); err != nil {
 		return err
 	}
-	glog.Infoln("Exported blockchain to ", fn)
+	log.Info(fmt.Sprint("Exported blockchain to ", fn))
 	return nil
 }
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 92eb05e321570d53b3b3c8d81e6e172d9ec1e72e..55713c15cc08d9270abf8dcd1086324df1ee0199 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -41,8 +41,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethstats"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/les"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -426,7 +425,7 @@ func MakeDataDir(ctx *cli.Context) string {
 		}
 		return path
 	}
-	Fatalf("Cannot determine default data directory, please set manually (--datadir)")
+	log.Crit("Cannot determine default data directory, please set manually (--datadir)")
 	return ""
 }
 
@@ -452,16 +451,16 @@ func MakeNodeKey(ctx *cli.Context) *ecdsa.PrivateKey {
 	)
 	switch {
 	case file != "" && hex != "":
-		Fatalf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name)
+		log.Crit(fmt.Sprintf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name))
 
 	case file != "":
 		if key, err = crypto.LoadECDSA(file); err != nil {
-			Fatalf("Option %q: %v", NodeKeyFileFlag.Name, err)
+			log.Crit(fmt.Sprintf("Option %q: %v", NodeKeyFileFlag.Name, err))
 		}
 
 	case hex != "":
 		if key, err = crypto.HexToECDSA(hex); err != nil {
-			Fatalf("Option %q: %v", NodeKeyHexFlag.Name, err)
+			log.Crit(fmt.Sprintf("Option %q: %v", NodeKeyHexFlag.Name, err))
 		}
 	}
 	return key
@@ -493,7 +492,7 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
 	for _, url := range urls {
 		node, err := discover.ParseNode(url)
 		if err != nil {
-			glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
+			log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
 			continue
 		}
 		bootnodes = append(bootnodes, node)
@@ -513,7 +512,7 @@ func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node {
 	for _, url := range urls {
 		node, err := discv5.ParseNode(url)
 		if err != nil {
-			glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
+			log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
 			continue
 		}
 		bootnodes = append(bootnodes, node)
@@ -537,7 +536,7 @@ func MakeDiscoveryV5Address(ctx *cli.Context) string {
 func MakeNAT(ctx *cli.Context) nat.Interface {
 	natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name))
 	if err != nil {
-		Fatalf("Option %s: %v", NATFlag.Name, err)
+		log.Crit(fmt.Sprintf("Option %s: %v", NATFlag.Name, err))
 	}
 	return natif
 }
@@ -574,11 +573,11 @@ func MakeWSRpcHost(ctx *cli.Context) string {
 // for Geth and returns half of the allowance to assign to the database.
 func MakeDatabaseHandles() int {
 	if err := raiseFdLimit(2048); err != nil {
-		Fatalf("Failed to raise file descriptor allowance: %v", err)
+		log.Crit(fmt.Sprintf("Failed to raise file descriptor allowance: %v", err))
 	}
 	limit, err := getFdLimit()
 	if err != nil {
-		Fatalf("Failed to retrieve file descriptor allowance: %v", err)
+		log.Crit(fmt.Sprintf("Failed to retrieve file descriptor allowance: %v", err))
 	}
 	if limit > 2048 { // cap database file descriptors even if more is available
 		limit = 2048
@@ -610,7 +609,7 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
 func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
 	accounts := ks.Accounts()
 	if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 {
-		glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
+		log.Error(fmt.Sprint("WARNING: No etherbase set and no accounts found as default"))
 		return common.Address{}
 	}
 	etherbase := ctx.GlobalString(EtherbaseFlag.Name)
@@ -620,7 +619,7 @@ func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
 	// If the specified etherbase is a valid address, return it
 	account, err := MakeAddress(ks, etherbase)
 	if err != nil {
-		Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
+		log.Crit(fmt.Sprintf("Option %q: %v", EtherbaseFlag.Name, err))
 	}
 	return account.Address
 }
@@ -642,7 +641,7 @@ func MakePasswordList(ctx *cli.Context) []string {
 	}
 	text, err := ioutil.ReadFile(path)
 	if err != nil {
-		Fatalf("Failed to read password file: %v", err)
+		log.Crit(fmt.Sprintf("Failed to read password file: %v", err))
 	}
 	lines := strings.Split(string(text), "\n")
 	// Sanitise DOS line endings.
@@ -701,14 +700,14 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
 	if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" {
 		list, err := netutil.ParseNetlist(netrestrict)
 		if err != nil {
-			Fatalf("Option %q: %v", NetrestrictFlag.Name, err)
+			log.Crit(fmt.Sprintf("Option %q: %v", NetrestrictFlag.Name, err))
 		}
 		config.NetRestrict = list
 	}
 
 	stack, err := node.New(config)
 	if err != nil {
-		Fatalf("Failed to create the protocol stack: %v", err)
+		log.Crit(fmt.Sprintf("Failed to create the protocol stack: %v", err))
 	}
 	return stack
 }
@@ -724,7 +723,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		}
 	}
 	if networks > 1 {
-		Fatalf("The %v flags are mutually exclusive", netFlags)
+		log.Crit(fmt.Sprintf("The %v flags are mutually exclusive", netFlags))
 	}
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
 
@@ -778,7 +777,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
 			return les.New(ctx, ethConf)
 		}); err != nil {
-			Fatalf("Failed to register the Ethereum light node service: %v", err)
+			log.Crit(fmt.Sprintf("Failed to register the Ethereum light node service: %v", err))
 		}
 	} else {
 		if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
@@ -789,7 +788,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 			}
 			return fullNode, err
 		}); err != nil {
-			Fatalf("Failed to register the Ethereum full node service: %v", err)
+			log.Crit(fmt.Sprintf("Failed to register the Ethereum full node service: %v", err))
 		}
 	}
 }
@@ -797,7 +796,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 // RegisterShhService configures Whisper and adds it to the given node.
 func RegisterShhService(stack *node.Node) {
 	if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil {
-		Fatalf("Failed to register the Whisper service: %v", err)
+		log.Crit(fmt.Sprintf("Failed to register the Whisper service: %v", err))
 	}
 }
 
@@ -814,7 +813,7 @@ func RegisterEthStatsService(stack *node.Node, url string) {
 
 		return ethstats.New(url, ethServ, lesServ)
 	}); err != nil {
-		Fatalf("Failed to register the Ethereum Stats service: %v", err)
+		log.Crit(fmt.Sprintf("Failed to register the Ethereum Stats service: %v", err))
 	}
 }
 
@@ -845,7 +844,7 @@ func MakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *params.ChainCon
 		case core.ChainConfigNotFoundErr:
 			// No configs found, use empty, will populate below
 		default:
-			Fatalf("Could not make chain configuration: %v", err)
+			log.Crit(fmt.Sprintf("Could not make chain configuration: %v", err))
 		}
 	}
 	// set chain id in case it's zero.
@@ -900,7 +899,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
 
 	chainDb, err := stack.OpenDatabase(name, cache, handles)
 	if err != nil {
-		Fatalf("Could not open database: %v", err)
+		log.Crit(fmt.Sprintf("Could not open database: %v", err))
 	}
 	return chainDb
 }
@@ -913,7 +912,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 	if ctx.GlobalBool(TestNetFlag.Name) {
 		_, err := core.WriteTestNetGenesisBlock(chainDb)
 		if err != nil {
-			glog.Fatalln(err)
+			log.Crit(fmt.Sprint(err))
 		}
 	}
 
@@ -925,7 +924,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 	}
 	chain, err = core.NewBlockChain(chainDb, chainConfig, pow, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)})
 	if err != nil {
-		Fatalf("Could not start chainmanager: %v", err)
+		log.Crit(fmt.Sprintf("Could not start chainmanager: %v", err))
 	}
 	return chain, chainDb
 }
diff --git a/cmd/wnode/main.go b/cmd/wnode/main.go
index d002497fbf73f74e957c7be64b6ac766faad2c82..eb0dd2511ece70768ebcae75463eb48e23f7fdf8 100644
--- a/cmd/wnode/main.go
+++ b/cmd/wnode/main.go
@@ -36,8 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/console"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/nat"
@@ -82,7 +81,7 @@ var (
 	testMode       = flag.Bool("t", false, "use of predefined parameters for diagnostics")
 	generateKey    = flag.Bool("k", false, "generate and show the private key")
 
-	argVerbosity = flag.Int("verbosity", logger.Warn, "log verbosity level")
+	argVerbosity = flag.Int("verbosity", int(log.LvlWarn), "log verbosity level")
 	argTTL       = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
 	argWorkTime  = flag.Uint("work", 5, "work time in seconds")
 	argPoW       = flag.Float64("pow", whisper.MinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
@@ -109,7 +108,7 @@ func processArgs() {
 		var err error
 		nodeid, err = crypto.LoadECDSA(*argIDFile)
 		if err != nil {
-			utils.Fatalf("Failed to load file [%s]: %s.", *argIDFile, err)
+			log.Crit(fmt.Sprintf("Failed to load file [%s]: %s.", *argIDFile, err))
 		}
 	}
 
@@ -123,7 +122,7 @@ func processArgs() {
 	if len(*argTopic) > 0 {
 		x, err := hex.DecodeString(*argTopic)
 		if err != nil {
-			utils.Fatalf("Failed to parse the topic: %s", err)
+			log.Crit(fmt.Sprintf("Failed to parse the topic: %s", err))
 		}
 		topic = whisper.BytesToTopic(x)
 	}
@@ -131,7 +130,7 @@ func processArgs() {
 	if *asymmetricMode && len(*argPub) > 0 {
 		pub = crypto.ToECDSAPub(common.FromHex(*argPub))
 		if !isKeyValid(pub) {
-			utils.Fatalf("invalid public key")
+			log.Crit(fmt.Sprintf("invalid public key"))
 		}
 	}
 
@@ -153,8 +152,7 @@ func echo() {
 }
 
 func initialize() {
-	glog.SetV(*argVerbosity)
-	glog.SetToStderr(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat())))
 
 	done = make(chan struct{})
 	var peers []*discover.Node
@@ -163,7 +161,7 @@ func initialize() {
 	if *generateKey {
 		key, err := crypto.GenerateKey()
 		if err != nil {
-			utils.Fatalf("Failed to generate private key: %s", err)
+			log.Crit(fmt.Sprintf("Failed to generate private key: %s", err))
 		}
 		k := hex.EncodeToString(crypto.FromECDSA(key))
 		fmt.Printf("Random private key: %s \n", k)
@@ -191,7 +189,7 @@ func initialize() {
 		if len(msPassword) == 0 {
 			msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
 			if err != nil {
-				utils.Fatalf("Failed to read Mail Server password: %s", err)
+				log.Crit(fmt.Sprintf("Failed to read Mail Server password: %s", err))
 			}
 		}
 		shh = whisper.New()
@@ -229,7 +227,7 @@ func initialize() {
 func startServer() {
 	err := server.Start()
 	if err != nil {
-		utils.Fatalf("Failed to start Whisper peer: %s.", err)
+		log.Crit(fmt.Sprintf("Failed to start Whisper peer: %s.", err))
 	}
 
 	fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
@@ -267,7 +265,7 @@ func configureNode() {
 			s := scanLine("Please enter the peer's public key: ")
 			pub = crypto.ToECDSAPub(common.FromHex(s))
 			if !isKeyValid(pub) {
-				utils.Fatalf("Error: invalid public key")
+				log.Crit(fmt.Sprintf("Error: invalid public key"))
 			}
 		}
 	}
@@ -277,7 +275,7 @@ func configureNode() {
 		if len(msPassword) == 0 {
 			msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
 			if err != nil {
-				utils.Fatalf("Failed to read Mail Server password: %s", err)
+				log.Crit(fmt.Sprintf("Failed to read Mail Server password: %s", err))
 			}
 		}
 	}
@@ -286,7 +284,7 @@ func configureNode() {
 		if len(symPass) == 0 {
 			symPass, err = console.Stdin.PromptPassword("Please enter the password: ")
 			if err != nil {
-				utils.Fatalf("Failed to read passphrase: %v", err)
+				log.Crit(fmt.Sprintf("Failed to read passphrase: %v", err))
 			}
 		}
 
@@ -332,7 +330,7 @@ func waitForConnection(timeout bool) {
 		if timeout {
 			cnt++
 			if cnt > 1000 {
-				utils.Fatalf("Timeout expired, failed to connect")
+				log.Crit(fmt.Sprintf("Timeout expired, failed to connect"))
 			}
 		}
 	}
@@ -384,7 +382,7 @@ func scanLine(prompt string) string {
 	}
 	txt, err := input.ReadString('\n')
 	if err != nil {
-		utils.Fatalf("input error: %s", err)
+		log.Crit(fmt.Sprintf("input error: %s", err))
 	}
 	txt = strings.TrimRight(txt, "\n\r")
 	return txt
@@ -399,7 +397,7 @@ func scanUint(prompt string) uint32 {
 	s := scanLine(prompt)
 	i, err := strconv.Atoi(s)
 	if err != nil {
-		utils.Fatalf("Fail to parse the lower time limit: %s", err)
+		log.Crit(fmt.Sprintf("Fail to parse the lower time limit: %s", err))
 	}
 	return uint32(i)
 }
@@ -432,7 +430,7 @@ func sendMsg(payload []byte) {
 func messageLoop() {
 	f := shh.GetFilter(filterID)
 	if f == nil {
-		utils.Fatalf("filter is not installed")
+		log.Crit(fmt.Sprintf("filter is not installed"))
 	}
 
 	ticker := time.NewTicker(time.Millisecond * 50)
@@ -474,7 +472,7 @@ func requestExpiredMessagesLoop() {
 
 	err := shh.AddSymKey(mailserver.MailServerKeyName, []byte(msPassword))
 	if err != nil {
-		utils.Fatalf("Failed to create symmetric key for mail request: %s", err)
+		log.Crit(fmt.Sprintf("Failed to create symmetric key for mail request: %s", err))
 	}
 	key = shh.GetSymKey(mailserver.MailServerKeyName)
 	peerID = extractIdFromEnode(*argEnode)
@@ -487,7 +485,7 @@ func requestExpiredMessagesLoop() {
 		if len(t) >= whisper.TopicLength*2 {
 			x, err := hex.DecodeString(t)
 			if err != nil {
-				utils.Fatalf("Failed to parse the topic: %s", err)
+				log.Crit(fmt.Sprintf("Failed to parse the topic: %s", err))
 			}
 			xt = whisper.BytesToTopic(x)
 		}
@@ -513,12 +511,12 @@ func requestExpiredMessagesLoop() {
 		msg := whisper.NewSentMessage(&params)
 		env, err := msg.Wrap(&params)
 		if err != nil {
-			utils.Fatalf("Wrap failed: %s", err)
+			log.Crit(fmt.Sprintf("Wrap failed: %s", err))
 		}
 
 		err = shh.RequestHistoricMessages(peerID, env)
 		if err != nil {
-			utils.Fatalf("Failed to send P2P message: %s", err)
+			log.Crit(fmt.Sprintf("Failed to send P2P message: %s", err))
 		}
 
 		time.Sleep(time.Second * 5)
@@ -528,7 +526,7 @@ func requestExpiredMessagesLoop() {
 func extractIdFromEnode(s string) []byte {
 	n, err := discover.ParseNode(s)
 	if err != nil {
-		utils.Fatalf("Failed to parse enode: %s", err)
+		log.Crit(fmt.Sprintf("Failed to parse enode: %s", err))
 		return nil
 	}
 	return n.ID[:]
diff --git a/console/bridge.go b/console/bridge.go
index f0c59804ba8ffcc2007aa9cc18b83db2445f573b..81f216d1d172b66cf5933f27d0f04929476e0d6e 100644
--- a/console/bridge.go
+++ b/console/bridge.go
@@ -22,8 +22,7 @@ import (
 	"io"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/robertkrimen/otto"
 )
@@ -306,7 +305,7 @@ func setError(resp *otto.Object, code int, msg string) {
 func throwJSException(msg interface{}) otto.Value {
 	val, err := otto.ToValue(msg)
 	if err != nil {
-		glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err)
+		log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
 	}
 	panic(val)
 }
diff --git a/contracts/chequebook/cheque.go b/contracts/chequebook/cheque.go
index d49964f91b4aa918f67416db896ebfb6ffcfd318..9a2a5a2a65c60e8ef9e8bfe8e852d4816f0c69dd 100644
--- a/contracts/chequebook/cheque.go
+++ b/contracts/chequebook/cheque.go
@@ -40,8 +40,7 @@ import (
 	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
 	"golang.org/x/net/context"
 )
@@ -140,7 +139,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
 
 	if (contractAddr != common.Address{}) {
 		self.setBalanceFromBlockChain()
-		glog.V(logger.Detail).Infof("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String())
+		log.Trace(fmt.Sprintf("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()))
 	}
 	return
 }
@@ -148,7 +147,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
 func (self *Chequebook) setBalanceFromBlockChain() {
 	balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
 	if err != nil {
-		glog.V(logger.Error).Infof("can't get balance: %v", err)
+		log.Error(fmt.Sprintf("can't get balance: %v", err))
 	} else {
 		self.balance.Set(balance)
 	}
@@ -172,7 +171,7 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
 		self.setBalanceFromBlockChain()
 	}
 
-	glog.V(logger.Detail).Infof("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path)
+	log.Trace(fmt.Sprintf("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path))
 
 	return
 }
@@ -227,7 +226,7 @@ func (self *Chequebook) Save() (err error) {
 	if err != nil {
 		return err
 	}
-	glog.V(logger.Detail).Infof("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path)
+	log.Trace(fmt.Sprintf("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path))
 
 	return ioutil.WriteFile(self.path, data, os.ModePerm)
 }
@@ -340,12 +339,12 @@ func (self *Chequebook) deposit(amount *big.Int) (string, error) {
 	chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
 	tx, err := chbookRaw.Transfer(depositTransactor)
 	if err != nil {
-		glog.V(logger.Warn).Infof("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err)
+		log.Warn(fmt.Sprintf("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err))
 		return "", err
 	}
 	// assume that transaction is actually successful, we add the amount to balance right away
 	self.balance.Add(self.balance, amount)
-	glog.V(logger.Detail).Infof("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer)
+	log.Trace(fmt.Sprintf("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer))
 	return tx.Hash().Hex(), nil
 }
 
@@ -469,7 +468,7 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
 		session:     session,
 		cashed:      new(big.Int).Set(common.Big0),
 	}
-	glog.V(logger.Detail).Infof("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer))
+	log.Trace(fmt.Sprintf("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)))
 	return
 }
 
@@ -491,7 +490,7 @@ func (self *Inbox) Stop() {
 func (self *Inbox) Cash() (txhash string, err error) {
 	if self.cheque != nil {
 		txhash, err = self.cheque.Cash(self.session)
-		glog.V(logger.Detail).Infof("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex())
+		log.Trace(fmt.Sprintf("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()))
 		self.cashed = self.cheque.Amount
 	}
 	return
@@ -575,7 +574,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
 				self.Cash()
 			}
 		}
-		glog.V(logger.Detail).Infof("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed)
+		log.Trace(fmt.Sprintf("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed))
 	}
 
 	return amount, err
@@ -583,7 +582,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
 
 // Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
 func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
-	glog.V(logger.Detail).Infof("verify cheque: %v - sum: %v", self, sum)
+	log.Trace(fmt.Sprintf("verify cheque: %v - sum: %v", self, sum))
 	if sum == nil {
 		return nil, fmt.Errorf("invalid amount")
 	}
diff --git a/contracts/release/release.go b/contracts/release/release.go
index cd79112cd141a8719dfdef809ac0e2a4d27c8c95..7b63059d0b9af516b2e326cfec2a4c0fe55d2384 100644
--- a/contracts/release/release.go
+++ b/contracts/release/release.go
@@ -29,8 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/les"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -128,10 +127,10 @@ func (r *ReleaseService) checker() {
 			version, err := r.oracle.CurrentVersion(opts)
 			if err != nil {
 				if err == bind.ErrNoCode {
-					glog.V(logger.Debug).Infof("Release oracle not found at %x", r.config.Oracle)
+					log.Debug(fmt.Sprintf("Release oracle not found at %x", r.config.Oracle))
 					continue
 				}
-				glog.V(logger.Error).Infof("Failed to retrieve current release: %v", err)
+				log.Error(fmt.Sprintf("Failed to retrieve current release: %v", err))
 				continue
 			}
 			// Version was successfully retrieved, notify if newer than ours
@@ -144,13 +143,13 @@ func (r *ReleaseService) checker() {
 				howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
 				separator := strings.Repeat("-", len(warning))
 
-				glog.V(logger.Warn).Info(separator)
-				glog.V(logger.Warn).Info(warning)
-				glog.V(logger.Warn).Info(howtofix)
-				glog.V(logger.Warn).Info(separator)
+				log.Warn(fmt.Sprint(separator))
+				log.Warn(fmt.Sprint(warning))
+				log.Warn(fmt.Sprint(howtofix))
+				log.Warn(fmt.Sprint(separator))
 			} else {
-				glog.V(logger.Debug).Infof("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
-					r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4])
+				log.Debug(fmt.Sprintf("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
+					r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]))
 			}
 
 		// If termination was requested, return
diff --git a/core/block_validator.go b/core/block_validator.go
index ee524b61f83e70427d24866d2f5f913fc4f433b4..a23a4134b179bd2e6bb444a6b430506d55a9582e 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -24,7 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 	"gopkg.in/fatih/set.v0"
@@ -169,7 +169,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
 			for h := range ancestors {
 				branch += fmt.Sprintf("  O - %x\n  |\n", h)
 			}
-			glog.Infoln(branch)
+			log.Info(fmt.Sprint(branch))
 			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
 		}
 
diff --git a/core/blockchain.go b/core/blockchain.go
index b57eb48e3af5e29c645bba0f2546801e071693d8..e46a76fc1ba21831cb2715588229cea7ee6a8c64 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -36,8 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
@@ -161,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
 			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
 			// make sure the headerByNumber (if present) is in our current canonical chain
 			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
-				glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+				log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
 				bc.SetHead(header.Number.Uint64() - 1)
-				glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
+				log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
 			}
 		}
 	}
@@ -220,9 +219,9 @@ func (self *BlockChain) loadLastState() error {
 	blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
 	fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
 
-	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd)
-	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
-	glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
+	log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd))
+	log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd))
+	log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd))
 
 	return nil
 }
@@ -263,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) {
 	}
 
 	if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
-		glog.Fatalf("failed to reset head block hash: %v", err)
+		log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err))
 	}
 	if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
-		glog.Fatalf("failed to reset head fast block hash: %v", err)
+		log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err))
 	}
 	bc.loadLastState()
 }
@@ -287,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	self.currentBlock = block
 	self.mu.Unlock()
 
-	glog.V(logger.Info).Infof("committed block #%d [%x…] as new head", block.Number(), hash[:4])
+	log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4]))
 	return nil
 }
 
@@ -391,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
 
 	// Prepare the genesis block and reinitialise the chain
 	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
-		glog.Fatalf("failed to write genesis block TD: %v", err)
+		log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
 	}
 	if err := WriteBlock(bc.chainDb, genesis); err != nil {
-		glog.Fatalf("failed to write genesis block: %v", err)
+		log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
 	}
 	bc.genesisBlock = genesis
 	bc.insert(bc.genesisBlock)
@@ -418,7 +417,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
 	}
 
-	glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1)
+	log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1))
 
 	for nr := first; nr <= last; nr++ {
 		block := self.GetBlockByNumber(nr)
@@ -446,10 +445,10 @@ func (bc *BlockChain) insert(block *types.Block) {
 
 	// Add the block to the canonical chain number scheme and mark as the head
 	if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
-		glog.Fatalf("failed to insert block number: %v", err)
+		log.Crit(fmt.Sprintf("failed to insert block number: %v", err))
 	}
 	if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
-		glog.Fatalf("failed to insert head block hash: %v", err)
+		log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err))
 	}
 	bc.currentBlock = block
 
@@ -458,7 +457,7 @@ func (bc *BlockChain) insert(block *types.Block) {
 		bc.hc.SetCurrentHeader(block.Header())
 
 		if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
-			glog.Fatalf("failed to insert head fast block hash: %v", err)
+			log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err))
 		}
 		bc.currentFastBlock = block
 	}
@@ -590,7 +589,7 @@ func (bc *BlockChain) Stop() {
 
 	bc.wg.Wait()
 
-	glog.V(logger.Info).Infoln("Chain manager stopped")
+	log.Info(fmt.Sprint("Chain manager stopped"))
 }
 
 func (self *BlockChain) procFutureBlocks() {
@@ -687,7 +686,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 			failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
 				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
 
-			glog.V(logger.Error).Info(failure.Error())
+			log.Error(fmt.Sprint(failure.Error()))
 			return 0, failure
 		}
 	}
@@ -735,31 +734,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 			if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
 				errs[index] = fmt.Errorf("failed to write block body: %v", err)
 				atomic.AddInt32(&failed, 1)
-				glog.Fatal(errs[index])
+				log.Crit(fmt.Sprint(errs[index]))
 				return
 			}
 			if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
 				errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
 				atomic.AddInt32(&failed, 1)
-				glog.Fatal(errs[index])
+				log.Crit(fmt.Sprint(errs[index]))
 				return
 			}
 			if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
 				errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
 				atomic.AddInt32(&failed, 1)
-				glog.Fatal(errs[index])
+				log.Crit(fmt.Sprint(errs[index]))
 				return
 			}
 			if err := WriteTransactions(self.chainDb, block); err != nil {
 				errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
 				atomic.AddInt32(&failed, 1)
-				glog.Fatal(errs[index])
+				log.Crit(fmt.Sprint(errs[index]))
 				return
 			}
 			if err := WriteReceipts(self.chainDb, receipts); err != nil {
 				errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
 				atomic.AddInt32(&failed, 1)
-				glog.Fatal(errs[index])
+				log.Crit(fmt.Sprint(errs[index]))
 				return
 			}
 			atomic.AddInt32(&stats.processed, 1)
@@ -785,7 +784,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 		}
 	}
 	if atomic.LoadInt32(&self.procInterrupt) == 1 {
-		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
+		log.Debug(fmt.Sprint("premature abort during receipt chain processing"))
 		return 0, nil
 	}
 	// Update the head fast sync block if better
@@ -793,7 +792,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 	head := blockChain[len(errs)-1]
 	if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
 		if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
-			glog.Fatalf("failed to update head fast block hash: %v", err)
+			log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err))
 		}
 		self.currentFastBlock = head
 	}
@@ -806,7 +805,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 	if stats.ignored > 0 {
 		ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
 	}
-	glog.V(logger.Info).Infof("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored)
+	log.Info(fmt.Sprintf("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored))
 
 	return 0, nil
 }
@@ -830,10 +829,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 
 	// Irrelevant of the canonical status, write the block itself to the database
 	if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
-		glog.Fatalf("failed to write block total difficulty: %v", err)
+		log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err))
 	}
 	if err := WriteBlock(self.chainDb, block); err != nil {
-		glog.Fatalf("failed to write block contents: %v", err)
+		log.Crit(fmt.Sprintf("failed to write block contents: %v", err))
 	}
 
 	// If the total difficulty is higher than our known, add it to the canonical chain
@@ -867,7 +866,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 			failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
 				i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
 
-			glog.V(logger.Error).Info(failure.Error())
+			log.Error(fmt.Sprint(failure.Error()))
 			return 0, failure
 		}
 	}
@@ -894,7 +893,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 
 	for i, block := range chain {
 		if atomic.LoadInt32(&self.procInterrupt) == 1 {
-			glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
+			log.Debug(fmt.Sprint("Premature abort during block chain processing"))
 			break
 		}
 		bstart := time.Now()
@@ -991,9 +990,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 
 		switch status {
 		case CanonStatTy:
-			if glog.V(logger.Debug) {
-				glog.Infof("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
+			}})
 			blockInsertTimer.UpdateSince(bstart)
 			events = append(events, ChainEvent{block, block.Hash(), logs})
 
@@ -1014,9 +1013,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 				return i, err
 			}
 		case SideStatTy:
-			if glog.V(logger.Detail) {
-				glog.Infof("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
-			}
+			log.Trace("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
+			}})
 			blockInsertTimer.UpdateSince(bstart)
 			events = append(events, ChainSideEvent{block})
 
@@ -1025,10 +1024,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 		}
 
 		stats.processed++
-		if glog.V(logger.Info) {
-			stats.usedGas += usedGas.Uint64()
-			stats.report(chain, i)
-		}
+		stats.usedGas += usedGas.Uint64()
+		stats.report(chain, i)
 	}
 
 	go self.postChainEvents(events, coalescedLogs)
@@ -1070,7 +1067,7 @@ func (st *insertStats) report(chain []*types.Block, index int) {
 		} else {
 			hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
 		}
-		glog.Infof("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra)
+		log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra))
 
 		*st = insertStats{startTime: now, lastIndex: index}
 	}
@@ -1150,21 +1147,24 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 			return fmt.Errorf("Invalid new chain")
 		}
 	}
+	// Ensure the user sees large reorgs
+	logFn := log.Debug
+	if len(oldChain) > 63 {
+		logFn = log.Warn
+	}
+	logFn("", "msg", log.Lazy{Fn: func() string {
+		oldLen, newLen := len(oldChain), len(newChain)
+		newLast, newFirst := newChain[0], newChain[newLen-1]
+		oldLast, oldFirst := oldChain[0], oldChain[oldLen-1]
 
-	if oldLen := len(oldChain); oldLen > 63 || glog.V(logger.Debug) {
-		newLen := len(newChain)
-		newLast := newChain[0]
-		newFirst := newChain[newLen-1]
-		oldLast := oldChain[0]
-		oldFirst := oldChain[oldLen-1]
-		glog.Infof("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
+		return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
 			commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
 			oldLen, newLen,
 			oldFirst.Number(), oldLast.Number(),
 			oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
 			newFirst.Number(), newLast.Number(),
 			newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
-	}
+	}})
 
 	var addedTxs types.Transactions
 	// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
@@ -1271,12 +1271,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) {
 // reportBlock logs a bad block error.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
 	bc.addBadBlock(block)
-	if glog.V(logger.Error) {
+	log.Error("", "msg", log.Lazy{Fn: func() string {
 		var receiptString string
 		for _, receipt := range receipts {
 			receiptString += fmt.Sprintf("\t%v\n", receipt)
 		}
-		glog.Errorf(`
+		return fmt.Sprintf(`
 ########## BAD BLOCK #########
 Chain config: %v
 
@@ -1287,7 +1287,7 @@ Hash: 0x%x
 Error: %v
 ##############################
 `, bc.config, block.Number(), block.Hash(), receiptString, err)
-	}
+	}})
 }
 
 // InsertHeaderChain attempts to insert the given header chain in to the local
diff --git a/core/database_util.go b/core/database_util.go
index e83d5d5e70f683405cfa0a2445c9960e288d1961..23240b35e48aacfa461c93fe77b8e4c55b389ca0 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -28,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -107,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
 		}
 		header := new(types.Header)
 		if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
-			glog.Fatalf("failed to decode block header: %v", err)
+			log.Crit(fmt.Sprintf("failed to decode block header: %v", err))
 		}
 		return header.Number.Uint64()
 	}
@@ -167,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
 	}
 	header := new(types.Header)
 	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
-		glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err))
 		return nil
 	}
 	return header
@@ -191,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
 	}
 	body := new(types.Body)
 	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
-		glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
 		return nil
 	}
 	return body
@@ -209,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
 	}
 	td := new(big.Int)
 	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
-		glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid block total difficulty RLP for hash %x: %v", hash, err))
 		return nil
 	}
 	return td
@@ -247,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
 	}
 	storageReceipts := []*types.ReceiptForStorage{}
 	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
-		glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err))
 		return nil
 	}
 	receipts := make(types.Receipts, len(storageReceipts))
@@ -294,7 +293,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
 	var receipt types.ReceiptForStorage
 	err := rlp.DecodeBytes(data, &receipt)
 	if err != nil {
-		glog.V(logger.Debug).Infoln("GetReceipt err:", err)
+		log.Debug(fmt.Sprint("GetReceipt err:", err))
 	}
 	return (*types.Receipt)(&receipt)
 }
@@ -303,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
 func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
 	key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
 	if err := db.Put(key, hash.Bytes()); err != nil {
-		glog.Fatalf("failed to store number to hash mapping into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err))
 	}
 	return nil
 }
@@ -311,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
 // WriteHeadHeaderHash stores the head header's hash.
 func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
 	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
-		glog.Fatalf("failed to store last header's hash into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err))
 	}
 	return nil
 }
@@ -319,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
 // WriteHeadBlockHash stores the head block's hash.
 func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
 	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
-		glog.Fatalf("failed to store last block's hash into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err))
 	}
 	return nil
 }
@@ -327,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
 // WriteHeadFastBlockHash stores the fast head block's hash.
 func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
 	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
-		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err))
 	}
 	return nil
 }
@@ -343,13 +342,13 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
 	encNum := encodeBlockNumber(num)
 	key := append(blockHashPrefix, hash...)
 	if err := db.Put(key, encNum); err != nil {
-		glog.Fatalf("failed to store hash to number mapping into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err))
 	}
 	key = append(append(headerPrefix, encNum...), hash...)
 	if err := db.Put(key, data); err != nil {
-		glog.Fatalf("failed to store header into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store header into database: %v", err))
 	}
-	glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, hash[:4])
+	log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4]))
 	return nil
 }
 
@@ -366,9 +365,9 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
 func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
 	key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
 	if err := db.Put(key, rlp); err != nil {
-		glog.Fatalf("failed to store block body into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store block body into database: %v", err))
 	}
-	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
+	log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4]))
 	return nil
 }
 
@@ -380,9 +379,9 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
 	}
 	key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
 	if err := db.Put(key, data); err != nil {
-		glog.Fatalf("failed to store block total difficulty into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err))
 	}
-	glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
+	log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td))
 	return nil
 }
 
@@ -415,9 +414,9 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
 	// Store the flattened receipt slice
 	key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
 	if err := db.Put(key, bytes); err != nil {
-		glog.Fatalf("failed to store block receipts into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err))
 	}
-	glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
+	log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4]))
 	return nil
 }
 
@@ -458,7 +457,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
 	}
 	// Write the scheduled data into the database
 	if err := batch.Write(); err != nil {
-		glog.Fatalf("failed to store transactions into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err))
 	}
 	return nil
 }
@@ -490,7 +489,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
 	}
 	// Write the scheduled data into the database
 	if err := batch.Write(); err != nil {
-		glog.Fatalf("failed to store receipts into database: %v", err)
+		log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err))
 	}
 	return nil
 }
@@ -552,7 +551,7 @@ func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
 	}
 	var block types.StorageBlock
 	if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
-		glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid block RLP for hash %x: %v", hash, err))
 		return nil
 	}
 	return (*types.Block)(&block)
@@ -623,7 +622,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
 		if err := batch.Write(); err != nil {
 			return fmt.Errorf("preimage write fail for block %d: %v", number, err)
 		}
-		glog.V(logger.Debug).Infof("%d preimages in block %d, including %d new", len(preimages), number, hitCount)
+		log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount))
 	}
 	return nil
 }
diff --git a/core/genesis.go b/core/genesis.go
index b94b5af76f56b3031f8f95d32309856a84bc5172..a015f04c14fc92a538c23a96c75e36bb3ba5e0f1 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -31,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -92,7 +91,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	}, nil, nil, nil)
 
 	if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
-		glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
+		log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number"))
 		err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
 		if err != nil {
 			return nil, err
diff --git a/core/headerchain.go b/core/headerchain.go
index 1dc189323d1f040df766322376acb3ba34a7f7ad..a0550a4286f34302083723985fc1ac8a97498aae 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -30,8 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 	"github.com/hashicorp/golang-lru"
@@ -102,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
 		if err != nil {
 			return nil, err
 		}
-		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+		log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
 		hc.genesisHeader = genesisBlock.Header()
 	}
 
@@ -155,10 +154,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 
 	// Irrelevant of the canonical status, write the td and header to the database
 	if err := hc.WriteTd(hash, number, externTd); err != nil {
-		glog.Fatalf("failed to write header total difficulty: %v", err)
+		log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err))
 	}
 	if err := WriteHeader(hc.chainDb, header); err != nil {
-		glog.Fatalf("failed to write header contents: %v", err)
+		log.Crit(fmt.Sprintf("failed to write header contents: %v", err))
 	}
 
 	// If the total difficulty is higher than our known, add it to the canonical chain
@@ -189,10 +188,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 
 		// Extend the canonical chain with the new header
 		if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
-			glog.Fatalf("failed to insert header number: %v", err)
+			log.Crit(fmt.Sprintf("failed to insert header number: %v", err))
 		}
 		if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
-			glog.Fatalf("failed to insert head header hash: %v", err)
+			log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
 		}
 
 		hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
@@ -231,7 +230,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 			failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
 				i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
 
-			glog.V(logger.Error).Info(failure.Error())
+			log.Error(fmt.Sprint(failure.Error()))
 			return 0, failure
 		}
 	}
@@ -317,7 +316,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 	for i, header := range chain {
 		// Short circuit insertion if shutting down
 		if hc.procInterrupt() {
-			glog.V(logger.Debug).Infoln("premature abort during header chain processing")
+			log.Debug(fmt.Sprint("premature abort during header chain processing"))
 			break
 		}
 		hash := header.Hash()
@@ -339,7 +338,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 	if stats.ignored > 0 {
 		ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
 	}
-	glog.V(logger.Info).Infof("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+	log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]))
 
 	return 0, nil
 }
@@ -446,7 +445,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
 // SetCurrentHeader sets the current head header of the canonical chain.
 func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 	if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
-		glog.Fatalf("failed to insert head header hash: %v", err)
+		log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
 	}
 	hc.currentHeader = head
 	hc.currentHeaderHash = head.Hash()
@@ -489,7 +488,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
 	hc.currentHeaderHash = hc.currentHeader.Hash()
 
 	if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
-		glog.Fatalf("failed to reset head header hash: %v", err)
+		log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err))
 	}
 }
 
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 4fb69b6462c1d70a1105c9de261546d0fd45be2a..ebb103806fcb24c716410c785a7e9c1556dc9500 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -24,8 +24,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -135,9 +134,9 @@ func (self *stateObject) markSuicided() {
 		self.onDirty(self.Address())
 		self.onDirty = nil
 	}
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
+	}})
 }
 
 func (c *stateObject) touch() {
@@ -253,9 +252,9 @@ func (c *stateObject) AddBalance(amount *big.Int) {
 	}
 	c.SetBalance(new(big.Int).Add(c.Balance(), amount))
 
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
+	}})
 }
 
 // SubBalance removes amount from c's balance.
@@ -266,9 +265,9 @@ func (c *stateObject) SubBalance(amount *big.Int) {
 	}
 	c.SetBalance(new(big.Int).Sub(c.Balance(), amount))
 
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
+	}})
 }
 
 func (self *stateObject) SetBalance(amount *big.Int) {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index cae2dc4b2b170b27afc14f36dbd3b918c3b46093..a87607a25b9af467102af2c17c430c1d084ba0d5 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -27,8 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 	lru "github.com/hashicorp/golang-lru"
@@ -411,7 +410,7 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
 	}
 	var data Account
 	if err := rlp.DecodeBytes(enc, &data); err != nil {
-		glog.Errorf("can't decode object at %x: %v", addr[:], err)
+		log.Error(fmt.Sprintf("can't decode object at %x: %v", addr[:], err))
 		return nil
 	}
 	// Insert into the live set.
@@ -446,9 +445,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
 	newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
 	newobj.setNonce(0) // sets the object to dirty
 	if prev == nil {
-		if glog.V(logger.Debug) {
-			glog.Infof("(+) %x\n", addr)
-		}
+		log.Debug("", "msg", log.Lazy{Fn: func() string {
+			return fmt.Sprintf("(+) %x\n", addr)
+		}})
 		self.journal = append(self.journal, createObjectChange{account: &addr})
 	} else {
 		self.journal = append(self.journal, resetObjectChange{prev: prev})
@@ -617,7 +616,7 @@ func (s *StateDB) CommitBatch(deleteEmptyObjects bool) (root common.Hash, batch
 	batch = s.db.NewBatch()
 	root, _ = s.commit(batch, deleteEmptyObjects)
 
-	glog.V(logger.Debug).Infof("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads())
+	log.Debug(fmt.Sprintf("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads()))
 	return root, batch
 }
 
diff --git a/core/state_processor.go b/core/state_processor.go
index 6485e9abda39d316bae8c17b0923eb634e9ae5bd..72c6e6c37dab0babe41d02e7a6c6f967fd82518c 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -17,14 +17,14 @@
 package core
 
 import (
+	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -122,7 +122,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
 	receipt.Logs = statedb.GetLogs(tx.Hash())
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 
-	glog.V(logger.Debug).Infoln(receipt)
+	log.Debug(fmt.Sprint(receipt))
 
 	return receipt, gas, err
 }
diff --git a/core/state_transition.go b/core/state_transition.go
index 6acc78479393126f17d5ae825c87fa2b4501e08a..8e7891b965f8706d07a8f5bcf37f7b3af446b6c8 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -18,12 +18,12 @@ package core
 
 import (
 	"errors"
+	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -255,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
 		ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
 	}
 	if vmerr != nil {
-		glog.V(logger.Debug).Infoln("vm returned with error:", err)
+		log.Debug(fmt.Sprint("vm returned with error:", err))
 		// The only possible consensus-error would be if there wasn't
 		// sufficient balance to make the transfer happen. The first
 		// balance transfer may never fail.
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 987b43d4a9fb94adbf5c910ff5faf2156cfb7f17..b0a8eea0fdbd08467bfcecc3c88b0bc7b88ee184 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -28,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
@@ -163,12 +162,12 @@ func (pool *TxPool) eventLoop() {
 func (pool *TxPool) resetState() {
 	currentState, err := pool.currentState()
 	if err != nil {
-		glog.V(logger.Error).Infof("Failed to get current state: %v", err)
+		log.Error(fmt.Sprintf("Failed to get current state: %v", err))
 		return
 	}
 	managedState := state.ManageState(currentState)
 	if err != nil {
-		glog.V(logger.Error).Infof("Failed to get managed state: %v", err)
+		log.Error(fmt.Sprintf("Failed to get managed state: %v", err))
 		return
 	}
 	pool.pendingState = managedState
@@ -193,7 +192,7 @@ func (pool *TxPool) Stop() {
 	pool.events.Unsubscribe()
 	close(pool.quit)
 	pool.wg.Wait()
-	glog.V(logger.Info).Infoln("Transaction pool stopped")
+	log.Info(fmt.Sprint("Transaction pool stopped"))
 }
 
 func (pool *TxPool) State() *state.ManagedState {
@@ -334,14 +333,14 @@ func (pool *TxPool) add(tx *types.Transaction) error {
 	pool.enqueueTx(hash, tx)
 
 	// Print a log message if low enough level is set
-	if glog.V(logger.Debug) {
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
 		rcpt := "[NEW_CONTRACT]"
 		if to := tx.To(); to != nil {
 			rcpt = common.Bytes2Hex(to[:4])
 		}
 		from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
-		glog.Infof("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value, hash)
-	}
+		return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash)
+	}})
 	return nil
 }
 
@@ -423,7 +422,7 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
 
 	for _, tx := range txs {
 		if err := pool.add(tx); err != nil {
-			glog.V(logger.Debug).Infoln("tx error:", err)
+			log.Debug(fmt.Sprint("tx error:", err))
 		}
 	}
 
@@ -514,32 +513,32 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 	for addr, list := range pool.queue {
 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(state.GetNonce(addr)) {
-			if glog.V(logger.Debug) {
-				glog.Infof("Removed old queued transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Removed old queued transaction: %v", tx)
+			}})
 			delete(pool.all, tx.Hash())
 		}
 		// Drop all transactions that are too costly (low balance)
 		drops, _ := list.Filter(state.GetBalance(addr))
 		for _, tx := range drops {
-			if glog.V(logger.Debug) {
-				glog.Infof("Removed unpayable queued transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Removed unpayable queued transaction: %v", tx)
+			}})
 			delete(pool.all, tx.Hash())
 			queuedNofundsCounter.Inc(1)
 		}
 		// Gather all executable transactions and promote them
 		for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
-			if glog.V(logger.Debug) {
-				glog.Infof("Promoting queued transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Promoting queued transaction: %v", tx)
+			}})
 			pool.promoteTx(addr, tx.Hash(), tx)
 		}
 		// Drop all transactions over the allowed limit
 		for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
-			if glog.V(logger.Debug) {
-				glog.Infof("Removed cap-exceeding queued transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx)
+			}})
 			delete(pool.all, tx.Hash())
 			queuedRLCounter.Inc(1)
 		}
@@ -651,24 +650,24 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
 
 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(nonce) {
-			if glog.V(logger.Debug) {
-				glog.Infof("Removed old pending transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Removed old pending transaction: %v", tx)
+			}})
 			delete(pool.all, tx.Hash())
 		}
 		// Drop all transactions that are too costly (low balance), and queue any invalids back for later
 		drops, invalids := list.Filter(state.GetBalance(addr))
 		for _, tx := range drops {
-			if glog.V(logger.Debug) {
-				glog.Infof("Removed unpayable pending transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Removed unpayable pending transaction: %v", tx)
+			}})
 			delete(pool.all, tx.Hash())
 			pendingNofundsCounter.Inc(1)
 		}
 		for _, tx := range invalids {
-			if glog.V(logger.Debug) {
-				glog.Infof("Demoting pending transaction: %v", tx)
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("Demoting pending transaction: %v", tx)
+			}})
 			pool.enqueueTx(tx.Hash(), tx)
 		}
 		// Delete the entire queue entry if it became empty.
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 1447c2cada7db0ad32fd7881e13e6a591aae72c2..2793d2aa769dfc369a6b1f051f931ac036a137b2 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -18,11 +18,11 @@ package vm
 
 import (
 	"crypto/sha256"
+	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"golang.org/x/crypto/ripemd160"
 )
@@ -75,14 +75,14 @@ func (c *ecrecover) Run(in []byte) []byte {
 
 	// tighter sig s values in homestead only apply to tx sigs
 	if common.Bytes2Big(in[32:63]).BitLen() > 0 || !crypto.ValidateSignatureValues(v, r, s, false) {
-		glog.V(logger.Detail).Infof("ECRECOVER error: v, r or s value invalid")
+		log.Trace("ECRECOVER error: v, r or s value invalid")
 		return nil
 	}
 	// v needs to be at the end for libsecp256k1
 	pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
 	// make sure the public key is a valid one
 	if err != nil {
-		glog.V(logger.Detail).Infoln("ECRECOVER error: ", err)
+		log.Trace(fmt.Sprint("ECRECOVER error: ", err))
 		return nil
 	}
 
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 46c6befefcb12078f2bca9e6ab6af278d471d912..41f6a53f8b4772868a3acb624f02f1baffc05528 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -25,8 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -124,13 +123,13 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
 		}
 	}()
 
-	if glog.V(logger.Debug) {
-		glog.Infof("evm running: %x\n", codehash[:4])
-		tstart := time.Now()
-		defer func() {
-			glog.Infof("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
-		}()
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("evm running: %x\n", codehash[:4])
+	}})
+	tstart := time.Now()
+	defer log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
+	}})
 
 	// The Interpreter main run loop (contextual). This loop runs until either an
 	// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
diff --git a/errs/errors.go b/errs/errors.go
index daa814db71dca1788efe8730893a2c504db3f97a..6f0e86b19e6c4e9c46018fd07faf279376994612 100644
--- a/errs/errors.go
+++ b/errs/errors.go
@@ -16,11 +16,7 @@
 
 package errs
 
-import (
-	"fmt"
-
-	"github.com/ethereum/go-ethereum/logger/glog"
-)
+import "fmt"
 
 /*
 Errors implements an error handler providing standardised errors for a package.
@@ -80,9 +76,3 @@ func (self Error) Error() (message string) {
 	}
 	return self.message
 }
-
-func (self Error) Log(v glog.Verbose) {
-	if v {
-		v.Infoln(self)
-	}
-}
diff --git a/eth/api.go b/eth/api.go
index f38c0a6b6e6b9beb5f9a05f73e062599e0bcf2fc..3cec749df4e425f236f05b0fc0d7b62984760e34 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -37,8 +37,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/miner"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -113,7 +112,7 @@ func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
 	if work, err = s.agent.GetWork(); err == nil {
 		return
 	}
-	glog.V(logger.Debug).Infof("%v", err)
+	log.Debug(fmt.Sprintf("%v", err))
 	return work, fmt.Errorf("mining not ready")
 }
 
diff --git a/eth/backend.go b/eth/backend.go
index ef3ac93c899b38134a0778e860902194d67de7cd..f20e4a5099903bacfc887152ef2bef327c6bd6f5 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -40,8 +40,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/miner"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
@@ -184,7 +183,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		return nil, err
 	}
 
-	glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId)
+	log.Info(fmt.Sprintf("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId))
 
 	if !config.SkipBcVersionCheck {
 		bcVersion := core.GetBlockChainVersion(chainDb)
@@ -202,7 +201,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		if err != nil {
 			return nil, err
 		}
-		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+		log.Info("WARNING: Wrote default ethereum genesis block")
 	}
 
 	if config.ChainConfig == nil {
@@ -212,7 +211,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 
 	eth.chainConfig = config.ChainConfig
 
-	glog.V(logger.Info).Infoln("Chain config:", eth.chainConfig)
+	log.Info(fmt.Sprint("Chain config: ", eth.chainConfig))
 
 	eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording})
 	if err != nil {
@@ -273,7 +272,7 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
 		if err != nil {
 			return err
 		}
-		glog.V(logger.Info).Infof("Successfully wrote custom genesis block: %x", block.Hash())
+		log.Info(fmt.Sprintf("Successfully wrote custom genesis block: %x", block.Hash()))
 	}
 	// Load up a test setup if directly injected
 	if config.TestGenesisState != nil {
@@ -292,13 +291,13 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
 func CreatePoW(config *Config) (pow.PoW, error) {
 	switch {
 	case config.PowFake:
-		glog.V(logger.Info).Infof("ethash used in fake mode")
+		log.Info("ethash used in fake mode")
 		return pow.PoW(core.FakePow{}), nil
 	case config.PowTest:
-		glog.V(logger.Info).Infof("ethash used in test mode")
+		log.Info("ethash used in test mode")
 		return ethash.NewForTesting()
 	case config.PowShared:
-		glog.V(logger.Info).Infof("ethash used in shared mode")
+		log.Info("ethash used in shared mode")
 		return ethash.NewShared(), nil
 	default:
 		return ethash.New(), nil
@@ -382,7 +381,7 @@ func (s *Ethereum) StartMining(threads int) error {
 	eb, err := s.Etherbase()
 	if err != nil {
 		err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
-		glog.V(logger.Error).Infoln(err)
+		log.Error(fmt.Sprint(err))
 		return err
 	}
 	go s.miner.Start(eb, threads)
@@ -470,14 +469,14 @@ func (self *Ethereum) StartAutoDAG() {
 		return // already started
 	}
 	go func() {
-		glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir)
+		log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir))
 		var nextEpoch uint64
 		timer := time.After(0)
 		self.autodagquit = make(chan bool)
 		for {
 			select {
 			case <-timer:
-				glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir)
+				log.Info(fmt.Sprintf("checking DAG (ethash dir: %s)", ethash.DefaultDir))
 				currentBlock := self.BlockChain().CurrentBlock().NumberU64()
 				thisEpoch := currentBlock / epochLength
 				if nextEpoch <= thisEpoch {
@@ -486,19 +485,19 @@ func (self *Ethereum) StartAutoDAG() {
 							previousDag, previousDagFull := dagFiles(thisEpoch - 1)
 							os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
 							os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
-							glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag)
+							log.Info(fmt.Sprintf("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag))
 						}
 						nextEpoch = thisEpoch + 1
 						dag, _ := dagFiles(nextEpoch)
 						if _, err := os.Stat(dag); os.IsNotExist(err) {
-							glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag)
+							log.Info(fmt.Sprintf("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag))
 							err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
 							if err != nil {
-								glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag)
+								log.Error(fmt.Sprintf("Error generating DAG for epoch %d (%s)", nextEpoch, dag))
 								return
 							}
 						} else {
-							glog.V(logger.Error).Infof("DAG for epoch %d (%s)", nextEpoch, dag)
+							log.Error(fmt.Sprintf("DAG for epoch %d (%s)", nextEpoch, dag))
 						}
 					}
 				}
@@ -516,7 +515,7 @@ func (self *Ethereum) StopAutoDAG() {
 		close(self.autodagquit)
 		self.autodagquit = nil
 	}
-	glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
+	log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir))
 }
 
 // dagFiles(epoch) returns the two alternative DAG filenames (not a path)
diff --git a/eth/bad_block.go b/eth/bad_block.go
index e0f05f540fd3bf20e10755178ccb2da8955c78a2..0812af7c03ef4fb6f5bcccd4ce86475a9adeeda9 100644
--- a/eth/bad_block.go
+++ b/eth/bad_block.go
@@ -25,8 +25,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -66,9 +65,9 @@ func sendBadBlockReport(block *types.Block, err error) {
 	client := http.Client{Timeout: 8 * time.Second}
 	resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
 	if err != nil {
-		glog.V(logger.Debug).Infoln(err)
+		log.Debug(fmt.Sprint(err))
 		return
 	}
-	glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
+	log.Debug(fmt.Sprintf("Bad Block Report posted (%d)", resp.StatusCode))
 	resp.Body.Close()
 }
diff --git a/eth/db_upgrade.go b/eth/db_upgrade.go
index 5fd73a58677bf1571c3255414e995a0f9aa47aed..7038ebbbdf97239e9bd82eb471d07328d4051d75 100644
--- a/eth/db_upgrade.go
+++ b/eth/db_upgrade.go
@@ -28,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -50,7 +49,7 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
 		return nil // empty database, nothing to do
 	}
 
-	glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys")
+	log.Info("Upgrading chain database to use sequential keys")
 
 	stopChn := make(chan struct{})
 	stoppedChn := make(chan struct{})
@@ -73,11 +72,11 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
 			err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
 		}
 		if err == nil && !stopped {
-			glog.V(logger.Info).Infof("Database conversion successful")
+			log.Info("Database conversion successful")
 			db.Put(useSequentialKeys, []byte{42})
 		}
 		if err != nil {
-			glog.V(logger.Error).Infof("Database conversion failed: %v", err)
+			log.Error(fmt.Sprintf("Database conversion failed: %v", err))
 		}
 		close(stoppedChn)
 	}()
@@ -106,7 +105,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
 				it.Release()
 				it = db.(*ethdb.LDBDatabase).NewIterator()
 				it.Seek(keyPtr)
-				glog.V(logger.Info).Infof("converting %d canonical numbers...", cnt)
+				log.Info(fmt.Sprintf("converting %d canonical numbers...", cnt))
 			}
 			number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
 			newKey := []byte("h12345678n")
@@ -125,7 +124,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
 		it.Next()
 	}
 	if cnt > 0 {
-		glog.V(logger.Info).Infof("converted %d canonical numbers...", cnt)
+		log.Info(fmt.Sprintf("converted %d canonical numbers...", cnt))
 	}
 	return nil, false
 }
@@ -149,7 +148,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
 				it.Release()
 				it = db.(*ethdb.LDBDatabase).NewIterator()
 				it.Seek(keyPtr)
-				glog.V(logger.Info).Infof("converting %d blocks...", cnt)
+				log.Info(fmt.Sprintf("converting %d blocks...", cnt))
 			}
 			// convert header, body, td and block receipts
 			var keyPrefix [38]byte
@@ -177,7 +176,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
 		}
 	}
 	if cnt > 0 {
-		glog.V(logger.Info).Infof("converted %d blocks...", cnt)
+		log.Info(fmt.Sprintf("converted %d blocks...", cnt))
 	}
 	return nil, false
 }
@@ -204,7 +203,7 @@ func upgradeSequentialOrphanedReceipts(db ethdb.Database, stopFn func() bool) (e
 		it.Next()
 	}
 	if cnt > 0 {
-		glog.V(logger.Info).Infof("removed %d orphaned block receipts...", cnt)
+		log.Info(fmt.Sprintf("removed %d orphaned block receipts...", cnt))
 	}
 	return nil, false
 }
@@ -267,7 +266,7 @@ func upgradeChainDatabase(db ethdb.Database) error {
 		return nil
 	}
 	// At least some of the database is still the old format, upgrade (skip the head block!)
-	glog.V(logger.Info).Info("Old database detected, upgrading...")
+	log.Info("Old database detected, upgrading...")
 
 	if db, ok := db.(*ethdb.LDBDatabase); ok {
 		blockPrefix := []byte("block-hash-")
@@ -343,7 +342,7 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
 	}
 
 	tstart := time.Now()
-	glog.V(logger.Info).Infoln("upgrading db log bloom bins")
+	log.Info("upgrading db log bloom bins")
 	for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
 		hash := core.GetCanonicalHash(db, i)
 		if (hash == common.Hash{}) {
@@ -351,6 +350,6 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
 		}
 		core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
 	}
-	glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
+	log.Info(fmt.Sprint("upgrade completed in ", time.Since(tstart)))
 	return nil
 }
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 7e2952439a966aa3d3c50494805b8ddba205ab8d..b323c94f95ab4a9a9c502db02715bc6e817702e3 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -33,8 +33,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/rcrowley/go-metrics"
@@ -249,9 +248,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
 
-	glog.V(logger.Detail).Infoln("Registering peer", id)
+	log.Trace(fmt.Sprint("Registering peer ", id))
 	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
-		glog.V(logger.Error).Infoln("Register failed:", err)
+		log.Error(fmt.Sprint("Register failed: ", err))
 		return err
 	}
 	d.qosReduceConfidence()
@@ -264,9 +263,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
 	// Unregister the peer from the active peer set and revoke any fetch tasks
-	glog.V(logger.Detail).Infoln("Unregistering peer", id)
+	log.Trace(fmt.Sprint("Unregistering peer ", id))
 	if err := d.peers.Unregister(id); err != nil {
-		glog.V(logger.Error).Infoln("Unregister failed:", err)
+		log.Error(fmt.Sprint("Unregister failed: ", err))
 		return err
 	}
 	d.queue.Revoke(id)
@@ -285,24 +284,24 @@ func (d *Downloader) UnregisterPeer(id string) error {
 // Synchronise tries to sync up our local block chain with a remote peer, both
 // adding various sanity checks as well as wrapping it with various log entries.
 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
-	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
+	log.Trace(fmt.Sprintf("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td))
 
 	err := d.synchronise(id, head, td, mode)
 	switch err {
 	case nil:
-		glog.V(logger.Detail).Infof("Synchronisation completed")
+		log.Trace("Synchronisation completed")
 
 	case errBusy:
-		glog.V(logger.Detail).Infof("Synchronisation already in progress")
+		log.Trace("Synchronisation already in progress")
 
 	case errTimeout, errBadPeer, errStallingPeer,
 		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
 		errInvalidAncestor, errInvalidChain:
-		glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
+		log.Debug(fmt.Sprintf("Removing peer %v: %v", id, err))
 		d.dropPeer(id)
 
 	default:
-		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
+		log.Warn(fmt.Sprintf("Synchronisation failed: %v", err))
 	}
 	return err
 }
@@ -323,7 +322,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 
 	// Post a user notification of the sync (only once per session)
 	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
-		glog.V(logger.Info).Infoln("Block synchronisation started")
+		log.Info("Block synchronisation started")
 	}
 	// Reset the queue, peer set and wake channels to clean any internal leftover state
 	d.queue.Reset()
@@ -388,9 +387,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 		return errTooOld
 	}
 
-	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
+	log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
 	defer func(start time.Time) {
-		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
+		log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
 	}(time.Now())
 
 	// Look up the sync boundaries: the common ancestor and the target block
@@ -438,7 +437,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 				origin = 0
 			}
 		}
-		glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+		log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
 	}
 	d.queue.Prepare(origin+1, d.mode, pivot, latest)
 	if d.syncInitHook != nil {
@@ -523,7 +522,7 @@ func (d *Downloader) Terminate() {
 // fetchHeight retrieves the head header of the remote peer to aid in estimating
 // the total time a pending synchronisation would take.
 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
-	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
+	log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))
 
 	// Request the advertised remote head block and wait for the response
 	head, _ := p.currentHead()
@@ -538,19 +537,19 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 		case packet := <-d.headerCh:
 			// Discard anything not from the origin peer
 			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
+				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
 				break
 			}
 			// Make sure the peer actually gave something valid
 			headers := packet.(*headerPack).headers
 			if len(headers) != 1 {
-				glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
+				log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
 				return nil, errBadPeer
 			}
 			return headers[0], nil
 
 		case <-timeout:
-			glog.V(logger.Debug).Infof("%v: head header timeout", p)
+			log.Debug(fmt.Sprintf("%v: head header timeout", p))
 			return nil, errTimeout
 
 		case <-d.bodyCh:
@@ -567,7 +566,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
-	glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)
+	log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))
 
 	// Figure out the valid ancestor range to prevent rewrite attacks
 	floor, ceil := int64(-1), d.headHeader().Number.Uint64()
@@ -608,19 +607,19 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 		case packet := <-d.headerCh:
 			// Discard anything not from the origin peer
 			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
+				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
 				break
 			}
 			// Make sure the peer actually gave something valid
 			headers := packet.(*headerPack).headers
 			if len(headers) == 0 {
-				glog.V(logger.Warn).Infof("%v: empty head header set", p)
+				log.Warn(fmt.Sprintf("%v: empty head header set", p))
 				return 0, errEmptyHeaderSet
 			}
 			// Make sure the peer's reply conforms to the request
 			for i := 0; i < len(headers); i++ {
 				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-					glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
+					log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
 					return 0, errInvalidChain
 				}
 			}
@@ -637,7 +636,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 
 					// If every header is known, even future ones, the peer straight out lied about its head
 					if number > height && i == limit-1 {
-						glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
+						log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
 						return 0, errStallingPeer
 					}
 					break
@@ -645,7 +644,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			}
 
 		case <-timeout:
-			glog.V(logger.Debug).Infof("%v: head header timeout", p)
+			log.Debug(fmt.Sprintf("%v: head header timeout", p))
 			return 0, errTimeout
 
 		case <-d.bodyCh:
@@ -657,10 +656,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	// If the head fetch already found an ancestor, return
 	if !common.EmptyHash(hash) {
 		if int64(number) <= floor {
-			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
+			log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
 			return 0, errInvalidAncestor
 		}
-		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
+		log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
@@ -684,13 +683,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			case packer := <-d.headerCh:
 				// Discard anything not from the origin peer
 				if packer.PeerId() != p.id {
-					glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId())
+					log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
 					break
 				}
 				// Make sure the peer actually gave something valid
 				headers := packer.(*headerPack).headers
 				if len(headers) != 1 {
-					glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
+					log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
 					return 0, errBadPeer
 				}
 				arrived = true
@@ -702,13 +701,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 				}
 				header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
 				if header.Number.Uint64() != check {
-					glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check)
+					log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
 					return 0, errBadPeer
 				}
 				start = check
 
 			case <-timeout:
-				glog.V(logger.Debug).Infof("%v: search header timeout", p)
+				log.Debug(fmt.Sprintf("%v: search header timeout", p))
 				return 0, errTimeout
 
 			case <-d.bodyCh:
@@ -720,10 +719,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	}
 	// Ensure valid ancestry and return
 	if int64(start) <= floor {
-		glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
+		log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
 		return 0, errInvalidAncestor
 	}
-	glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
+	log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
 	return start, nil
 }
 
@@ -736,8 +735,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 // can fill in the skeleton - not even the origin peer - it's assumed invalid and
 // the origin is dropped.
 func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
-	glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from)
-	defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
+	log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
+	defer log.Debug(fmt.Sprintf("%v: header download terminated", p))
 
 	// Create a timeout timer, and the associated header fetcher
 	skeleton := true            // Skeleton assembly phase or finishing up
@@ -751,10 +750,10 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 		timeout.Reset(d.requestTTL())
 
 		if skeleton {
-			glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from)
+			log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
 			go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
 		} else {
-			glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from)
+			log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
 			go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
 		}
 	}
@@ -769,7 +768,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 		case packet := <-d.headerCh:
 			// Make sure the active peer is giving us the skeleton headers
 			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId())
+				log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
 				break
 			}
 			headerReqTimer.UpdateSince(request)
@@ -783,7 +782,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			}
 			// If no more headers are inbound, notify the content fetchers and return
 			if packet.Items() == 0 {
-				glog.V(logger.Debug).Infof("%v: no available headers", p)
+				log.Debug(fmt.Sprintf("%v: no available headers", p))
 				select {
 				case d.headerProcCh <- nil:
 					return nil
@@ -797,7 +796,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			if skeleton {
 				filled, proced, err := d.fillHeaderSkeleton(from, headers)
 				if err != nil {
-					glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err)
+					log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
 					return errInvalidChain
 				}
 				headers = filled[proced:]
@@ -805,7 +804,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			}
 			// Insert all the new headers and fetch the next batch
 			if len(headers) > 0 {
-				glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
+				log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
 				select {
 				case d.headerProcCh <- headers:
 				case <-d.cancelCh:
@@ -817,7 +816,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 
 		case <-timeout.C:
 			// Header retrieval timed out, consider the peer bad and drop
-			glog.V(logger.Debug).Infof("%v: header request timed out", p)
+			log.Debug(fmt.Sprintf("%v: header request timed out", p))
 			headerTimeoutMeter.Mark(1)
 			d.dropPeer(p.id)
 
@@ -847,7 +846,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 // The method returs the entire filled skeleton and also the number of headers
 // already forwarded for processing.
 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
-	glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from)
+	log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
 	d.queue.ScheduleSkeleton(from, skeleton)
 
 	var (
@@ -868,7 +867,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
 		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
 
-	glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err)
+	log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))
 
 	filled, proced := d.queue.RetrieveHeaders()
 	return filled, proced, err
@@ -878,7 +877,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 // available peers, reserving a chunk of blocks for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchBodies(from uint64) error {
-	glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
+	log.Debug(fmt.Sprintf("Downloading block bodies from #%d", from))
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -894,7 +893,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
 		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
 
-	glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
+	log.Debug(fmt.Sprintf("Block body download terminated: %v", err))
 	return err
 }
 
@@ -902,7 +901,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 // available peers, reserving a chunk of receipts for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchReceipts(from uint64) error {
-	glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
+	log.Debug(fmt.Sprintf("Downloading receipts from #%d", from))
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -918,7 +917,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
 		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
 
-	glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
+	log.Debug(fmt.Sprintf("Receipt download terminated: %v", err))
 	return err
 }
 
@@ -926,7 +925,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 // available peers, reserving a chunk of nodes for each, waiting for delivery and
 // also periodically checking for timeouts.
 func (d *Downloader) fetchNodeData() error {
-	glog.V(logger.Debug).Infof("Downloading node state data")
+	log.Debug("Downloading node state data")
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -934,12 +933,12 @@ func (d *Downloader) fetchNodeData() error {
 			return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
 				// If the peer returned old-requested data, forgive
 				if err == trie.ErrNotRequested {
-					glog.V(logger.Debug).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId())
+					log.Debug(fmt.Sprintf("peer %s: replied to stale state request, forgiving", packet.PeerId()))
 					return
 				}
 				if err != nil {
 					// If the node data processing failed, the root hash is very wrong, abort
-					glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err)
+					log.Error(fmt.Sprintf("peer %s: state processing failed: %v", packet.PeerId(), err))
 					d.cancel()
 					return
 				}
@@ -958,12 +957,12 @@ func (d *Downloader) fetchNodeData() error {
 
 				// If real database progress was made, reset any fast-sync pivot failure
 				if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
-					glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails))
+					log.Debug(fmt.Sprintf("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)))
 					atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
 				}
 				// Log a message to the user and return
 				if delivered > 0 {
-					glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
+					log.Info(fmt.Sprintf("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending))
 				}
 			})
 		}
@@ -980,7 +979,7 @@ func (d *Downloader) fetchNodeData() error {
 		d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
 		d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
 
-	glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
+	log.Debug(fmt.Sprintf("Node state data download terminated: %v", err))
 	return err
 }
 
@@ -1045,11 +1044,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				// Issue a log to the user to see what's going on
 				switch {
 				case err == nil && packet.Items() == 0:
-					glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
+					log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
 				case err == nil:
-					glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
+					log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
 				default:
-					glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
+					log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
 				}
 			}
 			// Blocks assembled, try to update the progress
@@ -1092,10 +1091,10 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 					// and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
 					// how response times reacts, to it always requests one more than the minimum (i.e. min 2).
 					if fails > 2 {
-						glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
+						log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
 						setIdle(peer, 0)
 					} else {
-						glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
+						log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
 						d.dropPeer(pid)
 					}
 				}
@@ -1103,7 +1102,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 			// If there's nothing more to fetch, wait or terminate
 			if pending() == 0 {
 				if !inFlight() && finished {
-					glog.V(logger.Debug).Infof("%s fetching completed", kind)
+					log.Debug(fmt.Sprintf("%s fetching completed", kind))
 					return nil
 				}
 				break
@@ -1131,15 +1130,15 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				if request == nil {
 					continue
 				}
-				if glog.V(logger.Detail) {
+				log.Trace("", "msg", log.Lazy{Fn: func() string {
 					if request.From > 0 {
-						glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
+						return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
 					} else if len(request.Headers) > 0 {
-						glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
+						return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
 					} else {
-						glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
+						return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
 					}
-				}
+				}})
 				// Fetch the chunk and make sure any errors return the hashes to the queue
 				if fetchHook != nil {
 					fetchHook(request.Headers)
@@ -1194,8 +1193,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			if d.headBlock != nil {
 				curBlock = d.headBlock().Number()
 			}
-			glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
-				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock)
+			log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
+				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))
 
 			// If we're already past the pivot point, this could be an attack, thread carefully
 			if rollback[len(rollback)-1].Number.Uint64() > pivot {
@@ -1203,7 +1202,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
 					for _, header := range rollback {
 						if header.Number.Uint64() == pivot {
-							glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4])
+							log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
 							d.fsPivotLock = header
 						}
 					}
@@ -1299,7 +1298,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 						if n > 0 {
 							rollback = append(rollback, chunk[:n]...)
 						}
-						glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err)
+						log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
 						return errInvalidChain
 					}
 					// All verifications passed, store newly found uncertain headers
@@ -1311,7 +1310,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
 				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
 					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-						glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4])
+						log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
 						return errInvalidChain
 					}
 				}
@@ -1328,7 +1327,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 					// Otherwise insert the headers for content retrieval
 					inserts := d.queue.Schedule(chunk, origin)
 					if len(inserts) != len(chunk) {
-						glog.V(logger.Debug).Infof("stale headers")
+						log.Debug("stale headers")
 						return errBadPeer
 					}
 				}
@@ -1359,10 +1358,10 @@ func (d *Downloader) processContent() error {
 			d.chainInsertHook(results)
 		}
 		// Actually import the blocks
-		if glog.V(logger.Debug) {
+		log.Debug("", "msg", log.Lazy{Fn: func() string {
 			first, last := results[0].Header, results[len(results)-1].Header
-			glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
-		}
+			return fmt.Sprintf("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
+		}})
 		for len(results) != 0 {
 			// Check for any termination requests
 			select {
@@ -1396,14 +1395,14 @@ func (d *Downloader) processContent() error {
 			case len(receipts) > 0:
 				index, err = d.insertReceipts(blocks, receipts)
 				if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-					glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4])
+					log.Debug(fmt.Sprintf("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]))
 					index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
 				}
 			default:
 				index, err = d.insertBlocks(blocks)
 			}
 			if err != nil {
-				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
+				log.Debug(fmt.Sprintf("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err))
 				return errInvalidChain
 			}
 			// Shift the results to the next batch
@@ -1471,7 +1470,7 @@ func (d *Downloader) qosTuner() {
 		atomic.StoreUint64(&d.rttConfidence, conf)
 
 		// Log the new QoS values and sleep until the next RTT
-		glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
+		log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
 		select {
 		case <-d.quitCh:
 			return
@@ -1501,7 +1500,7 @@ func (d *Downloader) qosReduceConfidence() {
 	atomic.StoreUint64(&d.rttConfidence, conf)
 
 	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
-	glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
+	log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
 }
 
 // requestRTT returns the current target round trip time for a download request
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 5be09f37dce3685547dbde6e453b2830d9d47797..fa04e0d23f966ae7e09970f6047e48597afaf099 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -30,8 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/rcrowley/go-metrics"
 	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
@@ -365,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		// Make sure chain order is honoured and preserved throughout
 		hash := header.Hash()
 		if header.Number == nil || header.Number.Uint64() != from {
-			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from)
+			log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from))
 			break
 		}
 		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
-			glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4])
+			log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4]))
 			break
 		}
 		// Make sure no duplicate requests are executed
 		if _, ok := q.blockTaskPool[hash]; ok {
-			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4])
+			log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4]))
 			continue
 		}
 		if _, ok := q.receiptTaskPool[hash]; ok {
-			glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4])
+			log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]))
 			continue
 		}
 		// Queue the header for content retrieval
@@ -392,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 		}
 		if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
 			// Pivoting point of the fast sync, switch the state retrieval to this
-			glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4])
+			log.Debug(fmt.Sprintf("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4]))
 
 			q.stateTaskIndex = 0
 			q.stateTaskPool = make(map[common.Hash]int)
@@ -873,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	accepted := len(headers) == MaxHeaderFetch
 	if accepted {
 		if headers[0].Number.Uint64() != request.From {
-			glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
+			log.Trace(fmt.Sprintf("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From))
 			accepted = false
 		} else if headers[len(headers)-1].Hash() != target {
-			glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
+			log.Trace(fmt.Sprintf("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]))
 			accepted = false
 		}
 	}
@@ -884,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 		for i, header := range headers[1:] {
 			hash := header.Hash()
 			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
-				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
+				log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want))
 				accepted = false
 				break
 			}
 			if headers[i].Hash() != header.ParentHash {
-				glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4])
+				log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4]))
 				accepted = false
 				break
 			}
@@ -897,7 +896,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	}
 	// If the batch of headers wasn't accepted, mark as unavailable
 	if !accepted {
-		glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)
+		log.Trace(fmt.Sprintf("Peer %s: skeleton filling from header #%d not accepted", id, request.From))
 
 		miss := q.headerPeerMiss[id]
 		if miss == nil {
@@ -924,7 +923,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 
 		select {
 		case headerProcCh <- process:
-			glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
+			log.Trace(fmt.Sprintf("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number))
 			q.headerProced += len(process)
 		default:
 		}
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index e225b49965fc3e5fe29330d3c2d49740bf263da3..33f9dbe93a6fb71d4f15e6438d7a577935e8dff0 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -26,8 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )
 
@@ -221,7 +220,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
 // returning those that should be handled differently.
 func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
-	glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))
+	log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))
 
 	// Send the filter channel to the fetcher
 	filter := make(chan *headerFilterTask)
@@ -249,7 +248,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
 // FilterBodies extracts all the block bodies that were explicitly requested by
 // the fetcher, returning those that should be handled differently.
 func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
-	glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))
+	log.Trace(fmt.Sprintf("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)))
 
 	// Send the filter channel to the fetcher
 	filter := make(chan *bodyFilterTask)
@@ -324,14 +323,14 @@ func (f *Fetcher) loop() {
 
 			count := f.announces[notification.origin] + 1
 			if count > hashLimit {
-				glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
+				log.Debug(fmt.Sprintf("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit))
 				propAnnounceDOSMeter.Mark(1)
 				break
 			}
 			// If we have a valid block number, check that it's potentially useful
 			if notification.number > 0 {
 				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
+					log.Debug(fmt.Sprintf("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist))
 					propAnnounceDropMeter.Mark(1)
 					break
 				}
@@ -381,13 +380,15 @@ func (f *Fetcher) loop() {
 			}
 			// Send out all block header requests
 			for peer, hashes := range request {
-				if glog.V(logger.Detail) && len(hashes) > 0 {
-					list := "["
-					for _, hash := range hashes {
-						list += fmt.Sprintf("%x…, ", hash[:4])
-					}
-					list = list[:len(list)-2] + "]"
-					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
+				if len(hashes) > 0 {
+					log.Trace("", "msg", log.Lazy{Fn: func() string {
+						list := "["
+						for _, hash := range hashes {
+							list += fmt.Sprintf("%x…, ", hash[:4])
+						}
+						list = list[:len(list)-2] + "]"
+						return fmt.Sprintf("[eth/62] Peer %s: fetching headers %s", peer, list)
+					}})
 				}
 				// Create a closure of the fetch and schedule in on a new thread
 				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
@@ -421,14 +422,16 @@ func (f *Fetcher) loop() {
 			}
 			// Send out all block body requests
 			for peer, hashes := range request {
-				if glog.V(logger.Detail) && len(hashes) > 0 {
-					list := "["
-					for _, hash := range hashes {
-						list += fmt.Sprintf("%x…, ", hash[:4])
-					}
-					list = list[:len(list)-2] + "]"
+				if len(hashes) > 0 {
+					log.Trace("", "msg", log.Lazy{Fn: func() string {
+						list := "["
+						for _, hash := range hashes {
+							list += fmt.Sprintf("%x…, ", hash[:4])
+						}
+						list = list[:len(list)-2] + "]"
 
-					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
+						return fmt.Sprintf("[eth/62] Peer %s: fetching bodies %s", peer, list)
+					}})
 				}
 				// Create a closure of the fetch and schedule in on a new thread
 				if f.completingHook != nil {
@@ -462,7 +465,7 @@ func (f *Fetcher) loop() {
 				if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
 					// If the delivered header does not match the promised number, drop the announcer
 					if header.Number.Uint64() != announce.number {
-						glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
+						log.Trace(fmt.Sprintf("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()))
 						f.dropPeer(announce.origin)
 						f.forgetHash(hash)
 						continue
@@ -474,7 +477,7 @@ func (f *Fetcher) loop() {
 
 						// If the block is empty (header only), short circuit into the final import queue
 						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
-							glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
+							log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
 
 							block := types.NewBlockWithHeader(header)
 							block.ReceivedAt = task.time
@@ -486,7 +489,7 @@ func (f *Fetcher) loop() {
 						// Otherwise add to the list of blocks needing completion
 						incomplete = append(incomplete, announce)
 					} else {
-						glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
+						log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
 						f.forgetHash(hash)
 					}
 				} else {
@@ -617,14 +620,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 	// Ensure the peer isn't DOSing us
 	count := f.queues[peer] + 1
 	if count > blockLimit {
-		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
+		log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit))
 		propBroadcastDOSMeter.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
 	// Discard any past or too distant blocks
 	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
+		log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist))
 		propBroadcastDropMeter.Mark(1)
 		f.forgetHash(hash)
 		return
@@ -641,9 +644,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 		if f.queueChangeHook != nil {
 			f.queueChangeHook(op.block.Hash(), true)
 		}
-		if glog.V(logger.Debug) {
-			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
-		}
+		log.Debug("", "msg", log.Lazy{Fn: func() string {
+			return fmt.Sprintf("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
+		}})
 	}
 }
 
@@ -654,14 +657,14 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 	hash := block.Hash()
 
 	// Run the import on a new thread
-	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
+	log.Debug(fmt.Sprintf("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]))
 	go func() {
 		defer func() { f.done <- hash }()
 
 		// If the parent's unknown, abort insertion
 		parent := f.getBlock(block.ParentHash())
 		if parent == nil {
-			glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4])
+			log.Debug(fmt.Sprintf("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]))
 			return
 		}
 		// Quickly validate the header and propagate the block if it passes
@@ -676,13 +679,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 
 		default:
 			// Something went very wrong, drop the peer
-			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
+			log.Debug(fmt.Sprintf("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err))
 			f.dropPeer(peer)
 			return
 		}
 		// Run the actual import and log any issues
 		if _, err := f.insertChain(types.Blocks{block}); err != nil {
-			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
+			log.Warn(fmt.Sprintf("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err))
 			return
 		}
 		// If import succeeded, broadcast the block
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index eb2df4a96e5f2449e491cf3010b1383459477ef8..0e0b1b66ac2fd4f23d9058f16edc98cf72143da5 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -17,6 +17,7 @@
 package gasprice
 
 import (
+	"fmt"
 	"math/big"
 	"math/rand"
 	"sync"
@@ -25,8 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -176,7 +176,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
 	self.lastBase = newBase
 	self.lastBaseMutex.Unlock()
 
-	glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", i, newBase.Int64())
+	log.Trace(fmt.Sprintf("Processed block #%v, base price is %v\n", i, newBase.Int64()))
 }
 
 // returns the lowers possible price with which a tx was or could have been included
diff --git a/eth/handler.go b/eth/handler.go
index 0e7eed3520c052938f5a0068e1ff35239519adb3..bcb83ed90bc314c05485dc4c0578b6b187bd467d 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -33,8 +33,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/fetcher"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/params"
@@ -116,7 +115,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
 	}
 	// Figure out whether to allow fast sync or not
 	if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
-		glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
+		log.Info("blockchain not empty, fast sync disabled")
 		fastSync = false
 	}
 	if fastSync {
@@ -179,7 +178,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
 	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
 
 	if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
-		glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
+		log.Debug("Bad Block Reporting is enabled")
 		manager.badBlockReportingEnabled = true
 	}
 
@@ -200,12 +199,12 @@ func (pm *ProtocolManager) removePeer(id string) {
 	if peer == nil {
 		return
 	}
-	glog.V(logger.Debug).Infoln("Removing peer", id)
+	log.Debug(fmt.Sprint("Removing peer", id))
 
 	// Unregister the peer from the downloader and Ethereum peer set
 	pm.downloader.UnregisterPeer(id)
 	if err := pm.peers.Unregister(id); err != nil {
-		glog.V(logger.Error).Infoln("Removal failed:", err)
+		log.Error(fmt.Sprint("Removal failed:", err))
 	}
 	// Hard disconnect at the networking layer
 	if peer != nil {
@@ -227,7 +226,7 @@ func (pm *ProtocolManager) Start() {
 }
 
 func (pm *ProtocolManager) Stop() {
-	glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
+	log.Info("Stopping ethereum protocol handler...")
 
 	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
 	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
@@ -248,7 +247,7 @@ func (pm *ProtocolManager) Stop() {
 	// Wait for all peer handler goroutines and the loops to come down.
 	pm.wg.Wait()
 
-	glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
+	log.Info("Ethereum protocol handler stopped")
 }
 
 func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@@ -262,21 +261,21 @@ func (pm *ProtocolManager) handle(p *peer) error {
 		return p2p.DiscTooManyPeers
 	}
 
-	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
+	log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
 
 	// Execute the Ethereum handshake
 	td, head, genesis := pm.blockchain.Status()
 	if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
-		glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
+		log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
 		return err
 	}
 	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
 		rw.Init(p.version)
 	}
 	// Register the peer locally
-	glog.V(logger.Detail).Infof("%v: adding peer", p)
+	log.Trace(fmt.Sprintf("%v: adding peer", p))
 	if err := pm.peers.Register(p); err != nil {
-		glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
+		log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
 		return err
 	}
 	defer pm.removePeer(p.id)
@@ -297,7 +296,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 		}
 		// Start a timer to disconnect if the peer doesn't reply in time
 		p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
-			glog.V(logger.Debug).Infof("%v: timed out DAO fork-check, dropping", p)
+			log.Debug(fmt.Sprintf("%v: timed out DAO fork-check, dropping", p))
 			pm.removePeer(p.id)
 		})
 		// Make sure it's cleaned up if the peer dies off
@@ -311,7 +310,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	// main loop. handle incoming messages.
 	for {
 		if err := pm.handleMsg(p); err != nil {
-			glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
+			log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
 			return err
 		}
 	}
@@ -387,7 +386,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				)
 				if next <= current {
 					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
-					glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos)
+					log.Warn(fmt.Sprintf("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos))
 					unknown = true
 				} else {
 					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
@@ -435,7 +434,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 			// If we're seemingly on the same chain, disable the drop timer
 			if verifyDAO {
-				glog.V(logger.Debug).Infof("%v: seems to be on the same side of the DAO fork", p)
+				log.Debug(fmt.Sprintf("%v: seems to be on the same side of the DAO fork", p))
 				p.forkDrop.Stop()
 				p.forkDrop = nil
 				return nil
@@ -452,10 +451,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 
 				// Validate the header and either drop the peer or continue
 				if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
-					glog.V(logger.Debug).Infof("%v: verified to be on the other side of the DAO fork, dropping", p)
+					log.Debug(fmt.Sprintf("%v: verified to be on the other side of the DAO fork, dropping", p))
 					return err
 				}
-				glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p)
+				log.Debug(fmt.Sprintf("%v: verified to be on the same side of the DAO fork", p))
 				return nil
 			}
 			// Irrelevant of the fork checks, send the header to the fetcher just in case
@@ -464,7 +463,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		if len(headers) > 0 || !filter {
 			err := pm.downloader.DeliverHeaders(p.id, headers)
 			if err != nil {
-				glog.V(logger.Debug).Infoln(err)
+				log.Debug(fmt.Sprint(err))
 			}
 		}
 
@@ -517,7 +516,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		if len(trasactions) > 0 || len(uncles) > 0 || !filter {
 			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
 			if err != nil {
-				glog.V(logger.Debug).Infoln(err)
+				log.Debug(fmt.Sprint(err))
 			}
 		}
 
@@ -556,7 +555,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		// Deliver all to the downloader
 		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
-			glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err)
+			log.Debug(fmt.Sprintf("failed to deliver node state data: %v", err))
 		}
 
 	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
@@ -587,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 			// If known, encode and queue for response packet
 			if encoded, err := rlp.EncodeToBytes(results); err != nil {
-				glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
+				log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
 			} else {
 				receipts = append(receipts, encoded)
 				bytes += len(encoded)
@@ -603,7 +602,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		// Deliver all to the downloader
 		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
-			glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
+			log.Debug(fmt.Sprintf("failed to deliver receipts: %v", err))
 		}
 
 	case msg.Code == NewBlockHashesMsg:
@@ -696,7 +695,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
 			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
 		} else {
-			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
+			log.Error(fmt.Sprintf("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]))
 			return
 		}
 		// Send the block to a subset of our peers
@@ -704,14 +703,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 		for _, peer := range transfer {
 			peer.SendNewBlock(block, td)
 		}
-		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
+		log.Trace(fmt.Sprintf("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)))
 	}
 	// Otherwise if the block is indeed in out own chain, announce it
 	if pm.blockchain.HasBlock(hash) {
 		for _, peer := range peers {
 			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
 		}
-		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
+		log.Trace(fmt.Sprintf("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)))
 	}
 }
 
@@ -724,7 +723,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction)
 	for _, peer := range peers {
 		peer.SendTransactions(types.Transactions{tx})
 	}
-	glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
+	log.Trace(fmt.Sprint("broadcast tx to", len(peers), "peers"))
 }
 
 // Mined broadcast loop
diff --git a/eth/peer.go b/eth/peer.go
index aa85631ea22d1287f413f75bbe56595180e26365..fcf528af65cf804cbe22b3c06e29136ce69b1611 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -25,8 +25,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"gopkg.in/fatih/set.v0"
@@ -192,41 +191,41 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
 // RequestHeaders is a wrapper around the header query functions to fetch a
 // single header. It is used solely by the fetcher.
 func (p *peer) RequestOneHeader(hash common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash)
+	log.Debug(fmt.Sprintf("%v fetching a single header: %x", p, hash))
 	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
 }
 
 // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the hash of an origin block.
 func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
-	glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
+	log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
 	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
 }
 
 // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the number of an origin block.
 func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
-	glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
+	log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
 	return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
 }
 
 // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
 // specified.
 func (p *peer) RequestBodies(hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
+	log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
 	return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
 }
 
 // RequestNodeData fetches a batch of arbitrary data from a node's known state
 // data, corresponding to the specified hashes.
 func (p *peer) RequestNodeData(hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes))
+	log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(hashes)))
 	return p2p.Send(p.rw, GetNodeDataMsg, hashes)
 }
 
 // RequestReceipts fetches a batch of transaction receipts from a remote node.
 func (p *peer) RequestReceipts(hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
+	log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
 	return p2p.Send(p.rw, GetReceiptsMsg, hashes)
 }
 
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 3b805643334dcff5b87cdf61fad2344f41a4f6cd..c0458f2be4437ee9d575b33cca6a5c2a311e3ea7 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -30,8 +30,7 @@ import (
 )
 
 func init() {
-	// glog.SetToStderr(true)
-	// glog.SetV(6)
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 }
 
 var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
diff --git a/eth/sync.go b/eth/sync.go
index 373cc2054b0d342bbbae1695f0a4da905fedbe91..1075578b971aa36e812fbcd0653c3ac04c58a7e1 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -17,6 +17,7 @@
 package eth
 
 import (
+	"fmt"
 	"math/rand"
 	"sync/atomic"
 	"time"
@@ -24,8 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth/downloader"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 )
 
@@ -87,7 +87,7 @@ func (pm *ProtocolManager) txsyncLoop() {
 			delete(pending, s.p.ID())
 		}
 		// Send the pack in the background.
-		glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
+		log.Trace(fmt.Sprintf("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size))
 		sending = true
 		go func() { done <- pack.p.SendTransactions(pack.txs) }()
 	}
@@ -117,7 +117,7 @@ func (pm *ProtocolManager) txsyncLoop() {
 			sending = false
 			// Stop tracking peers that cause send failures.
 			if err != nil {
-				glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err)
+				log.Debug(fmt.Sprintf("%v: tx send failed: %v", pack.p.Peer, err))
 				delete(pending, pack.p.ID())
 			}
 			// Schedule the next send.
@@ -187,7 +187,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if atomic.LoadUint32(&pm.fastSync) == 1 {
 		// Disable fast sync if we indeed have something in our chain
 		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
-			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
+			log.Info("fast sync complete, auto disabling")
 			atomic.StoreUint32(&pm.fastSync, 0)
 		}
 	}
diff --git a/ethdb/database.go b/ethdb/database.go
index c0e92a87bcb8d3acca9c8c49669c7580792c561f..e82528f25af8ca9882871f78b479e7fb3cf55e53 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -17,14 +17,14 @@
 package ethdb
 
 import (
+	"fmt"
 	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
@@ -80,7 +80,7 @@ func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
 	if handles < 16 {
 		handles = 16
 	}
-	glog.V(logger.Info).Infof("Allotted %dMB cache and %d file handles to %s", cache, handles, file)
+	log.Info(fmt.Sprintf("Allotted %dMB cache and %d file handles to %s", cache, handles, file))
 
 	// Open the db and recover any potential corruptions
 	db, err := leveldb.OpenFile(file, &opt.Options{
@@ -167,16 +167,14 @@ func (self *LDBDatabase) Close() {
 		errc := make(chan error)
 		self.quitChan <- errc
 		if err := <-errc; err != nil {
-			glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err)
+			log.Error(fmt.Sprintf("metrics failure in '%s': %v\n", self.fn, err))
 		}
 	}
 	err := self.db.Close()
-	if glog.V(logger.Error) {
-		if err == nil {
-			glog.Infoln("closed db:", self.fn)
-		} else {
-			glog.Errorf("error closing db %s: %v", self.fn, err)
-		}
+	if err == nil {
+		log.Info(fmt.Sprint("closed db:", self.fn))
+	} else {
+		log.Error(fmt.Sprintf("error closing db %s: %v", self.fn, err))
 	}
 }
 
@@ -231,7 +229,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
 		// Retrieve the database stats
 		stats, err := self.db.GetProperty("leveldb.stats")
 		if err != nil {
-			glog.V(logger.Error).Infof("failed to read database stats: %v", err)
+			log.Error(fmt.Sprintf("failed to read database stats: %v", err))
 			return
 		}
 		// Find the compaction table, skip the header
@@ -240,7 +238,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
 			lines = lines[1:]
 		}
 		if len(lines) <= 3 {
-			glog.V(logger.Error).Infof("compaction table not found")
+			log.Error("compaction table not found")
 			return
 		}
 		lines = lines[3:]
@@ -256,7 +254,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
 			}
 			for idx, counter := range parts[3:] {
 				if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil {
-					glog.V(logger.Error).Infof("compaction entry parsing failed: %v", err)
+					log.Error(fmt.Sprintf("compaction entry parsing failed: %v", err))
 					return
 				} else {
 					counters[i%2][idx] += value
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 8692a43bdcf96ce9047c163f40644a81752f66c4..d09431d8719c5b128f5e0c2dc58f7e104f3da838 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -34,8 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/les"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -96,13 +95,13 @@ func (s *Service) Start(server *p2p.Server) error {
 	s.server = server
 	go s.loop()
 
-	glog.V(logger.Info).Infoln("Stats daemon started")
+	log.Info("Stats daemon started")
 	return nil
 }
 
 // Stop implements node.Service, terminating the monitoring and reporting daemon.
 func (s *Service) Stop() error {
-	glog.V(logger.Info).Infoln("Stats daemon stopped")
+	log.Info("Stats daemon stopped")
 	return nil
 }
 
@@ -131,7 +130,7 @@ func (s *Service) loop() {
 		}
 		conn, err := websocket.Dial(url, "", "http://localhost/")
 		if err != nil {
-			glog.V(logger.Warn).Infof("Stats server unreachable: %v", err)
+			log.Warn(fmt.Sprintf("Stats server unreachable: %v", err))
 			time.Sleep(10 * time.Second)
 			continue
 		}
@@ -139,7 +138,7 @@ func (s *Service) loop() {
 		out := json.NewEncoder(conn)
 
 		if err = s.login(in, out); err != nil {
-			glog.V(logger.Warn).Infof("Stats login failed: %v", err)
+			log.Warn(fmt.Sprintf("Stats login failed: %v", err))
 			conn.Close()
 			time.Sleep(10 * time.Second)
 			continue
@@ -148,12 +147,12 @@ func (s *Service) loop() {
 
 		// Send the initial stats so our node looks decent from the get go
 		if err = s.report(out); err != nil {
-			glog.V(logger.Warn).Infof("Initial stats report failed: %v", err)
+			log.Warn(fmt.Sprintf("Initial stats report failed: %v", err))
 			conn.Close()
 			continue
 		}
 		if err = s.reportHistory(out, nil); err != nil {
-			glog.V(logger.Warn).Infof("History report failed: %v", err)
+			log.Warn(fmt.Sprintf("History report failed: %v", err))
 			conn.Close()
 			continue
 		}
@@ -164,11 +163,11 @@ func (s *Service) loop() {
 			select {
 			case <-fullReport.C:
 				if err = s.report(out); err != nil {
-					glog.V(logger.Warn).Infof("Full stats report failed: %v", err)
+					log.Warn(fmt.Sprintf("Full stats report failed: %v", err))
 				}
 			case list := <-s.histCh:
 				if err = s.reportHistory(out, list); err != nil {
-					glog.V(logger.Warn).Infof("Block history report failed: %v", err)
+					log.Warn(fmt.Sprintf("Block history report failed: %v", err))
 				}
 			case head, ok := <-headSub.Chan():
 				if !ok { // node stopped
@@ -176,10 +175,10 @@ func (s *Service) loop() {
 					return
 				}
 				if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil {
-					glog.V(logger.Warn).Infof("Block stats report failed: %v", err)
+					log.Warn(fmt.Sprintf("Block stats report failed: %v", err))
 				}
 				if err = s.reportPending(out); err != nil {
-					glog.V(logger.Warn).Infof("Post-block transaction stats report failed: %v", err)
+					log.Warn(fmt.Sprintf("Post-block transaction stats report failed: %v", err))
 				}
 			case _, ok := <-txSub.Chan():
 				if !ok { // node stopped
@@ -195,7 +194,7 @@ func (s *Service) loop() {
 					}
 				}
 				if err = s.reportPending(out); err != nil {
-					glog.V(logger.Warn).Infof("Transaction stats report failed: %v", err)
+					log.Warn(fmt.Sprintf("Transaction stats report failed: %v", err))
 				}
 			}
 		}
@@ -216,16 +215,16 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
 		// Retrieve the next generic network packet and bail out on error
 		var msg map[string][]interface{}
 		if err := in.Decode(&msg); err != nil {
-			glog.V(logger.Warn).Infof("Failed to decode stats server message: %v", err)
+			log.Warn(fmt.Sprintf("Failed to decode stats server message: %v", err))
 			return
 		}
 		if len(msg["emit"]) == 0 {
-			glog.V(logger.Warn).Infof("Stats server sent non-broadcast: %v", msg)
+			log.Warn(fmt.Sprintf("Stats server sent non-broadcast: %v", msg))
 			return
 		}
 		command, ok := msg["emit"][0].(string)
 		if !ok {
-			glog.V(logger.Warn).Infof("Invalid stats server message type: %v", msg["emit"][0])
+			log.Warn(fmt.Sprintf("Invalid stats server message type: %v", msg["emit"][0]))
 			return
 		}
 		// If the message is a ping reply, deliver (someone must be listening!)
@@ -236,7 +235,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
 				continue
 			default:
 				// Ping routine dead, abort
-				glog.V(logger.Warn).Infof("Stats server pinger seems to have died")
+				log.Warn("Stats server pinger seems to have died")
 				return
 			}
 		}
@@ -245,12 +244,12 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
 			// Make sure the request is valid and doesn't crash us
 			request, ok := msg["emit"][1].(map[string]interface{})
 			if !ok {
-				glog.V(logger.Warn).Infof("Invalid history request: %v", msg["emit"][1])
+				log.Warn(fmt.Sprintf("Invalid history request: %v", msg["emit"][1]))
 				return
 			}
 			list, ok := request["list"].([]interface{})
 			if !ok {
-				glog.V(logger.Warn).Infof("Invalid history block list: %v", request["list"])
+				log.Warn(fmt.Sprintf("Invalid history block list: %v", request["list"]))
 				return
 			}
 			// Convert the block number list to an integer list
@@ -258,7 +257,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
 			for i, num := range list {
 				n, ok := num.(float64)
 				if !ok {
-					glog.V(logger.Warn).Infof("Invalid history block number: %v", num)
+					log.Warn(fmt.Sprintf("Invalid history block number: %v", num))
 					return
 				}
 				numbers[i] = uint64(n)
@@ -270,7 +269,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
 			}
 		}
 		// Report anything else and continue
-		glog.V(logger.Info).Infof("Unknown stats message: %v", msg)
+		log.Info(fmt.Sprintf("Unknown stats message: %v", msg))
 	}
 }
 
diff --git a/internal/debug/api.go b/internal/debug/api.go
index 96091541b5c1afb102ff3950e6197750f5253541..01126b41bfd93768792b626a6a5edfc8e5510889 100644
--- a/internal/debug/api.go
+++ b/internal/debug/api.go
@@ -22,6 +22,7 @@ package debug
 
 import (
 	"errors"
+	"fmt"
 	"io"
 	"os"
 	"os/user"
@@ -33,8 +34,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // Handler is the global debugging handler.
@@ -51,23 +51,22 @@ type HandlerT struct {
 	traceFile string
 }
 
-// Verbosity sets the glog verbosity ceiling.
-// The verbosity of individual packages and source files
-// can be raised using Vmodule.
+// Verbosity sets the log verbosity ceiling. The verbosity of individual packages
+// and source files can be raised using Vmodule.
 func (*HandlerT) Verbosity(level int) {
-	glog.SetV(level)
+	glogger.Verbosity(log.Lvl(level))
 }
 
-// Vmodule sets the glog verbosity pattern. See package
-// glog for details on pattern syntax.
+// Vmodule sets the log verbosity pattern. See package log for details on the
+// pattern syntax.
 func (*HandlerT) Vmodule(pattern string) error {
-	return glog.GetVModule().Set(pattern)
+	return glogger.Vmodule(pattern)
 }
 
-// BacktraceAt sets the glog backtrace location.
-// See package glog for details on pattern syntax.
+// BacktraceAt sets the log backtrace location. See package log for details on
+// the pattern syntax.
 func (*HandlerT) BacktraceAt(location string) error {
-	return glog.GetTraceLocation().Set(location)
+	return glogger.BacktraceAt(location)
 }
 
 // MemStats returns detailed runtime memory statistics.
@@ -112,7 +111,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
 	}
 	h.cpuW = f
 	h.cpuFile = file
-	glog.V(logger.Info).Infoln("CPU profiling started, writing to", h.cpuFile)
+	log.Info(fmt.Sprint("CPU profiling started, writing to", h.cpuFile))
 	return nil
 }
 
@@ -124,7 +123,7 @@ func (h *HandlerT) StopCPUProfile() error {
 	if h.cpuW == nil {
 		return errors.New("CPU profiling not in progress")
 	}
-	glog.V(logger.Info).Infoln("done writing CPU profile to", h.cpuFile)
+	log.Info(fmt.Sprint("done writing CPU profile to", h.cpuFile))
 	h.cpuW.Close()
 	h.cpuW = nil
 	h.cpuFile = ""
@@ -180,7 +179,7 @@ func (*HandlerT) Stacks() string {
 
 func writeProfile(name, file string) error {
 	p := pprof.Lookup(name)
-	glog.V(logger.Info).Infof("writing %d %s profile records to %s", p.Count(), name, file)
+	log.Info(fmt.Sprintf("writing %d %s profile records to %s", p.Count(), name, file))
 	f, err := os.Create(expandHome(file))
 	if err != nil {
 		return err
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index d7bbfae1ebb6696c7a94e667e7cc67df681baaf5..780aa1647b74636be50a4c3d6b7a4149793d85c0 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -20,28 +20,28 @@ import (
 	"fmt"
 	"net/http"
 	_ "net/http/pprof"
+	"os"
 	"runtime"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"gopkg.in/urfave/cli.v1"
 )
 
 var (
-	verbosityFlag = cli.GenericFlag{
+	verbosityFlag = cli.IntFlag{
 		Name:  "verbosity",
 		Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=core, 5=debug, 6=detail",
-		Value: glog.GetVerbosity(),
+		Value: 3,
 	}
-	vmoduleFlag = cli.GenericFlag{
+	vmoduleFlag = cli.StringFlag{
 		Name:  "vmodule",
 		Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=6,p2p=5)",
-		Value: glog.GetVModule(),
+		Value: "",
 	}
-	backtraceAtFlag = cli.GenericFlag{
+	backtraceAtFlag = cli.StringFlag{
 		Name:  "backtrace",
 		Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")",
-		Value: glog.GetTraceLocation(),
+		Value: "",
 	}
 	pprofFlag = cli.BoolFlag{
 		Name:  "pprof",
@@ -83,12 +83,16 @@ var Flags = []cli.Flag{
 	memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
 }
 
+var glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
+
 // Setup initializes profiling and logging based on the CLI flags.
 // It should be called as early as possible in the program.
 func Setup(ctx *cli.Context) error {
 	// logging
-	glog.CopyStandardLogTo("INFO")
-	glog.SetToStderr(true)
+	glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
+	glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
+	glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))
+	log.Root().SetHandler(glogger)
 
 	// profiling, tracing
 	runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
@@ -108,8 +112,8 @@ func Setup(ctx *cli.Context) error {
 	if ctx.GlobalBool(pprofFlag.Name) {
 		address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
 		go func() {
-			glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address)
-			glog.Errorln(http.ListenAndServe(address, nil))
+			log.Info(fmt.Sprintf("starting pprof server at http://%s/debug/pprof", address))
+			log.Error(fmt.Sprint(http.ListenAndServe(address, nil)))
 		}()
 	}
 	return nil
diff --git a/internal/debug/trace.go b/internal/debug/trace.go
index c0cf921ff0b07d39ca7bfff94d99bcf7bb3e2dda..5e4b9df840bb252bd110b84fabe276f96e58e15e 100644
--- a/internal/debug/trace.go
+++ b/internal/debug/trace.go
@@ -20,11 +20,11 @@ package debug
 
 import (
 	"errors"
+	"fmt"
 	"os"
 	"runtime/trace"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // StartGoTrace turns on tracing, writing to the given file.
@@ -44,7 +44,7 @@ func (h *HandlerT) StartGoTrace(file string) error {
 	}
 	h.traceW = f
 	h.traceFile = file
-	glog.V(logger.Info).Infoln("trace started, writing to", h.traceFile)
+	log.Info(fmt.Sprint("trace started, writing to", h.traceFile))
 	return nil
 }
 
@@ -56,7 +56,7 @@ func (h *HandlerT) StopGoTrace() error {
 	if h.traceW == nil {
 		return errors.New("trace not in progress")
 	}
-	glog.V(logger.Info).Infoln("done writing trace to", h.traceFile)
+	log.Info(fmt.Sprint("done writing trace to", h.traceFile))
 	h.traceW.Close()
 	h.traceW = nil
 	h.traceFile = ""
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 6b11cbc97d521f07e327cec6a12dd5eec4327390..ca31c9f4be9240cdcba6cd9f4ea0bf946f1a02d5 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -36,8 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -475,7 +474,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
 	if block != nil {
 		uncles := block.Uncles()
 		if index >= hexutil.Uint(len(uncles)) {
-			glog.V(logger.Debug).Infof("uncle block on index %d not found for block #%d", index, blockNr)
+			log.Debug(fmt.Sprintf("uncle block on index %d not found for block #%d", index, blockNr))
 			return nil, nil
 		}
 		block = types.NewBlockWithHeader(uncles[index])
@@ -491,7 +490,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b
 	if block != nil {
 		uncles := block.Uncles()
 		if index >= hexutil.Uint(len(uncles)) {
-			glog.V(logger.Debug).Infof("uncle block on index %d not found for block %s", index, blockHash.Hex())
+			log.Debug(fmt.Sprintf("uncle block on index %d not found for block %s", index, blockHash.Hex()))
 			return nil, nil
 		}
 		block = types.NewBlockWithHeader(uncles[index])
@@ -577,7 +576,7 @@ type CallArgs struct {
 }
 
 func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
-	defer func(start time.Time) { glog.V(logger.Debug).Infof("call took %v", time.Since(start)) }(time.Now())
+	defer func(start time.Time) { log.Debug(fmt.Sprintf("call took %v", time.Since(start))) }(time.Now())
 
 	state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
@@ -1003,7 +1002,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
 	var err error
 
 	if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
-		glog.V(logger.Debug).Infof("%v\n", err)
+		log.Debug(fmt.Sprintf("%v\n", err))
 		return nil, nil
 	} else if tx == nil {
 		return nil, nil
@@ -1015,7 +1014,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
 
 	blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash)
 	if err != nil {
-		glog.V(logger.Debug).Infof("%v\n", err)
+		log.Debug(fmt.Sprintf("%v\n", err))
 		return nil, nil
 	}
 
@@ -1032,7 +1031,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
 	var err error
 
 	if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
-		glog.V(logger.Debug).Infof("%v\n", err)
+		log.Debug(fmt.Sprintf("%v\n", err))
 		return nil, nil
 	} else if tx == nil {
 		return nil, nil
@@ -1045,19 +1044,19 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
 func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
 	receipt := core.GetReceipt(s.b.ChainDb(), txHash)
 	if receipt == nil {
-		glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex())
+		log.Debug(fmt.Sprintf("receipt not found for transaction %s", txHash.Hex()))
 		return nil, nil
 	}
 
 	tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
 	if err != nil {
-		glog.V(logger.Debug).Infof("%v\n", err)
+		log.Debug(fmt.Sprintf("%v\n", err))
 		return nil, nil
 	}
 
 	txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
 	if err != nil {
-		glog.V(logger.Debug).Infof("%v\n", err)
+		log.Debug(fmt.Sprintf("%v\n", err))
 		return nil, nil
 	}
 
@@ -1160,9 +1159,9 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
 		signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
 		from, _ := types.Sender(signer, tx)
 		addr := crypto.CreateAddress(from, tx.Nonce())
-		glog.V(logger.Info).Infof("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex())
+		log.Info(fmt.Sprintf("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex()))
 	} else {
-		glog.V(logger.Info).Infof("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex())
+		log.Info(fmt.Sprintf("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex()))
 	}
 	return tx.Hash(), nil
 }
@@ -1214,9 +1213,9 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
 			return "", err
 		}
 		addr := crypto.CreateAddress(from, tx.Nonce())
-		glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
+		log.Info(fmt.Sprintf("Tx(%x) created: %x\n", tx.Hash(), addr))
 	} else {
-		glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
+		log.Info(fmt.Sprintf("Tx(%x) to: %x\n", tx.Hash(), tx.To()))
 	}
 
 	return tx.Hash().Hex(), nil
@@ -1421,10 +1420,10 @@ func (api *PrivateDebugAPI) ChaindbCompact() error {
 		return fmt.Errorf("chaindbCompact does not work for memory databases")
 	}
 	for b := byte(0); b < 255; b++ {
-		glog.V(logger.Info).Infof("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1)
+		log.Info(fmt.Sprintf("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1))
 		err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}})
 		if err != nil {
-			glog.Errorf("compaction error: %v", err)
+			log.Error(fmt.Sprintf("compaction error: %v", err))
 			return err
 		}
 	}
diff --git a/les/backend.go b/les/backend.go
index 21ee084987602d35f024de97f4013aeded916d54..94563c29f7611a72901dc760041247ace79879fa 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -36,8 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/light"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
@@ -188,7 +187,7 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
 // Start implements node.Service, starting all internal goroutines needed by the
 // Ethereum protocol implementation.
 func (s *LightEthereum) Start(srvr *p2p.Server) error {
-	glog.V(logger.Info).Infof("WARNING: light client mode is an experimental feature")
+	log.Info(fmt.Sprintf("WARNING: light client mode is an experimental feature"))
 	s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
 	s.protocolManager.Start(srvr)
 	return nil
diff --git a/les/fetcher.go b/les/fetcher.go
index de706de5e00e5192b54c23a38b8b8a114ac7b179..dcaea87e82f4ae1122bafe7b10498e083c5fdf3d 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -18,6 +18,7 @@
 package les
 
 import (
+	"fmt"
 	"math/big"
 	"sync"
 	"time"
@@ -27,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/light"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -174,7 +174,7 @@ func (f *lightFetcher) syncLoop() {
 			f.reqMu.Unlock()
 			if ok {
 				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
-				glog.V(logger.Debug).Infof("hard timeout by peer %v", req.peer.id)
+				log.Debug(fmt.Sprintf("hard timeout by peer %v", req.peer.id))
 				go f.pm.removePeer(req.peer.id)
 			}
 		case resp := <-f.deliverChn:
@@ -192,13 +192,13 @@ func (f *lightFetcher) syncLoop() {
 			}
 			f.lock.Lock()
 			if !ok || !(f.syncing || f.processResponse(req, resp)) {
-				glog.V(logger.Debug).Infof("failed processing response by peer %v", resp.peer.id)
+				log.Debug(fmt.Sprintf("failed processing response by peer %v", resp.peer.id))
 				go f.pm.removePeer(resp.peer.id)
 			}
 			f.lock.Unlock()
 		case p := <-f.syncDone:
 			f.lock.Lock()
-			glog.V(logger.Debug).Infof("done synchronising with peer %v", p.id)
+			log.Debug(fmt.Sprintf("done synchronising with peer %v", p.id))
 			f.checkSyncedHeaders(p)
 			f.syncing = false
 			f.lock.Unlock()
@@ -239,17 +239,17 @@ func (f *lightFetcher) removePeer(p *peer) {
 func (f *lightFetcher) announce(p *peer, head *announceData) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
-	glog.V(logger.Debug).Infof("received announce from peer %v  #%d  %016x  reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth)
+	log.Debug(fmt.Sprintf("received announce from peer %v  #%d  %016x  reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth))
 
 	fp := f.peers[p]
 	if fp == nil {
-		glog.V(logger.Debug).Infof("announce: unknown peer")
+		log.Debug(fmt.Sprintf("announce: unknown peer"))
 		return
 	}
 
 	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
 		// announced tds should be strictly monotonic
-		glog.V(logger.Debug).Infof("non-monotonic Td from peer %v", p.id)
+		log.Debug(fmt.Sprintf("non-monotonic Td from peer %v", p.id))
 		go f.pm.removePeer(p.id)
 		return
 	}
@@ -355,14 +355,14 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bo
 func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) {
 	fp := f.peers[p]
 	if fp == nil {
-		glog.V(logger.Debug).Infof("request: unknown peer")
+		log.Debug(fmt.Sprintf("request: unknown peer"))
 		p.fcServer.DeassignRequest(reqID)
 		return 0, false
 	}
 	if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) {
 		f.syncing = true
 		go func() {
-			glog.V(logger.Debug).Infof("synchronising with peer %v", p.id)
+			log.Debug(fmt.Sprintf("synchronising with peer %v", p.id))
 			f.pm.synchronise(p)
 			f.syncDone <- p
 		}()
@@ -457,7 +457,7 @@ func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types
 // processResponse processes header download request responses, returns true if successful
 func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
 	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
-		glog.V(logger.Debug).Infof("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8])
+		log.Debug(fmt.Sprintf("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8]))
 		return false
 	}
 	headers := make([]*types.Header, req.amount)
@@ -468,14 +468,14 @@ func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) boo
 		if err == core.BlockFutureErr {
 			return true
 		}
-		glog.V(logger.Debug).Infof("InsertHeaderChain error: %v", err)
+		log.Debug(fmt.Sprintf("InsertHeaderChain error: %v", err))
 		return false
 	}
 	tds := make([]*big.Int, len(headers))
 	for i, header := range headers {
 		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
 		if td == nil {
-			glog.V(logger.Debug).Infof("TD not found for header %v of %v", i+1, len(headers))
+			log.Debug(fmt.Sprintf("TD not found for header %v of %v", i+1, len(headers)))
 			return false
 		}
 		tds[i] = td
@@ -490,7 +490,7 @@ func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
 	var maxTd *big.Int
 	for p, fp := range f.peers {
 		if !f.checkAnnouncedHeaders(fp, headers, tds) {
-			glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id)
+			log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
 			go f.pm.removePeer(p.id)
 		}
 		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
@@ -576,7 +576,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
 func (f *lightFetcher) checkSyncedHeaders(p *peer) {
 	fp := f.peers[p]
 	if fp == nil {
-		glog.V(logger.Debug).Infof("checkSyncedHeaders: unknown peer")
+		log.Debug(fmt.Sprintf("checkSyncedHeaders: unknown peer"))
 		return
 	}
 	n := fp.lastAnnounced
@@ -589,7 +589,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) {
 	}
 	// now n is the latest downloaded header after syncing
 	if n == nil {
-		glog.V(logger.Debug).Infof("synchronisation failed with peer %v", p.id)
+		log.Debug(fmt.Sprintf("synchronisation failed with peer %v", p.id))
 		go f.pm.removePeer(p.id)
 	} else {
 		header := f.chain.GetHeader(n.hash, n.number)
@@ -610,12 +610,12 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
 
 	fp := f.peers[p]
 	if fp == nil {
-		glog.V(logger.Debug).Infof("checkKnownNode: unknown peer")
+		log.Debug(fmt.Sprintf("checkKnownNode: unknown peer"))
 		return false
 	}
 	header := f.chain.GetHeader(n.hash, n.number)
 	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
-		glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id)
+		log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
 		go f.pm.removePeer(p.id)
 	}
 	if fp.confirmedTd != nil {
@@ -700,7 +700,7 @@ func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
 	now := mclock.Now()
 	fp := f.peers[p]
 	if fp == nil {
-		glog.V(logger.Debug).Infof("checkUpdateStats: unknown peer")
+		log.Debug(fmt.Sprintf("checkUpdateStats: unknown peer"))
 		return
 	}
 	if newEntry != nil && fp.firstUpdateStats == nil {
diff --git a/les/handler.go b/les/handler.go
index 42a45845d0d4f4c49f69ad57976274ec275b7b22..0d85d483998bb689a57ec4886826e9bf3a9f5a0f 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -34,8 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
@@ -199,7 +198,7 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
 	}
 
 	if lightSync {
-		glog.V(logger.Debug).Infof("LES: create downloader")
+		log.Debug(fmt.Sprintf("LES: create downloader"))
 		manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
 			nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
 			blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
@@ -230,12 +229,12 @@ func (pm *ProtocolManager) removePeer(id string) {
 		if err == errNotRegistered {
 			return
 		}
-		glog.V(logger.Error).Infoln("Removal failed:", err)
+		log.Error(fmt.Sprint("Removal failed: ", err))
 	}
-	glog.V(logger.Debug).Infoln("Removing peer", id)
+	log.Debug(fmt.Sprint("Removing peer ", id))
 
 	// Unregister the peer from the downloader and Ethereum peer set
-	glog.V(logger.Debug).Infof("LES: unregister peer %v", id)
+	log.Debug(fmt.Sprintf("LES: unregister peer %v", id))
 	if pm.lightSync {
 		pm.downloader.UnregisterPeer(id)
 		if pm.txrelay != nil {
@@ -268,9 +267,9 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
 	} else {
 		if topicDisc != nil {
 			go func() {
-				glog.V(logger.Info).Infoln("Starting registering topic", string(lesTopic))
+				log.Info(fmt.Sprint("Starting registering topic ", string(lesTopic)))
 				topicDisc.RegisterTopic(lesTopic, pm.quitSync)
-				glog.V(logger.Info).Infoln("Stopped registering topic", string(lesTopic))
+				log.Info(fmt.Sprint("Stopped registering topic ", string(lesTopic)))
 			}()
 		}
 		go func() {
@@ -283,7 +282,7 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
 func (pm *ProtocolManager) Stop() {
 	// Showing a log message. During download / process this could actually
 	// take between 5 to 10 seconds and therefor feedback is required.
-	glog.V(logger.Info).Infoln("Stopping light ethereum protocol handler...")
+	log.Info(fmt.Sprint("Stopping light ethereum protocol handler..."))
 
 	// Quit the sync loop.
 	// After this send has completed, no new peers will be accepted.
@@ -300,7 +299,7 @@ func (pm *ProtocolManager) Stop() {
 	// Wait for any process action
 	pm.wg.Wait()
 
-	glog.V(logger.Info).Infoln("Light ethereum protocol handler stopped")
+	log.Info(fmt.Sprint("Light ethereum protocol handler stopped"))
 }
 
 func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@@ -310,22 +309,22 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
 // handle is the callback invoked to manage the life cycle of a les peer. When
 // this function terminates, the peer is disconnected.
 func (pm *ProtocolManager) handle(p *peer) error {
-	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
+	log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
 
 	// Execute the LES handshake
 	td, head, genesis := pm.blockchain.Status()
 	headNum := core.GetBlockNumber(pm.chainDb, head)
 	if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
-		glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
+		log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
 		return err
 	}
 	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
 		rw.Init(p.version)
 	}
 	// Register the peer locally
-	glog.V(logger.Detail).Infof("%v: adding peer", p)
+	log.Trace(fmt.Sprintf("%v: adding peer", p))
 	if err := pm.peers.Register(p); err != nil {
-		glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
+		log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
 		return err
 	}
 	defer func() {
@@ -336,7 +335,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	}()
 
 	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
-	glog.V(logger.Debug).Infof("LES: register peer %v", p.id)
+	log.Debug(fmt.Sprintf("LES: register peer %v", p.id))
 	if pm.lightSync {
 		requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
 			reqID := getNextReqID()
@@ -390,7 +389,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	// main loop. handle incoming messages.
 	for {
 		if err := pm.handleMsg(p); err != nil {
-			glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
+			log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
 			return err
 		}
 	}
@@ -407,7 +406,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		return err
 	}
 
-	glog.V(logger.Debug).Infoln("msg:", msg.Code, msg.Size)
+	log.Debug(fmt.Sprint("msg: ", msg.Code, " ", msg.Size))
 
 	costs := p.fcCosts[msg.Code]
 	reject := func(reqCnt, maxCnt uint64) bool {
@@ -420,7 +419,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			cost = pm.server.defParams.BufLimit
 		}
 		if cost > bufValue {
-			glog.V(logger.Error).Infof("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge))
+			log.Error(fmt.Sprintf("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge)))
 			return true
 		}
 		return false
@@ -436,25 +435,25 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 	// Handle the message depending on its contents
 	switch msg.Code {
 	case StatusMsg:
-		glog.V(logger.Debug).Infof("<=== StatusMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== StatusMsg from peer %v", p.id))
 		// Status messages should never arrive after the handshake
 		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
 
 	// Block header query, collect the requested headers and reply
 	case AnnounceMsg:
-		glog.V(logger.Debug).Infoln("<=== AnnounceMsg from peer %v:", p.id)
+		log.Debug(fmt.Sprintf("<=== AnnounceMsg from peer %v:", p.id))
 
 		var req announceData
 		if err := msg.Decode(&req); err != nil {
 			return errResp(ErrDecode, "%v: %v", msg, err)
 		}
-		glog.V(logger.Detail).Infoln("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth)
+		log.Trace(fmt.Sprint("AnnounceMsg: ", req.Number, " ", req.Hash, " ", req.Td, " ", req.ReorgDepth))
 		if pm.fetcher != nil {
 			pm.fetcher.announce(p, &req)
 		}
 
 	case GetBlockHeadersMsg:
-		glog.V(logger.Debug).Infof("<=== GetBlockHeadersMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== GetBlockHeadersMsg from peer %v", p.id))
 		// Decode the complex header query
 		var req struct {
 			ReqID uint64
@@ -539,7 +538,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<=== BlockHeadersMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== BlockHeadersMsg from peer %v", p.id))
 		// A batch of headers arrived to one of our previous requests
 		var resp struct {
 			ReqID, BV uint64
@@ -554,12 +553,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		} else {
 			err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
 			if err != nil {
-				glog.V(logger.Debug).Infoln(err)
+				log.Debug(fmt.Sprint(err))
 			}
 		}
 
 	case GetBlockBodiesMsg:
-		glog.V(logger.Debug).Infof("<===  GetBlockBodiesMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<===  GetBlockBodiesMsg from peer %v", p.id))
 		// Decode the retrieval message
 		var req struct {
 			ReqID  uint64
@@ -596,7 +595,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<===  BlockBodiesMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<===  BlockBodiesMsg from peer %v", p.id))
 		// A batch of block bodies arrived to one of our previous requests
 		var resp struct {
 			ReqID, BV uint64
@@ -613,7 +612,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 
 	case GetCodeMsg:
-		glog.V(logger.Debug).Infof("<===  GetCodeMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<===  GetCodeMsg from peer %v", p.id))
 		// Decode the retrieval message
 		var req struct {
 			ReqID uint64
@@ -657,7 +656,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<=== CodeMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== CodeMsg from peer %v", p.id))
 		// A batch of node state data arrived to one of our previous requests
 		var resp struct {
 			ReqID, BV uint64
@@ -674,7 +673,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 
 	case GetReceiptsMsg:
-		glog.V(logger.Debug).Infof("<===  GetReceiptsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<===  GetReceiptsMsg from peer %v", p.id))
 		// Decode the retrieval message
 		var req struct {
 			ReqID  uint64
@@ -705,7 +704,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 			// If known, encode and queue for response packet
 			if encoded, err := rlp.EncodeToBytes(results); err != nil {
-				glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
+				log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
 			} else {
 				receipts = append(receipts, encoded)
 				bytes += len(encoded)
@@ -720,7 +719,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<=== ReceiptsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== ReceiptsMsg from peer %v", p.id))
 		// A batch of receipts arrived to one of our previous requests
 		var resp struct {
 			ReqID, BV uint64
@@ -737,7 +736,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 
 	case GetProofsMsg:
-		glog.V(logger.Debug).Infof("<=== GetProofsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== GetProofsMsg from peer %v", p.id))
 		// Decode the retrieval message
 		var req struct {
 			ReqID uint64
@@ -787,7 +786,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<=== ProofsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== ProofsMsg from peer %v", p.id))
 		// A batch of merkle proofs arrived to one of our previous requests
 		var resp struct {
 			ReqID, BV uint64
@@ -804,7 +803,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 
 	case GetHeaderProofsMsg:
-		glog.V(logger.Debug).Infof("<=== GetHeaderProofsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== GetHeaderProofsMsg from peer %v", p.id))
 		// Decode the retrieval message
 		var req struct {
 			ReqID uint64
@@ -848,7 +847,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrUnexpectedResponse, "")
 		}
 
-		glog.V(logger.Debug).Infof("<=== HeaderProofsMsg from peer %v", p.id)
+		log.Debug(fmt.Sprintf("<=== HeaderProofsMsg from peer %v", p.id))
 		var resp struct {
 			ReqID, BV uint64
 			Data      []ChtResp
@@ -885,7 +884,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
 
 	default:
-		glog.V(logger.Debug).Infof("<=== unknown message with code %d from peer %v", msg.Code, p.id)
+		log.Debug(fmt.Sprintf("<=== unknown message with code %d from peer %v", msg.Code, p.id))
 		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
 	}
 
diff --git a/les/odr.go b/les/odr.go
index 88c7d85a59ee08cfc705446b18f7769023aa83b0..78c7c1af44eeb7519774125984466db71016cb1e 100644
--- a/les/odr.go
+++ b/les/odr.go
@@ -19,14 +19,14 @@ package les
 import (
 	"crypto/rand"
 	"encoding/binary"
+	"fmt"
 	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/light"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/net/context"
 )
 
@@ -151,7 +151,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
 	select {
 	case <-delivered:
 	case <-time.After(hardRequestTimeout):
-		glog.V(logger.Debug).Infof("ODR hard request timeout from peer %v", peer.id)
+		log.Debug(fmt.Sprintf("ODR hard request timeout from peer %v", peer.id))
 		go self.removePeer(peer.id)
 	case <-self.stop:
 		return
@@ -237,7 +237,7 @@ func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err err
 		// retrieved from network, store in db
 		req.StoreResult(self.db)
 	} else {
-		glog.V(logger.Debug).Infof("networkRequest  err = %v", err)
+		log.Debug(fmt.Sprintf("networkRequest  err = %v", err))
 	}
 	return
 }
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 2987eb297ac0d6aaea1eda78d18bc6e0a592e951..5321a68cb4f8c8dadd7aa55d9fdc21c2056f2873 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -21,6 +21,7 @@ package les
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
@@ -28,8 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/light"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -74,7 +74,7 @@ func (self *BlockRequest) CanSend(peer *peer) bool {
 
 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
-	glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id)
+	log.Debug(fmt.Sprintf("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id))
 	return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
 }
 
@@ -82,39 +82,39 @@ func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
-	glog.V(logger.Debug).Infof("ODR: validating body of block %08x", self.Hash[:4])
+	log.Debug(fmt.Sprintf("ODR: validating body of block %08x", self.Hash[:4]))
 	if msg.MsgType != MsgBlockBodies {
-		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		log.Debug(fmt.Sprintf("ODR: invalid message type"))
 		return false
 	}
 	bodies := msg.Obj.([]*types.Body)
 	if len(bodies) != 1 {
-		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(bodies))
+		log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(bodies)))
 		return false
 	}
 	body := bodies[0]
 	header := core.GetHeader(db, self.Hash, self.Number)
 	if header == nil {
-		glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
+		log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
 		return false
 	}
 	txHash := types.DeriveSha(types.Transactions(body.Transactions))
 	if header.TxHash != txHash {
-		glog.V(logger.Debug).Infof("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4])
+		log.Debug(fmt.Sprintf("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]))
 		return false
 	}
 	uncleHash := types.CalcUncleHash(body.Uncles)
 	if header.UncleHash != uncleHash {
-		glog.V(logger.Debug).Infof("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4])
+		log.Debug(fmt.Sprintf("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]))
 		return false
 	}
 	data, err := rlp.EncodeToBytes(body)
 	if err != nil {
-		glog.V(logger.Debug).Infof("ODR: body RLP encode error: %v", err)
+		log.Debug(fmt.Sprintf("ODR: body RLP encode error: %v", err))
 		return false
 	}
 	self.Rlp = data
-	glog.V(logger.Debug).Infof("ODR: validation successful")
+	log.Debug(fmt.Sprintf("ODR: validation successful"))
 	return true
 }
 
@@ -134,7 +134,7 @@ func (self *ReceiptsRequest) CanSend(peer *peer) bool {
 
 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
-	glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id)
+	log.Debug(fmt.Sprintf("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id))
 	return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
 }
 
@@ -142,28 +142,28 @@ func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
-	glog.V(logger.Debug).Infof("ODR: validating receipts for block %08x", self.Hash[:4])
+	log.Debug(fmt.Sprintf("ODR: validating receipts for block %08x", self.Hash[:4]))
 	if msg.MsgType != MsgReceipts {
-		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		log.Debug(fmt.Sprintf("ODR: invalid message type"))
 		return false
 	}
 	receipts := msg.Obj.([]types.Receipts)
 	if len(receipts) != 1 {
-		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(receipts))
+		log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(receipts)))
 		return false
 	}
 	hash := types.DeriveSha(receipts[0])
 	header := core.GetHeader(db, self.Hash, self.Number)
 	if header == nil {
-		glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
+		log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
 		return false
 	}
 	if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
-		glog.V(logger.Debug).Infof("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4])
+		log.Debug(fmt.Sprintf("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]))
 		return false
 	}
 	self.Receipts = receipts[0]
-	glog.V(logger.Debug).Infof("ODR: validation successful")
+	log.Debug(fmt.Sprintf("ODR: validation successful"))
 	return true
 }
 
@@ -189,7 +189,7 @@ func (self *TrieRequest) CanSend(peer *peer) bool {
 
 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
-	glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id)
+	log.Debug(fmt.Sprintf("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id))
 	req := &ProofReq{
 		BHash:  self.Id.BlockHash,
 		AccKey: self.Id.AccKey,
@@ -202,24 +202,24 @@ func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
-	glog.V(logger.Debug).Infof("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4])
+	log.Debug(fmt.Sprintf("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]))
 
 	if msg.MsgType != MsgProofs {
-		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		log.Debug(fmt.Sprintf("ODR: invalid message type"))
 		return false
 	}
 	proofs := msg.Obj.([][]rlp.RawValue)
 	if len(proofs) != 1 {
-		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
+		log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
 		return false
 	}
 	_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
 	if err != nil {
-		glog.V(logger.Debug).Infof("ODR: merkle proof verification error: %v", err)
+		log.Debug(fmt.Sprintf("ODR: merkle proof verification error: %v", err))
 		return false
 	}
 	self.Proof = proofs[0]
-	glog.V(logger.Debug).Infof("ODR: validation successful")
+	log.Debug(fmt.Sprintf("ODR: validation successful"))
 	return true
 }
 
@@ -244,7 +244,7 @@ func (self *CodeRequest) CanSend(peer *peer) bool {
 
 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
-	glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id)
+	log.Debug(fmt.Sprintf("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id))
 	req := &CodeReq{
 		BHash:  self.Id.BlockHash,
 		AccKey: self.Id.AccKey,
@@ -256,23 +256,23 @@ func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
-	glog.V(logger.Debug).Infof("ODR: validating node data for hash %08x", self.Hash[:4])
+	log.Debug(fmt.Sprintf("ODR: validating node data for hash %08x", self.Hash[:4]))
 	if msg.MsgType != MsgCode {
-		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		log.Debug(fmt.Sprintf("ODR: invalid message type"))
 		return false
 	}
 	reply := msg.Obj.([][]byte)
 	if len(reply) != 1 {
-		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(reply))
+		log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(reply)))
 		return false
 	}
 	data := reply[0]
 	if hash := crypto.Keccak256Hash(data); self.Hash != hash {
-		glog.V(logger.Debug).Infof("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4])
+		log.Debug(fmt.Sprintf("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]))
 		return false
 	}
 	self.Data = data
-	glog.V(logger.Debug).Infof("ODR: validation successful")
+	log.Debug(fmt.Sprintf("ODR: validation successful"))
 	return true
 }
 
@@ -304,7 +304,7 @@ func (self *ChtRequest) CanSend(peer *peer) bool {
 
 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
-	glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id)
+	log.Debug(fmt.Sprintf("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id))
 	req := &ChtReq{
 		ChtNum:   self.ChtNum,
 		BlockNum: self.BlockNum,
@@ -316,15 +316,15 @@ func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
-	glog.V(logger.Debug).Infof("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum)
+	log.Debug(fmt.Sprintf("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum))
 
 	if msg.MsgType != MsgHeaderProofs {
-		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		log.Debug("ODR: invalid message type")
 		return false
 	}
 	proofs := msg.Obj.([]ChtResp)
 	if len(proofs) != 1 {
-		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
+		log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
 		return false
 	}
 	proof := proofs[0]
@@ -332,22 +332,22 @@ func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
 	binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
 	value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
 	if err != nil {
-		glog.V(logger.Debug).Infof("ODR: CHT merkle proof verification error: %v", err)
+		log.Debug(fmt.Sprintf("ODR: CHT merkle proof verification error: %v", err))
 		return false
 	}
 	var node light.ChtNode
 	if err := rlp.DecodeBytes(value, &node); err != nil {
-		glog.V(logger.Debug).Infof("ODR: error decoding CHT node: %v", err)
+		log.Debug(fmt.Sprintf("ODR: error decoding CHT node: %v", err))
 		return false
 	}
 	if node.Hash != proof.Header.Hash() {
-		glog.V(logger.Debug).Infof("ODR: CHT header hash does not match")
+		log.Debug("ODR: CHT header hash does not match")
 		return false
 	}
 
 	self.Proof = proof.Proof
 	self.Header = proof.Header
 	self.Td = node.Td
-	glog.V(logger.Debug).Infof("ODR: validation successful")
+	log.Debug("ODR: validation successful")
 	return true
 }
diff --git a/les/peer.go b/les/peer.go
index d5008ded112aa023827fe5db99cbbb5c2e4b6440..fc3591c781380e398b08690c0c1eae74bea87d96 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -27,8 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -196,51 +195,51 @@ func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
 // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the hash of an origin block.
 func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
-	glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
+	log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
 	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
 }
 
 // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the number of an origin block.
 func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
-	glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
+	log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
 	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
 }
 
 // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
 // specified.
 func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
+	log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
 	return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
 }
 
 // RequestCode fetches a batch of arbitrary data from a node's known state
 // data, corresponding to the specified hashes.
 func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
-	glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(reqs))
+	log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(reqs)))
 	return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
 }
 
 // RequestReceipts fetches a batch of transaction receipts from a remote node.
 func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
+	log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
 	return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
 }
 
 // RequestProofs fetches a batch of merkle proofs from a remote node.
 func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
-	glog.V(logger.Debug).Infof("%v fetching %v proofs", p, len(reqs))
+	log.Debug(fmt.Sprintf("%v fetching %v proofs", p, len(reqs)))
 	return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
 }
 
 // RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
 func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
-	glog.V(logger.Debug).Infof("%v fetching %v header proofs", p, len(reqs))
+	log.Debug(fmt.Sprintf("%v fetching %v header proofs", p, len(reqs)))
 	return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
 }
 
 func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
-	glog.V(logger.Debug).Infof("%v relaying %v txs", p, len(txs))
+	log.Debug(fmt.Sprintf("%v relaying %v txs", p, len(txs)))
 	reqID := getNextReqID()
 	p.fcServer.MustAssignRequest(reqID)
 	p.fcServer.SendRequest(reqID, cost)
diff --git a/les/server.go b/les/server.go
index c4c6fcab52b71e5ed78cac6216d0af20c9771d25..b04c9c4ca498b581740057270d97d01e238f5f93 100644
--- a/les/server.go
+++ b/les/server.go
@@ -19,6 +19,7 @@ package les
 
 import (
 	"encoding/binary"
+	"fmt"
 	"math"
 	"sync"
 	"time"
@@ -30,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
 	"github.com/ethereum/go-ethereum/light"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -292,7 +292,7 @@ func (pm *ProtocolManager) blockLoop() {
 						lastHead = header
 						lastBroadcastTd = td
 
-						glog.V(logger.Debug).Infoln("===> ", number, hash, td, reorg)
+						log.Debug(fmt.Sprint("===> ", number, hash, td, reorg))
 
 						announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
 						for _, p := range peers {
@@ -396,7 +396,7 @@ func makeCht(db ethdb.Database) bool {
 	} else {
 		lastChtNum++
 
-		glog.V(logger.Detail).Infof("cht: %d %064x", lastChtNum, root)
+		log.Trace(fmt.Sprintf("cht: %d %064x", lastChtNum, root))
 
 		storeChtRoot(db, lastChtNum, root)
 		var data [8]byte
diff --git a/les/serverpool.go b/les/serverpool.go
index 9735a718e55f15c5f1b2af97adc5aec8c9fb96da..95a8242b3279843d13f93f33fe735f5df3828d69 100644
--- a/les/serverpool.go
+++ b/les/serverpool.go
@@ -18,6 +18,7 @@
 package les
 
 import (
+	"fmt"
 	"io"
 	"math"
 	"math/rand"
@@ -28,8 +29,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
@@ -162,7 +162,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
 	if entry == nil {
 		entry = pool.findOrNewNode(p.ID(), ip, port)
 	}
-	glog.V(logger.Debug).Infof("connecting to %v, state: %v", p.id, entry.state)
+	log.Debug(fmt.Sprintf("connecting to %v, state: %v", p.id, entry.state))
 	if entry.state == psConnected || entry.state == psRegistered {
 		return nil
 	}
@@ -184,7 +184,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
 
 // registered should be called after a successful handshake
 func (pool *serverPool) registered(entry *poolEntry) {
-	glog.V(logger.Debug).Infof("registered %v", entry.id.String())
+	log.Debug(fmt.Sprintf("registered %v", entry.id.String()))
 	pool.lock.Lock()
 	defer pool.lock.Unlock()
 
@@ -202,7 +202,7 @@ func (pool *serverPool) registered(entry *poolEntry) {
 // can be updated optionally (not updated if no registration happened, in this case
 // only connection statistics are updated, just like in case of timeout)
 func (pool *serverPool) disconnect(entry *poolEntry) {
-	glog.V(logger.Debug).Infof("disconnected %v", entry.id.String())
+	log.Debug(fmt.Sprintf("disconnected %v", entry.id.String()))
 	pool.lock.Lock()
 	defer pool.lock.Unlock()
 
@@ -418,7 +418,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
 	now := mclock.Now()
 	entry := pool.entries[id]
 	if entry == nil {
-		glog.V(logger.Debug).Infof("discovered %v", id.String())
+		log.Debug(fmt.Sprintf("discovered %v", id.String()))
 		entry = &poolEntry{
 			id:         id,
 			addr:       make(map[string]*poolEntryAddress),
@@ -459,11 +459,11 @@ func (pool *serverPool) loadNodes() {
 	var list []*poolEntry
 	err = rlp.DecodeBytes(enc, &list)
 	if err != nil {
-		glog.V(logger.Debug).Infof("node list decode error: %v", err)
+		log.Debug(fmt.Sprintf("node list decode error: %v", err))
 		return
 	}
 	for _, e := range list {
-		glog.V(logger.Debug).Infof("loaded server stats %016x  fails: %v  connStats: %v / %v  delayStats: %v / %v  responseStats: %v / %v  timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight)
+		log.Debug(fmt.Sprintf("loaded server stats %016x  fails: %v  connStats: %v / %v  delayStats: %v / %v  responseStats: %v / %v  timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight))
 		pool.entries[e.id] = e
 		pool.knownQueue.setLatest(e)
 		pool.knownSelect.update((*knownEntry)(e))
@@ -568,7 +568,7 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
 		pool.newSelected++
 	}
 	addr := entry.addrSelect.choose().(*poolEntryAddress)
-	glog.V(logger.Debug).Infof("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected)
+	log.Debug(fmt.Sprintf("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected))
 	entry.dialed = addr
 	go func() {
 		pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
@@ -589,7 +589,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
 	if entry.state != psDialed {
 		return
 	}
-	glog.V(logger.Debug).Infof("timeout %v", entry.id.String()+"@"+entry.dialed.strKey())
+	log.Debug(fmt.Sprintf("timeout %v", entry.id.String()+"@"+entry.dialed.strKey()))
 	entry.state = psNotConnected
 	if entry.knownSelected {
 		pool.knownSelected--
diff --git a/light/lightchain.go b/light/lightchain.go
index 0d28ad2f454fa28442559e3edc3bb130e158f317..1f0fb9c345d940a7bba3ede7cc774674df90319f 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -17,6 +17,7 @@
 package light
 
 import (
+	"fmt"
 	"math/big"
 	"sync"
 	"sync/atomic"
@@ -27,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -101,7 +101,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
 		if err != nil {
 			return nil, err
 		}
-		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+		log.Info("WARNING: Wrote default ethereum genesis block")
 	}
 
 	if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
@@ -117,7 +117,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
 				Root:   common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
 			})
 		}
-		glog.V(logger.Info).Infoln("Added trusted CHT for mainnet")
+		log.Info("Added trusted CHT for mainnet")
 	} else {
 		if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
 			// add trusted CHT for testnet
@@ -125,7 +125,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
 				Number: 452,
 				Root:   common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"),
 			})
-			glog.V(logger.Info).Infoln("Added trusted CHT for testnet")
+			log.Info("Added trusted CHT for testnet")
 		} else {
 			DeleteTrustedCht(bc.chainDb)
 		}
@@ -137,9 +137,9 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for hash := range core.BadHashes {
 		if header := bc.GetHeaderByHash(hash); header != nil {
-			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+			log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
 			bc.SetHead(header.Number.Uint64() - 1)
-			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
+			log.Error("Chain rewind was successful, resuming normal operation")
 		}
 	}
 	return bc, nil
@@ -169,7 +169,7 @@ func (self *LightChain) loadLastState() error {
 	// Issue a status log and return
 	header := self.hc.CurrentHeader()
 	headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
-	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
+	log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd))
 
 	return nil
 }
@@ -246,10 +246,10 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
 
 	// Prepare the genesis block and reinitialise the chain
 	if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
-		glog.Fatalf("failed to write genesis block TD: %v", err)
+		log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
 	}
 	if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
-		glog.Fatalf("failed to write genesis block: %v", err)
+		log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
 	}
 	bc.genesisBlock = genesis
 	bc.hc.SetGenesis(bc.genesisBlock.Header())
@@ -346,7 +346,7 @@ func (bc *LightChain) Stop() {
 
 	bc.wg.Wait()
 
-	glog.V(logger.Info).Infoln("Chain manager stopped")
+	log.Info("Chain manager stopped")
 }
 
 // Rollback is designed to remove a chain of links from the database that aren't
@@ -406,15 +406,15 @@ func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
 
 		switch status {
 		case core.CanonStatTy:
-			if glog.V(logger.Debug) {
-				glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
-			}
+			log.Debug("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
+			}})
 			events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
 
 		case core.SideStatTy:
-			if glog.V(logger.Detail) {
-				glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
-			}
+			log.Trace("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
+			}})
 			events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
 
 		case core.SplitStatTy:
diff --git a/light/odr_util.go b/light/odr_util.go
index 7617116212c1ea5afd88373ab72fbe940f95f68f..e7f94db10ce54d6927fef049c828934f31075d17 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -19,6 +19,7 @@ package light
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -26,8 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"golang.org/x/net/context"
 )
@@ -149,7 +149,7 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
 	}
 	body := new(types.Body)
 	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
-		glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
+		log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
 		return nil, err
 	}
 	return body, nil
diff --git a/light/state.go b/light/state.go
index f19748e8958adccb2d11af5fb1a04622964bcbfd..1fb583c1d7cf3de50777c2dadfa464b702258af1 100644
--- a/light/state.go
+++ b/light/state.go
@@ -17,12 +17,12 @@
 package light
 
 import (
+	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/net/context"
 )
 
@@ -239,9 +239,9 @@ func (self *LightState) GetOrNewStateObject(ctx context.Context, addr common.Add
 
 // newStateObject creates a state object whether it exists in the state or not
 func (self *LightState) newStateObject(addr common.Address) *StateObject {
-	if glog.V(logger.Debug) {
-		glog.Infof("(+) %x\n", addr)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("(+) %x\n", addr)
+	}})
 
 	stateObject := NewStateObject(addr, self.odr)
 	self.stateObjects[addr.Str()] = stateObject
diff --git a/light/state_object.go b/light/state_object.go
index e876c15661f86170c8b61a5aed1e899303d7c6cf..03d4868cdcea13a33e02b91105a7e9b1f01d2cee 100644
--- a/light/state_object.go
+++ b/light/state_object.go
@@ -23,8 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"golang.org/x/net/context"
 )
@@ -109,9 +108,9 @@ func (self *StateObject) MarkForDeletion() {
 	self.remove = true
 	self.dirty = true
 
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
+	}})
 }
 
 // getAddr gets the storage value at the given address from the trie
@@ -158,18 +157,18 @@ func (self *StateObject) SetState(k, value common.Hash) {
 func (c *StateObject) AddBalance(amount *big.Int) {
 	c.SetBalance(new(big.Int).Add(c.balance, amount))
 
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
+	}})
 }
 
 // SubBalance subtracts the given amount from the account balance
 func (c *StateObject) SubBalance(amount *big.Int) {
 	c.SetBalance(new(big.Int).Sub(c.balance, amount))
 
-	if glog.V(logger.Debug) {
-		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
-	}
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
+	}})
 }
 
 // SetBalance sets the account balance to the given amount
diff --git a/light/txpool.go b/light/txpool.go
index bcdb6123de02fa413b2c327c4264fbfaeb086ab6..365f02d259a3d070108807a701eb957cec44b970 100644
--- a/light/txpool.go
+++ b/light/txpool.go
@@ -26,8 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"golang.org/x/net/context"
@@ -321,7 +320,7 @@ func (pool *TxPool) eventLoop() {
 func (pool *TxPool) Stop() {
 	close(pool.quit)
 	pool.events.Unsubscribe()
-	glog.V(logger.Info).Infoln("Transaction pool stopped")
+	log.Info("Transaction pool stopped")
 }
 
 // Stats returns the number of currently pending (locally created) transactions
@@ -417,7 +416,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
 		go self.eventMux.Post(core.TxPreEvent{Tx: tx})
 	}
 
-	if glog.V(logger.Debug) {
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
 		var toname string
 		if to := tx.To(); to != nil {
 			toname = common.Bytes2Hex(to[:4])
@@ -428,8 +427,8 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
 		// verified in ValidateTransaction.
 		f, _ := types.Sender(self.signer, tx)
 		from := common.Bytes2Hex(f[:4])
-		glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash)
-	}
+		return fmt.Sprintf("(t) %x => %s (%v) %x\n", from, toname, tx.Value(), hash)
+	}})
 
 	return nil
 }
@@ -464,11 +463,11 @@ func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
 
 	for _, tx := range txs {
 		if err := self.add(ctx, tx); err != nil {
-			glog.V(logger.Debug).Infoln("tx error:", err)
+			log.Debug(fmt.Sprint("tx error:", err))
 		} else {
 			sendTx = append(sendTx, tx)
 			h := tx.Hash()
-			glog.V(logger.Debug).Infof("tx %x\n", h[:4])
+			log.Debug(fmt.Sprintf("tx %x\n", h[:4]))
 		}
 	}
 
diff --git a/log/root.go b/log/root.go
index 6814cc2736d85b6bc918926c3bce641209ca8f87..12afbf8b737ad4adf4fbdb3aeada1b4b9a881114 100644
--- a/log/root.go
+++ b/log/root.go
@@ -23,7 +23,7 @@ func init() {
 	}
 
 	root = &logger{[]interface{}{}, new(swapHandler)}
-	root.SetHandler(StdoutHandler)
+	root.SetHandler(LvlFilterHandler(LvlInfo, StdoutHandler))
 }
 
 // New returns a new logger with the given context.
diff --git a/logger/glog/LICENSE b/logger/glog/LICENSE
deleted file mode 100644
index 37ec93a14fdcd0d6e525d97c0cfa6b314eaa98d8..0000000000000000000000000000000000000000
--- a/logger/glog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/logger/glog/README b/logger/glog/README
deleted file mode 100644
index c7b1e60cc154de7ac585183da757c52b75e3c827..0000000000000000000000000000000000000000
--- a/logger/glog/README
+++ /dev/null
@@ -1,44 +0,0 @@
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
-	http://code.google.com/p/google-glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
-	Package glog implements logging analogous to the Google-internal
-	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
-	Error, Fatal, plus formatting variants such as Infof. It
-	also provides V-style logging controlled by the -v and
-	-vmodule=file=2 flags.
-
-	Basic examples:
-
-		glog.Info("Prepare to repel boarders")
-
-		glog.Fatalf("Initialization failed: %s", err)
-
-	See the documentation for the V function for an explanation
-	of these examples:
-
-		if glog.V(2) {
-			glog.Info("Starting transaction...")
-		}
-
-		glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
diff --git a/logger/glog/glog.go b/logger/glog/glog.go
deleted file mode 100644
index 0b33527c32a0c4040db6f122ff60871bfe9c30a5..0000000000000000000000000000000000000000
--- a/logger/glog/glog.go
+++ /dev/null
@@ -1,1223 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
-// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
-//
-// Basic examples:
-//
-//	glog.Info("Prepare to repel boarders")
-//
-//	glog.Fatalf("Initialization failed: %s", err)
-//
-// See the documentation for the V function for an explanation of these examples:
-//
-//	if glog.V(2) {
-//		glog.Info("Starting transaction...")
-//	}
-//
-//	glog.V(2).Infoln("Processed", nItems, "elements")
-//
-// Log output is buffered and written periodically using Flush. Programs
-// should call Flush before exiting to guarantee all log output is written.
-//
-// By default, all log statements write to files in a temporary directory.
-// This package provides several flags that modify this behavior.
-// As a result, flag.Parse must be called before any logging is done.
-//
-//	-logtostderr=false
-//		Logs are written to standard error instead of to files.
-//	-alsologtostderr=false
-//		Logs are written to standard error as well as to files.
-//	-stderrthreshold=ERROR
-//		Log events at or above this severity are logged to standard
-//		error as well as to files.
-//	-log_dir=""
-//		Log files will be written to this directory instead of the
-//		default temporary directory.
-//
-//	Other flags provide aids to debugging.
-//
-//	-log_backtrace_at=""
-//		When set to a file and line number holding a logging statement,
-//		such as
-//			-log_backtrace_at=gopherflakes.go:234
-//		a stack trace will be written to the Info log whenever execution
-//		hits that statement. (Unlike with -vmodule, the ".go" must be
-//		present.)
-//	-v=0
-//		Enable V-leveled logging at the specified level.
-//	-vmodule=""
-//		The syntax of the argument is a comma-separated list of pattern=N,
-//		where pattern is a literal file name or "glob" pattern matching
-//		and N is a V level. For instance,
-//
-//			-vmodule=gopher.go=3
-//		sets the V level to 3 in all Go files named "gopher.go".
-//
-//			-vmodule=foo=3
-//		sets V to 3 in all files of any packages whose import path ends in "foo".
-//
-//			-vmodule=foo/*=3
-//		sets V to 3 in all files of any packages whose import path contains "foo".
-package glog
-
-import (
-	"bufio"
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	stdLog "log"
-	"os"
-	"regexp"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// severity identifies the sort of log: info, warning etc. It also implements
-// the flag.Value interface. The -stderrthreshold flag is of type severity and
-// should be modified only through the flag.Value interface. The values match
-// the corresponding constants in C++.
-type severity int32 // sync/atomic int32
-
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
-	infoLog severity = iota
-	warningLog
-	errorLog
-	fatalLog
-	numSeverity = 4
-)
-
-const severityChar = "IWEF"
-
-var severityName = []string{
-	infoLog:    "INFO",
-	warningLog: "WARNING",
-	errorLog:   "ERROR",
-	fatalLog:   "FATAL",
-}
-
-// these path prefixes are trimmed for display, but not when
-// matching vmodule filters.
-var trimPrefixes = []string{
-	"/github.com/ethereum/go-ethereum",
-	"/github.com/ethereum/ethash",
-}
-
-func trimToImportPath(file string) string {
-	if root := strings.LastIndex(file, "src/"); root != 0 {
-		file = file[root+3:]
-	}
-	return file
-}
-
-// SetV sets the global verbosity level
-func SetV(v int) {
-	logging.verbosity.set(Level(v))
-}
-
-// SetToStderr sets the global output style
-func SetToStderr(toStderr bool) {
-	logging.mu.Lock()
-	logging.toStderr = toStderr
-	logging.mu.Unlock()
-}
-
-// GetTraceLocation returns the global TraceLocation flag.
-func GetTraceLocation() *TraceLocation {
-	return &logging.traceLocation
-}
-
-// GetVModule returns the global verbosity pattern flag.
-func GetVModule() *moduleSpec {
-	return &logging.vmodule
-}
-
-// GetVerbosity returns the global verbosity level flag.
-func GetVerbosity() *Level {
-	return &logging.verbosity
-}
-
-// get returns the value of the severity.
-func (s *severity) get() severity {
-	return severity(atomic.LoadInt32((*int32)(s)))
-}
-
-// set sets the value of the severity.
-func (s *severity) set(val severity) {
-	atomic.StoreInt32((*int32)(s), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (s *severity) String() string {
-	return strconv.FormatInt(int64(*s), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (s *severity) Get() interface{} {
-	return *s
-}
-
-// Set is part of the flag.Value interface.
-func (s *severity) Set(value string) error {
-	var threshold severity
-	// Is it a known name?
-	if v, ok := severityByName(value); ok {
-		threshold = v
-	} else {
-		v, err := strconv.Atoi(value)
-		if err != nil {
-			return err
-		}
-		threshold = severity(v)
-	}
-	logging.stderrThreshold.set(threshold)
-	return nil
-}
-
-func severityByName(s string) (severity, bool) {
-	s = strings.ToUpper(s)
-	for i, name := range severityName {
-		if name == s {
-			return severity(i), true
-		}
-	}
-	return 0, false
-}
-
-// OutputStats tracks the number of output lines and bytes written.
-type OutputStats struct {
-	lines int64
-	bytes int64
-}
-
-// Lines returns the number of lines written.
-func (s *OutputStats) Lines() int64 {
-	return atomic.LoadInt64(&s.lines)
-}
-
-// Bytes returns the number of bytes written.
-func (s *OutputStats) Bytes() int64 {
-	return atomic.LoadInt64(&s.bytes)
-}
-
-// Stats tracks the number of lines of output and number of bytes
-// per severity level. Values must be read with atomic.LoadInt64.
-var Stats struct {
-	Info, Warning, Error OutputStats
-}
-
-var severityStats = [numSeverity]*OutputStats{
-	infoLog:    &Stats.Info,
-	warningLog: &Stats.Warning,
-	errorLog:   &Stats.Error,
-}
-
-// Level is exported because it appears in the arguments to V and is
-// the type of the v flag, which can be set programmatically.
-// It's a distinct type because we want to discriminate it from logType.
-// Variables of type level are only changed under logging.mu.
-// The -v flag is read only with atomic ops, so the state of the logging
-// module is consistent.
-
-// Level is treated as a sync/atomic int32.
-
-// Level specifies a level of verbosity for V logs. *Level implements
-// flag.Value; the -v flag is of type Level and should be modified
-// only through the flag.Value interface.
-type Level int32
-
-// get returns the value of the Level.
-func (l *Level) get() Level {
-	return Level(atomic.LoadInt32((*int32)(l)))
-}
-
-// set sets the value of the Level.
-func (l *Level) set(val Level) {
-	atomic.StoreInt32((*int32)(l), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (l *Level) String() string {
-	return strconv.FormatInt(int64(*l), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (l *Level) Get() interface{} {
-	return *l
-}
-
-// Set is part of the flag.Value interface.
-func (l *Level) Set(value string) error {
-	v, err := strconv.Atoi(value)
-	if err != nil {
-		return err
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	logging.setVState(Level(v), logging.vmodule.filter, false)
-	return nil
-}
-
-// moduleSpec represents the setting of the -vmodule flag.
-type moduleSpec struct {
-	filter []modulePat
-}
-
-// modulePat contains a filter for the -vmodule flag.
-// It holds a verbosity level and a file pattern to match.
-type modulePat struct {
-	pattern *regexp.Regexp
-	level   Level
-}
-
-func (m *moduleSpec) String() string {
-	// Lock because the type is not atomic. TODO: clean this up.
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	var b bytes.Buffer
-	for i, f := range m.filter {
-		if i > 0 {
-			b.WriteRune(',')
-		}
-		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
-	}
-	return b.String()
-}
-
-// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported.
-func (m *moduleSpec) Get() interface{} {
-	return nil
-}
-
-var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
-
-// Syntax: -vmodule=recordio=2,file=1,gfs*=3
-func (m *moduleSpec) Set(value string) error {
-	var filter []modulePat
-	for _, pat := range strings.Split(value, ",") {
-		if len(pat) == 0 {
-			// Empty strings such as from a trailing comma can be ignored.
-			continue
-		}
-		patLev := strings.Split(pat, "=")
-		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
-			return errVmoduleSyntax
-		}
-		pattern := patLev[0]
-		v, err := strconv.Atoi(patLev[1])
-		if err != nil {
-			return errors.New("syntax error: expect comma-separated list of filename=N")
-		}
-		if v < 0 {
-			return errors.New("negative value for vmodule level")
-		}
-		if v == 0 {
-			continue // Ignore. It's harmless but no point in paying the overhead.
-		}
-		// TODO: check syntax of filter?
-		re, _ := compileModulePattern(pattern)
-		filter = append(filter, modulePat{re, Level(v)})
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	logging.setVState(logging.verbosity, filter, true)
-	return nil
-}
-
-// compiles a vmodule pattern to a regular expression.
-func compileModulePattern(pat string) (*regexp.Regexp, error) {
-	re := ".*"
-	for _, comp := range strings.Split(pat, "/") {
-		if comp == "*" {
-			re += "(/.*)?"
-		} else if comp != "" {
-			// TODO: maybe return error if comp contains *
-			re += "/" + regexp.QuoteMeta(comp)
-		}
-	}
-	if !strings.HasSuffix(pat, ".go") {
-		re += "/[^/]+\\.go"
-	}
-	return regexp.Compile(re + "$")
-}
-
-// traceLocation represents the setting of the -log_backtrace_at flag.
-type TraceLocation struct {
-	file string
-	line int
-}
-
-// isSet reports whether the trace location has been specified.
-// logging.mu is held.
-func (t *TraceLocation) isSet() bool {
-	return t.line > 0
-}
-
-// match reports whether the specified file and line matches the trace location.
-// The argument file name is the full path, not the basename specified in the flag.
-// logging.mu is held.
-func (t *TraceLocation) match(file string, line int) bool {
-	if t.line != line {
-		return false
-	}
-	if i := strings.LastIndex(file, "/"); i >= 0 {
-		file = file[i+1:]
-	}
-	return t.file == file
-}
-
-func (t *TraceLocation) String() string {
-	// Lock because the type is not atomic. TODO: clean this up.
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	return fmt.Sprintf("%s:%d", t.file, t.line)
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported
-func (t *TraceLocation) Get() interface{} {
-	return nil
-}
-
-var errTraceSyntax = errors.New("syntax error: expect file.go:234")
-
-// Syntax: -log_backtrace_at=gopherflakes.go:234
-// Note that unlike vmodule the file extension is included here.
-func (t *TraceLocation) Set(value string) error {
-	if value == "" {
-		// Unset.
-		logging.mu.Lock()
-		t.line = 0
-		t.file = ""
-		logging.mu.Unlock()
-		return nil
-	}
-
-	fields := strings.Split(value, ":")
-	if len(fields) != 2 {
-		return errTraceSyntax
-	}
-	file, line := fields[0], fields[1]
-	if !strings.Contains(file, ".") {
-		return errTraceSyntax
-	}
-	v, err := strconv.Atoi(line)
-	if err != nil {
-		return errTraceSyntax
-	}
-	if v <= 0 {
-		return errors.New("negative or zero value for level")
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	t.line = v
-	t.file = file
-	return nil
-}
-
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
-	Flush() error
-	Sync() error
-	io.Writer
-}
-
-func init() {
-	//flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
-	//flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
-	//flag.Var(&logging.verbosity, "v", "log level for V logs")
-	//flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
-	//flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
-	//flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-
-	// Default stderrThreshold is ERROR.
-	logging.stderrThreshold = errorLog
-	logging.setVState(3, nil, false)
-	go logging.flushDaemon()
-}
-
-// Flush flushes all pending log I/O.
-func Flush() {
-	logging.lockAndFlushAll()
-}
-
-// loggingT collects all the global state of the logging setup.
-type loggingT struct {
-	// Boolean flags. Not handled atomically because the flag.Value interface
-	// does not let us avoid the =true, and that shorthand is necessary for
-	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
-	toStderr     bool // The -logtostderr flag.
-	alsoToStderr bool // The -alsologtostderr flag.
-
-	// Level flag. Handled atomically.
-	stderrThreshold severity // The -stderrthreshold flag.
-
-	// freeList is a list of byte buffers, maintained under freeListMu.
-	freeList *buffer
-	// freeListMu maintains the free list. It is separate from the main mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	freeListMu sync.Mutex
-
-	// mu protects the remaining elements of this structure and is
-	// used to synchronize logging.
-	mu sync.Mutex
-	// file holds writer for each of the log types.
-	file [numSeverity]flushSyncWriter
-	// pcs is used in V to avoid an allocation when computing the caller's PC.
-	pcs [1]uintptr
-	// vmap is a cache of the V Level for each V() call site, identified by PC.
-	// It is wiped whenever the vmodule flag changes state.
-	vmap map[uintptr]Level
-	// filterLength stores the length of the vmodule filter chain. If greater
-	// than zero, it means vmodule is enabled. It may be read safely
-	// using sync.LoadInt32, but is only modified under mu.
-	filterLength int32
-	// traceLocation is the state of the -log_backtrace_at flag.
-	traceLocation TraceLocation
-	// These flags are modified only under lock, although verbosity may be fetched
-	// safely using atomic.LoadInt32.
-	vmodule   moduleSpec // The state of the -vmodule flag.
-	verbosity Level      // V logging level, the value of the -v flag/
-}
-
-// buffer holds a byte Buffer for reuse. The zero value is ready for use.
-type buffer struct {
-	bytes.Buffer
-	tmp  [64]byte // temporary byte array for creating headers.
-	next *buffer
-}
-
-var logging loggingT
-
-// setVState sets a consistent state for V logging.
-// l.mu is held.
-func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
-	// Turn verbosity off so V will not fire while we are in transition.
-	logging.verbosity.set(0)
-	// Ditto for filter length.
-	atomic.StoreInt32(&logging.filterLength, 0)
-
-	// Set the new filters and wipe the pc->Level map if the filter has changed.
-	if setFilter {
-		logging.vmodule.filter = filter
-		logging.vmap = make(map[uintptr]Level)
-	}
-
-	// Things are consistent now, so enable filtering and verbosity.
-	// They are enabled in order opposite to that in V.
-	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
-	logging.verbosity.set(verbosity)
-}
-
-// getBuffer returns a new, ready-to-use buffer.
-func (l *loggingT) getBuffer() *buffer {
-	l.freeListMu.Lock()
-	b := l.freeList
-	if b != nil {
-		l.freeList = b.next
-	}
-	l.freeListMu.Unlock()
-	if b == nil {
-		b = new(buffer)
-	} else {
-		b.next = nil
-		b.Reset()
-	}
-	return b
-}
-
-// putBuffer returns a buffer to the free list.
-func (l *loggingT) putBuffer(b *buffer) {
-	if b.Len() >= 256 {
-		// Let big buffers die a natural death.
-		return
-	}
-	l.freeListMu.Lock()
-	b.next = l.freeList
-	l.freeList = b
-	l.freeListMu.Unlock()
-}
-
-var timeNow = time.Now // Stubbed out for testing.
-
-/*
-header formats a log header as defined by the C++ implementation.
-It returns a buffer containing the formatted header and the user's file and line number.
-The depth specifies how many stack frames above lives the source line to be identified in the log message.
-
-Log lines have this form:
-	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
-where the fields are defined as follows:
-	L                A single character, representing the log level (eg 'I' for INFO)
-	mm               The month (zero padded; ie May is '05')
-	dd               The day (zero padded)
-	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
-	threadid         The space-padded thread ID as returned by GetTID()
-	file             The file name
-	line             The line number
-	msg              The user-supplied message
-*/
-func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
-	_, file, line, ok := runtime.Caller(3 + depth)
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		file = trimToImportPath(file)
-		for _, p := range trimPrefixes {
-			if strings.HasPrefix(file, p) {
-				file = file[len(p):]
-				break
-			}
-		}
-		file = file[1:] // drop '/'
-	}
-	return l.formatHeader(s, file, line), file, line
-}
-
-// formatHeader formats a log header using the provided file name and line number.
-func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
-	now := timeNow()
-	if line < 0 {
-		line = 0 // not a real line number, but acceptable to someDigits
-	}
-	if s > fatalLog {
-		s = infoLog // for safety.
-	}
-	buf := l.getBuffer()
-
-	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
-	// It's worth about 3X. Fprintf is hard.
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	buf.tmp[0] = severityChar[s]
-	buf.twoDigits(1, int(month))
-	buf.twoDigits(3, day)
-	buf.tmp[5] = ' '
-	buf.twoDigits(6, hour)
-	buf.tmp[8] = ':'
-	buf.twoDigits(9, minute)
-	buf.tmp[11] = ':'
-	buf.twoDigits(12, second)
-	buf.tmp[14] = '.'
-	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
-	buf.tmp[21] = ' '
-	buf.Write(buf.tmp[:22])
-	buf.WriteString(file)
-	buf.tmp[0] = ':'
-	n := buf.someDigits(1, line)
-	buf.tmp[n+1] = ']'
-	buf.tmp[n+2] = ' '
-	buf.Write(buf.tmp[:n+3])
-	return buf
-}
-
-// Some custom tiny helper functions to print the log header efficiently.
-
-const digits = "0123456789"
-
-// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
-func (buf *buffer) twoDigits(i, d int) {
-	buf.tmp[i+1] = digits[d%10]
-	d /= 10
-	buf.tmp[i] = digits[d%10]
-}
-
-// nDigits formats an n-digit integer at buf.tmp[i],
-// padding with pad on the left.
-// It assumes d >= 0.
-func (buf *buffer) nDigits(n, i, d int, pad byte) {
-	j := n - 1
-	for ; j >= 0 && d > 0; j-- {
-		buf.tmp[i+j] = digits[d%10]
-		d /= 10
-	}
-	for ; j >= 0; j-- {
-		buf.tmp[i+j] = pad
-	}
-}
-
-// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
-func (buf *buffer) someDigits(i, d int) int {
-	// Print into the top, then copy down. We know there's space for at least
-	// a 10-digit number.
-	j := len(buf.tmp)
-	for {
-		j--
-		buf.tmp[j] = digits[d%10]
-		d /= 10
-		if d == 0 {
-			break
-		}
-	}
-	return copy(buf.tmp[i:], buf.tmp[j:])
-}
-
-func (l *loggingT) println(s severity, args ...interface{}) {
-	buf, file, line := l.header(s, 0)
-	fmt.Fprintln(buf, args...)
-	l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) print(s severity, args ...interface{}) {
-	l.printDepth(s, 1, args...)
-}
-
-func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
-	buf, file, line := l.header(s, depth)
-	fmt.Fprint(buf, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) printfmt(s severity, format string, args ...interface{}) {
-	buf, file, line := l.header(s, 0)
-	fmt.Fprintf(buf, format, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, false)
-}
-
-// printWithFileLine behaves like print but uses the provided file and line number.  If
-// alsoLogToStderr is true, the log message always appears on standard error; it
-// will also appear in the log file unless --logtostderr is set.
-func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
-	buf := l.formatHeader(s, file, line)
-	fmt.Fprint(buf, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, alsoToStderr)
-}
-
-// output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
-	l.mu.Lock()
-	if l.traceLocation.isSet() {
-		if l.traceLocation.match(file, line) {
-			buf.Write(stacks(false))
-		}
-	}
-	data := buf.Bytes()
-	if l.toStderr {
-		os.Stderr.Write(data)
-	} else {
-		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
-			os.Stderr.Write(data)
-		}
-		if l.file[s] == nil {
-			if err := l.createFiles(s); err != nil {
-				os.Stderr.Write(data) // Make sure the message appears somewhere.
-				l.exit(err)
-			}
-		}
-		switch s {
-		case fatalLog:
-			l.file[fatalLog].Write(data)
-			fallthrough
-		case errorLog:
-			l.file[errorLog].Write(data)
-			fallthrough
-		case warningLog:
-			l.file[warningLog].Write(data)
-			fallthrough
-		case infoLog:
-			l.file[infoLog].Write(data)
-		}
-	}
-	if s == fatalLog {
-		// If we got here via Exit rather than Fatal, print no stacks.
-		if atomic.LoadUint32(&fatalNoStacks) > 0 {
-			l.mu.Unlock()
-			timeoutFlush(10 * time.Second)
-			os.Exit(1)
-		}
-		// Dump all goroutine stacks before exiting.
-		// First, make sure we see the trace for the current goroutine on standard error.
-		// If -logtostderr has been specified, the loop below will do that anyway
-		// as the first stack in the full dump.
-		if !l.toStderr {
-			os.Stderr.Write(stacks(false))
-		}
-		// Write the stack trace for all goroutines to the files.
-		trace := stacks(true)
-		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
-		for log := fatalLog; log >= infoLog; log-- {
-			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
-				f.Write(trace)
-			}
-		}
-		l.mu.Unlock()
-		timeoutFlush(10 * time.Second)
-		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
-	}
-	l.putBuffer(buf)
-	l.mu.Unlock()
-	if stats := severityStats[s]; stats != nil {
-		atomic.AddInt64(&stats.lines, 1)
-		atomic.AddInt64(&stats.bytes, int64(len(data)))
-	}
-}
-
-// timeoutFlush calls Flush and returns when it completes or after timeout
-// elapses, whichever happens first.  This is needed because the hooks invoked
-// by Flush may deadlock when glog.Fatal is called from a hook that holds
-// a lock.
-func timeoutFlush(timeout time.Duration) {
-	done := make(chan bool, 1)
-	go func() {
-		Flush() // calls logging.lockAndFlushAll()
-		done <- true
-	}()
-	select {
-	case <-done:
-	case <-time.After(timeout):
-		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
-	}
-}
-
-// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
-func stacks(all bool) []byte {
-	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
-	n := 10000
-	if all {
-		n = 100000
-	}
-	var trace []byte
-	for i := 0; i < 5; i++ {
-		trace = make([]byte, n)
-		nbytes := runtime.Stack(trace, all)
-		if nbytes < len(trace) {
-			return trace[:nbytes]
-		}
-		n *= 2
-	}
-	return trace
-}
-
-// logExitFunc provides a simple mechanism to override the default behavior
-// of exiting on error. Used in testing and to guarantee we reach a required exit
-// for fatal logs. Instead, exit could be a function rather than a method but that
-// would make its use clumsier.
-var logExitFunc func(error)
-
-// exit is called if there is trouble creating or writing log files.
-// It flushes the logs and exits the program; there's no point in hanging around.
-// l.mu is held.
-func (l *loggingT) exit(err error) {
-	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
-	// If logExitFunc is set, we do that instead of exiting.
-	if logExitFunc != nil {
-		logExitFunc(err)
-		return
-	}
-	l.flushAll()
-	os.Exit(2)
-}
-
-// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
-// file's Sync method and providing a wrapper for the Write method that provides log
-// file rotation. There are conflicting methods, so the file cannot be embedded.
-// l.mu is held for all its methods.
-type syncBuffer struct {
-	logger *loggingT
-	*bufio.Writer
-	file   *os.File
-	sev    severity
-	nbytes uint64 // The number of bytes written to this file
-}
-
-func (sb *syncBuffer) Sync() error {
-	return sb.file.Sync()
-}
-
-func (sb *syncBuffer) Write(p []byte) (n int, err error) {
-	if sb.nbytes+uint64(len(p)) >= MaxSize {
-		if err := sb.rotateFile(time.Now()); err != nil {
-			sb.logger.exit(err)
-		}
-	}
-	n, err = sb.Writer.Write(p)
-	sb.nbytes += uint64(n)
-	if err != nil {
-		sb.logger.exit(err)
-	}
-	return
-}
-
-// rotateFile closes the syncBuffer's file and starts a new one.
-func (sb *syncBuffer) rotateFile(now time.Time) error {
-	if sb.file != nil {
-		sb.Flush()
-		sb.file.Close()
-	}
-	var err error
-	sb.file, _, err = create(severityName[sb.sev], now)
-	sb.nbytes = 0
-	if err != nil {
-		return err
-	}
-
-	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
-
-	// Write header.
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
-	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
-	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
-	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
-	n, err := sb.file.Write(buf.Bytes())
-	sb.nbytes += uint64(n)
-	return err
-}
-
-// bufferSize sizes the buffer associated with each log file. It's large
-// so that log records can accumulate without the logging thread blocking
-// on disk I/O. The flushDaemon will block instead.
-const bufferSize = 256 * 1024
-
-// createFiles creates all the log files for severity from sev down to infoLog.
-// l.mu is held.
-func (l *loggingT) createFiles(sev severity) error {
-	now := time.Now()
-	// Files are created in decreasing severity order, so as soon as we find one
-	// has already been created, we can stop.
-	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
-		sb := &syncBuffer{
-			logger: l,
-			sev:    s,
-		}
-		if err := sb.rotateFile(now); err != nil {
-			return err
-		}
-		l.file[s] = sb
-	}
-	return nil
-}
-
-const flushInterval = 30 * time.Second
-
-// flushDaemon periodically flushes the log file buffers.
-func (l *loggingT) flushDaemon() {
-	for range time.NewTicker(flushInterval).C {
-		l.lockAndFlushAll()
-	}
-}
-
-// lockAndFlushAll is like flushAll but locks l.mu first.
-func (l *loggingT) lockAndFlushAll() {
-	l.mu.Lock()
-	l.flushAll()
-	l.mu.Unlock()
-}
-
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
-// l.mu is held.
-func (l *loggingT) flushAll() {
-	// Flush from fatal down, in case there's trouble flushing.
-	for s := fatalLog; s >= infoLog; s-- {
-		file := l.file[s]
-		if file != nil {
-			file.Flush() // ignore error
-			file.Sync()  // ignore error
-		}
-	}
-}
-
-// CopyStandardLogTo arranges for messages written to the Go "log" package's
-// default logs to also appear in the Google logs for the named and lower
-// severities.  Subsequent changes to the standard log's default output location
-// or format may break this behavior.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
-// recognized, CopyStandardLogTo panics.
-func CopyStandardLogTo(name string) {
-	sev, ok := severityByName(name)
-	if !ok {
-		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
-	}
-	// Set a log format that captures the user's file and line:
-	//   d.go:23: message
-	stdLog.SetFlags(stdLog.Lshortfile)
-	stdLog.SetOutput(logBridge(sev))
-}
-
-// logBridge provides the Write method that enables CopyStandardLogTo to connect
-// Go's standard logs to the logs provided by this package.
-type logBridge severity
-
-// Write parses the standard logging line and passes its components to the
-// logger for severity(lb).
-func (lb logBridge) Write(b []byte) (n int, err error) {
-	var (
-		file = "???"
-		line = 1
-		text string
-	)
-	// Split "d.go:23: message" into "d.go", "23", and "message".
-	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
-		text = fmt.Sprintf("bad log format: %s", b)
-	} else {
-		file = string(parts[0])
-		text = string(parts[2][1:]) // skip leading space
-		line, err = strconv.Atoi(string(parts[1]))
-		if err != nil {
-			text = fmt.Sprintf("bad line number: %s", b)
-			line = 1
-		}
-	}
-	// printWithFileLine with alsoToStderr=true, so standard log messages
-	// always appear on standard error.
-	logging.printWithFileLine(severity(lb), file, line, true, text)
-	return len(b), nil
-}
-
-// setV computes and remembers the V level for a given PC
-// when vmodule is enabled.
-// File pattern matching takes the basename of the file, stripped
-// of its .go suffix, and uses filepath.Match, which is a little more
-// general than the *? matching used in C++.
-// l.mu is held.
-func (l *loggingT) setV(pc uintptr) Level {
-	fn := runtime.FuncForPC(pc)
-	file, _ := fn.FileLine(pc)
-	file = trimToImportPath(file)
-	for _, filter := range l.vmodule.filter {
-		if filter.pattern.MatchString(file) {
-			l.vmap[pc] = filter.level
-			return filter.level
-		}
-	}
-	l.vmap[pc] = 0
-	return 0
-}
-
-// Verbose is a boolean type that implements Infof (like Printf) etc.
-// See the documentation of V for more information.
-type Verbose bool
-
-// V reports whether verbosity at the call site is at least the requested level.
-// The returned value is a boolean of type Verbose, which implements Info, Infoln
-// and Infof. These methods will write to the Info log if called.
-// Thus, one may write either
-//	if glog.V(2) { glog.Info("log this") }
-// or
-//	glog.V(2).Info("log this")
-// The second form is shorter but the first is cheaper if logging is off because it does
-// not evaluate its arguments.
-//
-// Whether an individual call to V generates a log record depends on the setting of
-// the -v and --vmodule flags; both are off by default. If the level in the call to
-// V is at least the value of -v, or of -vmodule for the source file containing the
-// call, the V call will log.
-func V(level Level) Verbose {
-	// This function tries hard to be cheap unless there's work to do.
-	// The fast path is two atomic loads and compares.
-
-	// Here is a cheap but safe test to see if V logging is enabled globally.
-	if logging.verbosity.get() >= level {
-		return Verbose(true)
-	}
-
-	// It's off globally but it vmodule may still be set.
-	// Here is another cheap but safe test to see if vmodule is enabled.
-	if atomic.LoadInt32(&logging.filterLength) > 0 {
-		// Now we need a proper lock to use the logging structure. The pcs field
-		// is shared so we must lock before accessing it. This is fairly expensive,
-		// but if V logging is enabled we're slow anyway.
-		logging.mu.Lock()
-		defer logging.mu.Unlock()
-		if runtime.Callers(2, logging.pcs[:]) == 0 {
-			return Verbose(false)
-		}
-		v, ok := logging.vmap[logging.pcs[0]]
-		if !ok {
-			v = logging.setV(logging.pcs[0])
-		}
-		return Verbose(v >= level)
-	}
-	return Verbose(false)
-}
-
-// Info is equivalent to the global Info function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Info(args ...interface{}) {
-	if v {
-		logging.print(infoLog, args...)
-	}
-}
-
-// Infoln is equivalent to the global Infoln function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infoln(args ...interface{}) {
-	if v {
-		logging.println(infoLog, args...)
-	}
-}
-
-// Infof is equivalent to the global Infof function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infof(format string, args ...interface{}) {
-	if v {
-		logging.printfmt(infoLog, format, args...)
-	}
-}
-
-// Info logs to the INFO log.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Info(args ...interface{}) {
-	logging.print(infoLog, args...)
-}
-
-// InfoDepth acts as Info but uses depth to determine which call frame to log.
-// InfoDepth(0, "msg") is the same as Info("msg").
-func InfoDepth(depth int, args ...interface{}) {
-	logging.printDepth(infoLog, depth, args...)
-}
-
-// Infoln logs to the INFO log.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Infoln(args ...interface{}) {
-	logging.print(infoLog, args...)
-}
-
-// Infof logs to the INFO log.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Infof(format string, args ...interface{}) {
-	logging.printfmt(infoLog, format, args...)
-}
-
-// Warning logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Warning(args ...interface{}) {
-	logging.print(warningLog, args...)
-}
-
-// WarningDepth acts as Warning but uses depth to determine which call frame to log.
-// WarningDepth(0, "msg") is the same as Warning("msg").
-func WarningDepth(depth int, args ...interface{}) {
-	logging.printDepth(warningLog, depth, args...)
-}
-
-// Warningln logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Warningln(args ...interface{}) {
-	logging.println(warningLog, args...)
-}
-
-// Warningf logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Warningf(format string, args ...interface{}) {
-	logging.printfmt(warningLog, format, args...)
-}
-
-// Error logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Error(args ...interface{}) {
-	logging.print(errorLog, args...)
-}
-
-// ErrorDepth acts as Error but uses depth to determine which call frame to log.
-// ErrorDepth(0, "msg") is the same as Error("msg").
-func ErrorDepth(depth int, args ...interface{}) {
-	logging.printDepth(errorLog, depth, args...)
-}
-
-// Errorln logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Errorln(args ...interface{}) {
-	logging.println(errorLog, args...)
-}
-
-// Errorf logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Errorf(format string, args ...interface{}) {
-	logging.printfmt(errorLog, format, args...)
-}
-
-// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Fatal(args ...interface{}) {
-	logging.print(fatalLog, args...)
-}
-
-// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
-// FatalDepth(0, "msg") is the same as Fatal("msg").
-func FatalDepth(depth int, args ...interface{}) {
-	logging.printDepth(fatalLog, depth, args...)
-}
-
-// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Fatalln(args ...interface{}) {
-	logging.println(fatalLog, args...)
-}
-
-// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Fatalf(format string, args ...interface{}) {
-	logging.printfmt(fatalLog, format, args...)
-}
-
-// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
-// It allows Exit and relatives to use the Fatal logs.
-var fatalNoStacks uint32
-
-// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Exit(args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.print(fatalLog, args...)
-}
-
-// ExitDepth acts as Exit but uses depth to determine which call frame to log.
-// ExitDepth(0, "msg") is the same as Exit("msg").
-func ExitDepth(depth int, args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.printDepth(fatalLog, depth, args...)
-}
-
-// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-func Exitln(args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.println(fatalLog, args...)
-}
-
-// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Exitf(format string, args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.printfmt(fatalLog, format, args...)
-}
diff --git a/logger/glog/glog_file.go b/logger/glog/glog_file.go
deleted file mode 100644
index 2fc96eb4e1588bee0b99dde6a913ad798cf8b98f..0000000000000000000000000000000000000000
--- a/logger/glog/glog_file.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// File I/O for logs.
-
-package glog
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"os/user"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-)
-
-// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
-
-// logDirs lists the candidate directories for new log files.
-var logDirs []string
-
-// If non-empty, overrides the choice of directory in which to write logs.
-// See createLogDirs for the full list of possible destinations.
-//var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
-var logDir *string = new(string)
-
-func SetLogDir(str string) {
-	*logDir = str
-}
-
-func createLogDirs() {
-	if *logDir != "" {
-		logDirs = append(logDirs, *logDir)
-	}
-	logDirs = append(logDirs, os.TempDir())
-}
-
-var (
-	pid      = os.Getpid()
-	program  = filepath.Base(os.Args[0])
-	host     = "unknownhost"
-	userName = "unknownuser"
-)
-
-func init() {
-	h, err := os.Hostname()
-	if err == nil {
-		host = shortHostname(h)
-	}
-
-	current, err := user.Current()
-	if err == nil {
-		userName = current.Username
-	}
-
-	// Sanitize userName since it may contain filepath separators on Windows.
-	userName = strings.Replace(userName, `\`, "_", -1)
-}
-
-// shortHostname returns its argument, truncating at the first period.
-// For instance, given "www.google.com" it returns "www".
-func shortHostname(hostname string) string {
-	if i := strings.Index(hostname, "."); i >= 0 {
-		return hostname[:i]
-	}
-	return hostname
-}
-
-// logName returns a new log file name containing tag, with start time t, and
-// the name for the symlink for tag.
-func logName(tag string, t time.Time) (name, link string) {
-	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
-		program,
-		host,
-		userName,
-		tag,
-		t.Year(),
-		t.Month(),
-		t.Day(),
-		t.Hour(),
-		t.Minute(),
-		t.Second(),
-		pid)
-	return name, program + "." + tag
-}
-
-var onceLogDirs sync.Once
-
-// create creates a new log file and returns the file and its filename, which
-// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
-// successfully, create also attempts to update the symlink for that tag, ignoring
-// errors.
-func create(tag string, t time.Time) (f *os.File, filename string, err error) {
-	onceLogDirs.Do(createLogDirs)
-	if len(logDirs) == 0 {
-		return nil, "", errors.New("log: no log dirs")
-	}
-	name, link := logName(tag, t)
-	var lastErr error
-	for _, dir := range logDirs {
-		fname := filepath.Join(dir, name)
-		f, err := os.Create(fname)
-		if err == nil {
-			symlink := filepath.Join(dir, link)
-			os.Remove(symlink)        // ignore err
-			os.Symlink(name, symlink) // ignore err
-			return f, fname, nil
-		}
-		lastErr = err
-	}
-	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
-}
diff --git a/logger/glog/glog_test.go b/logger/glog/glog_test.go
deleted file mode 100644
index b58f3d6426aadd4e1ab81e854fb9562c1ada682c..0000000000000000000000000000000000000000
--- a/logger/glog/glog_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package glog
-
-import (
-	"bytes"
-	"fmt"
-	stdLog "log"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-)
-
-// Test that shortHostname works as advertised.
-func TestShortHostname(t *testing.T) {
-	for hostname, expect := range map[string]string{
-		"":                "",
-		"host":            "host",
-		"host.google.com": "host",
-	} {
-		if got := shortHostname(hostname); expect != got {
-			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
-		}
-	}
-}
-
-// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
-type flushBuffer struct {
-	bytes.Buffer
-}
-
-func (f *flushBuffer) Flush() error {
-	return nil
-}
-
-func (f *flushBuffer) Sync() error {
-	return nil
-}
-
-// swap sets the log writers and returns the old array.
-func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	old = l.file
-	for i, w := range writers {
-		logging.file[i] = w
-	}
-	return
-}
-
-// newBuffers sets the log writers to all new byte buffers and returns the old array.
-func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
-	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
-}
-
-// contents returns the specified log value as a string.
-func contents(s severity) string {
-	return logging.file[s].(*flushBuffer).String()
-}
-
-// contains reports whether the string is contained in the log.
-func contains(s severity, str string, t *testing.T) bool {
-	return strings.Contains(contents(s), str)
-}
-
-// setFlags configures the logging flags how the test expects them.
-func setFlags() {
-	logging.toStderr = false
-}
-
-// Test that Info works as advertised.
-func TestInfo(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	Info("test")
-	if !contains(infoLog, "I", t) {
-		t.Errorf("Info has wrong character: %q", contents(infoLog))
-	}
-	if !contains(infoLog, "test", t) {
-		t.Error("Info failed")
-	}
-}
-
-func TestInfoDepth(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-
-	f := func() { InfoDepth(1, "depth-test1") }
-
-	// The next three lines must stay together
-	_, _, wantLine, _ := runtime.Caller(0)
-	InfoDepth(0, "depth-test0")
-	f()
-
-	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
-	if len(msgs) != 2 {
-		t.Fatalf("Got %d lines, expected 2", len(msgs))
-	}
-
-	for i, m := range msgs {
-		if !strings.HasPrefix(m, "I") {
-			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
-		}
-		w := fmt.Sprintf("depth-test%d", i)
-		if !strings.Contains(m, w) {
-			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
-		}
-
-		// pull out the line number (between : and ])
-		msg := m[strings.LastIndex(m, ":")+1:]
-		x := strings.Index(msg, "]")
-		if x < 0 {
-			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
-			continue
-		}
-		line, err := strconv.Atoi(msg[:x])
-		if err != nil {
-			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
-			continue
-		}
-		wantLine++
-		if wantLine != line {
-			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
-		}
-	}
-}
-
-func init() {
-	CopyStandardLogTo("INFO")
-}
-
-// Test that CopyStandardLogTo panics on bad input.
-func TestCopyStandardLogToPanic(t *testing.T) {
-	defer func() {
-		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
-			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
-		}
-	}()
-	CopyStandardLogTo("LOG")
-}
-
-// Test that using the standard log package logs to INFO.
-func TestStandardLog(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	stdLog.Print("test")
-	if !contains(infoLog, "I", t) {
-		t.Errorf("Info has wrong character: %q", contents(infoLog))
-	}
-	if !contains(infoLog, "test", t) {
-		t.Error("Info failed")
-	}
-}
-
-// Test that the header has the correct format.
-func TestHeader(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
-	timeNow = func() time.Time {
-		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
-	}
-	pid = 1234
-	Info("test")
-	var line int
-	format := "I0102 15:04:05.067890 logger/glog/glog_test.go:%d] test\n"
-	n, err := fmt.Sscanf(contents(infoLog), format, &line)
-	if n != 1 || err != nil {
-		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
-	}
-	// Scanf treats multiple spaces as equivalent to a single space,
-	// so check for correct space-padding also.
-	want := fmt.Sprintf(format, line)
-	if contents(infoLog) != want {
-		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
-	}
-}
-
-// Test that an Error log goes to Warning and Info.
-// Even in the Info log, the source character will be E, so the data should
-// all be identical.
-func TestError(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	Error("test")
-	if !contains(errorLog, "E", t) {
-		t.Errorf("Error has wrong character: %q", contents(errorLog))
-	}
-	if !contains(errorLog, "test", t) {
-		t.Error("Error failed")
-	}
-	str := contents(errorLog)
-	if !contains(warningLog, str, t) {
-		t.Error("Warning failed")
-	}
-	if !contains(infoLog, str, t) {
-		t.Error("Info failed")
-	}
-}
-
-// Test that a Warning log goes to Info.
-// Even in the Info log, the source character will be W, so the data should
-// all be identical.
-func TestWarning(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	Warning("test")
-	if !contains(warningLog, "W", t) {
-		t.Errorf("Warning has wrong character: %q", contents(warningLog))
-	}
-	if !contains(warningLog, "test", t) {
-		t.Error("Warning failed")
-	}
-	str := contents(warningLog)
-	if !contains(infoLog, str, t) {
-		t.Error("Info failed")
-	}
-}
-
-// Test that a V log goes to Info.
-func TestV(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	logging.verbosity.Set("2")
-	defer logging.verbosity.Set("0")
-	V(2).Info("test")
-	if !contains(infoLog, "I", t) {
-		t.Errorf("Info has wrong character: %q", contents(infoLog))
-	}
-	if !contains(infoLog, "test", t) {
-		t.Error("Info failed")
-	}
-}
-
-// Test that a vmodule enables a log in this file.
-func TestVmoduleOn(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	logging.vmodule.Set("glog_test.go=2")
-	defer logging.vmodule.Set("")
-	if !V(1) {
-		t.Error("V not enabled for 1")
-	}
-	if !V(2) {
-		t.Error("V not enabled for 2")
-	}
-	if V(3) {
-		t.Error("V enabled for 3")
-	}
-	V(2).Info("test")
-	if !contains(infoLog, "I", t) {
-		t.Errorf("Info has wrong character: %q", contents(infoLog))
-	}
-	if !contains(infoLog, "test", t) {
-		t.Error("Info failed")
-	}
-}
-
-// Test that a vmodule of another file does not enable a log in this file.
-func TestVmoduleOff(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	logging.vmodule.Set("notthisfile=2")
-	defer logging.vmodule.Set("")
-	for i := 1; i <= 3; i++ {
-		if V(Level(i)) {
-			t.Errorf("V enabled for %d", i)
-		}
-	}
-	V(2).Info("test")
-	if contents(infoLog) != "" {
-		t.Error("V logged incorrectly")
-	}
-}
-
-var patternTests = []struct{ input, want string }{
-	{"foo/bar/x.go", ".*/foo/bar/x\\.go$"},
-	{"foo/*/x.go", ".*/foo(/.*)?/x\\.go$"},
-	{"foo/*", ".*/foo(/.*)?/[^/]+\\.go$"},
-}
-
-func TestCompileModulePattern(t *testing.T) {
-	for _, test := range patternTests {
-		re, err := compileModulePattern(test.input)
-		if err != nil {
-			t.Fatalf("%s: %v", test.input, err)
-		}
-		if re.String() != test.want {
-			t.Errorf("mismatch for %q: got %q, want %q", test.input, re.String(), test.want)
-		}
-	}
-}
-
-// vGlobs are patterns that match/don't match this file at V=2.
-var vGlobs = map[string]bool{
-	// Easy to test the numeric match here.
-	"glog_test.go=1": false, // If -vmodule sets V to 1, V(2) will fail.
-	"glog_test.go=2": true,
-	"glog_test.go=3": true, // If -vmodule sets V to 1, V(3) will succeed.
-
-	// Import path prefix matching
-	"logger/glog=1": false,
-	"logger/glog=2": true,
-	"logger/glog=3": true,
-
-	// Import path glob matching
-	"logger/*=1": false,
-	"logger/*=2": true,
-	"logger/*=3": true,
-
-	// These all use 2 and check the patterns.
-	"*=2": true,
-}
-
-// Test that vmodule globbing works as advertised.
-func testVmoduleGlob(pat string, match bool, t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	defer logging.vmodule.Set("")
-	logging.vmodule.Set(pat)
-	if V(2) != Verbose(match) {
-		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
-	}
-}
-
-// Test that a vmodule globbing works as advertised.
-func TestVmoduleGlob(t *testing.T) {
-	for glob, match := range vGlobs {
-		testVmoduleGlob(glob, match, t)
-	}
-}
-
-func TestRollover(t *testing.T) {
-	setFlags()
-	var err error
-	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
-	logExitFunc = func(e error) {
-		err = e
-	}
-	defer func(previous uint64) { MaxSize = previous }(MaxSize)
-	MaxSize = 512
-
-	Info("x") // Be sure we have a file.
-	info, ok := logging.file[infoLog].(*syncBuffer)
-	if !ok {
-		t.Fatal("info wasn't created")
-	}
-	if err != nil {
-		t.Fatalf("info has initial error: %v", err)
-	}
-	fname0 := info.file.Name()
-	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
-	if err != nil {
-		t.Fatalf("info has error after big write: %v", err)
-	}
-
-	// Make sure the next log file gets a file name with a different
-	// time stamp.
-	//
-	// TODO: determine whether we need to support subsecond log
-	// rotation.  C++ does not appear to handle this case (nor does it
-	// handle Daylight Savings Time properly).
-	time.Sleep(1 * time.Second)
-
-	Info("x") // create a new file
-	if err != nil {
-		t.Fatalf("error after rotation: %v", err)
-	}
-	fname1 := info.file.Name()
-	if fname0 == fname1 {
-		t.Errorf("info.f.Name did not change: %v", fname0)
-	}
-	if info.nbytes >= MaxSize {
-		t.Errorf("file size was not reset: %d", info.nbytes)
-	}
-}
-
-func TestLogBacktraceAt(t *testing.T) {
-	setFlags()
-	defer logging.swap(logging.newBuffers())
-	// The peculiar style of this code simplifies line counting and maintenance of the
-	// tracing block below.
-	var infoLine string
-	setTraceLocation := func(file string, line int, ok bool, delta int) {
-		if !ok {
-			t.Fatal("could not get file:line")
-		}
-		_, file = filepath.Split(file)
-		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
-		err := logging.traceLocation.Set(infoLine)
-		if err != nil {
-			t.Fatal("error setting log_backtrace_at: ", err)
-		}
-	}
-	{
-		// Start of tracing block. These lines know about each other's relative position.
-		_, file, line, ok := runtime.Caller(0)
-		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
-		Info("we want a stack trace here")
-	}
-	numAppearances := strings.Count(contents(infoLog), infoLine)
-	if numAppearances < 2 {
-		// Need 2 appearances, one in the log header and one in the trace:
-		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
-		//   ...
-		//   github.com/glog/glog_test.go:280 (0x41ba91)
-		//   ...
-		// We could be more precise but that would require knowing the details
-		// of the traceback format, which may not be dependable.
-		t.Fatal("got no trace back; log is ", contents(infoLog))
-	}
-}
-
-func BenchmarkHeader(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		buf, _, _ := logging.header(infoLog, 0)
-		logging.putBuffer(buf)
-	}
-}
diff --git a/logger/verbosity.go b/logger/verbosity.go
deleted file mode 100644
index 0771c50d9846323da23b2663c775b3f0302f0c05..0000000000000000000000000000000000000000
--- a/logger/verbosity.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-const (
-	Error = iota + 1
-	Warn
-	Info
-	Debug
-	Detail
-
-	Ridiculousness = 100
-)
diff --git a/metrics/metrics.go b/metrics/metrics.go
index d756894f38c065a44c02ef4b4500e722a10ea6d0..fe400b2b9a13612daae618b2ab8135be9e27314a 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -18,13 +18,13 @@
 package metrics
 
 import (
+	"fmt"
 	"os"
 	"runtime"
 	"strings"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/rcrowley/go-metrics"
 	"github.com/rcrowley/go-metrics/exp"
 )
@@ -41,7 +41,7 @@ var Enabled = false
 func init() {
 	for _, arg := range os.Args {
 		if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
-			glog.V(logger.Info).Infof("Enabling metrics collection")
+			log.Info(fmt.Sprintf("Enabling metrics collection"))
 			Enabled = true
 		}
 	}
@@ -102,7 +102,7 @@ func CollectProcessMetrics(refresh time.Duration) {
 		diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
 		diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
 	} else {
-		glog.V(logger.Debug).Infof("failed to read disk metrics: %v", err)
+		log.Debug(fmt.Sprintf("failed to read disk metrics: %v", err))
 	}
 	// Iterate loading the different stats and updating the meters
 	for i := 1; ; i++ {
diff --git a/miner/agent.go b/miner/agent.go
index 697e3971b7930534dbb91d73de5a71642c2513cf..21300b5b91a6d0e36ce892b0402a671869171edc 100644
--- a/miner/agent.go
+++ b/miner/agent.go
@@ -17,14 +17,14 @@
 package miner
 
 import (
+	"fmt"
 	"sync"
 
 	"sync/atomic"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/pow"
 )
 
@@ -108,7 +108,7 @@ done:
 }
 
 func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
-	glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index)
+	log.Debug(fmt.Sprintf("(re)started agent[%d]. mining...\n", self.index))
 
 	// Mine
 	nonce, mixDigest := self.pow.Search(work.Block, stop, self.index)
diff --git a/miner/miner.go b/miner/miner.go
index 83059f4b1d3a9f381cb936992abcc5a1fdf51163..33d77e174d98978036ebb1fed1bd3e9b52725fd1 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -30,8 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 )
@@ -87,7 +86,7 @@ out:
 			if self.Mining() {
 				self.Stop()
 				atomic.StoreInt32(&self.shouldStart, 1)
-				glog.V(logger.Info).Infoln("Mining operation aborted due to sync operation")
+				log.Info(fmt.Sprint("Mining operation aborted due to sync operation"))
 			}
 		case downloader.DoneEvent, downloader.FailedEvent:
 			shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
@@ -124,7 +123,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
 	self.threads = threads
 
 	if atomic.LoadInt32(&self.canStart) == 0 {
-		glog.V(logger.Info).Infoln("Can not start mining operation due to network sync (starts when finished)")
+		log.Info(fmt.Sprint("Can not start mining operation due to network sync (starts when finished)"))
 		return
 	}
 	atomic.StoreInt32(&self.mining, 1)
@@ -133,7 +132,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
 		self.worker.register(NewCpuAgent(i, self.pow))
 	}
 
-	glog.V(logger.Info).Infof("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents))
+	log.Info(fmt.Sprintf("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)))
 	self.worker.start()
 	self.worker.commitNewWork()
 }
diff --git a/miner/remote_agent.go b/miner/remote_agent.go
index 23277bac8bb7d17e339898e2b4faaa938ae5dc91..ec9d2c1991bffec9912e202a894637efb7b93bb7 100644
--- a/miner/remote_agent.go
+++ b/miner/remote_agent.go
@@ -18,6 +18,7 @@ package miner
 
 import (
 	"errors"
+	"fmt"
 	"math/big"
 	"sync"
 	"sync/atomic"
@@ -26,8 +27,7 @@ import (
 	"github.com/ethereum/ethash"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/pow"
 )
 
@@ -140,13 +140,13 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
 	// Make sure the work submitted is present
 	work := a.work[hash]
 	if work == nil {
-		glog.V(logger.Info).Infof("Work was submitted for %x but no pending work found", hash)
+		log.Info(fmt.Sprintf("Work was submitted for %x but no pending work found", hash))
 		return false
 	}
 	// Make sure the PoW solutions is indeed valid
 	block := work.Block.WithMiningResult(nonce, mixDigest)
 	if !a.pow.Verify(block) {
-		glog.V(logger.Warn).Infof("Invalid PoW submitted for %x", hash)
+		log.Warn(fmt.Sprintf("Invalid PoW submitted for %x", hash))
 		return false
 	}
 	// Solutions seems to be valid, return to the miner and notify acceptance
diff --git a/miner/unconfirmed.go b/miner/unconfirmed.go
index 86a30de35cb5f1d598c4dbae3a18d84f3b2161e3..bb7d0ff267d4cd26dc4f31c9d41f7a861fcc40e4 100644
--- a/miner/unconfirmed.go
+++ b/miner/unconfirmed.go
@@ -18,12 +18,12 @@ package miner
 
 import (
 	"container/ring"
+	"fmt"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // headerRetriever is used by the unconfirmed block set to verify whether a previously
@@ -80,7 +80,7 @@ func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) {
 		set.blocks.Move(-1).Link(item)
 	}
 	// Display a log for the user to notify of a new mined block unconfirmed
-	glog.V(logger.Info).Infof("🔨  mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth)
+	log.Info(fmt.Sprintf("🔨  mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth))
 }
 
 // Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth
@@ -100,11 +100,11 @@ func (set *unconfirmedBlocks) Shift(height uint64) {
 		header := set.chain.GetHeaderByNumber(next.index)
 		switch {
 		case header == nil:
-			glog.V(logger.Warn).Infof("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4])
+			log.Warn(fmt.Sprintf("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4]))
 		case header.Hash() == next.hash:
-			glog.V(logger.Info).Infof("🔗  mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4])
+			log.Info(fmt.Sprintf("🔗  mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4]))
 		default:
-			glog.V(logger.Info).Infof("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4])
+			log.Info(fmt.Sprintf("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4]))
 		}
 		// Drop the block out of the ring
 		if set.blocks.Value == set.blocks.Next().Value {
diff --git a/miner/worker.go b/miner/worker.go
index ef64c8fc91b7b655382fa7905c11b75acac03e0e..0379242575aefc3f85c9d34ae51db8d1d21cae49 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -32,8 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 	"gopkg.in/fatih/set.v0"
@@ -278,7 +277,7 @@ func (self *worker) wait() {
 
 			if self.fullValidation {
 				if _, err := self.chain.InsertChain(types.Blocks{block}); err != nil {
-					glog.V(logger.Error).Infoln("mining err", err)
+					log.Error(fmt.Sprint("mining err", err))
 					continue
 				}
 				go self.mux.Post(core.NewMinedBlockEvent{Block: block})
@@ -286,19 +285,19 @@ func (self *worker) wait() {
 				work.state.Commit(self.config.IsEIP158(block.Number()))
 				parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
 				if parent == nil {
-					glog.V(logger.Error).Infoln("Invalid block found during mining")
+					log.Error(fmt.Sprint("Invalid block found during mining"))
 					continue
 				}
 
 				auxValidator := self.eth.BlockChain().AuxValidator()
 				if err := core.ValidateHeader(self.config, auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
-					glog.V(logger.Error).Infoln("Invalid header on mined block:", err)
+					log.Error(fmt.Sprint("Invalid header on mined block:", err))
 					continue
 				}
 
 				stat, err := self.chain.WriteBlock(block)
 				if err != nil {
-					glog.V(logger.Error).Infoln("error writing block to chain", err)
+					log.Error(fmt.Sprint("error writing block to chain", err))
 					continue
 				}
 
@@ -334,7 +333,7 @@ func (self *worker) wait() {
 						self.mux.Post(logs)
 					}
 					if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
-						glog.V(logger.Warn).Infoln("error writing block receipts:", err)
+						log.Warn(fmt.Sprint("error writing block receipts:", err))
 					}
 				}(block, work.state.Logs(), work.receipts)
 			}
@@ -427,7 +426,7 @@ func (self *worker) commitNewWork() {
 	// this will ensure we're not going off too far in the future
 	if now := time.Now().Unix(); tstamp > now+4 {
 		wait := time.Duration(tstamp-now) * time.Second
-		glog.V(logger.Info).Infoln("We are too far in the future. Waiting for", wait)
+		log.Info(fmt.Sprint("We are too far in the future. Waiting for", wait))
 		time.Sleep(wait)
 	}
 
@@ -458,7 +457,7 @@ func (self *worker) commitNewWork() {
 	// Could potentially happen if starting to mine in an odd state.
 	err := self.makeCurrent(parent, header)
 	if err != nil {
-		glog.V(logger.Info).Infoln("Could not create new env for mining, retrying on next block.")
+		log.Info(fmt.Sprint("Could not create new env for mining, retrying on next block."))
 		return
 	}
 	// Create the current work task and check any fork transitions needed
@@ -469,7 +468,7 @@ func (self *worker) commitNewWork() {
 
 	pending, err := self.eth.TxPool().Pending()
 	if err != nil {
-		glog.Errorf("Could not fetch pending transactions: %v", err)
+		log.Error(fmt.Sprintf("Could not fetch pending transactions: %v", err))
 		return
 	}
 
@@ -489,13 +488,12 @@ func (self *worker) commitNewWork() {
 			break
 		}
 		if err := self.commitUncle(work, uncle.Header()); err != nil {
-			if glog.V(logger.Ridiculousness) {
-				glog.V(logger.Detail).Infof("Bad uncle found and will be removed (%x)\n", hash[:4])
-				glog.V(logger.Detail).Infoln(uncle)
-			}
+			log.Trace(fmt.Sprintf("Bad uncle found and will be removed (%x)\n", hash[:4]))
+			log.Trace(fmt.Sprint(uncle))
+
 			badUncles = append(badUncles, hash)
 		} else {
-			glog.V(logger.Debug).Infof("committing %x as uncle\n", hash[:4])
+			log.Debug(fmt.Sprintf("committing %x as uncle\n", hash[:4]))
 			uncles = append(uncles, uncle.Header())
 		}
 	}
@@ -514,7 +512,7 @@ func (self *worker) commitNewWork() {
 
 	// We only care about logging if we're actually mining.
 	if atomic.LoadInt32(&self.mining) == 1 {
-		glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart))
+		log.Info(fmt.Sprintf("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)))
 		self.unconfirmed.Shift(work.Block.NumberU64() - 1)
 	}
 	self.push(work)
@@ -554,7 +552,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		// Check whether the tx is replay protected. If we're not in the EIP155 hf
 		// phase, start ignoring the sender until we do.
 		if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
-			glog.V(logger.Detail).Infof("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash())
+			log.Trace(fmt.Sprintf("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash()))
 
 			txs.Pop()
 			continue
@@ -563,7 +561,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		// Ignore any transactions (and accounts subsequently) with low gas limits
 		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
 			// Pop the current low-priced transaction without shifting in the next from the account
-			glog.V(logger.Info).Infof("Transaction (%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
+			log.Info(fmt.Sprintf("Transaction (%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4]))
 
 			env.lowGasTxs = append(env.lowGasTxs, tx)
 			txs.Pop()
@@ -577,12 +575,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		switch {
 		case core.IsGasLimitErr(err):
 			// Pop the current out-of-gas transaction without shifting in the next from the account
-			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
+			log.Trace(fmt.Sprintf("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4]))
 			txs.Pop()
 
 		case err != nil:
 			// Pop the current failed transaction without shifting in the next from the account
-			glog.V(logger.Detail).Infof("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
+			log.Trace(fmt.Sprintf("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err))
 			env.failedTxs = append(env.failedTxs, tx)
 			txs.Pop()
 
diff --git a/mobile/init.go b/mobile/init.go
index 0fbc6bd3e81ad09b3fe9e3e52fc216734c07af8c..d7acc14ce009623c90dedbdb34645ef25e06d073 100644
--- a/mobile/init.go
+++ b/mobile/init.go
@@ -19,16 +19,15 @@
 package geth
 
 import (
+	"os"
 	"runtime"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 func init() {
 	// Initialize the logger
-	glog.SetV(logger.Info)
-	glog.SetToStderr(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 
 	// Initialize the goroutine count
 	runtime.GOMAXPROCS(runtime.NumCPU())
diff --git a/mobile/logger.go b/mobile/logger.go
index 97f80f9bbc04d9a08958a484d417e7369ef797f9..9f24b3279c01205c7db611153a097f704cb3e086 100644
--- a/mobile/logger.go
+++ b/mobile/logger.go
@@ -17,10 +17,12 @@
 package geth
 
 import (
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"os"
+
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // SetVerbosity sets the global verbosity level (between 0 and 6 - see logger/verbosity.go).
 func SetVerbosity(level int) {
-	glog.SetV(level)
+	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(level), log.StreamHandler(os.Stderr, log.TerminalFormat())))
 }
diff --git a/node/config.go b/node/config.go
index c09f51747b088b23d77bcc5d83212e5a0a6b1ef1..608c9a6b421f8b52cc522cf054db4866fdce05eb 100644
--- a/node/config.go
+++ b/node/config.go
@@ -31,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/nat"
@@ -334,7 +333,7 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
 	if c.DataDir == "" {
 		key, err := crypto.GenerateKey()
 		if err != nil {
-			glog.Fatalf("Failed to generate ephemeral node key: %v", err)
+			log.Crit(fmt.Sprintf("Failed to generate ephemeral node key: %v", err))
 		}
 		return key
 	}
@@ -346,16 +345,16 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
 	// No persistent key found, generate and store a new one.
 	key, err := crypto.GenerateKey()
 	if err != nil {
-		glog.Fatalf("Failed to generate node key: %v", err)
+		log.Crit(fmt.Sprintf("Failed to generate node key: %v", err))
 	}
 	instanceDir := filepath.Join(c.DataDir, c.name())
 	if err := os.MkdirAll(instanceDir, 0700); err != nil {
-		glog.V(logger.Error).Infof("Failed to persist node key: %v", err)
+		log.Error(fmt.Sprintf("Failed to persist node key: %v", err))
 		return key
 	}
 	keyfile = filepath.Join(instanceDir, datadirPrivateKey)
 	if err := crypto.SaveECDSA(keyfile, key); err != nil {
-		glog.V(logger.Error).Infof("Failed to persist node key: %v", err)
+		log.Error(fmt.Sprintf("Failed to persist node key: %v", err))
 	}
 	return key
 }
@@ -383,7 +382,7 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
 	// Load the nodes from the config file.
 	var nodelist []string
 	if err := common.LoadJSON(path, &nodelist); err != nil {
-		glog.V(logger.Error).Infof("Can't load node file %s: %v", path, err)
+		log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err))
 		return nil
 	}
 	// Interpret the list as a discovery node array
@@ -394,7 +393,7 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
 		}
 		node, err := discover.ParseNode(url)
 		if err != nil {
-			glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err)
+			log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err))
 			continue
 		}
 		nodes = append(nodes, node)
@@ -442,7 +441,7 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
 		keystore.NewKeyStore(keydir, scryptN, scryptP),
 	}
 	if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
-		glog.V(logger.Warn).Infof("Failed to start Ledger hub, disabling: %v", err)
+		log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
 	} else {
 		backends = append(backends, ledgerhub)
 	}
diff --git a/node/node.go b/node/node.go
index 4b56fba4c5e89dd8c4d9b7cab93f85fc219b906e..c7e28af377fb3842572aef42a478e71c9dd23bf5 100644
--- a/node/node.go
+++ b/node/node.go
@@ -18,6 +18,7 @@ package node
 
 import (
 	"errors"
+	"fmt"
 	"net"
 	"os"
 	"path/filepath"
@@ -30,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/debug"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/syndtr/goleveldb/leveldb/storage"
@@ -173,7 +173,7 @@ func (n *Node) Start() error {
 		MaxPendingPeers:  n.config.MaxPendingPeers,
 	}
 	running := &p2p.Server{Config: n.serverConfig}
-	glog.V(logger.Info).Infoln("instance:", n.serverConfig.Name)
+	log.Info(fmt.Sprint("instance:", n.serverConfig.Name))
 
 	// Otherwise copy and specialize the P2P configuration
 	services := make(map[reflect.Type]Service)
@@ -301,7 +301,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
 		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 			return err
 		}
-		glog.V(logger.Debug).Infof("InProc registered %T under '%s'", api.Service, api.Namespace)
+		log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
 	}
 	n.inprocHandler = handler
 	return nil
@@ -327,7 +327,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
 		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 			return err
 		}
-		glog.V(logger.Debug).Infof("IPC registered %T under '%s'", api.Service, api.Namespace)
+		log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
 	}
 	// All APIs registered, start the IPC listener
 	var (
@@ -338,7 +338,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
 		return err
 	}
 	go func() {
-		glog.V(logger.Info).Infof("IPC endpoint opened: %s", n.ipcEndpoint)
+		log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
 
 		for {
 			conn, err := listener.Accept()
@@ -351,7 +351,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
 					return
 				}
 				// Not closed, just some error; report and continue
-				glog.V(logger.Error).Infof("IPC accept failed: %v", err)
+				log.Error(fmt.Sprintf("IPC accept failed: %v", err))
 				continue
 			}
 			go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
@@ -370,7 +370,7 @@ func (n *Node) stopIPC() {
 		n.ipcListener.Close()
 		n.ipcListener = nil
 
-		glog.V(logger.Info).Infof("IPC endpoint closed: %s", n.ipcEndpoint)
+		log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
 	}
 	if n.ipcHandler != nil {
 		n.ipcHandler.Stop()
@@ -396,7 +396,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
 			if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 				return err
 			}
-			glog.V(logger.Debug).Infof("HTTP registered %T under '%s'", api.Service, api.Namespace)
+			log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
 		}
 	}
 	// All APIs registered, start the HTTP listener
@@ -408,7 +408,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
 		return err
 	}
 	go rpc.NewHTTPServer(cors, handler).Serve(listener)
-	glog.V(logger.Info).Infof("HTTP endpoint opened: http://%s", endpoint)
+	log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
 
 	// All listeners booted successfully
 	n.httpEndpoint = endpoint
@@ -424,7 +424,7 @@ func (n *Node) stopHTTP() {
 		n.httpListener.Close()
 		n.httpListener = nil
 
-		glog.V(logger.Info).Infof("HTTP endpoint closed: http://%s", n.httpEndpoint)
+		log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
 	}
 	if n.httpHandler != nil {
 		n.httpHandler.Stop()
@@ -450,7 +450,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
 			if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 				return err
 			}
-			glog.V(logger.Debug).Infof("WebSocket registered %T under '%s'", api.Service, api.Namespace)
+			log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
 		}
 	}
 	// All APIs registered, start the HTTP listener
@@ -462,7 +462,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
 		return err
 	}
 	go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
-	glog.V(logger.Info).Infof("WebSocket endpoint opened: ws://%s", endpoint)
+	log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", endpoint))
 
 	// All listeners booted successfully
 	n.wsEndpoint = endpoint
@@ -478,7 +478,7 @@ func (n *Node) stopWS() {
 		n.wsListener.Close()
 		n.wsListener = nil
 
-		glog.V(logger.Info).Infof("WebSocket endpoint closed: ws://%s", n.wsEndpoint)
+		log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
 	}
 	if n.wsHandler != nil {
 		n.wsHandler.Stop()
diff --git a/p2p/dial.go b/p2p/dial.go
index 1f5c0f15ac7c96437686cdb4bc5eecfd64ee542e..65180e029416ead1f1d4c8b07a55ddcccd465a2d 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -24,8 +24,7 @@ import (
 	"net"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 )
@@ -134,7 +133,7 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
 	var newtasks []task
 	addDial := func(flag connFlag, n *discover.Node) bool {
 		if err := s.checkDial(n, peers); err != nil {
-			glog.V(logger.Debug).Infof("skipping dial candidate %x@%v:%d: %v", n.ID[:8], n.IP, n.TCP, err)
+			log.Debug(fmt.Sprintf("skipping dial candidate %x@%v:%d: %v", n.ID[:8], n.IP, n.TCP, err))
 			return false
 		}
 		s.dialing[n.ID] = flag
@@ -163,7 +162,7 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
 		err := s.checkDial(t.dest, peers)
 		switch err {
 		case errNotWhitelisted, errSelf:
-			glog.V(logger.Debug).Infof("removing static dial candidate %x@%v:%d: %v", t.dest.ID[:8], t.dest.IP, t.dest.TCP, err)
+			log.Debug(fmt.Sprintf("removing static dial candidate %x@%v:%d: %v", t.dest.ID[:8], t.dest.IP, t.dest.TCP, err))
 			delete(s.static, t.dest.ID)
 		case nil:
 			s.dialing[id] = t.flags
@@ -267,7 +266,7 @@ func (t *dialTask) Do(srv *Server) {
 // The backoff delay resets when the node is found.
 func (t *dialTask) resolve(srv *Server) bool {
 	if srv.ntab == nil {
-		glog.V(logger.Debug).Infof("can't resolve node %x: discovery is disabled", t.dest.ID[:6])
+		log.Debug(fmt.Sprintf("can't resolve node %x: discovery is disabled", t.dest.ID[:6]))
 		return false
 	}
 	if t.resolveDelay == 0 {
@@ -283,23 +282,23 @@ func (t *dialTask) resolve(srv *Server) bool {
 		if t.resolveDelay > maxResolveDelay {
 			t.resolveDelay = maxResolveDelay
 		}
-		glog.V(logger.Debug).Infof("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay)
+		log.Debug(fmt.Sprintf("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay))
 		return false
 	}
 	// The node was found.
 	t.resolveDelay = initialResolveDelay
 	t.dest = resolved
-	glog.V(logger.Debug).Infof("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP)
+	log.Debug(fmt.Sprintf("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP))
 	return true
 }
 
 // dial performs the actual connection attempt.
 func (t *dialTask) dial(srv *Server, dest *discover.Node) bool {
 	addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
-	glog.V(logger.Debug).Infof("dial tcp %v (%x)", addr, dest.ID[:6])
+	log.Debug(fmt.Sprintf("dial tcp %v (%x)", addr, dest.ID[:6]))
 	fd, err := srv.Dialer.Dial("tcp", addr.String())
 	if err != nil {
-		glog.V(logger.Detail).Infof("%v", err)
+		log.Trace(fmt.Sprintf("%v", err))
 		return false
 	}
 	mfd := newMeteredConn(fd, false)
diff --git a/p2p/discover/database.go b/p2p/discover/database.go
index 8d20d1ec74840260ae6619beda90c4d9ccb283e3..a8b32d31e8a479fa2ba16ffdb8a6dac9bccaa1c3 100644
--- a/p2p/discover/database.go
+++ b/p2p/discover/database.go
@@ -23,13 +23,13 @@ import (
 	"bytes"
 	"crypto/rand"
 	"encoding/binary"
+	"fmt"
 	"os"
 	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
@@ -180,12 +180,12 @@ func (db *nodeDB) storeInt64(key []byte, n int64) error {
 func (db *nodeDB) node(id NodeID) *Node {
 	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
 	if err != nil {
-		glog.V(logger.Detail).Infof("failed to retrieve node %v: %v", id, err)
+		log.Trace(fmt.Sprintf("failed to retrieve node %v: %v", id, err))
 		return nil
 	}
 	node := new(Node)
 	if err := rlp.DecodeBytes(blob, node); err != nil {
-		glog.V(logger.Warn).Infof("failed to decode node RLP: %v", err)
+		log.Warn(fmt.Sprintf("failed to decode node RLP: %v", err))
 		return nil
 	}
 	node.sha = crypto.Keccak256Hash(node.ID[:])
@@ -233,7 +233,7 @@ func (db *nodeDB) expirer() {
 		select {
 		case <-tick:
 			if err := db.expireNodes(); err != nil {
-				glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err)
+				log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
 			}
 
 		case <-db.quit:
@@ -352,9 +352,7 @@ func nextNode(it iterator.Iterator) *Node {
 		}
 		var n Node
 		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
-			if glog.V(logger.Warn) {
-				glog.Errorf("invalid node %x: %v", id, err)
-			}
+			log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
 			continue
 		}
 		return &n
diff --git a/p2p/discover/ntp.go b/p2p/discover/ntp.go
index c1a4b3af107a55d279919dda74d47528b11dcc8c..df67e1c5bd7b76c75d859b0a7c5ea2b2fe95a896 100644
--- a/p2p/discover/ntp.go
+++ b/p2p/discover/ntp.go
@@ -26,8 +26,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -55,12 +54,12 @@ func checkClockDrift() {
 		howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
 		separator := strings.Repeat("-", len(warning))
 
-		glog.V(logger.Warn).Info(separator)
-		glog.V(logger.Warn).Info(warning)
-		glog.V(logger.Warn).Info(howtofix)
-		glog.V(logger.Warn).Info(separator)
+		log.Warn(fmt.Sprint(separator))
+		log.Warn(fmt.Sprint(warning))
+		log.Warn(fmt.Sprint(howtofix))
+		log.Warn(fmt.Sprint(separator))
 	} else {
-		glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift)
+		log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
 	}
 }
 
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index 839e3ec7e79fc21f494e00c9648356887e9c1e81..03392b563630073109a73c46178434db422ebd07 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -34,8 +34,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -278,10 +277,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
 						// Bump the failure counter to detect and evacuate non-bonded entries
 						fails := tab.db.findFails(n.ID) + 1
 						tab.db.updateFindFails(n.ID, fails)
-						glog.V(logger.Detail).Infof("Bumping failures for %x: %d", n.ID[:8], fails)
+						log.Trace(fmt.Sprintf("Bumping failures for %x: %d", n.ID[:8], fails))
 
 						if fails >= maxFindnodeFailures {
-							glog.V(logger.Detail).Infof("Evacuating node %x: %d findnode failures", n.ID[:8], fails)
+							log.Trace(fmt.Sprintf("Evacuating node %x: %d findnode failures", n.ID[:8], fails))
 							tab.delete(n)
 						}
 					}
@@ -384,14 +383,15 @@ func (tab *Table) doRefresh(done chan struct{}) {
 	// (hopefully) still alive.
 	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
 	seeds = tab.bondall(append(seeds, tab.nursery...))
-	if glog.V(logger.Debug) {
-		if len(seeds) == 0 {
-			glog.Infof("no seed nodes found")
-		}
-		for _, n := range seeds {
+
+	if len(seeds) == 0 {
+		log.Debug(fmt.Sprintf("no seed nodes found"))
+	}
+	for _, n := range seeds {
+		log.Debug("", "msg", log.Lazy{Fn: func() string {
 			age := time.Since(tab.db.lastPong(n.ID))
-			glog.Infof("seed node (age %v): %v", age, n)
-		}
+			return fmt.Sprintf("seed node (age %v): %v", age, n)
+		}})
 	}
 	tab.mutex.Lock()
 	tab.stuff(seeds)
@@ -470,7 +470,7 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
 	var result error
 	age := time.Since(tab.db.lastPong(id))
 	if node == nil || fails > 0 || age > nodeDBNodeExpiration {
-		glog.V(logger.Detail).Infof("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age)
+		log.Trace(fmt.Sprintf("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age))
 
 		tab.bondmu.Lock()
 		w := tab.bonding[id]
diff --git a/p2p/discover/udp.go b/p2p/discover/udp.go
index ae7f9702913a3f2db817da2049d268fdc30c63ec..eafc3f394e29e0d475e00335c9265db9fb3cd310 100644
--- a/p2p/discover/udp.go
+++ b/p2p/discover/udp.go
@@ -26,8 +26,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -224,7 +223,7 @@ func ListenUDP(priv *ecdsa.PrivateKey, laddr string, natm nat.Interface, nodeDBP
 	if err != nil {
 		return nil, err
 	}
-	glog.V(logger.Info).Infoln("Listening,", tab.self)
+	log.Info(fmt.Sprint("Listening,", tab.self))
 	return tab, nil
 }
 
@@ -294,7 +293,7 @@ func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node
 			nreceived++
 			n, err := t.nodeFromRPC(toaddr, rn)
 			if err != nil {
-				glog.V(logger.Detail).Infof("invalid neighbor node (%v) from %v: %v", rn.IP, toaddr, err)
+				log.Trace(fmt.Sprintf("invalid neighbor node (%v) from %v: %v", rn.IP, toaddr, err))
 				continue
 			}
 			nodes = append(nodes, n)
@@ -464,9 +463,9 @@ func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
 	if err != nil {
 		return err
 	}
-	glog.V(logger.Detail).Infof(">>> %v %T", toaddr, req)
+	log.Trace(fmt.Sprintf(">>> %v %T", toaddr, req))
 	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
-		glog.V(logger.Detail).Infoln("UDP send failed:", err)
+		log.Trace(fmt.Sprint("UDP send failed:", err))
 	}
 	return err
 }
@@ -476,13 +475,13 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
 	b.Write(headSpace)
 	b.WriteByte(ptype)
 	if err := rlp.Encode(b, req); err != nil {
-		glog.V(logger.Error).Infoln("error encoding packet:", err)
+		log.Error(fmt.Sprint("error encoding packet:", err))
 		return nil, err
 	}
 	packet := b.Bytes()
 	sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
 	if err != nil {
-		glog.V(logger.Error).Infoln("could not sign packet:", err)
+		log.Error(fmt.Sprint("could not sign packet:", err))
 		return nil, err
 	}
 	copy(packet[macSize:], sig)
@@ -504,11 +503,11 @@ func (t *udp) readLoop() {
 		nbytes, from, err := t.conn.ReadFromUDP(buf)
 		if netutil.IsTemporaryError(err) {
 			// Ignore temporary read errors.
-			glog.V(logger.Debug).Infof("Temporary read error: %v", err)
+			log.Debug(fmt.Sprintf("Temporary read error: %v", err))
 			continue
 		} else if err != nil {
 			// Shut down the loop for permament errors.
-			glog.V(logger.Debug).Infof("Read error: %v", err)
+			log.Debug(fmt.Sprintf("Read error: %v", err))
 			return
 		}
 		t.handlePacket(from, buf[:nbytes])
@@ -518,14 +517,14 @@ func (t *udp) readLoop() {
 func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
 	packet, fromID, hash, err := decodePacket(buf)
 	if err != nil {
-		glog.V(logger.Debug).Infof("Bad packet from %v: %v", from, err)
+		log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
 		return err
 	}
 	status := "ok"
 	if err = packet.handle(t, from, fromID, hash); err != nil {
 		status = err.Error()
 	}
-	glog.V(logger.Detail).Infof("<<< %v %T: %s", from, packet, status)
+	log.Trace(fmt.Sprintf("<<< %v %T: %s", from, packet, status))
 	return err
 }
 
diff --git a/p2p/discv5/database.go b/p2p/discv5/database.go
index 44be8a74ec15d14f76c203fd1ba4e2abb2f4ae14..a3b044ec187e764a8d55dbab537c4a881b3678cf 100644
--- a/p2p/discv5/database.go
+++ b/p2p/discv5/database.go
@@ -23,13 +23,13 @@ import (
 	"bytes"
 	"crypto/rand"
 	"encoding/binary"
+	"fmt"
 	"os"
 	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
@@ -192,7 +192,7 @@ func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
 	}
 	err = rlp.DecodeBytes(blob, val)
 	if err != nil {
-		glog.V(logger.Warn).Infof("key %x (%T) %v", key, val, err)
+		log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err))
 	}
 	return err
 }
@@ -244,7 +244,7 @@ func (db *nodeDB) expirer() {
 		select {
 		case <-tick:
 			if err := db.expireNodes(); err != nil {
-				glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err)
+				log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
 			}
 
 		case <-db.quit:
@@ -396,9 +396,7 @@ func nextNode(it iterator.Iterator) *Node {
 		}
 		var n Node
 		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
-			if glog.V(logger.Warn) {
-				glog.Errorf("invalid node %x: %v", id, err)
-			}
+			log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
 			continue
 		}
 		return &n
diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
index 74d485836ec5e14ace1d13c3adfc13445e2948c3..a39cfcc645efcd28874ef2832d52f2c398c7d82f 100644
--- a/p2p/discv5/net.go
+++ b/p2p/discv5/net.go
@@ -28,8 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -437,10 +436,10 @@ loop:
 			if err := net.handle(n, pkt.ev, &pkt); err != nil {
 				status = err.Error()
 			}
-			if glog.V(logger.Detail) {
-				glog.Infof("<<< (%d) %v from %x@%v: %v -> %v (%v)",
+			log.Trace("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
 					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
-			}
+			}})
 			// TODO: persist state if n.state goes >= known, delete if it goes <= known
 
 		// State transition timeouts.
@@ -456,10 +455,10 @@ loop:
 			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
 				status = err.Error()
 			}
-			if glog.V(logger.Detail) {
-				glog.Infof("--- (%d) %v for %x@%v: %v -> %v (%v)",
+			log.Trace("", "msg", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
 					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
-			}
+			}})
 
 		// Querying.
 		case q := <-net.queryReq:
@@ -655,7 +654,7 @@ loop:
 	}
 	debugLog("loop stopped")
 
-	glog.V(logger.Debug).Infof("shutting down")
+	log.Debug(fmt.Sprint("shutting down"))
 	if net.conn != nil {
 		net.conn.Close()
 	}
@@ -685,20 +684,20 @@ func (net *Network) refresh(done chan<- struct{}) {
 		seeds = net.nursery
 	}
 	if len(seeds) == 0 {
-		glog.V(logger.Detail).Info("no seed nodes found")
+		log.Trace(fmt.Sprint("no seed nodes found"))
 		close(done)
 		return
 	}
 	for _, n := range seeds {
-		if glog.V(logger.Debug) {
+		log.Debug("", "msg", log.Lazy{Fn: func() string {
 			var age string
 			if net.db != nil {
 				age = time.Since(net.db.lastPong(n.ID)).String()
 			} else {
 				age = "unknown"
 			}
-			glog.Infof("seed node (age %s): %v", age, n)
-		}
+			return fmt.Sprintf("seed node (age %s): %v", age, n)
+		}})
 		n = net.internNodeFromDB(n)
 		if n.state == unknown {
 			net.transition(n, verifyinit)
@@ -1254,7 +1253,7 @@ func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
 	for i, rn := range req.Nodes {
 		nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
 		if err != nil {
-			glog.V(logger.Debug).Infof("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err)
+			log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
 			continue
 		}
 		nodes[i] = nn
diff --git a/p2p/discv5/ntp.go b/p2p/discv5/ntp.go
index 81c0e63365803999c621c5e4b52de486eb903838..f78d5dc43cff37add1aca64fa178d2ad4860d89d 100644
--- a/p2p/discv5/ntp.go
+++ b/p2p/discv5/ntp.go
@@ -26,8 +26,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -55,12 +54,12 @@ func checkClockDrift() {
 		howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
 		separator := strings.Repeat("-", len(warning))
 
-		glog.V(logger.Warn).Info(separator)
-		glog.V(logger.Warn).Info(warning)
-		glog.V(logger.Warn).Info(howtofix)
-		glog.V(logger.Warn).Info(separator)
+		log.Warn(fmt.Sprint(separator))
+		log.Warn(fmt.Sprint(warning))
+		log.Warn(fmt.Sprint(howtofix))
+		log.Warn(fmt.Sprint(separator))
 	} else {
-		glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift)
+		log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
 	}
 }
 
diff --git a/p2p/discv5/sim_test.go b/p2p/discv5/sim_test.go
index 3f7fe7463b7a510b74f15f55c2adf60eb603920e..bf57872e2dc69dbfcf2bb3b7e3257d6dee39d96e 100644
--- a/p2p/discv5/sim_test.go
+++ b/p2p/discv5/sim_test.go
@@ -65,10 +65,6 @@ func TestSimTopics(t *testing.T) {
 	if runWithPlaygroundTime(t) {
 		return
 	}
-
-	// glog.SetV(6)
-	// glog.SetToStderr(true)
-
 	sim := newSimulation()
 	bootnode := sim.launchNode(false)
 
@@ -158,10 +154,6 @@ func TestSimTopicHierarchy(t *testing.T) {
 	if runWithPlaygroundTime(t) {
 		return
 	}
-
-	// glog.SetV(6)
-	// glog.SetToStderr(true)
-
 	sim := newSimulation()
 	bootnode := sim.launchNode(false)
 
diff --git a/p2p/discv5/udp.go b/p2p/discv5/udp.go
index 6cf6cfbcc036ecf6f64c64c0a45c4736a99f1912..26087cd8e5640a13e60358c07d6e5a3c160744e3 100644
--- a/p2p/discv5/udp.go
+++ b/p2p/discv5/udp.go
@@ -26,8 +26,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -348,9 +347,9 @@ func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req inter
 		//fmt.Println(err)
 		return hash, err
 	}
-	glog.V(logger.Detail).Infof(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr)
+	log.Trace(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr))
 	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
-		glog.V(logger.Detail).Infoln("UDP send failed:", err)
+		log.Trace(fmt.Sprint("UDP send failed: ", err))
 	}
 	//fmt.Println(err)
 	return hash, err
@@ -364,13 +363,13 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash
 	b.Write(headSpace)
 	b.WriteByte(ptype)
 	if err := rlp.Encode(b, req); err != nil {
-		glog.V(logger.Error).Infoln("error encoding packet:", err)
+		log.Error(fmt.Sprint("error encoding packet: ", err))
 		return nil, nil, err
 	}
 	packet := b.Bytes()
 	sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
 	if err != nil {
-		glog.V(logger.Error).Infoln("could not sign packet:", err)
+		log.Error(fmt.Sprint("could not sign packet: ", err))
 		return nil, nil, err
 	}
 	copy(packet[macSize:], sig)
@@ -393,11 +392,11 @@ func (t *udp) readLoop() {
 		nbytes, from, err := t.conn.ReadFromUDP(buf)
 		if netutil.IsTemporaryError(err) {
 			// Ignore temporary read errors.
-			glog.V(logger.Debug).Infof("Temporary read error: %v", err)
+			log.Debug(fmt.Sprintf("Temporary read error: %v", err))
 			continue
 		} else if err != nil {
 			// Shut down the loop for permament errors.
-			glog.V(logger.Debug).Infof("Read error: %v", err)
+			log.Debug(fmt.Sprintf("Read error: %v", err))
 			return
 		}
 		t.handlePacket(from, buf[:nbytes])
@@ -407,7 +406,7 @@ func (t *udp) readLoop() {
 func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
 	pkt := ingressPacket{remoteAddr: from}
 	if err := decodePacket(buf, &pkt); err != nil {
-		glog.V(logger.Debug).Infof("Bad packet from %v: %v", from, err)
+		log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
 		//fmt.Println("bad packet", err)
 		return err
 	}
diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go
index 7eb23fa7b7ec20624b56ffefcc6d51292803a23c..e5883cf98d66f994976a1c8433ae4bb234e564fe 100644
--- a/p2p/nat/nat.go
+++ b/p2p/nat/nat.go
@@ -25,8 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/jackpal/go-nat-pmp"
 )
 
@@ -102,13 +101,13 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na
 	refresh := time.NewTimer(mapUpdateInterval)
 	defer func() {
 		refresh.Stop()
-		glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s", protocol, extport, intport, name, m)
+		log.Debug(fmt.Sprintf("deleting port mapping: %s %d -> %d (%s) using %s", protocol, extport, intport, name, m))
 		m.DeleteMapping(protocol, extport, intport)
 	}()
 	if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
-		glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v", protocol, intport, err)
+		log.Debug(fmt.Sprintf("network port %s:%d could not be mapped: %v", protocol, intport, err))
 	} else {
-		glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m)
+		log.Info(fmt.Sprintf("mapped network port %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m))
 	}
 	for {
 		select {
@@ -117,9 +116,9 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na
 				return
 			}
 		case <-refresh.C:
-			glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m)
+			log.Trace(fmt.Sprintf("refresh port mapping %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m))
 			if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
-				glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v", protocol, intport, err)
+				log.Debug(fmt.Sprintf("network port %s:%d could not be mapped: %v", protocol, intport, err))
 			}
 			refresh.Reset(mapUpdateInterval)
 		}
diff --git a/p2p/peer.go b/p2p/peer.go
index b21c872d666fad1ba27698b1f0cf1141c1da1af2..5d09927a5a838e38953973da7b849d15b71dc542 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -25,8 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -157,27 +156,27 @@ loop:
 			// A write finished. Allow the next write to start if
 			// there was no error.
 			if err != nil {
-				glog.V(logger.Detail).Infof("%v: write error: %v", p, err)
+				log.Trace(fmt.Sprintf("%v: write error: %v", p, err))
 				reason = DiscNetworkError
 				break loop
 			}
 			writeStart <- struct{}{}
 		case err := <-readErr:
 			if r, ok := err.(DiscReason); ok {
-				glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v", p, r)
+				log.Debug(fmt.Sprintf("%v: remote requested disconnect: %v", p, r))
 				requested = true
 				reason = r
 			} else {
-				glog.V(logger.Detail).Infof("%v: read error: %v", p, err)
+				log.Trace(fmt.Sprintf("%v: read error: %v", p, err))
 				reason = DiscNetworkError
 			}
 			break loop
 		case err := <-p.protoErr:
 			reason = discReasonForError(err)
-			glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)", p, err, reason)
+			log.Debug(fmt.Sprintf("%v: protocol error: %v (%v)", p, err, reason))
 			break loop
 		case reason = <-p.disc:
-			glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v", p, reason)
+			log.Debug(fmt.Sprintf("%v: locally requested disconnect: %v", p, reason))
 			break loop
 		}
 	}
@@ -298,14 +297,14 @@ func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error)
 		proto.closed = p.closed
 		proto.wstart = writeStart
 		proto.werr = writeErr
-		glog.V(logger.Detail).Infof("%v: Starting protocol %s/%d", p, proto.Name, proto.Version)
+		log.Trace(fmt.Sprintf("%v: Starting protocol %s/%d", p, proto.Name, proto.Version))
 		go func() {
 			err := proto.Run(p, proto)
 			if err == nil {
-				glog.V(logger.Detail).Infof("%v: Protocol %s/%d returned", p, proto.Name, proto.Version)
+				log.Trace(fmt.Sprintf("%v: Protocol %s/%d returned", p, proto.Name, proto.Version))
 				err = errors.New("protocol returned")
 			} else if err != io.EOF {
-				glog.V(logger.Detail).Infof("%v: Protocol %s/%d error: %v", p, proto.Name, proto.Version, err)
+				log.Trace(fmt.Sprintf("%v: Protocol %s/%d error: %v", p, proto.Name, proto.Version, err))
 			}
 			p.protoErr <- err
 			p.wg.Done()
diff --git a/p2p/server.go b/p2p/server.go
index 298148d3ee4edfa712d1909362fc5bc191795d8b..9f1478a41036d343c88e97e991fa411ec2613b99 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -25,8 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/nat"
@@ -337,7 +336,7 @@ func (srv *Server) Start() (err error) {
 		return errors.New("server already running")
 	}
 	srv.running = true
-	glog.V(logger.Info).Infoln("Starting Server")
+	log.Info(fmt.Sprint("Starting Server"))
 
 	// static fields
 	if srv.PrivateKey == nil {
@@ -399,7 +398,7 @@ func (srv *Server) Start() (err error) {
 		}
 	}
 	if srv.NoDial && srv.ListenAddr == "" {
-		glog.V(logger.Warn).Infoln("I will be kind-of useless, neither dialing nor listening.")
+		log.Warn(fmt.Sprint("I will be kind-of useless, neither dialing nor listening."))
 	}
 
 	srv.loopWG.Add(1)
@@ -467,7 +466,7 @@ func (srv *Server) run(dialstate dialer) {
 		i := 0
 		for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
 			t := ts[i]
-			glog.V(logger.Detail).Infoln("new task:", t)
+			log.Trace(fmt.Sprint("new task: ", t))
 			go func() { t.Do(srv); taskdone <- t }()
 			runningTasks = append(runningTasks, t)
 		}
@@ -490,19 +489,19 @@ running:
 		select {
 		case <-srv.quit:
 			// The server was stopped. Run the cleanup logic.
-			glog.V(logger.Detail).Infoln("<-quit: spinning down")
+			log.Trace(fmt.Sprint("<-quit: spinning down"))
 			break running
 		case n := <-srv.addstatic:
 			// This channel is used by AddPeer to add to the
 			// ephemeral static peer list. Add it to the dialer,
 			// it will keep the node connected.
-			glog.V(logger.Detail).Infoln("<-addstatic:", n)
+			log.Trace(fmt.Sprint("<-addstatic: ", n))
 			dialstate.addStatic(n)
 		case n := <-srv.removestatic:
 			// This channel is used by RemovePeer to send a
 			// disconnect request to a peer and begin the
 			// stop keeping the node connected
-			glog.V(logger.Detail).Infoln("<-removestatic:", n)
+			log.Trace(fmt.Sprint("<-removestatic: ", n))
 			dialstate.removeStatic(n)
 			if p, ok := peers[n.ID]; ok {
 				p.Disconnect(DiscRequested)
@@ -515,7 +514,7 @@ running:
 			// A task got done. Tell dialstate about it so it
 			// can update its state and remove it from the active
 			// tasks list.
-			glog.V(logger.Detail).Infoln("<-taskdone:", t)
+			log.Trace(fmt.Sprint("<-taskdone: ", t))
 			dialstate.taskDone(t, time.Now())
 			delTask(t)
 		case c := <-srv.posthandshake:
@@ -525,16 +524,16 @@ running:
 				// Ensure that the trusted flag is set before checking against MaxPeers.
 				c.flags |= trustedConn
 			}
-			glog.V(logger.Detail).Infoln("<-posthandshake:", c)
+			log.Trace(fmt.Sprint("<-posthandshake: ", c))
 			// TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them.
 			c.cont <- srv.encHandshakeChecks(peers, c)
 		case c := <-srv.addpeer:
 			// At this point the connection is past the protocol handshake.
 			// Its capabilities are known and the remote identity is verified.
-			glog.V(logger.Detail).Infoln("<-addpeer:", c)
+			log.Trace(fmt.Sprint("<-addpeer: ", c))
 			err := srv.protoHandshakeChecks(peers, c)
 			if err != nil {
-				glog.V(logger.Detail).Infof("Not adding %v as peer: %v", c, err)
+				log.Trace(fmt.Sprintf("Not adding %v as peer: %v", c, err))
 			} else {
 				// The handshakes are done and it passed all checks.
 				p := newPeer(c, srv.Protocols)
@@ -547,7 +546,7 @@ running:
 			c.cont <- err
 		case p := <-srv.delpeer:
 			// A peer disconnected.
-			glog.V(logger.Detail).Infoln("<-delpeer:", p)
+			log.Trace(fmt.Sprint("<-delpeer: ", p))
 			delete(peers, p.ID())
 		}
 	}
@@ -566,10 +565,10 @@ running:
 	// Wait for peers to shut down. Pending connections and tasks are
 	// not handled here and will terminate soon-ish because srv.quit
 	// is closed.
-	glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(runningTasks))
+	log.Trace(fmt.Sprintf("ignoring %d pending tasks at spindown", len(runningTasks)))
 	for len(peers) > 0 {
 		p := <-srv.delpeer
-		glog.V(logger.Detail).Infoln("<-delpeer (spindown):", p)
+		log.Trace(fmt.Sprint("<-delpeer (spindown): ", p))
 		delete(peers, p.ID())
 	}
 }
@@ -605,7 +604,7 @@ type tempError interface {
 // inbound connections.
 func (srv *Server) listenLoop() {
 	defer srv.loopWG.Done()
-	glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr())
+	log.Info(fmt.Sprint("Listening on ", srv.listener.Addr()))
 
 	// This channel acts as a semaphore limiting
 	// active inbound connections that are lingering pre-handshake.
@@ -630,10 +629,10 @@ func (srv *Server) listenLoop() {
 		for {
 			fd, err = srv.listener.Accept()
 			if tempErr, ok := err.(tempError); ok && tempErr.Temporary() {
-				glog.V(logger.Debug).Infof("Temporary read error: %v", err)
+				log.Debug(fmt.Sprintf("Temporary read error: %v", err))
 				continue
 			} else if err != nil {
-				glog.V(logger.Debug).Infof("Read error: %v", err)
+				log.Debug(fmt.Sprintf("Read error: %v", err))
 				return
 			}
 			break
@@ -642,7 +641,7 @@ func (srv *Server) listenLoop() {
 		// Reject connections that do not match NetRestrict.
 		if srv.NetRestrict != nil {
 			if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok && !srv.NetRestrict.Contains(tcp.IP) {
-				glog.V(logger.Debug).Infof("Rejected conn %v because it is not whitelisted in NetRestrict", fd.RemoteAddr())
+				log.Debug(fmt.Sprintf("Rejected conn %v because it is not whitelisted in NetRestrict", fd.RemoteAddr()))
 				fd.Close()
 				slots <- struct{}{}
 				continue
@@ -650,7 +649,7 @@ func (srv *Server) listenLoop() {
 		}
 
 		fd = newMeteredConn(fd, true)
-		glog.V(logger.Debug).Infof("Accepted conn %v", fd.RemoteAddr())
+		log.Debug(fmt.Sprintf("Accepted conn %v", fd.RemoteAddr()))
 
 		// Spawn the handler. It will give the slot back when the connection
 		// has been established.
@@ -677,36 +676,36 @@ func (srv *Server) setupConn(fd net.Conn, flags connFlag, dialDest *discover.Nod
 	// Run the encryption handshake.
 	var err error
 	if c.id, err = c.doEncHandshake(srv.PrivateKey, dialDest); err != nil {
-		glog.V(logger.Debug).Infof("%v faild enc handshake: %v", c, err)
+		log.Debug(fmt.Sprintf("%v failed enc handshake: %v", c, err))
 		c.close(err)
 		return
 	}
 	// For dialed connections, check that the remote public key matches.
 	if dialDest != nil && c.id != dialDest.ID {
 		c.close(DiscUnexpectedIdentity)
-		glog.V(logger.Debug).Infof("%v dialed identity mismatch, want %x", c, dialDest.ID[:8])
+		log.Debug(fmt.Sprintf("%v dialed identity mismatch, want %x", c, dialDest.ID[:8]))
 		return
 	}
 	if err := srv.checkpoint(c, srv.posthandshake); err != nil {
-		glog.V(logger.Debug).Infof("%v failed checkpoint posthandshake: %v", c, err)
+		log.Debug(fmt.Sprintf("%v failed checkpoint posthandshake: %v", c, err))
 		c.close(err)
 		return
 	}
 	// Run the protocol handshake
 	phs, err := c.doProtoHandshake(srv.ourHandshake)
 	if err != nil {
-		glog.V(logger.Debug).Infof("%v failed proto handshake: %v", c, err)
+		log.Debug(fmt.Sprintf("%v failed proto handshake: %v", c, err))
 		c.close(err)
 		return
 	}
 	if phs.ID != c.id {
-		glog.V(logger.Debug).Infof("%v wrong proto handshake identity: %x", c, phs.ID[:8])
+		log.Debug(fmt.Sprintf("%v wrong proto handshake identity: %x", c, phs.ID[:8]))
 		c.close(DiscUnexpectedIdentity)
 		return
 	}
 	c.caps, c.name = phs.Caps, phs.Name
 	if err := srv.checkpoint(c, srv.addpeer); err != nil {
-		glog.V(logger.Debug).Infof("%v failed checkpoint addpeer: %v", c, err)
+		log.Debug(fmt.Sprintf("%v failed checkpoint addpeer: %v", c, err))
 		c.close(err)
 		return
 	}
@@ -734,7 +733,7 @@ func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
 // it waits until the Peer logic returns and removes
 // the peer.
 func (srv *Server) runPeer(p *Peer) {
-	glog.V(logger.Debug).Infof("Added %v\n", p)
+	log.Debug(fmt.Sprintf("Added %v", p))
 
 	if srv.newPeerHook != nil {
 		srv.newPeerHook(p)
@@ -744,7 +743,7 @@ func (srv *Server) runPeer(p *Peer) {
 	// before returning, so this send should not select on srv.quit.
 	srv.delpeer <- p
 
-	glog.V(logger.Debug).Infof("Removed %v (%v)\n", p, discreason)
+	log.Debug(fmt.Sprintf("Removed %v (%v)", p, discreason))
 }
 
 // NodeInfo represents a short summary of the information known about the host.
diff --git a/p2p/server_test.go b/p2p/server_test.go
index 313d086ec4af8864efbdd1569bd635d1e941525c..46611c7d0235243c6cc85e048cb4597143e436b7 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -31,8 +31,7 @@ import (
 )
 
 func init() {
-	// glog.SetV(6)
-	// glog.SetToStderr(true)
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 }
 
 type testTransport struct {
diff --git a/rpc/client.go b/rpc/client.go
index 269eb78c86ee05751f36aee0393805403a331d48..78a6fe7899a4f6871185335c81f6487655c79f86 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -30,8 +30,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/net/context"
 )
 
@@ -408,9 +407,9 @@ func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMes
 func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error {
 	select {
 	case c.requestOp <- op:
-		if glog.V(logger.Detail) {
-			glog.Info("sending ", msg)
-		}
+		log.Trace("", "msg", log.Lazy{Fn: func() string {
+			return fmt.Sprint("sending ", msg)
+		}})
 		err := c.write(ctx, msg)
 		c.sendDone <- err
 		return err
@@ -445,7 +444,7 @@ func (c *Client) write(ctx context.Context, msg interface{}) error {
 func (c *Client) reconnect(ctx context.Context) error {
 	newconn, err := c.connectFunc(ctx)
 	if err != nil {
-		glog.V(logger.Detail).Infof("reconnect failed: %v", err)
+		log.Trace(fmt.Sprintf("reconnect failed: %v", err))
 		return err
 	}
 	select {
@@ -496,31 +495,31 @@ func (c *Client) dispatch(conn net.Conn) {
 			for _, msg := range batch {
 				switch {
 				case msg.isNotification():
-					if glog.V(logger.Detail) {
-						glog.Info("<-readResp: notification ", msg)
-					}
+					log.Trace("", "msg", log.Lazy{Fn: func() string {
+						return fmt.Sprint("<-readResp: notification ", msg)
+					}})
 					c.handleNotification(msg)
 				case msg.isResponse():
-					if glog.V(logger.Detail) {
-						glog.Info("<-readResp: response ", msg)
-					}
+					log.Trace("", "msg", log.Lazy{Fn: func() string {
+						return fmt.Sprint("<-readResp: response ", msg)
+					}})
 					c.handleResponse(msg)
 				default:
-					if glog.V(logger.Debug) {
-						glog.Error("<-readResp: dropping weird message", msg)
-					}
+					log.Debug("", "msg", log.Lazy{Fn: func() string {
+						return fmt.Sprint("<-readResp: dropping weird message", msg)
+					}})
 					// TODO: maybe close
 				}
 			}
 
 		case err := <-c.readErr:
-			glog.V(logger.Debug).Infof("<-readErr: %v", err)
+			log.Debug(fmt.Sprintf("<-readErr: %v", err))
 			c.closeRequestOps(err)
 			conn.Close()
 			reading = false
 
 		case newconn := <-c.reconnected:
-			glog.V(logger.Debug).Infof("<-reconnected: (reading=%t) %v", reading, conn.RemoteAddr())
+			log.Debug(fmt.Sprintf("<-reconnected: (reading=%t) %v", reading, conn.RemoteAddr()))
 			if reading {
 				// Wait for the previous read loop to exit. This is a rare case.
 				conn.Close()
@@ -577,7 +576,7 @@ func (c *Client) closeRequestOps(err error) {
 
 func (c *Client) handleNotification(msg *jsonrpcMessage) {
 	if msg.Method != notificationMethod {
-		glog.V(logger.Debug).Info("dropping non-subscription message: ", msg)
+		log.Debug(fmt.Sprint("dropping non-subscription message: ", msg))
 		return
 	}
 	var subResult struct {
@@ -585,7 +584,7 @@ func (c *Client) handleNotification(msg *jsonrpcMessage) {
 		Result json.RawMessage `json:"result"`
 	}
 	if err := json.Unmarshal(msg.Params, &subResult); err != nil {
-		glog.V(logger.Debug).Info("dropping invalid subscription message: ", msg)
+		log.Debug(fmt.Sprint("dropping invalid subscription message: ", msg))
 		return
 	}
 	if c.subs[subResult.ID] != nil {
@@ -596,7 +595,7 @@ func (c *Client) handleNotification(msg *jsonrpcMessage) {
 func (c *Client) handleResponse(msg *jsonrpcMessage) {
 	op := c.respWait[string(msg.ID)]
 	if op == nil {
-		glog.V(logger.Debug).Infof("unsolicited response %v", msg)
+		log.Debug(fmt.Sprintf("unsolicited response %v", msg))
 		return
 	}
 	delete(c.respWait, string(msg.ID))
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 476c8c6f36cb0fb5ea21dcfc25a7b7338b4d31a1..407ed9c0652d42c121549888ad34df1ee1b4e41a 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -30,8 +30,7 @@ import (
 	"time"
 
 	"github.com/davecgh/go-spew/spew"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/net/context"
 )
 
@@ -147,10 +146,6 @@ func testClientCancel(transport string, t *testing.T) {
 	// You probably want to run with -parallel 1 or comment out
 	// the call to t.Parallel if you enable the logging.
 	t.Parallel()
-	// glog.SetV(6)
-	// glog.SetToStderr(true)
-	// defer glog.SetToStderr(false)
-	// glog.Infoln("testing ", transport)
 
 	// The actual test starts here.
 	var (
@@ -181,7 +176,7 @@ func testClientCancel(transport string, t *testing.T) {
 			// The key thing here is that no call will ever complete successfully.
 			err := client.CallContext(ctx, nil, "service_sleep", 2*maxContextCancelTimeout)
 			if err != nil {
-				glog.V(logger.Debug).Infoln("got expected error:", err)
+				log.Debug(fmt.Sprint("got expected error: ", err))
 			} else {
 				t.Errorf("no error for call with %v wait time", timeout)
 			}
@@ -532,7 +527,7 @@ func (l *flakeyListener) Accept() (net.Conn, error) {
 	if err == nil {
 		timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout)))
 		time.AfterFunc(timeout, func() {
-			glog.V(logger.Debug).Infof("killing conn %v after %v", c.LocalAddr(), timeout)
+			log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
 			c.Close()
 		})
 	}
diff --git a/rpc/ipc.go b/rpc/ipc.go
index c2b9e38710f8d0c31e4ff89d3d0d126a60e329e7..3c86d711ce9b654bda66deb696591c11756907cb 100644
--- a/rpc/ipc.go
+++ b/rpc/ipc.go
@@ -17,10 +17,11 @@
 package rpc
 
 import (
+	"fmt"
 	"net"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
+
 	"golang.org/x/net/context"
 )
 
@@ -37,7 +38,7 @@ func (srv *Server) ServeListener(l net.Listener) error {
 		if err != nil {
 			return err
 		}
-		glog.V(logger.Detail).Infoln("accepted conn", conn.RemoteAddr())
+		log.Trace(fmt.Sprint("accepted conn ", conn.RemoteAddr()))
 		go srv.ServeCodec(NewJSONCodec(conn), OptionMethodInvocation|OptionSubscriptions)
 	}
 }
diff --git a/rpc/json.go b/rpc/json.go
index 61a4ddf432bc2d3423d6cf61b61d43c186d59405..c777fab6eb4033277a47fb9127ed277fcd51785e 100644
--- a/rpc/json.go
+++ b/rpc/json.go
@@ -26,8 +26,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -171,7 +170,7 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) {
 			// first param must be subscription name
 			var subscribeMethod [1]string
 			if err := json.Unmarshal(in.Payload, &subscribeMethod); err != nil {
-				glog.V(logger.Debug).Infof("Unable to parse subscription method: %v\n", err)
+				log.Debug(fmt.Sprintf("Unable to parse subscription method: %v\n", err))
 				return nil, false, &invalidRequestError{"Unable to parse subscription request"}
 			}
 
@@ -224,7 +223,7 @@ func parseBatchRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error)
 				// first param must be subscription name
 				var subscribeMethod [1]string
 				if err := json.Unmarshal(r.Payload, &subscribeMethod); err != nil {
-					glog.V(logger.Debug).Infof("Unable to parse subscription method: %v\n", err)
+					log.Debug(fmt.Sprintf("Unable to parse subscription method: %v\n", err))
 					return nil, false, &invalidRequestError{"Unable to parse subscription request"}
 				}
 
diff --git a/rpc/server.go b/rpc/server.go
index 996c6370046e8b2a703db41c6821e6b851283af2..4f9ce541e5c3c5ad1fc57fbe9bc96c3ffb8ae625 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -22,8 +22,8 @@ import (
 	"runtime"
 	"sync/atomic"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
+
 	"golang.org/x/net/context"
 	"gopkg.in/fatih/set.v0"
 )
@@ -149,7 +149,7 @@ func (s *Server) serveRequest(codec ServerCodec, singleShot bool, options CodecO
 			const size = 64 << 10
 			buf := make([]byte, size)
 			buf = buf[:runtime.Stack(buf, false)]
-			glog.Errorln(string(buf))
+			log.Error(string(buf))
 		}
 
 		s.codecsMu.Lock()
@@ -180,7 +180,7 @@ func (s *Server) serveRequest(codec ServerCodec, singleShot bool, options CodecO
 	for atomic.LoadInt32(&s.run) == 1 {
 		reqs, batch, err := s.readRequest(codec)
 		if err != nil {
-			glog.V(logger.Debug).Infof("read error %v\n", err)
+			log.Debug(fmt.Sprintf("read error %v\n", err))
 			codec.Write(codec.CreateErrorResponse(nil, err))
 			return nil
 		}
@@ -236,7 +236,7 @@ func (s *Server) ServeSingleRequest(codec ServerCodec, options CodecOption) {
 // close all codecs which will cancel pending requests/subscriptions.
 func (s *Server) Stop() {
 	if atomic.CompareAndSwapInt32(&s.run, 1, 0) {
-		glog.V(logger.Debug).Infoln("RPC Server shutdown initiatied")
+		log.Debug(fmt.Sprint("RPC Server shutdown initiated"))
 		s.codecsMu.Lock()
 		defer s.codecsMu.Unlock()
 		s.codecs.Each(func(c interface{}) bool {
@@ -341,7 +341,7 @@ func (s *Server) exec(ctx context.Context, codec ServerCodec, req *serverRequest
 	}
 
 	if err := codec.Write(response); err != nil {
-		glog.V(logger.Error).Infof("%v\n", err)
+		log.Error(fmt.Sprintf("%v\n", err))
 		codec.Close()
 	}
 
@@ -368,7 +368,7 @@ func (s *Server) execBatch(ctx context.Context, codec ServerCodec, requests []*s
 	}
 
 	if err := codec.Write(responses); err != nil {
-		glog.V(logger.Error).Infof("%v\n", err)
+		log.Error(fmt.Sprintf("%v\n", err))
 		codec.Close()
 	}
 
diff --git a/rpc/websocket.go b/rpc/websocket.go
index fc3cd0709908e01655f6a012f2fd417df35cf174..f4271fda87b85ca1f60a5c18e8793cdde8615f73 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -25,8 +25,8 @@ import (
 	"os"
 	"strings"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
+
 	"golang.org/x/net/context"
 	"golang.org/x/net/websocket"
 	"gopkg.in/fatih/set.v0"
@@ -76,14 +76,14 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
 		}
 	}
 
-	glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())
+	log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v\n", origins.List()))
 
 	f := func(cfg *websocket.Config, req *http.Request) error {
 		origin := strings.ToLower(req.Header.Get("Origin"))
 		if allowAllOrigins || origins.Has(origin) {
 			return nil
 		}
-		glog.V(logger.Debug).Infof("origin '%s' not allowed on WS-RPC interface\n", origin)
+		log.Debug(fmt.Sprintf("origin '%s' not allowed on WS-RPC interface\n", origin))
 		return fmt.Errorf("origin %s not allowed", origin)
 	}
 
diff --git a/swarm/api/api.go b/swarm/api/api.go
index f92a14653d7f0552446483c563d2d1f4a1b06e1c..3376fb4845f82b01a136a2a3de4ee8e9b371e360 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -25,8 +25,7 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -72,9 +71,9 @@ type ErrResolve error
 
 // DNS Resolver
 func (self *Api) Resolve(hostPort string, nameresolver bool) (storage.Key, error) {
-	glog.V(logger.Detail).Infof("Resolving : %v", hostPort)
+	log.Trace(fmt.Sprintf("Resolving : %v", hostPort))
 	if hashMatcher.MatchString(hostPort) || self.dns == nil {
-		glog.V(logger.Detail).Infof("host is a contentHash: '%v'", hostPort)
+		log.Trace(fmt.Sprintf("host is a contentHash: '%v'", hostPort))
 		return storage.Key(common.Hex2Bytes(hostPort)), nil
 	}
 	if !nameresolver {
@@ -83,9 +82,9 @@ func (self *Api) Resolve(hostPort string, nameresolver bool) (storage.Key, error
 	contentHash, err := self.dns.Resolve(hostPort)
 	if err != nil {
 		err = ErrResolve(err)
-		glog.V(logger.Warn).Infof("DNS error : %v", err)
+		log.Warn(fmt.Sprintf("DNS error : %v", err))
 	}
-	glog.V(logger.Detail).Infof("host lookup: %v -> %v", err)
+	log.Trace(fmt.Sprintf("host lookup: %v -> %v", hostPort, contentHash))
 	return contentHash[:], err
 }
 func Parse(uri string) (hostPort, path string) {
@@ -110,7 +109,7 @@ func Parse(uri string) (hostPort, path string) {
 			path = parts[i]
 		}
 	}
-	glog.V(logger.Debug).Infof("host: '%s', path '%s' requested.", hostPort, path)
+	log.Debug(fmt.Sprintf("host: '%s', path '%s' requested.", hostPort, path))
 	return
 }
 
@@ -118,7 +117,7 @@ func (self *Api) parseAndResolve(uri string, nameresolver bool) (key storage.Key
 	hostPort, path = Parse(uri)
 	//resolving host and port
 	contentHash, err := self.Resolve(hostPort, nameresolver)
-	glog.V(logger.Debug).Infof("Resolved '%s' to contentHash: '%s', path: '%s'", uri, contentHash, path)
+	log.Debug(fmt.Sprintf("Resolved '%s' to contentHash: '%s', path: '%s'", uri, contentHash, path))
 	return contentHash[:], hostPort, path, err
 }
 
@@ -152,11 +151,11 @@ func (self *Api) Get(uri string, nameresolver bool) (reader storage.LazySectionR
 	quitC := make(chan bool)
 	trie, err := loadManifest(self.dpa, key, quitC)
 	if err != nil {
-		glog.V(logger.Warn).Infof("loadManifestTrie error: %v", err)
+		log.Warn(fmt.Sprintf("loadManifestTrie error: %v", err))
 		return
 	}
 
-	glog.V(logger.Detail).Infof("getEntry(%s)", path)
+	log.Trace(fmt.Sprintf("getEntry(%s)", path))
 
 	entry, _ := trie.getEntry(path)
 
@@ -164,12 +163,12 @@ func (self *Api) Get(uri string, nameresolver bool) (reader storage.LazySectionR
 		key = common.Hex2Bytes(entry.Hash)
 		status = entry.Status
 		mimeType = entry.ContentType
-		glog.V(logger.Detail).Infof("content lookup key: '%v' (%v)", key, mimeType)
+		log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType))
 		reader = self.dpa.Retrieve(key)
 	} else {
 		status = http.StatusNotFound
 		err = fmt.Errorf("manifest entry for '%s' not found", path)
-		glog.V(logger.Warn).Infof("%v", err)
+		log.Warn(fmt.Sprintf("%v", err))
 	}
 	return
 }
diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go
index b098119591848cc99fe823ade53b438063f95a59..16e90dd329cf4ec8128be0c7d31bc41ec078ca25 100644
--- a/swarm/api/api_test.go
+++ b/swarm/api/api_test.go
@@ -17,13 +17,13 @@
 package api
 
 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -76,7 +76,7 @@ func checkResponse(t *testing.T, resp *testResponse, exp *Response) {
 
 // func expResponse(content []byte, mimeType string, status int) *Response {
 func expResponse(content string, mimeType string, status int) *Response {
-	glog.V(logger.Detail).Infof("expected content (%v): %v ", len(content), content)
+	log.Trace(fmt.Sprintf("expected content (%v): %v ", len(content), content))
 	return &Response{mimeType, status, int64(len(content)), content}
 }
 
@@ -91,7 +91,7 @@ func testGet(t *testing.T, api *Api, bzzhash string) *testResponse {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	glog.V(logger.Detail).Infof("reader size: %v ", size)
+	log.Trace(fmt.Sprintf("reader size: %v ", size))
 	s := make([]byte, size)
 	_, err = reader.Read(s)
 	if err != io.EOF {
diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go
index 96aaf36df729c4212cabac6f0fbdb59a3bcb1eb8..c2583e265f6d81a8d93d841829b13dc7b6354c2d 100644
--- a/swarm/api/filesystem.go
+++ b/swarm/api/filesystem.go
@@ -26,8 +26,7 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -63,7 +62,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
 	var start int
 	if stat.IsDir() {
 		start = len(localpath)
-		glog.V(logger.Debug).Infof("uploading '%s'", localpath)
+		log.Debug(fmt.Sprintf("uploading '%s'", localpath))
 		err = filepath.Walk(localpath, func(path string, info os.FileInfo, err error) error {
 			if (err == nil) && !info.IsDir() {
 				//fmt.Printf("lp %s  path %s\n", localpath, path)
@@ -198,7 +197,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
 	quitC := make(chan bool)
 	trie, err := loadManifest(self.api.dpa, key, quitC)
 	if err != nil {
-		glog.V(logger.Warn).Infof("fs.Download: loadManifestTrie error: %v", err)
+		log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err))
 		return err
 	}
 
@@ -212,7 +211,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
 
 	prevPath := lpath
 	err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
-		glog.V(logger.Detail).Infof("fs.Download: %#v", entry)
+		log.Trace(fmt.Sprintf("fs.Download: %#v", entry))
 
 		key = common.Hex2Bytes(entry.Hash)
 		path := lpath + "/" + suffix
diff --git a/swarm/api/http/roundtripper.go b/swarm/api/http/roundtripper.go
index 7b5bbc88386691aa552e8bfc79f2213b2c2358a9..328177a218b5349faa1f1c090e383a7850963f00 100644
--- a/swarm/api/http/roundtripper.go
+++ b/swarm/api/http/roundtripper.go
@@ -20,8 +20,7 @@ import (
 	"fmt"
 	"net/http"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 /*
@@ -58,7 +57,7 @@ func (self *RoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err
 		host = "localhost"
 	}
 	url := fmt.Sprintf("http://%s:%s/%s:/%s/%s", host, self.Port, req.Proto, req.URL.Host, req.URL.Path)
-	glog.V(logger.Info).Infof("roundtripper: proxying request '%s' to '%s'", req.RequestURI, url)
+	log.Info(fmt.Sprintf("roundtripper: proxying request '%s' to '%s'", req.RequestURI, url))
 	reqProxy, err := http.NewRequest(req.Method, url, req.Body)
 	if err != nil {
 		return nil, err
diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go
index b1cea60fc8d9eca0fd7754ee5566d5dba7a06ada..a61696678075f615cdbc961d158307563e999f78 100644
--- a/swarm/api/http/server.go
+++ b/swarm/api/http/server.go
@@ -21,6 +21,7 @@ package http
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"net/http"
 	"regexp"
@@ -29,8 +30,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/rs/cors"
@@ -86,7 +86,7 @@ func StartHttpServer(api *api.Api, server *Server) {
 	hdlr := c.Handler(serveMux)
 
 	go http.ListenAndServe(server.Addr, hdlr)
-	glog.V(logger.Info).Infof("Swarm HTTP proxy started on localhost:%s", server.Addr)
+	log.Info(fmt.Sprintf("Swarm HTTP proxy started on localhost:%s", server.Addr))
 }
 
 func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
@@ -100,13 +100,13 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 	//			return
 	//		}
 	//	}
-	glog.V(logger.Debug).Infof("HTTP %s request URL: '%s', Host: '%s', Path: '%s', Referer: '%s', Accept: '%s'", r.Method, r.RequestURI, requestURL.Host, requestURL.Path, r.Referer(), r.Header.Get("Accept"))
+	log.Debug(fmt.Sprintf("HTTP %s request URL: '%s', Host: '%s', Path: '%s', Referer: '%s', Accept: '%s'", r.Method, r.RequestURI, requestURL.Host, requestURL.Path, r.Referer(), r.Header.Get("Accept")))
 	uri := requestURL.Path
 	var raw, nameresolver bool
 	var proto string
 
 	// HTTP-based URL protocol handler
-	glog.V(logger.Debug).Infof("BZZ request URI: '%s'", uri)
+	log.Debug(fmt.Sprintf("BZZ request URI: '%s'", uri))
 
 	path := bzzPrefix.ReplaceAllStringFunc(uri, func(p string) string {
 		proto = p
@@ -115,24 +115,18 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 
 	// protocol identification (ugly)
 	if proto == "" {
-		if glog.V(logger.Error) {
-			glog.Errorf(
-				"[BZZ] Swarm: Protocol error in request `%s`.",
-				uri,
-			)
-			http.Error(w, "Invalid request URL: need access protocol (bzz:/, bzzr:/, bzzi:/) as first element in path.", http.StatusBadRequest)
-			return
-		}
+		log.Error(fmt.Sprintf("[BZZ] Swarm: Protocol error in request `%s`.", uri))
+		http.Error(w, "Invalid request URL: need access protocol (bzz:/, bzzr:/, bzzi:/) as first element in path.", http.StatusBadRequest)
+		return
 	}
 	if len(proto) > 4 {
 		raw = proto[1:5] == "bzzr"
 		nameresolver = proto[1:5] != "bzzi"
 	}
 
-	glog.V(logger.Debug).Infof(
-		"[BZZ] Swarm: %s request over protocol %s '%s' received.",
-		r.Method, proto, path,
-	)
+	log.Debug("", "msg", log.Lazy{Fn: func() string {
+		return fmt.Sprintf("[BZZ] Swarm: %s request over protocol %s '%s' received.", r.Method, proto, path)
+	}})
 
 	switch {
 	case r.Method == "POST" || r.Method == "PUT":
@@ -142,7 +136,7 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 		}
 		key, err := a.Store(io.LimitReader(r.Body, r.ContentLength), r.ContentLength, nil)
 		if err == nil {
-			glog.V(logger.Debug).Infof("Content for %v stored", key.Log())
+			log.Debug(fmt.Sprintf("Content for %v stored", key.Log()))
 		} else {
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
@@ -164,10 +158,10 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 				path = api.RegularSlashes(path)
 				mime := r.Header.Get("Content-Type")
 				// TODO proper root hash separation
-				glog.V(logger.Debug).Infof("Modify '%s' to store %v as '%s'.", path, key.Log(), mime)
+				log.Debug(fmt.Sprintf("Modify '%s' to store %v as '%s'.", path, key.Log(), mime))
 				newKey, err := a.Modify(path, common.Bytes2Hex(key), mime, nameresolver)
 				if err == nil {
-					glog.V(logger.Debug).Infof("Swarm replaced manifest by '%s'", newKey)
+					log.Debug(fmt.Sprintf("Swarm replaced manifest by '%s'", newKey))
 					w.Header().Set("Content-Type", "text/plain")
 					http.ServeContent(w, r, "", time.Now(), bytes.NewReader([]byte(newKey)))
 				} else {
@@ -182,10 +176,10 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 			return
 		} else {
 			path = api.RegularSlashes(path)
-			glog.V(logger.Debug).Infof("Delete '%s'.", path)
+			log.Debug(fmt.Sprintf("Delete '%s'.", path))
 			newKey, err := a.Modify(path, "", "", nameresolver)
 			if err == nil {
-				glog.V(logger.Debug).Infof("Swarm replaced manifest by '%s'", newKey)
+				log.Debug(fmt.Sprintf("Swarm replaced manifest by '%s'", newKey))
 				w.Header().Set("Content-Type", "text/plain")
 				http.ServeContent(w, r, "", time.Now(), bytes.NewReader([]byte(newKey)))
 			} else {
@@ -206,7 +200,7 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 			if parsedurl == path {
 				key, err := a.Resolve(parsedurl, nameresolver)
 				if err != nil {
-					glog.V(logger.Error).Infof("%v", err)
+					log.Error(fmt.Sprintf("%v", err))
 					http.Error(w, err.Error(), http.StatusBadRequest)
 					return
 				}
@@ -226,12 +220,12 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 			quitC := make(chan bool)
 			size, err := reader.Size(quitC)
 			if err != nil {
-				glog.V(logger.Debug).Infof("Could not determine size: %v", err.Error())
+				log.Debug(fmt.Sprintf("Could not determine size: %v", err.Error()))
 				//An error on call to Size means we don't have the root chunk
 				http.Error(w, err.Error(), http.StatusNotFound)
 				return
 			}
-			glog.V(logger.Debug).Infof("Reading %d bytes.", size)
+			log.Debug(fmt.Sprintf("Reading %d bytes.", size))
 
 			// setting mime type
 			qv := requestURL.Query()
@@ -242,11 +236,11 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 
 			w.Header().Set("Content-Type", mimeType)
 			http.ServeContent(w, r, uri, forever(), reader)
-			glog.V(logger.Debug).Infof("Serve raw content '%s' (%d bytes) as '%s'", uri, size, mimeType)
+			log.Debug(fmt.Sprintf("Serve raw content '%s' (%d bytes) as '%s'", uri, size, mimeType))
 
 			// retrieve path via manifest
 		} else {
-			glog.V(logger.Debug).Infof("Structured GET request '%s' received.", uri)
+			log.Debug(fmt.Sprintf("Structured GET request '%s' received.", uri))
 			// add trailing slash, if missing
 			if rootDocumentUri.MatchString(uri) {
 				http.Redirect(w, r, path+"/", http.StatusFound)
@@ -255,10 +249,10 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 			reader, mimeType, status, err := a.Get(path, nameresolver)
 			if err != nil {
 				if _, ok := err.(api.ErrResolve); ok {
-					glog.V(logger.Debug).Infof("%v", err)
+					log.Debug(fmt.Sprintf("%v", err))
 					status = http.StatusBadRequest
 				} else {
-					glog.V(logger.Debug).Infof("error retrieving '%s': %v", uri, err)
+					log.Debug(fmt.Sprintf("error retrieving '%s': %v", uri, err))
 					status = http.StatusNotFound
 				}
 				http.Error(w, err.Error(), status)
@@ -274,12 +268,12 @@ func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
 			quitC := make(chan bool)
 			size, err := reader.Size(quitC)
 			if err != nil {
-				glog.V(logger.Debug).Infof("Could not determine size: %v", err.Error())
+				log.Debug(fmt.Sprintf("Could not determine size: %v", err.Error()))
 				//An error on call to Size means we don't have the root chunk
 				http.Error(w, err.Error(), http.StatusNotFound)
 				return
 			}
-			glog.V(logger.Debug).Infof("Served '%s' (%d bytes) as '%s' (status code: %v)", uri, size, mimeType, status)
+			log.Debug(fmt.Sprintf("Served '%s' (%d bytes) as '%s' (status code: %v)", uri, size, mimeType, status))
 
 			http.ServeContent(w, r, path, forever(), reader)
 
@@ -293,11 +287,11 @@ func (self *sequentialReader) ReadAt(target []byte, off int64) (n int, err error
 	self.lock.Lock()
 	// assert self.pos <= off
 	if self.pos > off {
-		glog.V(logger.Error).Infof("non-sequential read attempted from sequentialReader; %d > %d", self.pos, off)
+		log.Error(fmt.Sprintf("non-sequential read attempted from sequentialReader; %d > %d", self.pos, off))
 		panic("Non-sequential read attempt")
 	}
 	if self.pos != off {
-		glog.V(logger.Debug).Infof("deferred read in POST at position %d, offset %d.", self.pos, off)
+		log.Debug(fmt.Sprintf("deferred read in POST at position %d, offset %d.", self.pos, off))
 		wait := make(chan bool)
 		self.ahead[off] = wait
 		self.lock.Unlock()
@@ -313,9 +307,9 @@ func (self *sequentialReader) ReadAt(target []byte, off int64) (n int, err error
 	for localPos < len(target) {
 		n, err = self.reader.Read(target[localPos:])
 		localPos += n
-		glog.V(logger.Debug).Infof("Read %d bytes into buffer size %d from POST, error %v.", n, len(target), err)
+		log.Debug(fmt.Sprintf("Read %d bytes into buffer size %d from POST, error %v.", n, len(target), err))
 		if err != nil {
-			glog.V(logger.Debug).Infof("POST stream's reading terminated with %v.", err)
+			log.Debug(fmt.Sprintf("POST stream's reading terminated with %v.", err))
 			for i := range self.ahead {
 				self.ahead[i] <- true
 				delete(self.ahead, i)
@@ -327,7 +321,7 @@ func (self *sequentialReader) ReadAt(target []byte, off int64) (n int, err error
 	}
 	wait := self.ahead[self.pos]
 	if wait != nil {
-		glog.V(logger.Debug).Infof("deferred read in POST at position %d triggered.", self.pos)
+		log.Debug(fmt.Sprintf("deferred read in POST at position %d triggered.", self.pos))
 		delete(self.ahead, self.pos)
 		close(wait)
 	}
diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go
index 36c0b0436202062e3d06af71b292335fe55724ab..199f259e1f5d681b6d0fd4301959db89a6199d17 100644
--- a/swarm/api/manifest.go
+++ b/swarm/api/manifest.go
@@ -23,8 +23,7 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -52,7 +51,7 @@ type manifestTrieEntry struct {
 
 func loadManifest(dpa *storage.DPA, hash storage.Key, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
 
-	glog.V(logger.Detail).Infof("manifest lookup key: '%v'.", hash.Log())
+	log.Trace(fmt.Sprintf("manifest lookup key: '%v'.", hash.Log()))
 	// retrieve manifest via DPA
 	manifestReader := dpa.Retrieve(hash)
 	return readManifest(manifestReader, hash, dpa, quitC)
@@ -70,23 +69,23 @@ func readManifest(manifestReader storage.LazySectionReader, hash storage.Key, dp
 	manifestData := make([]byte, size)
 	read, err := manifestReader.Read(manifestData)
 	if int64(read) < size {
-		glog.V(logger.Detail).Infof("Manifest %v not found.", hash.Log())
+		log.Trace(fmt.Sprintf("Manifest %v not found.", hash.Log()))
 		if err == nil {
 			err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
 		}
 		return
 	}
 
-	glog.V(logger.Detail).Infof("Manifest %v retrieved", hash.Log())
+	log.Trace(fmt.Sprintf("Manifest %v retrieved", hash.Log()))
 	man := manifestJSON{}
 	err = json.Unmarshal(manifestData, &man)
 	if err != nil {
 		err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err)
-		glog.V(logger.Detail).Infof("%v", err)
+		log.Trace(fmt.Sprintf("%v", err))
 		return
 	}
 
-	glog.V(logger.Detail).Infof("Manifest %v has %d entries.", hash.Log(), len(man.Entries))
+	log.Trace(fmt.Sprintf("Manifest %v has %d entries.", hash.Log(), len(man.Entries)))
 
 	trie = &manifestTrie{
 		dpa: dpa,
@@ -286,7 +285,7 @@ func (self *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func
 
 func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) {
 
-	glog.V(logger.Detail).Infof("findPrefixOf(%s)", path)
+	log.Trace(fmt.Sprintf("findPrefixOf(%s)", path))
 
 	if len(path) == 0 {
 		return self.entries[256], 0
@@ -298,9 +297,9 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
 		return self.entries[256], 0
 	}
 	epl := len(entry.Path)
-	glog.V(logger.Detail).Infof("path = %v  entry.Path = %v  epl = %v", path, entry.Path, epl)
+	log.Trace(fmt.Sprintf("path = %v  entry.Path = %v  epl = %v", path, entry.Path, epl))
 	if (len(path) >= epl) && (path[:epl] == entry.Path) {
-		glog.V(logger.Detail).Infof("entry.ContentType = %v", entry.ContentType)
+		log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType))
 		if entry.ContentType == manifestType {
 			err := self.loadSubTrie(entry, quitC)
 			if err != nil {
diff --git a/swarm/network/depo.go b/swarm/network/depo.go
index cd0a17ffa70e6378415080e195cca129ce352548..340128aa876d82e2728e9fd2383bb636faf04ed0 100644
--- a/swarm/network/depo.go
+++ b/swarm/network/depo.go
@@ -19,10 +19,10 @@ package network
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -60,8 +60,8 @@ func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
 			missing = append(missing, req)
 		}
 	}
-	glog.V(logger.Debug).Infof("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State)
-	glog.V(logger.Detail).Infof("Depo.HandleUnsyncedKeysMsg: received %v", unsynced)
+	log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State))
+	log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced))
 	// send delivery request with missing keys
 	err = p.deliveryRequest(missing)
 	if err != nil {
@@ -81,7 +81,7 @@ func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
 func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error {
 	deliver := req.Deliver
 	// queue the actual delivery of a chunk ()
-	glog.V(logger.Detail).Infof("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver)
+	log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver))
 	for _, sreq := range deliver {
 		// TODO: look up in cache here or in deliveries
 		// priorities are taken from the message so the remote party can
@@ -104,19 +104,19 @@ func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
 	chunk, err := self.localStore.Get(req.Key)
 	switch {
 	case err != nil:
-		glog.V(logger.Detail).Infof("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key)
+		log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key))
 		// not found in memory cache, ie., a genuine store request
 		// create chunk
 		chunk = storage.NewChunk(req.Key, nil)
 
 	case chunk.SData == nil:
 		// found chunk in memory store, needs the data, validate now
-		glog.V(logger.Detail).Infof("Depo.HandleStoreRequest: %v. request entry found", req)
+		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req))
 
 	default:
 		// data is found, store request ignored
 		// this should update access count?
-		glog.V(logger.Detail).Infof("Depo.HandleStoreRequest: %v found locally. ignore.", req)
+		log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req))
 		islocal = true
 		//return
 	}
@@ -126,7 +126,7 @@ func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
 	if !bytes.Equal(hasher.Sum(nil), req.Key) {
 		// data does not validate, ignore
 		// TODO: peer should be penalised/dropped?
-		glog.V(logger.Warn).Infof("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req)
+		log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req))
 		return
 	}
 
@@ -136,7 +136,7 @@ func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
 	// update chunk with size and data
 	chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size  + at least one byte of data)
 	chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8]))
-	glog.V(logger.Detail).Infof("delivery of %v from %v", chunk, p)
+	log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p))
 	chunk.Source = p
 	self.netStore.Put(chunk)
 }
@@ -152,7 +152,7 @@ func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
 		err = p.swap.Add(1)
 	}
 	if err != nil {
-		glog.V(logger.Warn).Infof("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err)
+		log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err))
 		return
 	}
 
@@ -163,7 +163,7 @@ func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
 	req = self.strategyUpdateRequest(chunk.Req, req)
 	// check if we can immediately deliver
 	if chunk.SData != nil {
-		glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log())
+		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log()))
 
 		if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size {
 			sreq := &storeRequestMsgData{
@@ -174,16 +174,16 @@ func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
 			}
 			p.syncer.addRequest(sreq, DeliverReq)
 		} else {
-			glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log())
+			log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log()))
 		}
 	} else {
-		glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log())
+		log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log()))
 	}
 }
 
 // add peer request the chunk and decides the timeout for the response if still searching
 func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) {
-	glog.V(logger.Detail).Infof("Depo.strategyUpdateRequest: key %v", origReq.Key.Log())
+	log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log()))
 	// we do not create an alternative one
 	req = origReq
 	if rs != nil {
@@ -211,7 +211,7 @@ only add if less than requesterCount peers forwarded the same request id so far
 note this is done irrespective of status (searching or found)
 */
 func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
-	glog.V(logger.Detail).Infof("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.from, req.Id)
+	log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer %v to req.Id %v", req.Key.Log(), req.from, req.Id))
 	list := rs.Requesters[req.Id]
 	rs.Requesters[req.Id] = append(list, req)
 }
diff --git a/swarm/network/forwarding.go b/swarm/network/forwarding.go
index fef79c70bb68af211e60708ee4d36f50318b4f13..88a82a678c1eb76647f0f2e12d51beb48a00fe78 100644
--- a/swarm/network/forwarding.go
+++ b/swarm/network/forwarding.go
@@ -17,11 +17,11 @@
 package network
 
 import (
+	"fmt"
 	"math/rand"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -56,10 +56,10 @@ var searchTimeout = 3 * time.Second
 // logic propagating retrieve requests to peers given by the kademlia hive
 func (self *forwarder) Retrieve(chunk *storage.Chunk) {
 	peers := self.hive.getPeers(chunk.Key, 0)
-	glog.V(logger.Detail).Infof("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers))
+	log.Trace(fmt.Sprintf("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers)))
 OUT:
 	for _, p := range peers {
-		glog.V(logger.Detail).Infof("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p)
+		log.Trace(fmt.Sprintf("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p))
 		for _, recipients := range chunk.Req.Requesters {
 			for _, recipient := range recipients {
 				req := recipient.(*retrieveRequestMsgData)
@@ -80,7 +80,7 @@ OUT:
 			p.retrieve(req)
 			break OUT
 		}
-		glog.V(logger.Warn).Infof("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err)
+		log.Warn(fmt.Sprintf("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err))
 	}
 }
 
@@ -98,14 +98,14 @@ func (self *forwarder) Store(chunk *storage.Chunk) {
 		source = chunk.Source.(*peer)
 	}
 	for _, p := range self.hive.getPeers(chunk.Key, 0) {
-		glog.V(logger.Detail).Infof("forwarder.Store: %v %v", p, chunk)
+		log.Trace(fmt.Sprintf("forwarder.Store: %v %v", p, chunk))
 
 		if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) {
 			n++
 			Deliver(p, msg, PropagateReq)
 		}
 	}
-	glog.V(logger.Detail).Infof("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk)
+	log.Trace(fmt.Sprintf("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk))
 }
 
 // once a chunk is found deliver it to its requesters unless timed out
@@ -123,7 +123,7 @@ func (self *forwarder) Deliver(chunk *storage.Chunk) {
 		for id, r := range requesters {
 			req = r.(*retrieveRequestMsgData)
 			if req.timeout == nil || req.timeout.After(time.Now()) {
-				glog.V(logger.Detail).Infof("forwarder.Deliver: %v -> %v", req.Id, req.from)
+				log.Trace(fmt.Sprintf("forwarder.Deliver: %v -> %v", req.Id, req.from))
 				msg.Id = uint64(id)
 				Deliver(req.from, msg, DeliverReq)
 				n++
@@ -133,7 +133,7 @@ func (self *forwarder) Deliver(chunk *storage.Chunk) {
 				}
 			}
 		}
-		glog.V(logger.Detail).Infof("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n)
+		log.Trace(fmt.Sprintf("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n))
 	}
 }
 
diff --git a/swarm/network/hive.go b/swarm/network/hive.go
index f81761b97d2bcd9f5ad9ca5054a4fdb8c3c36cb6..70652c45094ca610436c0d13d14220179adfaffb 100644
--- a/swarm/network/hive.go
+++ b/swarm/network/hive.go
@@ -23,8 +23,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/swarm/network/kademlia"
@@ -129,7 +128,7 @@ func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPee
 	self.listenAddr = listenAddr
 	err = self.kad.Load(self.path, nil)
 	if err != nil {
-		glog.V(logger.Warn).Infof("Warning: error reading kaddb '%s' (skipping): %v", self.path, err)
+		log.Warn(fmt.Sprintf("Warning: error reading kaddb '%s' (skipping): %v", self.path, err))
 		err = nil
 	}
 	// this loop is doing bootstrapping and maintains a healthy table
@@ -145,7 +144,7 @@ func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPee
 			node, need, proxLimit := self.kad.Suggest()
 
 			if node != nil && len(node.Url) > 0 {
-				glog.V(logger.Detail).Infof("call known bee %v", node.Url)
+				log.Trace(fmt.Sprintf("call known bee %v", node.Url))
 				// enode or any lower level connection address is unnecessary in future
 				// discovery table is used to look it up.
 				connectPeer(node.Url)
@@ -159,21 +158,21 @@ func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPee
 					req := &retrieveRequestMsgData{
 						Key: storage.Key(randAddr[:]),
 					}
-					glog.V(logger.Detail).Infof("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0])
+					log.Trace(fmt.Sprintf("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0]))
 					peers[0].(*peer).retrieve(req)
 				} else {
-					glog.V(logger.Warn).Infof("no peer")
+					log.Warn("no peer")
 				}
-				glog.V(logger.Detail).Infof("buzz kept alive")
+				log.Trace("buzz kept alive")
 			} else {
-				glog.V(logger.Info).Infof("no need for more bees")
+				log.Info("no need for more bees")
 			}
 			select {
 			case self.toggle <- need:
 			case <-self.quit:
 				return
 			}
-			glog.V(logger.Debug).Infof("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount())
+			log.Debug(fmt.Sprintf("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount()))
 		}
 	}()
 	return
@@ -192,7 +191,7 @@ func (self *Hive) keepAlive() {
 			if self.kad.DBCount() > 0 {
 				select {
 				case self.more <- true:
-					glog.V(logger.Debug).Infof("buzz wakeup")
+					log.Debug("buzz wakeup")
 				default:
 				}
 			}
@@ -224,7 +223,7 @@ func (self *Hive) addPeer(p *peer) error {
 		default:
 		}
 	}()
-	glog.V(logger.Detail).Infof("hi new bee %v", p)
+	log.Trace(fmt.Sprintf("hi new bee %v", p))
 	err := self.kad.On(p, loadSync)
 	if err != nil {
 		return err
@@ -235,21 +234,21 @@ func (self *Hive) addPeer(p *peer) error {
 	// to send the 6 byte self lookup
 	// we do not record as request or forward it, just reply with peers
 	p.retrieve(&retrieveRequestMsgData{})
-	glog.V(logger.Detail).Infof("'whatsup wheresdaparty' sent to %v", p)
+	log.Trace(fmt.Sprintf("'whatsup wheresdaparty' sent to %v", p))
 
 	return nil
 }
 
 // called after peer disconnected
 func (self *Hive) removePeer(p *peer) {
-	glog.V(logger.Debug).Infof("bee %v removed", p)
+	log.Debug(fmt.Sprintf("bee %v removed", p))
 	self.kad.Off(p, saveSync)
 	select {
 	case self.more <- true:
 	default:
 	}
 	if self.kad.Count() == 0 {
-		glog.V(logger.Debug).Infof("empty, all bees gone")
+		log.Debug("empty, all bees gone")
 	}
 }
 
@@ -265,7 +264,7 @@ func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) {
 
 // disconnects all the peers
 func (self *Hive) DropAll() {
-	glog.V(logger.Info).Infof("dropping all bees")
+	log.Info("dropping all bees")
 	for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
 		node.Drop()
 	}
@@ -290,7 +289,7 @@ func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) {
 	var nrs []*kademlia.NodeRecord
 	for _, p := range req.Peers {
 		if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil {
-			glog.V(logger.Detail).Infof("invalid peer IP %v from %v: %v", from.remoteAddr.IP, p.IP, err)
+			log.Trace(fmt.Sprintf("invalid peer IP %v from %v: %v", from.remoteAddr.IP, p.IP, err))
 			continue
 		}
 		nrs = append(nrs, newNodeRecord(p))
@@ -326,7 +325,7 @@ func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
 		return fmt.Errorf("invalid type")
 	}
 	if record.Meta == nil {
-		glog.V(logger.Debug).Infof("no sync state for node record %v setting default", record)
+		log.Debug(fmt.Sprintf("no sync state for node record %v setting default", record))
 		p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
 		return nil
 	}
@@ -334,7 +333,7 @@ func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
 	if err != nil {
 		return fmt.Errorf("error decoding kddb record meta info into a sync state: %v", err)
 	}
-	glog.V(logger.Detail).Infof("sync state for node record %v read from Meta: %s", record, string(*(record.Meta)))
+	log.Trace(fmt.Sprintf("sync state for node record %v read from Meta: %s", record, string(*(record.Meta))))
 	p.syncState = state
 	return err
 }
@@ -344,10 +343,10 @@ func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
 	if p, ok := node.(*peer); ok {
 		meta, err := encodeSync(p.syncState)
 		if err != nil {
-			glog.V(logger.Warn).Infof("error saving sync state for %v: %v", node, err)
+			log.Warn(fmt.Sprintf("error saving sync state for %v: %v", node, err))
 			return
 		}
-		glog.V(logger.Detail).Infof("saved sync state for %v: %s", node, string(*meta))
+		log.Trace(fmt.Sprintf("saved sync state for %v: %s", node, string(*meta)))
 		record.Meta = meta
 	}
 }
@@ -370,7 +369,7 @@ func (self *Hive) peers(req *retrieveRequestMsgData) {
 			for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
 				addrs = append(addrs, peer.remoteAddr)
 			}
-			glog.V(logger.Debug).Infof("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log())
+			log.Debug(fmt.Sprintf("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log()))
 
 			peersData := &peersMsgData{
 				Peers: addrs,
diff --git a/swarm/network/kademlia/kaddb.go b/swarm/network/kademlia/kaddb.go
index 53a7db451c6488bb37df090b8eddd563c9672e22..f4279917ea74efafde426213e651897b1a81f88f 100644
--- a/swarm/network/kademlia/kaddb.go
+++ b/swarm/network/kademlia/kaddb.go
@@ -24,8 +24,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 type NodeData interface {
@@ -88,12 +87,12 @@ func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord {
 			Addr: a,
 			Url:  url,
 		}
-		glog.V(logger.Info).Infof("add new record %v to kaddb", record)
+		log.Info(fmt.Sprintf("add new record %v to kaddb", record))
 		// insert in kaddb
 		self.index[a] = record
 		self.Nodes[index] = append(self.Nodes[index], record)
 	} else {
-		glog.V(logger.Info).Infof("found record %v in kaddb", record)
+		log.Info(fmt.Sprintf("found record %v in kaddb", record))
 	}
 	// update last seen time
 	record.setSeen()
@@ -121,13 +120,13 @@ func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) {
 			copy(newnodes[:], nodes[:dbcursor])
 			newnodes[dbcursor] = node
 			copy(newnodes[dbcursor+1:], nodes[dbcursor:])
-			glog.V(logger.Detail).Infof("new nodes: %v (keys: %v)\nnodes: %v", newnodes, nodes)
+			log.Trace(fmt.Sprintf("new nodes: %v\nnodes: %v", newnodes, nodes))
 			self.Nodes[index] = newnodes
 			n++
 		}
 	}
 	if n > 0 {
-		glog.V(logger.Debug).Infof("%d/%d node records (new/known)", n, len(nrs))
+		log.Debug(fmt.Sprintf("%d/%d node records (new/known)", n, len(nrs)))
 	}
 }
 
@@ -207,13 +206,13 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
 
 				// skip already connected nodes
 				if node.node != nil {
-					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow))
+					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow)))
 					continue ROW
 				}
 
 				// if node is scheduled to connect
 				if time.Time(node.After).After(time.Now()) {
-					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)
+					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
 					continue ROW
 				}
 
@@ -224,17 +223,17 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
 				if delta > self.purgeInterval {
 					// remove node
 					purge[cursor] = true
-					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen)
+					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen))
 					continue ROW
 				}
 
-				glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)
+				log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
 
 				// scheduling next check
 				interval = time.Duration(delta * time.Duration(self.connRetryExp))
 				after = time.Now().Add(interval)
 
-				glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval)
+				log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval))
 				node.After = after
 				found = true
 			} // ROW
@@ -295,9 +294,9 @@ func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error {
 	}
 	err = ioutil.WriteFile(path, data, os.ModePerm)
 	if err != nil {
-		glog.V(logger.Warn).Infof("unable to save kaddb with %v nodes to %v: err", n, path, err)
+		log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err))
 	} else {
-		glog.V(logger.Info).Infof("saved kaddb with %v nodes to %v", n, path)
+		log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path))
 	}
 	return err
 }
@@ -338,7 +337,7 @@ func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err erro
 		}
 		self.delete(po, purge)
 	}
-	glog.V(logger.Info).Infof("loaded kaddb with %v nodes from %v", n, path)
+	log.Info(fmt.Sprintf("loaded kaddb with %v nodes from %v", n, path))
 
 	return
 }
diff --git a/swarm/network/kademlia/kademlia.go b/swarm/network/kademlia/kademlia.go
index 87c57cefe6d37f78919361c208bcf785a958d3b9..8d731c038842c65957d639f81f0ceb52935d5200 100644
--- a/swarm/network/kademlia/kademlia.go
+++ b/swarm/network/kademlia/kademlia.go
@@ -23,8 +23,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -117,7 +116,7 @@ func (self *Kademlia) DBCount() int {
 // On is the entry point called when a new nodes is added
 // unsafe in that node is not checked to be already active node (to be called once)
 func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error) {
-	glog.V(logger.Warn).Infof("%v", self)
+	log.Warn(fmt.Sprintf("%v", self))
 	defer self.lock.Unlock()
 	self.lock.Lock()
 
@@ -126,11 +125,11 @@ func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error
 
 	if cb != nil {
 		err = cb(record, node)
-		glog.V(logger.Detail).Infof("cb(%v, %v) ->%v", record, node, err)
+		log.Trace(fmt.Sprintf("cb(%v, %v) ->%v", record, node, err))
 		if err != nil {
 			return fmt.Errorf("unable to add node %v, callback error: %v", node.Addr(), err)
 		}
-		glog.V(logger.Debug).Infof("add node record %v with node %v", record, node)
+		log.Debug(fmt.Sprintf("add node record %v with node %v", record, node))
 	}
 
 	// insert in kademlia table of active nodes
@@ -139,7 +138,7 @@ func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error
 	// TODO: give priority to peers with active traffic
 	if len(bucket) < self.BucketSize { // >= allows us to add peers beyond the bucketsize limitation
 		self.buckets[index] = append(bucket, node)
-		glog.V(logger.Debug).Infof("add node %v to table", node)
+		log.Debug(fmt.Sprintf("add node %v to table", node))
 		self.setProxLimit(index, true)
 		record.node = node
 		self.count++
@@ -159,10 +158,10 @@ func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error
 		}
 	}
 	if replaced == nil {
-		glog.V(logger.Debug).Infof("all peers wanted, PO%03d bucket full", index)
+		log.Debug(fmt.Sprintf("all peers wanted, PO%03d bucket full", index))
 		return fmt.Errorf("bucket full")
 	}
-	glog.V(logger.Debug).Infof("node %v replaced by %v (idle for %v  > %v)", replaced, node, idle, self.MaxIdleInterval)
+	log.Debug(fmt.Sprintf("node %v replaced by %v (idle for %v  > %v)", replaced, node, idle, self.MaxIdleInterval))
 	replaced.Drop()
 	// actually replace in the row. When off(node) is called, the peer is no longer in the row
 	bucket[pos] = node
@@ -195,7 +194,7 @@ func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) {
 	}
 	record.node = nil
 	self.count--
-	glog.V(logger.Debug).Infof("remove node %v from table, population now is %v", node, self.count)
+	log.Debug(fmt.Sprintf("remove node %v from table, population now is %v", node, self.count))
 
 	return
 }
@@ -223,7 +222,7 @@ func (self *Kademlia) setProxLimit(r int, on bool) {
 			self.proxLimit++
 			curr = len(self.buckets[self.proxLimit])
 
-			glog.V(logger.Detail).Infof("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)
+			log.Trace(fmt.Sprintf("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r))
 		}
 		return
 	}
@@ -237,7 +236,7 @@ func (self *Kademlia) setProxLimit(r int, on bool) {
 		//
 		self.proxLimit--
 		self.proxSize += len(self.buckets[self.proxLimit])
-		glog.V(logger.Detail).Infof("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)
+		log.Trace(fmt.Sprintf("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r))
 	}
 }
 
@@ -257,7 +256,7 @@ func (self *Kademlia) FindClosest(target Address, max int) []Node {
 	po := self.proximityBin(target)
 	index := po
 	step := 1
-	glog.V(logger.Detail).Infof("serving %v nodes at %v (PO%02d)", max, index, po)
+	log.Trace(fmt.Sprintf("serving %v nodes at %v (PO%02d)", max, index, po))
 
 	// if max is set to 0, just want a full bucket, dynamic number
 	min := max
@@ -276,7 +275,7 @@ func (self *Kademlia) FindClosest(target Address, max int) []Node {
 			n++
 		}
 		// terminate if index reached the bottom or enough peers > min
-		glog.V(logger.Detail).Infof("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po)
+		log.Trace(fmt.Sprintf("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po))
 		if n >= min && (step < 0 || max == 0) {
 			break
 		}
@@ -287,7 +286,7 @@ func (self *Kademlia) FindClosest(target Address, max int) []Node {
 		}
 		index += step
 	}
-	glog.V(logger.Detail).Infof("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po)
+	log.Trace(fmt.Sprintf("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po))
 	return r.nodes
 }
 
diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go
index 763fb0b8e2e4aa1b4e04f5806417e3860d87cd7d..44787947c3a990d0f0b9ebe6d73b9bda1c8f33fc 100644
--- a/swarm/network/protocol.go
+++ b/swarm/network/protocol.go
@@ -38,8 +38,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/contracts/chequebook"
 	"github.com/ethereum/go-ethereum/errs"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
@@ -201,7 +200,7 @@ func run(requestDb *storage.LDBDatabase, depo StorageHandler, backend chequebook
 	// the main forever loop that handles incoming requests
 	for {
 		if self.hive.blockRead {
-			glog.V(logger.Warn).Infof("Cannot read network")
+			log.Warn("Cannot read network")
 			time.Sleep(100 * time.Millisecond)
 			continue
 		}
@@ -221,7 +220,7 @@ func (self *bzz) Drop() {
 // one cycle of the main forever loop that handles and dispatches incoming messages
 func (self *bzz) handle() error {
 	msg, err := self.rw.ReadMsg()
-	glog.V(logger.Debug).Infof("<- %v", msg)
+	log.Debug(fmt.Sprintf("<- %v", msg))
 	if err != nil {
 		return err
 	}
@@ -236,7 +235,7 @@ func (self *bzz) handle() error {
 	case statusMsg:
 		// no extra status message allowed. The one needed already handled by
 		// handleStatus
-		glog.V(logger.Debug).Infof("Status message: %v", msg)
+		log.Debug(fmt.Sprintf("Status message: %v", msg))
 		return self.protoError(ErrExtraStatusMsg, "")
 
 	case storeRequestMsg:
@@ -250,7 +249,7 @@ func (self *bzz) handle() error {
 		}
 		// last Active time is set only when receiving chunks
 		self.lastActive = time.Now()
-		glog.V(logger.Detail).Infof("incoming store request: %s", req.String())
+		log.Trace(fmt.Sprintf("incoming store request: %s", req.String()))
 		// swap accounting is done within forwarding
 		self.storage.HandleStoreRequestMsg(&req, &peer{bzz: self})
 
@@ -263,7 +262,7 @@ func (self *bzz) handle() error {
 		req.from = &peer{bzz: self}
 		// if request is lookup and not to be delivered
 		if req.isLookup() {
-			glog.V(logger.Detail).Infof("self lookup for %v: responding with peers only...", req.from)
+			log.Trace(fmt.Sprintf("self lookup for %v: responding with peers only...", req.from))
 		} else if req.Key == nil {
 			return self.protoError(ErrDecode, "protocol handler: req.Key == nil || req.Timeout == nil")
 		} else {
@@ -281,7 +280,7 @@ func (self *bzz) handle() error {
 			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
 		}
 		req.from = &peer{bzz: self}
-		glog.V(logger.Detail).Infof("<- peer addresses: %v", req)
+		log.Trace(fmt.Sprintf("<- peer addresses: %v", req))
 		self.hive.HandlePeersMsg(&req, &peer{bzz: self})
 
 	case syncRequestMsg:
@@ -289,7 +288,7 @@ func (self *bzz) handle() error {
 		if err := msg.Decode(&req); err != nil {
 			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
 		}
-		glog.V(logger.Debug).Infof("<- sync request: %v", req)
+		log.Debug(fmt.Sprintf("<- sync request: %v", req))
 		self.lastActive = time.Now()
 		self.sync(req.SyncState)
 
@@ -299,7 +298,7 @@ func (self *bzz) handle() error {
 		if err := msg.Decode(&req); err != nil {
 			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
 		}
-		glog.V(logger.Debug).Infof("<- unsynced keys : %s", req.String())
+		log.Debug(fmt.Sprintf("<- unsynced keys : %s", req.String()))
 		err := self.storage.HandleUnsyncedKeysMsg(&req, &peer{bzz: self})
 		self.lastActive = time.Now()
 		if err != nil {
@@ -313,7 +312,7 @@ func (self *bzz) handle() error {
 		if err := msg.Decode(&req); err != nil {
 			return self.protoError(ErrDecode, "<-msg %v: %v", msg, err)
 		}
-		glog.V(logger.Debug).Infof("<- delivery request: %s", req.String())
+		log.Debug(fmt.Sprintf("<- delivery request: %s", req.String()))
 		err := self.storage.HandleDeliveryRequestMsg(&req, &peer{bzz: self})
 		self.lastActive = time.Now()
 		if err != nil {
@@ -327,7 +326,7 @@ func (self *bzz) handle() error {
 			if err := msg.Decode(&req); err != nil {
 				return self.protoError(ErrDecode, "<- %v: %v", msg, err)
 			}
-			glog.V(logger.Debug).Infof("<- payment: %s", req.String())
+			log.Debug(fmt.Sprintf("<- payment: %s", req.String()))
 			self.swap.Receive(int(req.Units), req.Promise)
 		}
 
@@ -385,7 +384,7 @@ func (self *bzz) handleStatus() (err error) {
 	}
 
 	self.remoteAddr = self.peerAddr(status.Addr)
-	glog.V(logger.Detail).Infof("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr())
+	log.Trace(fmt.Sprintf("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr()))
 
 	if self.swapEnabled {
 		// set remote profile for accounting
@@ -395,14 +394,14 @@ func (self *bzz) handleStatus() (err error) {
 		}
 	}
 
-	glog.V(logger.Info).Infof("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId)
+	log.Info(fmt.Sprintf("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId))
 	err = self.hive.addPeer(&peer{bzz: self})
 	if err != nil {
 		return self.protoError(ErrUnwanted, "%v", err)
 	}
 
 	// hive sets syncstate so sync should start after node added
-	glog.V(logger.Info).Infof("syncronisation request sent with %v", self.syncState)
+	log.Info(fmt.Sprintf("syncronisation request sent with %v", self.syncState))
 	self.syncRequest()
 
 	return nil
@@ -421,7 +420,7 @@ func (self *bzz) sync(state *syncState) error {
 	// an explicitly received nil syncstate disables syncronisation
 	if state == nil {
 		self.syncEnabled = false
-		glog.V(logger.Warn).Infof("syncronisation disabled for peer %v", self)
+		log.Warn(fmt.Sprintf("syncronisation disabled for peer %v", self))
 		state = &syncState{DbSyncState: &storage.DbSyncState{}, Synced: true}
 	} else {
 		state.synced = make(chan bool)
@@ -430,7 +429,7 @@ func (self *bzz) sync(state *syncState) error {
 			state.Start = storage.Key(start[:])
 			state.Stop = storage.Key(stop[:])
 		}
-		glog.V(logger.Debug).Infof("syncronisation requested by peer %v at state %v", self, state)
+		log.Debug(fmt.Sprintf("syncronisation requested by peer %v at state %v", self, state))
 	}
 	var err error
 	self.syncer, err = newSyncer(
@@ -443,7 +442,7 @@ func (self *bzz) sync(state *syncState) error {
 	if err != nil {
 		return self.protoError(ErrSync, "%v", err)
 	}
-	glog.V(logger.Detail).Infof("syncer set for peer %v", self)
+	log.Trace(fmt.Sprintf("syncer set for peer %v", self))
 	return nil
 }
 
@@ -490,11 +489,11 @@ func (self *bzz) store(req *storeRequestMsgData) error {
 func (self *bzz) syncRequest() error {
 	req := &syncRequestMsgData{}
 	if self.hive.syncEnabled {
-		glog.V(logger.Debug).Infof("syncronisation request to peer %v at state %v", self, self.syncState)
+		log.Debug(fmt.Sprintf("syncronisation request to peer %v at state %v", self, self.syncState))
 		req.SyncState = self.syncState
 	}
 	if self.syncState == nil {
-		glog.V(logger.Warn).Infof("syncronisation disabled for peer %v at state %v", self, self.syncState)
+		log.Warn(fmt.Sprintf("syncronisation disabled for peer %v at state %v", self, self.syncState))
 	}
 	return self.send(syncRequestMsg, req)
 }
@@ -534,7 +533,7 @@ func (self *bzz) peers(req *peersMsgData) error {
 
 func (self *bzz) protoError(code int, format string, params ...interface{}) (err *errs.Error) {
 	err = self.errors.New(code, format, params...)
-	err.Log(glog.V(logger.Info))
+	log.Info(err.Error())
 	return
 }
 
@@ -542,7 +541,7 @@ func (self *bzz) send(msg uint64, data interface{}) error {
 	if self.hive.blockWrite {
 		return fmt.Errorf("network write blocked")
 	}
-	glog.V(logger.Detail).Infof("-> %v: %v (%T) to %v", msg, data, data, self)
+	log.Trace(fmt.Sprintf("-> %v: %v (%T) to %v", msg, data, data, self))
 	err := p2p.Send(self.rw, msg, data)
 	if err != nil {
 		self.Drop()
diff --git a/swarm/network/syncdb.go b/swarm/network/syncdb.go
index cef32610f6ffc1e7e91271275478960b788e2b35..7216da525e2e0340fb39102d4952222500ae2fe1 100644
--- a/swarm/network/syncdb.go
+++ b/swarm/network/syncdb.go
@@ -20,8 +20,7 @@ import (
 	"encoding/binary"
 	"fmt"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -80,7 +79,7 @@ func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSi
 		batch:       make(chan chan int),
 		dbBatchSize: dbBatchSize,
 	}
-	glog.V(logger.Detail).Infof("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority)
+	log.Trace(fmt.Sprintf("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority))
 
 	// starts the main forever loop reading from buffer
 	go syncdb.bufferRead(deliver)
@@ -126,9 +125,9 @@ func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) {
 	var counter uint64
 	if err == nil {
 		counter = binary.BigEndian.Uint64(data)
-		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter)
+		log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter))
 	} else {
-		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter)
+		log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter))
 	}
 
 LOOP:
@@ -142,7 +141,7 @@ LOOP:
 			// if syncdb is stopped. In this case we need to save the item to the db
 			more = deliver(req, self.quit)
 			if !more {
-				glog.V(logger.Debug).Infof("syncDb[%v/%v] quit: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
+				log.Debug(fmt.Sprintf("syncDb[%v/%v] quit: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total))
 				// received quit signal, save request currently waiting delivery
 				// by switching to db mode and closing the buffer
 				buffer = nil
@@ -152,12 +151,12 @@ LOOP:
 				break      // break from select, this item will be written to the db
 			}
 			self.total++
-			glog.V(logger.Detail).Infof("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
+			log.Trace(fmt.Sprintf("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total))
 			// by the time deliver returns, there were new writes to the buffer
 			// if buffer contention is detected, switch to db mode which drains
 			// the buffer so no process will block on pushing store requests
 			if len(buffer) == cap(buffer) {
-				glog.V(logger.Debug).Infof("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total)
+				log.Debug(fmt.Sprintf("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total))
 				buffer = nil
 				db = self.buffer
 			}
@@ -170,7 +169,7 @@ LOOP:
 				binary.BigEndian.PutUint64(counterValue, counter)
 				batch.Put(self.counterKey, counterValue) // persist counter in batch
 				self.writeSyncBatch(batch)               // save batch
-				glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority)
+				log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority))
 				break LOOP
 			}
 			self.dbTotal++
@@ -181,7 +180,7 @@ LOOP:
 			if inBatch == 0 && quit != nil {
 				// there was no writes since the last batch so db depleted
 				// switch to buffer mode
-				glog.V(logger.Debug).Infof("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority)
+				log.Debug(fmt.Sprintf("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority))
 				db = nil
 				buffer = self.buffer
 				dbSize <- 0 // indicates to 'caller' that batch has been written
@@ -190,7 +189,7 @@ LOOP:
 			}
 			binary.BigEndian.PutUint64(counterValue, counter)
 			batch.Put(self.counterKey, counterValue)
-			glog.V(logger.Debug).Infof("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue)
+			log.Debug(fmt.Sprintf("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue))
 			batch = self.writeSyncBatch(batch)
 			dbSize <- inBatch // indicates to 'caller' that batch has been written
 			inBatch = 0
@@ -202,7 +201,7 @@ LOOP:
 			db = self.buffer
 			buffer = nil
 			quit = nil
-			glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority)
+			log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority))
 			close(db)
 			continue LOOP
 		}
@@ -210,15 +209,15 @@ LOOP:
 		// only get here if we put req into db
 		entry, err = self.newSyncDbEntry(req, counter)
 		if err != nil {
-			glog.V(logger.Warn).Infof("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err)
+			log.Warn(fmt.Sprintf("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err))
 			continue LOOP
 		}
 		batch.Put(entry.key, entry.val)
-		glog.V(logger.Detail).Infof("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter)
+		log.Trace(fmt.Sprintf("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter))
 		// if just switched to db mode and not quitting, then launch dbRead
 		// in a parallel go routine to send deliveries from db
 		if inDb == 0 && quit != nil {
-			glog.V(logger.Detail).Infof("syncDb[%v/%v] start dbRead")
+			log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority))
 			go self.dbRead(true, counter, deliver)
 		}
 		inDb++
@@ -229,7 +228,7 @@ LOOP:
 			batch = self.writeSyncBatch(batch)
 		}
 	}
-	glog.V(logger.Info).Infof("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter)
+	log.Info(fmt.Sprintf("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter))
 	close(self.done)
 }
 
@@ -237,7 +236,7 @@ LOOP:
 func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch {
 	err := self.db.Write(batch)
 	if err != nil {
-		glog.V(logger.Warn).Infof("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err)
+		log.Warn(fmt.Sprintf("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err))
 		return batch
 	}
 	return new(leveldb.Batch)
@@ -311,7 +310,7 @@ func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}
 			continue
 		}
 		del = new(leveldb.Batch)
-		glog.V(logger.Detail).Infof("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt)
+		log.Trace(fmt.Sprintf("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt))
 
 		for n = 0; !useBatches || n < cnt; it.Next() {
 			copy(key, it.Key())
@@ -323,11 +322,11 @@ func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}
 			val := make([]byte, 40)
 			copy(val, it.Value())
 			entry = &syncDbEntry{key, val}
-			// glog.V(logger.Detail).Infof("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total)
+			// log.Trace(fmt.Sprintf("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total))
 			more = fun(entry, self.quit)
 			if !more {
 				// quit received when waiting to deliver entry, the entry will not be deleted
-				glog.V(logger.Detail).Infof("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt)
+				log.Trace(fmt.Sprintf("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt))
 				break
 			}
 			// since subsequent batches of the same db session are indexed incrementally
@@ -337,7 +336,7 @@ func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}
 			n++
 			total++
 		}
-		glog.V(logger.Debug).Infof("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total)
+		log.Debug(fmt.Sprintf("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total))
 		self.db.Write(del) // this could be async called only when db is idle
 		it.Release()
 	}
diff --git a/swarm/network/syncdb_test.go b/swarm/network/syncdb_test.go
index 21453a11029021dc631a8de751682491ff45942e..a9417e1d45a96873b963ee8be38cad3e1d818ee6 100644
--- a/swarm/network/syncdb_test.go
+++ b/swarm/network/syncdb_test.go
@@ -18,6 +18,7 @@ package network
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -25,14 +26,12 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
 func init() {
-	glog.SetV(0)
-	glog.SetToStderr(true)
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlCrit, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 }
 
 type testSyncDb struct {
@@ -83,7 +82,7 @@ func (self *testSyncDb) push(n int) {
 		self.sent = append(self.sent, self.c)
 		self.c++
 	}
-	glog.V(logger.Debug).Infof("pushed %v requests", n)
+	log.Debug(fmt.Sprintf("pushed %v requests", n))
 }
 
 func (self *testSyncDb) draindb() {
@@ -128,7 +127,7 @@ func (self *testSyncDb) expect(n int, db bool) {
 		}
 		if len(self.sent) > self.at && !bytes.Equal(crypto.Keccak256([]byte{byte(self.sent[self.at])}), self.delivered[self.at]) {
 			self.t.Fatalf("expected delivery %v/%v/%v to be hash of  %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db)
-			glog.V(logger.Debug).Infof("%v/%v/%v to be hash of  %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db)
+			log.Debug(fmt.Sprintf("%v/%v/%v to be hash of  %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db))
 		}
 		if !ok && db {
 			self.t.Fatalf("expected delivery %v/%v/%v from db", i, n, self.at)
diff --git a/swarm/network/syncer.go b/swarm/network/syncer.go
index b6b1ea3b607fa31af2921d801db40a68796f3f32..eb932e9277ec1654e4bede6508fe2ad061a6093c 100644
--- a/swarm/network/syncer.go
+++ b/swarm/network/syncer.go
@@ -22,8 +22,7 @@ import (
 	"fmt"
 	"path/filepath"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
@@ -209,7 +208,7 @@ func newSyncer(
 		// initialise a syncdb instance for each priority queue
 		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
 	}
-	glog.V(logger.Info).Infof("syncer started: %v", state)
+	log.Info(fmt.Sprintf("syncer started: %v", state))
 	// launch chunk delivery service
 	go self.syncDeliveries()
 	// launch sync task manager
@@ -270,14 +269,14 @@ func (self *syncer) sync() {
 
 	// 0. first replay stale requests from request db
 	if state.SessionAt == 0 {
-		glog.V(logger.Debug).Infof("syncer[%v]: nothing to sync", self.key.Log())
+		log.Debug(fmt.Sprintf("syncer[%v]: nothing to sync", self.key.Log()))
 		return
 	}
-	glog.V(logger.Debug).Infof("syncer[%v]: start replaying stale requests from request db", self.key.Log())
+	log.Debug(fmt.Sprintf("syncer[%v]: start replaying stale requests from request db", self.key.Log()))
 	for p := priorities - 1; p >= 0; p-- {
 		self.queues[p].dbRead(false, 0, self.replay())
 	}
-	glog.V(logger.Debug).Infof("syncer[%v]: done replaying stale requests from request db", self.key.Log())
+	log.Debug(fmt.Sprintf("syncer[%v]: done replaying stale requests from request db", self.key.Log()))
 
 	// unless peer is synced sync unfinished history beginning on
 	if !state.Synced {
@@ -286,7 +285,7 @@ func (self *syncer) sync() {
 		if !storage.IsZeroKey(state.Latest) {
 			// 1. there is unfinished earlier sync
 			state.Start = state.Latest
-			glog.V(logger.Debug).Infof("syncer[%v]: start syncronising backlog (unfinished sync: %v)", self.key.Log(), state)
+			log.Debug(fmt.Sprintf("syncer[%v]: start syncronising backlog (unfinished sync: %v)", self.key.Log(), state))
 			// blocks while the entire history upto state is synced
 			self.syncState(state)
 			if state.Last < state.SessionAt {
@@ -298,7 +297,7 @@ func (self *syncer) sync() {
 		// 2. sync up to last disconnect1
 		if state.First < state.LastSeenAt {
 			state.Last = state.LastSeenAt
-			glog.V(logger.Debug).Infof("syncer[%v]: start syncronising history upto last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state)
+			log.Debug(fmt.Sprintf("syncer[%v]: start syncronising history upto last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state))
 			self.syncState(state)
 			state.First = state.LastSeenAt
 		}
@@ -313,11 +312,11 @@ func (self *syncer) sync() {
 	// if there have been new chunks since last session
 	if state.LastSeenAt < state.SessionAt {
 		state.Last = state.SessionAt
-		glog.V(logger.Debug).Infof("syncer[%v]: start syncronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state)
+		log.Debug(fmt.Sprintf("syncer[%v]: start syncronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state))
 		// blocks until state syncing is finished
 		self.syncState(state)
 	}
-	glog.V(logger.Info).Infof("syncer[%v]: syncing all history complete", self.key.Log())
+	log.Info(fmt.Sprintf("syncer[%v]: syncing all history complete", self.key.Log()))
 
 }
 
@@ -333,7 +332,7 @@ func (self *syncer) syncState(state *syncState) {
 // stop quits both request processor and saves the request cache to disk
 func (self *syncer) stop() {
 	close(self.quit)
-	glog.V(logger.Detail).Infof("syncer[%v]: stop and save sync request db backlog", self.key.Log())
+	log.Trace(fmt.Sprintf("syncer[%v]: stop and save sync request db backlog", self.key.Log()))
 	for _, db := range self.queues {
 		db.stop()
 	}
@@ -366,7 +365,7 @@ func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error)
 func (self *syncer) syncHistory(state *syncState) chan interface{} {
 	var n uint
 	history := make(chan interface{})
-	glog.V(logger.Debug).Infof("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop)
+	log.Debug(fmt.Sprintf("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop))
 	it := self.dbAccess.iterator(state)
 	if it != nil {
 		go func() {
@@ -382,13 +381,13 @@ func (self *syncer) syncHistory(state *syncState) chan interface{} {
 				// blocking until history channel is read from
 				case history <- storage.Key(key):
 					n++
-					glog.V(logger.Detail).Infof("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n)
+					log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
 					state.Latest = key
 				case <-self.quit:
 					return
 				}
 			}
-			glog.V(logger.Debug).Infof("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n)
+			log.Debug(fmt.Sprintf("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n))
 		}()
 	}
 	return history
@@ -438,14 +437,14 @@ LOOP:
 			for priority = High; priority >= 0; priority-- {
 				// the first priority channel that is non-empty will be assigned to keys
 				if len(self.keys[priority]) > 0 {
-					glog.V(logger.Detail).Infof("syncer[%v]: reading request with	priority %v", self.key.Log(), priority)
+					log.Trace(fmt.Sprintf("syncer[%v]: reading request with	priority %v", self.key.Log(), priority))
 					keys = self.keys[priority]
 					break PRIORITIES
 				}
-				glog.V(logger.Detail).Infof("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low]))
+				log.Trace(fmt.Sprintf("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low])))
 				// if the input queue is empty on this level, resort to history if there is any
 				if uint(priority) == histPrior && history != nil {
-					glog.V(logger.Detail).Infof("syncer[%v]: reading history for %v", self.key.Log(), self.key)
+					log.Trace(fmt.Sprintf("syncer[%v]: reading history for %v", self.key.Log(), self.key))
 					keys = history
 					break PRIORITIES
 				}
@@ -455,7 +454,7 @@ LOOP:
 		// if peer ready to receive but nothing to send
 		if keys == nil && deliveryRequest == nil {
 			// if no items left and switch to waiting mode
-			glog.V(logger.Detail).Infof("syncer[%v]: buffers consumed. Waiting", self.key.Log())
+			log.Trace(fmt.Sprintf("syncer[%v]: buffers consumed. Waiting", self.key.Log()))
 			newUnsyncedKeys = self.newUnsyncedKeys
 		}
 
@@ -476,15 +475,15 @@ LOOP:
 			// (all nonhistorical outgoing traffic sheduled and persisted
 			state.LastSeenAt = self.dbAccess.counter()
 			state.Latest = storage.ZeroKey
-			glog.V(logger.Detail).Infof("syncer[%v]: sending %v", self.key.Log(), unsynced)
+			log.Trace(fmt.Sprintf("syncer[%v]: sending %v", self.key.Log(), unsynced))
 			//  send the unsynced keys
 			stateCopy := *state
 			err := self.unsyncedKeys(unsynced, &stateCopy)
 			if err != nil {
-				glog.V(logger.Warn).Infof("syncer[%v]: unable to send unsynced keys: %v", err)
+				log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err))
 			}
 			self.state = state
-			glog.V(logger.Debug).Infof("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy)
+			log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy))
 			unsynced = nil
 			keys = nil
 		}
@@ -495,7 +494,7 @@ LOOP:
 			break LOOP
 		case req, more = <-keys:
 			if keys == history && !more {
-				glog.V(logger.Detail).Infof("syncer[%v]: syncing history segment complete", self.key.Log())
+				log.Trace(fmt.Sprintf("syncer[%v]: syncing history segment complete", self.key.Log()))
 				// history channel is closed, waiting for new state (called from sync())
 				syncStates = self.syncStates
 				state.Synced = true // this signals that the  current segment is complete
@@ -508,7 +507,7 @@ LOOP:
 				history = nil
 			}
 		case <-deliveryRequest:
-			glog.V(logger.Detail).Infof("syncer[%v]: peer ready to receive", self.key.Log())
+			log.Trace(fmt.Sprintf("syncer[%v]: peer ready to receive", self.key.Log()))
 
 			// this 1 cap channel can wake up the loop
 			// signaling that peer is ready to receive unsynced Keys
@@ -516,7 +515,7 @@ LOOP:
 			deliveryRequest = nil
 
 		case <-newUnsyncedKeys:
-			glog.V(logger.Detail).Infof("syncer[%v]: new unsynced keys available", self.key.Log())
+			log.Trace(fmt.Sprintf("syncer[%v]: new unsynced keys available", self.key.Log()))
 			// this 1 cap channel can wake up the loop
 			// signals that data is available to send if peer is ready to receive
 			newUnsyncedKeys = nil
@@ -526,11 +525,11 @@ LOOP:
 			// this resets the state
 			if !more {
 				state = self.state
-				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing complete upto %v)", self.key.Log(), priority, state)
+				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing complete upto %v)", self.key.Log(), priority, state))
 				state.Synced = true
 				syncStates = nil
 			} else {
-				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing history upto %v priority %v)", self.key.Log(), priority, state, histPrior)
+				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing history upto %v priority %v)", self.key.Log(), priority, state, histPrior))
 				state.Synced = false
 				history = self.syncHistory(state)
 				// only one history at a time, only allow another one once the
@@ -542,19 +541,19 @@ LOOP:
 			continue LOOP
 		}
 
-		glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req)
+		log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req))
 		keyCounts[priority]++
 		keyCount++
 		if keys == history {
-			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
+			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
 			historyCnt++
 		}
 		if sreq, err := self.newSyncRequest(req, priority); err == nil {
 			// extract key from req
-			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
+			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
 			unsynced = append(unsynced, sreq)
 		} else {
-			glog.V(logger.Warn).Infof("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, state.Synced, err)
+			log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, err))
 		}
 
 	}
@@ -601,18 +600,18 @@ func (self *syncer) syncDeliveries() {
 		total++
 		msg, err = self.newStoreRequestMsgData(req)
 		if err != nil {
-			glog.V(logger.Warn).Infof("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err)
+			log.Warn(fmt.Sprintf("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err))
 		} else {
 			err = self.store(msg)
 			if err != nil {
-				glog.V(logger.Warn).Infof("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err)
+				log.Warn(fmt.Sprintf("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err))
 			} else {
 				success++
-				glog.V(logger.Detail).Infof("syncer[%v]: %v successfully delivered", self.key.Log(), req)
+				log.Trace(fmt.Sprintf("syncer[%v]: %v successfully delivered", self.key.Log(), req))
 			}
 		}
 		if total%self.SyncBatchSize == 0 {
-			glog.V(logger.Debug).Infof("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low])
+			log.Debug(fmt.Sprintf("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low]))
 		}
 	}
 }
@@ -679,7 +678,7 @@ func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool)
 func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
 	msgdata, err := self.newStoreRequestMsgData(req)
 	if err != nil {
-		glog.V(logger.Warn).Infof("unable to deliver request %v: %v", msgdata, err)
+		log.Warn(fmt.Sprintf("unable to deliver request %v: %v", msgdata, err))
 		return false
 	}
 	select {
diff --git a/swarm/services/swap/swap.go b/swarm/services/swap/swap.go
index f72036d720f8ea62632fa866a50873abc0c21a3c..eb21a598d054debb4d4fe2b2b85f74ceaa0c69a0 100644
--- a/swarm/services/swap/swap.go
+++ b/swarm/services/swap/swap.go
@@ -31,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
 	"golang.org/x/net/context"
 )
@@ -132,19 +131,19 @@ func NewSwap(local *SwapParams, remote *SwapProfile, backend chequebook.Backend,
 	// TODO: monitoring a chequebooks events
 	ok, err = chequebook.ValidateCode(ctx, backend, remote.Contract)
 	if !ok {
-		glog.V(logger.Info).Infof("invalid contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)
+		log.Info(fmt.Sprintf("invalid contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err))
 	} else {
 		// remote contract valid, create inbox
 		in, err = chequebook.NewInbox(local.privateKey, remote.Contract, local.Beneficiary, crypto.ToECDSAPub(common.FromHex(remote.PublicKey)), backend)
 		if err != nil {
-			glog.V(logger.Warn).Infof("unable to set up inbox for chequebook contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)
+			log.Warn(fmt.Sprintf("unable to set up inbox for chequebook contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err))
 		}
 	}
 
 	// check if local chequebook contract is valid
 	ok, err = chequebook.ValidateCode(ctx, backend, local.Contract)
 	if !ok {
-		glog.V(logger.Warn).Infof("unable to set up outbox for peer %v:  chequebook contract (owner: %v): %v)", proto, local.owner.Hex(), err)
+		log.Warn(fmt.Sprintf("unable to set up outbox for peer %v:  chequebook contract (owner: %v): %v)", proto, local.owner.Hex(), err))
 	} else {
 		out = chequebook.NewOutbox(local.Chequebook(), remote.Beneficiary)
 	}
@@ -172,7 +171,7 @@ func NewSwap(local *SwapParams, remote *SwapProfile, backend chequebook.Backend,
 	} else {
 		sell = "selling to peer disabled"
 	}
-	glog.V(logger.Warn).Infof("SWAP arrangement with <%v>: %v; %v)", proto, buy, sell)
+	log.Warn(fmt.Sprintf("SWAP arrangement with <%v>: %v; %v)", proto, buy, sell))
 
 	return
 }
@@ -217,13 +216,13 @@ func (self *SwapParams) deployChequebook(ctx context.Context, backend chequebook
 	opts.Value = self.AutoDepositBuffer
 	opts.Context = ctx
 
-	glog.V(logger.Info).Infof("Deploying new chequebook (owner: %v)", opts.From.Hex())
+	log.Info(fmt.Sprintf("Deploying new chequebook (owner: %v)", opts.From.Hex()))
 	contract, err := deployChequebookLoop(opts, backend)
 	if err != nil {
-		glog.V(logger.Error).Infof("unable to deploy new chequebook: %v", err)
+		log.Error(fmt.Sprintf("unable to deploy new chequebook: %v", err))
 		return err
 	}
-	glog.V(logger.Info).Infof("new chequebook deployed at %v (owner: %v)", contract.Hex(), opts.From.Hex())
+	log.Info(fmt.Sprintf("new chequebook deployed at %v (owner: %v)", contract.Hex(), opts.From.Hex()))
 
 	// need to save config at this point
 	self.lock.Lock()
@@ -231,7 +230,7 @@ func (self *SwapParams) deployChequebook(ctx context.Context, backend chequebook
 	err = self.newChequebookFromContract(path, backend)
 	self.lock.Unlock()
 	if err != nil {
-		glog.V(logger.Warn).Infof("error initialising cheque book (owner: %v): %v", opts.From.Hex(), err)
+		log.Warn(fmt.Sprintf("error initialising cheque book (owner: %v): %v", opts.From.Hex(), err))
 	}
 	return err
 }
@@ -244,11 +243,11 @@ func deployChequebookLoop(opts *bind.TransactOpts, backend chequebook.Backend) (
 			time.Sleep(chequebookDeployDelay)
 		}
 		if _, tx, _, err = contract.DeployChequebook(opts, backend); err != nil {
-			glog.V(logger.Warn).Infof("can't send chequebook deploy tx (try %d): %v", try, err)
+			log.Warn(fmt.Sprintf("can't send chequebook deploy tx (try %d): %v", try, err))
 			continue
 		}
 		if addr, err = bind.WaitDeployed(opts.Context, backend, tx); err != nil {
-			glog.V(logger.Warn).Infof("chequebook deploy error (try %d): %v", try, err)
+			log.Warn(fmt.Sprintf("chequebook deploy error (try %d): %v", try, err))
 			continue
 		}
 		return addr, nil
@@ -271,13 +270,13 @@ func (self *SwapParams) newChequebookFromContract(path string, backend chequeboo
 	if err != nil {
 		self.chbook, err = chequebook.NewChequebook(chbookpath, self.Contract, self.privateKey, backend)
 		if err != nil {
-			glog.V(logger.Warn).Infof("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err)
+			log.Warn(fmt.Sprintf("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err))
 			return fmt.Errorf("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err)
 		}
 	}
 
 	self.chbook.AutoDeposit(self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer)
-	glog.V(logger.Info).Infof("auto deposit ON for %v -> %v: interval = %v, threshold = %v, buffer = %v)", crypto.PubkeyToAddress(*(self.publicKey)).Hex()[:8], self.Contract.Hex()[:8], self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer)
+	log.Info(fmt.Sprintf("auto deposit ON for %v -> %v: interval = %v, threshold = %v, buffer = %v)", crypto.PubkeyToAddress(*(self.publicKey)).Hex()[:8], self.Contract.Hex()[:8], self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer))
 
 	return nil
 }
diff --git a/swarm/services/swap/swap/swap.go b/swarm/services/swap/swap/swap.go
index 9d5da7c3a7ee66c1fe7c1de837c617b34c241540..d04194960f2d3d59a2252f884638d5378b21d58c 100644
--- a/swarm/services/swap/swap/swap.go
+++ b/swarm/services/swap/swap/swap.go
@@ -23,8 +23,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // SwAP Swarm Accounting Protocol with
@@ -130,7 +129,7 @@ func (self *Swap) SetRemote(remote *Profile) {
 		self.Buys = false
 	}
 
-	glog.V(logger.Debug).Infof("<%v> remote profile set: pay at: %v, drop at: %v, buy at: %v, sell at: %v", self.proto, remote.PayAt, remote.DropAt, remote.BuyAt, remote.SellAt)
+	log.Debug(fmt.Sprintf("<%v> remote profile set: pay at: %v, drop at: %v, buy at: %v, sell at: %v", self.proto, remote.PayAt, remote.DropAt, remote.BuyAt, remote.SellAt))
 
 }
 
@@ -148,15 +147,15 @@ func (self *Swap) setParams(local *Params) {
 
 	if self.Sells {
 		self.In.AutoCash(local.AutoCashInterval, local.AutoCashThreshold)
-		glog.V(logger.Info).Infof("<%v> set autocash to every %v, max uncashed limit: %v", self.proto, local.AutoCashInterval, local.AutoCashThreshold)
+		log.Info(fmt.Sprintf("<%v> set autocash to every %v, max uncashed limit: %v", self.proto, local.AutoCashInterval, local.AutoCashThreshold))
 	} else {
-		glog.V(logger.Info).Infof("<%v> autocash off (not selling)", self.proto)
+		log.Info(fmt.Sprintf("<%v> autocash off (not selling)", self.proto))
 	}
 	if self.Buys {
 		self.Out.AutoDeposit(local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer)
-		glog.V(logger.Info).Infof("<%v> set autodeposit to every %v, pay at: %v, buffer: %v", self.proto, local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer)
+		log.Info(fmt.Sprintf("<%v> set autodeposit to every %v, pay at: %v, buffer: %v", self.proto, local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer))
 	} else {
-		glog.V(logger.Info).Infof("<%v> autodeposit off (not buying)", self.proto)
+		log.Info(fmt.Sprintf("<%v> autodeposit off (not buying)", self.proto))
 	}
 }
 
@@ -168,16 +167,16 @@ func (self *Swap) Add(n int) error {
 	self.lock.Lock()
 	self.balance += n
 	if !self.Sells && self.balance > 0 {
-		glog.V(logger.Detail).Infof("<%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance)
+		log.Trace(fmt.Sprintf("<%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance))
 		self.proto.Drop()
 		return fmt.Errorf("[SWAP] <%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance)
 	}
 	if !self.Buys && self.balance < 0 {
-		glog.V(logger.Detail).Infof("<%v> we cannot have debt (balance: %v)", self.proto, self.balance)
+		log.Trace(fmt.Sprintf("<%v> we cannot have debt (balance: %v)", self.proto, self.balance))
 		return fmt.Errorf("[SWAP] <%v> we cannot have debt (balance: %v)", self.proto, self.balance)
 	}
 	if self.balance >= int(self.local.DropAt) {
-		glog.V(logger.Detail).Infof("<%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt)
+		log.Trace(fmt.Sprintf("<%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt))
 		self.proto.Drop()
 		return fmt.Errorf("[SWAP] <%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt)
 	} else if self.balance <= -int(self.remote.PayAt) {
@@ -201,9 +200,9 @@ func (self *Swap) send() {
 		amount.Mul(amount, self.remote.SellAt)
 		promise, err := self.Out.Issue(amount)
 		if err != nil {
-			glog.V(logger.Warn).Infof("<%v> cannot issue cheque (amount: %v, channel: %v): %v", self.proto, amount, self.Out, err)
+			log.Warn(fmt.Sprintf("<%v> cannot issue cheque (amount: %v, channel: %v): %v", self.proto, amount, self.Out, err))
 		} else {
-			glog.V(logger.Warn).Infof("<%v> cheque issued (amount: %v, channel: %v)", self.proto, amount, self.Out)
+			log.Warn(fmt.Sprintf("<%v> cheque issued (amount: %v, channel: %v)", self.proto, amount, self.Out))
 			self.proto.Pay(-self.balance, promise)
 			self.balance = 0
 		}
@@ -229,13 +228,13 @@ func (self *Swap) Receive(units int, promise Promise) error {
 		return fmt.Errorf("invalid amount: %v = %v * %v (units sent in msg * agreed sale unit price) != %v (signed in cheque)", price, units, self.local.SellAt, amount)
 	}
 	if err != nil {
-		glog.V(logger.Detail).Infof("<%v> invalid promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, err)
+		log.Trace(fmt.Sprintf("<%v> invalid promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, err))
 		return err
 	}
 
 	// credit remote peer with units
 	self.Add(-units)
-	glog.V(logger.Detail).Infof("<%v> received promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, promise)
+	log.Trace(fmt.Sprintf("<%v> received promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, promise))
 
 	return nil
 }
diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go
index 2a83f471d30d6394e9775bc80c10c2f2cd023248..44d1dd1f72a223eba95b141ddb1dcf04015d5eb2 100644
--- a/swarm/storage/common_test.go
+++ b/swarm/storage/common_test.go
@@ -24,8 +24,7 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 type brokenLimitedReader struct {
@@ -92,14 +91,14 @@ func testStore(m ChunkStore, l int64, branches int64, t *testing.T) {
 			go func(chunk *Chunk) {
 				storedChunk, err := m.Get(chunk.Key)
 				if err == notFound {
-					glog.V(logger.Detail).Infof("chunk '%v' not found", chunk.Key.Log())
+					log.Trace(fmt.Sprintf("chunk '%v' not found", chunk.Key.Log()))
 				} else if err != nil {
-					glog.V(logger.Detail).Infof("error retrieving chunk %v: %v", chunk.Key.Log(), err)
+					log.Trace(fmt.Sprintf("error retrieving chunk %v: %v", chunk.Key.Log(), err))
 				} else {
 					chunk.SData = storedChunk.SData
 					chunk.Size = storedChunk.Size
 				}
-				glog.V(logger.Detail).Infof("chunk '%v' not found", chunk.Key.Log())
+				log.Trace(fmt.Sprintf("chunk '%v' not found", chunk.Key.Log()))
 				close(chunk.C)
 			}(ch)
 		}
diff --git a/swarm/storage/dbstore.go b/swarm/storage/dbstore.go
index e320cd32795bdbb3afafc684b179112d46299ad4..30925a919ff46a4b3c7b7aa3d00bb6605d2ccf17 100644
--- a/swarm/storage/dbstore.go
+++ b/swarm/storage/dbstore.go
@@ -28,8 +28,7 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -279,7 +278,7 @@ func (s *DbStore) Cleanup() {
 
 		data, err := s.db.Get(getDataKey(index.Idx))
 		if err != nil {
-			glog.V(logger.Warn).Infof("Chunk %x found but could not be accessed: %v", key[:], err)
+			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
 			s.delete(index.Idx, getIndexKey(key[1:]))
 			errorsFound++
 		} else {
@@ -287,7 +286,7 @@ func (s *DbStore) Cleanup() {
 			hasher.Write(data)
 			hash := hasher.Sum(nil)
 			if !bytes.Equal(hash, key[1:]) {
-				glog.V(logger.Warn).Infof("Found invalid chunk. Hash mismatch. hash=%x, key=%x", hash, key[:])
+				log.Warn(fmt.Sprintf("Found invalid chunk. Hash mismatch. hash=%x, key=%x", hash, key[:]))
 				s.delete(index.Idx, getIndexKey(key[1:]))
 				errorsFound++
 			}
@@ -295,7 +294,7 @@ func (s *DbStore) Cleanup() {
 		it.Next()
 	}
 	it.Release()
-	glog.V(logger.Warn).Infof("Found %v errors out of %v entries", errorsFound, total)
+	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
 }
 
 func (s *DbStore) delete(idx uint64, idxKey []byte) {
@@ -324,7 +323,7 @@ func (s *DbStore) Put(chunk *Chunk) {
 		if chunk.dbStored != nil {
 			close(chunk.dbStored)
 		}
-		glog.V(logger.Detail).Infof("Storing to DB: chunk already exists, only update access")
+		log.Trace("Storing to DB: chunk already exists, only update access")
 		return // already exists, only update access
 	}
 
@@ -356,7 +355,7 @@ func (s *DbStore) Put(chunk *Chunk) {
 	if chunk.dbStored != nil {
 		close(chunk.dbStored)
 	}
-	glog.V(logger.Detail).Infof("DbStore.Put: %v. db storage counter: %v ", chunk.Key.Log(), s.dataIdx)
+	log.Trace(fmt.Sprintf("DbStore.Put: %v. db storage counter: %v ", chunk.Key.Log(), s.dataIdx))
 }
 
 // try to find index; if found, update access cnt and return true
@@ -390,7 +389,7 @@ func (s *DbStore) Get(key Key) (chunk *Chunk, err error) {
 		var data []byte
 		data, err = s.db.Get(getDataKey(index.Idx))
 		if err != nil {
-			glog.V(logger.Detail).Infof("DBStore: Chunk %v found but could not be accessed: %v", key.Log(), err)
+			log.Trace(fmt.Sprintf("DBStore: Chunk %v found but could not be accessed: %v", key.Log(), err))
 			s.delete(index.Idx, getIndexKey(key))
 			return
 		}
diff --git a/swarm/storage/dpa.go b/swarm/storage/dpa.go
index 7b3e23cac2f64c7012e8438b76268fc63e2ff72d..e16e4aacb475982f0c0e7a4fd92020297d512ce2 100644
--- a/swarm/storage/dpa.go
+++ b/swarm/storage/dpa.go
@@ -18,12 +18,12 @@ package storage
 
 import (
 	"errors"
+	"fmt"
 	"io"
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 /*
@@ -131,17 +131,17 @@ func (self *DPA) retrieveLoop() {
 	for i := 0; i < maxRetrieveProcesses; i++ {
 		go self.retrieveWorker()
 	}
-	glog.V(logger.Detail).Infof("dpa: retrieve loop spawning %v workers", maxRetrieveProcesses)
+	log.Trace(fmt.Sprintf("dpa: retrieve loop spawning %v workers", maxRetrieveProcesses))
 }
 
 func (self *DPA) retrieveWorker() {
 	for chunk := range self.retrieveC {
-		glog.V(logger.Detail).Infof("dpa: retrieve loop : chunk %v", chunk.Key.Log())
+		log.Trace(fmt.Sprintf("dpa: retrieve loop : chunk %v", chunk.Key.Log()))
 		storedChunk, err := self.Get(chunk.Key)
 		if err == notFound {
-			glog.V(logger.Detail).Infof("chunk %v not found", chunk.Key.Log())
+			log.Trace(fmt.Sprintf("chunk %v not found", chunk.Key.Log()))
 		} else if err != nil {
-			glog.V(logger.Detail).Infof("error retrieving chunk %v: %v", chunk.Key.Log(), err)
+			log.Trace(fmt.Sprintf("error retrieving chunk %v: %v", chunk.Key.Log(), err))
 		} else {
 			chunk.SData = storedChunk.SData
 			chunk.Size = storedChunk.Size
@@ -162,7 +162,7 @@ func (self *DPA) storeLoop() {
 	for i := 0; i < maxStoreProcesses; i++ {
 		go self.storeWorker()
 	}
-	glog.V(logger.Detail).Infof("dpa: store spawning %v workers", maxStoreProcesses)
+	log.Trace(fmt.Sprintf("dpa: store spawning %v workers", maxStoreProcesses))
 }
 
 func (self *DPA) storeWorker() {
@@ -170,7 +170,7 @@ func (self *DPA) storeWorker() {
 	for chunk := range self.storeC {
 		self.Put(chunk)
 		if chunk.wg != nil {
-			glog.V(logger.Detail).Infof("dpa: store processor %v", chunk.Key.Log())
+			log.Trace(fmt.Sprintf("dpa: store processor %v", chunk.Key.Log()))
 			chunk.wg.Done()
 
 		}
@@ -203,17 +203,17 @@ func (self *dpaChunkStore) Get(key Key) (chunk *Chunk, err error) {
 	chunk, err = self.netStore.Get(key)
 	// timeout := time.Now().Add(searchTimeout)
 	if chunk.SData != nil {
-		glog.V(logger.Detail).Infof("DPA.Get: %v found locally, %d bytes", key.Log(), len(chunk.SData))
+		log.Trace(fmt.Sprintf("DPA.Get: %v found locally, %d bytes", key.Log(), len(chunk.SData)))
 		return
 	}
 	// TODO: use self.timer time.Timer and reset with defer disableTimer
 	timer := time.After(searchTimeout)
 	select {
 	case <-timer:
-		glog.V(logger.Detail).Infof("DPA.Get: %v request time out ", key.Log())
+		log.Trace(fmt.Sprintf("DPA.Get: %v request time out ", key.Log()))
 		err = notFound
 	case <-chunk.Req.C:
-		glog.V(logger.Detail).Infof("DPA.Get: %v retrieved, %d bytes (%p)", key.Log(), len(chunk.SData), chunk)
+		log.Trace(fmt.Sprintf("DPA.Get: %v retrieved, %d bytes (%p)", key.Log(), len(chunk.SData), chunk))
 	}
 	return
 }
@@ -222,18 +222,18 @@ func (self *dpaChunkStore) Get(key Key) (chunk *Chunk, err error) {
 func (self *dpaChunkStore) Put(entry *Chunk) {
 	chunk, err := self.localStore.Get(entry.Key)
 	if err != nil {
-		glog.V(logger.Detail).Infof("DPA.Put: %v new chunk. call netStore.Put", entry.Key.Log())
+		log.Trace(fmt.Sprintf("DPA.Put: %v new chunk. call netStore.Put", entry.Key.Log()))
 		chunk = entry
 	} else if chunk.SData == nil {
-		glog.V(logger.Detail).Infof("DPA.Put: %v request entry found", entry.Key.Log())
+		log.Trace(fmt.Sprintf("DPA.Put: %v request entry found", entry.Key.Log()))
 		chunk.SData = entry.SData
 		chunk.Size = entry.Size
 	} else {
-		glog.V(logger.Detail).Infof("DPA.Put: %v chunk already known", entry.Key.Log())
+		log.Trace(fmt.Sprintf("DPA.Put: %v chunk already known", entry.Key.Log()))
 		return
 	}
 	// from this point on the storage logic is the same with network storage requests
-	glog.V(logger.Detail).Infof("DPA.Put %v: %v", self.n, chunk.Key.Log())
+	log.Trace(fmt.Sprintf("DPA.Put %v: %v", self.n, chunk.Key.Log()))
 	self.n++
 	self.netStore.Put(chunk)
 }
diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go
index 7903d33e7078ec92fd4b8ec7994bf6accb244ba9..f96792c6ea20c7c46bbc488e5b2ed17813971d25 100644
--- a/swarm/storage/memstore.go
+++ b/swarm/storage/memstore.go
@@ -19,10 +19,10 @@
 package storage
 
 import (
+	"fmt"
 	"sync"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 const (
@@ -287,11 +287,11 @@ func (s *MemStore) removeOldest() {
 	}
 
 	if node.entry.dbStored != nil {
-		glog.V(logger.Detail).Infof("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log())
+		log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", node.entry.Key.Log()))
 		<-node.entry.dbStored
-		glog.V(logger.Detail).Infof("Memstore Clean: Chunk %v saved to DBStore. Ready to clear from mem.", node.entry.Key.Log())
+		log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. Ready to clear from mem.", node.entry.Key.Log()))
 	} else {
-		glog.V(logger.Detail).Infof("Memstore Clean: Chunk %v already in DB. Ready to delete.", node.entry.Key.Log())
+		log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v already in DB. Ready to delete.", node.entry.Key.Log()))
 	}
 
 	if node.entry.SData != nil {
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index 46479b58a51d560b303579e040363e0888939332..7c0436c3fd02e223e37da20321e55259db781926 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -17,12 +17,12 @@
 package storage
 
 import (
+	"fmt"
 	"path/filepath"
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 /*
@@ -98,14 +98,14 @@ func (self *NetStore) Put(entry *Chunk) {
 
 	// handle deliveries
 	if entry.Req != nil {
-		glog.V(logger.Detail).Infof("NetStore.Put: localStore.Put %v hit existing request...delivering", entry.Key.Log())
+		log.Trace(fmt.Sprintf("NetStore.Put: localStore.Put %v hit existing request...delivering", entry.Key.Log()))
 		// closing C signals to other routines (local requests)
 		// that the chunk is has been retrieved
 		close(entry.Req.C)
 		// deliver the chunk to requesters upstream
 		go self.cloud.Deliver(entry)
 	} else {
-		glog.V(logger.Detail).Infof("NetStore.Put: localStore.Put %v stored locally", entry.Key.Log())
+		log.Trace(fmt.Sprintf("NetStore.Put: localStore.Put %v stored locally", entry.Key.Log()))
 		// handle propagating store requests
 		// go self.cloud.Store(entry)
 		go self.cloud.Store(entry)
@@ -118,15 +118,15 @@ func (self *NetStore) Get(key Key) (*Chunk, error) {
 	chunk, err := self.localStore.Get(key)
 	if err == nil {
 		if chunk.Req == nil {
-			glog.V(logger.Detail).Infof("NetStore.Get: %v found locally", key)
+			log.Trace(fmt.Sprintf("NetStore.Get: %v found locally", key))
 		} else {
-			glog.V(logger.Detail).Infof("NetStore.Get: %v hit on an existing request", key)
+			log.Trace(fmt.Sprintf("NetStore.Get: %v hit on an existing request", key))
 			// no need to launch again
 		}
 		return chunk, err
 	}
 	// no data and no request status
-	glog.V(logger.Detail).Infof("NetStore.Get: %v not found locally. open new request", key)
+	log.Trace(fmt.Sprintf("NetStore.Get: %v not found locally. open new request", key))
 	chunk = NewChunk(key, newRequestStatus(key))
 	self.localStore.memStore.Put(chunk)
 	go self.cloud.Retrieve(chunk)
diff --git a/swarm/swarm.go b/swarm/swarm.go
index eab01f036cf66f78761a532d0d3e511b77626641..b2894c6e7612f6628b84744a2f890ce75c1d173e 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -26,8 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/contracts/chequebook"
 	"github.com/ethereum/go-ethereum/contracts/ens"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -88,7 +87,7 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.
 		privateKey:  config.Swap.PrivateKey(),
 		corsString:  cors,
 	}
-	glog.V(logger.Debug).Infof("Setting up Swarm service components")
+	log.Debug(fmt.Sprintf("Setting up Swarm service components"))
 
 	hash := storage.MakeHashFunc(config.ChunkerParams.Hash)
 	self.lstore, err = storage.NewLocalStore(hash, config.StoreParams)
@@ -97,10 +96,10 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.
 	}
 
 	// setup local store
-	glog.V(logger.Debug).Infof("Set up local storage")
+	log.Debug(fmt.Sprintf("Set up local storage"))
 
 	self.dbAccess = network.NewDbAccess(self.lstore)
-	glog.V(logger.Debug).Infof("Set up local db access (iterator/counter)")
+	log.Debug(fmt.Sprintf("Set up local db access (iterator/counter)"))
 
 	// set up the kademlia hive
 	self.hive = network.NewHive(
@@ -109,26 +108,26 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.
 		swapEnabled,                          // SWAP enabled
 		syncEnabled,                          // syncronisation enabled
 	)
-	glog.V(logger.Debug).Infof("Set up swarm network with Kademlia hive")
+	log.Debug(fmt.Sprintf("Set up swarm network with Kademlia hive"))
 
 	// setup cloud storage backend
 	cloud := network.NewForwarder(self.hive)
-	glog.V(logger.Debug).Infof("-> set swarm forwarder as cloud storage backend")
+	log.Debug(fmt.Sprintf("-> set swarm forwarder as cloud storage backend"))
 	// setup cloud storage internal access layer
 
 	self.storage = storage.NewNetStore(hash, self.lstore, cloud, config.StoreParams)
-	glog.V(logger.Debug).Infof("-> swarm net store shared access layer to Swarm Chunk Store")
+	log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store"))
 
 	// set up Depo (storage handler = cloud storage access layer for incoming remote requests)
 	self.depo = network.NewDepo(hash, self.lstore, self.storage)
-	glog.V(logger.Debug).Infof("-> REmote Access to CHunks")
+	log.Debug(fmt.Sprintf("-> REmote Access to CHunks"))
 
 	// set up DPA, the cloud storage local access layer
 	dpaChunkStore := storage.NewDpaChunkStore(self.lstore, self.storage)
-	glog.V(logger.Debug).Infof("-> Local Access to Swarm")
+	log.Debug(fmt.Sprintf("-> Local Access to Swarm"))
 	// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
 	self.dpa = storage.NewDPA(dpaChunkStore, self.config.ChunkerParams)
-	glog.V(logger.Debug).Infof("-> Content Store API")
+	log.Debug(fmt.Sprintf("-> Content Store API"))
 
 	// set up high level api
 	transactOpts := bind.NewKeyedTransactor(self.privateKey)
@@ -137,11 +136,11 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.
 	if err != nil {
 		return nil, err
 	}
-	glog.V(logger.Debug).Infof("-> Swarm Domain Name Registrar @ address %v", config.EnsRoot.Hex())
+	log.Debug(fmt.Sprintf("-> Swarm Domain Name Registrar @ address %v", config.EnsRoot.Hex()))
 
 	self.api = api.NewApi(self.dpa, self.dns)
 	// Manifests for Smart Hosting
-	glog.V(logger.Debug).Infof("-> Web3 virtual server API")
+	log.Debug(fmt.Sprintf("-> Web3 virtual server API"))
 
 	return self, nil
 }
@@ -173,21 +172,21 @@ func (self *Swarm) Start(net *p2p.Server) error {
 		if err != nil {
 			return fmt.Errorf("Unable to set chequebook for SWAP: %v", err)
 		}
-		glog.V(logger.Debug).Infof("-> cheque book for SWAP: %v", self.config.Swap.Chequebook())
+		log.Debug(fmt.Sprintf("-> cheque book for SWAP: %v", self.config.Swap.Chequebook()))
 	} else {
-		glog.V(logger.Debug).Infof("SWAP disabled: no cheque book set")
+		log.Debug(fmt.Sprintf("SWAP disabled: no cheque book set"))
 	}
 
-	glog.V(logger.Warn).Infof("Starting Swarm service")
+	log.Warn(fmt.Sprintf("Starting Swarm service"))
 	self.hive.Start(
 		discover.PubkeyID(&net.PrivateKey.PublicKey),
 		func() string { return net.ListenAddr },
 		connectPeer,
 	)
-	glog.V(logger.Info).Infof("Swarm network started on bzz address: %v", self.hive.Addr())
+	log.Info(fmt.Sprintf("Swarm network started on bzz address: %v", self.hive.Addr()))
 
 	self.dpa.Start()
-	glog.V(logger.Debug).Infof("Swarm DPA started")
+	log.Debug(fmt.Sprintf("Swarm DPA started"))
 
 	// start swarm http proxy server
 	if self.config.Port != "" {
@@ -195,10 +194,10 @@ func (self *Swarm) Start(net *p2p.Server) error {
 		go httpapi.StartHttpServer(self.api, &httpapi.Server{Addr: addr, CorsString: self.corsString})
 	}
 
-	glog.V(logger.Debug).Infof("Swarm http proxy started on port: %v", self.config.Port)
+	log.Debug(fmt.Sprintf("Swarm http proxy started on port: %v", self.config.Port))
 
 	if self.corsString != "" {
-		glog.V(logger.Debug).Infof("Swarm http proxy started with corsdomain:", self.corsString)
+		log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.corsString))
 	}
 
 	return nil
@@ -279,7 +278,7 @@ func (self *Swarm) SetChequebook(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	glog.V(logger.Info).Infof("new chequebook set (%v): saving config file, resetting all connections in the hive", self.config.Swap.Contract.Hex())
+	log.Info(fmt.Sprintf("new chequebook set (%v): saving config file, resetting all connections in the hive", self.config.Swap.Contract.Hex()))
 	self.config.Save()
 	self.hive.DropAll()
 	return nil
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 9199be774e4be373cbe8e43452d224292130653a..06792cac177e73a5b4dc9409917fac8008e274ce 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -34,7 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -146,14 +146,14 @@ func runBlockTests(homesteadBlock, daoForkBlock, gasPriceFork *big.Int, bt map[s
 
 	for name, test := range bt {
 		if skipTest[name] /*|| name != "CallingCanonicalContractFromFork_CALLCODE"*/ {
-			glog.Infoln("Skipping block test", name)
+			log.Info(fmt.Sprint("Skipping block test ", name))
 			continue
 		}
 		// test the block
 		if err := runBlockTest(homesteadBlock, daoForkBlock, gasPriceFork, test); err != nil {
 			return fmt.Errorf("%s: %v", name, err)
 		}
-		glog.Infoln("Block test passed: ", name)
+		log.Info(fmt.Sprint("Block test passed: ", name))
 
 	}
 	return nil
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index a2a0482057e266d4c59384a318893fa9e54415e4..064bf4588637dc1da4f23ce1733abba3bd444edd 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -109,7 +109,7 @@ func runStateTests(chainConfig *params.ChainConfig, tests map[string]VmTest, ski
 
 	for name, test := range tests {
 		if skipTest[name] /*|| name != "JUMPDEST_Attack"*/ {
-			glog.Infoln("Skipping state test", name)
+			log.Info(fmt.Sprint("Skipping state test ", name))
 			continue
 		}
 
@@ -118,7 +118,7 @@ func runStateTests(chainConfig *params.ChainConfig, tests map[string]VmTest, ski
 			return fmt.Errorf("%s: %s\n", name, err.Error())
 		}
 
-		//glog.Infoln("State test passed: ", name)
+		//log.Info(fmt.Sprint("State test passed: ", name))
 		//fmt.Println(string(statedb.Dump()))
 	}
 	return nil
diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go
index 678513e11a43d17bdf5942b29e8d0944553de988..d267258673620fd28f01d8d743920a022e1c359a 100644
--- a/tests/transaction_test_util.go
+++ b/tests/transaction_test_util.go
@@ -25,7 +25,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -64,14 +64,14 @@ func RunTransactionTestsWithReader(config *params.ChainConfig, r io.Reader, skip
 	for name, test := range bt {
 		// if the test should be skipped, return
 		if skipTest[name] {
-			glog.Infoln("Skipping transaction test", name)
+			log.Info(fmt.Sprint("Skipping transaction test ", name))
 			return nil
 		}
 		// test the block
 		if err := runTransactionTest(config, test); err != nil {
 			return err
 		}
-		glog.Infoln("Transaction test passed: ", name)
+		log.Info(fmt.Sprint("Transaction test passed: ", name))
 
 	}
 	return nil
@@ -98,7 +98,7 @@ func runTransactionTests(config *params.ChainConfig, tests map[string]Transactio
 	for name, test := range tests {
 		// if the test should be skipped, return
 		if skipTest[name] {
-			glog.Infoln("Skipping transaction test", name)
+			log.Info(fmt.Sprint("Skipping transaction test ", name))
 			return nil
 		}
 
@@ -106,7 +106,7 @@ func runTransactionTests(config *params.ChainConfig, tests map[string]Transactio
 		if err := runTransactionTest(config, test); err != nil {
 			return fmt.Errorf("%s: %v", name, err)
 		}
-		glog.Infoln("Transaction test passed: ", name)
+		log.Info(fmt.Sprint("Transaction test passed: ", name))
 
 	}
 	return nil
diff --git a/tests/util.go b/tests/util.go
index 134d5b4f80bf7764bdfcb3a4fe80b4b5c526d413..c96c2e06dc7f5c23744a4448eb921e24f7a5715e 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -40,7 +40,7 @@ var (
 )
 
 func init() {
-	glog.SetV(0)
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlCrit, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 	if os.Getenv("JITVM") == "true" {
 		ForceJit = true
 		EnableJit = true
diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go
index 3b7ba9b31ab04e70e6aae1b86babf6e419f1cde7..4bf2dbfe986b8b923eb2e7d38438d02e3681245c 100644
--- a/tests/vm_test_util.go
+++ b/tests/vm_test_util.go
@@ -29,7 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -130,7 +130,7 @@ func runVmTests(tests map[string]VmTest, skipTests []string) error {
 
 	for name, test := range tests {
 		if skipTest[name] /*|| name != "exp0"*/ {
-			glog.Infoln("Skipping VM test", name)
+			log.Info(fmt.Sprint("Skipping VM test ", name))
 			continue
 		}
 
@@ -138,7 +138,7 @@ func runVmTests(tests map[string]VmTest, skipTests []string) error {
 			return fmt.Errorf("%s %s", name, err.Error())
 		}
 
-		glog.Infoln("VM test passed: ", name)
+		log.Info(fmt.Sprint("VM test passed: ", name))
 		//fmt.Println(string(statedb.Dump()))
 	}
 	return nil
diff --git a/trie/proof.go b/trie/proof.go
index bea5e5c0981c6b1597039f5adad13e74b9c58537..06cf827ab1f1b3511cccd569c69afa7e817b075e 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -23,8 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -61,9 +60,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
 			var err error
 			tn, err = t.resolveHash(n, nil, nil)
 			if err != nil {
-				if glog.V(logger.Error) {
-					glog.Errorf("Unhandled trie error: %v", err)
-				}
+				log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 				return nil
 			}
 		default:
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 8b90da02fe007ee3187362ca2eaa28f6decfaea6..113fb6a1afa8098e0b864cf1b558d201245c1041 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -17,9 +17,10 @@
 package trie
 
 import (
+	"fmt"
+
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 var secureKeyPrefix = []byte("secure-key-")
@@ -70,8 +71,8 @@ func NewSecure(root common.Hash, db Database, cachelimit uint16) (*SecureTrie, e
 // The value bytes must not be modified by the caller.
 func (t *SecureTrie) Get(key []byte) []byte {
 	res, err := t.TryGet(key)
-	if err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 	return res
 }
@@ -90,8 +91,8 @@ func (t *SecureTrie) TryGet(key []byte) ([]byte, error) {
 // The value bytes must not be modified by the caller while they are
 // stored in the trie.
 func (t *SecureTrie) Update(key, value []byte) {
-	if err := t.TryUpdate(key, value); err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err := t.TryUpdate(key, value); err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 }
 
@@ -115,8 +116,8 @@ func (t *SecureTrie) TryUpdate(key, value []byte) error {
 
 // Delete removes any existing value for key from the trie.
 func (t *SecureTrie) Delete(key []byte) {
-	if err := t.TryDelete(key); err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err := t.TryDelete(key); err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 }
 
diff --git a/trie/trie.go b/trie/trie.go
index cd9e20cacbe3f5a392a3c5885eccad8ecf5d9769..2a6044068907d93ac0cdac46a2b41efcd4712aa3 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -23,8 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/rcrowley/go-metrics"
 )
 
@@ -135,8 +134,8 @@ func (t *Trie) Iterator() *Iterator {
 // The value bytes must not be modified by the caller.
 func (t *Trie) Get(key []byte) []byte {
 	res, err := t.TryGet(key)
-	if err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 	return res
 }
@@ -198,8 +197,8 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
 // The value bytes must not be modified by the caller while they are
 // stored in the trie.
 func (t *Trie) Update(key, value []byte) {
-	if err := t.TryUpdate(key, value); err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err := t.TryUpdate(key, value); err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 }
 
@@ -300,8 +299,8 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
 
 // Delete removes any existing value for key from the trie.
 func (t *Trie) Delete(key []byte) {
-	if err := t.TryDelete(key); err != nil && glog.V(logger.Error) {
-		glog.Errorf("Unhandled trie error: %v", err)
+	if err := t.TryDelete(key); err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
 	}
 }
 
diff --git a/vendor/github.com/ethereum/ethash/ethash.go b/vendor/github.com/ethereum/ethash/ethash.go
index 2a31aaf2d34352631b07a4ba6dd6d1d4c89541b6..8e5cd812809a09a3ca20061a8b2de2a762ba2f82 100644
--- a/vendor/github.com/ethereum/ethash/ethash.go
+++ b/vendor/github.com/ethereum/ethash/ethash.go
@@ -42,8 +42,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/pow"
 )
 
@@ -89,14 +88,14 @@ func (cache *cache) generate() {
 	cache.gen.Do(func() {
 		started := time.Now()
 		seedHash := makeSeedHash(cache.epoch)
-		glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
+		log.Debug(fmt.Sprintf("Generating cache for epoch %d (%x)", cache.epoch, seedHash))
 		size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
 		if cache.test {
 			size = cacheSizeForTesting
 		}
 		cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
 		runtime.SetFinalizer(cache, freeCache)
-		glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
+		log.Debug(fmt.Sprintf("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started)))
 	})
 }
 
@@ -132,7 +131,7 @@ func (l *Light) Verify(block pow.Block) bool {
 	// to prevent DOS attacks.
 	blockNum := block.NumberU64()
 	if blockNum >= epochLength*2048 {
-		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
+		log.Debug(fmt.Sprintf("block number %d too high, limit is %d", blockNum, epochLength*2048))
 		return false
 	}
 
@@ -143,7 +142,7 @@ func (l *Light) Verify(block pow.Block) bool {
 	   Ethereum protocol consensus rules here which are not in scope of Ethash
 	*/
 	if difficulty.Cmp(common.Big0) == 0 {
-		glog.V(logger.Debug).Infof("invalid block difficulty")
+		log.Debug(fmt.Sprintf("invalid block difficulty"))
 		return false
 	}
 
@@ -198,22 +197,22 @@ func (l *Light) getCache(blockNum uint64) *cache {
 					evict = cache
 				}
 			}
-			glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
+			log.Debug(fmt.Sprintf("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch))
 			delete(l.caches, evict.epoch)
 		}
 		// If we have the new DAG pre-generated, use that, otherwise create a new one
 		if l.future != nil && l.future.epoch == epoch {
-			glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
+			log.Debug(fmt.Sprintf("Using pre-generated DAG for epoch %d", epoch))
 			c, l.future = l.future, nil
 		} else {
-			glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
+			log.Debug(fmt.Sprintf("No pre-generated DAG available, creating new for epoch %d", epoch))
 			c = &cache{epoch: epoch, test: l.test}
 		}
 		l.caches[epoch] = c
 
 		// If we just used up the future cache, or need a refresh, regenerate
 		if l.future == nil || l.future.epoch <= epoch {
-			glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
+			log.Debug(fmt.Sprintf("Pre-generating DAG for epoch %d", epoch+1))
 			l.future = &cache{epoch: epoch + 1, test: l.test}
 			go l.future.generate()
 		}
@@ -256,7 +255,7 @@ func (d *dag) generate() {
 		if d.dir == "" {
 			d.dir = DefaultDir
 		}
-		glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
+		log.Info(fmt.Sprintf("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash))
 		// Generate a temporary cache.
 		// TODO: this could share the cache with Light
 		cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
@@ -273,7 +272,7 @@ func (d *dag) generate() {
 			panic("ethash_full_new IO or memory error")
 		}
 		runtime.SetFinalizer(d, freeDAG)
-		glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
+		log.Info(fmt.Sprintf("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started)))
 	})
 }
 
@@ -288,7 +287,7 @@ func (d *dag) Ptr() unsafe.Pointer {
 
 //export ethashGoCallback
 func ethashGoCallback(percent C.unsigned) C.int {
-	glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
+	log.Info(fmt.Sprintf("Generating DAG: %d%%", percent))
 	return 0
 }
 
diff --git a/whisper/mailserver/mailserver.go b/whisper/mailserver/mailserver.go
index 3e08a3b7e2ebc361f27b3be21663c60431f5c721..f90a2ee7eef530fb13956be3dce7a8a1b8a5d167 100644
--- a/whisper/mailserver/mailserver.go
+++ b/whisper/mailserver/mailserver.go
@@ -19,12 +19,11 @@ package mailserver
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 
-	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
 	"github.com/syndtr/goleveldb/leveldb"
@@ -60,16 +59,16 @@ func NewDbKey(t uint32, h common.Hash) *DBKey {
 func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) {
 	var err error
 	if len(path) == 0 {
-		utils.Fatalf("DB file is not specified")
+		log.Crit(fmt.Sprintf("DB file is not specified"))
 	}
 
 	if len(password) == 0 {
-		utils.Fatalf("Password is not specified for MailServer")
+		log.Crit(fmt.Sprintf("Password is not specified for MailServer"))
 	}
 
 	s.db, err = leveldb.OpenFile(path, nil)
 	if err != nil {
-		utils.Fatalf("Failed to open DB file: %s", err)
+		log.Crit(fmt.Sprintf("Failed to open DB file: %s", err))
 	}
 
 	s.w = shh
@@ -77,7 +76,7 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
 
 	err = s.w.AddSymKey(MailServerKeyName, []byte(password))
 	if err != nil {
-		utils.Fatalf("Failed to create symmetric key for MailServer: %s", err)
+		log.Crit(fmt.Sprintf("Failed to create symmetric key for MailServer: %s", err))
 	}
 	s.key = s.w.GetSymKey(MailServerKeyName)
 }
@@ -92,18 +91,18 @@ func (s *WMailServer) Archive(env *whisper.Envelope) {
 	key := NewDbKey(env.Expiry-env.TTL, env.Hash())
 	rawEnvelope, err := rlp.EncodeToBytes(env)
 	if err != nil {
-		glog.V(logger.Error).Infof("rlp.EncodeToBytes failed: %s", err)
+		log.Error(fmt.Sprintf("rlp.EncodeToBytes failed: %s", err))
 	} else {
 		err = s.db.Put(key.raw, rawEnvelope, nil)
 		if err != nil {
-			glog.V(logger.Error).Infof("Writing to DB failed: %s", err)
+			log.Error(fmt.Sprintf("Writing to DB failed: %s", err))
 		}
 	}
 }
 
 func (s *WMailServer) DeliverMail(peer *whisper.Peer, request *whisper.Envelope) {
 	if peer == nil {
-		glog.V(logger.Error).Info("Whisper peer is nil")
+		log.Error(fmt.Sprint("Whisper peer is nil"))
 		return
 	}
 
@@ -127,7 +126,7 @@ func (s *WMailServer) processRequest(peer *whisper.Peer, lower, upper uint32, to
 		var envelope whisper.Envelope
 		err = rlp.DecodeBytes(i.Value(), &envelope)
 		if err != nil {
-			glog.V(logger.Error).Infof("RLP decoding failed: %s", err)
+			log.Error(fmt.Sprintf("RLP decoding failed: %s", err))
 		}
 
 		if topic == empty || envelope.Topic == topic {
@@ -137,7 +136,7 @@ func (s *WMailServer) processRequest(peer *whisper.Peer, lower, upper uint32, to
 			} else {
 				err = s.w.SendP2PDirect(peer, &envelope)
 				if err != nil {
-					glog.V(logger.Error).Infof("Failed to send direct message to peer: %s", err)
+					log.Error(fmt.Sprintf("Failed to send direct message to peer: %s", err))
 					return nil
 				}
 			}
@@ -146,7 +145,7 @@ func (s *WMailServer) processRequest(peer *whisper.Peer, lower, upper uint32, to
 
 	err = i.Error()
 	if err != nil {
-		glog.V(logger.Error).Infof("Level DB iterator error: %s", err)
+		log.Error(fmt.Sprintf("Level DB iterator error: %s", err))
 	}
 
 	return ret
@@ -161,12 +160,12 @@ func (s *WMailServer) validateRequest(peerID []byte, request *whisper.Envelope)
 	f := whisper.Filter{KeySym: s.key}
 	decrypted := request.Open(&f)
 	if decrypted == nil {
-		glog.V(logger.Warn).Infof("Failed to decrypt p2p request")
+		log.Warn(fmt.Sprintf("Failed to decrypt p2p request"))
 		return false, 0, 0, topic
 	}
 
 	if len(decrypted.Payload) < 8 {
-		glog.V(logger.Warn).Infof("Undersized p2p request")
+		log.Warn(fmt.Sprintf("Undersized p2p request"))
 		return false, 0, 0, topic
 	}
 
@@ -175,7 +174,7 @@ func (s *WMailServer) validateRequest(peerID []byte, request *whisper.Envelope)
 		src = src[1:]
 	}
 	if !bytes.Equal(peerID, src) {
-		glog.V(logger.Warn).Infof("Wrong signature of p2p request")
+		log.Warn(fmt.Sprintf("Wrong signature of p2p request"))
 		return false, 0, 0, topic
 	}
 
diff --git a/whisper/whisperv2/message.go b/whisper/whisperv2/message.go
index e55544ffc9d7d7cf9a8b1cd6754b07efe5a21e8d..66648c3be85fd6a73367e4e435374de4f2d0e018 100644
--- a/whisper/whisperv2/message.go
+++ b/whisper/whisperv2/message.go
@@ -22,14 +22,14 @@ package whisperv2
 import (
 	"crypto/ecdsa"
 	crand "crypto/rand"
+	"fmt"
 	"math/rand"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/ecies"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 // Message represents an end-user data packet to transmit through the Whisper
@@ -125,7 +125,7 @@ func (self *Message) Recover() *ecdsa.PublicKey {
 	// Otherwise try and recover the signature
 	pub, err := crypto.SigToPub(self.hash(), self.Signature)
 	if err != nil {
-		glog.V(logger.Error).Infof("Could not get public key from signature: %v", err)
+		log.Error(fmt.Sprintf("Could not get public key from signature: %v", err))
 		return nil
 	}
 	return pub
diff --git a/whisper/whisperv2/peer.go b/whisper/whisperv2/peer.go
index f09ce3523a1c76474f8625b0af8b951a425deef5..71798408b92853a59dacf309093b50d88f655427 100644
--- a/whisper/whisperv2/peer.go
+++ b/whisper/whisperv2/peer.go
@@ -21,8 +21,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"gopkg.in/fatih/set.v0"
@@ -54,13 +53,13 @@ func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *peer {
 // into the network.
 func (self *peer) start() {
 	go self.update()
-	glog.V(logger.Debug).Infof("%v: whisper started", self.peer)
+	log.Debug(fmt.Sprintf("%v: whisper started", self.peer))
 }
 
 // stop terminates the peer updater, stopping message forwarding to it.
 func (self *peer) stop() {
 	close(self.quit)
-	glog.V(logger.Debug).Infof("%v: whisper stopped", self.peer)
+	log.Debug(fmt.Sprintf("%v: whisper stopped", self.peer))
 }
 
 // handshake sends the protocol initiation status message to the remote peer and
@@ -112,7 +111,7 @@ func (self *peer) update() {
 
 		case <-transmit.C:
 			if err := self.broadcast(); err != nil {
-				glog.V(logger.Info).Infof("%v: broadcast failed: %v", self.peer, err)
+				log.Info(fmt.Sprintf("%v: broadcast failed: %v", self.peer, err))
 				return
 			}
 
@@ -170,6 +169,6 @@ func (self *peer) broadcast() error {
 	if err := p2p.Send(self.ws, messagesCode, transmit); err != nil {
 		return err
 	}
-	glog.V(logger.Detail).Infoln(self.peer, "broadcasted", len(transmit), "message(s)")
+	log.Trace(fmt.Sprintf("%v broadcasted %d message(s)", self.peer, len(transmit)))
 	return nil
 }
diff --git a/whisper/whisperv2/whisper.go b/whisper/whisperv2/whisper.go
index d9054959e52c2558062e1450a32309deec438b53..1d7c21bd12c5eb4a75a3eec3d2c53560aad1b830 100644
--- a/whisper/whisperv2/whisper.go
+++ b/whisper/whisperv2/whisper.go
@@ -18,6 +18,7 @@ package whisperv2
 
 import (
 	"crypto/ecdsa"
+	"fmt"
 	"sync"
 	"time"
 
@@ -25,8 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/ecies"
 	"github.com/ethereum/go-ethereum/event/filter"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rpc"
 
@@ -173,7 +173,7 @@ func (self *Whisper) Send(envelope *Envelope) error {
 // Start implements node.Service, starting the background data propagation thread
 // of the Whisper protocol.
 func (self *Whisper) Start(*p2p.Server) error {
-	glog.V(logger.Info).Infoln("Whisper started")
+	log.Info(fmt.Sprint("Whisper started"))
 	go self.update()
 	return nil
 }
@@ -182,7 +182,7 @@ func (self *Whisper) Start(*p2p.Server) error {
 // of the Whisper protocol.
 func (self *Whisper) Stop() error {
 	close(self.quit)
-	glog.V(logger.Info).Infoln("Whisper stopped")
+	log.Info("Whisper stopped")
 	return nil
 }
 
@@ -233,14 +233,14 @@ func (self *Whisper) handlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
 		}
 		var envelopes []*Envelope
 		if err := packet.Decode(&envelopes); err != nil {
-			glog.V(logger.Info).Infof("%v: failed to decode envelope: %v", peer, err)
+			log.Info(fmt.Sprintf("%v: failed to decode envelope: %v", peer, err))
 			continue
 		}
 		// Inject all envelopes into the internal pool
 		for _, envelope := range envelopes {
 			if err := self.add(envelope); err != nil {
 				// TODO Punish peer here. Invalid envelope.
-				glog.V(logger.Debug).Infof("%v: failed to pool envelope: %v", peer, err)
+				log.Debug(fmt.Sprintf("%v: failed to pool envelope: %v", peer, err))
 			}
 			whisperPeer.mark(envelope)
 		}
@@ -262,7 +262,7 @@ func (self *Whisper) add(envelope *Envelope) error {
 	// Insert the message into the tracked pool
 	hash := envelope.Hash()
 	if _, ok := self.messages[hash]; ok {
-		glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope)
+		log.Trace(fmt.Sprintf("whisper envelope already cached: %x\n", envelope))
 		return nil
 	}
 	self.messages[hash] = envelope
@@ -277,7 +277,7 @@ func (self *Whisper) add(envelope *Envelope) error {
 		// Notify the local node of a message arrival
 		go self.postEvent(envelope)
 	}
-	glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope)
+	log.Trace(fmt.Sprintf("cached whisper envelope %x\n", envelope))
 	return nil
 }
 
diff --git a/whisper/whisperv5/api.go b/whisper/whisperv5/api.go
index 4d33d232142a5ccf8356faaf8c3ea8174e9112b1..d34213e05da1b3d78e91ef1666a3c5d6215978c4 100644
--- a/whisper/whisperv5/api.go
+++ b/whisper/whisperv5/api.go
@@ -25,8 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 var whisperOffLineErr = errors.New("whisper is offline")
@@ -170,25 +169,25 @@ func (api *PublicWhisperAPI) NewFilter(args WhisperFilterArgs) (string, error) {
 
 	if len(args.Topics) == 0 && len(args.KeyName) != 0 {
 		info := "NewFilter: at least one topic must be specified"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return "", errors.New(info)
 	}
 
 	if len(args.KeyName) != 0 && len(filter.KeySym) == 0 {
 		info := "NewFilter: key was not found by name: " + args.KeyName
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return "", errors.New(info)
 	}
 
 	if len(args.To) == 0 && len(filter.KeySym) == 0 {
 		info := "NewFilter: filter must contain either symmetric or asymmetric key"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return "", errors.New(info)
 	}
 
 	if len(args.To) != 0 && len(filter.KeySym) != 0 {
 		info := "NewFilter: filter must not contain both symmetric and asymmetric key"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return "", errors.New(info)
 	}
 
@@ -196,13 +195,13 @@ func (api *PublicWhisperAPI) NewFilter(args WhisperFilterArgs) (string, error) {
 		dst := crypto.ToECDSAPub(common.FromHex(args.To))
 		if !ValidatePublicKey(dst) {
 			info := "NewFilter: Invalid 'To' address"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return "", errors.New(info)
 		}
 		filter.KeyAsym = api.whisper.GetIdentity(string(args.To))
 		if filter.KeyAsym == nil {
 			info := "NewFilter: non-existent identity provided"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return "", errors.New(info)
 		}
 	}
@@ -210,7 +209,7 @@ func (api *PublicWhisperAPI) NewFilter(args WhisperFilterArgs) (string, error) {
 	if len(args.From) > 0 {
 		if !ValidatePublicKey(filter.Src) {
 			info := "NewFilter: Invalid 'From' address"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return "", errors.New(info)
 		}
 	}
@@ -269,13 +268,13 @@ func (api *PublicWhisperAPI) Post(args PostArgs) error {
 		pub := crypto.ToECDSAPub(common.FromHex(args.From))
 		if !ValidatePublicKey(pub) {
 			info := "Post: Invalid 'From' address"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return errors.New(info)
 		}
 		params.Src = api.whisper.GetIdentity(string(args.From))
 		if params.Src == nil {
 			info := "Post: non-existent identity provided"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return errors.New(info)
 		}
 	}
@@ -283,7 +282,7 @@ func (api *PublicWhisperAPI) Post(args PostArgs) error {
 	filter := api.whisper.GetFilter(args.FilterID)
 	if filter == nil && len(args.FilterID) > 0 {
 		info := fmt.Sprintf("Post: wrong filter id %s", args.FilterID)
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 
@@ -299,7 +298,7 @@ func (api *PublicWhisperAPI) Post(args PostArgs) error {
 			sz := len(filter.Topics)
 			if sz < 1 {
 				info := fmt.Sprintf("Post: no topics in filter # %s", args.FilterID)
-				glog.V(logger.Error).Infof(info)
+				log.Error(info)
 				return errors.New(info)
 			} else if sz == 1 {
 				params.Topic = filter.Topics[0]
@@ -314,26 +313,26 @@ func (api *PublicWhisperAPI) Post(args PostArgs) error {
 	// validate
 	if len(args.KeyName) != 0 && len(params.KeySym) == 0 {
 		info := "Post: key was not found by name: " + args.KeyName
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 
 	if len(args.To) == 0 && len(params.KeySym) == 0 {
 		info := "Post: message must be encrypted either symmetrically or asymmetrically"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 
 	if len(args.To) != 0 && len(params.KeySym) != 0 {
 		info := "Post: ambigous encryption method requested"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 
 	if len(args.To) > 0 {
 		if !ValidatePublicKey(params.Dst) {
 			info := "Post: Invalid 'To' address"
-			glog.V(logger.Error).Infof(info)
+			log.Error(info)
 			return errors.New(info)
 		}
 	}
@@ -342,17 +341,17 @@ func (api *PublicWhisperAPI) Post(args PostArgs) error {
 	message := NewSentMessage(&params)
 	envelope, err := message.Wrap(&params)
 	if err != nil {
-		glog.V(logger.Error).Infof(err.Error())
+		log.Error(err.Error())
 		return err
 	}
 	if len(envelope.Data) > MaxMessageLength {
 		info := "Post: message is too big"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 	if (envelope.Topic == TopicType{} && envelope.IsSymmetric()) {
 		info := "Post: topic is missing for symmetric encryption"
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 
diff --git a/whisper/whisperv5/filter.go b/whisper/whisperv5/filter.go
index 832ebe3f6b0962af2e912bcef0431b464cb793ad..8aa7b2429d2c5c5e460658540b0d0fb7cc52af95 100644
--- a/whisper/whisperv5/filter.go
+++ b/whisper/whisperv5/filter.go
@@ -18,13 +18,12 @@ package whisperv5
 
 import (
 	"crypto/ecdsa"
-	crand "crypto/rand"
+	"crypto/rand"
 	"fmt"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 )
 
 type Filter struct {
@@ -56,7 +55,7 @@ func NewFilters(w *Whisper) *Filters {
 func (fs *Filters) generateRandomID() (id string, err error) {
 	buf := make([]byte, 20)
 	for i := 0; i < 3; i++ {
-		_, err = crand.Read(buf)
+		_, err = rand.Read(buf)
 		if err != nil {
 			continue
 		}
@@ -107,7 +106,7 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) {
 	var msg *ReceivedMessage
 	for j, watcher := range fs.watchers {
 		if p2pMessage && !watcher.AcceptP2P {
-			glog.V(logger.Detail).Infof("msg [%x], filter [%d]: p2p messages are not allowed \n", env.Hash(), j)
+			log.Trace(fmt.Sprintf("msg [%x], filter [%s]: p2p messages are not allowed", env.Hash(), j))
 			continue
 		}
 
@@ -119,10 +118,10 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) {
 			if match {
 				msg = env.Open(watcher)
 				if msg == nil {
-					glog.V(logger.Detail).Infof("msg [%x], filter [%d]: failed to open \n", env.Hash(), j)
+					log.Trace(fmt.Sprintf("msg [%x], filter [%s]: failed to open", env.Hash(), j))
 				}
 			} else {
-				glog.V(logger.Detail).Infof("msg [%x], filter [%d]: does not match \n", env.Hash(), j)
+				log.Trace(fmt.Sprintf("msg [%x], filter [%s]: does not match", env.Hash(), j))
 			}
 		}
 
diff --git a/whisper/whisperv5/message.go b/whisper/whisperv5/message.go
index 255ad380d5ae78d4e3f8e3105c7b68e01be9d27d..9677f278e5b89510bddaeda9c56e3c71f4329603 100644
--- a/whisper/whisperv5/message.go
+++ b/whisper/whisperv5/message.go
@@ -31,8 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/ecies"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/crypto/pbkdf2"
 )
 
@@ -144,7 +143,7 @@ func (msg *SentMessage) appendPadding(params *MessageParams) {
 func (msg *SentMessage) sign(key *ecdsa.PrivateKey) error {
 	if isMessageSigned(msg.Raw[0]) {
 		// this should not happen, but no reason to panic
-		glog.V(logger.Error).Infof("Trying to sign a message which was already signed")
+		log.Error("Trying to sign a message which was already signed")
 		return nil
 	}
 
@@ -238,7 +237,7 @@ func (msg *SentMessage) Wrap(options *MessageParams) (envelope *Envelope, err er
 		}
 	}
 	if len(msg.Raw) > MaxMessageLength {
-		glog.V(logger.Error).Infof("Message size must not exceed %d bytes", MaxMessageLength)
+		log.Error(fmt.Sprintf("Message size must not exceed %d bytes", MaxMessageLength))
 		return nil, errors.New("Oversized message")
 	}
 	var salt, nonce []byte
@@ -281,7 +280,7 @@ func (msg *ReceivedMessage) decryptSymmetric(key []byte, salt []byte, nonce []by
 	}
 	if len(nonce) != aesgcm.NonceSize() {
 		info := fmt.Sprintf("Wrong AES nonce size - want: %d, got: %d", len(nonce), aesgcm.NonceSize())
-		glog.V(logger.Error).Infof(info)
+		log.Error(info)
 		return errors.New(info)
 	}
 	decrypted, err := aesgcm.Open(nil, nonce, msg.Raw, nil)
@@ -352,7 +351,7 @@ func (msg *ReceivedMessage) SigToPubKey() *ecdsa.PublicKey {
 
 	pub, err := crypto.SigToPub(msg.hash(), msg.Signature)
 	if err != nil {
-		glog.V(logger.Error).Infof("Could not get public key from signature: %v", err)
+		log.Error(fmt.Sprintf("Could not get public key from signature: %v", err))
 		return nil
 	}
 	return pub
diff --git a/whisper/whisperv5/peer.go b/whisper/whisperv5/peer.go
index 42394a0a3f9c83fd890080502a7a06629df2b4e2..e137613f5c68489c95f552cb97dbbfc1c76e2b95 100644
--- a/whisper/whisperv5/peer.go
+++ b/whisper/whisperv5/peer.go
@@ -21,8 +21,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	set "gopkg.in/fatih/set.v0"
@@ -56,13 +55,13 @@ func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
 // into the network.
 func (p *Peer) start() {
 	go p.update()
-	glog.V(logger.Debug).Infof("%v: whisper started", p.peer)
+	log.Debug(fmt.Sprintf("%v: whisper started", p.peer))
 }
 
 // stop terminates the peer updater, stopping message forwarding to it.
 func (p *Peer) stop() {
 	close(p.quit)
-	glog.V(logger.Debug).Infof("%v: whisper stopped", p.peer)
+	log.Debug(fmt.Sprintf("%v: whisper stopped", p.peer))
 }
 
 // handshake sends the protocol initiation status message to the remote peer and
@@ -111,7 +110,7 @@ func (p *Peer) update() {
 
 		case <-transmit.C:
 			if err := p.broadcast(); err != nil {
-				glog.V(logger.Info).Infof("%v: broadcast failed: %v", p.peer, err)
+				log.Info(fmt.Sprintf("%v: broadcast failed: %v", p.peer, err))
 				return
 			}
 
@@ -172,7 +171,7 @@ func (p *Peer) broadcast() error {
 	if err := p2p.Send(p.ws, messagesCode, transmit); err != nil {
 		return err
 	}
-	glog.V(logger.Detail).Infoln(p.peer, "broadcasted", len(transmit), "message(s)")
+	log.Trace(fmt.Sprint(p.peer, "broadcasted", len(transmit), "message(s)"))
 	return nil
 }
 
diff --git a/whisper/whisperv5/peer_test.go b/whisper/whisperv5/peer_test.go
index cce2c92ba6b2ff3074f3d4f9c868d477cc553532..3dcf3bc702273830472c60acf2a03731a6bcff6d 100644
--- a/whisper/whisperv5/peer_test.go
+++ b/whisper/whisperv5/peer_test.go
@@ -107,8 +107,7 @@ func TestSimulation(t *testing.T) {
 }
 
 func initialize(t *testing.T) {
-	//glog.SetV(6)
-	//glog.SetToStderr(true)
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat())))
 
 	var err error
 	ip := net.IPv4(127, 0, 0, 1)
diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go
index 2a6ff5f409c527896256b2617ad3d39d1083cef9..558e2909f99776a8edbcf8e4638682dfa909eb65 100644
--- a/whisper/whisperv5/whisper.go
+++ b/whisper/whisperv5/whisper.go
@@ -28,8 +28,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rpc"
 	"golang.org/x/crypto/pbkdf2"
@@ -294,7 +293,7 @@ func (w *Whisper) Send(envelope *Envelope) error {
 // Start implements node.Service, starting the background data propagation thread
 // of the Whisper protocol.
 func (w *Whisper) Start(*p2p.Server) error {
-	glog.V(logger.Info).Infoln("Whisper started")
+	log.Info("Whisper started")
 	go w.update()
 
 	numCPU := runtime.NumCPU()
@@ -309,7 +308,7 @@ func (w *Whisper) Start(*p2p.Server) error {
 // of the Whisper protocol.
 func (w *Whisper) Stop() error {
 	close(w.quit)
-	glog.V(logger.Info).Infoln("Whisper stopped")
+	log.Info("Whisper stopped")
 	return nil
 }
 
@@ -351,18 +350,18 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
 		switch packet.Code {
 		case statusCode:
 			// this should not happen, but no need to panic; just ignore this message.
-			glog.V(logger.Warn).Infof("%v: unxepected status message received", p.peer)
+			log.Warn(fmt.Sprintf("%v: unexpected status message received", p.peer))
 		case messagesCode:
 			// decode the contained envelopes
 			var envelopes []*Envelope
 			if err := packet.Decode(&envelopes); err != nil {
-				glog.V(logger.Warn).Infof("%v: failed to decode envelope: [%v], peer will be disconnected", p.peer, err)
+				log.Warn(fmt.Sprintf("%v: failed to decode envelope: [%v], peer will be disconnected", p.peer, err))
 				return fmt.Errorf("garbage received")
 			}
 			// inject all envelopes into the internal pool
 			for _, envelope := range envelopes {
 				if err := wh.add(envelope); err != nil {
-					glog.V(logger.Warn).Infof("%v: bad envelope received: [%v], peer will be disconnected", p.peer, err)
+					log.Warn(fmt.Sprintf("%v: bad envelope received: [%v], peer will be disconnected", p.peer, err))
 					return fmt.Errorf("invalid envelope")
 				}
 				p.mark(envelope)
@@ -375,7 +374,7 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
 			if p.trusted {
 				var envelope Envelope
 				if err := packet.Decode(&envelope); err != nil {
-					glog.V(logger.Warn).Infof("%v: failed to decode direct message: [%v], peer will be disconnected", p.peer, err)
+					log.Warn(fmt.Sprintf("%v: failed to decode direct message: [%v], peer will be disconnected", p.peer, err))
 					return fmt.Errorf("garbage received (directMessage)")
 				}
 				wh.postEvent(&envelope, true)
@@ -385,7 +384,7 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
 			if wh.mailServer != nil {
 				var request Envelope
 				if err := packet.Decode(&request); err != nil {
-					glog.V(logger.Warn).Infof("%v: failed to decode p2p request message: [%v], peer will be disconnected", p.peer, err)
+					log.Warn(fmt.Sprintf("%v: failed to decode p2p request message: [%v], peer will be disconnected", p.peer, err))
 					return fmt.Errorf("garbage received (p2p request)")
 				}
 				wh.mailServer.DeliverMail(p, &request)
@@ -419,7 +418,7 @@ func (wh *Whisper) add(envelope *Envelope) error {
 		if envelope.Expiry+SynchAllowance*2 < now {
 			return fmt.Errorf("very old message")
 		} else {
-			glog.V(logger.Debug).Infof("expired envelope dropped [%x]", envelope.Hash())
+			log.Debug(fmt.Sprintf("expired envelope dropped [%x]", envelope.Hash()))
 			return nil // drop envelope without error
 		}
 	}
@@ -443,7 +442,7 @@ func (wh *Whisper) add(envelope *Envelope) error {
 	}
 
 	if envelope.PoW() < MinimumPoW && !wh.test {
-		glog.V(logger.Debug).Infof("envelope with low PoW dropped: %f [%x]", envelope.PoW(), envelope.Hash())
+		log.Debug(fmt.Sprintf("envelope with low PoW dropped: %f [%x]", envelope.PoW(), envelope.Hash()))
 		return nil // drop envelope without error
 	}
 
@@ -463,9 +462,9 @@ func (wh *Whisper) add(envelope *Envelope) error {
 	wh.poolMu.Unlock()
 
 	if alreadyCached {
-		glog.V(logger.Detail).Infof("whisper envelope already cached [%x]\n", envelope.Hash())
+		log.Trace(fmt.Sprintf("whisper envelope already cached [%x]\n", envelope.Hash()))
 	} else {
-		glog.V(logger.Detail).Infof("cached whisper envelope [%x]: %v\n", envelope.Hash(), envelope)
+		log.Trace(fmt.Sprintf("cached whisper envelope [%x]: %v\n", envelope.Hash(), envelope))
 		wh.postEvent(envelope, false) // notify the local node about the new message
 		if wh.mailServer != nil {
 			wh.mailServer.Archive(envelope)
@@ -496,7 +495,7 @@ func (w *Whisper) checkOverflow() {
 	if queueSize == messageQueueLimit {
 		if !w.overflow {
 			w.overflow = true
-			glog.V(logger.Warn).Infoln("message queue overflow")
+			log.Warn("message queue overflow")
 		}
 	} else if queueSize <= messageQueueLimit/2 {
 		if w.overflow {