diff --git a/.mailmap b/.mailmap
index e8015249649dfcd86790b69125fd28aa2ca4123f..a36ddc1dc98fe71fca70ba77a36af4843dd20e10 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
 Bas van Kervel <bas@ethdev.com>
 Bas van Kervel <bas@ethdev.com> <basvankervel@ziggo.nl>
 Bas van Kervel <bas@ethdev.com> <basvankervel@gmail.com>
+Bas van Kervel <bas@ethdev.com> <bas-vk@users.noreply.github.com>
 
 Sven Ehlert <sven@ethdev.com>
 
@@ -62,4 +63,30 @@ Joseph Chow <ethereum@outlook.com> ethers <TODO>
 
 Enrique Fynn <enriquefynn@gmail.com>
 
-Vincent G <caktux@gmail.com>
\ No newline at end of file
+Vincent G <caktux@gmail.com>
+
+RJ Catalano <rj@erisindustries.com>
+
+Nchinda Nchinda <nchinda2@gmail.com>
+
+Aron Fischer <homotopycolimit@users.noreply.github.com>
+
+Vlad Gluhovsky <gluk256@users.noreply.github.com>
+
+Ville Sundell <github@solarius.fi>
+
+Elliot Shepherd <elliot@identitii.com>
+
+Yohann Léon <sybiload@gmail.com>
+
+Gregg Dourgarian <greggd@tempworks.com>
+
+Casey Detrio <cdetrio@gmail.com>
+
+Jens Agerberg <github@agerberg.me>
+
+Nick Johnson <arachnid@notdot.net>
+
+Henning Diedrich <hd@eonblast.com>
+Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>
+
diff --git a/AUTHORS b/AUTHORS
index 69eb54874fd66df02dfd8f3a38d4a69e471b86d9..50f3c713d1afe434586970412d97c6a8e30ac8b0 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,36 +1,63 @@
 # This is the official list of go-ethereum authors for copyright purposes.
 
+Ales Katona <ales@coinbase.com>
 Alex Leverington <alex@ethdev.com>
 Alexandre Van de Sande <alex.vandesande@ethdev.com>
+Aron Fischer <homotopycolimit@users.noreply.github.com>
 Bas van Kervel <bas@ethdev.com>
+Benjamin Brent <benjamin@benjaminbrent.com>
+Casey Detrio <cdetrio@gmail.com>
 Christoph Jentzsch <jentzsch.software@gmail.com>
 Daniel A. Nagy <nagy.da@gmail.com>
-Drake Burroughs <wildfyre@hotmail.com>
+Elliot Shepherd <elliot@identitii.com>
 Enrique Fynn <enriquefynn@gmail.com>
 Ethan Buchman <ethan@coinculture.info>
 Fabian Vogelsteller <fabian@frozeman.de>
+Fabio Berger <fabioberger1991@gmail.com>
 Felix Lange <fjl@twurst.com>
+Gregg Dourgarian <greggd@tempworks.com>
 Gustav Simonsson <gustav.simonsson@gmail.com>
+Hao Bryan Cheng <haobcheng@gmail.com>
+Henning Diedrich <hd@eonblast.com>
 Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
 Jae Kwon <jkwon.work@gmail.com>
 Jason Carver <jacarver@linkedin.com>
 Jeff R. Allen <jra@nella.org>
 Jeffrey Wilcke <jeffrey@ethereum.org>
+Jens Agerberg <github@agerberg.me>
+Jonathan Brown <jbrown@bluedroplet.com>
 Joseph Chow <ethereum@outlook.com>
+Justin Clark-Casey <justincc@justincc.org>
+Kenji Siu <kenji@isuntv.com>
 Kobi Gurkan <kobigurk@gmail.com>
 Lefteris Karapetsas <lefteris@refu.co>
 Leif Jurvetson <leijurv@gmail.com>
 Maran Hidskes <maran.hidskes@gmail.com>
 Marek Kotewicz <marek.kotewicz@gmail.com>
+Martin Holst Swende <martin@swende.se>
+Matthew Di Ferrante <mattdf@users.noreply.github.com>
 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
+Nchinda Nchinda <nchinda2@gmail.com>
 Nick Dodson <silentcicero@outlook.com>
+Nick Johnson <arachnid@notdot.net>
+Paulo L F Casaretto <pcasaretto@gmail.com>
 Peter Pratscher <pratscher@gmail.com>
 Péter Szilágyi <peterke@gmail.com>
+RJ Catalano <rj@erisindustries.com>
 Ramesh Nair <ram@hiddentao.com>
 Ricardo Catalinas Jiménez <r@untroubled.be>
 Rémy Roy <remyroy@remyroy.com>
+Stein Dekker <dekker.stein@gmail.com>
+Steven Roose <stevenroose@gmail.com>
 Taylor Gerring <taylor.gerring@gmail.com>
+Thomas Bocek <tom@tomp2p.net>
+Tosh Camille <tochecamille@gmail.com>
 Viktor Trón <viktor.tron@gmail.com>
+Ville Sundell <github@solarius.fi>
 Vincent G <caktux@gmail.com>
 Vitalik Buterin <v@buterin.com>
+Vlad Gluhovsky <gluk256@users.noreply.github.com>
+Yohann Léon <sybiload@gmail.com>
+Yoichi Hirai <i@yoichihirai.com>
 Zsolt Felföldi <zsfelfoldi@gmail.com>
+ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go
index a4e90914f988d448557d4bbb6a38d70dd4cc12f1..4509e222d5231c189e46a5b86038ad1b1ccaa731 100644
--- a/accounts/abi/bind/backend.go
+++ b/accounts/abi/bind/backend.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 74203a468de7da718ecb9f74607928f5c7bad398..bdc2a98bc8e660dfdf26103ad42be4f8bdd09dc3 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 965f51e85662ef3d0da02a8dd1b83e87d578b154..b032ef72d96b310682d5de78af5d03673cf247a5 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/build/update-license.go b/build/update-license.go
index 94b319550673d2d9acd146b883655770fadf46da..e0c273deff9765b1535ac377f786093bf7b5d0e1 100644
--- a/build/update-license.go
+++ b/build/update-license.go
@@ -53,6 +53,7 @@ var (
 		"contracts/chequebook/contract/",
 		"contracts/ens/contract/",
 		"contracts/release/contract.go",
+		"p2p/discv5/nodeevent_string.go",
 	}
 
 	// paths with this prefix are licensed as GPL. all other files are LGPL.
diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go
index 40d3cdc1789abb27960f3d39038001fe3eb62bd3..abecac3d8c1e70ad9534bafe580a2f54d94f983c 100644
--- a/cmd/bootnode/main.go
+++ b/cmd/bootnode/main.go
@@ -27,6 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 )
 
@@ -38,6 +39,7 @@ func main() {
 		nodeKeyFile = flag.String("nodekey", "", "private key filename")
 		nodeKeyHex  = flag.String("nodekeyhex", "", "private key as hex (for testing)")
 		natdesc     = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
+		runv5       = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
 
 		nodeKey *ecdsa.PrivateKey
 		err     error
@@ -79,8 +81,15 @@ func main() {
 		os.Exit(0)
 	}
 
-	if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
-		utils.Fatalf("%v", err)
+	if *runv5 {
+		if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
+			utils.Fatalf("%v", err)
+		}
+	} else {
+		if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
+			utils.Fatalf("%v", err)
+		}
 	}
+
 	select {}
 }
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index c1bbbd8dcf03d4c6237ff392f9706397774f1ede..7a9cf4ac2a0460169c2197bf6f8c2dcae3968389 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -180,7 +180,7 @@ func exportChain(ctx *cli.Context) error {
 
 func removeDB(ctx *cli.Context) error {
 	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
-	dbdir := stack.ResolvePath("chaindata")
+	dbdir := stack.ResolvePath(utils.ChainDbName(ctx))
 	if !common.FileExist(dbdir) {
 		fmt.Println(dbdir, "does not exist")
 		return nil
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 5517052084abe060e11ef8f58e4313a8883f00c7..557bf57fa6d7fcb5a3b2b0240717847834a49100 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -118,8 +118,10 @@ participating.
 		utils.KeyStoreDirFlag,
 		utils.OlympicFlag,
 		utils.FastSyncFlag,
+		utils.LightModeFlag,
+		utils.LightServFlag,
+		utils.LightPeersFlag,
 		utils.LightKDFFlag,
-		utils.CacheFlag,
 		utils.TrieCacheGenFlag,
 		utils.JSpathFlag,
 		utils.ListenPortFlag,
@@ -136,6 +138,7 @@ participating.
 		utils.NATFlag,
 		utils.NatspecEnabledFlag,
 		utils.NoDiscoverFlag,
+		utils.DiscoveryV5Flag,
 		utils.NodeKeyFileFlag,
 		utils.NodeKeyHexFlag,
 		utils.RPCEnabledFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 4c4e27630b7e591a55a51564b76adba5439bf89b..e4abf6b30c99f775c5a93ed3126053779c1a29d9 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -72,6 +72,9 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.DevModeFlag,
 			utils.IdentityFlag,
 			utils.FastSyncFlag,
+			utils.LightModeFlag,
+			utils.LightServFlag,
+			utils.LightPeersFlag,
 			utils.LightKDFFlag,
 		},
 	},
@@ -119,6 +122,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.MaxPendingPeersFlag,
 			utils.NATFlag,
 			utils.NoDiscoverFlag,
+			utils.DiscoveryV5Flag,
 			utils.NodeKeyFileFlag,
 			utils.NodeKeyHexFlag,
 		},
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index edb0873f6b8e8cffe4913a87a95d8803d3bbce1b..626c2615d95836bfb28bcbf319cd88d32e8cb076 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -39,6 +39,8 @@ import (
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/les"
+	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -145,6 +147,20 @@ var (
 		Name:  "fast",
 		Usage: "Enable fast syncing through state downloads",
 	}
+	LightModeFlag = cli.BoolFlag{
+		Name:  "light",
+		Usage: "Enable light client mode",
+	}
+	LightServFlag = cli.IntFlag{
+		Name:  "lightserv",
+		Usage: "Maximum percentage of time allowed for serving LES requests (0-90)",
+		Value: 0,
+	}
+	LightPeersFlag = cli.IntFlag{
+		Name:  "lightpeers",
+		Usage: "Maximum number of LES client peers",
+		Value: 20,
+	}
 	LightKDFFlag = cli.BoolFlag{
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@@ -348,6 +364,10 @@ var (
 		Name:  "nodiscover",
 		Usage: "Disables the peer discovery mechanism (manual peer addition)",
 	}
+	DiscoveryV5Flag = cli.BoolFlag{
+		Name:  "v5disc",
+		Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism",
+	}
 	WhisperEnabledFlag = cli.BoolFlag{
 		Name:  "shh",
 		Usage: "Enable Whisper",
@@ -491,6 +511,10 @@ func MakeListenAddress(ctx *cli.Context) string {
 	return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name))
 }
 
+func MakeListenAddressV5(ctx *cli.Context) string {
+	return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)+1)
+}
+
 // MakeNAT creates a port mapper from set command line flags.
 func MakeNAT(ctx *cli.Context) nat.Interface {
 	natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name))
@@ -621,9 +645,11 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
 		Name:              name,
 		Version:           vsn,
 		UserIdent:         makeNodeUserIdent(ctx),
-		NoDiscovery:       ctx.GlobalBool(NoDiscoverFlag.Name),
+		NoDiscovery:       ctx.GlobalBool(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name),
+		DiscoveryV5:       ctx.GlobalBool(DiscoveryV5Flag.Name) || ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalInt(LightServFlag.Name) > 0,
 		BootstrapNodes:    MakeBootstrapNodes(ctx),
 		ListenAddr:        MakeListenAddress(ctx),
+		ListenAddrV5:      MakeListenAddressV5(ctx),
 		NAT:               MakeNAT(ctx),
 		MaxPeers:          ctx.GlobalInt(MaxPeersFlag.Name),
 		MaxPendingPeers:   ctx.GlobalInt(MaxPendingPeersFlag.Name),
@@ -680,6 +706,10 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		Etherbase:               MakeEtherbase(stack.AccountManager(), ctx),
 		ChainConfig:             MakeChainConfig(ctx, stack),
 		FastSync:                ctx.GlobalBool(FastSyncFlag.Name),
+		LightMode:               ctx.GlobalBool(LightModeFlag.Name),
+		LightServ:               ctx.GlobalInt(LightServFlag.Name),
+		LightPeers:              ctx.GlobalInt(LightPeersFlag.Name),
+		MaxPeers:                ctx.GlobalInt(MaxPeersFlag.Name),
 		DatabaseCache:           ctx.GlobalInt(CacheFlag.Name),
 		DatabaseHandles:         MakeDatabaseHandles(),
 		NetworkId:               ctx.GlobalInt(NetworkIdFlag.Name),
@@ -714,6 +744,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		}
 		ethConf.Genesis = core.TestNetGenesisBlock()
 		state.StartingNonce = 1048576 // (2**20)
+		light.StartingNonce = 1048576 // (2**20)
 
 	case ctx.GlobalBool(DevModeFlag.Name):
 		ethConf.Genesis = core.OlympicGenesisBlock()
@@ -727,10 +758,23 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
 		state.MaxTrieCacheGen = uint16(gen)
 	}
 
-	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
-		return eth.New(ctx, ethConf)
-	}); err != nil {
-		Fatalf("Failed to register the Ethereum service: %v", err)
+	if ethConf.LightMode {
+		if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
+			return les.New(ctx, ethConf)
+		}); err != nil {
+			Fatalf("Failed to register the Ethereum light node service: %v", err)
+		}
+	} else {
+		if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
+			fullNode, err := eth.New(ctx, ethConf)
+			if fullNode != nil && ethConf.LightServ > 0 {
+				ls, _ := les.NewLesServer(fullNode, ethConf)
+				fullNode.AddLesServer(ls)
+			}
+			return fullNode, err
+		}); err != nil {
+			Fatalf("Failed to register the Ethereum full node service: %v", err)
+		}
 	}
 }
 
@@ -830,14 +874,23 @@ func MakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfi
 	return config
 }
 
+func ChainDbName(ctx *cli.Context) string {
+	if ctx.GlobalBool(LightModeFlag.Name) {
+		return "lightchaindata"
+	} else {
+		return "chaindata"
+	}
+}
+
 // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
 func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
 	var (
 		cache   = ctx.GlobalInt(CacheFlag.Name)
 		handles = MakeDatabaseHandles()
+		name    = ChainDbName(ctx)
 	)
 
-	chainDb, err := stack.OpenDatabase("chaindata", cache, handles)
+	chainDb, err := stack.OpenDatabase(name, cache, handles)
 	if err != nil {
 		Fatalf("Could not open database: %v", err)
 	}
diff --git a/cmd/utils/version.go b/cmd/utils/version.go
index 03633d694a80b66e3208f1d83bf5b0b61edc7038..b057f4293b7fbce18dc7c696ca0d9725397d7ac4 100644
--- a/cmd/utils/version.go
+++ b/cmd/utils/version.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of go-ethereum.
 //
 // go-ethereum is free software: you can redistribute it and/or modify
diff --git a/cmd/v5test/main.go b/cmd/v5test/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..1daff56f87afbfab61cfe3b95491be94226be4cd
--- /dev/null
+++ b/cmd/v5test/main.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+// Command v5test starts a batch of experimental RLPx v5 topic discovery test nodes.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"time"
+
+	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/p2p/discv5"
+	"github.com/ethereum/go-ethereum/p2p/nat"
+)
+
+func main() {
+	var (
+		listenPort = flag.Int("addr", 31000, "beginning of listening port range")
+		natdesc    = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
+		count      = flag.Int("count", 1, "number of v5 topic discovery test nodes (adds default bootnodes to form a test network)")
+		regtopic   = flag.String("reg", "", "topic to register on the network")
+		looktopic  = flag.String("search", "", "topic to search on the network")
+	)
+	flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
+	flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
+	glog.SetToStderr(true)
+	flag.Parse()
+
+	natm, err := nat.Parse(*natdesc)
+	if err != nil {
+		utils.Fatalf("-nat: %v", err)
+	}
+
+	for i := 0; i < *count; i++ {
+		listenAddr := ":" + strconv.Itoa(*listenPort+i)
+
+		nodeKey, err := crypto.GenerateKey()
+		if err != nil {
+			utils.Fatalf("could not generate key: %v", err)
+		}
+
+		if net, err := discv5.ListenUDP(nodeKey, listenAddr, natm, ""); err != nil {
+			utils.Fatalf("%v", err)
+		} else {
+			if err := net.SetFallbackNodes(discv5.BootNodes); err != nil {
+				utils.Fatalf("%v", err)
+			}
+			go func() {
+				if *looktopic == "" {
+					for i := 0; i < 20; i++ {
+						time.Sleep(time.Millisecond * time.Duration(2000+rand.Intn(2001)))
+						net.BucketFill()
+					}
+				}
+				switch {
+				case *regtopic != "":
+					// register topic
+					fmt.Println("Starting topic register")
+					stop := make(chan struct{})
+					net.RegisterTopic(discv5.Topic(*regtopic), stop)
+				case *looktopic != "":
+					// search topic
+					fmt.Println("Starting topic search")
+					stop := make(chan struct{})
+					found := make(chan string, 100)
+					go net.SearchTopic(discv5.Topic(*looktopic), stop, found)
+					for s := range found {
+						fmt.Println(time.Now(), s)
+					}
+				default:
+					// just keep doing lookups
+					for {
+						time.Sleep(time.Millisecond * time.Duration(40000+rand.Intn(40001)))
+						net.BucketFill()
+					}
+				}
+			}()
+		}
+		fmt.Printf("Started test node #%d with public key %v\n", i, discv5.PubkeyID(&nodeKey.PublicKey))
+	}
+
+	select {}
+}
diff --git a/common/math/exp.go b/common/math/exp.go
index 3fd31bd761b1bb6799dbfcec1b9bcd2b513da768..6f6c040e0764facf5b126d4e50b2384cd69557f4 100644
--- a/common/math/exp.go
+++ b/common/math/exp.go
@@ -1,3 +1,19 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package math
 
 import (
diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go
new file mode 100644
index 0000000000000000000000000000000000000000..92005252eb8a1b1e0b3aa3d6fe32272a47e993d8
--- /dev/null
+++ b/common/mclock/mclock.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package mclock is a wrapper for a monotonic clock source.
+package mclock
+
+import (
+	"time"
+
+	"github.com/aristanetworks/goarista/monotime"
+)
+
+type AbsTime time.Duration // absolute monotonic time
+
+func Now() AbsTime {
+	return AbsTime(monotime.Now())
+}
diff --git a/console/bridge.go b/console/bridge.go
index 24a777d78d9b25bec48489a5f5f65fb84d07774c..7f7e6feb11aa314e0e7ee9ace50fb0659849bc2e 100644
--- a/console/bridge.go
+++ b/console/bridge.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/console/console.go b/console/console.go
index 3cde9b8f53cde1102c26be1c968dc2d4e72ffa2d..6e3d7e43cb6e07f8aafdc1f4e947ec95f27c3d97 100644
--- a/console/console.go
+++ b/console/console.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/contracts/release/release.go b/contracts/release/release.go
index 5a6665dba4986dc0caa24c0bf6b42ed4c5362ffa..cd79112cd141a8719dfdef809ac0e2a4d27c8c95 100644
--- a/contracts/release/release.go
+++ b/contracts/release/release.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -27,6 +27,8 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/les"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/node"
@@ -60,12 +62,20 @@ type ReleaseService struct {
 // releases and notify the user of such.
 func NewReleaseService(ctx *node.ServiceContext, config Config) (node.Service, error) {
 	// Retrieve the Ethereum service dependency to access the blockchain
+	var apiBackend ethapi.Backend
 	var ethereum *eth.Ethereum
-	if err := ctx.Service(&ethereum); err != nil {
-		return nil, err
+	if err := ctx.Service(&ethereum); err == nil {
+		apiBackend = ethereum.ApiBackend
+	} else {
+		var ethereum *les.LightEthereum
+		if err := ctx.Service(&ethereum); err == nil {
+			apiBackend = ethereum.ApiBackend
+		} else {
+			return nil, err
+		}
 	}
 	// Construct the release service
-	contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(ethereum))
+	contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(apiBackend))
 	if err != nil {
 		return nil, err
 	}
diff --git a/core/blockchain.go b/core/blockchain.go
index d806c143d516a9c64eede98d85dad3bbbce383c0..791a8b91dcd8a9f1b76e45c4a45501b83048055b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -632,6 +632,37 @@ func (self *BlockChain) Rollback(chain []common.Hash) {
 	}
 }
 
+// SetReceiptsData computes all the non-consensus fields of the receipts.
+func SetReceiptsData(block *types.Block, receipts types.Receipts) {
+	transactions, logIndex := block.Transactions(), uint(0)
+
+	for j := 0; j < len(receipts); j++ {
+		// The transaction hash can be retrieved from the transaction itself
+		receipts[j].TxHash = transactions[j].Hash()
+
+		// The contract address can be derived from the transaction itself
+		if MessageCreatesContract(transactions[j]) {
+			from, _ := transactions[j].From()
+			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
+		}
+		// The used gas can be calculated based on previous receipts
+		if j == 0 {
+			receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
+		} else {
+			receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
+		}
+		// The derived log fields can simply be set from the block and transaction
+		for k := 0; k < len(receipts[j].Logs); k++ {
+			receipts[j].Logs[k].BlockNumber = block.NumberU64()
+			receipts[j].Logs[k].BlockHash = block.Hash()
+			receipts[j].Logs[k].TxHash = receipts[j].TxHash
+			receipts[j].Logs[k].TxIndex = uint(j)
+			receipts[j].Logs[k].Index = logIndex
+			logIndex++
+		}
+	}
+}
+
 // InsertReceiptChain attempts to complete an already existing header chain with
 // transaction and receipt data.
 func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
@@ -673,32 +704,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 				continue
 			}
 			// Compute all the non-consensus fields of the receipts
-			transactions, logIndex := block.Transactions(), uint(0)
-			for j := 0; j < len(receipts); j++ {
-				// The transaction hash can be retrieved from the transaction itself
-				receipts[j].TxHash = transactions[j].Hash()
-
-				// The contract address can be derived from the transaction itself
-				if MessageCreatesContract(transactions[j]) {
-					from, _ := transactions[j].From()
-					receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
-				}
-				// The used gas can be calculated based on previous receipts
-				if j == 0 {
-					receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
-				} else {
-					receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
-				}
-				// The derived log fields can simply be set from the block and transaction
-				for k := 0; k < len(receipts[j].Logs); k++ {
-					receipts[j].Logs[k].BlockNumber = block.NumberU64()
-					receipts[j].Logs[k].BlockHash = block.Hash()
-					receipts[j].Logs[k].TxHash = receipts[j].TxHash
-					receipts[j].Logs[k].TxIndex = uint(j)
-					receipts[j].Logs[k].Index = logIndex
-					logIndex++
-				}
-			}
+			SetReceiptsData(block, receipts)
 			// Write all the data out into the database
 			if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
 				errs[index] = fmt.Errorf("failed to write block body: %v", err)
diff --git a/core/database_util.go b/core/database_util.go
index 5f9afe6ba6db2eed822a0c20bb4b19717b56fc99..0fb593554b320834a4609b2a15c36b820d58f1e5 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -347,8 +347,13 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
 	if err != nil {
 		return err
 	}
+	return WriteBodyRLP(db, hash, number, data)
+}
+
+// WriteBodyRLP writes a serialized body of a block into the database.
+func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
 	key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-	if err := db.Put(key, data); err != nil {
+	if err := db.Put(key, rlp); err != nil {
 		glog.Fatalf("failed to store block body into database: %v", err)
 	}
 	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
@@ -446,6 +451,16 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
 	return nil
 }
 
+// WriteReceipt stores a single transaction receipt into the database.
+func WriteReceipt(db ethdb.Database, receipt *types.Receipt) error {
+	storageReceipt := (*types.ReceiptForStorage)(receipt)
+	data, err := rlp.EncodeToBytes(storageReceipt)
+	if err != nil {
+		return err
+	}
+	return db.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data)
+}
+
 // WriteReceipts stores a batch of transaction receipts into the database.
 func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
 	batch := db.NewBatch()
@@ -614,3 +629,30 @@ func GetChainConfig(db ethdb.Database, hash common.Hash) (*ChainConfig, error) {
 
 	return &config, nil
 }
+
+// FindCommonAncestor returns the last common ancestor of two block headers.
+func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
+	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
+		a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
+		if a == nil {
+			return nil
+		}
+	}
+	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
+		b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
+		if b == nil {
+			return nil
+		}
+	}
+	for a.Hash() != b.Hash() {
+		a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
+		if a == nil {
+			return nil
+		}
+		b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
+		if b == nil {
+			return nil
+		}
+	}
+	return a
+}
diff --git a/core/types/json_test.go b/core/types/json_test.go
index e17424c821b326fce1719418c809fd91331df3ec..a028b5d08c9b10091a58f97c97f6dfcaa0fbba3c 100644
--- a/core/types/json_test.go
+++ b/core/types/json_test.go
@@ -1,3 +1,19 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package types
 
 import (
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 42b84bf9ba3a6d23ac492eb7b5630888e3dee3f1..efe9a7a011dc6d3d128a72def65ad076ef5d9153 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -1,18 +1,18 @@
 // Copyright 2015 The go-ethereum Authors
-// This file is part of go-ethereum.
+// This file is part of the go-ethereum library.
 //
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// go-ethereum is distributed in the hope that it will be useful,
+// The go-ethereum library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
+// GNU Lesser General Public License for more details.
 //
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
 package eth
 
@@ -44,17 +44,17 @@ func (b *EthApiBackend) SetHead(number uint64) {
 	b.eth.blockchain.SetHead(number)
 }
 
-func (b *EthApiBackend) HeaderByNumber(blockNr rpc.BlockNumber) *types.Header {
+func (b *EthApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
 	// Pending block is only known by the miner
 	if blockNr == rpc.PendingBlockNumber {
 		block, _ := b.eth.miner.Pending()
-		return block.Header()
+		return block.Header(), nil
 	}
 	// Otherwise resolve and return the block
 	if blockNr == rpc.LatestBlockNumber {
-		return b.eth.blockchain.CurrentBlock().Header()
+		return b.eth.blockchain.CurrentBlock().Header(), nil
 	}
-	return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr))
+	return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr)), nil
 }
 
 func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) {
@@ -70,16 +70,16 @@ func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumb
 	return b.eth.blockchain.GetBlockByNumber(uint64(blockNr)), nil
 }
 
-func (b *EthApiBackend) StateAndHeaderByNumber(blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) {
+func (b *EthApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) {
 	// Pending state is only known by the miner
 	if blockNr == rpc.PendingBlockNumber {
 		block, state := b.eth.miner.Pending()
 		return EthApiState{state}, block.Header(), nil
 	}
 	// Otherwise resolve the block number and return its state
-	header := b.HeaderByNumber(blockNr)
-	if header == nil {
-		return nil, nil, nil
+	header, err := b.HeaderByNumber(ctx, blockNr)
+	if header == nil || err != nil {
+		return nil, nil, err
 	}
 	stateDb, err := b.eth.BlockChain().StateAt(header.Root)
 	return EthApiState{stateDb}, header, err
diff --git a/eth/backend.go b/eth/backend.go
index 24419d6d8fc4276ffc9863b48715af9bed779eec..ec501043a549c4186be8a0291cc4146bbda5cf3c 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -66,9 +66,13 @@ var (
 type Config struct {
 	ChainConfig *core.ChainConfig // chain configuration
 
-	NetworkId int    // Network ID to use for selecting peers to connect to
-	Genesis   string // Genesis JSON to seed the chain database with
-	FastSync  bool   // Enables the state download based fast synchronisation algorithm
+	NetworkId  int    // Network ID to use for selecting peers to connect to
+	Genesis    string // Genesis JSON to seed the chain database with
+	FastSync   bool   // Enables the state download based fast synchronisation algorithm
+	LightMode  bool   // Running in light client mode
+	LightServ  int    // Maximum percentage of time allowed for serving LES requests
+	LightPeers int    // Maximum number of LES client peers
+	MaxPeers   int    // Maximum number of global peers
 
 	SkipBcVersionCheck bool // e.g. blockchain export
 	DatabaseCache      int
@@ -100,6 +104,12 @@ type Config struct {
 	TestGenesisState ethdb.Database // Genesis state to seed the database with (testing only!)
 }
 
+type LesServer interface {
+	Start(srvr *p2p.Server)
+	Stop()
+	Protocols() []p2p.Protocol
+}
+
 // Ethereum implements the Ethereum full node service.
 type Ethereum struct {
 	chainConfig *core.ChainConfig
@@ -111,6 +121,7 @@ type Ethereum struct {
 	txMu            sync.Mutex
 	blockchain      *core.BlockChain
 	protocolManager *ProtocolManager
+	lesServer       LesServer
 	// DB interfaces
 	chainDb ethdb.Database // Block chain database
 
@@ -119,7 +130,7 @@ type Ethereum struct {
 	httpclient     *httpclient.HTTPClient
 	accountManager *accounts.Manager
 
-	apiBackend *EthApiBackend
+	ApiBackend *EthApiBackend
 
 	miner        *miner.Miner
 	Mining       bool
@@ -135,10 +146,14 @@ type Ethereum struct {
 	netRPCService *ethapi.PublicNetAPI
 }
 
+func (s *Ethereum) AddLesServer(ls LesServer) {
+	s.lesServer = ls
+}
+
 // New creates a new Ethereum object (including the
 // initialisation of the common Ethereum object)
 func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
-	chainDb, err := createDB(ctx, config)
+	chainDb, err := CreateDB(ctx, config, "chaindata")
 	if err != nil {
 		return nil, err
 	}
@@ -217,7 +232,18 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
 	eth.txPool = newPool
 
-	if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
+	maxPeers := config.MaxPeers
+	if config.LightServ > 0 {
+		// if we are running a light server, limit the number of ETH peers so that we reserve some space for incoming LES connections
+		// temporary solution until the new peer connectivity API is finished
+		halfPeers := maxPeers / 2
+		maxPeers -= config.LightPeers
+		if maxPeers < halfPeers {
+			maxPeers = halfPeers
+		}
+	}
+
+	if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
 		return nil, err
 	}
 	eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.pow)
@@ -233,14 +259,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		GpobaseCorrectionFactor: config.GpobaseCorrectionFactor,
 	}
 	gpo := gasprice.NewGasPriceOracle(eth.blockchain, chainDb, eth.eventMux, gpoParams)
-	eth.apiBackend = &EthApiBackend{eth, gpo}
+	eth.ApiBackend = &EthApiBackend{eth, gpo}
 
 	return eth, nil
 }
 
-// createDB creates the chain database.
-func createDB(ctx *node.ServiceContext, config *Config) (ethdb.Database, error) {
-	db, err := ctx.OpenDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles)
+// CreateDB creates the chain database.
+func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Database, error) {
+	db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles)
 	if db, ok := db.(*ethdb.LDBDatabase); ok {
 		db.Meter("eth/db/chaindata/")
 	}
@@ -288,7 +314,7 @@ func CreatePoW(config *Config) (*ethash.Ethash, error) {
 // APIs returns the collection of RPC services the ethereum package offers.
 // NOTE, some of these services probably need to be moved to somewhere else.
 func (s *Ethereum) APIs() []rpc.API {
-	return append(ethapi.GetAPIs(s.apiBackend, s.solcPath), []rpc.API{
+	return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{
 		{
 			Namespace: "eth",
 			Version:   "1.0",
@@ -312,7 +338,7 @@ func (s *Ethereum) APIs() []rpc.API {
 		}, {
 			Namespace: "eth",
 			Version:   "1.0",
-			Service:   filters.NewPublicFilterAPI(s.chainDb, s.eventMux),
+			Service:   filters.NewPublicFilterAPI(s.ApiBackend, false),
 			Public:    true,
 		}, {
 			Namespace: "admin",
@@ -391,7 +417,11 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManage
 // Protocols implements node.Service, returning all the currently configured
 // network protocols to start.
 func (s *Ethereum) Protocols() []p2p.Protocol {
-	return s.protocolManager.SubProtocols
+	if s.lesServer == nil {
+		return s.protocolManager.SubProtocols
+	} else {
+		return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
+	}
 }
 
 // Start implements node.Service, starting all internal goroutines needed by the
@@ -402,6 +432,9 @@ func (s *Ethereum) Start(srvr *p2p.Server) error {
 		s.StartAutoDAG()
 	}
 	s.protocolManager.Start()
+	if s.lesServer != nil {
+		s.lesServer.Start(srvr)
+	}
 	return nil
 }
 
@@ -413,6 +446,9 @@ func (s *Ethereum) Stop() error {
 	}
 	s.blockchain.Stop()
 	s.protocolManager.Stop()
+	if s.lesServer != nil {
+		s.lesServer.Stop()
+	}
 	s.txPool.Stop()
 	s.miner.Stop()
 	s.eventMux.Stop()
diff --git a/eth/bad_block.go b/eth/bad_block.go
index 3a6c3d85cb88357fa57cb639351133ee415b81a3..e0f05f540fd3bf20e10755178ccb2da8955c78a2 100644
--- a/eth/bad_block.go
+++ b/eth/bad_block.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/eth/bind.go b/eth/bind.go
index 532e94460043c038c3cba89be25d4cc9366503f8..747965d37038e8d765e94c6113cf427c8936bd10 100644
--- a/eth/bind.go
+++ b/eth/bind.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -43,11 +43,11 @@ type ContractBackend struct {
 
 // NewContractBackend creates a new native contract backend using an existing
 // Etheruem object.
-func NewContractBackend(eth *Ethereum) *ContractBackend {
+func NewContractBackend(apiBackend ethapi.Backend) *ContractBackend {
 	return &ContractBackend{
-		eapi:  ethapi.NewPublicEthereumAPI(eth.apiBackend),
-		bcapi: ethapi.NewPublicBlockChainAPI(eth.apiBackend),
-		txapi: ethapi.NewPublicTransactionPoolAPI(eth.apiBackend),
+		eapi:  ethapi.NewPublicEthereumAPI(apiBackend),
+		bcapi: ethapi.NewPublicBlockChainAPI(apiBackend),
+		txapi: ethapi.NewPublicTransactionPoolAPI(apiBackend),
 	}
 }
 
diff --git a/eth/db_upgrade.go b/eth/db_upgrade.go
index 172bb0954a035e1a2702087d91e5849041105e13..5fd73a58677bf1571c3255414e995a0f9aa47aed 100644
--- a/eth/db_upgrade.go
+++ b/eth/db_upgrade.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index da20e48a2d17dc8d4fa5f3e2acb9018a047822b1..b1f4b8169bf7204d29464213ac28b9b98ab324e7 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -164,13 +164,13 @@ type Downloader struct {
 }
 
 // New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
+func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
 	getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn,
 	headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn,
 	insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {
 
 	dl := &Downloader{
-		mode:             FullSync,
+		mode:             mode,
 		mux:              mux,
 		queue:            newQueue(stateDb),
 		peers:            newPeerSet(),
@@ -1179,10 +1179,23 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			for i, header := range rollback {
 				hashes[i] = header.Hash()
 			}
-			lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number()
+			lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, common.Big0, common.Big0
+			if d.headFastBlock != nil {
+				lastFastBlock = d.headFastBlock().Number()
+			}
+			if d.headBlock != nil {
+				lastBlock = d.headBlock().Number()
+			}
 			d.rollback(hashes)
+			curFastBlock, curBlock := common.Big0, common.Big0
+			if d.headFastBlock != nil {
+				curFastBlock = d.headFastBlock().Number()
+			}
+			if d.headBlock != nil {
+				curBlock = d.headBlock().Number()
+			}
 			glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
-				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, d.headFastBlock().Number(), lastBlock, d.headBlock().Number())
+				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock)
 
 			// If we're already past the pivot point, this could be an attack, thread carefully
 			if rollback[len(rollback)-1].Number.Uint64() > pivot {
@@ -1229,8 +1242,10 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				// L: Sync begins, and finds common ancestor at 11
 				// L: Request new headers up from 11 (R's TD was higher, it must have something)
 				// R: Nothing to give
-				if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
-					return errStallingPeer
+				if d.mode != LightSync {
+					if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
+						return errStallingPeer
+					}
 				}
 				// If fast or light syncing, ensure promised headers are indeed delivered. This is
 				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 2849712abaef8236d2704fcce4869d84803582d9..ff8cd10443157427e6d2d716129ded43add09253 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -96,7 +96,7 @@ func newTester() *downloadTester {
 	tester.stateDb, _ = ethdb.NewMemDatabase()
 	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
 
-	tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
+	tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
 		tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd,
 		tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer)
 
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 3bc2203481fa16253b79a291abd9b36088d4c38d..8345132629c9d6db8eedb63ddb055f344cfb5d19 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -52,6 +52,8 @@ type filter struct {
 // PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
 // information related to the Ethereum protocol such als blocks, transactions and logs.
 type PublicFilterAPI struct {
+	backend   Backend
+	useMipMap bool
 	mux       *event.TypeMux
 	quit      chan struct{}
 	chainDb   ethdb.Database
@@ -61,12 +63,14 @@ type PublicFilterAPI struct {
 }
 
 // NewPublicFilterAPI returns a new PublicFilterAPI instance.
-func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI {
+func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
 	api := &PublicFilterAPI{
-		mux:     mux,
-		chainDb: chainDb,
-		events:  NewEventSystem(mux),
-		filters: make(map[rpc.ID]*filter),
+		backend:   backend,
+		useMipMap: !lightMode,
+		mux:       backend.EventMux(),
+		chainDb:   backend.ChainDb(),
+		events:    NewEventSystem(backend.EventMux(), backend, lightMode),
+		filters:   make(map[rpc.ID]*filter),
 	}
 
 	go api.timeoutLoop()
@@ -314,7 +318,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) rpc.ID {
 // GetLogs returns logs matching the given argument that are stored within the state.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
-func (api *PublicFilterAPI) GetLogs(crit FilterCriteria) []Log {
+func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]Log, error) {
 	if crit.FromBlock == nil {
 		crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
 	}
@@ -322,13 +326,14 @@ func (api *PublicFilterAPI) GetLogs(crit FilterCriteria) []Log {
 		crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
 	}
 
-	filter := New(api.chainDb)
+	filter := New(api.backend, api.useMipMap)
 	filter.SetBeginBlock(crit.FromBlock.Int64())
 	filter.SetEndBlock(crit.ToBlock.Int64())
 	filter.SetAddresses(crit.Addresses)
 	filter.SetTopics(crit.Topics)
 
-	return returnLogs(filter.Find())
+	logs, err := filter.Find(ctx)
+	return returnLogs(logs), err
 }
 
 // UninstallFilter removes the filter with the given filter id.
@@ -352,22 +357,23 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
 // If the filter could not be found an empty array of logs is returned.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
-func (api *PublicFilterAPI) GetFilterLogs(id rpc.ID) []Log {
+func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]Log, error) {
 	api.filtersMu.Lock()
 	f, found := api.filters[id]
 	api.filtersMu.Unlock()
 
 	if !found || f.typ != LogsSubscription {
-		return []Log{}
+		return []Log{}, nil
 	}
 
-	filter := New(api.chainDb)
+	filter := New(api.backend, api.useMipMap)
 	filter.SetBeginBlock(f.crit.FromBlock.Int64())
 	filter.SetEndBlock(f.crit.ToBlock.Int64())
 	filter.SetAddresses(f.crit.Addresses)
 	filter.SetTopics(f.crit.Topics)
 
-	return returnLogs(filter.Find())
+	logs, err := filter.Find(ctx)
+	return returnLogs(logs), err
 }
 
 // GetFilterChanges returns the logs for the filter with the given id since
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 4226620dc4e80ba55a0de33dd0350727eb81ec12..4004af30026ce0600a1674f9a3f1b1ae385bd57b 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -24,10 +24,23 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/rpc"
+	"golang.org/x/net/context"
 )
 
+type Backend interface {
+	ChainDb() ethdb.Database
+	EventMux() *event.TypeMux
+	HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
+	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
+}
+
 // Filter can be used to retrieve and filter logs
 type Filter struct {
+	backend   Backend
+	useMipMap bool
+
 	created time.Time
 
 	db         ethdb.Database
@@ -38,8 +51,12 @@ type Filter struct {
 
 // New creates a new filter which uses a bloom filter on blocks to figure out whether
 // a particular block is interesting or not.
-func New(db ethdb.Database) *Filter {
-	return &Filter{db: db}
+func New(backend Backend, useMipMap bool) *Filter {
+	return &Filter{
+		backend:   backend,
+		useMipMap: useMipMap,
+		db:        backend.ChainDb(),
+	}
 }
 
 // SetBeginBlock sets the earliest block for filtering.
@@ -66,30 +83,29 @@ func (f *Filter) SetTopics(topics [][]common.Hash) {
 }
 
 // Run filters logs with the current parameters set
-func (f *Filter) Find() []Log {
-	latestHash := core.GetHeadBlockHash(f.db)
-	latestBlock := core.GetBlock(f.db, latestHash, core.GetBlockNumber(f.db, latestHash))
-	if latestBlock == nil {
-		return []Log{}
+func (f *Filter) Find(ctx context.Context) ([]Log, error) {
+	head, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
+	if head == nil {
+		return nil, nil
 	}
+	headBlockNumber := head.Number.Uint64()
 
 	var beginBlockNo uint64 = uint64(f.begin)
 	if f.begin == -1 {
-		beginBlockNo = latestBlock.NumberU64()
+		beginBlockNo = headBlockNumber
 	}
-
-	endBlockNo := uint64(f.end)
+	var endBlockNo uint64 = uint64(f.end)
 	if f.end == -1 {
-		endBlockNo = latestBlock.NumberU64()
+		endBlockNo = headBlockNumber
 	}
 
 	// if no addresses are present we can't make use of fast search which
 	// uses the mipmap bloom filters to check for fast inclusion and uses
 	// higher range probability in order to ensure at least a false positive
-	if len(f.addresses) == 0 {
-		return f.getLogs(beginBlockNo, endBlockNo)
+	if !f.useMipMap || len(f.addresses) == 0 {
+		return f.getLogs(ctx, beginBlockNo, endBlockNo)
 	}
-	return f.mipFind(beginBlockNo, endBlockNo, 0)
+	return f.mipFind(beginBlockNo, endBlockNo, 0), nil
 }
 
 func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) {
@@ -107,7 +123,8 @@ func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) {
 				start := uint64(math.Max(float64(num), float64(start)))
 				end := uint64(math.Min(float64(num+level-1), float64(end)))
 				if depth+1 == len(core.MIPMapLevels) {
-					logs = append(logs, f.getLogs(start, end)...)
+					l, _ := f.getLogs(context.Background(), start, end)
+					logs = append(logs, l...)
 				} else {
 					logs = append(logs, f.mipFind(start, end, depth+1)...)
 				}
@@ -122,28 +139,22 @@ func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) {
 	return logs
 }
 
-func (f *Filter) getLogs(start, end uint64) (logs []Log) {
-	var block *types.Block
-
+func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []Log, err error) {
 	for i := start; i <= end; i++ {
-		hash := core.GetCanonicalHash(f.db, i)
-		if hash != (common.Hash{}) {
-			block = core.GetBlock(f.db, hash, i)
-		} else { // block not found
-			return logs
-		}
-		if block == nil { // block not found/written
-			return logs
+		header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
+		if header == nil || err != nil {
+			return logs, err
 		}
 
 		// Use bloom filtering to see if this block is interesting given the
 		// current parameters
-		if f.bloomFilter(block) {
+		if f.bloomFilter(header.Bloom) {
 			// Get the logs of the block
-			var (
-				receipts   = core.GetBlockReceipts(f.db, block.Hash(), i)
-				unfiltered []Log
-			)
+			receipts, err := f.backend.GetReceipts(ctx, header.Hash())
+			if err != nil {
+				return nil, err
+			}
+			var unfiltered []Log
 			for _, receipt := range receipts {
 				rl := make([]Log, len(receipt.Logs))
 				for i, l := range receipt.Logs {
@@ -155,7 +166,7 @@ func (f *Filter) getLogs(start, end uint64) (logs []Log) {
 		}
 	}
 
-	return logs
+	return logs, nil
 }
 
 func includes(addresses []common.Address, a common.Address) bool {
@@ -207,11 +218,15 @@ Logs:
 	return ret
 }
 
-func (f *Filter) bloomFilter(block *types.Block) bool {
-	if len(f.addresses) > 0 {
+func (f *Filter) bloomFilter(bloom types.Bloom) bool {
+	return bloomFilter(bloom, f.addresses, f.topics)
+}
+
+func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
+	if len(addresses) > 0 {
 		var included bool
-		for _, addr := range f.addresses {
-			if types.BloomLookup(block.Bloom(), addr) {
+		for _, addr := range addresses {
+			if types.BloomLookup(bloom, addr) {
 				included = true
 				break
 			}
@@ -222,10 +237,10 @@ func (f *Filter) bloomFilter(block *types.Block) bool {
 		}
 	}
 
-	for _, sub := range f.topics {
+	for _, sub := range topics {
 		var included bool
 		for _, topic := range sub {
-			if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) {
+			if (topic == common.Hash{}) || types.BloomLookup(bloom, topic) {
 				included = true
 				break
 			}
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 04a55fd0990f3b01405b077a4c76da76529e9c6b..c2c072a9f91f6a94bd613fc5dc239c2b17fcf9ba 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/rpc"
+	"golang.org/x/net/context"
 )
 
 // Type determines the kind of filter and is used to put the filter in to
@@ -95,6 +96,9 @@ type subscription struct {
 type EventSystem struct {
 	mux       *event.TypeMux
 	sub       event.Subscription
+	backend   Backend
+	lightMode bool
+	lastHead  *types.Header
 	install   chan *subscription // install filter for event notification
 	uninstall chan *subscription // remove filter for event notification
 }
@@ -105,9 +109,11 @@ type EventSystem struct {
 //
 // The returned manager has a loop that needs to be stopped with the Stop function
 // or by stopping the given mux.
-func NewEventSystem(mux *event.TypeMux) *EventSystem {
+func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
 	m := &EventSystem{
 		mux:       mux,
+		backend:   backend,
+		lightMode: lightMode,
 		install:   make(chan *subscription),
 		uninstall: make(chan *subscription),
 	}
@@ -235,7 +241,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
 type filterIndex map[Type]map[rpc.ID]*subscription
 
 // broadcast event to filters that match criteria.
-func broadcast(filters filterIndex, ev *event.Event) {
+func (es *EventSystem) broadcast(filters filterIndex, ev *event.Event) {
 	if ev == nil {
 		return
 	}
@@ -279,7 +285,77 @@ func broadcast(filters filterIndex, ev *event.Event) {
 				f.headers <- e.Block.Header()
 			}
 		}
+		if es.lightMode && len(filters[LogsSubscription]) > 0 {
+			es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) {
+				for _, f := range filters[LogsSubscription] {
+					if ev.Time.After(f.created) {
+						if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
+							f.logs <- matchedLogs
+						}
+					}
+				}
+			})
+		}
+	}
+}
+
+func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
+	oldh := es.lastHead
+	es.lastHead = newHeader
+	if oldh == nil {
+		return
+	}
+	newh := newHeader
+	// find common ancestor, create list of rolled back and new block hashes
+	var oldHeaders, newHeaders []*types.Header
+	for oldh.Hash() != newh.Hash() {
+		if oldh.Number.Uint64() >= newh.Number.Uint64() {
+			oldHeaders = append(oldHeaders, oldh)
+			oldh = core.GetHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
+		}
+		if oldh.Number.Uint64() < newh.Number.Uint64() {
+			newHeaders = append(newHeaders, newh)
+			newh = core.GetHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
+			if newh == nil {
+				// happens when CHT syncing, nothing to do
+				newh = oldh
+			}
+		}
+	}
+	// roll back old blocks
+	for _, h := range oldHeaders {
+		callBack(h, true)
+	}
+	// check new blocks (array is in reverse order)
+	for i := len(newHeaders) - 1; i >= 0; i-- {
+		callBack(newHeaders[i], false)
+	}
+}
+
+// filter logs of a single header in light client mode
+func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []Log {
+	//fmt.Println("lightFilterLogs", header.Number.Uint64(), remove)
+	if bloomFilter(header.Bloom, addresses, topics) {
+		//fmt.Println("bloom match")
+		// Get the logs of the block
+		ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
+		receipts, err := es.backend.GetReceipts(ctx, header.Hash())
+		if err != nil {
+			return nil
+		}
+		var unfiltered []Log
+		for _, receipt := range receipts {
+			rl := make([]Log, len(receipt.Logs))
+			for i, l := range receipt.Logs {
+				rl[i] = Log{l, remove}
+			}
+			unfiltered = append(unfiltered, rl...)
+		}
+		logs := filterLogs(unfiltered, addresses, topics)
+		//fmt.Println("found", len(logs))
+		return logs
 	}
+	return nil
 }
 
 // eventLoop (un)installs filters and processes mux events.
@@ -294,7 +370,7 @@ func (es *EventSystem) eventLoop() {
 			if !active { // system stopped
 				return
 			}
-			broadcast(index, ev)
+			es.broadcast(index, ev)
 		case f := <-es.install:
 			if _, found := index[f.typ]; !found {
 				index[f.typ] = make(map[rpc.ID]*subscription)
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 9e6fde1c64a4553407c0e699a0f24294170be3a0..48d6811c0c06beba816b654384e24657197c9f53 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -22,6 +22,8 @@ import (
 	"testing"
 	"time"
 
+	"golang.org/x/net/context"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -32,11 +34,43 @@ import (
 )
 
 var (
-	mux   = new(event.TypeMux)
-	db, _ = ethdb.NewMemDatabase()
-	api   = NewPublicFilterAPI(db, mux)
+	mux     = new(event.TypeMux)
+	db, _   = ethdb.NewMemDatabase()
+	backend = &testBackend{mux, db}
+	api     = NewPublicFilterAPI(backend, false)
 )
 
+type testBackend struct {
+	mux *event.TypeMux
+	db  ethdb.Database
+}
+
+func (b *testBackend) ChainDb() ethdb.Database {
+	return b.db
+}
+
+func (b *testBackend) EventMux() *event.TypeMux {
+	return b.mux
+}
+
+func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
+	var hash common.Hash
+	var num uint64
+	if blockNr == rpc.LatestBlockNumber {
+		hash = core.GetHeadBlockHash(b.db)
+		num = core.GetBlockNumber(b.db, hash)
+	} else {
+		num = uint64(blockNr)
+		hash = core.GetCanonicalHash(b.db, num)
+	}
+	return core.GetHeader(b.db, hash, num), nil
+}
+
+func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
+	num := core.GetBlockNumber(b.db, blockHash)
+	return core.GetBlockReceipts(b.db, blockHash, num), nil
+}
+
 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
 // It creates multiple subscriptions:
 // - one at the start and should receive all posted chain events and a second (blockHashes)
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index 7b714f5d5fe7b306bd0b0e3006b82c99949c10f4..e0b24046c0fbb973de942f047e8617c7fca63cd7 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -22,6 +22,8 @@ import (
 	"os"
 	"testing"
 
+	"golang.org/x/net/context"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -48,6 +50,7 @@ func BenchmarkMipmaps(b *testing.B) {
 
 	var (
 		db, _   = ethdb.NewLDBDatabase(dir, 0, 0)
+		backend = &testBackend{mux, db}
 		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
 		addr2   = common.BytesToAddress([]byte("jeff"))
@@ -100,13 +103,13 @@ func BenchmarkMipmaps(b *testing.B) {
 	}
 	b.ResetTimer()
 
-	filter := New(db)
+	filter := New(backend, true)
 	filter.SetAddresses([]common.Address{addr1, addr2, addr3, addr4})
 	filter.SetBeginBlock(0)
 	filter.SetEndBlock(-1)
 
 	for i := 0; i < b.N; i++ {
-		logs := filter.Find()
+		logs, _ := filter.Find(context.Background())
 		if len(logs) != 4 {
 			b.Fatal("expected 4 log, got", len(logs))
 		}
@@ -122,6 +125,7 @@ func TestFilters(t *testing.T) {
 
 	var (
 		db, _   = ethdb.NewLDBDatabase(dir, 0, 0)
+		backend = &testBackend{mux, db}
 		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 		addr    = crypto.PubkeyToAddress(key1.PublicKey)
 
@@ -201,23 +205,23 @@ func TestFilters(t *testing.T) {
 		}
 	}
 
-	filter := New(db)
+	filter := New(backend, true)
 	filter.SetAddresses([]common.Address{addr})
 	filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2, hash3, hash4}})
 	filter.SetBeginBlock(0)
 	filter.SetEndBlock(-1)
 
-	logs := filter.Find()
+	logs, _ := filter.Find(context.Background())
 	if len(logs) != 4 {
 		t.Error("expected 4 log, got", len(logs))
 	}
 
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetAddresses([]common.Address{addr})
 	filter.SetTopics([][]common.Hash{[]common.Hash{hash3}})
 	filter.SetBeginBlock(900)
 	filter.SetEndBlock(999)
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 1 {
 		t.Error("expected 1 log, got", len(logs))
 	}
@@ -225,12 +229,12 @@ func TestFilters(t *testing.T) {
 		t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
 	}
 
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetAddresses([]common.Address{addr})
 	filter.SetTopics([][]common.Hash{[]common.Hash{hash3}})
 	filter.SetBeginBlock(990)
 	filter.SetEndBlock(-1)
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 1 {
 		t.Error("expected 1 log, got", len(logs))
 	}
@@ -238,44 +242,44 @@ func TestFilters(t *testing.T) {
 		t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
 	}
 
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2}})
 	filter.SetBeginBlock(1)
 	filter.SetEndBlock(10)
 
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 2 {
 		t.Error("expected 2 log, got", len(logs))
 	}
 
 	failHash := common.BytesToHash([]byte("fail"))
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetTopics([][]common.Hash{[]common.Hash{failHash}})
 	filter.SetBeginBlock(0)
 	filter.SetEndBlock(-1)
 
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 0 {
 		t.Error("expected 0 log, got", len(logs))
 	}
 
 	failAddr := common.BytesToAddress([]byte("failmenow"))
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetAddresses([]common.Address{failAddr})
 	filter.SetBeginBlock(0)
 	filter.SetEndBlock(-1)
 
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 0 {
 		t.Error("expected 0 log, got", len(logs))
 	}
 
-	filter = New(db)
+	filter = New(backend, true)
 	filter.SetTopics([][]common.Hash{[]common.Hash{failHash}, []common.Hash{hash1}})
 	filter.SetBeginBlock(0)
 	filter.SetEndBlock(-1)
 
-	logs = filter.Find()
+	logs, _ = filter.Find(context.Background())
 	if len(logs) != 0 {
 		t.Error("expected 0 log, got", len(logs))
 	}
diff --git a/eth/gasprice/lightprice.go b/eth/gasprice/lightprice.go
new file mode 100644
index 0000000000000000000000000000000000000000..8886d32d7dcef5476875f58fb9da6cf363d5ce48
--- /dev/null
+++ b/eth/gasprice/lightprice.go
@@ -0,0 +1,160 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package gasprice
+
+import (
+	"math/big"
+	"sort"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/rpc"
+	"golang.org/x/net/context"
+)
+
+const (
+	LpoAvgCount     = 5
+	LpoMinCount     = 3
+	LpoMaxBlocks    = 20
+	LpoSelect       = 50
+	LpoDefaultPrice = 20000000000
+)
+
+// LightPriceOracle recommends gas prices based on the content of recent
+// blocks. Suitable for both light and full clients.
+type LightPriceOracle struct {
+	backend   ethapi.Backend
+	lastHead  common.Hash
+	lastPrice *big.Int
+	cacheLock sync.RWMutex
+	fetchLock sync.Mutex
+}
+
+// NewLightPriceOracle returns a new oracle.
+func NewLightPriceOracle(backend ethapi.Backend) *LightPriceOracle {
+	return &LightPriceOracle{
+		backend:   backend,
+		lastPrice: big.NewInt(LpoDefaultPrice),
+	}
+}
+
+// SuggestPrice returns the recommended gas price.
+func (self *LightPriceOracle) SuggestPrice(ctx context.Context) (*big.Int, error) {
+	self.cacheLock.RLock()
+	lastHead := self.lastHead
+	lastPrice := self.lastPrice
+	self.cacheLock.RUnlock()
+
+	head, _ := self.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
+	headHash := head.Hash()
+	if headHash == lastHead {
+		return lastPrice, nil
+	}
+
+	self.fetchLock.Lock()
+	defer self.fetchLock.Unlock()
+
+	// try checking the cache again, maybe the last fetch fetched what we need
+	self.cacheLock.RLock()
+	lastHead = self.lastHead
+	lastPrice = self.lastPrice
+	self.cacheLock.RUnlock()
+	if headHash == lastHead {
+		return lastPrice, nil
+	}
+
+	blockNum := head.Number.Uint64()
+	chn := make(chan lpResult, LpoMaxBlocks)
+	sent := 0
+	exp := 0
+	var lps bigIntArray
+	for sent < LpoAvgCount && blockNum > 0 {
+		go self.getLowestPrice(ctx, blockNum, chn)
+		sent++
+		exp++
+		blockNum--
+	}
+	maxEmpty := LpoAvgCount - LpoMinCount
+	for exp > 0 {
+		res := <-chn
+		if res.err != nil {
+			return nil, res.err
+		}
+		exp--
+		if res.price != nil {
+			lps = append(lps, res.price)
+		} else {
+			if maxEmpty > 0 {
+				maxEmpty--
+			} else {
+				if blockNum > 0 && sent < LpoMaxBlocks {
+					go self.getLowestPrice(ctx, blockNum, chn)
+					sent++
+					exp++
+					blockNum--
+				}
+			}
+		}
+	}
+	price := lastPrice
+	if len(lps) > 0 {
+		sort.Sort(lps)
+		price = lps[(len(lps)-1)*LpoSelect/100]
+	}
+
+	self.cacheLock.Lock()
+	self.lastHead = headHash
+	self.lastPrice = price
+	self.cacheLock.Unlock()
+	return price, nil
+}
+
+type lpResult struct {
+	price *big.Int
+	err   error
+}
+
+// getLowestPrice calculates the lowest transaction gas price in a given block
+// and sends it to the result channel. If the block is empty, price is nil.
+func (self *LightPriceOracle) getLowestPrice(ctx context.Context, blockNum uint64, chn chan lpResult) {
+	block, err := self.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum))
+	if block == nil {
+		chn <- lpResult{nil, err}
+		return
+	}
+	txs := block.Transactions()
+	if len(txs) == 0 {
+		chn <- lpResult{nil, nil}
+		return
+	}
+	// find smallest gasPrice
+	minPrice := txs[0].GasPrice()
+	for i := 1; i < len(txs); i++ {
+		price := txs[i].GasPrice()
+		if price.Cmp(minPrice) < 0 {
+			minPrice = price
+		}
+	}
+	chn <- lpResult{minPrice, nil}
+}
+
+type bigIntArray []*big.Int
+
+func (s bigIntArray) Len() int           { return len(s) }
+func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
+func (s bigIntArray) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/eth/handler.go b/eth/handler.go
index e478990f7c7ba8a3e8e482f6e899522042b85b65..9d6b1ced2b329cf9f8cac6aae68ec6dd81e51977 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -68,6 +68,7 @@ type ProtocolManager struct {
 	blockchain  *core.BlockChain
 	chaindb     ethdb.Database
 	chainconfig *core.ChainConfig
+	maxPeers    int
 
 	downloader *downloader.Downloader
 	fetcher    *fetcher.Fetcher
@@ -94,7 +95,7 @@ type ProtocolManager struct {
 
 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
 // with the ethereum network.
-func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
 	// Create the protocol manager with the base fields
 	manager := &ProtocolManager{
 		networkId:   networkId,
@@ -103,6 +104,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
 		blockchain:  blockchain,
 		chaindb:     chaindb,
 		chainconfig: config,
+		maxPeers:    maxPeers,
 		peers:       newPeerSet(),
 		newPeerCh:   make(chan *peer),
 		noMorePeers: make(chan struct{}),
@@ -156,7 +158,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
 		return nil, errIncompatibleConfig
 	}
 	// Construct the different synchronisation mechanisms
-	manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash,
+	manager.downloader = downloader.New(downloader.FullSync, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash,
 		blockchain.GetBlockByHash, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
 		blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
 		manager.removePeer)
@@ -253,6 +255,10 @@ func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *p
 // handle is the callback invoked to manage the life cycle of an eth peer. When
 // this function terminates, the peer is disconnected.
 func (pm *ProtocolManager) handle(p *peer) error {
+	if pm.peers.Len() >= pm.maxPeers {
+		return p2p.DiscTooManyPeers
+	}
+
 	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
 
 	// Execute the Ethereum handshake
diff --git a/eth/handler_test.go b/eth/handler_test.go
index f0f18d0a6258b3bd7af9d1bc69571f2f64047bbf..64449afdaf8a9d7891e7abafe6a125eced08ebde 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -469,7 +469,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
 		config        = &core.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
 		blockchain, _ = core.NewBlockChain(db, config, pow, evmux)
 	)
-	pm, err := NewProtocolManager(config, false, NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+	pm, err := NewProtocolManager(config, false, NetworkId, 1000, evmux, new(testTxPool), pow, blockchain, db)
 	if err != nil {
 		t.Fatalf("failed to start test protocol manager: %v", err)
 	}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index 732fe89ee7f4a3b3662aa9b666e8ae15c9a7b36d..d5295b3984690df7f16c7842913c775d050fa91d 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -62,7 +62,7 @@ func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core
 		panic(err)
 	}
 
-	pm, err := NewProtocolManager(chainConfig, fastSync, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
+	pm, err := NewProtocolManager(chainConfig, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
 	if err != nil {
 		return nil, err
 	}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 0cc11eb5b4df228cc8b88e6bc17b083ea601b6ea..102c0d3b21f409bed8cd9412f48cb7d64dca10f8 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -1,3 +1,19 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package ethclient
 
 import "github.com/ethereum/go-ethereum"
diff --git a/ethdb/database.go b/ethdb/database.go
index 479c54b607bb406d93713424ea634dde7364fcbe..96d9a5982023c3b238e938b47b38aea86cfd3442 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -40,13 +40,15 @@ var OpenFileLimit = 64
 // cacheRatio specifies how the total allotted cache is distributed between the
 // various system databases.
 var cacheRatio = map[string]float64{
-	"chaindata": 1.0,
+	"chaindata":      1.0,
+	"lightchaindata": 1.0,
 }
 
 // handleRatio specifies how the total allotted file descriptors is distributed
 // between the various system databases.
 var handleRatio = map[string]float64{
-	"chaindata": 1.0,
+	"chaindata":      1.0,
+	"lightchaindata": 1.0,
 }
 
 type LDBDatabase struct {
diff --git a/internal/build/azure.go b/internal/build/azure.go
index ceac6a4cd5ccac19780ef0a0b38a31760799a7b0..32f53555813911bed1667d3d8da68851bd5bf3af 100644
--- a/internal/build/azure.go
+++ b/internal/build/azure.go
@@ -1,3 +1,4 @@
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/internal/build/pgp.go b/internal/build/pgp.go
index 7938df51a587347bc30f7616fde25e29a2cc8b69..79ab9c06f1b5c42abcfa995df384ff5aeb68a73e 100644
--- a/internal/build/pgp.go
+++ b/internal/build/pgp.go
@@ -1,3 +1,4 @@
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 0e8e905aa92485dd822d1c5dede15405dc6766e8..36bb8c077c309538dc77cf19638a2189d910c593 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -366,14 +366,15 @@ func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI {
 
 // BlockNumber returns the block number of the chain head.
 func (s *PublicBlockChainAPI) BlockNumber() *big.Int {
-	return s.b.HeaderByNumber(rpc.LatestBlockNumber).Number
+	header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available
+	return header.Number
 }
 
 // GetBalance returns the amount of wei for the given address in the state of the
 // given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta
 // block numbers are also allowed.
 func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) {
-	state, _, err := s.b.StateAndHeaderByNumber(blockNr)
+	state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return nil, err
 	}
@@ -458,7 +459,7 @@ func (s *PublicBlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, bloc
 
 // GetCode returns the code stored at the given address in the state for the given block number.
 func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (string, error) {
-	state, _, err := s.b.StateAndHeaderByNumber(blockNr)
+	state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return "", err
 	}
@@ -473,7 +474,7 @@ func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Addres
 // block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block
 // numbers are also allowed.
 func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNr rpc.BlockNumber) (string, error) {
-	state, _, err := s.b.StateAndHeaderByNumber(blockNr)
+	state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return "0x", err
 	}
@@ -517,7 +518,7 @@ type CallArgs struct {
 func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (string, *big.Int, error) {
 	defer func(start time.Time) { glog.V(logger.Debug).Infof("call took %v", time.Since(start)) }(time.Now())
 
-	state, header, err := s.b.StateAndHeaderByNumber(blockNr)
+	state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return "0x", common.Big0, err
 	}
@@ -859,7 +860,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx cont
 
 // GetTransactionCount returns the number of transactions the given address has sent for the given block number
 func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*rpc.HexNumber, error) {
-	state, _, err := s.b.StateAndHeaderByNumber(blockNr)
+	state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return nil, err
 	}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 0aa3da18d08296bb158fe6e43e85e6abad42b297..f9358b6cfd3dd6082af51eb7b702547770f1381b 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2015 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -44,9 +44,9 @@ type Backend interface {
 	AccountManager() *accounts.Manager
 	// BlockChain API
 	SetHead(number uint64)
-	HeaderByNumber(blockNr rpc.BlockNumber) *types.Header
+	HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
 	BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error)
-	StateAndHeaderByNumber(blockNr rpc.BlockNumber) (State, *types.Header, error)
+	StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (State, *types.Header, error)
 	GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
 	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
 	GetTd(blockHash common.Hash) *big.Int
diff --git a/internal/jsre/pretty.go b/internal/jsre/pretty.go
index f32e16243487dcc735d05507240029f64057c00b..8fe00cc4c828f1e4b58fd59b5938f5edec7605c6 100644
--- a/internal/jsre/pretty.go
+++ b/internal/jsre/pretty.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/les/api_backend.go b/les/api_backend.go
new file mode 100644
index 0000000000000000000000000000000000000000..04120c669fd959008603d596c048494abaa7cdf8
--- /dev/null
+++ b/les/api_backend.go
@@ -0,0 +1,144 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/accounts"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/eth/gasprice"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/light"
+	rpc "github.com/ethereum/go-ethereum/rpc"
+	"golang.org/x/net/context"
+)
+
+type LesApiBackend struct {
+	eth *LightEthereum
+	gpo *gasprice.LightPriceOracle
+}
+
+func (b *LesApiBackend) SetHead(number uint64) {
+	b.eth.blockchain.SetHead(number)
+}
+
+func (b *LesApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
+	if blockNr == rpc.LatestBlockNumber || blockNr == rpc.PendingBlockNumber {
+		return b.eth.blockchain.CurrentHeader(), nil
+	}
+
+	return b.eth.blockchain.GetHeaderByNumberOdr(ctx, uint64(blockNr))
+}
+
+func (b *LesApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) {
+	header, err := b.HeaderByNumber(ctx, blockNr)
+	if header == nil || err != nil {
+		return nil, err
+	}
+	return b.GetBlock(ctx, header.Hash())
+}
+
+func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) {
+	header, err := b.HeaderByNumber(ctx, blockNr)
+	if header == nil || err != nil {
+		return nil, nil, err
+	}
+	return light.NewLightState(light.StateTrieID(header), b.eth.odr), header, nil
+}
+
+func (b *LesApiBackend) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
+	return b.eth.blockchain.GetBlockByHash(ctx, blockHash)
+}
+
+func (b *LesApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
+	return light.GetBlockReceipts(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
+}
+
+func (b *LesApiBackend) GetTd(blockHash common.Hash) *big.Int {
+	return b.eth.blockchain.GetTdByHash(blockHash)
+}
+
+func (b *LesApiBackend) GetVMEnv(ctx context.Context, msg core.Message, state ethapi.State, header *types.Header) (vm.Environment, func() error, error) {
+	stateDb := state.(*light.LightState).Copy()
+	addr, _ := msg.From()
+	from, err := stateDb.GetOrNewStateObject(ctx, addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	from.SetBalance(common.MaxBig)
+	env := light.NewEnv(ctx, stateDb, b.eth.chainConfig, b.eth.blockchain, msg, header, b.eth.chainConfig.VmConfig)
+	return env, env.Error, nil
+}
+
+func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
+	return b.eth.txPool.Add(ctx, signedTx)
+}
+
+func (b *LesApiBackend) RemoveTx(txHash common.Hash) {
+	b.eth.txPool.RemoveTx(txHash)
+}
+
+func (b *LesApiBackend) GetPoolTransactions() types.Transactions {
+	return b.eth.txPool.GetTransactions()
+}
+
+func (b *LesApiBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction {
+	return b.eth.txPool.GetTransaction(txHash)
+}
+
+func (b *LesApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {
+	return b.eth.txPool.GetNonce(ctx, addr)
+}
+
+func (b *LesApiBackend) Stats() (pending int, queued int) {
+	return b.eth.txPool.Stats(), 0
+}
+
+func (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+	return b.eth.txPool.Content()
+}
+
+func (b *LesApiBackend) Downloader() *downloader.Downloader {
+	return b.eth.Downloader()
+}
+
+func (b *LesApiBackend) ProtocolVersion() int {
+	return b.eth.LesVersion() + 10000
+}
+
+func (b *LesApiBackend) SuggestPrice(ctx context.Context) (*big.Int, error) {
+	return b.gpo.SuggestPrice(ctx)
+}
+
+func (b *LesApiBackend) ChainDb() ethdb.Database {
+	return b.eth.chainDb
+}
+
+func (b *LesApiBackend) EventMux() *event.TypeMux {
+	return b.eth.eventMux
+}
+
+func (b *LesApiBackend) AccountManager() *accounts.Manager {
+	return b.eth.accountManager
+}
diff --git a/les/backend.go b/les/backend.go
new file mode 100644
index 0000000000000000000000000000000000000000..5eb53d728abfe019cd844cf1f4e444d9968b209b
--- /dev/null
+++ b/les/backend.go
@@ -0,0 +1,221 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/ethereum/ethash"
+	"github.com/ethereum/go-ethereum/accounts"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/compiler"
+	"github.com/ethereum/go-ethereum/common/httpclient"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/eth/filters"
+	"github.com/ethereum/go-ethereum/eth/gasprice"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+	rpc "github.com/ethereum/go-ethereum/rpc"
+)
+
+type LightEthereum struct {
+	odr         *LesOdr
+	relay       *LesTxRelay
+	chainConfig *core.ChainConfig
+	// Channel for shutting down the service
+	shutdownChan chan bool
+	// Handlers
+	txPool          *light.TxPool
+	blockchain      *light.LightChain
+	protocolManager *ProtocolManager
+	// DB interfaces
+	chainDb ethdb.Database // Block chain database
+
+	ApiBackend *LesApiBackend
+
+	eventMux       *event.TypeMux
+	pow            *ethash.Ethash
+	httpclient     *httpclient.HTTPClient
+	accountManager *accounts.Manager
+	solcPath       string
+	solc           *compiler.Solidity
+
+	NatSpec       bool
+	PowTest       bool
+	netVersionId  int
+	netRPCService *ethapi.PublicNetAPI
+}
+
+func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
+	chainDb, err := eth.CreateDB(ctx, config, "lightchaindata")
+	if err != nil {
+		return nil, err
+	}
+	if err := eth.SetupGenesisBlock(&chainDb, config); err != nil {
+		return nil, err
+	}
+	pow, err := eth.CreatePoW(config)
+	if err != nil {
+		return nil, err
+	}
+
+	odr := NewLesOdr(chainDb)
+	relay := NewLesTxRelay()
+	eth := &LightEthereum{
+		odr:            odr,
+		relay:          relay,
+		chainDb:        chainDb,
+		eventMux:       ctx.EventMux,
+		accountManager: ctx.AccountManager,
+		pow:            pow,
+		shutdownChan:   make(chan bool),
+		httpclient:     httpclient.New(config.DocRoot),
+		netVersionId:   config.NetworkId,
+		NatSpec:        config.NatSpec,
+		PowTest:        config.PowTest,
+		solcPath:       config.SolcPath,
+	}
+
+	if config.ChainConfig == nil {
+		return nil, errors.New("missing chain config")
+	}
+	eth.chainConfig = config.ChainConfig
+	eth.chainConfig.VmConfig = vm.Config{
+		EnableJit: config.EnableJit,
+		ForceJit:  config.ForceJit,
+	}
+	eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.pow, eth.eventMux)
+	if err != nil {
+		if err == core.ErrNoGenesis {
+			return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`)
+		}
+		return nil, err
+	}
+
+	eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay)
+	if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.pow, eth.blockchain, nil, chainDb, odr, relay); err != nil {
+		return nil, err
+	}
+
+	eth.ApiBackend = &LesApiBackend{eth, nil}
+	eth.ApiBackend.gpo = gasprice.NewLightPriceOracle(eth.ApiBackend)
+	return eth, nil
+}
+
+type LightDummyAPI struct{}
+
+// Etherbase is the address that mining rewards will be send to
+func (s *LightDummyAPI) Etherbase() (common.Address, error) {
+	return common.Address{}, fmt.Errorf("not supported")
+}
+
+// Coinbase is the address that mining rewards will be send to (alias for Etherbase)
+func (s *LightDummyAPI) Coinbase() (common.Address, error) {
+	return common.Address{}, fmt.Errorf("not supported")
+}
+
+// Hashrate returns the POW hashrate
+func (s *LightDummyAPI) Hashrate() *rpc.HexNumber {
+	return rpc.NewHexNumber(0)
+}
+
+// Mining returns an indication if this node is currently mining.
+func (s *LightDummyAPI) Mining() bool {
+	return false
+}
+
+// APIs returns the collection of RPC services the ethereum package offers.
+// NOTE, some of these services probably need to be moved to somewhere else.
+func (s *LightEthereum) APIs() []rpc.API {
+	return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{
+		{
+			Namespace: "eth",
+			Version:   "1.0",
+			Service:   &LightDummyAPI{},
+			Public:    true,
+		}, {
+			Namespace: "eth",
+			Version:   "1.0",
+			Service:   downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux),
+			Public:    true,
+		}, {
+			Namespace: "eth",
+			Version:   "1.0",
+			Service:   filters.NewPublicFilterAPI(s.ApiBackend, true),
+			Public:    true,
+		}, {
+			Namespace: "net",
+			Version:   "1.0",
+			Service:   s.netRPCService,
+			Public:    true,
+		},
+	}...)
+}
+
+func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {
+	s.blockchain.ResetWithGenesisBlock(gb)
+}
+
+func (s *LightEthereum) BlockChain() *light.LightChain      { return s.blockchain }
+func (s *LightEthereum) TxPool() *light.TxPool              { return s.txPool }
+func (s *LightEthereum) LesVersion() int                    { return int(s.protocolManager.SubProtocols[0].Version) }
+func (s *LightEthereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
+
+// Protocols implements node.Service, returning all the currently configured
+// network protocols to start.
+func (s *LightEthereum) Protocols() []p2p.Protocol {
+	return s.protocolManager.SubProtocols
+}
+
+// Start implements node.Service, starting all internal goroutines needed by the
+// Ethereum protocol implementation.
+func (s *LightEthereum) Start(srvr *p2p.Server) error {
+	glog.V(logger.Info).Infof("WARNING: light client mode is an experimental feature")
+	s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
+	s.protocolManager.Start(srvr)
+	return nil
+}
+
+// Stop implements node.Service, terminating all internal goroutines used by the
+// Ethereum protocol.
+func (s *LightEthereum) Stop() error {
+	s.odr.Stop()
+	s.blockchain.Stop()
+	s.protocolManager.Stop()
+	s.txPool.Stop()
+
+	s.eventMux.Stop()
+
+	time.Sleep(time.Millisecond * 200)
+	s.chainDb.Close()
+	close(s.shutdownChan)
+
+	return nil
+}
diff --git a/les/fetcher.go b/les/fetcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae9bf84749d254bd526ddcdc700517a17a23fbec
--- /dev/null
+++ b/les/fetcher.go
@@ -0,0 +1,295 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"math/big"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+type lightFetcher struct {
+	pm    *ProtocolManager
+	odr   *LesOdr
+	chain BlockChain
+
+	headAnnouncedMu sync.Mutex
+	headAnnouncedBy map[common.Hash][]*peer // peers that announced a given head hash; cleared once the header arrives
+	currentTd       *big.Int                // highest total difficulty among locally known headers
+	deliverChn      chan fetchResponse      // inbound header responses
+	reqMu           sync.RWMutex            // protects requested
+	requested       map[uint64]fetchRequest // in-flight header requests keyed by reqID
+	timeoutChn      chan uint64             // reqIDs whose hard timeout has expired
+	notifyChn       chan bool // true if initiated from outside
+	syncing         bool          // true while a full downloader sync is in progress
+	syncDone        chan struct{} // signalled when the downloader sync finishes
+}
+
+// fetchRequest records one in-flight header request sent to a peer.
+type fetchRequest struct {
+	hash   common.Hash // head hash the request was issued for
+	amount uint64      // number of headers requested
+	peer   *peer       // peer the request was sent to
+}
+
+// fetchResponse pairs a delivered header batch with its request ID.
+type fetchResponse struct {
+	reqID   uint64
+	headers []*types.Header
+}
+
+func newLightFetcher(pm *ProtocolManager) *lightFetcher {
+	f := &lightFetcher{
+		pm:              pm,
+		chain:           pm.blockchain,
+		odr:             pm.odr,
+		headAnnouncedBy: make(map[common.Hash][]*peer),
+		deliverChn:      make(chan fetchResponse, 100),
+		requested:       make(map[uint64]fetchRequest),
+		timeoutChn:      make(chan uint64),
+		notifyChn:       make(chan bool, 100),
+		syncDone:        make(chan struct{}),
+		currentTd:       big.NewInt(0),
+	}
+	go f.syncLoop() // single goroutine owns scheduling, currentTd and the request maps
+	return f
+}
+
+func (f *lightFetcher) notify(p *peer, head *announceData) {
+	var headHash common.Hash
+	if head == nil {
+		// initial notify after handshake: use the head hash the peer advertised
+		headHash = p.Head()
+	} else {
+		if core.GetTd(f.pm.chainDb, head.Hash, head.Number) != nil {
+			head.haveHeaders = head.Number // announced head already known locally, nothing to fetch
+		}
+		// queue the announcement on the peer's announce list
+		if !p.addNotify(head) {
+			// peer sent an inconsistent announcement, drop it
+			f.pm.removePeer(p.id)
+		}
+		headHash = head.Hash
+	}
+	f.headAnnouncedMu.Lock()
+	f.headAnnouncedBy[headHash] = append(f.headAnnouncedBy[headHash], p)
+	f.headAnnouncedMu.Unlock()
+	f.notifyChn <- true // wake the sync loop; true = externally initiated
+}
+
+// gotHeader is called when a header has been inserted locally; it informs all
+// peers that announced it and drops any peer whose announcement was inconsistent.
+func (f *lightFetcher) gotHeader(header *types.Header) {
+	f.headAnnouncedMu.Lock()
+	defer f.headAnnouncedMu.Unlock()
+
+	hash := header.Hash()
+	peerList := f.headAnnouncedBy[hash]
+	if peerList == nil {
+		return
+	}
+	number := header.Number.Uint64()
+	td := core.GetTd(f.pm.chainDb, hash, number)
+	for _, peer := range peerList {
+		peer.lock.Lock()
+		ok := peer.gotHeader(hash, number, td)
+		peer.lock.Unlock()
+		if !ok {
+			// announced Td/number did not match the inserted header, drop the peer
+			f.pm.removePeer(peer.id)
+		}
+	}
+	delete(f.headAnnouncedBy, hash)
+}
+
+// nextRequest picks the best peer to fetch from: the one announcing the highest
+// Td above our own, breaking ties by how many of its headers we already have.
+func (f *lightFetcher) nextRequest() (*peer, *announceData) {
+	var bestPeer *peer
+	bestTd := f.currentTd // only consider peers strictly ahead of our own head Td
+	for _, peer := range f.pm.peers.AllPeers() {
+		peer.lock.RLock()
+		if !peer.headInfo.requested && (peer.headInfo.Td.Cmp(bestTd) > 0 ||
+			(bestPeer != nil && peer.headInfo.Td.Cmp(bestTd) == 0 && peer.headInfo.haveHeaders > bestPeer.headInfo.haveHeaders)) {
+			bestPeer = peer
+			bestTd = peer.headInfo.Td
+		}
+		peer.lock.RUnlock()
+	}
+	if bestPeer == nil {
+		return nil, nil
+	}
+	bestPeer.lock.Lock()
+	res := bestPeer.headInfo
+	res.requested = true // mark so the same head is not requested twice
+	bestPeer.lock.Unlock()
+	for _, peer := range f.pm.peers.AllPeers() {
+		if peer != bestPeer {
+			peer.lock.Lock()
+			if peer.headInfo.Hash == bestPeer.headInfo.Hash && peer.headInfo.haveHeaders == bestPeer.headInfo.haveHeaders {
+				peer.headInfo.requested = true // equivalent announcement, one request covers it too
+			}
+			peer.lock.Unlock()
+		}
+	}
+	return bestPeer, res
+}
+
+// deliverHeaders hands a received header batch to the sync loop.
+func (f *lightFetcher) deliverHeaders(reqID uint64, headers []*types.Header) {
+	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers}
+}
+
+// requestedID reports whether reqID belongs to a request issued by this fetcher.
+func (f *lightFetcher) requestedID(reqID uint64) bool {
+	f.reqMu.RLock()
+	_, ok := f.requested[reqID]
+	f.reqMu.RUnlock()
+	return ok
+}
+
+// request fetches the missing headers up to the announced head from p, or falls
+// back to a full downloader sync if the gap is too large.
+func (f *lightFetcher) request(p *peer, block *announceData) {
+	amount := block.Number - block.haveHeaders // number of headers we are missing
+	if amount == 0 {
+		return
+	}
+	if amount > 100 {
+		// too far behind to fetch directly, run a full downloader sync instead
+		f.syncing = true
+		go func() {
+			// blocking sync against this peer
+			f.pm.synchronise(p)
+			// notify the sync loop so it can reconcile announced heads
+			f.syncDone <- struct{}{}
+		}()
+		return
+	}
+
+	reqID := f.odr.getNextReqID()
+	f.reqMu.Lock()
+	f.requested[reqID] = fetchRequest{hash: block.Hash, amount: amount, peer: p}
+	f.reqMu.Unlock()
+	cost := p.GetRequestCost(GetBlockHeadersMsg, int(amount))
+	p.fcServer.SendRequest(reqID, cost)
+	go p.RequestHeadersByHash(reqID, cost, block.Hash, int(amount), 0, true)
+	go func() {
+		time.Sleep(hardRequestTimeout) // arm the hard timeout for this request
+		f.timeoutChn <- reqID
+	}()
+}
+
+// processResponse validates and inserts a delivered header batch, updating
+// currentTd and announcement bookkeeping. Returns false if the response is bad.
+func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
+	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
+		return false
+	}
+	// headers arrive highest-first (reverse request); flip into ascending order
+	headers := make([]*types.Header, req.amount)
+	for i, header := range resp.headers {
+		headers[int(req.amount)-1-i] = header
+	}
+	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
+		return false
+	}
+	for _, header := range headers {
+		td := core.GetTd(f.pm.chainDb, header.Hash(), header.Number.Uint64())
+		if td == nil {
+			return false // insertion should have stored the Td; treat absence as failure
+		}
+		if td.Cmp(f.currentTd) > 0 {
+			f.currentTd = td
+		}
+		f.gotHeader(header)
+	}
+	return true
+}
+
+// checkSyncedHeaders walks each peer's announcement list after a downloader
+// sync and reconciles it with the headers now present in the database.
+func (f *lightFetcher) checkSyncedHeaders() {
+	for _, peer := range f.pm.peers.AllPeers() {
+		peer.lock.Lock()
+		h := peer.firstHeadInfo
+		remove := false
+	loop:
+		for h != nil {
+			if td := core.GetTd(f.pm.chainDb, h.Hash, h.Number); td != nil {
+				// announced header is now known locally; confirm the announcement
+				ok := peer.gotHeader(h.Hash, h.Number, td)
+				if !ok {
+					remove = true
+					break loop
+				}
+				if td.Cmp(f.currentTd) > 0 {
+					f.currentTd = td
+				}
+			}
+			h = h.next
+		}
+		peer.lock.Unlock()
+		if remove {
+			// peer's announcement contradicted the synced chain, drop it
+			f.pm.removePeer(peer.id)
+		}
+	}
+}
+
+// syncLoop is the event loop of the fetcher: it schedules header requests on
+// announcements, enforces hard timeouts, validates deliveries and reconciles
+// state after downloader syncs. It owns currentTd, syncing and the soft-timeout
+// bookkeeping, so those need no extra locking.
+func (f *lightFetcher) syncLoop() {
+	f.pm.wg.Add(1)
+	defer f.pm.wg.Done()
+
+	srtoNotify := false // a soft-timeout self-notification is pending
+	for {
+		select {
+		case <-f.pm.quitSync:
+			return
+		case ext := <-f.notifyChn:
+			// ext is true for external announcements, false for soft-timeout wakeups
+			s := srtoNotify
+			srtoNotify = false
+			if !f.syncing && !(ext && s) {
+				if p, r := f.nextRequest(); r != nil {
+					srtoNotify = true
+					go func() {
+						time.Sleep(softRequestTimeout)
+						f.notifyChn <- false
+					}()
+					f.request(p, r)
+				}
+			}
+		case reqID := <-f.timeoutChn:
+			f.reqMu.Lock()
+			req, ok := f.requested[reqID]
+			if ok {
+				delete(f.requested, reqID)
+			}
+			f.reqMu.Unlock()
+			if ok {
+				// hard timeout expired without a delivery, drop the peer
+				f.pm.removePeer(req.peer.id)
+			}
+		case resp := <-f.deliverChn:
+			f.reqMu.Lock()
+			req, ok := f.requested[resp.reqID]
+			delete(f.requested, resp.reqID)
+			f.reqMu.Unlock()
+			if ok && !(f.syncing || f.processResponse(req, resp)) {
+				// invalid response: drop the peer. Unknown reqIDs are ignored
+				// (the previous `!ok ||` form dereferenced a nil req.peer here).
+				f.pm.removePeer(req.peer.id)
+			}
+		case <-f.syncDone:
+			// downloader sync finished; reconcile announcements with the new chain
+			f.checkSyncedHeaders()
+			f.syncing = false
+		}
+	}
+}
diff --git a/les/flowcontrol/control.go b/les/flowcontrol/control.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b8d7f58f52dff9dc7026ddc25b6ceecd076b28d
--- /dev/null
+++ b/les/flowcontrol/control.go
@@ -0,0 +1,172 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package flowcontrol implements a client side flow control mechanism
+package flowcontrol
+
+import (
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+)
+
+const fcTimeConst = 1000000
+
+// ServerParams are the flow control parameters a server advertises to clients.
+type ServerParams struct {
+	BufLimit, MinRecharge uint64 // buffer limit and minimum recharge rate (scaled by fcTimeConst)
+}
+
+// ClientNode is the server-side flow control state kept for one client.
+type ClientNode struct {
+	params   *ServerParams
+	bufValue uint64 // remaining flow control buffer of this client
+	lastTime int64  // time bufValue was last recalculated
+	lock     sync.Mutex
+	cm       *ClientManager
+	cmNode   *cmNode // this client's entry in the manager's accounting
+}
+
+// NewClientNode registers a new client with the manager, starting with a full buffer.
+func NewClientNode(cm *ClientManager, params *ServerParams) *ClientNode {
+	node := &ClientNode{
+		cm:       cm,
+		params:   params,
+		bufValue: params.BufLimit, // clients start with a full buffer
+		lastTime: getTime(),
+	}
+	node.cmNode = cm.addNode(node)
+	return node
+}
+
+// Remove detaches this client from the manager's accounting.
+func (peer *ClientNode) Remove(cm *ClientManager) {
+	cm.removeNode(peer.cmNode)
+}
+
+// recalcBV recharges the client's buffer for the elapsed time, clamped to BufLimit.
+// Caller must hold peer.lock.
+func (peer *ClientNode) recalcBV(time int64) {
+	dt := uint64(time - peer.lastTime)
+	if time < peer.lastTime {
+		dt = 0 // clamp clock regressions to zero elapsed time
+	}
+	peer.bufValue += peer.params.MinRecharge * dt / fcTimeConst
+	if peer.bufValue > peer.params.BufLimit {
+		peer.bufValue = peer.params.BufLimit
+	}
+	peer.lastTime = time
+}
+
+// AcceptRequest returns the client's current buffer value and whether the
+// manager allows serving a new request right now.
+func (peer *ClientNode) AcceptRequest() (uint64, bool) {
+	peer.lock.Lock()
+	defer peer.lock.Unlock()
+
+	time := getTime()
+	peer.recalcBV(time)
+	return peer.bufValue, peer.cm.accept(peer.cmNode, time)
+}
+
+// RequestProcessed charges the given cost against the client's buffer and
+// returns the resulting buffer value plus the real cost reported by the manager.
+func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) {
+	peer.lock.Lock()
+	defer peer.lock.Unlock()
+
+	time := getTime()
+	peer.recalcBV(time)
+	peer.bufValue -= cost // NOTE(review): uint64 may wrap if cost > bufValue; the recalc below re-clamps to BufLimit — confirm intended
+	peer.recalcBV(time)   // dt is zero here; this only re-applies the BufLimit clamp
+	rcValue, rcost := peer.cm.processed(peer.cmNode, time)
+	if rcValue < peer.params.BufLimit {
+		bv := peer.params.BufLimit - rcValue // NOTE(review): shadows named result bv; the returned value is peer.bufValue below
+		if bv > peer.bufValue {
+			peer.bufValue = bv
+		}
+	}
+	return peer.bufValue, rcost
+}
+
+// ServerNode is the client-side estimate of a server's flow control state.
+type ServerNode struct {
+	bufEstimate uint64 // estimated remaining buffer at the server
+	lastTime    int64  // time bufEstimate was last recalculated
+	params      *ServerParams
+	sumCost     uint64            // sum of req costs sent to this server
+	pending     map[uint64]uint64 // value = sumCost after sending the given req
+	lock        sync.Mutex
+}
+
+// NewServerNode creates a server estimate that starts with a full buffer.
+func NewServerNode(params *ServerParams) *ServerNode {
+	return &ServerNode{
+		bufEstimate: params.BufLimit,
+		lastTime:    getTime(),
+		params:      params,
+		pending:     make(map[uint64]uint64),
+	}
+}
+
+func getTime() int64 {
+	return int64(mclock.Now()) // monotonic clock, nanoseconds
+}
+
+// recalcBLE recharges the buffer estimate for the elapsed time, clamped to
+// BufLimit. Caller must hold peer.lock.
+func (peer *ServerNode) recalcBLE(time int64) {
+	dt := uint64(time - peer.lastTime)
+	if time < peer.lastTime {
+		dt = 0 // clamp clock regressions to zero elapsed time
+	}
+	peer.bufEstimate += peer.params.MinRecharge * dt / fcTimeConst
+	if peer.bufEstimate > peer.params.BufLimit {
+		peer.bufEstimate = peer.params.BufLimit
+	}
+	peer.lastTime = time
+}
+
+// canSend returns 0 if a request of maxCost can be sent now, otherwise the
+// estimated wait (ns) until enough buffer has recharged. Caller holds the lock.
+func (peer *ServerNode) canSend(maxCost uint64) uint64 {
+	if peer.bufEstimate >= maxCost {
+		return 0
+	}
+	return (maxCost - peer.bufEstimate) * fcTimeConst / peer.params.MinRecharge
+}
+
+// CanSend is the locked wrapper around canSend.
+func (peer *ServerNode) CanSend(maxCost uint64) uint64 {
+	peer.lock.Lock()
+	defer peer.lock.Unlock()
+
+	return peer.canSend(maxCost)
+}
+
+// SendRequest blocks until maxCost fits into the estimated buffer, then deducts it and records the pending cost.
+func (peer *ServerNode) SendRequest(reqID, maxCost uint64) {
+	peer.lock.Lock()
+	defer peer.lock.Unlock()
+
+	peer.recalcBLE(getTime())
+	for peer.bufEstimate < maxCost {
+		time.Sleep(time.Duration(peer.canSend(maxCost))) // sleep roughly until enough buffer recharged, then re-check
+		peer.recalcBLE(getTime())
+	}
+	peer.bufEstimate -= maxCost
+	peer.sumCost += maxCost
+	// reqID is unsigned, so the previous `if reqID >= 0` guard was always true
+	// (staticcheck SA4003); record the cumulative cost unconditionally.
+	peer.pending[reqID] = peer.sumCost
+}
+
+// GotReply updates the buffer estimate from the server-reported value bv,
+// subtracting the cost of requests sent after this one.
+func (peer *ServerNode) GotReply(reqID, bv uint64) {
+	peer.lock.Lock()
+	defer peer.lock.Unlock()
+
+	sc, ok := peer.pending[reqID]
+	if !ok {
+		return // unknown or already-answered request
+	}
+	delete(peer.pending, reqID)
+	peer.bufEstimate = bv - (peer.sumCost - sc) // NOTE(review): may wrap if the server reports bv below the outstanding cost — confirm
+	peer.lastTime = getTime()
+}
diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go
new file mode 100644
index 0000000000000000000000000000000000000000..786884437b7c00223bb6c88f8810a9017a208afd
--- /dev/null
+++ b/les/flowcontrol/manager.go
@@ -0,0 +1,223 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package flowcontrol implements a client side flow control mechanism
+package flowcontrol
+
+import (
+	"sync"
+	"time"
+)
+
+const rcConst = 1000000
+
+// cmNode is the ClientManager's per-client accounting record.
+type cmNode struct {
+	node                       *ClientNode
+	lastUpdate                 int64 // time rcValue was last advanced
+	reqAccepted                int64
+	serving, recharging        bool  // serving: request in progress; recharging: draining rcValue back to zero
+	rcWeight                   uint64
+	rcValue, rcDelta           int64 // recharge integrator value and its rate of change (per rcConst)
+	finishRecharge, startValue int64 // projected time recharging completes; rcValue when serving began
+}
+
+// update advances rcValue linearly to the given time and finalizes a finished recharge.
+func (node *cmNode) update(time int64) {
+	dt := time - node.lastUpdate
+	node.rcValue += node.rcDelta * dt / rcConst
+	node.lastUpdate = time
+	if node.recharging && time >= node.finishRecharge {
+		node.recharging = false
+		node.rcDelta = 0
+		node.rcValue = 0
+	}
+}
+
+// set switches the node between serving and recharging states and recomputes
+// its rcDelta. sumWeight is adjusted locally only; the caller's copy is unchanged.
+func (node *cmNode) set(serving bool, simReqCnt, sumWeight uint64) {
+	if node.serving && !serving {
+		node.recharging = true
+		sumWeight += node.rcWeight // local adjustment: this node joins the recharging set
+	}
+	node.serving = serving
+	if node.recharging && serving {
+		node.recharging = false
+		sumWeight -= node.rcWeight // local adjustment: this node leaves the recharging set
+	}
+
+	node.rcDelta = 0
+	if serving {
+		node.rcDelta = int64(rcConst / simReqCnt) // integrator rises, shared among simultaneous requests
+	}
+	if node.recharging {
+		node.rcDelta = -int64(node.node.cm.rcRecharge * node.rcWeight / sumWeight)
+		node.finishRecharge = node.lastUpdate + node.rcValue*rcConst/(-node.rcDelta)
+	}
+}
+
+// ClientManager limits the number and aggregate cost of simultaneously served requests.
+type ClientManager struct {
+	lock                             sync.Mutex
+	nodes                            map[*cmNode]struct{}
+	simReqCnt, sumWeight, rcSumValue uint64 // running request count, recharging weight and integrator sum
+	maxSimReq, maxRcSum              uint64 // acceptance thresholds for new requests
+	rcRecharge                       uint64
+	resumeQueue                      chan chan bool // queue of callers waiting for capacity
+	time                             int64
+}
+
+// NewClientManager creates a manager targeting rcTarget percent recharge load.
+func NewClientManager(rcTarget, maxSimReq, maxRcSum uint64) *ClientManager {
+	cm := &ClientManager{
+		nodes:       make(map[*cmNode]struct{}),
+		resumeQueue: make(chan chan bool),
+		rcRecharge:  rcConst * rcConst / (100*rcConst/rcTarget - rcConst),
+		maxSimReq:   maxSimReq,
+		maxRcSum:    maxRcSum,
+	}
+	go cm.queueProc() // background goroutine resumes queued accept() callers
+	return cm
+}
+
+// Stop shuts the manager down; queued accept() callers will return false.
+func (self *ClientManager) Stop() {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	// signal any waiting accept routines to return false
+	self.nodes = make(map[*cmNode]struct{})
+	close(self.resumeQueue)
+}
+
+// addNode registers a client and returns its accounting record.
+func (self *ClientManager) addNode(cnode *ClientNode) *cmNode {
+	time := getTime()
+	node := &cmNode{
+		node:           cnode,
+		lastUpdate:     time,
+		finishRecharge: time,
+		rcWeight:       1,
+	}
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.nodes[node] = struct{}{}
+	self.update(getTime())
+	return node
+}
+
+// removeNode stops accounting for a client and drops it from the set.
+func (self *ClientManager) removeNode(node *cmNode) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	time := getTime()
+	self.stop(node, time)
+	delete(self.nodes, node)
+	self.update(time)
+}
+
+// updateNodes advances every node to the given time and recalculates sumWeight
+// and rcSumValue; returns true if any node finished recharging in the interval.
+func (self *ClientManager) updateNodes(time int64) (rce bool) {
+	var sumWeight, rcSum uint64
+	for node, _ := range self.nodes {
+		rc := node.recharging
+		node.update(time)
+		if rc && !node.recharging {
+			rce = true // this node's recharge completed at some point before `time`
+		}
+		if node.recharging {
+			sumWeight += node.rcWeight
+		}
+		rcSum += uint64(node.rcValue)
+	}
+	self.sumWeight = sumWeight
+	self.rcSumValue = rcSum
+	return
+}
+
+// update advances global state to the given time, stepping through each
+// intermediate recharge-completion instant so rates are recomputed correctly.
+func (self *ClientManager) update(time int64) {
+	for {
+		firstTime := time
+		for node, _ := range self.nodes {
+			if node.recharging && node.finishRecharge < firstTime {
+				firstTime = node.finishRecharge // earliest pending recharge completion
+			}
+		}
+		if self.updateNodes(firstTime) {
+			for node, _ := range self.nodes {
+				if node.recharging {
+					node.set(node.serving, self.simReqCnt, self.sumWeight) // redistribute recharge rates
+				}
+			}
+		} else {
+			self.time = time
+			return
+		}
+	}
+}
+
+// canStartReq reports whether both acceptance thresholds allow a new request.
+func (self *ClientManager) canStartReq() bool {
+	return self.simReqCnt < self.maxSimReq && self.rcSumValue < self.maxRcSum
+}
+
+// queueProc serves the resume queue: it polls until capacity frees up, then
+// releases the waiting accept() caller by closing its channel. Runs until Stop.
+func (self *ClientManager) queueProc() {
+	for rc := range self.resumeQueue {
+		for {
+			time.Sleep(time.Millisecond * 10) // poll interval while waiting for capacity
+			self.lock.Lock()
+			self.update(getTime())
+			cs := self.canStartReq()
+			self.lock.Unlock()
+			if cs {
+				break
+			}
+		}
+		close(rc) // release the waiting accept() caller
+	}
+}
+
+// accept admits a request for node, blocking (via the resume queue) while the
+// manager is at capacity. Returns false if the node was removed while waiting.
+func (self *ClientManager) accept(node *cmNode, time int64) bool {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.update(time)
+	if !self.canStartReq() {
+		resume := make(chan bool)
+		self.lock.Unlock() // release the lock while queued; queueProc needs it
+		self.resumeQueue <- resume
+		<-resume // closed by queueProc (capacity available) or by Stop
+		self.lock.Lock()
+		if _, ok := self.nodes[node]; !ok {
+			return false // reject if node has been removed or manager has been stopped
+		}
+	}
+	self.simReqCnt++
+	node.set(true, self.simReqCnt, self.sumWeight)
+	node.startValue = node.rcValue // remember the integrator baseline for cost accounting
+	self.update(self.time)
+	return true
+}
+
+// stop marks a node as done serving; caller must hold self.lock.
+func (self *ClientManager) stop(node *cmNode, time int64) {
+	if node.serving {
+		self.update(time)
+		self.simReqCnt--
+		node.set(false, self.simReqCnt, self.sumWeight)
+		self.update(time)
+	}
+}
+
+// processed finishes a request, returning the node's integrator value and the
+// cost accumulated since the matching accept().
+func (self *ClientManager) processed(node *cmNode, time int64) (rcValue, rcCost uint64) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.stop(node, time)
+	return uint64(node.rcValue), uint64(node.rcValue - node.startValue)
+}
diff --git a/les/handler.go b/les/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..44a0a1661b6e83dc4abdb481668d0b34165a6ca6
--- /dev/null
+++ b/les/handler.go
@@ -0,0 +1,901 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math/big"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/p2p/discv5"
+	"github.com/ethereum/go-ethereum/pow"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+const (
+	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
+	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
+
+	ethVersion = 63 // equivalent eth version for the downloader
+
+	MaxHeaderFetch       = 192 // Amount of block headers to be fetched per retrieval request
+	MaxBodyFetch         = 32  // Amount of block bodies to be fetched per retrieval request
+	MaxReceiptFetch      = 128 // Amount of transaction receipts to allow fetching per request
+	MaxCodeFetch         = 64  // Amount of contract codes to allow fetching per request
+	MaxProofsFetch       = 64  // Amount of merkle proofs to be fetched per retrieval request
+	MaxHeaderProofsFetch = 64  // Amount of merkle proofs to be fetched per retrieval request
+	MaxTxSend            = 64  // Amount of transactions to be send per request
+
+	disableClientRemovePeer = true
+)
+
+// errIncompatibleConfig is returned if the requested protocols and configs are
+// not compatible (low protocol version restrictions and high requirements).
+var errIncompatibleConfig = errors.New("incompatible configuration")
+
+// errResp builds a protocol error combining the error code with a formatted detail message.
+func errResp(code errCode, format string, v ...interface{}) error {
+	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
+}
+
+type hashFetcherFn func(common.Hash) error
+
+// BlockChain is the subset of chain functionality the protocol manager needs;
+// satisfied by both light.LightChain (client) and core.BlockChain (server).
+type BlockChain interface {
+	HasHeader(hash common.Hash) bool
+	GetHeader(hash common.Hash, number uint64) *types.Header
+	GetHeaderByHash(hash common.Hash) *types.Header
+	CurrentHeader() *types.Header
+	GetTdByHash(hash common.Hash) *big.Int
+	InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
+	Rollback(chain []common.Hash)
+	Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash)
+	GetHeaderByNumber(number uint64) *types.Header
+	GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash
+	LastBlockHash() common.Hash
+	Genesis() *types.Block
+}
+
+type txPool interface {
+	// AddBatch should add the given transactions to the pool.
+	AddBatch([]*types.Transaction)
+}
+
+// ProtocolManager runs the LES wire protocol, in either light-client mode
+// (lightSync=true) or server mode.
+type ProtocolManager struct {
+	lightSync   bool // true when running as a light client, false as a server
+	txpool      txPool
+	txrelay     *LesTxRelay
+	networkId   int
+	chainConfig *core.ChainConfig
+	blockchain  BlockChain
+	chainDb     ethdb.Database
+	odr         *LesOdr
+	server      *LesServer // non-nil only in server mode
+
+	topicDisc *discv5.Network
+	lesTopic  discv5.Topic
+	p2pServer *p2p.Server
+
+	downloader *downloader.Downloader
+	fetcher    *lightFetcher
+	peers      *peerSet
+
+	SubProtocols []p2p.Protocol
+
+	eventMux *event.TypeMux
+
+	// channels for fetcher, syncer, txsyncLoop
+	newPeerCh   chan *peer
+	quitSync    chan struct{}
+	noMorePeers chan struct{}
+
+	syncMu   sync.Mutex
+	syncing  bool
+	syncDone chan struct{}
+
+	// wait group is used for graceful shutdowns during downloading
+	// and processing
+	wg sync.WaitGroup
+}
+
+// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
+// with the ethereum network.
+func NewProtocolManager(chainConfig *core.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, pow pow.PoW, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) {
+	// Create the protocol manager with the base fields
+	manager := &ProtocolManager{
+		lightSync:   lightSync,
+		eventMux:    mux,
+		blockchain:  blockchain,
+		chainConfig: chainConfig,
+		chainDb:     chainDb,
+		networkId:   networkId,
+		txpool:      txpool,
+		txrelay:     txrelay,
+		odr:         odr,
+		peers:       newPeerSet(),
+		newPeerCh:   make(chan *peer),
+		quitSync:    make(chan struct{}),
+		noMorePeers: make(chan struct{}),
+	}
+	// Initiate a sub-protocol for every implemented version we can handle
+	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
+	for i, version := range ProtocolVersions {
+		// Compatible, initialize the sub-protocol
+		version := version // Closure for the run
+		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
+			Name:    "les",
+			Version: version,
+			Length:  ProtocolLengths[i],
+			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+				peer := manager.newPeer(int(version), networkId, p, rw)
+				select {
+				case manager.newPeerCh <- peer: // hand the peer to the syncer (or the drain loop in server mode)
+					manager.wg.Add(1)
+					defer manager.wg.Done()
+					return manager.handle(peer)
+				case <-manager.quitSync:
+					return p2p.DiscQuitting
+				}
+			},
+			NodeInfo: func() interface{} {
+				return manager.NodeInfo()
+			},
+			PeerInfo: func(id discover.NodeID) interface{} {
+				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
+					return p.Info()
+				}
+				return nil
+			},
+		})
+	}
+	if len(manager.SubProtocols) == 0 {
+		return nil, errIncompatibleConfig
+	}
+
+	removePeer := manager.removePeer
+	if disableClientRemovePeer {
+		removePeer = func(id string) {} // debugging switch: keep misbehaving peers connected
+	}
+
+	if lightSync {
+		glog.V(logger.Debug).Infof("LES: create downloader")
+		manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
+			nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
+			blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
+		manager.fetcher = newLightFetcher(manager)
+	}
+
+	if odr != nil {
+		odr.removePeer = removePeer
+	}
+
+	/* legacy full-fetcher wiring, kept for reference only:
+	validator := func(block *types.Block, parent *types.Block) error {
+		return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
+	}
+	heighter := func() uint64 {
+		return chainman.LastBlockNumberU64()
+	}
+	manager.fetcher = fetcher.New(chainman.GetBlockNoOdr, validator, nil, heighter, chainman.InsertChain, manager.removePeer)
+	*/
+	return manager, nil
+}
+
+// removePeer unregisters id from downloader/ODR/tx-relay, drops it from the peer set and disconnects it.
+func (pm *ProtocolManager) removePeer(id string) {
+	// Short circuit if the peer was already removed
+	peer := pm.peers.Peer(id)
+	if peer == nil {
+		return
+	}
+	glog.V(logger.Debug).Infoln("Removing peer", id)
+
+	// Unregister the peer from the downloader and Ethereum peer set
+	glog.V(logger.Debug).Infof("LES: unregister peer %v", id)
+	if pm.lightSync {
+		pm.downloader.UnregisterPeer(id)
+		pm.odr.UnregisterPeer(peer)
+		if pm.txrelay != nil {
+			pm.txrelay.removePeer(id)
+		}
+	}
+	if err := pm.peers.Unregister(id); err != nil {
+		glog.V(logger.Error).Infoln("Removal failed:", err)
+	}
+	// Hard disconnect at the networking layer; peer was already proven
+	// non-nil above, so the former `if peer != nil` guard was dead code.
+	peer.Peer.Disconnect(p2p.DiscUselessPeer)
+}
+
+// findServers searches topic discovery for LES servers for ~20 seconds and
+// adds every newly found one as a static peer candidate.
+func (pm *ProtocolManager) findServers() {
+	if pm.p2pServer == nil {
+		return
+	}
+	enodes := make(chan string, 100)
+	stop := make(chan struct{})
+	go pm.topicDisc.SearchTopic(pm.lesTopic, stop, enodes)
+	go func() {
+		added := make(map[string]bool) // deduplicate repeated discovery hits
+		for {
+			select {
+			case enode := <-enodes:
+				if !added[enode] {
+					fmt.Println("Found LES server:", enode)
+					added[enode] = true
+					if node, err := discover.ParseNode(enode); err == nil {
+						pm.p2pServer.AddPeer(node)
+					}
+				}
+			case <-stop:
+				return
+			}
+		}
+	}()
+	time.Sleep(time.Second * 20) // fixed search window before stopping discovery
+	close(stop)
+}
+
+// Start launches the protocol manager: server discovery + syncer in client
+// mode, or topic registration in server mode.
+func (pm *ProtocolManager) Start(srvr *p2p.Server) {
+	pm.p2pServer = srvr
+	if srvr != nil {
+		pm.topicDisc = srvr.DiscV5
+	}
+	pm.lesTopic = discv5.Topic("LES@" + common.Bytes2Hex(pm.blockchain.Genesis().Hash().Bytes()[0:8])) // topic is genesis-specific so clients only find matching chains
+	if pm.lightSync {
+		// start sync handler
+		go pm.findServers()
+		go pm.syncer()
+	} else {
+		if pm.topicDisc != nil {
+			go func() {
+				fmt.Println("Starting topic register")
+				pm.topicDisc.RegisterTopic(pm.lesTopic, pm.quitSync)
+				fmt.Println("Stopped topic register")
+			}()
+		}
+		go func() {
+			for range pm.newPeerCh { // drain so protocol Run handlers never block in server mode
+			}
+		}()
+	}
+}
+
+// Stop gracefully shuts down the protocol manager, waiting for all handlers to exit.
+func (pm *ProtocolManager) Stop() {
+	// Showing a log message. During download / process this could actually
+	// take between 5 to 10 seconds and therefor feedback is required.
+	glog.V(logger.Info).Infoln("Stopping light ethereum protocol handler...")
+
+	// Quit the sync loop.
+	// After this send has completed, no new peers will be accepted.
+	pm.noMorePeers <- struct{}{} // blocks until the syncer acknowledges
+
+	close(pm.quitSync) // quits syncer, fetcher
+
+	// Disconnect existing sessions.
+	// This also closes the gate for any new registrations on the peer set.
+	// sessions which are already established but not added to pm.peers yet
+	// will exit when they try to register.
+	pm.peers.Close()
+
+	// Wait for any process action
+	pm.wg.Wait()
+
+	glog.V(logger.Info).Infoln("Light ethereum protocol handler stopped")
+}
+
+// newPeer wraps a raw p2p peer into a LES peer with metered message I/O.
+func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+	return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
+}
+
+// handle is the callback invoked to manage the life cycle of a les peer. When
+// this function terminates, the peer is disconnected.
+func (pm *ProtocolManager) handle(p *peer) error {
+	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
+
+	// Execute the LES handshake
+	td, head, genesis := pm.blockchain.Status()
+	headNum := core.GetBlockNumber(pm.chainDb, head)
+	if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
+		glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
+		return err
+	}
+	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
+		rw.Init(p.version) // attach per-version metrics now that the version is negotiated
+	}
+	// Register the peer locally
+	glog.V(logger.Detail).Infof("%v: adding peer", p)
+	if err := pm.peers.Register(p); err != nil {
+		glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
+		return err
+	}
+	defer func() {
+		if pm.server != nil && pm.server.fcManager != nil && p.fcClient != nil {
+			p.fcClient.Remove(pm.server.fcManager) // release the peer's flow control slot (server mode)
+		}
+		pm.removePeer(p.id)
+	}()
+
+	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
+	glog.V(logger.Debug).Infof("LES: register peer %v", p.id)
+	if pm.lightSync {
+		requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
+			reqID := pm.odr.getNextReqID()
+			cost := p.GetRequestCost(GetBlockHeadersMsg, amount)
+			p.fcServer.SendRequest(reqID, cost) // reserve flow control budget before sending
+			return p.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse)
+		}
+		requestHeadersByNumber := func(origin uint64, amount int, skip int, reverse bool) error {
+			reqID := pm.odr.getNextReqID()
+			cost := p.GetRequestCost(GetBlockHeadersMsg, amount)
+			p.fcServer.SendRequest(reqID, cost) // reserve flow control budget before sending
+			return p.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse)
+		}
+		if err := pm.downloader.RegisterPeer(p.id, ethVersion, p.HeadAndTd,
+			requestHeadersByHash, requestHeadersByNumber, nil, nil, nil); err != nil {
+			return err
+		}
+		pm.odr.RegisterPeer(p)
+		if pm.txrelay != nil {
+			pm.txrelay.addPeer(p)
+		}
+
+		pm.fetcher.notify(p, nil) // seed the fetcher with the peer's handshake head
+	}
+
+	stop := make(chan struct{})
+	defer close(stop)
+	go func() {
+		// new block announce loop
+		for {
+			select {
+			case announce := <-p.announceChn:
+				p.SendAnnounce(announce)
+				// announcement forwarded to the remote peer
+			case <-stop:
+				return
+			}
+		}
+	}()
+
+	// main loop. handle incoming messages.
+	for {
+		if err := pm.handleMsg(p); err != nil {
+			glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
+			// returning triggers the deferred cleanup and disconnects the peer
+			return err
+		}
+	}
+}
+
+var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsMsg, SendTxMsg, GetHeaderProofsMsg} // request message codes subject to flow control cost accounting
+
+// handleMsg is invoked whenever an inbound message is received from a remote
+// peer. The remote connection is torn down upon returning any error.
+func (pm *ProtocolManager) handleMsg(p *peer) error {
+	// Read the next message from the remote peer, and ensure it's fully consumed
+	msg, err := p.rw.ReadMsg()
+	if err != nil {
+		return err
+	}
+
+	var costs *requestCosts
+	var reqCnt, maxReqs int
+
+	//fmt.Println("MSG", msg.Code, msg.Size)
+	if rc, ok := p.fcCosts[msg.Code]; ok { // check if msg is a supported request type
+		costs = rc
+		if p.fcClient == nil {
+			return errResp(ErrRequestRejected, "")
+		}
+		bv, ok := p.fcClient.AcceptRequest()
+		if !ok || bv < costs.baseCost {
+			return errResp(ErrRequestRejected, "")
+		}
+		maxReqs = 10000
+		if bv < pm.server.defParams.BufLimit {
+			d := bv - costs.baseCost
+			if d/10000 < costs.reqCost {
+				maxReqs = int(d / costs.reqCost)
+			}
+		}
+	}
+
+	if msg.Size > ProtocolMaxMsgSize {
+		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
+	}
+	defer msg.Discard()
+
+	var deliverMsg *Msg
+
+	// Handle the message depending on its contents
+	switch msg.Code {
+	case StatusMsg:
+		glog.V(logger.Debug).Infof("LES: received StatusMsg from peer %v", p.id)
+		// Status messages should never arrive after the handshake
+		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
+
+	// Block header query, collect the requested headers and reply
+	case AnnounceMsg:
+		var req announceData
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "%v: %v", msg, err)
+		}
+		//fmt.Println("RECEIVED", req.Number, req.Hash, req.Td, req.ReorgDepth)
+		pm.fetcher.notify(p, &req)
+
+	case GetBlockHeadersMsg:
+		glog.V(logger.Debug).Infof("LES: received GetBlockHeadersMsg from peer %v", p.id)
+		// Decode the complex header query
+		var req struct {
+			ReqID uint64
+			Query getBlockHeadersData
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "%v: %v", msg, err)
+		}
+
+		query := req.Query
+		if query.Amount > uint64(maxReqs) || query.Amount > MaxHeaderFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+
+		hashMode := query.Origin.Hash != (common.Hash{})
+
+		// Gather headers until the fetch or network limits is reached
+		var (
+			bytes   common.StorageSize
+			headers []*types.Header
+			unknown bool
+		)
+		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
+			// Retrieve the next header satisfying the query
+			var origin *types.Header
+			if hashMode {
+				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
+			} else {
+				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
+			}
+			if origin == nil {
+				break
+			}
+			number := origin.Number.Uint64()
+			headers = append(headers, origin)
+			bytes += estHeaderRlpSize
+
+			// Advance to the next header of the query
+			switch {
+			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
+				// Hash based traversal towards the genesis block
+				for i := 0; i < int(query.Skip)+1; i++ {
+					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
+						query.Origin.Hash = header.ParentHash
+						number--
+					} else {
+						unknown = true
+						break
+					}
+				}
+			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
+				// Hash based traversal towards the leaf block
+				if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
+					if hashes := pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1); uint64(len(hashes)) > query.Skip && hashes[query.Skip] == query.Origin.Hash {
+						query.Origin.Hash = header.Hash()
+					} else {
+						unknown = true
+					}
+				} else {
+					unknown = true
+				}
+			case query.Reverse:
+				// Number based traversal towards the genesis block
+				if query.Origin.Number >= query.Skip+1 {
+					query.Origin.Number -= (query.Skip + 1)
+				} else {
+					unknown = true
+				}
+
+			case !query.Reverse:
+				// Number based traversal towards the leaf block
+				query.Origin.Number += (query.Skip + 1)
+			}
+		}
+
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + query.Amount*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, query.Amount, rcost)
+		return p.SendBlockHeaders(req.ReqID, bv, headers)
+
+	case BlockHeadersMsg:
+		if pm.downloader == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received BlockHeadersMsg from peer %v", p.id)
+		// A batch of headers arrived to one of our previous requests
+		var resp struct {
+			ReqID, BV uint64
+			Headers   []*types.Header
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		if pm.fetcher.requestedID(resp.ReqID) {
+			pm.fetcher.deliverHeaders(resp.ReqID, resp.Headers)
+		} else {
+			err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
+			if err != nil {
+				glog.V(logger.Debug).Infoln(err)
+			}
+		}
+
+	case GetBlockBodiesMsg:
+		glog.V(logger.Debug).Infof("LES: received GetBlockBodiesMsg from peer %v", p.id)
+		// Decode the retrieval message
+		var req struct {
+			ReqID  uint64
+			Hashes []common.Hash
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Gather blocks until the fetch or network limits is reached
+		var (
+			bytes  int
+			bodies []rlp.RawValue
+		)
+		reqCnt = len(req.Hashes)
+		if reqCnt > maxReqs || reqCnt > MaxBodyFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+		for _, hash := range req.Hashes {
+			if bytes >= softResponseLimit {
+				break
+			}
+			// Retrieve the requested block body, stopping if enough was found
+			if data := core.GetBodyRLP(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash)); len(data) != 0 {
+				bodies = append(bodies, data)
+				bytes += len(data)
+			}
+		}
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+		return p.SendBlockBodiesRLP(req.ReqID, bv, bodies)
+
+	case BlockBodiesMsg:
+		if pm.odr == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received BlockBodiesMsg from peer %v", p.id)
+		// A batch of block bodies arrived to one of our previous requests
+		var resp struct {
+			ReqID, BV uint64
+			Data      []*types.Body
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		deliverMsg = &Msg{
+			MsgType: MsgBlockBodies,
+			ReqID:   resp.ReqID,
+			Obj:     resp.Data,
+		}
+
+	case GetCodeMsg:
+		glog.V(logger.Debug).Infof("LES: received GetCodeMsg from peer %v", p.id)
+		// Decode the retrieval message
+		var req struct {
+			ReqID uint64
+			Reqs  []CodeReq
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Gather state data until the fetch or network limits is reached
+		var (
+			bytes int
+			data  [][]byte
+		)
+		reqCnt = len(req.Reqs)
+		if reqCnt > maxReqs || reqCnt > MaxCodeFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+		for _, req := range req.Reqs {
+			// Retrieve the requested state entry, stopping if enough was found
+			if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
+				if stateTrie, _ := trie.New(header.Root, pm.chainDb); stateTrie != nil {
+					sdata := stateTrie.Get(req.AccKey)
+					var acc state.Account
+					if err := rlp.DecodeBytes(sdata, &acc); err == nil {
+						entry, _ := pm.chainDb.Get(acc.CodeHash)
+						if bytes+len(entry) >= softResponseLimit {
+							break
+						}
+						data = append(data, entry)
+						bytes += len(entry)
+					}
+				}
+			}
+		}
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+		return p.SendCode(req.ReqID, bv, data)
+
+	case CodeMsg:
+		if pm.odr == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received CodeMsg from peer %v", p.id)
+		// A batch of node state data arrived to one of our previous requests
+		var resp struct {
+			ReqID, BV uint64
+			Data      [][]byte
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		deliverMsg = &Msg{
+			MsgType: MsgCode,
+			ReqID:   resp.ReqID,
+			Obj:     resp.Data,
+		}
+
+	case GetReceiptsMsg:
+		glog.V(logger.Debug).Infof("LES: received GetReceiptsMsg from peer %v", p.id)
+		// Decode the retrieval message
+		var req struct {
+			ReqID  uint64
+			Hashes []common.Hash
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Gather state data until the fetch or network limits is reached
+		var (
+			bytes    int
+			receipts []rlp.RawValue
+		)
+		reqCnt = len(req.Hashes)
+		if reqCnt > maxReqs || reqCnt > MaxReceiptFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+		for _, hash := range req.Hashes {
+			if bytes >= softResponseLimit {
+				break
+			}
+			// Retrieve the requested block's receipts, skipping if unknown to us
+			results := core.GetBlockReceipts(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash))
+			if results == nil {
+				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
+					continue
+				}
+			}
+			// If known, encode and queue for response packet
+			if encoded, err := rlp.EncodeToBytes(results); err != nil {
+				glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
+			} else {
+				receipts = append(receipts, encoded)
+				bytes += len(encoded)
+			}
+		}
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+		return p.SendReceiptsRLP(req.ReqID, bv, receipts)
+
+	case ReceiptsMsg:
+		if pm.odr == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received ReceiptsMsg from peer %v", p.id)
+		// A batch of receipts arrived to one of our previous requests
+		var resp struct {
+			ReqID, BV uint64
+			Receipts  []types.Receipts
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		deliverMsg = &Msg{
+			MsgType: MsgReceipts,
+			ReqID:   resp.ReqID,
+			Obj:     resp.Receipts,
+		}
+
+	case GetProofsMsg:
+		glog.V(logger.Debug).Infof("LES: received GetProofsMsg from peer %v", p.id)
+		// Decode the retrieval message
+		var req struct {
+			ReqID uint64
+			Reqs  []ProofReq
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Gather state data until the fetch or network limits is reached
+		var (
+			bytes  int
+			proofs proofsData
+		)
+		reqCnt = len(req.Reqs)
+		if reqCnt > maxReqs || reqCnt > MaxProofsFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+		for _, req := range req.Reqs {
+			if bytes >= softResponseLimit {
+				break
+			}
+			// Retrieve the requested state entry, stopping if enough was found
+			if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
+				if tr, _ := trie.New(header.Root, pm.chainDb); tr != nil {
+					if len(req.AccKey) > 0 {
+						sdata := tr.Get(req.AccKey)
+						tr = nil
+						var acc state.Account
+						if err := rlp.DecodeBytes(sdata, &acc); err == nil {
+							tr, _ = trie.New(acc.Root, pm.chainDb)
+						}
+					}
+					if tr != nil {
+						proof := tr.Prove(req.Key)
+						proofs = append(proofs, proof)
+						bytes += len(proof)
+					}
+				}
+			}
+		}
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+		return p.SendProofs(req.ReqID, bv, proofs)
+
+	case ProofsMsg:
+		if pm.odr == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received ProofsMsg from peer %v", p.id)
+		// A batch of merkle proofs arrived to one of our previous requests
+		var resp struct {
+			ReqID, BV uint64
+			Data      [][]rlp.RawValue
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		deliverMsg = &Msg{
+			MsgType: MsgProofs,
+			ReqID:   resp.ReqID,
+			Obj:     resp.Data,
+		}
+
+	case GetHeaderProofsMsg:
+		glog.V(logger.Debug).Infof("LES: received GetHeaderProofsMsg from peer %v", p.id)
+		// Decode the retrieval message
+		var req struct {
+			ReqID uint64
+			Reqs  []ChtReq
+		}
+		if err := msg.Decode(&req); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Gather state data until the fetch or network limits is reached
+		var (
+			bytes  int
+			proofs []ChtResp
+		)
+		reqCnt = len(req.Reqs)
+		if reqCnt > maxReqs || reqCnt > MaxHeaderProofsFetch {
+			return errResp(ErrRequestRejected, "")
+		}
+		for _, req := range req.Reqs {
+			if bytes >= softResponseLimit {
+				break
+			}
+
+			if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
+				if root := getChtRoot(pm.chainDb, req.ChtNum); root != (common.Hash{}) {
+					if tr, _ := trie.New(root, pm.chainDb); tr != nil {
+						var encNumber [8]byte
+						binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
+						proof := tr.Prove(encNumber[:])
+						proofs = append(proofs, ChtResp{Header: header, Proof: proof})
+						bytes += len(proof) + estHeaderRlpSize
+					}
+				}
+			}
+		}
+		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+		return p.SendHeaderProofs(req.ReqID, bv, proofs)
+
+	case HeaderProofsMsg:
+		if pm.odr == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+
+		glog.V(logger.Debug).Infof("LES: received HeaderProofsMsg from peer %v", p.id)
+		var resp struct {
+			ReqID, BV uint64
+			Data      []ChtResp
+		}
+		if err := msg.Decode(&resp); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		p.fcServer.GotReply(resp.ReqID, resp.BV)
+		deliverMsg = &Msg{
+			MsgType: MsgHeaderProofs,
+			ReqID:   resp.ReqID,
+			Obj:     resp.Data,
+		}
+
+	case SendTxMsg:
+		if pm.txpool == nil {
+			return errResp(ErrUnexpectedResponse, "")
+		}
+		// Transactions arrived, parse all of them and deliver to the pool
+		var txs []*types.Transaction
+		if err := msg.Decode(&txs); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		reqCnt = len(txs)
+		if reqCnt > maxReqs || reqCnt > MaxTxSend {
+			return errResp(ErrRequestRejected, "")
+		}
+		pm.txpool.AddBatch(txs)
+		_, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
+		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
+
+	default:
+		glog.V(logger.Debug).Infof("LES: received unknown message with code %d from peer %v", msg.Code, p.id)
+		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
+	}
+
+	if deliverMsg != nil {
+		return pm.odr.Deliver(p, deliverMsg)
+	}
+
+	return nil
+}
+
+// NodeInfo retrieves some protocol metadata about the running host node.
+func (self *ProtocolManager) NodeInfo() *eth.EthNodeInfo {
+	return &eth.EthNodeInfo{
+		Network:    self.networkId,
+		Difficulty: self.blockchain.GetTdByHash(self.blockchain.LastBlockHash()),
+		Genesis:    self.blockchain.Genesis().Hash(),
+		Head:       self.blockchain.LastBlockHash(),
+	}
+}
diff --git a/les/handler_test.go b/les/handler_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..37c5dd2268627aadc0055eafff035ae2e81a46ce
--- /dev/null
+++ b/les/handler_test.go
@@ -0,0 +1,338 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
+	type resp struct {
+		ReqID, BV uint64
+		Data      interface{}
+	}
+	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
+}
+
+// Tests that block headers can be retrieved from a remote chain based on user queries.
+func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
+
+func testGetBlockHeaders(t *testing.T, protocol int) {
+	pm, _, _ := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil)
+	bc := pm.blockchain.(*core.BlockChain)
+	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+	defer peer.close()
+
+	// Create a "random" unknown hash for testing
+	var unknown common.Hash
+	for i := range unknown {
+		unknown[i] = byte(i)
+	}
+	// Create a batch of tests for various scenarios
+	limit := uint64(MaxHeaderFetch)
+	tests := []struct {
+		query  *getBlockHeadersData // The query to execute for header retrieval
+		expect []common.Hash        // The hashes of the block whose headers are expected
+	}{
+		// A single random block should be retrievable by hash and number too
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
+			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
+			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
+		},
+		// Multiple headers should be retrievable in both directions
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
+			[]common.Hash{
+				bc.GetBlockByNumber(limit / 2).Hash(),
+				bc.GetBlockByNumber(limit/2 + 1).Hash(),
+				bc.GetBlockByNumber(limit/2 + 2).Hash(),
+			},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
+			[]common.Hash{
+				bc.GetBlockByNumber(limit / 2).Hash(),
+				bc.GetBlockByNumber(limit/2 - 1).Hash(),
+				bc.GetBlockByNumber(limit/2 - 2).Hash(),
+			},
+		},
+		// Multiple headers with skip lists should be retrievable
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
+			[]common.Hash{
+				bc.GetBlockByNumber(limit / 2).Hash(),
+				bc.GetBlockByNumber(limit/2 + 4).Hash(),
+				bc.GetBlockByNumber(limit/2 + 8).Hash(),
+			},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
+			[]common.Hash{
+				bc.GetBlockByNumber(limit / 2).Hash(),
+				bc.GetBlockByNumber(limit/2 - 4).Hash(),
+				bc.GetBlockByNumber(limit/2 - 8).Hash(),
+			},
+		},
+		// The chain endpoints should be retrievable
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
+			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
+			[]common.Hash{bc.CurrentBlock().Hash()},
+		},
+		// Ensure protocol limits are honored
+		/*{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
+			bc.GetBlockHashesFromHash(bc.CurrentBlock().Hash(), limit),
+		},*/
+		// Check that requesting more than available is handled gracefully
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
+			[]common.Hash{
+				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
+				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
+			},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
+			[]common.Hash{
+				bc.GetBlockByNumber(4).Hash(),
+				bc.GetBlockByNumber(0).Hash(),
+			},
+		},
+		// Check that requesting more than available is handled gracefully, even if mid skip
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
+			[]common.Hash{
+				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
+				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
+			},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
+			[]common.Hash{
+				bc.GetBlockByNumber(4).Hash(),
+				bc.GetBlockByNumber(1).Hash(),
+			},
+		},
+		// Check that non existing headers aren't returned
+		{
+			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
+			[]common.Hash{},
+		}, {
+			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
+			[]common.Hash{},
+		},
+	}
+	// Run each of the tests and verify the results against the chain
+	var reqID uint64
+	for i, tt := range tests {
+		// Collect the headers to expect in the response
+		headers := []*types.Header{}
+		for _, hash := range tt.expect {
+			headers = append(headers, bc.GetHeaderByHash(hash))
+		}
+		// Send the hash request and verify the response
+		reqID++
+		cost := peer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount))
+		sendRequest(peer.app, GetBlockHeadersMsg, reqID, cost, tt.query)
+		if err := expectResponse(peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
+			t.Errorf("test %d: headers mismatch: %v", i, err)
+		}
+	}
+}
+
+// Tests that block contents can be retrieved from a remote chain based on their hashes.
+func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
+
+func testGetBlockBodies(t *testing.T, protocol int) {
+	pm, _, _ := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil)
+	bc := pm.blockchain.(*core.BlockChain)
+	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+	defer peer.close()
+
+	// Create a batch of tests for various scenarios
+	limit := MaxBodyFetch
+	tests := []struct {
+		random    int           // Number of blocks to fetch randomly from the chain
+		explicit  []common.Hash // Explicitly requested blocks
+		available []bool        // Availability of explicitly requested blocks
+		expected  int           // Total number of existing blocks to expect
+	}{
+		{1, nil, nil, 1},         // A single random block should be retrievable
+		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
+		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
+		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
+		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
+		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
+		{0, []common.Hash{common.Hash{}}, []bool{false}, 0},           // A non existent block should not be returned
+
+		// Existing and non-existing blocks interleaved should not cause problems
+		{0, []common.Hash{
+			common.Hash{},
+			bc.GetBlockByNumber(1).Hash(),
+			common.Hash{},
+			bc.GetBlockByNumber(10).Hash(),
+			common.Hash{},
+			bc.GetBlockByNumber(100).Hash(),
+			common.Hash{},
+		}, []bool{false, true, false, true, false, true, false}, 3},
+	}
+	// Run each of the tests and verify the results against the chain
+	var reqID uint64
+	for i, tt := range tests {
+		// Collect the hashes to request, and the response to expect
+		hashes, seen := []common.Hash{}, make(map[int64]bool)
+		bodies := []*types.Body{}
+
+		for j := 0; j < tt.random; j++ {
+			for {
+				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
+				if !seen[num] {
+					seen[num] = true
+
+					block := bc.GetBlockByNumber(uint64(num))
+					hashes = append(hashes, block.Hash())
+					if len(bodies) < tt.expected {
+						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
+					}
+					break
+				}
+			}
+		}
+		for j, hash := range tt.explicit {
+			hashes = append(hashes, hash)
+			if tt.available[j] && len(bodies) < tt.expected {
+				block := bc.GetBlockByHash(hash)
+				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
+			}
+		}
+		reqID++
+		// Send the hash request and verify the response
+		cost := peer.GetRequestCost(GetBlockBodiesMsg, len(hashes))
+		sendRequest(peer.app, GetBlockBodiesMsg, reqID, cost, hashes)
+		if err := expectResponse(peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
+			t.Errorf("test %d: bodies mismatch: %v", i, err)
+		}
+	}
+}
+
+// Tests that the contract codes can be retrieved based on account addresses.
+func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
+
+func testGetCode(t *testing.T, protocol int) {
+	// Assemble the test environment
+	pm, _, _ := newTestProtocolManagerMust(t, false, 4, testChainGen)
+	bc := pm.blockchain.(*core.BlockChain)
+	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+	defer peer.close()
+
+	var codereqs []*CodeReq
+	var codes [][]byte
+
+	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
+		header := bc.GetHeaderByNumber(i)
+		req := &CodeReq{
+			BHash:  header.Hash(),
+			AccKey: crypto.Keccak256(testContractAddr[:]),
+		}
+		codereqs = append(codereqs, req)
+		if i >= testContractDeployed {
+			codes = append(codes, testContractCodeDeployed)
+		}
+	}
+
+	cost := peer.GetRequestCost(GetCodeMsg, len(codereqs))
+	sendRequest(peer.app, GetCodeMsg, 42, cost, codereqs)
+	if err := expectResponse(peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
+		t.Errorf("codes mismatch: %v", err)
+	}
+}
+
+// Tests that the transaction receipts can be retrieved based on hashes.
+func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
+
+func testGetReceipt(t *testing.T, protocol int) {
+	// Assemble the test environment
+	pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen)
+	bc := pm.blockchain.(*core.BlockChain)
+	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+	defer peer.close()
+
+	// Collect the hashes to request, and the response to expect
+	hashes, receipts := []common.Hash{}, []types.Receipts{}
+	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
+		block := bc.GetBlockByNumber(i)
+
+		hashes = append(hashes, block.Hash())
+		receipts = append(receipts, core.GetBlockReceipts(db, block.Hash(), block.NumberU64()))
+	}
+	// Send the hash request and verify the response
+	cost := peer.GetRequestCost(GetReceiptsMsg, len(hashes))
+	sendRequest(peer.app, GetReceiptsMsg, 42, cost, hashes)
+	if err := expectResponse(peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
+		t.Errorf("receipts mismatch: %v", err)
+	}
+}
+
+// Tests that trie merkle proofs can be retrieved
+func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
+
+func testGetProofs(t *testing.T, protocol int) {
+	// Assemble the test environment
+	pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen)
+	bc := pm.blockchain.(*core.BlockChain)
+	peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+	defer peer.close()
+
+	var proofreqs []ProofReq
+	var proofs [][]rlp.RawValue
+
+	accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, common.Address{}}
+	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
+		header := bc.GetHeaderByNumber(i)
+		root := header.Root
+		tr, _ := trie.New(root, db)
+
+		for _, acc := range accounts {
+			req := ProofReq{
+				BHash: header.Hash(),
+				Key:   acc[:],
+			}
+			proofreqs = append(proofreqs, req)
+
+			proof := tr.Prove(crypto.Keccak256(acc[:]))
+			proofs = append(proofs, proof)
+		}
+	}
+	// Send the proof request and verify the response
+	cost := peer.GetRequestCost(GetProofsMsg, len(proofreqs))
+	sendRequest(peer.app, GetProofsMsg, 42, cost, proofreqs)
+	if err := expectResponse(peer.app, ProofsMsg, 42, testBufLimit, proofs); err != nil {
+		t.Errorf("proofs mismatch: %v", err)
+	}
+}
diff --git a/les/helper_test.go b/les/helper_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b3c29e48798271190ffdd683442921458e8696a
--- /dev/null
+++ b/les/helper_test.go
@@ -0,0 +1,334 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// This file contains some shared testing functionality, common to multiple
+// different files and modules being tested.
+
+package les
+
+import (
+	"crypto/ecdsa"
+	"crypto/rand"
+	"math/big"
+	"sync"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/les/flowcontrol"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+var (
+	testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
+	testBankFunds   = big.NewInt(1000000)
+
+	acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+	acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+	acc1Addr   = crypto.PubkeyToAddress(acc1Key.PublicKey)
+	acc2Addr   = crypto.PubkeyToAddress(acc2Key.PublicKey)
+
+	testContractCode         = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
+	testContractAddr         common.Address
+	testContractCodeDeployed = testContractCode[16:]
+	testContractDeployed     = uint64(2)
+
+	testBufLimit = uint64(100)
+)
+
+/*
+contract test {
+
+    uint256[100] data;
+
+    function Put(uint256 addr, uint256 value) {
+        data[addr] = value;
+    }
+
+    function Get(uint256 addr) constant returns (uint256 value) {
+        return data[addr];
+    }
+}
+*/
+
+func testChainGen(i int, block *core.BlockGen) {
+	switch i {
+	case 0:
+		// In block 1, the test bank sends account #1 some ether.
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	case 1:
+		// In block 2, the test bank sends some more ether to account #1.
+		// acc1Addr passes it on to account #2.
+		// acc1Addr creates a test contract.
+		tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
+		nonce := block.TxNonce(acc1Addr)
+		tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
+		nonce++
+		tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(200000), big.NewInt(0), testContractCode).SignECDSA(acc1Key)
+		testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
+		block.AddTx(tx1)
+		block.AddTx(tx2)
+		block.AddTx(tx3)
+	case 2:
+		// Block 3 is empty but was mined by account #2.
+		block.SetCoinbase(acc2Addr)
+		block.SetExtra([]byte("yeehaw"))
+		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	case 3:
+		// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
+		b2 := block.PrevBlock(1).Header()
+		b2.Extra = []byte("foo")
+		block.AddUncle(b2)
+		b3 := block.PrevBlock(2).Header()
+		b3.Extra = []byte("foo")
+		block.AddUncle(b3)
+		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	}
+}
+
+func testRCL() RequestCostList {
+	cl := make(RequestCostList, len(reqList))
+	for i, code := range reqList {
+		cl[i].MsgCode = code
+		cl[i].BaseCost = 0
+		cl[i].ReqCost = 0
+	}
+	return cl
+}
+
+// newTestProtocolManager creates a new protocol manager for testing purposes,
+// with the given number of blocks already known, and potential notification
+// channels for different events.
+func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr, error) {
+	var (
+		evmux       = new(event.TypeMux)
+		pow         = new(core.FakePow)
+		db, _       = ethdb.NewMemDatabase()
+		genesis     = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{Address: testBankAddress, Balance: testBankFunds})
+		chainConfig = &core.ChainConfig{HomesteadBlock: big.NewInt(0)} // homestead set to 0 because of chain maker
+		odr         *LesOdr
+		chain       BlockChain
+	)
+
+	if lightSync {
+		odr = NewLesOdr(db)
+		chain, _ = light.NewLightChain(odr, chainConfig, pow, evmux)
+	} else {
+		blockchain, _ := core.NewBlockChain(db, chainConfig, pow, evmux)
+		gchain, _ := core.GenerateChain(nil, genesis, db, blocks, generator)
+		if _, err := blockchain.InsertChain(gchain); err != nil {
+			panic(err)
+		}
+		chain = blockchain
+	}
+
+	pm, err := NewProtocolManager(chainConfig, lightSync, NetworkId, evmux, pow, chain, nil, db, odr, nil)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	if !lightSync {
+		srv := &LesServer{protocolManager: pm}
+		pm.server = srv
+
+		srv.defParams = &flowcontrol.ServerParams{
+			BufLimit:    testBufLimit,
+			MinRecharge: 1,
+		}
+
+		srv.fcManager = flowcontrol.NewClientManager(50, 10, 1000000000)
+		srv.fcCostStats = newCostStats(nil)
+	}
+	pm.Start(nil)
+	return pm, db, odr, nil
+}
+
+// newTestProtocolManagerMust creates a new protocol manager for testing purposes,
+// with the given number of blocks already known, and potential notification
+// channels for different events. In case of an error, the constructor force-
+// fails the test.
+func newTestProtocolManagerMust(t *testing.T, lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr) {
+	pm, db, odr, err := newTestProtocolManager(lightSync, blocks, generator)
+	if err != nil {
+		t.Fatalf("Failed to create protocol manager: %v", err)
+	}
+	return pm, db, odr
+}
+
+// testTxPool is a fake, helper transaction pool for testing purposes
+type testTxPool struct {
+	pool  []*types.Transaction        // Collection of all transactions
+	added chan<- []*types.Transaction // Notification channel for new transactions
+
+	lock sync.RWMutex // Protects the transaction pool
+}
+
+// AddBatch appends a batch of transactions to the pool, and notifies any
+// listeners if the addition channel is non nil
+func (p *testTxPool) AddBatch(txs []*types.Transaction) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.pool = append(p.pool, txs...)
+	if p.added != nil {
+		p.added <- txs
+	}
+}
+
+// GetTransactions returns all the transactions known to the pool
+func (p *testTxPool) GetTransactions() types.Transactions {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	txs := make([]*types.Transaction, len(p.pool))
+	copy(txs, p.pool)
+
+	return txs
+}
+
+// newTestTransaction creates a new dummy transaction.
+func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction {
+	tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
+	tx, _ = tx.SignECDSA(from)
+
+	return tx
+}
+
+// testPeer is a simulated peer to allow testing direct network calls.
+type testPeer struct {
+	net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
+	app *p2p.MsgPipeRW    // Application layer reader/writer to simulate the local side
+	*peer
+}
+
+// newTestPeer creates a new peer registered at the given protocol manager.
+func newTestPeer(t *testing.T, name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) {
+	// Create a message pipe to communicate through
+	app, net := p2p.MsgPipe()
+
+	// Generate a random id and create the peer
+	var id discover.NodeID
+	rand.Read(id[:])
+
+	peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
+
+	// Start the peer on a new thread
+	errc := make(chan error, 1)
+	go func() {
+		select {
+		case pm.newPeerCh <- peer:
+			errc <- pm.handle(peer)
+		case <-pm.quitSync:
+			errc <- p2p.DiscQuitting
+		}
+	}()
+	tp := &testPeer{
+		app:  app,
+		net:  net,
+		peer: peer,
+	}
+	// Execute any implicitly requested handshakes and return
+	if shake {
+		td, head, genesis := pm.blockchain.Status()
+		headNum := pm.blockchain.CurrentHeader().Number.Uint64()
+		tp.handshake(t, td, head, headNum, genesis)
+	}
+	return tp, errc
+}
+
+func newTestPeerPair(name string, version int, pm, pm2 *ProtocolManager) (*peer, <-chan error, *peer, <-chan error) {
+	// Create a message pipe to communicate through
+	app, net := p2p.MsgPipe()
+
+	// Generate a random id and create the peer
+	var id discover.NodeID
+	rand.Read(id[:])
+
+	peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
+	peer2 := pm2.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), app)
+
+	// Start the peer on a new thread
+	errc := make(chan error, 1)
+	errc2 := make(chan error, 1)
+	go func() {
+		select {
+		case pm.newPeerCh <- peer:
+			errc <- pm.handle(peer)
+		case <-pm.quitSync:
+			errc <- p2p.DiscQuitting
+		}
+	}()
+	go func() {
+		select {
+		case pm2.newPeerCh <- peer2:
+			errc2 <- pm2.handle(peer2)
+		case <-pm2.quitSync:
+			errc2 <- p2p.DiscQuitting
+		}
+	}()
+	return peer, errc, peer2, errc2
+}
+
+// handshake simulates a trivial handshake that expects the same state from the
+// remote side as we are simulating locally.
+func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash) {
+	var expList keyValueList
+	expList = expList.add("protocolVersion", uint64(p.version))
+	expList = expList.add("networkId", uint64(NetworkId))
+	expList = expList.add("headTd", td)
+	expList = expList.add("headHash", head)
+	expList = expList.add("headNum", headNum)
+	expList = expList.add("genesisHash", genesis)
+	sendList := make(keyValueList, len(expList))
+	copy(sendList, expList)
+	expList = expList.add("serveHeaders", nil)
+	expList = expList.add("serveChainSince", uint64(0))
+	expList = expList.add("serveStateSince", uint64(0))
+	expList = expList.add("txRelay", nil)
+	expList = expList.add("flowControl/BL", testBufLimit)
+	expList = expList.add("flowControl/MRR", uint64(1))
+	expList = expList.add("flowControl/MRC", testRCL())
+
+	if err := p2p.ExpectMsg(p.app, StatusMsg, expList); err != nil {
+		t.Fatalf("status recv: %v", err)
+	}
+	if err := p2p.Send(p.app, StatusMsg, sendList); err != nil {
+		t.Fatalf("status send: %v", err)
+	}
+
+	p.fcServerParams = &flowcontrol.ServerParams{
+		BufLimit:    testBufLimit,
+		MinRecharge: 1,
+	}
+}
+
+// close terminates the local side of the peer, notifying the remote protocol
+// manager of termination.
+func (p *testPeer) close() {
+	p.app.Close()
+}
diff --git a/les/metrics.go b/les/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa0796790e1db8280b64c2ad9d93d4c7950cee34
--- /dev/null
+++ b/les/metrics.go
@@ -0,0 +1,111 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+var (
+	/*	propTxnInPacketsMeter     = metrics.NewMeter("eth/prop/txns/in/packets")
+		propTxnInTrafficMeter     = metrics.NewMeter("eth/prop/txns/in/traffic")
+		propTxnOutPacketsMeter    = metrics.NewMeter("eth/prop/txns/out/packets")
+		propTxnOutTrafficMeter    = metrics.NewMeter("eth/prop/txns/out/traffic")
+		propHashInPacketsMeter    = metrics.NewMeter("eth/prop/hashes/in/packets")
+		propHashInTrafficMeter    = metrics.NewMeter("eth/prop/hashes/in/traffic")
+		propHashOutPacketsMeter   = metrics.NewMeter("eth/prop/hashes/out/packets")
+		propHashOutTrafficMeter   = metrics.NewMeter("eth/prop/hashes/out/traffic")
+		propBlockInPacketsMeter   = metrics.NewMeter("eth/prop/blocks/in/packets")
+		propBlockInTrafficMeter   = metrics.NewMeter("eth/prop/blocks/in/traffic")
+		propBlockOutPacketsMeter  = metrics.NewMeter("eth/prop/blocks/out/packets")
+		propBlockOutTrafficMeter  = metrics.NewMeter("eth/prop/blocks/out/traffic")
+		reqHashInPacketsMeter     = metrics.NewMeter("eth/req/hashes/in/packets")
+		reqHashInTrafficMeter     = metrics.NewMeter("eth/req/hashes/in/traffic")
+		reqHashOutPacketsMeter    = metrics.NewMeter("eth/req/hashes/out/packets")
+		reqHashOutTrafficMeter    = metrics.NewMeter("eth/req/hashes/out/traffic")
+		reqBlockInPacketsMeter    = metrics.NewMeter("eth/req/blocks/in/packets")
+		reqBlockInTrafficMeter    = metrics.NewMeter("eth/req/blocks/in/traffic")
+		reqBlockOutPacketsMeter   = metrics.NewMeter("eth/req/blocks/out/packets")
+		reqBlockOutTrafficMeter   = metrics.NewMeter("eth/req/blocks/out/traffic")
+		reqHeaderInPacketsMeter   = metrics.NewMeter("eth/req/headers/in/packets")
+		reqHeaderInTrafficMeter   = metrics.NewMeter("eth/req/headers/in/traffic")
+		reqHeaderOutPacketsMeter  = metrics.NewMeter("eth/req/headers/out/packets")
+		reqHeaderOutTrafficMeter  = metrics.NewMeter("eth/req/headers/out/traffic")
+		reqBodyInPacketsMeter     = metrics.NewMeter("eth/req/bodies/in/packets")
+		reqBodyInTrafficMeter     = metrics.NewMeter("eth/req/bodies/in/traffic")
+		reqBodyOutPacketsMeter    = metrics.NewMeter("eth/req/bodies/out/packets")
+		reqBodyOutTrafficMeter    = metrics.NewMeter("eth/req/bodies/out/traffic")
+		reqStateInPacketsMeter    = metrics.NewMeter("eth/req/states/in/packets")
+		reqStateInTrafficMeter    = metrics.NewMeter("eth/req/states/in/traffic")
+		reqStateOutPacketsMeter   = metrics.NewMeter("eth/req/states/out/packets")
+		reqStateOutTrafficMeter   = metrics.NewMeter("eth/req/states/out/traffic")
+		reqReceiptInPacketsMeter  = metrics.NewMeter("eth/req/receipts/in/packets")
+		reqReceiptInTrafficMeter  = metrics.NewMeter("eth/req/receipts/in/traffic")
+		reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
+		reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")*/
+	miscInPacketsMeter  = metrics.NewMeter("les/misc/in/packets")
+	miscInTrafficMeter  = metrics.NewMeter("les/misc/in/traffic")
+	miscOutPacketsMeter = metrics.NewMeter("les/misc/out/packets")
+	miscOutTrafficMeter = metrics.NewMeter("les/misc/out/traffic")
+)
+
+// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
+// accumulating the above defined metrics based on the data stream contents.
+type meteredMsgReadWriter struct {
+	p2p.MsgReadWriter     // Wrapped message stream to meter
+	version           int // Protocol version to select correct meters
+}
+
+// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
+// metrics system is disabled, this function returns the original object.
+func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
+	if !metrics.Enabled {
+		return rw
+	}
+	return &meteredMsgReadWriter{MsgReadWriter: rw}
+}
+
+// Init sets the protocol version used by the stream to know which meters to
+// increment in case of overlapping message ids between protocol versions.
+func (rw *meteredMsgReadWriter) Init(version int) {
+	rw.version = version
+}
+
+func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
+	// Read the message and short circuit in case of an error
+	msg, err := rw.MsgReadWriter.ReadMsg()
+	if err != nil {
+		return msg, err
+	}
+	// Account for the data traffic
+	packets, traffic := miscInPacketsMeter, miscInTrafficMeter
+	packets.Mark(1)
+	traffic.Mark(int64(msg.Size))
+
+	return msg, err
+}
+
+func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
+	// Account for the data traffic
+	packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
+	packets.Mark(1)
+	traffic.Mark(int64(msg.Size))
+
+	// Send the packet to the p2p layer
+	return rw.MsgReadWriter.WriteMsg(msg)
+}
diff --git a/les/odr.go b/les/odr.go
new file mode 100644
index 0000000000000000000000000000000000000000..444b1da2a2c371fd94f533425d2221551dc41957
--- /dev/null
+++ b/les/odr.go
@@ -0,0 +1,248 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"golang.org/x/net/context"
+)
+
+var (
+	softRequestTimeout = time.Millisecond * 500
+	hardRequestTimeout = time.Second * 10
+	retryPeers         = time.Second * 1
+)
+
+// peerDropFn is a callback type for dropping a peer detected as malicious.
+type peerDropFn func(id string)
+
+type LesOdr struct {
+	light.OdrBackend
+	db           ethdb.Database
+	stop         chan struct{}
+	removePeer   peerDropFn
+	mlock, clock sync.Mutex
+	sentReqs     map[uint64]*sentReq
+	peers        *odrPeerSet
+	lastReqID    uint64
+}
+
+func NewLesOdr(db ethdb.Database) *LesOdr {
+	return &LesOdr{
+		db:       db,
+		stop:     make(chan struct{}),
+		peers:    newOdrPeerSet(),
+		sentReqs: make(map[uint64]*sentReq),
+	}
+}
+
+func (odr *LesOdr) Stop() {
+	close(odr.stop)
+}
+
+func (odr *LesOdr) Database() ethdb.Database {
+	return odr.db
+}
+
+// validatorFunc is a function that processes a message and returns true if
+// it was a meaningful answer to a given request
+type validatorFunc func(ethdb.Database, *Msg) bool
+
+// sentReq is a request waiting for an answer that satisfies its valFunc
+type sentReq struct {
+	valFunc  validatorFunc
+	sentTo   map[*peer]chan struct{}
+	lock     sync.RWMutex  // protects access to sentTo
+	answered chan struct{} // closed and set to nil when any peer answers it
+}
+
+// RegisterPeer registers a new LES peer to the ODR capable peer set
+func (self *LesOdr) RegisterPeer(p *peer) error {
+	return self.peers.register(p)
+}
+
+// UnregisterPeer removes a peer from the ODR capable peer set
+func (self *LesOdr) UnregisterPeer(p *peer) {
+	self.peers.unregister(p)
+}
+
+const (
+	MsgBlockBodies = iota
+	MsgCode
+	MsgReceipts
+	MsgProofs
+	MsgHeaderProofs
+)
+
+// Msg encodes a LES message that delivers reply data for a request
+type Msg struct {
+	MsgType int
+	ReqID   uint64
+	Obj     interface{}
+}
+
+// Deliver is called by the LES protocol manager to deliver ODR reply messages to waiting requests
+func (self *LesOdr) Deliver(peer *peer, msg *Msg) error {
+	var delivered chan struct{}
+	self.mlock.Lock()
+	req, ok := self.sentReqs[msg.ReqID]
+	self.mlock.Unlock()
+	if ok {
+		req.lock.Lock()
+		delivered, ok = req.sentTo[peer]
+		req.lock.Unlock()
+	}
+
+	if !ok {
+		return errResp(ErrUnexpectedResponse, "reqID = %v", msg.ReqID)
+	}
+
+	if req.valFunc(self.db, msg) {
+		close(delivered)
+		req.lock.Lock()
+		if req.answered != nil {
+			close(req.answered)
+			req.answered = nil
+		}
+		req.lock.Unlock()
+		return nil
+	}
+	return errResp(ErrInvalidResponse, "reqID = %v", msg.ReqID)
+}
+
+func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout chan struct{}, reqWg *sync.WaitGroup) {
+	stime := mclock.Now()
+	defer func() {
+		req.lock.Lock()
+		delete(req.sentTo, peer)
+		req.lock.Unlock()
+		reqWg.Done()
+	}()
+
+	select {
+	case <-delivered:
+		servTime := uint64(mclock.Now() - stime)
+		self.peers.updateTimeout(peer, false)
+		self.peers.updateServTime(peer, servTime)
+		return
+	case <-time.After(softRequestTimeout):
+		close(timeout)
+		if self.peers.updateTimeout(peer, true) {
+			self.removePeer(peer.id)
+		}
+	case <-self.stop:
+		return
+	}
+
+	select {
+	case <-delivered:
+		servTime := uint64(mclock.Now() - stime)
+		self.peers.updateServTime(peer, servTime)
+		return
+	case <-time.After(hardRequestTimeout):
+		self.removePeer(peer.id)
+	case <-self.stop:
+		return
+	}
+}
+
+// networkRequest sends a request to known peers until an answer is received
+// or the context is cancelled
+func (self *LesOdr) networkRequest(ctx context.Context, lreq LesOdrRequest) error {
+	answered := make(chan struct{})
+	req := &sentReq{
+		valFunc:  lreq.Valid,
+		sentTo:   make(map[*peer]chan struct{}),
+		answered: answered, // reply delivered by any peer
+	}
+	reqID := self.getNextReqID()
+	self.mlock.Lock()
+	self.sentReqs[reqID] = req
+	self.mlock.Unlock()
+
+	reqWg := new(sync.WaitGroup)
+	reqWg.Add(1)
+	defer reqWg.Done()
+	go func() {
+		reqWg.Wait()
+		self.mlock.Lock()
+		delete(self.sentReqs, reqID)
+		self.mlock.Unlock()
+	}()
+
+	exclude := make(map[*peer]struct{})
+	for {
+		if peer := self.peers.bestPeer(lreq, exclude); peer == nil {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-req.answered:
+				return nil
+			case <-time.After(retryPeers):
+			}
+		} else {
+			exclude[peer] = struct{}{}
+			delivered := make(chan struct{})
+			timeout := make(chan struct{})
+			req.lock.Lock()
+			req.sentTo[peer] = delivered
+			req.lock.Unlock()
+			reqWg.Add(1)
+			cost := lreq.GetCost(peer)
+			peer.fcServer.SendRequest(reqID, cost)
+			go self.requestPeer(req, peer, delivered, timeout, reqWg)
+			lreq.Request(reqID, peer)
+
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-answered:
+				return nil
+			case <-timeout:
+			}
+		}
+	}
+}
+
+// Retrieve tries to fetch an object from the local db, then from the LES network.
+// If the network retrieval was successful, it stores the object in local db.
+func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) {
+	lreq := LesRequest(req)
+	err = self.networkRequest(ctx, lreq)
+	if err == nil {
+		// retrieved from network, store in db
+		req.StoreResult(self.db)
+	} else {
+		glog.V(logger.Debug).Infof("networkRequest  err = %v", err)
+	}
+	return
+}
+
+func (self *LesOdr) getNextReqID() uint64 {
+	self.clock.Lock()
+	defer self.clock.Unlock()
+
+	self.lastReqID++
+	return self.lastReqID
+}
diff --git a/les/odr_peerset.go b/les/odr_peerset.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9b7eec7ff0af95a14efdc22404cae80ad5daf4f
--- /dev/null
+++ b/les/odr_peerset.go
@@ -0,0 +1,120 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"sync"
+)
+
+const dropTimeoutRatio = 20
+
+type odrPeerInfo struct {
+	reqTimeSum, reqTimeCnt, reqCnt, timeoutCnt uint64
+}
+
+// odrPeerSet represents the collection of active peers participating in the block
+// download procedure.
+type odrPeerSet struct {
+	peers map[*peer]*odrPeerInfo
+	lock  sync.RWMutex
+}
+
+// newOdrPeerSet creates a new peer set to track the active download sources.
+func newOdrPeerSet() *odrPeerSet {
+	return &odrPeerSet{
+		peers: make(map[*peer]*odrPeerInfo),
+	}
+}
+
+// Register injects a new peer into the working set, or returns an error if the
+// peer is already known.
+func (ps *odrPeerSet) register(p *peer) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if _, ok := ps.peers[p]; ok {
+		return errAlreadyRegistered
+	}
+	ps.peers[p] = &odrPeerInfo{}
+	return nil
+}
+
+// Unregister removes a remote peer from the active set, disabling any further
+// actions to/from that particular entity.
+func (ps *odrPeerSet) unregister(p *peer) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if _, ok := ps.peers[p]; !ok {
+		return errNotRegistered
+	}
+	delete(ps.peers, p)
+	return nil
+}
+
+func (ps *odrPeerSet) peerPriority(p *peer, info *odrPeerInfo, req LesOdrRequest) uint64 {
+	tm := p.fcServer.CanSend(req.GetCost(p))
+	if info.reqTimeCnt > 0 {
+		tm += info.reqTimeSum / info.reqTimeCnt
+	}
+	return tm
+}
+
+func (ps *odrPeerSet) bestPeer(req LesOdrRequest, exclude map[*peer]struct{}) *peer {
+	var best *peer
+	var bpv uint64
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	for p, info := range ps.peers {
+		if _, ok := exclude[p]; !ok {
+			pv := ps.peerPriority(p, info, req)
+			if best == nil || pv < bpv {
+				best = p
+				bpv = pv
+			}
+		}
+	}
+	return best
+}
+
+func (ps *odrPeerSet) updateTimeout(p *peer, timeout bool) (drop bool) {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if info, ok := ps.peers[p]; ok {
+		info.reqCnt++
+		if timeout {
+			// check ratio before increase to allow an extra timeout
+			if info.timeoutCnt*dropTimeoutRatio >= info.reqCnt {
+				return true
+			}
+			info.timeoutCnt++
+		}
+	}
+	return false
+}
+
+func (ps *odrPeerSet) updateServTime(p *peer, servTime uint64) {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	if info, ok := ps.peers[p]; ok {
+		info.reqTimeSum += servTime
+		info.reqTimeCnt++
+	}
+}
diff --git a/les/odr_requests.go b/les/odr_requests.go
new file mode 100644
index 0000000000000000000000000000000000000000..f4bd5188866bf02e2ee6ffb6aaf512bbd541b7aa
--- /dev/null
+++ b/les/odr_requests.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol, including the
+// on-demand retrieval (ODR) request types used by the light client.
+package les
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
// LesOdrRequest is the LES-specific form of an on-demand retrieval request:
// it can price itself against a serving peer's cost table, send itself over
// the wire, and validate a reply message.
type LesOdrRequest interface {
	GetCost(*peer) uint64            // request cost according to the serving peer's cost table
	Request(uint64, *peer) error     // sends the request with the given reqID to the given peer
	Valid(ethdb.Database, *Msg) bool // if true, keeps the retrieved object
}
+
+func LesRequest(req light.OdrRequest) LesOdrRequest {
+	switch r := req.(type) {
+	case *light.BlockRequest:
+		return (*BlockRequest)(r)
+	case *light.ReceiptsRequest:
+		return (*ReceiptsRequest)(r)
+	case *light.TrieRequest:
+		return (*TrieRequest)(r)
+	case *light.CodeRequest:
+		return (*CodeRequest)(r)
+	case *light.ChtRequest:
+		return (*ChtRequest)(r)
+	default:
+		return nil
+	}
+}
+
// BlockRequest is the ODR request type for block bodies.
type BlockRequest light.BlockRequest

// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest).
func (self *BlockRequest) GetCost(peer *peer) uint64 {
	return peer.GetRequestCost(GetBlockBodiesMsg, 1)
}

// Request sends an ODR request to the LES network, asking for a single block
// body by hash (implementation of LesOdrRequest).
func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
	glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id)
	return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
}
+
+// Valid processes an ODR request reply message from the LES network
+// returns true and stores results in memory if the message was a valid reply
+// to the request (implementation of LesOdrRequest)
+func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
+	glog.V(logger.Debug).Infof("ODR: validating body of block %08x", self.Hash[:4])
+	if msg.MsgType != MsgBlockBodies {
+		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		return false
+	}
+	bodies := msg.Obj.([]*types.Body)
+	if len(bodies) != 1 {
+		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(bodies))
+		return false
+	}
+	body := bodies[0]
+	header := core.GetHeader(db, self.Hash, self.Number)
+	if header == nil {
+		glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
+		return false
+	}
+	txHash := types.DeriveSha(types.Transactions(body.Transactions))
+	if header.TxHash != txHash {
+		glog.V(logger.Debug).Infof("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4])
+		return false
+	}
+	uncleHash := types.CalcUncleHash(body.Uncles)
+	if header.UncleHash != uncleHash {
+		glog.V(logger.Debug).Infof("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4])
+		return false
+	}
+	data, err := rlp.EncodeToBytes(body)
+	if err != nil {
+		glog.V(logger.Debug).Infof("ODR: body RLP encode error: %v", err)
+		return false
+	}
+	self.Rlp = data
+	glog.V(logger.Debug).Infof("ODR: validation successful")
+	return true
+}
+
// ReceiptsRequest is the ODR request type for block receipts by block hash.
type ReceiptsRequest light.ReceiptsRequest

// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest).
func (self *ReceiptsRequest) GetCost(peer *peer) uint64 {
	return peer.GetRequestCost(GetReceiptsMsg, 1)
}

// Request sends an ODR request to the LES network, asking for the receipts
// of a single block by hash (implementation of LesOdrRequest).
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
	glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id)
	return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
}
+
+// Valid processes an ODR request reply message from the LES network
+// returns true and stores results in memory if the message was a valid reply
+// to the request (implementation of LesOdrRequest)
+func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
+	glog.V(logger.Debug).Infof("ODR: validating receipts for block %08x", self.Hash[:4])
+	if msg.MsgType != MsgReceipts {
+		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		return false
+	}
+	receipts := msg.Obj.([]types.Receipts)
+	if len(receipts) != 1 {
+		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(receipts))
+		return false
+	}
+	hash := types.DeriveSha(receipts[0])
+	header := core.GetHeader(db, self.Hash, self.Number)
+	if header == nil {
+		glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
+		return false
+	}
+	if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
+		glog.V(logger.Debug).Infof("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4])
+		return false
+	}
+	self.Receipts = receipts[0]
+	glog.V(logger.Debug).Infof("ODR: validation successful")
+	return true
+}
+
// ProofReq is the wire format of a single merkle proof request.
type ProofReq struct {
	BHash       common.Hash // block whose state/storage trie is queried
	AccKey, Key []byte      // account key (empty for state trie) and entry key
	FromLevel   uint
}

// TrieRequest is the ODR request type for state/storage trie entries, see
// the LesOdrRequest interface.
type TrieRequest light.TrieRequest

// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest).
func (self *TrieRequest) GetCost(peer *peer) uint64 {
	return peer.GetRequestCost(GetProofsMsg, 1)
}

// Request sends an ODR request to the LES network, asking for a merkle proof
// of a single trie entry (implementation of LesOdrRequest).
func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
	glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id)
	req := &ProofReq{
		BHash:  self.Id.BlockHash,
		AccKey: self.Id.AccKey,
		Key:    self.Key,
	}
	return peer.RequestProofs(reqID, self.GetCost(peer), []*ProofReq{req})
}
+
+// Valid processes an ODR request reply message from the LES network
+// returns true and stores results in memory if the message was a valid reply
+// to the request (implementation of LesOdrRequest)
+func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
+	glog.V(logger.Debug).Infof("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4])
+
+	if msg.MsgType != MsgProofs {
+		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		return false
+	}
+	proofs := msg.Obj.([][]rlp.RawValue)
+	if len(proofs) != 1 {
+		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
+		return false
+	}
+	_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
+	if err != nil {
+		glog.V(logger.Debug).Infof("ODR: merkle proof verification error: %v", err)
+		return false
+	}
+	self.Proof = proofs[0]
+	glog.V(logger.Debug).Infof("ODR: validation successful")
+	return true
+}
+
// CodeReq is the wire format of a single contract code request.
type CodeReq struct {
	BHash  common.Hash // block anchoring the state the code belongs to
	AccKey []byte      // account key identifying the contract
}

// CodeRequest is the ODR request type for node data (used for retrieving
// contract code), see the LesOdrRequest interface.
type CodeRequest light.CodeRequest

// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest).
func (self *CodeRequest) GetCost(peer *peer) uint64 {
	return peer.GetRequestCost(GetCodeMsg, 1)
}

// Request sends an ODR request to the LES network, asking for a single code
// blob (implementation of LesOdrRequest).
func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
	glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id)
	req := &CodeReq{
		BHash:  self.Id.BlockHash,
		AccKey: self.Id.AccKey,
	}
	return peer.RequestCode(reqID, self.GetCost(peer), []*CodeReq{req})
}
+
+// Valid processes an ODR request reply message from the LES network
+// returns true and stores results in memory if the message was a valid reply
+// to the request (implementation of LesOdrRequest)
+func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
+	glog.V(logger.Debug).Infof("ODR: validating node data for hash %08x", self.Hash[:4])
+	if msg.MsgType != MsgCode {
+		glog.V(logger.Debug).Infof("ODR: invalid message type")
+		return false
+	}
+	reply := msg.Obj.([][]byte)
+	if len(reply) != 1 {
+		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(reply))
+		return false
+	}
+	data := reply[0]
+	hash := crypto.Sha3Hash(data)
+	if !bytes.Equal(self.Hash[:], hash[:]) {
+		glog.V(logger.Debug).Infof("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4])
+		return false
+	}
+	self.Data = data
+	glog.V(logger.Debug).Infof("ODR: validation successful")
+	return true
+}
+
// ChtReq is the wire format of a single CHT (Canonical Hash Trie) request.
type ChtReq struct {
	ChtNum, BlockNum, FromLevel uint64
}

// ChtResp is the reply format: the requested header plus the merkle proof
// anchoring it in the CHT.
type ChtResp struct {
	Header *types.Header
	Proof  []rlp.RawValue
}

// ChtRequest is the ODR request type for requesting headers by Canonical
// Hash Trie, see the LesOdrRequest interface.
type ChtRequest light.ChtRequest

// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest).
func (self *ChtRequest) GetCost(peer *peer) uint64 {
	return peer.GetRequestCost(GetHeaderProofsMsg, 1)
}

// Request sends an ODR request to the LES network, asking for a single
// CHT-proven header (implementation of LesOdrRequest).
func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
	glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id)
	req := &ChtReq{
		ChtNum:   self.ChtNum,
		BlockNum: self.BlockNum,
	}
	return peer.RequestHeaderProofs(reqID, self.GetCost(peer), []*ChtReq{req})
}
+
// Valid processes an ODR request reply message from the LES network. It
// verifies the merkle proof against the known CHT root, decodes the proven
// value as a ChtNode and requires its hash to match the delivered header.
// On success the proof, header and total difficulty are stored and true is
// returned (implementation of LesOdrRequest).
func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
	glog.V(logger.Debug).Infof("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum)

	if msg.MsgType != MsgHeaderProofs {
		glog.V(logger.Debug).Infof("ODR: invalid message type")
		return false
	}
	proofs := msg.Obj.([]ChtResp)
	if len(proofs) != 1 {
		glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs))
		return false
	}
	proof := proofs[0]
	// the CHT is keyed by the big-endian encoded block number
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
	value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
	if err != nil {
		glog.V(logger.Debug).Infof("ODR: CHT merkle proof verification error: %v", err)
		return false
	}
	var node light.ChtNode
	if err := rlp.DecodeBytes(value, &node); err != nil {
		glog.V(logger.Debug).Infof("ODR: error decoding CHT node: %v", err)
		return false
	}
	// the proven CHT entry must point at the header we were given
	if node.Hash != proof.Header.Hash() {
		glog.V(logger.Debug).Infof("ODR: CHT header hash does not match")
		return false
	}

	self.Proof = proof.Proof
	self.Header = proof.Header
	self.Td = node.Td
	glog.V(logger.Debug).Infof("ODR: validation successful")
	return true
}
diff --git a/les/odr_test.go b/les/odr_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c8ea8c3fad87aacc6e5a7dc92c2ec706d829cec
--- /dev/null
+++ b/les/odr_test.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"bytes"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/net/context"
+)
+
+type odrTestFn func(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte
+
+func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) }
+
+func odrGetBlock(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
+	var block *types.Block
+	if bc != nil {
+		block = bc.GetBlockByHash(bhash)
+	} else {
+		block, _ = lc.GetBlockByHash(ctx, bhash)
+	}
+	if block == nil {
+		return nil
+	}
+	rlp, _ := rlp.EncodeToBytes(block)
+	return rlp
+}
+
+func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) }
+
+func odrGetReceipts(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
+	var receipts types.Receipts
+	if bc != nil {
+		receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash))
+	} else {
+		receipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash))
+	}
+	if receipts == nil {
+		return nil
+	}
+	rlp, _ := rlp.EncodeToBytes(receipts)
+	return rlp
+}
+
+func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) }
+
+func odrAccounts(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
+	dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
+	acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr}
+
+	var res []byte
+	for _, addr := range acc {
+		if bc != nil {
+			header := bc.GetHeaderByHash(bhash)
+			st, err := state.New(header.Root, db)
+			if err == nil {
+				bal := st.GetBalance(addr)
+				rlp, _ := rlp.EncodeToBytes(bal)
+				res = append(res, rlp...)
+			}
+		} else {
+			header := lc.GetHeaderByHash(bhash)
+			st := light.NewLightState(light.StateTrieID(header), lc.Odr())
+			bal, err := st.GetBalance(ctx, addr)
+			if err == nil {
+				rlp, _ := rlp.EncodeToBytes(bal)
+				res = append(res, rlp...)
+			}
+		}
+	}
+
+	return res
+}
+
func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) }

// fullcallmsg is the message type used for call transactions on the full
// node path of the ODR tests.
type fullcallmsg struct {
	from          *state.StateObject
	to            *common.Address
	gas, gasPrice *big.Int
	value         *big.Int
	data          []byte
}

// accessor boilerplate to implement core.Message
func (m fullcallmsg) From() (common.Address, error)         { return m.from.Address(), nil }
func (m fullcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil }
func (m fullcallmsg) Nonce() uint64                         { return 0 }
func (m fullcallmsg) CheckNonce() bool                      { return false }
func (m fullcallmsg) To() *common.Address                   { return m.to }
func (m fullcallmsg) GasPrice() *big.Int                    { return m.gasPrice }
func (m fullcallmsg) Gas() *big.Int                         { return m.gas }
func (m fullcallmsg) Value() *big.Int                       { return m.value }
func (m fullcallmsg) Data() []byte                          { return m.data }

// lightcallmsg is the message type used for call transactions on the light
// client path of the ODR tests.
type lightcallmsg struct {
	from          *light.StateObject
	to            *common.Address
	gas, gasPrice *big.Int
	value         *big.Int
	data          []byte
}

// accessor boilerplate to implement core.Message
func (m lightcallmsg) From() (common.Address, error)         { return m.from.Address(), nil }
func (m lightcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil }
func (m lightcallmsg) Nonce() uint64                         { return 0 }
func (m lightcallmsg) CheckNonce() bool                      { return false }
func (m lightcallmsg) To() *common.Address                   { return m.to }
func (m lightcallmsg) GasPrice() *big.Int                    { return m.gasPrice }
func (m lightcallmsg) Gas() *big.Int                         { return m.gas }
func (m lightcallmsg) Value() *big.Int                       { return m.value }
func (m lightcallmsg) Data() []byte                          { return m.data }
+
// odrContractCall executes the same contract call against the full node's
// state and, via ODR, against the light client's state, concatenating the
// return data of three invocations that differ only in one byte of calldata
// (presumably selecting different stored entries — confirm against the test
// contract in testChainGen).
func odrContractCall(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
	data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000")

	var res []byte
	for i := 0; i < 3; i++ {
		// vary the last calldata word between invocations
		data[35] = byte(i)
		if bc != nil {
			header := bc.GetHeaderByHash(bhash)
			statedb, err := state.New(header.Root, db)
			if err == nil {
				// give the caller an effectively unlimited balance so the
				// call itself never fails on funds
				from := statedb.GetOrNewStateObject(testBankAddress)
				from.SetBalance(common.MaxBig)

				msg := fullcallmsg{
					from:     from,
					gas:      big.NewInt(100000),
					gasPrice: big.NewInt(0),
					value:    big.NewInt(0),
					data:     data,
					to:       &testContractAddr,
				}

				vmenv := core.NewEnv(statedb, config, bc, msg, header, config.VmConfig)
				gp := new(core.GasPool).AddGas(common.MaxBig)
				ret, _, _ := core.ApplyMessage(vmenv, msg, gp)
				res = append(res, ret...)
			}
		} else {
			header := lc.GetHeaderByHash(bhash)
			state := light.NewLightState(light.StateTrieID(header), lc.Odr())
			from, err := state.GetOrNewStateObject(ctx, testBankAddress)
			if err == nil {
				from.SetBalance(common.MaxBig)

				msg := lightcallmsg{
					from:     from,
					gas:      big.NewInt(100000),
					gasPrice: big.NewInt(0),
					value:    big.NewInt(0),
					data:     data,
					to:       &testContractAddr,
				}

				vmenv := light.NewEnv(ctx, state, config, lc, msg, header, config.VmConfig)
				gp := new(core.GasPool).AddGas(common.MaxBig)
				ret, _, _ := core.ApplyMessage(vmenv, msg, gp)
				// only count results when ODR retrieval succeeded
				if vmenv.Error() == nil {
					res = append(res, ret...)
				}
			}
		}
	}
	return res
}
+
+func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
+	// Assemble the test environment
+	pm, db, odr := newTestProtocolManagerMust(t, false, 4, testChainGen)
+	lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil)
+	_, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm)
+	select {
+	case <-time.After(time.Millisecond * 100):
+	case err := <-err1:
+		t.Fatalf("peer 1 handshake error: %v", err)
+	case err := <-err2:
+		t.Fatalf("peer 1 handshake error: %v", err)
+	}
+
+	lpm.synchronise(lpeer)
+
+	test := func(expFail uint64) {
+		for i := uint64(0); i <= pm.blockchain.CurrentHeader().Number.Uint64(); i++ {
+			bhash := core.GetCanonicalHash(db, i)
+			b1 := fn(light.NoOdr, db, pm.chainConfig, pm.blockchain.(*core.BlockChain), nil, bhash)
+			ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond)
+			b2 := fn(ctx, ldb, lpm.chainConfig, nil, lpm.blockchain.(*light.LightChain), bhash)
+			eq := bytes.Equal(b1, b2)
+			exp := i < expFail
+			if exp && !eq {
+				t.Errorf("odr mismatch")
+			}
+			if !exp && eq {
+				t.Errorf("unexpected odr match")
+			}
+		}
+	}
+
+	// temporarily remove peer to test odr fails
+	odr.UnregisterPeer(lpeer)
+	// expect retrievals to fail (except genesis block) without a les peer
+	test(expFail)
+	odr.RegisterPeer(lpeer)
+	// expect all retrievals to pass
+	test(5)
+	odr.UnregisterPeer(lpeer)
+	// still expect all retrievals to pass, now data should be cached locally
+	test(5)
+}
diff --git a/les/peer.go b/les/peer.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d566d8992868f4b6585077dbdfa92e2d1897290
--- /dev/null
+++ b/les/peer.go
@@ -0,0 +1,584 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/les/flowcontrol"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
// Peer-set management errors.
var (
	errClosed            = errors.New("peer set is closed")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// maxHeadInfoLen caps the number of queued head announcements kept per peer.
const maxHeadInfoLen = 20
+
// peer represents one remote LES node, wrapping the underlying devp2p peer
// with protocol state: the chain of announced heads and the flow-control
// bookkeeping for both serving and requesting directions.
type peer struct {
	*p2p.Peer

	rw p2p.MsgReadWriter

	version int // Protocol version negotiated
	network int // Network ID being on

	id string // hex-encoded prefix of the node ID, used for logging/lookup

	// linked list of announced heads (firstHeadInfo oldest, headInfo newest),
	// guarded by lock together with the derived headInfoLen counter
	firstHeadInfo, headInfo *announceData
	headInfoLen             int
	lock                    sync.RWMutex

	announceChn chan announceData // queue of outgoing head announcements

	fcClient       *flowcontrol.ClientNode // nil if the peer is server only
	fcServer       *flowcontrol.ServerNode // nil if the peer is client only
	fcServerParams *flowcontrol.ServerParams
	fcCosts        requestCostTable
}
+
// newPeer creates a les peer wrapper around a devp2p peer, deriving the log
// identifier from the first 8 bytes of the node ID.
func newPeer(version, network int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	id := p.ID()

	return &peer{
		Peer:        p,
		rw:          rw,
		version:     version,
		network:     network,
		id:          fmt.Sprintf("%x", id[:8]),
		announceChn: make(chan announceData, 20),
	}
}
+
+// Info gathers and returns a collection of metadata known about a peer.
+func (p *peer) Info() *eth.PeerInfo {
+	return &eth.PeerInfo{
+		Version:    p.version,
+		Difficulty: p.Td(),
+		Head:       fmt.Sprintf("%x", p.Head()),
+	}
+}
+
+// Head retrieves a copy of the current head (most recent) hash of the peer.
+func (p *peer) Head() (hash common.Hash) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	copy(hash[:], p.headInfo.Hash[:])
+	return hash
+}
+
+func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	copy(hash[:], p.headInfo.Hash[:])
+	return hash, p.headInfo.Td
+}
+
+func (p *peer) headBlockInfo() blockInfo {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
+}
+
// addNotify appends a newly announced head to the peer's head-info list. The
// announcement is rejected (returns false) unless its Td strictly exceeds the
// current head's. When the list is full, the oldest entry is evicted to make
// room. An announcement that does not state how many headers are available
// inherits an estimate from the previous head, reduced by the announced
// reorg depth.
func (p *peer) addNotify(announce *announceData) bool {
	p.lock.Lock()
	defer p.lock.Unlock()

	if announce.Td.Cmp(p.headInfo.Td) < 1 {
		return false
	}
	if p.headInfoLen >= maxHeadInfoLen {
		// evict the oldest entry rather than rejecting the announcement
		p.firstHeadInfo = p.firstHeadInfo.next
		p.headInfoLen--
	}
	if announce.haveHeaders == 0 {
		hh := p.headInfo.Number - announce.ReorgDepth
		if p.headInfo.haveHeaders < hh {
			hh = p.headInfo.haveHeaders
		}
		announce.haveHeaders = hh
	}
	p.headInfo.next = announce
	p.headInfo = announce
	p.headInfoLen++
	return true
}
+
// gotHeader marks the announced head with the given hash as known once its
// header has been retrieved. Entries older than the matched one are dropped
// from the list and the haveHeaders estimate is propagated forward along the
// remaining announcements. It returns false only if the stored number or Td
// for the hash contradicts the retrieved header; an unknown hash returns
// true.
// NOTE(review): unlike addNotify, this walks and mutates the head-info list
// without taking p.lock — confirm that callers serialize access.
func (p *peer) gotHeader(hash common.Hash, number uint64, td *big.Int) bool {
	h := p.firstHeadInfo
	ptr := 0 // number of entries preceding the match, to adjust headInfoLen
	for h != nil {
		if h.Hash == hash {
			if h.Number != number || h.Td.Cmp(td) != 0 {
				return false
			}
			h.headKnown = true
			h.haveHeaders = h.Number
			// drop everything before the matched entry
			p.firstHeadInfo = h
			p.headInfoLen -= ptr
			last := h
			h = h.next
			// propagate haveHeaders through the chain
			for h != nil {
				hh := last.Number - h.ReorgDepth
				if last.haveHeaders < hh {
					hh = last.haveHeaders
				}
				if hh > h.haveHeaders {
					h.haveHeaders = hh
				} else {
					// no improvement possible further down the list
					return true
				}
				last = h
				h = h.next
			}
			return true
		}
		h = h.next
		ptr++
	}
	return true
}
+
+// Td retrieves the current total difficulty of a peer.
+func (p *peer) Td() *big.Int {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return new(big.Int).Set(p.headInfo.Td)
+}
+
+func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
+	type req struct {
+		ReqID uint64
+		Data  interface{}
+	}
+	return p2p.Send(w, msgcode, req{reqID, data})
+}
+
+func sendResponse(w p2p.MsgWriter, msgcode, reqID, bv uint64, data interface{}) error {
+	type resp struct {
+		ReqID, BV uint64
+		Data      interface{}
+	}
+	return p2p.Send(w, msgcode, resp{reqID, bv, data})
+}
+
+func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
+	cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
+	if cost > p.fcServerParams.BufLimit {
+		cost = p.fcServerParams.BufLimit
+	}
+	return cost
+}
+
// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
	return p2p.Send(p.rw, AnnounceMsg, request)
}

// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(reqID, bv uint64, headers []*types.Header) error {
	return sendResponse(p.rw, BlockHeadersMsg, reqID, bv, headers)
}

// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
// an already RLP encoded format.
func (p *peer) SendBlockBodiesRLP(reqID, bv uint64, bodies []rlp.RawValue) error {
	return sendResponse(p.rw, BlockBodiesMsg, reqID, bv, bodies)
}

// SendCode sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) SendCode(reqID, bv uint64, data [][]byte) error {
	return sendResponse(p.rw, CodeMsg, reqID, bv, data)
}

// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) SendReceiptsRLP(reqID, bv uint64, receipts []rlp.RawValue) error {
	return sendResponse(p.rw, ReceiptsMsg, reqID, bv, receipts)
}

// SendProofs sends a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) SendProofs(reqID, bv uint64, proofs proofsData) error {
	return sendResponse(p.rw, ProofsMsg, reqID, bv, proofs)
}

// SendHeaderProofs sends a batch of header proofs, corresponding to the ones requested.
func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
	return sendResponse(p.rw, HeaderProofsMsg, reqID, bv, proofs)
}
+
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
	glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse)
	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
	glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse)
	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
	glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes))
	return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}

// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
	glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(reqs))
	return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
	glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes))
	return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}

// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
	glog.V(logger.Debug).Infof("%v fetching %v proofs", p, len(reqs))
	return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
}

// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
	glog.V(logger.Debug).Infof("%v fetching %v header proofs", p, len(reqs))
	return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
}

// SendTxs relays a batch of transactions to the peer, charging the given
// flow-control cost before sending.
// NOTE(review): the charge uses reqID 0 — presumably tx relays are not
// tracked as replies; confirm against the flow control server node API.
func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
	glog.V(logger.Debug).Infof("%v relaying %v txs", p, len(txs))
	p.fcServer.SendRequest(0, cost)
	return p2p.Send(p.rw, SendTxMsg, txs)
}
+
// keyValueEntry is one field of the RLP-encoded handshake key/value list.
type keyValueEntry struct {
	Key   string
	Value rlp.RawValue
}

// keyValueList is the ordered wire representation of the handshake fields.
type keyValueList []keyValueEntry

// keyValueMap is the decoded, lookup-friendly form of a keyValueList.
type keyValueMap map[string]rlp.RawValue
+
+func (l keyValueList) add(key string, val interface{}) keyValueList {
+	var entry keyValueEntry
+	entry.Key = key
+	if val == nil {
+		val = uint64(0)
+	}
+	enc, err := rlp.EncodeToBytes(val)
+	if err == nil {
+		entry.Value = enc
+	}
+	return append(l, entry)
+}
+
+func (l keyValueList) decode() keyValueMap {
+	m := make(keyValueMap)
+	for _, entry := range l {
+		m[entry.Key] = entry.Value
+	}
+	return m
+}
+
+func (m keyValueMap) get(key string, val interface{}) error {
+	enc, ok := m[key]
+	if !ok {
+		return errResp(ErrHandshakeMissingKey, "%s", key)
+	}
+	if val == nil {
+		return nil
+	}
+	return rlp.DecodeBytes(enc, val)
+}
+
// sendReceiveHandshake sends our handshake key/value list and concurrently
// reads the remote one, validating the message code and size before decoding.
// It returns the remote list, or the first send/receive/decode error.
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
	// Send out own handshake in a new thread
	errc := make(chan error, 1)
	go func() {
		errc <- p2p.Send(p.rw, StatusMsg, sendList)
	}()
	// In the mean time retrieve the remote status message
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return nil, err
	}
	if msg.Code != StatusMsg {
		return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
	}
	if msg.Size > ProtocolMaxMsgSize {
		return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	// Decode the handshake
	var recvList keyValueList
	if err := msg.Decode(&recvList); err != nil {
		return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	// propagate any failure of our own concurrent send
	if err := <-errc; err != nil {
		return nil, err
	}
	return recvList, nil
}
+
+// Handshake executes the les protocol handshake, negotiating version number,
+// network IDs, difficulties, head and genesis blocks. A non-nil server means
+// we are advertising server capabilities; otherwise we act as a light client.
+func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// Assemble our own status message as an extensible key/value list.
+	var send keyValueList
+	send = send.add("protocolVersion", uint64(p.version))
+	send = send.add("networkId", uint64(p.network))
+	send = send.add("headTd", td)
+	send = send.add("headHash", head)
+	send = send.add("headNum", headNum)
+	send = send.add("genesisHash", genesis)
+	if server != nil {
+		// Server mode: advertise offered services and flow control terms.
+		send = send.add("serveHeaders", nil)
+		send = send.add("serveChainSince", uint64(0))
+		send = send.add("serveStateSince", uint64(0))
+		send = send.add("txRelay", nil)
+		send = send.add("flowControl/BL", server.defParams.BufLimit)
+		send = send.add("flowControl/MRR", server.defParams.MinRecharge)
+		list := server.fcCostStats.getCurrentList()
+		send = send.add("flowControl/MRC", list)
+		p.fcCosts = list.decode()
+	}
+	recvList, err := p.sendReceiveHandshake(send)
+	if err != nil {
+		return err
+	}
+	recv := recvList.decode()
+
+	var rGenesis, rHash common.Hash
+	var rVersion, rNetwork, rNum uint64
+	var rTd *big.Int
+
+	// Mandatory fields: missing keys surface as ErrHandshakeMissingKey.
+	if err := recv.get("protocolVersion", &rVersion); err != nil {
+		return err
+	}
+	if err := recv.get("networkId", &rNetwork); err != nil {
+		return err
+	}
+	if err := recv.get("headTd", &rTd); err != nil {
+		return err
+	}
+	if err := recv.get("headHash", &rHash); err != nil {
+		return err
+	}
+	if err := recv.get("headNum", &rNum); err != nil {
+		return err
+	}
+	if err := recv.get("genesisHash", &rGenesis); err != nil {
+		return err
+	}
+
+	// Validate the remote chain identity against our own.
+	if rGenesis != genesis {
+		return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis, genesis)
+	}
+	if int(rNetwork) != p.network {
+		return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
+	}
+	if int(rVersion) != p.version {
+		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
+	}
+	if server != nil {
+		// We are a server: only light clients are useful; a peer that itself
+		// serves state (presence of serveStateSince) is rejected.
+		if recv.get("serveStateSince", nil) == nil {
+			return errResp(ErrUselessPeer, "wanted client, got server")
+		}
+		p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
+	} else {
+		// We are a client: the remote peer must offer full les server service.
+		if recv.get("serveChainSince", nil) != nil {
+			return errResp(ErrUselessPeer, "peer cannot serve chain")
+		}
+		if recv.get("serveStateSince", nil) != nil {
+			return errResp(ErrUselessPeer, "peer cannot serve state")
+		}
+		if recv.get("txRelay", nil) != nil {
+			return errResp(ErrUselessPeer, "peer cannot relay transactions")
+		}
+		params := &flowcontrol.ServerParams{}
+		if err := recv.get("flowControl/BL", &params.BufLimit); err != nil {
+			return err
+		}
+		if err := recv.get("flowControl/MRR", &params.MinRecharge); err != nil {
+			return err
+		}
+		var MRC RequestCostList
+		if err := recv.get("flowControl/MRC", &MRC); err != nil {
+			return err
+		}
+		p.fcServerParams = params
+		p.fcServer = flowcontrol.NewServerNode(params)
+		p.fcCosts = MRC.decode()
+	}
+
+	// Record the remote head as both the first and current head info.
+	p.firstHeadInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
+	p.headInfo = p.firstHeadInfo
+	p.headInfoLen = 1
+	return nil
+}
+
+// String implements fmt.Stringer, rendering the peer as its enode ID plus the
+// negotiated les protocol version.
+func (p *peer) String() string {
+	return fmt.Sprintf("Peer %s [les/%d]", p.id, p.version)
+}
+
+// peerSet represents the collection of active peers currently participating in
+// the Light Ethereum sub-protocol.
+type peerSet struct {
+	peers  map[string]*peer
+	lock   sync.RWMutex
+	closed bool // set by Close; rejects any further registrations
+}
+
+// newPeerSet creates a new peer set to track the active participants.
+func newPeerSet() *peerSet {
+	return &peerSet{
+		peers: make(map[string]*peer),
+	}
+}
+
+// Register injects a new peer into the working set, or returns an error if the
+// set has been closed or the peer is already known.
+func (ps *peerSet) Register(p *peer) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	switch {
+	case ps.closed:
+		return errClosed
+	case ps.peers[p.id] != nil:
+		return errAlreadyRegistered
+	}
+	ps.peers[p.id] = p
+	return nil
+}
+
+// Unregister removes a remote peer from the active set, disabling any further
+// actions to/from that particular entity.
+func (ps *peerSet) Unregister(id string) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	_, known := ps.peers[id]
+	if !known {
+		return errNotRegistered
+	}
+	delete(ps.peers, id)
+	return nil
+}
+
+// AllPeerIDs returns a list of all registered peer IDs. The order is not
+// deterministic (map iteration).
+func (ps *peerSet) AllPeerIDs() []string {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	// Idiomatic single-variable range (gofmt -s / golint: the blank second
+	// variable in "for id, _ := range" is redundant) with an append-based
+	// fill, which cannot go out of sync with an index counter.
+	ids := make([]string, 0, len(ps.peers))
+	for id := range ps.peers {
+		ids = append(ids, id)
+	}
+	return ids
+}
+
+// Peer retrieves the registered peer with the given id, or nil if unknown.
+func (ps *peerSet) Peer(id string) *peer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return ps.peers[id]
+}
+
+// Len returns the current number of peers in the set.
+func (ps *peerSet) Len() int {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return len(ps.peers)
+}
+
+// BestPeer retrieves the known peer with the currently highest total
+// difficulty, or nil if the set is empty.
+// NOTE(review): assumes p.Td() never returns nil — confirm against peer.Td.
+func (ps *peerSet) BestPeer() *peer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	var (
+		bestPeer *peer
+		bestTd   *big.Int
+	)
+	for _, p := range ps.peers {
+		if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
+			bestPeer, bestTd = p, td
+		}
+	}
+	return bestPeer
+}
+
+// AllPeers returns all peers in a list
+func (ps *peerSet) AllPeers() []*peer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	all := make([]*peer, 0, len(ps.peers))
+	for _, p := range ps.peers {
+		all = append(all, p)
+	}
+	return all
+}
+
+// Close disconnects all peers.
+// No new peers can be registered after Close has returned: Register checks
+// the closed flag and returns errClosed.
+func (ps *peerSet) Close() {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	for _, p := range ps.peers {
+		p.Disconnect(p2p.DiscQuitting)
+	}
+	ps.closed = true
+}
diff --git a/les/protocol.go b/les/protocol.go
new file mode 100644
index 0000000000000000000000000000000000000000..46da2b8c8b595fd61f73bfd1bb7594ae4625815b
--- /dev/null
+++ b/les/protocol.go
@@ -0,0 +1,198 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"fmt"
+	"io"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+// Constants to match up protocol versions and messages
+const (
+	lpv1 = 1
+)
+
+// Supported versions of the les protocol (first is primary).
+var ProtocolVersions = []uint{lpv1}
+
+// Number of implemented message corresponding to different protocol versions.
+// 15 matches the message codes 0x00 through 0x0e defined below.
+var ProtocolLengths = []uint64{15}
+
+const (
+	NetworkId          = 1
+	ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
+)
+
+// les protocol message codes
+const (
+	// Protocol messages belonging to LPV1
+	StatusMsg          = 0x00
+	AnnounceMsg        = 0x01
+	GetBlockHeadersMsg = 0x02
+	BlockHeadersMsg    = 0x03
+	GetBlockBodiesMsg  = 0x04
+	BlockBodiesMsg     = 0x05
+	GetReceiptsMsg     = 0x06
+	ReceiptsMsg        = 0x07
+	GetProofsMsg       = 0x08
+	ProofsMsg          = 0x09
+	GetCodeMsg         = 0x0a
+	CodeMsg            = 0x0b
+	SendTxMsg          = 0x0c
+	GetHeaderProofsMsg = 0x0d
+	HeaderProofsMsg    = 0x0e
+)
+
+// errCode identifies the les protocol error conditions defined below.
+type errCode int
+
+const (
+	ErrMsgTooLarge = iota
+	ErrDecode
+	ErrInvalidMsgCode
+	ErrProtocolVersionMismatch
+	ErrNetworkIdMismatch
+	ErrGenesisBlockMismatch
+	ErrNoStatusMsg
+	ErrExtraStatusMsg
+	ErrSuspendedPeer
+	ErrUselessPeer
+	ErrRequestRejected
+	ErrUnexpectedResponse
+	ErrInvalidResponse
+	ErrTooManyTimeouts
+	ErrHandshakeMissingKey
+)
+
+// String implements fmt.Stringer. Codes without an errorToString entry are
+// rendered explicitly instead of silently becoming the empty string.
+func (e errCode) String() string {
+	if s, ok := errorToString[int(e)]; ok {
+		return s
+	}
+	return fmt.Sprintf("error code %d", int(e))
+}
+
+// XXX change once legacy code is out
+// errorToString maps every errCode constant to a human readable description.
+var errorToString = map[int]string{
+	ErrMsgTooLarge:             "Message too long",
+	ErrDecode:                  "Invalid message",
+	ErrInvalidMsgCode:          "Invalid message code",
+	ErrProtocolVersionMismatch: "Protocol version mismatch",
+	ErrNetworkIdMismatch:       "NetworkId mismatch",
+	ErrGenesisBlockMismatch:    "Genesis block mismatch",
+	ErrNoStatusMsg:             "No status message",
+	ErrExtraStatusMsg:          "Extra status message",
+	ErrSuspendedPeer:           "Suspended peer",
+	// ErrUselessPeer was missing, making errCode(ErrUselessPeer).String()
+	// return an empty string; same text as eth/protocol.go.
+	ErrUselessPeer:             "Useless peer",
+	ErrRequestRejected:         "Request rejected",
+	ErrUnexpectedResponse:      "Unexpected response",
+	ErrInvalidResponse:         "Invalid response",
+	ErrTooManyTimeouts:         "Too many request timeouts",
+	ErrHandshakeMissingKey:     "Key missing from handshake message",
+}
+
+// chainManager is the minimal chain access interface the protocol needs.
+type chainManager interface {
+	GetBlockHashesFromHash(hash common.Hash, amount uint64) (hashes []common.Hash)
+	GetBlock(hash common.Hash) (block *types.Block)
+	Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash)
+}
+
+// announceData is the network packet for the block announcements.
+// The unexported fields are client-side bookkeeping and are never sent.
+type announceData struct {
+	Hash       common.Hash // Hash of one particular block being announced
+	Number     uint64      // Number of one particular block being announced
+	Td         *big.Int    // Total difficulty of one particular block being announced
+	ReorgDepth uint64
+	Update     keyValueList
+
+	haveHeaders uint64 // we have the headers of the remote peer's chain up to this number
+	headKnown   bool
+	requested   bool
+	next        *announceData
+}
+
+// blockInfo describes a single chain head (hash, number, total difficulty).
+type blockInfo struct {
+	Hash   common.Hash // Hash of one particular block being announced
+	Number uint64      // Number of one particular block being announced
+	Td     *big.Int    // Total difficulty of one particular block being announced
+}
+
+// getBlockHashesData is the network packet for the hash based hash retrieval.
+type getBlockHashesData struct {
+	Hash   common.Hash
+	Amount uint64
+}
+
+// getBlockHeadersData represents a block header query.
+type getBlockHeadersData struct {
+	Origin  hashOrNumber // Block from which to retrieve headers
+	Amount  uint64       // Maximum number of headers to retrieve
+	Skip    uint64       // Blocks to skip between consecutive headers
+	Reverse bool         // Query direction (false = rising towards latest, true = falling towards genesis)
+}
+
+// hashOrNumber is a combined field for specifying an origin block.
+type hashOrNumber struct {
+	Hash   common.Hash // Block hash from which to retrieve headers (excludes Number)
+	Number uint64      // Block number from which to retrieve headers (excludes Hash)
+}
+
+// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the
+// two contained union fields: a zero hash means the number is encoded;
+// otherwise the hash is encoded and a non-zero number is rejected.
+func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
+	if hn.Hash == (common.Hash{}) {
+		return rlp.Encode(w, hn.Number)
+	}
+	if hn.Number != 0 {
+		return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
+	}
+	return rlp.Encode(w, hn.Hash)
+}
+
+// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
+// into either a block hash (32-byte payload) or a block number (up to 8
+// bytes). The error from s.Kind is deliberately ignored: s.Raw reports the
+// same failure.
+func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
+	_, size, _ := s.Kind()
+	origin, err := s.Raw()
+	if err == nil {
+		switch {
+		case size == 32:
+			err = rlp.DecodeBytes(origin, &hn.Hash)
+		case size <= 8:
+			err = rlp.DecodeBytes(origin, &hn.Number)
+		default:
+			err = fmt.Errorf("invalid input size %d for origin", size)
+		}
+	}
+	return err
+}
+
+// newBlockData is the network packet for the block propagation message.
+type newBlockData struct {
+	Block *types.Block
+	TD    *big.Int
+}
+
+// blockBodiesData is the network packet for block content distribution.
+type blockBodiesData []*types.Body
+
+// CodeData is the network response packet for a node data retrieval.
+type CodeData []struct {
+	Value []byte
+}
+
+// proofsData is the network response packet for merkle proof retrievals:
+// one list of RLP-encoded trie nodes per requested proof.
+type proofsData [][]rlp.RawValue
diff --git a/les/request_test.go b/les/request_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6fbb06ce3da31712658e8877952cd6333724792
--- /dev/null
+++ b/les/request_test.go
@@ -0,0 +1,110 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/light"
+	"golang.org/x/net/context"
+)
+
+// testBankSecureTrieKey is the secure-trie key (Keccak256 of the address)
+// under which the test bank account lives in the state trie.
+var testBankSecureTrieKey = secAddr(testBankAddress)
+
+// secAddr hashes an address the way the secure trie hashes its keys.
+func secAddr(addr common.Address) []byte {
+	return crypto.Keccak256(addr[:])
+}
+
+// accessTestFn builds an ODR request for the given block, or returns nil when
+// the block is not applicable to the tested retrieval type.
+type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest
+
+func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) }
+
+func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
+	return &light.BlockRequest{Hash: bhash, Number: number}
+}
+
+func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) }
+
+func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
+	return &light.ReceiptsRequest{Hash: bhash, Number: number}
+}
+
+func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) }
+
+func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
+	return &light.TrieRequest{Id: light.StateTrieID(core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash))), Key: testBankSecureTrieKey}
+}
+
+func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
+
+func tfCodeAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
+	header := core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash))
+	// Contract code only exists from the deployment block onwards.
+	if header.Number.Uint64() < testContractDeployed {
+		return nil
+	}
+	sti := light.StateTrieID(header)
+	ci := light.StorageTrieID(sti, testContractAddr, common.Hash{})
+	return &light.CodeRequest{Id: ci, Hash: crypto.Keccak256Hash(testContractCodeDeployed)}
+}
+
+// testAccess wires a server node to a light client node and verifies that the
+// ODR request built by fn succeeds exactly while a serving peer is registered.
+func testAccess(t *testing.T, protocol int, fn accessTestFn) {
+	// Assemble the test environment
+	pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen)
+	lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil)
+	_, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm)
+	select {
+	case <-time.After(time.Millisecond * 100):
+	case err := <-err1:
+		t.Fatalf("peer 1 handshake error: %v", err)
+	case err := <-err2:
+		// was "peer 1" — copy-paste error made failures misattributed
+		t.Fatalf("peer 2 handshake error: %v", err)
+	}
+
+	lpm.synchronise(lpeer)
+
+	test := func(expFail uint64) {
+		for i := uint64(0); i <= pm.blockchain.CurrentHeader().Number.Uint64(); i++ {
+			bhash := core.GetCanonicalHash(db, i)
+			if req := fn(ldb, bhash, i); req != nil {
+				ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+				err := odr.Retrieve(ctx, req)
+				// Release the timeout context's resources right away
+				// (go vet lostcancel; a deferred call would pile up in the loop).
+				cancel()
+				got := err == nil
+				exp := i < expFail
+				if exp && !got {
+					t.Errorf("object retrieval failed")
+				}
+				if !exp && got {
+					t.Errorf("unexpected object retrieval success")
+				}
+			}
+		}
+	}
+
+	// temporarily remove peer to test odr fails
+	odr.UnregisterPeer(lpeer)
+	// expect retrievals to fail (except genesis block) without a les peer
+	test(0)
+	odr.RegisterPeer(lpeer)
+	// expect all retrievals to pass
+	test(5)
+	odr.UnregisterPeer(lpeer)
+}
diff --git a/les/server.go b/les/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..daa28be05840f3a6349b36eb9d791ccaab3724b8
--- /dev/null
+++ b/les/server.go
@@ -0,0 +1,402 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/les/flowcontrol"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+// LesServer bundles the server side of the les protocol: the protocol manager
+// plus the flow control machinery used to throttle light clients.
+type LesServer struct {
+	protocolManager *ProtocolManager
+	fcManager       *flowcontrol.ClientManager // nil if our node is client only
+	fcCostStats     *requestCostStats
+	defParams       *flowcontrol.ServerParams
+}
+
+// NewLesServer creates a server-mode protocol manager for an existing full
+// node and initializes its flow control parameters and cost statistics.
+func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
+	pm, err := NewProtocolManager(config.ChainConfig, false, config.NetworkId, eth.EventMux(), eth.Pow(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	pm.blockLoop()
+
+	srv := &LesServer{protocolManager: pm}
+	pm.server = srv
+
+	srv.defParams = &flowcontrol.ServerParams{
+		BufLimit:    300000000,
+		MinRecharge: 50000,
+	}
+	srv.fcManager = flowcontrol.NewClientManager(uint64(config.LightServ), 10, 1000000000)
+	srv.fcCostStats = newCostStats(eth.ChainDb())
+	return srv, nil
+}
+
+// Protocols returns the p2p sub-protocols the server speaks.
+func (s *LesServer) Protocols() []p2p.Protocol {
+	return s.protocolManager.SubProtocols
+}
+
+// Start launches the protocol manager on the given p2p server.
+func (s *LesServer) Start(srvr *p2p.Server) {
+	s.protocolManager.Start(srvr)
+
+}
+
+// Stop persists the cost statistics, shuts down flow control and stops the
+// protocol manager. The goroutine drains noMorePeers so the manager's
+// shutdown path never blocks on that channel.
+func (s *LesServer) Stop() {
+	s.fcCostStats.store()
+	s.fcManager.Stop()
+	go func() {
+		<-s.protocolManager.noMorePeers
+	}()
+	s.protocolManager.Stop()
+}
+
+// requestCosts is the (base, per-item) cost pair charged for one message type.
+type requestCosts struct {
+	baseCost, reqCost uint64
+}
+
+// requestCostTable maps message codes to their costs.
+type requestCostTable map[uint64]*requestCosts
+
+// RequestCostList is the RLP wire representation of a requestCostTable, as
+// announced under "flowControl/MRC" during the handshake.
+type RequestCostList []struct {
+	MsgCode, BaseCost, ReqCost uint64
+}
+
+// decode converts the wire list into a lookup table.
+func (list RequestCostList) decode() requestCostTable {
+	table := make(requestCostTable)
+	for _, e := range list {
+		table[e.MsgCode] = &requestCosts{
+			baseCost: e.BaseCost,
+			reqCost:  e.ReqCost,
+		}
+	}
+	return table
+}
+
+// encode converts the table back into a wire list, ordered by the global
+// reqList. Assumes the table has an entry for every code in reqList —
+// a missing entry would nil-dereference; TODO confirm at call sites.
+func (table requestCostTable) encode() RequestCostList {
+	list := make(RequestCostList, len(table))
+	for idx, code := range reqList {
+		list[idx].MsgCode = code
+		list[idx].BaseCost = table[code].baseCost
+		list[idx].ReqCost = table[code].reqCost
+	}
+	return list
+}
+
+// linReg accumulates samples for a simple linear regression y = b + m*x.
+type linReg struct {
+	sumX, sumY, sumXX, sumXY float64
+	cnt                      uint64
+}
+
+// linRegMaxCnt caps the effective sample count; beyond it old samples decay.
+const linRegMaxCnt = 100000
+
+// add records one (x, y) sample. Once the counter reaches linRegMaxCnt all
+// sums are scaled down proportionally so new samples keep influencing the
+// fit (a crude exponential forgetting scheme) and cnt stays at the cap.
+func (l *linReg) add(x, y float64) {
+	if l.cnt >= linRegMaxCnt {
+		sub := float64(l.cnt+1-linRegMaxCnt) / linRegMaxCnt
+		l.sumX -= l.sumX * sub
+		l.sumY -= l.sumY * sub
+		l.sumXX -= l.sumXX * sub
+		l.sumXY -= l.sumXY * sub
+		l.cnt = linRegMaxCnt - 1
+	}
+	l.cnt++
+	l.sumX += x
+	l.sumY += y
+	l.sumXX += x * x
+	l.sumXY += x * y
+}
+
+// calc returns the ordinary least squares intercept b and slope m for the
+// accumulated samples. With no samples it returns (0, 0); when the variance
+// denominator is (near) zero the mean of y is returned with zero slope.
+func (l *linReg) calc() (b, m float64) {
+	if l.cnt == 0 {
+		return 0, 0
+	}
+	cnt := float64(l.cnt)
+	d := cnt*l.sumXX - l.sumX*l.sumX
+	if d < 0.001 {
+		return l.sumY / cnt, 0
+	}
+	m = (cnt*l.sumXY - l.sumX*l.sumY) / d
+	b = (l.sumY / cnt) - (m * l.sumX / cnt)
+	return b, m
+}
+
+func (l *linReg) toBytes() []byte {
+	var arr [40]byte
+	binary.BigEndian.PutUint64(arr[0:8], math.Float64bits(l.sumX))
+	binary.BigEndian.PutUint64(arr[8:16], math.Float64bits(l.sumY))
+	binary.BigEndian.PutUint64(arr[16:24], math.Float64bits(l.sumXX))
+	binary.BigEndian.PutUint64(arr[24:32], math.Float64bits(l.sumXY))
+	binary.BigEndian.PutUint64(arr[32:40], l.cnt)
+	return arr[:]
+}
+
+func linRegFromBytes(data []byte) *linReg {
+	if len(data) != 40 {
+		return nil
+	}
+	l := &linReg{}
+	l.sumX = math.Float64frombits(binary.BigEndian.Uint64(data[0:8]))
+	l.sumY = math.Float64frombits(binary.BigEndian.Uint64(data[8:16]))
+	l.sumXX = math.Float64frombits(binary.BigEndian.Uint64(data[16:24]))
+	l.sumXY = math.Float64frombits(binary.BigEndian.Uint64(data[24:32]))
+	l.cnt = binary.BigEndian.Uint64(data[32:40])
+	return l
+}
+
+// requestCostStats keeps one running regression per request message code,
+// estimating serving cost as a linear function of request count.
+type requestCostStats struct {
+	lock  sync.RWMutex
+	db    ethdb.Database
+	stats map[uint64]*linReg
+}
+
+// requestCostStatsRlp is the persisted form: one serialized linReg per code.
+type requestCostStatsRlp []struct {
+	MsgCode uint64
+	Data    []byte
+}
+
+var rcStatsKey = []byte("_requestCostStats")
+
+// newCostStats creates cost statistics for every code in reqList, seeding
+// each regression with cnt=100 and zero sums (presumably a prior that damps
+// early samples — TODO confirm intent). If db is non-nil, previously stored
+// statistics override the seeds; any load/decode error silently keeps them.
+func newCostStats(db ethdb.Database) *requestCostStats {
+	stats := make(map[uint64]*linReg)
+	for _, code := range reqList {
+		stats[code] = &linReg{cnt: 100}
+	}
+
+	if db != nil {
+		data, err := db.Get(rcStatsKey)
+		var statsRlp requestCostStatsRlp
+		if err == nil {
+			err = rlp.DecodeBytes(data, &statsRlp)
+		}
+		if err == nil {
+			for _, r := range statsRlp {
+				// Only restore codes we still know about.
+				if stats[r.MsgCode] != nil {
+					if l := linRegFromBytes(r.Data); l != nil {
+						stats[r.MsgCode] = l
+					}
+				}
+			}
+		}
+	}
+
+	return &requestCostStats{
+		db:    db,
+		stats: stats,
+	}
+}
+
+// store persists the current cost statistics under rcStatsKey. Encoding or
+// write failures are ignored (best effort, same as the original).
+func (s *requestCostStats) store() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	// newCostStats accepts a nil database; mirror that here instead of
+	// nil-dereferencing on s.db.Put.
+	if s.db == nil {
+		return
+	}
+	statsRlp := make(requestCostStatsRlp, len(reqList))
+	for i, code := range reqList {
+		statsRlp[i].MsgCode = code
+		statsRlp[i].Data = s.stats[code].toBytes()
+	}
+
+	if data, err := rlp.EncodeToBytes(statsRlp); err == nil {
+		s.db.Put(rcStatsKey, data)
+	}
+}
+
+// getCurrentList renders the current regression estimates as the cost list
+// announced to clients. Negative slopes are folded into the base cost and
+// negative bases clamped to zero, so advertised costs are never negative;
+// both values are doubled — presumably a safety margin, TODO confirm.
+func (s *requestCostStats) getCurrentList() RequestCostList {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	list := make(RequestCostList, len(reqList))
+	//fmt.Println("RequestCostList")
+	for idx, code := range reqList {
+		b, m := s.stats[code].calc()
+		//fmt.Println(code, s.stats[code].cnt, b/1000000, m/1000000)
+		if m < 0 {
+			b += m
+			m = 0
+		}
+		if b < 0 {
+			b = 0
+		}
+
+		list[idx].MsgCode = code
+		list[idx].BaseCost = uint64(b * 2)
+		list[idx].ReqCost = uint64(m * 2)
+	}
+	return list
+}
+
+// update folds one served request into the regression for msgCode. Unknown
+// codes and empty requests are ignored.
+func (s *requestCostStats) update(msgCode, reqCnt, cost uint64) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if reqCnt == 0 {
+		return
+	}
+	if stat, ok := s.stats[msgCode]; ok {
+		stat.add(float64(reqCnt), float64(cost))
+	}
+}
+
+// blockLoop watches chain head events, broadcasting new heads to all les
+// peers and triggering incremental CHT generation after each head (and once
+// at startup via the pre-filled newCht channel).
+func (pm *ProtocolManager) blockLoop() {
+	pm.wg.Add(1)
+	sub := pm.eventMux.Subscribe(core.ChainHeadEvent{})
+	newCht := make(chan struct{}, 10)
+	newCht <- struct{}{}
+	go func() {
+		var mu sync.Mutex
+		var lastHead *types.Header
+		lastBroadcastTd := common.Big0
+		for {
+			select {
+			case ev := <-sub.Chan():
+				peers := pm.peers.AllPeers()
+				if len(peers) > 0 {
+					header := ev.Data.(core.ChainHeadEvent).Block.Header()
+					hash := header.Hash()
+					number := header.Number.Uint64()
+					td := core.GetTd(pm.chainDb, hash, number)
+					// Only announce heads that strictly increase total difficulty.
+					if td != nil && td.Cmp(lastBroadcastTd) > 0 {
+						var reorg uint64
+						if lastHead != nil {
+							reorg = lastHead.Number.Uint64() - core.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
+						}
+						lastHead = header
+						lastBroadcastTd = td
+						//fmt.Println("BROADCAST", number, hash, td, reorg)
+						announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
+						for _, p := range peers {
+							select {
+							case p.announceChn <- announce:
+							default:
+								// Peer's announce queue is full: drop the peer.
+								pm.removePeer(p.id)
+							}
+						}
+					}
+				}
+				newCht <- struct{}{}
+			case <-newCht:
+				// Build at most one CHT section at a time (mu serializes the
+				// workers); requeue with a short pause while more work remains.
+				go func() {
+					mu.Lock()
+					more := makeCht(pm.chainDb)
+					mu.Unlock()
+					if more {
+						time.Sleep(time.Millisecond * 10)
+						newCht <- struct{}{}
+					}
+				}()
+			case <-pm.quitSync:
+				sub.Unsubscribe()
+				pm.wg.Done()
+				return
+			}
+		}
+	}()
+}
+
+var (
+	lastChtKey       = []byte("LastChtNumber") // chtNum (uint64 big endian)
+	chtPrefix        = []byte("cht")           // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+	chtConfirmations = light.ChtFrequency / 2  // blocks a section must be buried before it enters the CHT
+)
+
+// getChtRoot reads the CHT root for section num; a missing entry yields the
+// zero hash.
+func getChtRoot(db ethdb.Database, num uint64) common.Hash {
+	var encNumber [8]byte
+	binary.BigEndian.PutUint64(encNumber[:], num)
+	data, _ := db.Get(append(chtPrefix, encNumber[:]...))
+	return common.BytesToHash(data)
+}
+
+// storeChtRoot writes the CHT root for section num.
+func storeChtRoot(db ethdb.Database, num uint64, root common.Hash) {
+	var encNumber [8]byte
+	binary.BigEndian.PutUint64(encNumber[:], num)
+	db.Put(append(chtPrefix, encNumber[:]...), root[:])
+}
+
+// makeCht extends the Canonical Hash Trie by at most one section of
+// light.ChtFrequency blocks, once that section is chtConfirmations blocks
+// deep. It returns true while more confirmed sections remain to be built.
+// NOTE(review): panics on a missing canonical hash/TD and prints progress
+// with fmt.Printf — acceptable for this stage, but worth revisiting.
+func makeCht(db ethdb.Database) bool {
+	headHash := core.GetHeadBlockHash(db)
+	headNum := core.GetBlockNumber(db, headHash)
+
+	// Highest section number that is sufficiently confirmed.
+	var newChtNum uint64
+	if headNum > chtConfirmations {
+		newChtNum = (headNum - chtConfirmations) / light.ChtFrequency
+	}
+
+	var lastChtNum uint64
+	data, _ := db.Get(lastChtKey)
+	if len(data) == 8 {
+		lastChtNum = binary.BigEndian.Uint64(data[:])
+	}
+	if newChtNum <= lastChtNum {
+		return false
+	}
+
+	// Continue from the previous section's trie; restart from scratch if the
+	// stored root cannot be opened.
+	var t *trie.Trie
+	if lastChtNum > 0 {
+		var err error
+		t, err = trie.New(getChtRoot(db, lastChtNum), db)
+		if err != nil {
+			lastChtNum = 0
+		}
+	}
+	if lastChtNum == 0 {
+		t, _ = trie.New(common.Hash{}, db)
+	}
+
+	// Insert (blockNumber -> {hash, td}) for every block of the next section.
+	for num := lastChtNum * light.ChtFrequency; num < (lastChtNum+1)*light.ChtFrequency; num++ {
+		hash := core.GetCanonicalHash(db, num)
+		if hash == (common.Hash{}) {
+			panic("Canonical hash not found")
+		}
+		td := core.GetTd(db, hash, num)
+		if td == nil {
+			panic("TD not found")
+		}
+		var encNumber [8]byte
+		binary.BigEndian.PutUint64(encNumber[:], num)
+		var node light.ChtNode
+		node.Hash = hash
+		node.Td = td
+		data, _ := rlp.EncodeToBytes(node)
+		t.Update(encNumber[:], data)
+	}
+
+	root, err := t.Commit()
+	if err != nil {
+		lastChtNum = 0
+	} else {
+		lastChtNum++
+		fmt.Printf("CHT %d %064x\n", lastChtNum, root)
+		storeChtRoot(db, lastChtNum, root)
+		var data [8]byte
+		binary.BigEndian.PutUint64(data[:], lastChtNum)
+		db.Put(lastChtKey, data[:])
+	}
+
+	return newChtNum > lastChtNum
+}
diff --git a/les/sync.go b/les/sync.go
new file mode 100644
index 0000000000000000000000000000000000000000..72c979c61fe44429b8a586d3a2f99ce32e3c771d
--- /dev/null
+++ b/les/sync.go
@@ -0,0 +1,84 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/light"
+	"golang.org/x/net/context"
+)
+
+const (
+	//forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
+	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+)
+
+// syncer is responsible for periodically synchronising with the network, both
+// downloading hashes and blocks as well as handling the announcement handler.
+// Most triggers are currently disabled (commented out); the loop only drains
+// newPeerCh and terminates the downloader when no peers remain.
+func (pm *ProtocolManager) syncer() {
+	// Start and ensure cleanup of sync mechanisms
+	//pm.fetcher.Start()
+	//defer pm.fetcher.Stop()
+	defer pm.downloader.Terminate()
+
+	// Wait for different events to fire synchronisation operations
+	//forceSync := time.Tick(forceSyncCycle)
+	for {
+		select {
+		case <-pm.newPeerCh:
+/*			// Make sure we have peers to select from, then sync
+			if pm.peers.Len() < minDesiredPeerCount {
+				break
+			}
+			go pm.synchronise(pm.peers.BestPeer())
+*/
+		/*case <-forceSync:
+		// Force a sync even if not enough peers are present
+		go pm.synchronise(pm.peers.BestPeer())
+		*/
+		case <-pm.noMorePeers:
+			return
+		}
+	}
+}
+
+// needToSync reports whether the remote head has a strictly higher total
+// difficulty than our current local head.
+func (pm *ProtocolManager) needToSync(peerHead blockInfo) bool {
+	head := pm.blockchain.CurrentHeader()
+	currentTd := core.GetTd(pm.chainDb, head.Hash(), head.Number.Uint64())
+	return currentTd != nil && peerHead.Td.Cmp(currentTd) > 0
+}
+
+// synchronise tries to sync up our local block chain with a remote peer.
+func (pm *ProtocolManager) synchronise(peer *peer) {
+	// Short circuit if no peers are available
+	if peer == nil {
+		return
+	}
+
+	// Make sure the peer's TD is higher than our own.
+	if !pm.needToSync(peer.headBlockInfo()) {
+		return
+	}
+
+	// Keep the cancel func and release the context's resources when done
+	// (go vet lostcancel: discarding it leaks the timeout timer).
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+	pm.blockchain.(*light.LightChain).SyncCht(ctx)
+
+	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync)
+}
diff --git a/les/txrelay.go b/les/txrelay.go
new file mode 100644
index 0000000000000000000000000000000000000000..036158f5d252f98dbf8b40cc919d3ac95074aa90
--- /dev/null
+++ b/les/txrelay.go
@@ -0,0 +1,157 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// ltrInfo tracks a single relayed transaction together with the set of peers
+// it has already been sent to, so it is never resent to the same peer twice.
+type ltrInfo struct {
+	tx     *types.Transaction
+	sentTo map[*peer]struct{}
+}
+
+// LesTxRelay broadcasts locally-created transactions to LES server peers,
+// rotating through the peer list to spread load. All fields are guarded by
+// lock.
+type LesTxRelay struct {
+	txSent       map[common.Hash]*ltrInfo  // all transactions ever handed to the relay (until Discard)
+	txPending    map[common.Hash]struct{}  // subset of txSent not yet seen in a mined block
+	ps           *peerSet
+	peerList     []*peer // cached snapshot of ps, refreshed on add/remove
+	peerStartPos int     // round-robin cursor into peerList
+	lock         sync.RWMutex
+}
+
+// NewLesTxRelay creates a transaction relay with empty bookkeeping maps and
+// an empty peer set.
+func NewLesTxRelay() *LesTxRelay {
+	return &LesTxRelay{
+		txSent:    make(map[common.Hash]*ltrInfo),
+		txPending: make(map[common.Hash]struct{}),
+		ps:        newPeerSet(),
+	}
+}
+
+// addPeer registers a new peer with the relay and refreshes the cached peer
+// list used for round-robin selection.
+func (self *LesTxRelay) addPeer(p *peer) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.ps.Register(p)
+	self.peerList = self.ps.AllPeers()
+}
+
+// removePeer unregisters a peer by id and refreshes the cached peer list.
+func (self *LesTxRelay) removePeer(id string) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.ps.Unregister(id)
+	self.peerList = self.ps.AllPeers()
+}
+
+// send sends a list of transactions to at most a given number of peers at
+// once, never resending any particular transaction to the same peer twice
+func (self *LesTxRelay) send(txs types.Transactions, count int) {
+	// Batch of transactions to deliver, grouped per recipient peer.
+	sendTo := make(map[*peer]types.Transactions)
+
+	self.peerStartPos++ // rotate the starting position of the peer list
+	if self.peerStartPos >= len(self.peerList) {
+		self.peerStartPos = 0
+	}
+
+	for _, tx := range txs {
+		hash := tx.Hash()
+		ltr, ok := self.txSent[hash]
+		if !ok {
+			// First time this transaction is seen: start tracking it and
+			// mark it pending until it shows up in a mined block.
+			ltr = &ltrInfo{
+				tx:     tx,
+				sentTo: make(map[*peer]struct{}),
+			}
+			self.txSent[hash] = ltr
+			self.txPending[hash] = struct{}{}
+		}
+
+		if len(self.peerList) > 0 {
+			cnt := count
+			pos := self.peerStartPos
+			// Walk the peer list at most once, starting at the rotated
+			// cursor, skipping peers that already received this tx.
+			for {
+				peer := self.peerList[pos]
+				if _, ok := ltr.sentTo[peer]; !ok {
+					sendTo[peer] = append(sendTo[peer], tx)
+					ltr.sentTo[peer] = struct{}{}
+					cnt--
+				}
+				if cnt == 0 {
+					break // sent it to the desired number of peers
+				}
+				pos++
+				if pos == len(self.peerList) {
+					pos = 0
+				}
+				if pos == self.peerStartPos {
+					break // tried all available peers
+				}
+			}
+		}
+	}
+
+	// Dispatch the per-peer batches concurrently; the flow-control request
+	// cost is charged to the server before the actual send.
+	for p, list := range sendTo {
+		cost := p.GetRequestCost(SendTxMsg, len(list))
+		go func(p *peer, list types.Transactions, cost uint64) {
+			p.fcServer.SendRequest(0, cost)
+			p.SendTxs(cost, list)
+		}(p, list, cost)
+	}
+}
+
+// Send relays the given transactions, delivering each to at most three peers.
+func (self *LesTxRelay) Send(txs types.Transactions) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	self.send(txs, 3)
+}
+
+// NewHead notifies the relay of a new chain head: transactions contained in
+// mined blocks stop being pending, transactions of rolled-back blocks become
+// pending again, and every still-pending transaction is re-broadcast to one
+// additional peer.
+func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	for _, hash := range mined {
+		delete(self.txPending, hash)
+	}
+
+	for _, hash := range rollback {
+		self.txPending[hash] = struct{}{}
+	}
+
+	if len(self.txPending) > 0 {
+		txs := make(types.Transactions, 0, len(self.txPending))
+		// gofmt -s form of the map range; the original used `hash, _ :=`.
+		for hash := range self.txPending {
+			// Guard against hashes that are pending but whose tracking entry
+			// was already removed by Discard; the original indexed txSent
+			// unchecked and would nil-deref in that case.
+			if ltr, ok := self.txSent[hash]; ok {
+				txs = append(txs, ltr.tx)
+			}
+		}
+		self.send(txs, 1)
+	}
+}
+
+// Discard stops tracking the given transaction hashes entirely; they will
+// neither be resent nor re-marked pending on chain reorganisations.
+func (self *LesTxRelay) Discard(hashes []common.Hash) {
+	self.lock.Lock()
+	defer self.lock.Unlock()
+
+	for _, hash := range hashes {
+		delete(self.txSent, hash)
+		delete(self.txPending, hash)
+	}
+}
diff --git a/light/lightchain.go b/light/lightchain.go
new file mode 100644
index 0000000000000000000000000000000000000000..461030369a02cf25cad80f7e4141c153f6444cce
--- /dev/null
+++ b/light/lightchain.go
@@ -0,0 +1,506 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"math/big"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/pow"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/hashicorp/golang-lru"
+	"golang.org/x/net/context"
+)
+
+var (
+	bodyCacheLimit  = 256 // maximum number of cached block bodies (plain and RLP)
+	blockCacheLimit = 256 // maximum number of cached entire blocks
+)
+
+// LightChain represents a canonical chain that by default only handles block
+// headers, downloading block bodies and receipts on demand through an ODR
+// interface. It only does header validation during chain insertion.
+type LightChain struct {
+	hc           *core.HeaderChain
+	chainDb      ethdb.Database
+	odr          OdrBackend
+	eventMux     *event.TypeMux
+	genesisBlock *types.Block
+
+	mu      sync.RWMutex // protects the chain head state
+	chainmu sync.RWMutex // serialises chain insertion
+	procmu  sync.RWMutex // protects the validator
+
+	bodyCache    *lru.Cache // Cache for the most recent block bodies
+	bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+	blockCache   *lru.Cache // Cache for the most recent entire blocks
+
+	quit    chan struct{}
+	running int32 // set atomically by Stop; must be accessed atomically
+	// procInterrupt must be accessed atomically
+	procInterrupt int32 // interrupt signaler for block processing
+	wg            sync.WaitGroup
+
+	pow       pow.PoW
+	validator core.HeaderValidator
+}
+
+// NewLightChain returns a fully initialised light chain using information
+// available in the database. It initialises the default Ethereum header
+// validator.
+func NewLightChain(odr OdrBackend, config *core.ChainConfig, pow pow.PoW, mux *event.TypeMux) (*LightChain, error) {
+	bodyCache, _ := lru.New(bodyCacheLimit)
+	bodyRLPCache, _ := lru.New(bodyCacheLimit)
+	blockCache, _ := lru.New(blockCacheLimit)
+
+	bc := &LightChain{
+		chainDb:      odr.Database(),
+		odr:          odr,
+		eventMux:     mux,
+		quit:         make(chan struct{}),
+		bodyCache:    bodyCache,
+		bodyRLPCache: bodyRLPCache,
+		blockCache:   blockCache,
+		pow:          pow,
+	}
+
+	var err error
+	bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.Validator, bc.getProcInterrupt)
+	// Check the construction error before bc.hc is used: the original called
+	// NewHeaderValidator with a possibly-nil header chain on the error path.
+	if err != nil {
+		return nil, err
+	}
+	bc.SetValidator(core.NewHeaderValidator(config, bc.hc, pow))
+
+	bc.genesisBlock, _ = bc.GetBlockByNumber(NoOdr, 0)
+	if bc.genesisBlock == nil {
+		bc.genesisBlock, err = core.WriteDefaultGenesisBlock(odr.Database())
+		if err != nil {
+			return nil, err
+		}
+		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+	}
+
+	// Register the hard-coded trusted CHT root matching the known genesis.
+	// NOTE(review): the byte literals are presumably the mainnet and testnet
+	// genesis hashes — confirm against the canonical params when refactoring.
+	if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
+		// add trusted CHT
+		if config.DAOForkSupport {
+			WriteTrustedCht(bc.chainDb, TrustedCht{
+				Number: 612,
+				Root:   common.HexToHash("8c87a93e0ee531e2aca1b4460e4c201a60c19ffec4f5979262bf14ceeeff8471"),
+			})
+		} else {
+			WriteTrustedCht(bc.chainDb, TrustedCht{
+				Number: 523,
+				Root:   common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
+			})
+		}
+		glog.V(logger.Info).Infoln("Added trusted CHT for mainnet")
+	} else if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
+		// add trusted CHT for testnet
+		WriteTrustedCht(bc.chainDb, TrustedCht{
+			Number: 436,
+			Root:   common.HexToHash("97a12df5d04d72bde4b4b840e1018e4f08aee34b7d0bf2c5dbfc052b86fe7439"),
+		})
+		glog.V(logger.Info).Infoln("Added trusted CHT for testnet")
+	} else {
+		DeleteTrustedCht(bc.chainDb)
+	}
+
+	if err := bc.loadLastState(); err != nil {
+		return nil, err
+	}
+	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
+	for hash := range core.BadHashes {
+		if header := bc.GetHeaderByHash(hash); header != nil {
+			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+			bc.SetHead(header.Number.Uint64() - 1)
+			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
+		}
+	}
+	return bc, nil
+}
+
+// getProcInterrupt reports whether block processing should be aborted
+// (set atomically by Stop).
+func (self *LightChain) getProcInterrupt() bool {
+	return atomic.LoadInt32(&self.procInterrupt) == 1
+}
+
+// Odr returns the ODR backend of the chain
+func (self *LightChain) Odr() OdrBackend {
+	return self.odr
+}
+
+// loadLastState loads the last known chain state from the database. This method
+// assumes that the chain manager mutex is held.
+func (self *LightChain) loadLastState() error {
+	if head := core.GetHeadHeaderHash(self.chainDb); head == (common.Hash{}) {
+		// Corrupt or empty database, init from scratch
+		self.Reset()
+	} else {
+		if header := self.GetHeaderByHash(head); header != nil {
+			self.hc.SetCurrentHeader(header)
+		}
+		// NOTE(review): if the stored head hash resolves to no header, the
+		// current header silently keeps its previous value.
+	}
+
+	// Issue a status log and return
+	header := self.hc.CurrentHeader()
+	headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
+	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
+
+	return nil
+}
+
+// SetHead rewinds the local chain to a new head. Everything above the new
+// head will be deleted and the new one set.
+func (bc *LightChain) SetHead(head uint64) {
+	bc.mu.Lock()
+	defer bc.mu.Unlock()
+
+	bc.hc.SetHead(head, nil)
+	// Re-derive the current head (and log it) after the rewind.
+	bc.loadLastState()
+}
+
+// GasLimit returns the gas limit of the current HEAD block.
+func (self *LightChain) GasLimit() *big.Int {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.hc.CurrentHeader().GasLimit
+}
+
+// LastBlockHash returns the hash of the HEAD block.
+func (self *LightChain) LastBlockHash() common.Hash {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.hc.CurrentHeader().Hash()
+}
+
+// Status returns status information about the current chain such as the HEAD Td,
+// the HEAD hash and the hash of the genesis block.
+func (self *LightChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	header := self.hc.CurrentHeader()
+	hash := header.Hash()
+	return self.GetTd(hash, header.Number.Uint64()), hash, self.genesisBlock.Hash()
+}
+
+// SetValidator sets the validator which is used to validate incoming headers.
+func (self *LightChain) SetValidator(validator core.HeaderValidator) {
+	self.procmu.Lock()
+	defer self.procmu.Unlock()
+	self.validator = validator
+}
+
+// Validator returns the current header validator.
+func (self *LightChain) Validator() core.HeaderValidator {
+	self.procmu.RLock()
+	defer self.procmu.RUnlock()
+	return self.validator
+}
+
+// State returns a new mutable state based on the current HEAD block.
+// NOTE(review): reads the current header without taking mu, unlike the
+// other head accessors — confirm this is intentional.
+func (self *LightChain) State() *LightState {
+	return NewLightState(StateTrieID(self.hc.CurrentHeader()), self.odr)
+}
+
+// Reset purges the entire blockchain, restoring it to its genesis state.
+func (bc *LightChain) Reset() {
+	bc.ResetWithGenesisBlock(bc.genesisBlock)
+}
+
+// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
+// specified genesis state.
+func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
+	// Dump the entire block chain and purge the caches
+	bc.SetHead(0)
+
+	bc.mu.Lock()
+	defer bc.mu.Unlock()
+
+	// Prepare the genesis block and reinitialise the chain.
+	// Failure to persist the genesis is unrecoverable, hence Fatalf.
+	if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
+		glog.Fatalf("failed to write genesis block TD: %v", err)
+	}
+	if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
+		glog.Fatalf("failed to write genesis block: %v", err)
+	}
+	bc.genesisBlock = genesis
+	bc.hc.SetGenesis(bc.genesisBlock.Header())
+	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
+}
+
+// Accessors
+
+// Genesis returns the genesis block
+func (bc *LightChain) Genesis() *types.Block {
+	return bc.genesisBlock
+}
+
+// GetBody retrieves a block body (transactions and uncles) from the database
+// or ODR service by hash, caching it if found.
+func (self *LightChain) GetBody(ctx context.Context, hash common.Hash) (*types.Body, error) {
+	// Short circuit if the body's already in the cache, retrieve otherwise
+	if cached, ok := self.bodyCache.Get(hash); ok {
+		body := cached.(*types.Body)
+		return body, nil
+	}
+	// The block number is resolved locally from the hash before the ODR fetch.
+	body, err := GetBody(ctx, self.odr, hash, self.hc.GetBlockNumber(hash))
+	if err != nil {
+		return nil, err
+	}
+	// Cache the found body for next time and return
+	self.bodyCache.Add(hash, body)
+	return body, nil
+}
+
+// GetBodyRLP retrieves a block body in RLP encoding from the database or
+// ODR service by hash, caching it if found.
+func (self *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.RawValue, error) {
+	// Short circuit if the body's already in the cache, retrieve otherwise
+	if cached, ok := self.bodyRLPCache.Get(hash); ok {
+		return cached.(rlp.RawValue), nil
+	}
+	body, err := GetBodyRLP(ctx, self.odr, hash, self.hc.GetBlockNumber(hash))
+	if err != nil {
+		return nil, err
+	}
+	// Cache the found body for next time and return
+	self.bodyRLPCache.Add(hash, body)
+	return body, nil
+}
+
+// HasBlock checks if a block is fully present in the database or not, caching
+// it if present. The NoOdr context restricts the lookup to local data only
+// (presumably — confirm NoOdr semantics against the odr definitions).
+func (bc *LightChain) HasBlock(hash common.Hash) bool {
+	blk, _ := bc.GetBlockByHash(NoOdr, hash)
+	return blk != nil
+}
+
+// GetBlock retrieves a block from the database or ODR service by hash and number,
+// caching it if found.
+func (self *LightChain) GetBlock(ctx context.Context, hash common.Hash, number uint64) (*types.Block, error) {
+	// Short circuit if the block's already in the cache, retrieve otherwise
+	if block, ok := self.blockCache.Get(hash); ok {
+		return block.(*types.Block), nil
+	}
+	block, err := GetBlock(ctx, self.odr, hash, number)
+	if err != nil {
+		return nil, err
+	}
+	// Cache the found block for next time and return
+	self.blockCache.Add(block.Hash(), block)
+	return block, nil
+}
+
+// GetBlockByHash retrieves a block from the database or ODR service by hash,
+// caching it if found.
+func (self *LightChain) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
+	return self.GetBlock(ctx, hash, self.hc.GetBlockNumber(hash))
+}
+
+// GetBlockByNumber retrieves a block from the database or ODR service by
+// number, caching it (associated with its hash) if found.
+func (self *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*types.Block, error) {
+	hash, err := GetCanonicalHash(ctx, self.odr, number)
+	if hash == (common.Hash{}) || err != nil {
+		return nil, err
+	}
+	return self.GetBlock(ctx, hash, number)
+}
+
+// Stop stops the blockchain service. If any imports are currently in progress
+// it will abort them using the procInterrupt.
+func (bc *LightChain) Stop() {
+	// CAS on running makes Stop idempotent.
+	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
+		return
+	}
+	close(bc.quit)
+	atomic.StoreInt32(&bc.procInterrupt, 1)
+
+	// Wait for any in-flight header insertions to finish.
+	bc.wg.Wait()
+
+	glog.V(logger.Info).Infoln("Chain manager stopped")
+}
+
+// Rollback is designed to remove a chain of links from the database that aren't
+// certain enough to be valid.
+func (self *LightChain) Rollback(chain []common.Hash) {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
+	// Walk the hashes newest-first; whenever one is the current head, step
+	// the head back to its parent.
+	for i := len(chain) - 1; i >= 0; i-- {
+		hash := chain[i]
+
+		if head := self.hc.CurrentHeader(); head.Hash() == hash {
+			self.hc.SetCurrentHeader(self.GetHeader(head.ParentHash, head.Number.Uint64()-1))
+		}
+	}
+}
+
+// postChainEvents iterates over the events generated by a chain insertion and
+// posts them into the event mux.
+func (self *LightChain) postChainEvents(events []interface{}) {
+	for _, event := range events {
+		if event, ok := event.(core.ChainEvent); ok {
+			// Only announce a new chain head if this event's block is still
+			// the current head at posting time.
+			if self.LastBlockHash() == event.Hash {
+				self.eventMux.Post(core.ChainHeadEvent{Block: event.Block})
+			}
+		}
+		// Fire the insertion events individually too
+		self.eventMux.Post(event)
+	}
+}
+
+// InsertHeaderChain attempts to insert the given header chain in to the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+//
+// In the case of a light chain, InsertHeaderChain also creates and posts light
+// chain events when necessary.
+func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	// Make sure only one thread manipulates the chain at once
+	self.chainmu.Lock()
+	defer self.chainmu.Unlock()
+
+	// Track the insertion so Stop can wait for it to finish.
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	var events []interface{}
+	// whFunc is invoked by the header chain for each validated header; it
+	// writes the header and records the matching chain event.
+	whFunc := func(header *types.Header) error {
+		self.mu.Lock()
+		defer self.mu.Unlock()
+
+		status, err := self.hc.WriteHeader(header)
+
+		switch status {
+		case core.CanonStatTy:
+			if glog.V(logger.Debug) {
+				glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
+			}
+			events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
+
+		case core.SideStatTy:
+			if glog.V(logger.Detail) {
+				glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
+			}
+			events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
+
+		case core.SplitStatTy:
+			events = append(events, core.ChainSplitEvent{Block: types.NewBlockWithHeader(header)})
+		}
+
+		return err
+	}
+	i, err := self.hc.InsertHeaderChain(chain, checkFreq, whFunc)
+	// Events are posted asynchronously so insertion is not blocked by
+	// slow mux subscribers.
+	go self.postChainEvents(events)
+	return i, err
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (self *LightChain) CurrentHeader() *types.Header {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.hc.CurrentHeader()
+}
+
+// The following accessors are thin delegations to the underlying HeaderChain.
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash and number, caching it if found.
+func (self *LightChain) GetTd(hash common.Hash, number uint64) *big.Int {
+	return self.hc.GetTd(hash, number)
+}
+
+// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (self *LightChain) GetTdByHash(hash common.Hash) *big.Int {
+	return self.hc.GetTdByHash(hash)
+}
+
+// GetHeader retrieves a block header from the database by hash and number,
+// caching it if found.
+func (self *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header {
+	return self.hc.GetHeader(hash, number)
+}
+
+// GetHeaderByHash retrieves a block header from the database by hash, caching it if
+// found.
+func (self *LightChain) GetHeaderByHash(hash common.Hash) *types.Header {
+	return self.hc.GetHeaderByHash(hash)
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *LightChain) HasHeader(hash common.Hash) bool {
+	return bc.hc.HasHeader(hash)
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *LightChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+	return self.hc.GetBlockHashesFromHash(hash, max)
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *LightChain) GetHeaderByNumber(number uint64) *types.Header {
+	return self.hc.GetHeaderByNumber(number)
+}
+
+// GetHeaderByNumberOdr retrieves a block header from the database or network
+// by number, caching it (associated with its hash) if found. The local
+// database is tried first; only a miss falls through to the ODR fetch.
+func (self *LightChain) GetHeaderByNumberOdr(ctx context.Context, number uint64) (*types.Header, error) {
+	if header := self.hc.GetHeaderByNumber(number); header != nil {
+		return header, nil
+	}
+	return GetHeaderByNumber(ctx, self.odr, number)
+}
+
+// SyncCht fast-forwards the local header chain head to the last header of the
+// trusted CHT section when we are behind it, fetching that header via ODR.
+// It returns true if the head was advanced. Presumably cht.Number is zero
+// when no trusted CHT is stored, making the condition below false — confirm
+// against GetTrustedCht.
+func (self *LightChain) SyncCht(ctx context.Context) bool {
+	headNum := self.CurrentHeader().Number.Uint64()
+	cht := GetTrustedCht(self.chainDb)
+	if headNum+1 < cht.Number*ChtFrequency {
+		// Last block number covered by the trusted CHT section.
+		num := cht.Number*ChtFrequency - 1
+		header, err := GetHeaderByNumber(ctx, self.odr, num)
+		if header != nil && err == nil {
+			self.mu.Lock()
+			// Only ever advance the head, never rewind it.
+			if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
+				self.hc.SetCurrentHeader(header)
+			}
+			self.mu.Unlock()
+			return true
+		}
+	}
+	return false
+}
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e42feb026cbc14f478eecb7f1b07bc7311797b2e
--- /dev/null
+++ b/light/lightchain_test.go
@@ -0,0 +1,403 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"fmt"
+	"math/big"
+	"runtime"
+	"testing"
+
+	"github.com/ethereum/ethash"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/pow"
+	"github.com/hashicorp/golang-lru"
+	"golang.org/x/net/context"
+)
+
+// So we can deterministically seed different blockchains
+var (
+	canonicalSeed = 1
+	forkSeed      = 2
+)
+
+// makeHeaderChain creates a deterministic chain of headers rooted at parent.
+func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
+	blocks, _ := core.GenerateChain(nil, types.NewBlockWithHeader(parent), db, n, func(i int, b *core.BlockGen) {
+		// Vary the coinbase by seed/index so chains with different seeds diverge.
+		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
+	})
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	return headers
+}
+
+// testChainConfig returns a chain config with Homestead active from genesis.
+func testChainConfig() *core.ChainConfig {
+	return &core.ChainConfig{HomesteadBlock: big.NewInt(0)}
+}
+
+// newCanonical creates a chain database, and injects a deterministic canonical
+// header chain of length n (the light chain only handles headers).
+func newCanonical(n int) (ethdb.Database, *LightChain, error) {
+	// Create the new chain database
+	db, _ := ethdb.NewMemDatabase()
+	evmux := &event.TypeMux{}
+
+	// Initialize a fresh chain with only a genesis block
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+
+	blockchain, _ := NewLightChain(&dummyOdr{db: db}, testChainConfig(), core.FakePow{}, evmux)
+	// Create and inject the requested chain
+	if n == 0 {
+		return db, blockchain, nil
+	}
+	// Header-only chain requested
+	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
+	_, err := blockchain.InsertHeaderChain(headers, 1)
+	return db, blockchain, err
+}
+
+func init() {
+	// Use all available cores (redundant since Go 1.5, where it is the default).
+	runtime.GOMAXPROCS(runtime.NumCPU())
+}
+
+// thePow returns an ethash instance configured for testing.
+func thePow() pow.PoW {
+	pow, _ := ethash.NewForTesting()
+	return pow
+}
+
+// theLightChain constructs a LightChain over db using the test PoW, aborting
+// the test if construction fails.
+func theLightChain(db ethdb.Database, t *testing.T) *LightChain {
+	var eventMux event.TypeMux
+	core.WriteTestNetGenesisBlock(db)
+	LightChain, err := NewLightChain(&dummyOdr{db: db}, testChainConfig(), thePow(), &eventMux)
+	if err != nil {
+		t.Error("failed creating LightChain:", err)
+		t.FailNow()
+		return nil
+	}
+
+	return LightChain
+}
+
+// Test fork of length N starting from block i
+func testFork(t *testing.T, LightChain *LightChain, i, n int, comparator func(td1, td2 *big.Int)) {
+	// Copy old chain up to #i into a new db
+	db, LightChain2, err := newCanonical(i)
+	if err != nil {
+		t.Fatal("could not make new canonical in testFork", err)
+	}
+	// Assert the chains have the same header/block at #i
+	var hash1, hash2 common.Hash
+	hash1 = LightChain.GetHeaderByNumber(uint64(i)).Hash()
+	hash2 = LightChain2.GetHeaderByNumber(uint64(i)).Hash()
+	if hash1 != hash2 {
+		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
+	}
+	// Extend the newly created chain
+	var (
+		headerChainB []*types.Header
+	)
+	headerChainB = makeHeaderChain(LightChain2.CurrentHeader(), n, db, forkSeed)
+	if _, err := LightChain2.InsertHeaderChain(headerChainB, 1); err != nil {
+		t.Fatalf("failed to insert forking chain: %v", err)
+	}
+	// Sanity check that the forked chain can be imported into the original
+	var tdPre, tdPost *big.Int
+
+	tdPre = LightChain.GetTdByHash(LightChain.CurrentHeader().Hash())
+	if err := testHeaderChainImport(headerChainB, LightChain); err != nil {
+		t.Fatalf("failed to import forked header chain: %v", err)
+	}
+	tdPost = LightChain.GetTdByHash(headerChainB[len(headerChainB)-1].Hash())
+	// Compare the total difficulties of the chains; the caller-supplied
+	// comparator encodes the expected ordering (better/worse/equal).
+	comparator(tdPre, tdPost)
+}
+
+// printChain dumps the chain's headers from head to genesis (debug helper).
+func printChain(bc *LightChain) {
+	for i := bc.CurrentHeader().Number.Uint64(); i > 0; i-- {
+		b := bc.GetHeaderByNumber(uint64(i))
+		fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty)
+	}
+}
+
+// testHeaderChainImport tries to process a chain of header, writing them into
+// the database if successful.
+func testHeaderChainImport(chain []*types.Header, LightChain *LightChain) error {
+	for _, header := range chain {
+		// Try and validate the header
+		if err := LightChain.Validator().ValidateHeader(header, LightChain.GetHeaderByHash(header.ParentHash), false); err != nil {
+			return err
+		}
+		// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
+		// TD is accumulated by hand: parent TD plus this header's difficulty.
+		LightChain.mu.Lock()
+		core.WriteTd(LightChain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, LightChain.GetTdByHash(header.ParentHash)))
+		core.WriteHeader(LightChain.chainDb, header)
+		LightChain.mu.Unlock()
+	}
+	return nil
+}
+
+// Tests that given a starting canonical chain of a given size, it can be extended
+// with various length chains.
+func TestExtendCanonicalHeaders(t *testing.T) {
+	length := 5
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	// Define the difficulty comparator
+	better := func(td1, td2 *big.Int) {
+		if td2.Cmp(td1) <= 0 {
+			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
+		}
+	}
+	// Start fork from current height: extensions must always raise the TD.
+	testFork(t, processor, length, 1, better)
+	testFork(t, processor, length, 2, better)
+	testFork(t, processor, length, 5, better)
+	testFork(t, processor, length, 10, better)
+}
+
+// Tests that given a starting canonical chain of a given size, creating shorter
+// forks do not take canonical ownership.
+func TestShorterForkHeaders(t *testing.T) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	// Define the difficulty comparator
+	worse := func(td1, td2 *big.Int) {
+		if td2.Cmp(td1) >= 0 {
+			t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
+		}
+	}
+	// Sum of numbers (fork point + fork length) must be less than `length`
+	// for this to be a shorter fork
+	testFork(t, processor, 0, 3, worse)
+	testFork(t, processor, 0, 7, worse)
+	testFork(t, processor, 1, 1, worse)
+	testFork(t, processor, 1, 7, worse)
+	testFork(t, processor, 5, 3, worse)
+	testFork(t, processor, 5, 4, worse)
+}
+
+// Tests that given a starting canonical chain of a given size, creating longer
+// forks do take canonical ownership.
+func TestLongerForkHeaders(t *testing.T) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	// Define the difficulty comparator
+	better := func(td1, td2 *big.Int) {
+		if td2.Cmp(td1) <= 0 {
+			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
+		}
+	}
+	// Sum of numbers (fork point + fork length) must be greater than `length`
+	// for this to be a longer fork
+	testFork(t, processor, 0, 11, better)
+	testFork(t, processor, 0, 15, better)
+	testFork(t, processor, 1, 10, better)
+	testFork(t, processor, 1, 12, better)
+	testFork(t, processor, 5, 6, better)
+	testFork(t, processor, 5, 8, better)
+}
+
+// Tests that given a starting canonical chain of a given size, creating equal
+// forks do take canonical ownership.
+func TestEqualForkHeaders(t *testing.T) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	// Define the difficulty comparator
+	equal := func(td1, td2 *big.Int) {
+		if td2.Cmp(td1) != 0 {
+			t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
+		}
+	}
+	// Sum of numbers (fork point + fork length) must be equal to `length`
+	// for this to be an equal fork
+	testFork(t, processor, 0, 10, equal)
+	testFork(t, processor, 1, 9, equal)
+	testFork(t, processor, 2, 8, equal)
+	testFork(t, processor, 5, 5, equal)
+	testFork(t, processor, 6, 4, equal)
+	testFork(t, processor, 9, 1, equal)
+}
+
+// Tests that chains missing links do not get accepted by the processor.
+func TestBrokenHeaderChain(t *testing.T) {
+	// Make chain starting from genesis
+	db, LightChain, err := newCanonical(10)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	// Create a forked chain, and try to insert with a missing link
+	// ([1:] drops the first fork header, breaking the parent linkage).
+	chain := makeHeaderChain(LightChain.CurrentHeader(), 5, db, forkSeed)[1:]
+	if err := testHeaderChainImport(chain, LightChain); err == nil {
+		t.Errorf("broken header chain not reported")
+	}
+}
+
+// bproc is a no-op header validator used to bypass validation in reorg tests.
+type bproc struct{}
+
+func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
+
+// makeHeaderChainWithDiff builds a header chain on top of genesis whose
+// per-block difficulties are given by d; seed distinguishes parallel chains.
+func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
+	var chain []*types.Header
+	for i, difficulty := range d {
+		header := &types.Header{
+			Coinbase:    common.Address{seed},
+			Number:      big.NewInt(int64(i + 1)),
+			Difficulty:  big.NewInt(int64(difficulty)),
+			UncleHash:   types.EmptyUncleHash,
+			TxHash:      types.EmptyRootHash,
+			ReceiptHash: types.EmptyRootHash,
+		}
+		if i == 0 {
+			header.ParentHash = genesis.Hash()
+		} else {
+			header.ParentHash = chain[i-1].Hash()
+		}
+		chain = append(chain, types.CopyHeader(header))
+	}
+	return chain
+}
+
+// dummyOdr is an OdrBackend stub backed only by a local database; Retrieve
+// never fetches anything.
+type dummyOdr struct {
+	OdrBackend
+	db ethdb.Database
+}
+
+func (odr *dummyOdr) Database() ethdb.Database {
+	return odr.db
+}
+
+func (odr *dummyOdr) Retrieve(ctx context.Context, req OdrRequest) error {
+	return nil
+}
+
+// chm builds a minimal LightChain around db with header validation disabled.
+func chm(genesis *types.Block, db ethdb.Database) *LightChain {
+	odr := &dummyOdr{db: db}
+	var eventMux event.TypeMux
+	bc := &LightChain{odr: odr, chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: core.FakePow{}}
+	bc.hc, _ = core.NewHeaderChain(db, testChainConfig(), bc.Validator, bc.getProcInterrupt)
+	bc.bodyCache, _ = lru.New(100)
+	bc.bodyRLPCache, _ = lru.New(100)
+	bc.blockCache, _ = lru.New(100)
+	bc.SetValidator(bproc{})
+	bc.ResetWithGenesisBlock(genesis)
+
+	return bc
+}
+
+// Tests that reorganizing a long difficult chain after a short easy one
+// overwrites the canonical numbers and links in the database.
+func TestReorgLongHeaders(t *testing.T) {
+	testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10)
+}
+
+// Tests that reorganizing a short difficult chain after a long easy one
+// overwrites the canonical numbers and links in the database.
+func TestReorgShortHeaders(t *testing.T) {
+	testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11)
+}
+
+func testReorg(t *testing.T, first, second []int, td int64) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+	bc := chm(genesis, db)
+
+	// Insert an easy and a difficult chain afterwards
+	bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), 1)
+	bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), 1)
+	// Check that the chain is valid number and link wise
+	prev := bc.CurrentHeader()
+	for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
+		if prev.ParentHash != header.Hash() {
+			t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
+		}
+	}
+	// Make sure the chain total difficulty is the correct one
+	want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
+	if have := bc.GetTdByHash(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
+		t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
+	}
+}
+
+// Tests that the insertion functions detect banned hashes.
+func TestBadHeaderHashes(t *testing.T) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+	bc := chm(genesis, db)
+
+	// Create a chain, ban a hash and try to import
+	var err error
+	headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
+	core.BadHashes[headers[2].Hash()] = true
+	_, err = bc.InsertHeaderChain(headers, 1)
+	if !core.IsBadHashError(err) {
+		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
+	}
+}
+
+// Tests that bad hashes are detected on boot, and the chain rolled back to a
+// good state prior to the bad hash.
+func TestReorgBadHeaderHashes(t *testing.T) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+	bc := chm(genesis, db)
+
+	// Create a chain, import and ban afterwards
+	headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
+
+	if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
+		t.Fatalf("failed to import headers: %v", err)
+	}
+	if bc.CurrentHeader().Hash() != headers[3].Hash() {
+		t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
+	}
+	core.BadHashes[headers[3].Hash()] = true
+	defer func() { delete(core.BadHashes, headers[3].Hash()) }()
+	// Create a new chain manager and check it rolled back the state
+	ncm, err := NewLightChain(&dummyOdr{db: db}, testChainConfig(), core.FakePow{}, new(event.TypeMux))
+	if err != nil {
+		t.Fatalf("failed to create new chain manager: %v", err)
+	}
+	if ncm.CurrentHeader().Hash() != headers[2].Hash() {
+		t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
+	}
+}
diff --git a/light/odr.go b/light/odr.go
index 4c69040ef869738cf76457ddba766cdaa42a2e5f..679569bf903a488b76674fff75ab9d04103e579b 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -19,14 +19,22 @@
 package light
 
 import (
+	"math/big"
+
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
 	"golang.org/x/net/context"
 )
 
-// OdrBackend is an interface to a backend service that handles odr retrievals
+// NoOdr is the default context passed to an ODR capable function when the ODR
+// service is not required.
+var NoOdr = context.Background()
+
+// OdrBackend is an interface to a backend service that handles ODR retrievals
 type OdrBackend interface {
 	Database() ethdb.Database
 	Retrieve(ctx context.Context, req OdrRequest) error
@@ -37,17 +45,44 @@ type OdrRequest interface {
 	StoreResult(db ethdb.Database)
 }
 
+// TrieID identifies a state or account storage trie
+type TrieID struct {
+	BlockHash, Root common.Hash
+	AccKey          []byte
+}
+
+// StateTrieID returns a TrieID for a state trie belonging to a certain block
+// header.
+func StateTrieID(header *types.Header) *TrieID {
+	return &TrieID{
+		BlockHash: header.Hash(),
+		AccKey:    nil,
+		Root:      header.Root,
+	}
+}
+
+// StorageTrieID returns a TrieID for a contract storage trie at a given account
+// of a given state trie. It also requires the root hash of the trie for
+// checking Merkle proofs.
+func StorageTrieID(state *TrieID, addr common.Address, root common.Hash) *TrieID {
+	return &TrieID{
+		BlockHash: state.BlockHash,
+		AccKey:    crypto.Keccak256(addr[:]),
+		Root:      root,
+	}
+}
+
 // TrieRequest is the ODR request type for state/storage trie entries
 type TrieRequest struct {
 	OdrRequest
-	root  common.Hash
-	key   []byte
-	proof []rlp.RawValue
+	Id    *TrieID
+	Key   []byte
+	Proof []rlp.RawValue
 }
 
 // StoreResult stores the retrieved data in local database
 func (req *TrieRequest) StoreResult(db ethdb.Database) {
-	storeProof(db, req.proof)
+	storeProof(db, req.Proof)
 }
 
 // storeProof stores the new trie nodes obtained from a merkle proof in the database
@@ -61,38 +96,61 @@ func storeProof(db ethdb.Database, proof []rlp.RawValue) {
 	}
 }
 
-// NodeDataRequest is the ODR request type for node data (used for retrieving contract code)
-type NodeDataRequest struct {
+// CodeRequest is the ODR request type for retrieving contract code
+type CodeRequest struct {
 	OdrRequest
-	hash common.Hash
-	data []byte
+	Id   *TrieID
+	Hash common.Hash
+	Data []byte
 }
 
-// GetData returns the retrieved node data after a successful request
-func (req *NodeDataRequest) GetData() []byte {
-	return req.data
+// StoreResult stores the retrieved data in local database
+func (req *CodeRequest) StoreResult(db ethdb.Database) {
+	db.Put(req.Hash[:], req.Data)
+}
+
+// BlockRequest is the ODR request type for retrieving block bodies
+type BlockRequest struct {
+	OdrRequest
+	Hash   common.Hash
+	Number uint64
+	Rlp    []byte
 }
 
 // StoreResult stores the retrieved data in local database
-func (req *NodeDataRequest) StoreResult(db ethdb.Database) {
-	db.Put(req.hash[:], req.GetData())
+func (req *BlockRequest) StoreResult(db ethdb.Database) {
+	core.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
 }
 
-var sha3_nil = crypto.Keccak256Hash(nil)
+// ReceiptsRequest is the ODR request type for retrieving block receipts
+type ReceiptsRequest struct {
+	OdrRequest
+	Hash     common.Hash
+	Number   uint64
+	Receipts types.Receipts
+}
 
-// retrieveNodeData tries to retrieve node data with the given hash from the network
-func retrieveNodeData(ctx context.Context, odr OdrBackend, hash common.Hash) ([]byte, error) {
-	if hash == sha3_nil {
-		return nil, nil
-	}
-	res, _ := odr.Database().Get(hash[:])
-	if res != nil {
-		return res, nil
-	}
-	r := &NodeDataRequest{hash: hash}
-	if err := odr.Retrieve(ctx, r); err != nil {
-		return nil, err
-	} else {
-		return r.GetData(), nil
-	}
+// StoreResult stores the retrieved data in local database
+func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
+	core.WriteBlockReceipts(db, req.Hash, req.Number, req.Receipts)
+}
+
+// ChtRequest is the ODR request type for retrieving a header by number from the Canonical Hash Trie
+type ChtRequest struct {
+	OdrRequest
+	ChtNum, BlockNum uint64
+	ChtRoot          common.Hash
+	Header           *types.Header
+	Td               *big.Int
+	Proof            []rlp.RawValue
+}
+
+// StoreResult stores the retrieved data in local database
+func (req *ChtRequest) StoreResult(db ethdb.Database) {
+	// if there is a canonical hash, there is a header too
+	core.WriteHeader(db, req.Header)
+	hash, num := req.Header.Hash(), req.Header.Number.Uint64()
+	core.WriteTd(db, hash, num, req.Td)
+	core.WriteCanonicalHash(db, hash, num)
+	//storeProof(db, req.Proof)
 }
diff --git a/light/odr_test.go b/light/odr_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c83e91c688c5284b4fc2d70bd1ff7ca7cbb1d34
--- /dev/null
+++ b/light/odr_test.go
@@ -0,0 +1,339 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"bytes"
+	"errors"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+	"golang.org/x/net/context"
+)
+
+var (
+	testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
+	testBankFunds   = big.NewInt(100000000)
+
+	acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+	acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
+	acc1Addr   = crypto.PubkeyToAddress(acc1Key.PublicKey)
+	acc2Addr   = crypto.PubkeyToAddress(acc2Key.PublicKey)
+
+	testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
+	testContractAddr common.Address
+)
+
+type testOdr struct {
+	OdrBackend
+	sdb, ldb ethdb.Database
+	disable  bool
+}
+
+func (odr *testOdr) Database() ethdb.Database {
+	return odr.ldb
+}
+
+var ErrOdrDisabled = errors.New("ODR disabled")
+
+func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
+	if odr.disable {
+		return ErrOdrDisabled
+	}
+	switch req := req.(type) {
+	case *BlockRequest:
+		req.Rlp = core.GetBodyRLP(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash))
+	case *ReceiptsRequest:
+		req.Receipts = core.GetBlockReceipts(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash))
+	case *TrieRequest:
+		t, _ := trie.New(req.Id.Root, odr.sdb)
+		req.Proof = t.Prove(req.Key)
+	case *CodeRequest:
+		req.Data, _ = odr.sdb.Get(req.Hash[:])
+	}
+	req.StoreResult(odr.ldb)
+	return nil
+}
+
+type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte
+
+func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, 1, odrGetBlock) }
+
+func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte {
+	var block *types.Block
+	if bc != nil {
+		block = bc.GetBlockByHash(bhash)
+	} else {
+		block, _ = lc.GetBlockByHash(ctx, bhash)
+	}
+	if block == nil {
+		return nil
+	}
+	rlp, _ := rlp.EncodeToBytes(block)
+	return rlp
+}
+
+func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, 1, odrGetReceipts) }
+
+func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte {
+	var receipts types.Receipts
+	if bc != nil {
+		receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash))
+	} else {
+		receipts, _ = GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash))
+	}
+	if receipts == nil {
+		return nil
+	}
+	rlp, _ := rlp.EncodeToBytes(receipts)
+	return rlp
+}
+
+func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, 1, odrAccounts) }
+
+func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte {
+	dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
+	acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr}
+
+	var res []byte
+	for _, addr := range acc {
+		if bc != nil {
+			header := bc.GetHeaderByHash(bhash)
+			st, err := state.New(header.Root, db)
+			if err == nil {
+				bal := st.GetBalance(addr)
+				rlp, _ := rlp.EncodeToBytes(bal)
+				res = append(res, rlp...)
+			}
+		} else {
+			header := lc.GetHeaderByHash(bhash)
+			st := NewLightState(StateTrieID(header), lc.Odr())
+			bal, err := st.GetBalance(ctx, addr)
+			if err == nil {
+				rlp, _ := rlp.EncodeToBytes(bal)
+				res = append(res, rlp...)
+			}
+		}
+	}
+
+	return res
+}
+
+func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, 2, odrContractCall) }
+
+// fullcallmsg is the message type used for call transactions.
+type fullcallmsg struct {
+	from          *state.StateObject
+	to            *common.Address
+	gas, gasPrice *big.Int
+	value         *big.Int
+	data          []byte
+}
+
+// accessor boilerplate to implement core.Message
+func (m fullcallmsg) From() (common.Address, error)         { return m.from.Address(), nil }
+func (m fullcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil }
+func (m fullcallmsg) Nonce() uint64                         { return 0 }
+func (m fullcallmsg) CheckNonce() bool                      { return false }
+func (m fullcallmsg) To() *common.Address                   { return m.to }
+func (m fullcallmsg) GasPrice() *big.Int                    { return m.gasPrice }
+func (m fullcallmsg) Gas() *big.Int                         { return m.gas }
+func (m fullcallmsg) Value() *big.Int                       { return m.value }
+func (m fullcallmsg) Data() []byte                          { return m.data }
+
+// lightcallmsg is the message type used for call transactions.
+type lightcallmsg struct {
+	from          *StateObject
+	to            *common.Address
+	gas, gasPrice *big.Int
+	value         *big.Int
+	data          []byte
+}
+
+// accessor boilerplate to implement core.Message
+func (m lightcallmsg) From() (common.Address, error)         { return m.from.Address(), nil }
+func (m lightcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil }
+func (m lightcallmsg) Nonce() uint64                         { return 0 }
+func (m lightcallmsg) CheckNonce() bool                      { return false }
+func (m lightcallmsg) To() *common.Address                   { return m.to }
+func (m lightcallmsg) GasPrice() *big.Int                    { return m.gasPrice }
+func (m lightcallmsg) Gas() *big.Int                         { return m.gas }
+func (m lightcallmsg) Value() *big.Int                       { return m.value }
+func (m lightcallmsg) Data() []byte                          { return m.data }
+
+func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte {
+	data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000")
+
+	var res []byte
+	for i := 0; i < 3; i++ {
+		data[35] = byte(i)
+		if bc != nil {
+			header := bc.GetHeaderByHash(bhash)
+			statedb, err := state.New(header.Root, db)
+			if err == nil {
+				from := statedb.GetOrNewStateObject(testBankAddress)
+				from.SetBalance(common.MaxBig)
+
+				msg := fullcallmsg{
+					from:     from,
+					gas:      big.NewInt(100000),
+					gasPrice: big.NewInt(0),
+					value:    big.NewInt(0),
+					data:     data,
+					to:       &testContractAddr,
+				}
+
+				vmenv := core.NewEnv(statedb, testChainConfig(), bc, msg, header, vm.Config{})
+				gp := new(core.GasPool).AddGas(common.MaxBig)
+				ret, _, _ := core.ApplyMessage(vmenv, msg, gp)
+				res = append(res, ret...)
+			}
+		} else {
+			header := lc.GetHeaderByHash(bhash)
+			state := NewLightState(StateTrieID(header), lc.Odr())
+			from, err := state.GetOrNewStateObject(ctx, testBankAddress)
+			if err == nil {
+				from.SetBalance(common.MaxBig)
+
+				msg := lightcallmsg{
+					from:     from,
+					gas:      big.NewInt(100000),
+					gasPrice: big.NewInt(0),
+					value:    big.NewInt(0),
+					data:     data,
+					to:       &testContractAddr,
+				}
+
+				vmenv := NewEnv(ctx, state, testChainConfig(), lc, msg, header, vm.Config{})
+				gp := new(core.GasPool).AddGas(common.MaxBig)
+				ret, _, _ := core.ApplyMessage(vmenv, msg, gp)
+				if vmenv.Error() == nil {
+					res = append(res, ret...)
+				}
+			}
+		}
+	}
+	return res
+}
+
+func testChainGen(i int, block *core.BlockGen) {
+	switch i {
+	case 0:
+		// In block 1, the test bank sends account #1 some ether.
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	case 1:
+		// In block 2, the test bank sends some more ether to account #1.
+		// acc1Addr passes it on to account #2.
+		// acc1Addr creates a test contract.
+		tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey)
+		nonce := block.TxNonce(acc1Addr)
+		tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key)
+		nonce++
+		tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(1000000), big.NewInt(0), testContractCode).SignECDSA(acc1Key)
+		testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
+		block.AddTx(tx1)
+		block.AddTx(tx2)
+		block.AddTx(tx3)
+	case 2:
+		// Block 3 is empty but was mined by account #2.
+		block.SetCoinbase(acc2Addr)
+		block.SetExtra([]byte("yeehaw"))
+		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	case 3:
+		// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
+		b2 := block.PrevBlock(1).Header()
+		b2.Extra = []byte("foo")
+		block.AddUncle(b2)
+		b3 := block.PrevBlock(2).Header()
+		b3.Extra = []byte("foo")
+		block.AddUncle(b3)
+		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
+		tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey)
+		block.AddTx(tx)
+	}
+}
+
+func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
+	var (
+		evmux   = new(event.TypeMux)
+		pow     = new(core.FakePow)
+		sdb, _  = ethdb.NewMemDatabase()
+		ldb, _  = ethdb.NewMemDatabase()
+		genesis = core.WriteGenesisBlockForTesting(sdb, core.GenesisAccount{Address: testBankAddress, Balance: testBankFunds})
+	)
+	core.WriteGenesisBlockForTesting(ldb, core.GenesisAccount{Address: testBankAddress, Balance: testBankFunds})
+	// Assemble the test environment
+	blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux)
+	gchain, _ := core.GenerateChain(nil, genesis, sdb, 4, testChainGen)
+	if _, err := blockchain.InsertChain(gchain); err != nil {
+		panic(err)
+	}
+
+	odr := &testOdr{sdb: sdb, ldb: ldb}
+	lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
+	lightchain.SetValidator(bproc{})
+	headers := make([]*types.Header, len(gchain))
+	for i, block := range gchain {
+		headers[i] = block.Header()
+	}
+	if _, err := lightchain.InsertHeaderChain(headers, 1); err != nil {
+		panic(err)
+	}
+
+	test := func(expFail uint64) {
+		for i := uint64(0); i <= blockchain.CurrentHeader().Number.Uint64(); i++ {
+			bhash := core.GetCanonicalHash(sdb, i)
+			b1 := fn(NoOdr, sdb, blockchain, nil, bhash)
+			ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond)
+			b2 := fn(ctx, ldb, nil, lightchain, bhash)
+			eq := bytes.Equal(b1, b2)
+			exp := i < expFail
+			if exp && !eq {
+				t.Errorf("odr mismatch")
+			}
+			if !exp && eq {
+				t.Errorf("unexpected odr match")
+			}
+		}
+	}
+
+	odr.disable = true
+	// expect retrievals to fail (except genesis block) without a les peer
+	test(expFail)
+	odr.disable = false
+	// expect all retrievals to pass
+	test(5)
+	odr.disable = true
+	// still expect all retrievals to pass, now data should be cached locally
+	test(5)
+}
diff --git a/light/odr_util.go b/light/odr_util.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c72f90e9b89d9af10c721a031507035cb55c0b1
--- /dev/null
+++ b/light/odr_util.go
@@ -0,0 +1,186 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"bytes"
+	"errors"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/net/context"
+)
+
+var sha3_nil = crypto.Keccak256Hash(nil)
+
+var (
+	ErrNoTrustedCht = errors.New("No trusted canonical hash trie")
+	ErrNoHeader     = errors.New("Header not found")
+
+	ChtFrequency  = uint64(4096)
+	trustedChtKey = []byte("TrustedCHT")
+)
+
+type ChtNode struct {
+	Hash common.Hash
+	Td   *big.Int
+}
+
+type TrustedCht struct {
+	Number uint64
+	Root   common.Hash
+}
+
+func GetTrustedCht(db ethdb.Database) TrustedCht {
+	data, _ := db.Get(trustedChtKey)
+	var res TrustedCht
+	if err := rlp.DecodeBytes(data, &res); err != nil {
+		return TrustedCht{0, common.Hash{}}
+	}
+	return res
+}
+
+func WriteTrustedCht(db ethdb.Database, cht TrustedCht) {
+	data, _ := rlp.EncodeToBytes(cht)
+	db.Put(trustedChtKey, data)
+}
+
+func DeleteTrustedCht(db ethdb.Database) {
+	db.Delete(trustedChtKey)
+}
+
+func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) {
+	db := odr.Database()
+	hash := core.GetCanonicalHash(db, number)
+	if (hash != common.Hash{}) {
+		// if there is a canonical hash, there is a header too
+		header := core.GetHeader(db, hash, number)
+		if header == nil {
+			panic("Canonical hash present but header not found")
+		}
+		return header, nil
+	}
+
+	cht := GetTrustedCht(db)
+	if number >= cht.Number*ChtFrequency {
+		return nil, ErrNoTrustedCht
+	}
+
+	r := &ChtRequest{ChtRoot: cht.Root, ChtNum: cht.Number, BlockNum: number}
+	if err := odr.Retrieve(ctx, r); err != nil {
+		return nil, err
+	} else {
+		return r.Header, nil
+	}
+}
+
+func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
+	hash := core.GetCanonicalHash(odr.Database(), number)
+	if (hash != common.Hash{}) {
+		return hash, nil
+	}
+	header, err := GetHeaderByNumber(ctx, odr, number)
+	if header != nil {
+		return header.Hash(), nil
+	}
+	return common.Hash{}, err
+}
+
+// retrieveContractCode tries to retrieve the contract code of the given account
+// with the given hash from the network (id points to the storage trie belonging
+// to the same account)
+func retrieveContractCode(ctx context.Context, odr OdrBackend, id *TrieID, hash common.Hash) ([]byte, error) {
+	if hash == sha3_nil {
+		return nil, nil
+	}
+	res, _ := odr.Database().Get(hash[:])
+	if res != nil {
+		return res, nil
+	}
+	r := &CodeRequest{Id: id, Hash: hash}
+	if err := odr.Retrieve(ctx, r); err != nil {
+		return nil, err
+	} else {
+		return r.Data, nil
+	}
+}
+
+// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
+func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (rlp.RawValue, error) {
+	if data := core.GetBodyRLP(odr.Database(), hash, number); data != nil {
+		return data, nil
+	}
+	r := &BlockRequest{Hash: hash, Number: number}
+	if err := odr.Retrieve(ctx, r); err != nil {
+		return nil, err
+	} else {
+		return r.Rlp, nil
+	}
+}
+
+// GetBody retrieves the block body (transactons, uncles) corresponding to the
+// hash.
+func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Body, error) {
+	data, err := GetBodyRLP(ctx, odr, hash, number)
+	if err != nil {
+		return nil, err
+	}
+	body := new(types.Body)
+	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+		glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
+		return nil, err
+	}
+	return body, nil
+}
+
+// GetBlock retrieves an entire block corresponding to the hash, assembling it
+// back from the stored header and body.
+func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
+	// Retrieve the block header and body contents
+	header := core.GetHeader(odr.Database(), hash, number)
+	if header == nil {
+		return nil, ErrNoHeader
+	}
+	body, err := GetBody(ctx, odr, hash, number)
+	if err != nil {
+		return nil, err
+	}
+	// Reassemble the block and return
+	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles), nil
+}
+
+// GetBlockReceipts retrieves the receipts generated by the transactions included
+// in a block given by its hash.
+func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
+	receipts := core.GetBlockReceipts(odr.Database(), hash, number)
+	if receipts != nil {
+		return receipts, nil
+	}
+	r := &ReceiptsRequest{Hash: hash, Number: number}
+	if err := odr.Retrieve(ctx, r); err != nil {
+		return nil, err
+	} else {
+		return r.Receipts, nil
+	}
+}
diff --git a/light/state.go b/light/state.go
index 4f2177238990f606c284fcd859983e312ad5ec88..88f60efbbe37b467be8a6187cdd8dcdd0d4c8883 100644
--- a/light/state.go
+++ b/light/state.go
@@ -20,6 +20,7 @@ import (
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"golang.org/x/net/context"
@@ -33,10 +34,11 @@ var StartingNonce uint64
 // state, retrieving unknown parts on-demand from the ODR backend. Changes are
 // never stored in the local database, only in the memory objects.
 type LightState struct {
-	odr  OdrBackend
-	trie *LightTrie
-
+	odr          OdrBackend
+	trie         *LightTrie
+	id           *TrieID
 	stateObjects map[string]*StateObject
+	refund       *big.Int
 }
 
 // NewLightState creates a new LightState with the specified root.
@@ -44,15 +46,25 @@ type LightState struct {
 // root is non-existent. In that case, ODR retrieval will always be unsuccessful
 // and every operation will return with an error or wait for the context to be
 // cancelled.
-func NewLightState(root common.Hash, odr OdrBackend) *LightState {
-	tr := NewLightTrie(root, odr, true)
+func NewLightState(id *TrieID, odr OdrBackend) *LightState {
+	var tr *LightTrie
+	if id != nil {
+		tr = NewLightTrie(id, odr, true)
+	}
 	return &LightState{
 		odr:          odr,
 		trie:         tr,
+		id:           id,
 		stateObjects: make(map[string]*StateObject),
+		refund:       new(big.Int),
 	}
 }
 
+// AddRefund adds an amount to the refund value collected during a vm execution
+func (self *LightState) AddRefund(gas *big.Int) {
+	self.refund.Add(self.refund, gas)
+}
+
 // HasAccount returns true if an account exists at the given address
 func (self *LightState) HasAccount(ctx context.Context, addr common.Address) (bool, error) {
 	so, err := self.GetStateObject(ctx, addr)
@@ -109,9 +121,9 @@ func (self *LightState) GetState(ctx context.Context, a common.Address, b common
 	return common.Hash{}, err
 }
 
-// IsDeleted returns true if the given account has been marked for deletion
+// HasSuicided returns true if the given account has been marked for deletion
 // or false if the account does not exist
-func (self *LightState) IsDeleted(ctx context.Context, addr common.Address) (bool, error) {
+func (self *LightState) HasSuicided(ctx context.Context, addr common.Address) (bool, error) {
 	stateObject, err := self.GetStateObject(ctx, addr)
 	if err == nil && stateObject != nil {
 		return stateObject.remove, nil
@@ -145,7 +157,7 @@ func (self *LightState) SetNonce(ctx context.Context, addr common.Address, nonce
 func (self *LightState) SetCode(ctx context.Context, addr common.Address, code []byte) error {
 	stateObject, err := self.GetOrNewStateObject(ctx, addr)
 	if err == nil && stateObject != nil {
-		stateObject.SetCode(code)
+		stateObject.SetCode(crypto.Keccak256Hash(code), code)
 	}
 	return err
 }
@@ -160,7 +172,7 @@ func (self *LightState) SetState(ctx context.Context, addr common.Address, key c
 }
 
 // Delete marks an account to be removed and clears its balance
-func (self *LightState) Delete(ctx context.Context, addr common.Address) (bool, error) {
+func (self *LightState) Suicide(ctx context.Context, addr common.Address) (bool, error) {
 	stateObject, err := self.GetOrNewStateObject(ctx, addr)
 	if err == nil && stateObject != nil {
 		stateObject.MarkForDeletion()
@@ -194,7 +206,7 @@ func (self *LightState) GetStateObject(ctx context.Context, addr common.Address)
 		return nil, nil
 	}
 
-	stateObject, err = DecodeObject(ctx, addr, self.odr, []byte(data))
+	stateObject, err = DecodeObject(ctx, self.id, addr, self.odr, []byte(data))
 	if err != nil {
 		return nil, err
 	}
@@ -258,14 +270,16 @@ func (self *LightState) CreateStateObject(ctx context.Context, addr common.Addre
 // Copy creates a copy of the state
 func (self *LightState) Copy() *LightState {
 	// ignore error - we assume state-to-be-copied always exists
-	state := NewLightState(common.Hash{}, self.odr)
+	state := NewLightState(nil, self.odr)
 	state.trie = self.trie
+	state.id = self.id
 	for k, stateObject := range self.stateObjects {
 		if stateObject.dirty {
 			state.stateObjects[k] = stateObject.Copy()
 		}
 	}
 
+	state.refund.Set(self.refund)
 	return state
 }
 
@@ -274,4 +288,10 @@ func (self *LightState) Copy() *LightState {
 func (self *LightState) Set(state *LightState) {
 	self.trie = state.trie
 	self.stateObjects = state.stateObjects
+	self.refund = state.refund
+}
+
+// GetRefund returns the refund value collected during a vm execution
+func (self *LightState) GetRefund() *big.Int {
+	return self.refund
 }
diff --git a/light/state_object.go b/light/state_object.go
index 1e9c7f4b1d63c5e49914fa85102cb6dbf2bd7139..61c3888fe372c2f8756915683be92677394de838 100644
--- a/light/state_object.go
+++ b/light/state_object.go
@@ -40,7 +40,7 @@ func (self Code) String() string {
 }
 
 // Storage is a memory map cache of a contract storage
-type Storage map[string]common.Hash
+type Storage map[common.Hash]common.Hash
 
 // String returns a string representation of the storage cache
 func (self Storage) String() (str string) {
@@ -100,7 +100,7 @@ func NewStateObject(address common.Address, odr OdrBackend) *StateObject {
 		codeHash: emptyCodeHash,
 		storage:  make(Storage),
 	}
-	object.trie = NewLightTrie(common.Hash{}, odr, true)
+	object.trie = NewLightTrie(&TrieID{}, odr, true)
 	return object
 }
 
@@ -133,8 +133,7 @@ func (self *StateObject) Storage() Storage {
 // GetState returns the storage value at the given address from either the cache
 // or the trie
 func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common.Hash, error) {
-	strkey := key.Str()
-	value, exists := self.storage[strkey]
+	value, exists := self.storage[key]
 	if !exists {
 		var err error
 		value, err = self.getAddr(ctx, key)
@@ -142,7 +141,7 @@ func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common.
 			return common.Hash{}, err
 		}
 		if (value != common.Hash{}) {
-			self.storage[strkey] = value
+			self.storage[key] = value
 		}
 	}
 
@@ -151,7 +150,7 @@ func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common.
 
 // SetState sets the storage value at the given address
 func (self *StateObject) SetState(k, value common.Hash) {
-	self.storage[k.Str()] = value
+	self.storage[k] = value
 	self.dirty = true
 }
 
@@ -179,6 +178,9 @@ func (c *StateObject) SetBalance(amount *big.Int) {
 	c.dirty = true
 }
 
+// ReturnGas returns the gas back to the origin. Used by the Virtual machine or Closures
+func (c *StateObject) ReturnGas(gas, price *big.Int) {}
+
 // Copy creates a copy of the state object
 func (self *StateObject) Copy() *StateObject {
 	stateObject := NewStateObject(self.Address(), self.odr)
@@ -215,9 +217,9 @@ func (self *StateObject) Code() []byte {
 }
 
 // SetCode sets the contract code
-func (self *StateObject) SetCode(code []byte) {
+func (self *StateObject) SetCode(hash common.Hash, code []byte) {
 	self.code = code
-	self.codeHash = crypto.Keccak256(code)
+	self.codeHash = hash[:]
 	self.dirty = true
 }
 
@@ -232,6 +234,23 @@ func (self *StateObject) Nonce() uint64 {
 	return self.nonce
 }
 
+// ForEachStorage calls a callback function for every key/value pair found
+// in the local storage cache. Note that unlike core/state.StateObject,
+// light.StateObject only returns cached values and doesn't download the
+// entire storage tree.
+func (self *StateObject) ForEachStorage(cb func(key, value common.Hash) bool) {
+	for h, v := range self.storage {
+		cb(h, v)
+	}
+}
+
+// Value is never called, but must be present to allow StateObject to be used
+// as a vm.Account interface that also satisfies the vm.ContractRef
+// interface. Interfaces are awesome.
+func (self *StateObject) Value() *big.Int {
+	panic("Value on StateObject should never be called")
+}
+
 // Encoding
 
 type extStateObject struct {
@@ -242,7 +261,7 @@ type extStateObject struct {
 }
 
 // DecodeObject decodes an RLP-encoded state object.
-func DecodeObject(ctx context.Context, address common.Address, odr OdrBackend, data []byte) (*StateObject, error) {
+func DecodeObject(ctx context.Context, stateID *TrieID, address common.Address, odr OdrBackend, data []byte) (*StateObject, error) {
 	var (
 		obj = &StateObject{address: address, odr: odr, storage: make(Storage)}
 		ext extStateObject
@@ -251,9 +270,10 @@ func DecodeObject(ctx context.Context, address common.Address, odr OdrBackend, d
 	if err = rlp.DecodeBytes(data, &ext); err != nil {
 		return nil, err
 	}
-	obj.trie = NewLightTrie(ext.Root, odr, true)
+	trieID := StorageTrieID(stateID, address, ext.Root)
+	obj.trie = NewLightTrie(trieID, odr, true)
 	if !bytes.Equal(ext.CodeHash, emptyCodeHash) {
-		if obj.code, err = retrieveNodeData(ctx, obj.odr, common.BytesToHash(ext.CodeHash)); err != nil {
+		if obj.code, err = retrieveContractCode(ctx, obj.odr, trieID, common.BytesToHash(ext.CodeHash)); err != nil {
 			return nil, fmt.Errorf("can't find code for hash %x: %v", ext.CodeHash, err)
 		}
 	}
diff --git a/light/state_test.go b/light/state_test.go
index a6b115786883c257c9918984c248576ce25a5103..89a64483ddaee0437c032748822e58648607c183 100644
--- a/light/state_test.go
+++ b/light/state_test.go
@@ -22,33 +22,13 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/trie"
 	"golang.org/x/net/context"
 )
 
-type testOdr struct {
-	OdrBackend
-	sdb, ldb ethdb.Database
-}
-
-func (odr *testOdr) Database() ethdb.Database {
-	return odr.ldb
-}
-
-func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
-	switch req := req.(type) {
-	case *TrieRequest:
-		t, _ := trie.New(req.root, odr.sdb)
-		req.proof = t.Prove(req.key)
-	case *NodeDataRequest:
-		req.data, _ = odr.sdb.Get(req.hash[:])
-	}
-	req.StoreResult(odr.ldb)
-	return nil
-}
-
 func makeTestState() (common.Hash, ethdb.Database) {
 	sdb, _ := ethdb.NewMemDatabase()
 	st, _ := state.New(common.Hash{}, sdb)
@@ -67,9 +47,11 @@ func makeTestState() (common.Hash, ethdb.Database) {
 
 func TestLightStateOdr(t *testing.T) {
 	root, sdb := makeTestState()
+	header := &types.Header{Root: root, Number: big.NewInt(0)}
+	core.WriteHeader(sdb, header)
 	ldb, _ := ethdb.NewMemDatabase()
 	odr := &testOdr{sdb: sdb, ldb: ldb}
-	ls := NewLightState(root, odr)
+	ls := NewLightState(StateTrieID(header), odr)
 	ctx := context.Background()
 
 	for i := byte(0); i < 100; i++ {
@@ -151,9 +133,11 @@ func TestLightStateOdr(t *testing.T) {
 
 func TestLightStateSetCopy(t *testing.T) {
 	root, sdb := makeTestState()
+	header := &types.Header{Root: root, Number: big.NewInt(0)}
+	core.WriteHeader(sdb, header)
 	ldb, _ := ethdb.NewMemDatabase()
 	odr := &testOdr{sdb: sdb, ldb: ldb}
-	ls := NewLightState(root, odr)
+	ls := NewLightState(StateTrieID(header), odr)
 	ctx := context.Background()
 
 	for i := byte(0); i < 100; i++ {
@@ -227,9 +211,11 @@ func TestLightStateSetCopy(t *testing.T) {
 
 func TestLightStateDelete(t *testing.T) {
 	root, sdb := makeTestState()
+	header := &types.Header{Root: root, Number: big.NewInt(0)}
+	core.WriteHeader(sdb, header)
 	ldb, _ := ethdb.NewMemDatabase()
 	odr := &testOdr{sdb: sdb, ldb: ldb}
-	ls := NewLightState(root, odr)
+	ls := NewLightState(StateTrieID(header), odr)
 	ctx := context.Background()
 
 	addr := common.Address{42}
@@ -242,21 +228,21 @@ func TestLightStateDelete(t *testing.T) {
 		t.Fatalf("HasAccount returned false, expected true")
 	}
 
-	b, err = ls.IsDeleted(ctx, addr)
+	b, err = ls.HasSuicided(ctx, addr)
 	if err != nil {
-		t.Fatalf("IsDeleted error: %v", err)
+		t.Fatalf("HasSuicided error: %v", err)
 	}
 	if b {
-		t.Fatalf("IsDeleted returned true, expected false")
+		t.Fatalf("HasSuicided returned true, expected false")
 	}
 
-	ls.Delete(ctx, addr)
+	ls.Suicide(ctx, addr)
 
-	b, err = ls.IsDeleted(ctx, addr)
+	b, err = ls.HasSuicided(ctx, addr)
 	if err != nil {
-		t.Fatalf("IsDeleted error: %v", err)
+		t.Fatalf("HasSuicided error: %v", err)
 	}
 	if !b {
-		t.Fatalf("IsDeleted returned false, expected true")
+		t.Fatalf("HasSuicided returned false, expected true")
 	}
 }
diff --git a/light/trie.go b/light/trie.go
index 42a943d50c7b43c3074ea5bef670ca03c8331ef1..c5525358a43be75b619785924db5ece1a114ed67 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -17,7 +17,6 @@
 package light
 
 import (
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/trie"
 	"golang.org/x/net/context"
@@ -25,28 +24,28 @@ import (
 
 // LightTrie is an ODR-capable wrapper around trie.SecureTrie
 type LightTrie struct {
-	trie         *trie.SecureTrie
-	originalRoot common.Hash
-	odr          OdrBackend
-	db           ethdb.Database
+	trie *trie.SecureTrie
+	id   *TrieID
+	odr  OdrBackend
+	db   ethdb.Database
 }
 
 // NewLightTrie creates a new LightTrie instance. It doesn't instantly try to
 // access the db or network and retrieve the root node, it only initializes its
 // encapsulated SecureTrie at the first actual operation.
-func NewLightTrie(root common.Hash, odr OdrBackend, useFakeMap bool) *LightTrie {
+func NewLightTrie(id *TrieID, odr OdrBackend, useFakeMap bool) *LightTrie {
 	return &LightTrie{
 		// SecureTrie is initialized before first request
-		originalRoot: root,
-		odr:          odr,
-		db:           odr.Database(),
+		id:  id,
+		odr: odr,
+		db:  odr.Database(),
 	}
 }
 
 // retrieveKey retrieves a single key, returns true and stores nodes in local
 // database if successful
 func (t *LightTrie) retrieveKey(ctx context.Context, key []byte) bool {
-	r := &TrieRequest{root: t.originalRoot, key: key}
+	r := &TrieRequest{Id: t.id, Key: key}
 	return t.odr.Retrieve(ctx, r) == nil
 }
 
@@ -79,7 +78,7 @@ func (t *LightTrie) do(ctx context.Context, fallbackKey []byte, fn func() error)
 func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error) {
 	err = t.do(ctx, key, func() (err error) {
 		if t.trie == nil {
-			t.trie, err = trie.NewSecure(t.originalRoot, t.db, 0)
+			t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
 		}
 		if err == nil {
 			res, err = t.trie.TryGet(key)
@@ -98,7 +97,7 @@ func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error)
 func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) {
 	err = t.do(ctx, key, func() (err error) {
 		if t.trie == nil {
-			t.trie, err = trie.NewSecure(t.originalRoot, t.db, 0)
+			t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
 		}
 		if err == nil {
 			err = t.trie.TryUpdate(key, value)
@@ -112,7 +111,7 @@ func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) {
 func (t *LightTrie) Delete(ctx context.Context, key []byte) (err error) {
 	err = t.do(ctx, key, func() (err error) {
 		if t.trie == nil {
-			t.trie, err = trie.NewSecure(t.originalRoot, t.db, 0)
+			t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
 		}
 		if err == nil {
 			err = t.trie.TryDelete(key)
diff --git a/light/txpool.go b/light/txpool.go
new file mode 100644
index 0000000000000000000000000000000000000000..825a0f9099eabe4abc79b5950b115359ca0e042a
--- /dev/null
+++ b/light/txpool.go
@@ -0,0 +1,551 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/net/context"
+)
+
+// txPermanent is the number of mined blocks after a mined transaction is
+// considered permanent and no rollback is expected
+var txPermanent = uint64(500)
+
+// TxPool implements the transaction pool for light clients, which keeps track
+// of the status of locally created transactions, detecting if they are included
+// in a block (mined) or rolled back. There are no queued transactions since we
+// always receive all locally signed transactions in the same order as they are
+// created.
+type TxPool struct {
+	config   *core.ChainConfig
+	quit     chan bool
+	eventMux *event.TypeMux
+	events   event.Subscription
+	mu       sync.RWMutex
+	chain    *LightChain
+	odr      OdrBackend
+	chainDb  ethdb.Database
+	relay    TxRelayBackend
+	head     common.Hash
+	nonce    map[common.Address]uint64            // "pending" nonce
+	pending  map[common.Hash]*types.Transaction   // pending transactions by tx hash
+	mined    map[common.Hash][]*types.Transaction // mined transactions by block hash
+	clearIdx uint64                               // earliest block nr that can contain mined tx info
+
+	homestead bool
+}
+
+// TxRelayBackend provides an interface to the mechanism that forwards transactions
+// to the ETH network. The implementations of the functions should be non-blocking.
+//
+// Send instructs backend to forward new transactions
+// NewHead notifies backend about a new head after it has been processed by the
+//  tx pool, including mined and rolled back transactions since the last event
+// Discard notifies backend about transactions that should be discarded either
+//  because they have been replaced by a re-send or because they have been mined
+//  long ago and no rollback is expected
+type TxRelayBackend interface {
+	Send(txs types.Transactions)
+	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
+	Discard(hashes []common.Hash)
+}
+
+// NewTxPool creates a new light transaction pool
+func NewTxPool(config *core.ChainConfig, eventMux *event.TypeMux, chain *LightChain, relay TxRelayBackend) *TxPool {
+	pool := &TxPool{
+		config:   config,
+		nonce:    make(map[common.Address]uint64),
+		pending:  make(map[common.Hash]*types.Transaction),
+		mined:    make(map[common.Hash][]*types.Transaction),
+		quit:     make(chan bool),
+		eventMux: eventMux,
+		events:   eventMux.Subscribe(core.ChainHeadEvent{}),
+		chain:    chain,
+		relay:    relay,
+		odr:      chain.Odr(),
+		chainDb:  chain.Odr().Database(),
+		head:     chain.CurrentHeader().Hash(),
+		clearIdx: chain.CurrentHeader().Number.Uint64(),
+	}
+	go pool.eventLoop()
+
+	return pool
+}
+
+// currentState returns the light state of the current head header
+func (pool *TxPool) currentState() *LightState {
+	return NewLightState(StateTrieID(pool.chain.CurrentHeader()), pool.odr)
+}
+
+// GetNonce returns the "pending" nonce of a given address. It always queries
+// the nonce belonging to the latest header too in order to detect if another
+// client using the same key sent a transaction.
+func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
+	nonce, err := pool.currentState().GetNonce(ctx, addr)
+	if err != nil {
+		return 0, err
+	}
+	sn, ok := pool.nonce[addr]
+	if ok && sn > nonce {
+		nonce = sn
+	}
+	if !ok || sn < nonce {
+		pool.nonce[addr] = nonce
+	}
+	return nonce, nil
+}
+
+type txBlockData struct {
+	BlockHash  common.Hash
+	BlockIndex uint64
+	Index      uint64
+}
+
+// storeTxBlockData stores the block position of a mined tx in the local db
+func (pool *TxPool) storeTxBlockData(txh common.Hash, tbd txBlockData) {
+	//fmt.Println("storeTxBlockData", txh, tbd)
+	data, _ := rlp.EncodeToBytes(tbd)
+	pool.chainDb.Put(append(txh[:], byte(1)), data)
+}
+
+// removeTxBlockData removes the stored block position of a rolled back tx
+func (pool *TxPool) removeTxBlockData(txh common.Hash) {
+	//fmt.Println("removeTxBlockData", txh)
+	pool.chainDb.Delete(append(txh[:], byte(1)))
+}
+
+// txStateChanges stores the recent changes between pending/mined states of
+// transactions. True means mined, false means rolled back, no entry means no change
+type txStateChanges map[common.Hash]bool
+
+// setState sets the status of a tx to either recently mined or recently rolled back
+func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
+	val, ent := txc[txHash]
+	if ent && (val != mined) {
+		delete(txc, txHash)
+	} else {
+		txc[txHash] = mined
+	}
+}
+
+// getLists creates lists of mined and rolled back tx hashes
+func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
+	for hash, val := range txc {
+		if val {
+			mined = append(mined, hash)
+		} else {
+			rollback = append(rollback, hash)
+		}
+	}
+	return
+}
+
+// checkMinedTxs checks newly added blocks for the currently pending transactions
+// and marks them as mined if necessary. It also stores block position in the db
+// and adds them to the received txStateChanges map.
+func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, idx uint64, txc txStateChanges) error {
+	//fmt.Println("checkMinedTxs")
+	if len(pool.pending) == 0 {
+		return nil
+	}
+	//fmt.Println("len(pool) =", len(pool.pending))
+
+	block, err := GetBlock(ctx, pool.odr, hash, idx)
+	var receipts types.Receipts
+	if err != nil {
+		//fmt.Println(err)
+		return err
+	}
+	//fmt.Println("len(block.Transactions()) =", len(block.Transactions()))
+
+	list := pool.mined[hash]
+	for i, tx := range block.Transactions() {
+		txHash := tx.Hash()
+		//fmt.Println(" txHash:", txHash)
+		if tx, ok := pool.pending[txHash]; ok {
+			//fmt.Println("TX FOUND")
+			if receipts == nil {
+				receipts, err = GetBlockReceipts(ctx, pool.odr, hash, idx)
+				if err != nil {
+					return err
+				}
+				if len(receipts) != len(block.Transactions()) {
+					panic(nil) // should never happen if hashes did match
+				}
+				core.SetReceiptsData(block, receipts)
+			}
+			//fmt.Println("WriteReceipt", receipts[i].TxHash)
+			core.WriteReceipt(pool.chainDb, receipts[i])
+			pool.storeTxBlockData(txHash, txBlockData{hash, idx, uint64(i)})
+			delete(pool.pending, txHash)
+			list = append(list, tx)
+			txc.setState(txHash, true)
+		}
+	}
+	if list != nil {
+		pool.mined[hash] = list
+	}
+	return nil
+}
+
+// rollbackTxs marks the transactions contained in recently rolled back blocks
+// as rolled back. It also removes block position info from the db and adds them
+// to the received txStateChanges map.
+func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
+	if list, ok := pool.mined[hash]; ok {
+		for _, tx := range list {
+			txHash := tx.Hash()
+			pool.removeTxBlockData(txHash)
+			pool.pending[txHash] = tx
+			txc.setState(txHash, false)
+		}
+		delete(pool.mined, hash)
+	}
+}
+
+// setNewHead sets a new head header, processing (and rolling back if necessary)
+// the blocks since the last known head and returns a txStateChanges map containing
+// the recently mined and rolled back transaction hashes. If an error (context
+// timeout) occurs during checking new blocks, it leaves the locally known head
+// at the latest checked block and still returns a valid txStateChanges, making it
+// possible to continue checking the missing blocks at the next chain head event
+func (pool *TxPool) setNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
+	txc := make(txStateChanges)
+	oldh := pool.chain.GetHeaderByHash(pool.head)
+	newh := newHeader
+	// find common ancestor, create list of rolled back and new block hashes
+	var oldHashes, newHashes []common.Hash
+	for oldh.Hash() != newh.Hash() {
+		if oldh.Number.Uint64() >= newh.Number.Uint64() {
+			oldHashes = append(oldHashes, oldh.Hash())
+			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
+		}
+		if oldh.Number.Uint64() < newh.Number.Uint64() {
+			newHashes = append(newHashes, newh.Hash())
+			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
+			if newh == nil {
+				// happens when CHT syncing, nothing to do
+				newh = oldh
+			}
+		}
+	}
+	if oldh.Number.Uint64() < pool.clearIdx {
+		pool.clearIdx = oldh.Number.Uint64()
+	}
+	// roll back old blocks
+	for _, hash := range oldHashes {
+		pool.rollbackTxs(hash, txc)
+	}
+	pool.head = oldh.Hash()
+	// check mined txs of new blocks (array is in reversed order)
+	for i := len(newHashes) - 1; i >= 0; i-- {
+		hash := newHashes[i]
+		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
+			return txc, err
+		}
+		pool.head = hash
+	}
+
+	// clear old mined tx entries of old blocks
+	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
+		idx2 := idx - txPermanent
+		for i := pool.clearIdx; i < idx2; i++ {
+			hash := core.GetCanonicalHash(pool.chainDb, i)
+			if list, ok := pool.mined[hash]; ok {
+				hashes := make([]common.Hash, len(list))
+				for i, tx := range list {
+					hashes[i] = tx.Hash()
+				}
+				pool.relay.Discard(hashes)
+				delete(pool.mined, hash)
+			}
+		}
+		pool.clearIdx = idx2
+	}
+
+	return txc, nil
+}
+
+// blockCheckTimeout is the time limit for checking new blocks for mined
+// transactions. Checking resumes at the next chain head event if timed out.
+const blockCheckTimeout = time.Second * 3
+
+// eventLoop processes chain head events and also notifies the tx relay backend
+// about the new head hash and tx state changes
+func (pool *TxPool) eventLoop() {
+	for ev := range pool.events.Chan() {
+		switch ev.Data.(type) {
+		case core.ChainHeadEvent:
+			pool.mu.Lock()
+			ctx, _ := context.WithTimeout(context.Background(), blockCheckTimeout)
+			head := pool.chain.CurrentHeader()
+			txc, _ := pool.setNewHead(ctx, head)
+			m, r := txc.getLists()
+			pool.relay.NewHead(pool.head, m, r)
+			pool.homestead = pool.config.IsHomestead(head.Number)
+			pool.mu.Unlock()
+		}
+	}
+}
+
+// Stop stops the light transaction pool
+func (pool *TxPool) Stop() {
+	close(pool.quit)
+	pool.events.Unsubscribe()
+	glog.V(logger.Info).Infoln("Transaction pool stopped")
+}
+
+// Stats returns the number of currently pending (locally created) transactions
+func (pool *TxPool) Stats() (pending int) {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	pending = len(pool.pending)
+	return
+}
+
+// validateTx checks whether a transaction is valid according to the consensus rules.
+func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
+	// Validate sender
+	var (
+		from common.Address
+		err  error
+	)
+
+	// Validate the transaction sender and its signature. Throw
+	// if the from field is invalid.
+	if from, err = tx.From(); err != nil {
+		return core.ErrInvalidSender
+	}
+
+	// Make sure the account exists. Non-existent accounts
+	// have no funds and will therefore never pass.
+	currentState := pool.currentState()
+	if h, err := currentState.HasAccount(ctx, from); err == nil {
+		if !h {
+			return core.ErrNonExistentAccount
+		}
+	} else {
+		return err
+	}
+
+	// Last but not least check for nonce errors
+	if n, err := currentState.GetNonce(ctx, from); err == nil {
+		if n > tx.Nonce() {
+			return core.ErrNonce
+		}
+	} else {
+		return err
+	}
+
+	// Check the transaction doesn't exceed the current
+	// block limit gas.
+	header := pool.chain.GetHeaderByHash(pool.head)
+	if header.GasLimit.Cmp(tx.Gas()) < 0 {
+		return core.ErrGasLimit
+	}
+
+	// Transactions can't be negative. This may never happen
+	// using RLP decoded transactions but may occur if you create
+	// a transaction using the RPC for example.
+	if tx.Value().Cmp(common.Big0) < 0 {
+		return core.ErrNegativeValue
+	}
+
+	// Transactor should have enough funds to cover the costs
+	// cost == V + GP * GL
+	if b, err := currentState.GetBalance(ctx, from); err == nil {
+		if b.Cmp(tx.Cost()) < 0 {
+			return core.ErrInsufficientFunds
+		}
+	} else {
+		return err
+	}
+
+	// Should supply enough intrinsic gas
+	if tx.Gas().Cmp(core.IntrinsicGas(tx.Data(), core.MessageCreatesContract(tx), pool.homestead)) < 0 {
+		return core.ErrIntrinsicGas
+	}
+
+	return nil
+}
+
+// add validates a new transaction and sets its state pending if processable.
+// It also updates the locally stored nonce if necessary.
+func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
+	hash := tx.Hash()
+
+	if self.pending[hash] != nil {
+		return fmt.Errorf("Known transaction (%x)", hash[:4])
+	}
+	err := self.validateTx(ctx, tx)
+	if err != nil {
+		return err
+	}
+
+	if _, ok := self.pending[hash]; !ok {
+		self.pending[hash] = tx
+
+		nonce := tx.Nonce() + 1
+		addr, _ := tx.From()
+		if nonce > self.nonce[addr] {
+			self.nonce[addr] = nonce
+		}
+
+		// Notify the subscribers. This event is posted in a goroutine
+		// because it's possible that somewhere during the post "Remove transaction"
+		// gets called which will then wait for the global tx pool lock and deadlock.
+		go self.eventMux.Post(core.TxPreEvent{Tx: tx})
+	}
+
+	if glog.V(logger.Debug) {
+		var toname string
+		if to := tx.To(); to != nil {
+			toname = common.Bytes2Hex(to[:4])
+		} else {
+			toname = "[NEW_CONTRACT]"
+		}
+		// we can ignore the error here because From is
+		// verified in ValidateTransaction.
+		f, _ := tx.From()
+		from := common.Bytes2Hex(f[:4])
+		glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash)
+	}
+
+	return nil
+}
+
+// Add adds a transaction to the pool if valid and passes it to the tx relay
+// backend
+func (self *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
+	data, err := rlp.EncodeToBytes(tx)
+	if err != nil {
+		return err
+	}
+
+	if err := self.add(ctx, tx); err != nil {
+		return err
+	}
+	//fmt.Println("Send", tx.Hash())
+	self.relay.Send(types.Transactions{tx})
+
+	self.chainDb.Put(tx.Hash().Bytes(), data)
+	return nil
+}
+
+// AddBatch adds all valid transactions to the pool and passes them to
+// the tx relay backend
+func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+	var sendTx types.Transactions
+
+	for _, tx := range txs {
+		if err := self.add(ctx, tx); err != nil {
+			glog.V(logger.Debug).Infoln("tx error:", err)
+		} else {
+			sendTx = append(sendTx, tx)
+			h := tx.Hash()
+			glog.V(logger.Debug).Infof("tx %x\n", h[:4])
+		}
+	}
+
+	if len(sendTx) > 0 {
+		self.relay.Send(sendTx)
+	}
+}
+
+// GetTransaction returns a transaction if it is contained in the pool
+// and nil otherwise.
+func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
+	// check the txs first
+	if tx, ok := tp.pending[hash]; ok {
+		return tx
+	}
+	return nil
+}
+
+// GetTransactions returns all currently processable transactions.
+// The returned slice may be modified by the caller.
+func (self *TxPool) GetTransactions() (txs types.Transactions) {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	txs = make(types.Transactions, len(self.pending))
+	i := 0
+	for _, tx := range self.pending {
+		txs[i] = tx
+		i++
+	}
+	return txs
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and nonce.
+func (self *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	// Retrieve all the pending transactions and sort by account and by nonce
+	pending := make(map[common.Address]types.Transactions)
+	for _, tx := range self.pending {
+		account, _ := tx.From()
+		pending[account] = append(pending[account], tx)
+	}
+	// There are no queued transactions in a light pool, just return an empty map
+	queued := make(map[common.Address]types.Transactions)
+	return pending, queued
+}
+
+// RemoveTransactions removes all given transactions from the pool.
+func (self *TxPool) RemoveTransactions(txs types.Transactions) {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+	var hashes []common.Hash
+	for _, tx := range txs {
+		//self.RemoveTx(tx.Hash())
+		hash := tx.Hash()
+		delete(self.pending, hash)
+		self.chainDb.Delete(hash[:])
+		hashes = append(hashes, hash)
+	}
+	self.relay.Discard(hashes)
+}
+
+// RemoveTx removes the transaction with the given hash from the pool.
+func (pool *TxPool) RemoveTx(hash common.Hash) {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+	// delete from pending pool
+	delete(pool.pending, hash)
+	pool.chainDb.Delete(hash[:])
+	pool.relay.Discard([]common.Hash{hash})
+}
diff --git a/light/txpool_test.go b/light/txpool_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ff1006e52bbfd21fd6c9ab7223e9a77e7d0c7e1
--- /dev/null
+++ b/light/txpool_test.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"math"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/params"
+	"golang.org/x/net/context"
+)
+
+type testTxRelay struct {
+	send, nhMined, nhRollback, discard int
+}
+
+func (self *testTxRelay) Send(txs types.Transactions) {
+	self.send = len(txs)
+}
+
+func (self *testTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
+	self.nhMined = len(mined)
+	self.nhRollback = len(rollback)
+}
+
+func (self *testTxRelay) Discard(hashes []common.Hash) {
+	self.discard = len(hashes)
+}
+
+const poolTestTxs = 1000
+const poolTestBlocks = 100
+
+// test tx 0..n-1
+var testTx [poolTestTxs]*types.Transaction
+
+// txs sent before block i
+func sentTx(i int) int {
+	return int(math.Pow(float64(i)/float64(poolTestBlocks), 0.9) * poolTestTxs)
+}
+
+// txs included in block i or before that (minedTx(i) <= sentTx(i))
+func minedTx(i int) int {
+	return int(math.Pow(float64(i)/float64(poolTestBlocks), 1.1) * poolTestTxs)
+}
+
+func txPoolTestChainGen(i int, block *core.BlockGen) {
+	s := minedTx(i)
+	e := minedTx(i + 1)
+	for i := s; i < e; i++ {
+		block.AddTx(testTx[i])
+	}
+}
+
+func TestTxPool(t *testing.T) {
+	for i, _ := range testTx {
+		testTx[i], _ = types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey)
+	}
+
+	var (
+		evmux   = new(event.TypeMux)
+		pow     = new(core.FakePow)
+		sdb, _  = ethdb.NewMemDatabase()
+		ldb, _  = ethdb.NewMemDatabase()
+		genesis = core.WriteGenesisBlockForTesting(sdb, core.GenesisAccount{Address: testBankAddress, Balance: testBankFunds})
+	)
+	core.WriteGenesisBlockForTesting(ldb, core.GenesisAccount{Address: testBankAddress, Balance: testBankFunds})
+	// Assemble the test environment
+	blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux)
+	gchain, _ := core.GenerateChain(nil, genesis, sdb, poolTestBlocks, txPoolTestChainGen)
+	if _, err := blockchain.InsertChain(gchain); err != nil {
+		panic(err)
+	}
+
+	odr := &testOdr{sdb: sdb, ldb: ldb}
+	relay := &testTxRelay{}
+	lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
+	lightchain.SetValidator(bproc{})
+	txPermanent = 50
+	pool := NewTxPool(testChainConfig(), evmux, lightchain, relay)
+
+	for ii, block := range gchain {
+		i := ii + 1
+		ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond)
+		s := sentTx(i - 1)
+		e := sentTx(i)
+		for i := s; i < e; i++ {
+			relay.send = 0
+			pool.Add(ctx, testTx[i])
+			got := relay.send
+			exp := 1
+			if got != exp {
+				t.Errorf("relay.Send expected len = %d, got %d", exp, got)
+			}
+		}
+
+		relay.nhMined = 0
+		relay.nhRollback = 0
+		relay.discard = 0
+		if _, err := lightchain.InsertHeaderChain([]*types.Header{block.Header()}, 1); err != nil {
+			panic(err)
+		}
+		time.Sleep(time.Millisecond * 30)
+
+		got := relay.nhMined
+		exp := minedTx(i) - minedTx(i-1)
+		if got != exp {
+			t.Errorf("relay.NewHead expected len(mined) = %d, got %d", exp, got)
+		}
+
+		got = relay.discard
+		exp = 0
+		if i > int(txPermanent)+1 {
+			exp = minedTx(i-int(txPermanent)-1) - minedTx(i-int(txPermanent)-2)
+		}
+		if got != exp {
+			t.Errorf("relay.Discard expected len = %d, got %d", exp, got)
+		}
+	}
+}
diff --git a/light/vm_env.go b/light/vm_env.go
new file mode 100644
index 0000000000000000000000000000000000000000..0978755cf1165e7b637d7b72be5acb31c46eb3ff
--- /dev/null
+++ b/light/vm_env.go
@@ -0,0 +1,271 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
+	"golang.org/x/net/context"
+)
+
+// VMEnv is the light client version of the vm execution environment.
+// Unlike other structures, VMEnv holds a context that is applied by state
+// retrieval requests through the entire execution. If any state operation
+// returns an error, the execution fails.
+type VMEnv struct {
+	vm.Environment
+	ctx         context.Context
+	chainConfig *core.ChainConfig
+	evm         *vm.EVM
+	state       *VMState
+	header      *types.Header
+	msg         core.Message
+	depth       int
+	chain       *LightChain
+	err         error
+}
+
+// NewEnv creates a new execution environment based on an ODR capable light state
+func NewEnv(ctx context.Context, state *LightState, chainConfig *core.ChainConfig, chain *LightChain, msg core.Message, header *types.Header, cfg vm.Config) *VMEnv {
+	env := &VMEnv{
+		chainConfig: chainConfig,
+		chain:       chain,
+		header:      header,
+		msg:         msg,
+	}
+	env.state = &VMState{ctx: ctx, state: state, env: env}
+
+	env.evm = vm.New(env, cfg)
+	return env
+}
+
+func (self *VMEnv) RuleSet() vm.RuleSet      { return self.chainConfig }
+func (self *VMEnv) Vm() vm.Vm                { return self.evm }
+func (self *VMEnv) Origin() common.Address   { f, _ := self.msg.From(); return f }
+func (self *VMEnv) BlockNumber() *big.Int    { return self.header.Number }
+func (self *VMEnv) Coinbase() common.Address { return self.header.Coinbase }
+func (self *VMEnv) Time() *big.Int           { return self.header.Time }
+func (self *VMEnv) Difficulty() *big.Int     { return self.header.Difficulty }
+func (self *VMEnv) GasLimit() *big.Int       { return self.header.GasLimit }
+func (self *VMEnv) Db() vm.Database          { return self.state }
+func (self *VMEnv) Depth() int               { return self.depth }
+func (self *VMEnv) SetDepth(i int)           { self.depth = i }
+func (self *VMEnv) GetHash(n uint64) common.Hash {
+	for header := self.chain.GetHeader(self.header.ParentHash, self.header.Number.Uint64()-1); header != nil; header = self.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) {
+		if header.Number.Uint64() == n {
+			return header.Hash()
+		}
+	}
+
+	return common.Hash{}
+}
+
+func (self *VMEnv) AddLog(log *vm.Log) {
+	//self.state.AddLog(log)
+}
+func (self *VMEnv) CanTransfer(from common.Address, balance *big.Int) bool {
+	return self.state.GetBalance(from).Cmp(balance) >= 0
+}
+
+func (self *VMEnv) SnapshotDatabase() int {
+	return self.state.SnapshotDatabase()
+}
+
+func (self *VMEnv) RevertToSnapshot(idx int) {
+	self.state.RevertToSnapshot(idx)
+}
+
+func (self *VMEnv) Transfer(from, to vm.Account, amount *big.Int) {
+	core.Transfer(from, to, amount)
+}
+
+func (self *VMEnv) Call(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) {
+	return core.Call(self, me, addr, data, gas, price, value)
+}
+func (self *VMEnv) CallCode(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) {
+	return core.CallCode(self, me, addr, data, gas, price, value)
+}
+
+func (self *VMEnv) DelegateCall(me vm.ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error) {
+	return core.DelegateCall(self, me, addr, data, gas, price)
+}
+
+func (self *VMEnv) Create(me vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) {
+	return core.Create(self, me, data, gas, price, value)
+}
+
+// Error returns the error (if any) that happened during execution.
+func (self *VMEnv) Error() error {
+	return self.err
+}
+
+// VMState is a wrapper for the light state that holds the actual context and
+// passes it to any state operation that requires it.
+type VMState struct {
+	vm.Database
+	ctx       context.Context
+	state     *LightState
+	snapshots []*LightState
+	env       *VMEnv
+}
+
+// errHandler handles and stores any state error that happens during execution.
+func (s *VMState) errHandler(err error) {
+	if err != nil && s.env.err == nil {
+		s.env.err = err
+	}
+}
+
+func (self *VMState) SnapshotDatabase() int {
+	self.snapshots = append(self.snapshots, self.state.Copy())
+	return len(self.snapshots) - 1
+}
+
+func (self *VMState) RevertToSnapshot(idx int) {
+	self.state.Set(self.snapshots[idx])
+	self.snapshots = self.snapshots[:idx]
+}
+
+// GetAccount returns the account object of the given account or nil if the
+// account does not exist
+func (s *VMState) GetAccount(addr common.Address) vm.Account {
+	so, err := s.state.GetStateObject(s.ctx, addr)
+	s.errHandler(err)
+	if err != nil {
+		// return a dummy state object to avoid panics
+		so = s.state.newStateObject(addr)
+	}
+	return so
+}
+
+// CreateAccount creates a new account object and takes ownership.
+func (s *VMState) CreateAccount(addr common.Address) vm.Account {
+	so, err := s.state.CreateStateObject(s.ctx, addr)
+	s.errHandler(err)
+	if err != nil {
+		// return a dummy state object to avoid panics
+		so = s.state.newStateObject(addr)
+	}
+	return so
+}
+
+// AddBalance adds the given amount to the balance of the specified account
+func (s *VMState) AddBalance(addr common.Address, amount *big.Int) {
+	err := s.state.AddBalance(s.ctx, addr, amount)
+	s.errHandler(err)
+}
+
+// GetBalance retrieves the balance from the given address or 0 if the account does
+// not exist
+func (s *VMState) GetBalance(addr common.Address) *big.Int {
+	res, err := s.state.GetBalance(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
+
+// GetNonce returns the nonce at the given address or 0 if the account does
+// not exist
+func (s *VMState) GetNonce(addr common.Address) uint64 {
+	res, err := s.state.GetNonce(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
+
+// SetNonce sets the nonce of the specified account
+func (s *VMState) SetNonce(addr common.Address, nonce uint64) {
+	err := s.state.SetNonce(s.ctx, addr, nonce)
+	s.errHandler(err)
+}
+
+// GetCode returns the contract code at the given address or nil if the account
+// does not exist
+func (s *VMState) GetCode(addr common.Address) []byte {
+	res, err := s.state.GetCode(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
+
+// GetCodeHash returns the contract code hash at the given address
+func (s *VMState) GetCodeHash(addr common.Address) common.Hash {
+	res, err := s.state.GetCode(s.ctx, addr)
+	s.errHandler(err)
+	return crypto.Keccak256Hash(res)
+}
+
+// GetCodeSize returns the contract code size at the given address
+func (s *VMState) GetCodeSize(addr common.Address) int {
+	res, err := s.state.GetCode(s.ctx, addr)
+	s.errHandler(err)
+	return len(res)
+}
+
+// SetCode sets the contract code at the specified account
+func (s *VMState) SetCode(addr common.Address, code []byte) {
+	err := s.state.SetCode(s.ctx, addr, code)
+	s.errHandler(err)
+}
+
+// AddRefund adds an amount to the refund value collected during a vm execution
+func (s *VMState) AddRefund(gas *big.Int) {
+	s.state.AddRefund(gas)
+}
+
+// GetRefund returns the refund value collected during a vm execution
+func (s *VMState) GetRefund() *big.Int {
+	return s.state.GetRefund()
+}
+
+// GetState returns the contract storage value at storage address b from the
+// contract address a or common.Hash{} if the account does not exist
+func (s *VMState) GetState(a common.Address, b common.Hash) common.Hash {
+	res, err := s.state.GetState(s.ctx, a, b)
+	s.errHandler(err)
+	return res
+}
+
+// SetState sets the storage value at storage address key of the account addr
+func (s *VMState) SetState(addr common.Address, key common.Hash, value common.Hash) {
+	err := s.state.SetState(s.ctx, addr, key, value)
+	s.errHandler(err)
+}
+
+// Suicide marks an account to be removed and clears its balance
+func (s *VMState) Suicide(addr common.Address) bool {
+	res, err := s.state.Suicide(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
+
+// Exist returns true if an account exists at the given address
+func (s *VMState) Exist(addr common.Address) bool {
+	res, err := s.state.HasAccount(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
+
+// HasSuicided returns true if the given account has been marked for deletion
+// or false if the account does not exist
+func (s *VMState) HasSuicided(addr common.Address) bool {
+	res, err := s.state.HasSuicided(s.ctx, addr)
+	s.errHandler(err)
+	return res
+}
diff --git a/node/config.go b/node/config.go
index 15884a12eccbc52246116f15cd7ccde80f7b3ad9..8af9215a05528c98da4af8fd94512b882fcb171b 100644
--- a/node/config.go
+++ b/node/config.go
@@ -95,12 +95,16 @@ type Config struct {
 	// or not. Disabling is usually useful for protocol debugging (manual topology).
 	NoDiscovery bool
 
+	DiscoveryV5 bool
+
 	// Bootstrap nodes used to establish connectivity with the rest of the network.
 	BootstrapNodes []*discover.Node
 
 	// Network interface address on which the node should listen for inbound peers.
 	ListenAddr string
 
+	ListenAddrV5 string
+
 	// If set to a non-nil value, the given NAT port mapper is used to make the
 	// listening port available to the Internet.
 	NAT nat.Interface
diff --git a/node/node.go b/node/node.go
index 41c9eb27f7fe4128191639efaafeee70a3af230b..15f43fc6b2705cdfccd4c7718af6e7fb0a9b5eef 100644
--- a/node/node.go
+++ b/node/node.go
@@ -157,11 +157,13 @@ func (n *Node) Start() error {
 		PrivateKey:      n.config.NodeKey(),
 		Name:            n.config.NodeName(),
 		Discovery:       !n.config.NoDiscovery,
+		DiscoveryV5:     n.config.DiscoveryV5,
 		BootstrapNodes:  n.config.BootstrapNodes,
 		StaticNodes:     n.config.StaticNodes(),
 		TrustedNodes:    n.config.TrusterNodes(),
 		NodeDatabase:    n.config.NodeDB(),
 		ListenAddr:      n.config.ListenAddr,
+		ListenAddrV5:    n.config.ListenAddrV5,
 		NAT:             n.config.NAT,
 		Dialer:          n.config.Dialer,
 		NoDial:          n.config.NoDial,
diff --git a/p2p/discv5/crypto.go b/p2p/discv5/crypto.go
new file mode 100644
index 0000000000000000000000000000000000000000..48b2a8a729afe68384fd6735880ce0c2da6e3db7
--- /dev/null
+++ b/p2p/discv5/crypto.go
@@ -0,0 +1,31 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	//"github.com/btcsuite/btcd/btcec"
+	"github.com/ethereum/go-ethereum/crypto/secp256k1"
+)
+
+func S256() *secp256k1.BitCurve {
+	return secp256k1.S256()
+}
+
+// This version should be used for NaCl compilation
+/*func S256() *btcec.KoblitzCurve {
+	return S256()
+}*/
diff --git a/p2p/discv5/database.go b/p2p/discv5/database.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c47c27fd0f4f11f012b72340cd882ecaa51727e
--- /dev/null
+++ b/p2p/discv5/database.go
@@ -0,0 +1,413 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the node database, storing previously seen nodes and any collected
+// metadata about them for QoS purposes.
+
+package discv5
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
+	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
+	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
+)
+
+// nodeDB stores all nodes we know about.
+type nodeDB struct {
+	lvl    *leveldb.DB   // Interface to the database itself
+	self   NodeID        // Own node id to prevent adding it into the database
+	runner sync.Once     // Ensures we can start at most one expirer
+	quit   chan struct{} // Channel to signal the expiring thread to stop
+}
+
+// Schema layout for the node database
+var (
+	nodeDBVersionKey = []byte("version") // Version of the database to flush if it changes
+	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with
+
+	nodeDBDiscoverRoot          = ":discover"
+	nodeDBDiscoverPing          = nodeDBDiscoverRoot + ":lastping"
+	nodeDBDiscoverPong          = nodeDBDiscoverRoot + ":lastpong"
+	nodeDBDiscoverFindFails     = nodeDBDiscoverRoot + ":findfail"
+	nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
+	nodeDBTopicRegTickets       = ":tickets"
+)
+
+// newNodeDB creates a new node database for storing and retrieving infos about
+// known peers in the network. If no path is given, an in-memory, temporary
+// database is constructed.
+func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
+	if path == "" {
+		return newMemoryNodeDB(self)
+	}
+	return newPersistentNodeDB(path, version, self)
+}
+
+// newMemoryNodeDB creates a new in-memory node database without a persistent
+// backend.
+func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
+	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return &nodeDB{
+		lvl:  db,
+		self: self,
+		quit: make(chan struct{}),
+	}, nil
+}
+
+// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
+// also flushing its contents in case of a version mismatch.
+func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
+	opts := &opt.Options{OpenFilesCacheCapacity: 5}
+	db, err := leveldb.OpenFile(path, opts)
+	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
+		db, err = leveldb.RecoverFile(path, nil)
+	}
+	if err != nil {
+		return nil, err
+	}
+	// The nodes contained in the cache correspond to a certain protocol version.
+	// Flush all nodes if the version doesn't match.
+	currentVer := make([]byte, binary.MaxVarintLen64)
+	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
+
+	blob, err := db.Get(nodeDBVersionKey, nil)
+	switch err {
+	case leveldb.ErrNotFound:
+		// Version not found (i.e. empty cache), insert it
+		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
+			db.Close()
+			return nil, err
+		}
+
+	case nil:
+		// Version present, flush if different
+		if !bytes.Equal(blob, currentVer) {
+			db.Close()
+			if err = os.RemoveAll(path); err != nil {
+				return nil, err
+			}
+			return newPersistentNodeDB(path, version, self)
+		}
+	}
+	return &nodeDB{
+		lvl:  db,
+		self: self,
+		quit: make(chan struct{}),
+	}, nil
+}
+
+// makeKey generates the leveldb key-blob from a node id and its particular
+// field of interest.
+func makeKey(id NodeID, field string) []byte {
+	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
+		return []byte(field)
+	}
+	return append(nodeDBItemPrefix, append(id[:], field...)...)
+}
+
+// splitKey tries to split a database key into a node id and a field part.
+func splitKey(key []byte) (id NodeID, field string) {
+	// If the key is not of a node, return it plainly
+	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
+		return NodeID{}, string(key)
+	}
+	// Otherwise split the id and field
+	item := key[len(nodeDBItemPrefix):]
+	copy(id[:], item[:len(id)])
+	field = string(item[len(id):])
+
+	return id, field
+}
+
+// fetchInt64 retrieves an integer instance associated with a particular
+// database key.
+func (db *nodeDB) fetchInt64(key []byte) int64 {
+	blob, err := db.lvl.Get(key, nil)
+	if err != nil {
+		return 0
+	}
+	val, read := binary.Varint(blob)
+	if read <= 0 {
+		return 0
+	}
+	return val
+}
+
+// storeInt64 stores an integer value under the given database key, encoded
+// as a varint.
+func (db *nodeDB) storeInt64(key []byte, n int64) error {
+	blob := make([]byte, binary.MaxVarintLen64)
+	blob = blob[:binary.PutVarint(blob, n)]
+	return db.lvl.Put(key, blob, nil)
+}
+
+func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
+	blob, err := rlp.EncodeToBytes(val)
+	if err != nil {
+		return err
+	}
+	return db.lvl.Put(key, blob, nil)
+}
+
+func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
+	blob, err := db.lvl.Get(key, nil)
+	if err != nil {
+		return err
+	}
+	err = rlp.DecodeBytes(blob, val)
+	if err != nil {
+		glog.V(logger.Warn).Infof("key %x (%T) %v", key, val, err)
+	}
+	return err
+}
+
+// node retrieves a node with a given id from the database.
+func (db *nodeDB) node(id NodeID) *Node {
+	var node Node
+	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
+		return nil
+	}
+	node.sha = crypto.Keccak256Hash(node.ID[:])
+	return &node
+}
+
+// updateNode inserts - potentially overwriting - a node into the peer database.
+func (db *nodeDB) updateNode(node *Node) error {
+	return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
+}
+
+// deleteNode deletes all information/keys associated with a node.
+func (db *nodeDB) deleteNode(id NodeID) error {
+	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
+	for deleter.Next() {
+		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureExpirer is a small helper method ensuring that the data expiration
+// mechanism is running. If the expiration goroutine is already running, this
+// method simply returns.
+//
+// The goal is to start the data evacuation only after the network successfully
+// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
+// it would require significant overhead to exactly trace the first successful
+// convergence, it's simpler to "ensure" the correct state when an appropriate
+// condition occurs (i.e. a successful bonding), and discard further events.
+func (db *nodeDB) ensureExpirer() {
+	db.runner.Do(func() { go db.expirer() })
+}
+
+// expirer should be started in a go routine, and is responsible for looping ad
+// infinitum and dropping stale data from the database.
+func (db *nodeDB) expirer() {
+	tick := time.Tick(nodeDBCleanupCycle)
+	for {
+		select {
+		case <-tick:
+			if err := db.expireNodes(); err != nil {
+				glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err)
+			}
+
+		case <-db.quit:
+			return
+		}
+	}
+}
+
+// expireNodes iterates over the database and deletes all nodes that have not
+// been seen (i.e. received a pong from) for some allotted time.
+func (db *nodeDB) expireNodes() error {
+	threshold := time.Now().Add(-nodeDBNodeExpiration)
+
+	// Find discovered nodes that are older than the allowance
+	it := db.lvl.NewIterator(nil, nil)
+	defer it.Release()
+
+	for it.Next() {
+		// Skip the item if not a discovery node
+		id, field := splitKey(it.Key())
+		if field != nodeDBDiscoverRoot {
+			continue
+		}
+		// Skip the node if not expired yet (and not self)
+		if bytes.Compare(id[:], db.self[:]) != 0 {
+			if seen := db.lastPong(id); seen.After(threshold) {
+				continue
+			}
+		}
+		// Otherwise delete all associated information
+		db.deleteNode(id)
+	}
+	return nil
+}
+
+// lastPing retrieves the time of the last ping packet sent to a remote node,
+// requesting binding.
+func (db *nodeDB) lastPing(id NodeID) time.Time {
+	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
+}
+
+// updateLastPing updates the last time we tried contacting a remote node.
+func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
+	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
+}
+
+// lastPong retrieves the time of the last successful contact from remote node.
+func (db *nodeDB) lastPong(id NodeID) time.Time {
+	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
+}
+
+// updateLastPong updates the time of the last pong received from a remote node.
+func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
+	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
+}
+
+// findFails retrieves the number of findnode failures since bonding.
+func (db *nodeDB) findFails(id NodeID) int {
+	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
+}
+
+// updateFindFails updates the number of findnode failures since bonding.
+func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
+	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
+}
+
+// localEndpoint returns the last local endpoint communicated to the
+// given remote node.
+func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
+	var ep rpcEndpoint
+	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
+		return nil
+	}
+	return &ep
+}
+
+func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
+	return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
+}
+
+// querySeeds retrieves random nodes to be used as potential seed nodes
+// for bootstrapping.
+func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
+	var (
+		now   = time.Now()
+		nodes = make([]*Node, 0, n)
+		it    = db.lvl.NewIterator(nil, nil)
+		id    NodeID
+	)
+	defer it.Release()
+
+seek:
+	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
+		// Seek to a random entry. The first byte is incremented by a
+		// random amount each time in order to increase the likelihood
+		// of hitting all existing nodes in very small databases.
+		ctr := id[0]
+		rand.Read(id[:])
+		id[0] = ctr + id[0]%16
+		it.Seek(makeKey(id, nodeDBDiscoverRoot))
+
+		n := nextNode(it)
+		if n == nil {
+			id[0] = 0
+			continue seek // iterator exhausted
+		}
+		if n.ID == db.self {
+			continue seek
+		}
+		if now.Sub(db.lastPong(n.ID)) > maxAge {
+			continue seek
+		}
+		for i := range nodes {
+			if nodes[i].ID == n.ID {
+				continue seek // duplicate
+			}
+		}
+		nodes = append(nodes, n)
+	}
+	return nodes
+}
+
+func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
+	key := makeKey(id, nodeDBTopicRegTickets)
+	blob, _ := db.lvl.Get(key, nil)
+	if len(blob) != 8 {
+		return 0, 0
+	}
+	issued = binary.BigEndian.Uint32(blob[0:4])
+	used = binary.BigEndian.Uint32(blob[4:8])
+	return
+}
+
+func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
+	key := makeKey(id, nodeDBTopicRegTickets)
+	blob := make([]byte, 8)
+	binary.BigEndian.PutUint32(blob[0:4], issued)
+	binary.BigEndian.PutUint32(blob[4:8], used)
+	return db.lvl.Put(key, blob, nil)
+}
+
+// reads the next node record from the iterator, skipping over other
+// database entries.
+func nextNode(it iterator.Iterator) *Node {
+	for end := false; !end; end = !it.Next() {
+		id, field := splitKey(it.Key())
+		if field != nodeDBDiscoverRoot {
+			continue
+		}
+		var n Node
+		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
+			if glog.V(logger.Warn) {
+				glog.Errorf("invalid node %x: %v", id, err)
+			}
+			continue
+		}
+		return &n
+	}
+	return nil
+}
+
+// close flushes and closes the database files.
+func (db *nodeDB) close() {
+	close(db.quit)
+	db.lvl.Close()
+}
diff --git a/p2p/discv5/database_test.go b/p2p/discv5/database_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d3330ed228e39cf88ec096b9cd8b3d64cbf5dd2
--- /dev/null
+++ b/p2p/discv5/database_test.go
@@ -0,0 +1,380 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net"
+	"os"
+	"path/filepath"
+	"reflect"
+	"testing"
+	"time"
+)
+
+var nodeDBKeyTests = []struct {
+	id    NodeID
+	field string
+	key   []byte
+}{
+	{
+		id:    NodeID{},
+		field: "version",
+		key:   []byte{0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e}, // field
+	},
+	{
+		id:    MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+		field: ":discover",
+		key: []byte{0x6e, 0x3a, // prefix
+			0x1d, 0xd9, 0xd6, 0x5c, 0x45, 0x52, 0xb5, 0xeb, // node id
+			0x43, 0xd5, 0xad, 0x55, 0xa2, 0xee, 0x3f, 0x56, //
+			0xc6, 0xcb, 0xc1, 0xc6, 0x4a, 0x5c, 0x8d, 0x65, //
+			0x9f, 0x51, 0xfc, 0xd5, 0x1b, 0xac, 0xe2, 0x43, //
+			0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, //
+			0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
+			0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
+			0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
+			0x3a, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, // field
+		},
+	},
+}
+
+func TestNodeDBKeys(t *testing.T) {
+	for i, tt := range nodeDBKeyTests {
+		if key := makeKey(tt.id, tt.field); !bytes.Equal(key, tt.key) {
+			t.Errorf("make test %d: key mismatch: have 0x%x, want 0x%x", i, key, tt.key)
+		}
+		id, field := splitKey(tt.key)
+		if !bytes.Equal(id[:], tt.id[:]) {
+			t.Errorf("split test %d: id mismatch: have 0x%x, want 0x%x", i, id, tt.id)
+		}
+		if field != tt.field {
+			t.Errorf("split test %d: field mismatch: have 0x%x, want 0x%x", i, field, tt.field)
+		}
+	}
+}
+
+var nodeDBInt64Tests = []struct {
+	key   []byte
+	value int64
+}{
+	{key: []byte{0x01}, value: 1},
+	{key: []byte{0x02}, value: 2},
+	{key: []byte{0x03}, value: 3},
+}
+
+func TestNodeDBInt64(t *testing.T) {
+	db, _ := newNodeDB("", Version, NodeID{})
+	defer db.close()
+
+	tests := nodeDBInt64Tests
+	for i := 0; i < len(tests); i++ {
+		// Insert the next value
+		if err := db.storeInt64(tests[i].key, tests[i].value); err != nil {
+			t.Errorf("test %d: failed to store value: %v", i, err)
+		}
+		// Check all existing and non existing values
+		for j := 0; j < len(tests); j++ {
+			num := db.fetchInt64(tests[j].key)
+			switch {
+			case j <= i && num != tests[j].value:
+				t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, tests[j].value)
+			case j > i && num != 0:
+				t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, 0)
+			}
+		}
+	}
+}
+
+func TestNodeDBFetchStore(t *testing.T) {
+	node := NewNode(
+		MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+		net.IP{192, 168, 0, 1},
+		30303,
+		30303,
+	)
+	inst := time.Now()
+	num := 314
+
+	db, _ := newNodeDB("", Version, NodeID{})
+	defer db.close()
+
+	// Check fetch/store operations on a node ping object
+	if stored := db.lastPing(node.ID); stored.Unix() != 0 {
+		t.Errorf("ping: non-existing object: %v", stored)
+	}
+	if err := db.updateLastPing(node.ID, inst); err != nil {
+		t.Errorf("ping: failed to update: %v", err)
+	}
+	if stored := db.lastPing(node.ID); stored.Unix() != inst.Unix() {
+		t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
+	}
+	// Check fetch/store operations on a node pong object
+	if stored := db.lastPong(node.ID); stored.Unix() != 0 {
+		t.Errorf("pong: non-existing object: %v", stored)
+	}
+	if err := db.updateLastPong(node.ID, inst); err != nil {
+		t.Errorf("pong: failed to update: %v", err)
+	}
+	if stored := db.lastPong(node.ID); stored.Unix() != inst.Unix() {
+		t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
+	}
+	// Check fetch/store operations on a node findnode-failure object
+	if stored := db.findFails(node.ID); stored != 0 {
+		t.Errorf("find-node fails: non-existing object: %v", stored)
+	}
+	if err := db.updateFindFails(node.ID, num); err != nil {
+		t.Errorf("find-node fails: failed to update: %v", err)
+	}
+	if stored := db.findFails(node.ID); stored != num {
+		t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num)
+	}
+	// Check fetch/store operations on an actual node object
+	if stored := db.node(node.ID); stored != nil {
+		t.Errorf("node: non-existing object: %v", stored)
+	}
+	if err := db.updateNode(node); err != nil {
+		t.Errorf("node: failed to update: %v", err)
+	}
+	if stored := db.node(node.ID); stored == nil {
+		t.Errorf("node: not found")
+	} else if !reflect.DeepEqual(stored, node) {
+		t.Errorf("node: data mismatch: have %v, want %v", stored, node)
+	}
+}
+
+var nodeDBSeedQueryNodes = []struct {
+	node *Node
+	pong time.Time
+}{
+	// This one should not be in the result set because its last
+	// pong time is too far in the past.
+	{
+		node: NewNode(
+			MustHexID("0x84d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 3},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-3 * time.Hour),
+	},
+	// This one shouldn't be in the result set because its
+	// nodeID is the local node's ID.
+	{
+		node: NewNode(
+			MustHexID("0x57d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 3},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-4 * time.Second),
+	},
+
+	// These should be in the result set.
+	{
+		node: NewNode(
+			MustHexID("0x22d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 1},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-2 * time.Second),
+	},
+	{
+		node: NewNode(
+			MustHexID("0x44d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 2},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-3 * time.Second),
+	},
+	{
+		node: NewNode(
+			MustHexID("0xe2d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 3},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-1 * time.Second),
+	},
+}
+
+func TestNodeDBSeedQuery(t *testing.T) {
+	db, _ := newNodeDB("", Version, nodeDBSeedQueryNodes[1].node.ID)
+	defer db.close()
+
+	// Insert a batch of nodes for querying
+	for i, seed := range nodeDBSeedQueryNodes {
+		if err := db.updateNode(seed.node); err != nil {
+			t.Fatalf("node %d: failed to insert: %v", i, err)
+		}
+		if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
+			t.Fatalf("node %d: failed to insert lastPong: %v", i, err)
+		}
+	}
+
+	// Retrieve the entire batch and check for duplicates
+	seeds := db.querySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
+	have := make(map[NodeID]struct{})
+	for _, seed := range seeds {
+		have[seed.ID] = struct{}{}
+	}
+	want := make(map[NodeID]struct{})
+	for _, seed := range nodeDBSeedQueryNodes[2:] {
+		want[seed.node.ID] = struct{}{}
+	}
+	if len(seeds) != len(want) {
+		t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
+	}
+	for id, _ := range have {
+		if _, ok := want[id]; !ok {
+			t.Errorf("extra seed: %v", id)
+		}
+	}
+	for id, _ := range want {
+		if _, ok := have[id]; !ok {
+			t.Errorf("missing seed: %v", id)
+		}
+	}
+}
+
+func TestNodeDBPersistency(t *testing.T) {
+	root, err := ioutil.TempDir("", "nodedb-")
+	if err != nil {
+		t.Fatalf("failed to create temporary data folder: %v", err)
+	}
+	defer os.RemoveAll(root)
+
+	var (
+		testKey = []byte("somekey")
+		testInt = int64(314)
+	)
+
+	// Create a persistent database and store some values
+	db, err := newNodeDB(filepath.Join(root, "database"), Version, NodeID{})
+	if err != nil {
+		t.Fatalf("failed to create persistent database: %v", err)
+	}
+	if err := db.storeInt64(testKey, testInt); err != nil {
+		t.Fatalf("failed to store value: %v.", err)
+	}
+	db.close()
+
+	// Reopen the database and check the value
+	db, err = newNodeDB(filepath.Join(root, "database"), Version, NodeID{})
+	if err != nil {
+		t.Fatalf("failed to open persistent database: %v", err)
+	}
+	if val := db.fetchInt64(testKey); val != testInt {
+		t.Fatalf("value mismatch: have %v, want %v", val, testInt)
+	}
+	db.close()
+
+	// Change the database version and check flush
+	db, err = newNodeDB(filepath.Join(root, "database"), Version+1, NodeID{})
+	if err != nil {
+		t.Fatalf("failed to open persistent database: %v", err)
+	}
+	if val := db.fetchInt64(testKey); val != 0 {
+		t.Fatalf("value mismatch: have %v, want %v", val, 0)
+	}
+	db.close()
+}
+
+var nodeDBExpirationNodes = []struct {
+	node *Node
+	pong time.Time
+	exp  bool
+}{
+	{
+		node: NewNode(
+			MustHexID("0x01d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 1},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-nodeDBNodeExpiration + time.Minute),
+		exp:  false,
+	}, {
+		node: NewNode(
+			MustHexID("0x02d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{127, 0, 0, 2},
+			30303,
+			30303,
+		),
+		pong: time.Now().Add(-nodeDBNodeExpiration - time.Minute),
+		exp:  true,
+	},
+}
+
+func TestNodeDBExpiration(t *testing.T) {
+	db, _ := newNodeDB("", Version, NodeID{})
+	defer db.close()
+
+	// Add all the test nodes and set their last pong time
+	for i, seed := range nodeDBExpirationNodes {
+		if err := db.updateNode(seed.node); err != nil {
+			t.Fatalf("node %d: failed to insert: %v", i, err)
+		}
+		if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
+			t.Fatalf("node %d: failed to update pong: %v", i, err)
+		}
+	}
+	// Expire some of them, and check the rest
+	if err := db.expireNodes(); err != nil {
+		t.Fatalf("failed to expire nodes: %v", err)
+	}
+	for i, seed := range nodeDBExpirationNodes {
+		node := db.node(seed.node.ID)
+		if (node == nil && !seed.exp) || (node != nil && seed.exp) {
+			t.Errorf("node %d: expiration mismatch: have %v, want %v", i, node, seed.exp)
+		}
+	}
+}
+
+func TestNodeDBSelfExpiration(t *testing.T) {
+	// Find a node in the tests that shouldn't expire, and assign it as self
+	var self NodeID
+	for _, node := range nodeDBExpirationNodes {
+		if !node.exp {
+			self = node.node.ID
+			break
+		}
+	}
+	db, _ := newNodeDB("", Version, self)
+	defer db.close()
+
+	// Add all the test nodes and set their last pong time
+	for i, seed := range nodeDBExpirationNodes {
+		if err := db.updateNode(seed.node); err != nil {
+			t.Fatalf("node %d: failed to insert: %v", i, err)
+		}
+		if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
+			t.Fatalf("node %d: failed to update pong: %v", i, err)
+		}
+	}
+	// Expire the nodes and make sure self has been evacuated too
+	if err := db.expireNodes(); err != nil {
+		t.Fatalf("failed to expire nodes: %v", err)
+	}
+	node := db.node(self)
+	if node != nil {
+		t.Errorf("self not evacuated")
+	}
+}
diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
new file mode 100644
index 0000000000000000000000000000000000000000..afc92e99fbf149ca9fe31097998cee59f5fd1b7c
--- /dev/null
+++ b/p2p/discv5/net.go
@@ -0,0 +1,1200 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/sha3"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/p2p/nat"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	errInvalidEvent = errors.New("invalid in current state")
+	errNoQuery      = errors.New("no pending query")
+	errWrongAddress = errors.New("unknown sender address")
+)
+
+const (
+	autoRefreshInterval = 1 * time.Hour
+	seedCount           = 30
+	seedMaxAge          = 5 * 24 * time.Hour
+)
+
+const testTopic = "foo"
+
+const (
+	printDebugLogs   = false
+	printTestImgLogs = false
+)
+
+func debugLog(s string) {
+	if printDebugLogs {
+		fmt.Println(s)
+	}
+}
+
+// BootNodes are the enode URLs of the P2P bootstrap nodes for the experimental RLPx v5 "Topic Discovery" network
+// warning: local bootnodes for testing!!!
+var BootNodes = []*Node{
+	//MustParseNode("enode://6f974ede10d07334e7e651c1501cb540d087dd3a6dea81432620895c913f281790b49459d72cb8011bfbbfbd24fad956356189c31b7181a96cd44ccfb68bfc71@127.0.0.1:30301"),
+	MustParseNode("enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305"),
+}
+
+// Network manages the table and all protocol interaction.
+type Network struct {
+	db   *nodeDB // database of known nodes
+	conn transport
+
+	closed           chan struct{}          // closed when loop is done
+	closeReq         chan struct{}          // 'request to close'
+	refreshReq       chan []*Node           // lookups ask for refresh on this channel
+	refreshResp      chan (<-chan struct{}) // ...and get the channel to block on from this one
+	read             chan ingressPacket     // ingress packets arrive here
+	timeout          chan timeoutEvent
+	queryReq         chan *findnodeQuery // lookups submit findnode queries on this channel
+	tableOpReq       chan func()
+	tableOpResp      chan struct{}
+	topicRegisterReq chan topicRegisterReq
+	topicSearchReq   chan topicSearchReq
+	bucketFillChn    chan chan struct{}
+
+	// State of the main loop.
+	tab           *Table
+	topictab      *topicTable
+	ticketStore   *ticketStore
+	nursery       []*Node
+	nodes         map[NodeID]*Node // tracks active nodes with state != known
+	timeoutTimers map[timeoutEvent]*time.Timer
+
+	// Revalidation queues.
+	// Nodes put on these queues will be pinged eventually.
+	slowRevalidateQueue []*Node
+	fastRevalidateQueue []*Node
+
+	// Buffers for state transition.
+	sendBuf []*ingressPacket
+}
+
+// transport is implemented by the UDP transport.
+// it is an interface so we can test without opening lots of UDP
+// sockets and without generating a private key.
+type transport interface {
+	sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte)
+	sendNeighbours(remote *Node, nodes []*Node)
+	sendFindnodeHash(remote *Node, target common.Hash)
+	sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte)
+	sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node)
+
+	send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte)
+
+	localAddr() *net.UDPAddr
+	Close()
+}
+
+type findnodeQuery struct {
+	remote   *Node
+	target   common.Hash
+	reply    chan<- []*Node
+	nresults int // counter for received nodes
+}
+
+type topicRegisterReq struct {
+	add   bool
+	topic Topic
+}
+
+type topicSearchReq struct {
+	topic Topic
+	found chan<- string
+}
+
+type timeoutEvent struct {
+	ev   nodeEvent
+	node *Node
+}
+
+func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, natm nat.Interface, dbPath string) (*Network, error) {
+	ourID := PubkeyID(&ourPubkey)
+
+	var db *nodeDB
+	if dbPath != "<no database>" {
+		var err error
+		if db, err = newNodeDB(dbPath, Version, ourID); err != nil {
+			return nil, err
+		}
+	}
+
+	tab := newTable(ourID, conn.localAddr())
+	net := &Network{
+		db:               db,
+		conn:             conn,
+		tab:              tab,
+		topictab:         newTopicTable(db, tab.self),
+		ticketStore:      newTicketStore(),
+		refreshReq:       make(chan []*Node),
+		refreshResp:      make(chan (<-chan struct{})),
+		closed:           make(chan struct{}),
+		closeReq:         make(chan struct{}),
+		read:             make(chan ingressPacket, 100),
+		timeout:          make(chan timeoutEvent),
+		timeoutTimers:    make(map[timeoutEvent]*time.Timer),
+		tableOpReq:       make(chan func()),
+		tableOpResp:      make(chan struct{}),
+		queryReq:         make(chan *findnodeQuery),
+		topicRegisterReq: make(chan topicRegisterReq),
+		topicSearchReq:   make(chan topicSearchReq),
+		bucketFillChn:    make(chan chan struct{}, 1),
+		nodes:            make(map[NodeID]*Node),
+	}
+	go net.loop()
+	return net, nil
+}
+
+// Close terminates the network listener and flushes the node database.
+func (net *Network) Close() {
+	net.conn.Close()
+	select {
+	case <-net.closed:
+	case net.closeReq <- struct{}{}:
+		<-net.closed
+	}
+}
+
+// Self returns the local node.
+// The returned node should not be modified by the caller.
+func (net *Network) Self() *Node {
+	return net.tab.self
+}
+
+// ReadRandomNodes fills the given slice with random nodes from the
+// table. It will not write the same node more than once. The nodes in
+// the slice are copies and can be modified by the caller.
+func (net *Network) ReadRandomNodes(buf []*Node) (n int) {
+	net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) })
+	return n
+}
+
+// SetFallbackNodes sets the initial points of contact. These nodes
+// are used to connect to the network if the table is empty and there
+// are no known nodes in the database.
+func (net *Network) SetFallbackNodes(nodes []*Node) error {
+	nursery := make([]*Node, 0, len(nodes))
+	for _, n := range nodes {
+		if err := n.validateComplete(); err != nil {
+			return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
+		}
+		// Recompute cpy.sha because the node might not have been
+		// created by NewNode or ParseNode.
+		cpy := *n
+		cpy.sha = crypto.Keccak256Hash(n.ID[:])
+		nursery = append(nursery, &cpy)
+	}
+	net.reqRefresh(nursery)
+	return nil
+}
+
+// Resolve searches for a specific node with the given ID.
+// It returns nil if the node could not be found.
+func (net *Network) Resolve(targetID NodeID) *Node {
+	result := net.lookup(crypto.Keccak256Hash(targetID[:]), true)
+	for _, n := range result {
+		if n.ID == targetID {
+			return n
+		}
+	}
+	return nil
+}
+
+// Lookup performs a network search for nodes close
+// to the given target. It approaches the target by querying
+// nodes that are closer to it on each iteration.
+// The given target does not need to be an actual node
+// identifier.
+//
+// The local node may be included in the result.
+func (net *Network) Lookup(targetID NodeID) []*Node {
+	return net.lookup(crypto.Keccak256Hash(targetID[:]), false)
+}
+
+func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
+	var (
+		asked          = make(map[NodeID]bool)
+		seen           = make(map[NodeID]bool)
+		reply          = make(chan []*Node, alpha)
+		result         = nodesByDistance{target: target}
+		pendingQueries = 0
+	)
+	// Get initial answers from the local node.
+	result.push(net.tab.self, bucketSize)
+	for {
+		// Ask the α closest nodes that we haven't asked yet.
+		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
+			n := result.entries[i]
+			if !asked[n.ID] {
+				asked[n.ID] = true
+				pendingQueries++
+				net.reqQueryFindnode(n, target, reply)
+			}
+		}
+		if pendingQueries == 0 {
+			// We have asked all closest nodes, stop the search.
+			break
+		}
+		// Wait for the next reply.
+		for _, n := range <-reply {
+			if n != nil && !seen[n.ID] {
+				seen[n.ID] = true
+				result.push(n, bucketSize)
+				if stopOnMatch && n.sha == target {
+					return result.entries
+				}
+			}
+		}
+		pendingQueries--
+	}
+	return result.entries
+}
+
+func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
+	select {
+	case net.topicRegisterReq <- topicRegisterReq{true, topic}:
+	case <-net.closed:
+		return
+	}
+	select {
+	case <-net.closed:
+	case <-stop:
+		select {
+		case net.topicRegisterReq <- topicRegisterReq{false, topic}:
+		case <-net.closed:
+		}
+	}
+}
+
+func (net *Network) SearchTopic(topic Topic, stop <-chan struct{}, found chan<- string) {
+	select {
+	case net.topicSearchReq <- topicSearchReq{topic, found}:
+	case <-net.closed:
+		return
+	}
+	select {
+	case <-net.closed:
+	case <-stop:
+		select {
+		case net.topicSearchReq <- topicSearchReq{topic, nil}:
+		case <-net.closed:
+		}
+	}
+}
+
+func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} {
+	select {
+	case net.refreshReq <- nursery:
+		return <-net.refreshResp
+	case <-net.closed:
+		return net.closed
+	}
+}
+
+func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool {
+	q := &findnodeQuery{remote: n, target: target, reply: reply}
+	select {
+	case net.queryReq <- q:
+		return true
+	case <-net.closed:
+		return false
+	}
+}
+
+func (net *Network) reqReadPacket(pkt ingressPacket) {
+	select {
+	case net.read <- pkt:
+	case <-net.closed:
+	}
+}
+
+func (net *Network) reqTableOp(f func()) (called bool) {
+	select {
+	case net.tableOpReq <- f:
+		<-net.tableOpResp
+		return true
+	case <-net.closed:
+		return false
+	}
+}
+
+// TODO: external address handling.
+
+func (net *Network) loop() {
+	var (
+		refreshTimer = time.NewTicker(autoRefreshInterval)
+		refreshDone  chan struct{} // closed when the 'refresh' lookup has ended
+	)
+
+	// Tracking the next ticket to register.
+	var (
+		nextTicket        *ticketRef
+		nextRegisterTimer *time.Timer
+		nextRegisterTime  <-chan time.Time
+	)
+	defer func() {
+		if nextRegisterTimer != nil {
+			nextRegisterTimer.Stop()
+		}
+	}()
+	resetNextTicket := func() {
+		t, timeout := net.ticketStore.nextFilteredTicket()
+		if t != nextTicket {
+			nextTicket = t
+			if nextRegisterTimer != nil {
+				nextRegisterTimer.Stop()
+				nextRegisterTime = nil
+			}
+			if t != nil {
+				nextRegisterTimer = time.NewTimer(timeout)
+				nextRegisterTime = nextRegisterTimer.C
+			}
+		}
+	}
+
+	// Tracking registration and search lookups.
+	var (
+		topicRegisterLookupTarget lookupInfo
+		topicRegisterLookupDone   chan []*Node
+		topicRegisterLookupTick   = time.NewTimer(0)
+		topicSearchLookupTarget   lookupInfo
+	)
+	topicSearchLookupDone := make(chan []*Node, 1)
+	<-topicRegisterLookupTick.C
+
+	statsDump := time.NewTicker(10 * time.Second)
+
+loop:
+	for {
+		resetNextTicket()
+
+		select {
+		case <-net.closeReq:
+			debugLog("<-net.closeReq")
+			break loop
+
+		// Ingress packet handling.
+		case pkt := <-net.read:
+			debugLog("<-net.read")
+			n := net.internNode(&pkt)
+			prestate := n.state
+			status := "ok"
+			if err := net.handle(n, pkt.ev, &pkt); err != nil {
+				status = err.Error()
+			}
+			if glog.V(logger.Detail) {
+				glog.Infof("<<< (%d) %v from %x@%v: %v -> %v (%v)",
+					net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
+			}
+			// TODO: persist state if n.state goes >= known, delete if it goes <= known
+
+		// State transition timeouts.
+		case timeout := <-net.timeout:
+			debugLog("<-net.timeout")
+			if net.timeoutTimers[timeout] == nil {
+				// Stale timer (was aborted).
+				continue
+			}
+			delete(net.timeoutTimers, timeout)
+			prestate := timeout.node.state
+			status := "ok"
+			if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
+				status = err.Error()
+			}
+			if glog.V(logger.Detail) {
+				glog.Infof("--- (%d) %v for %x@%v: %v -> %v (%v)",
+					net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
+			}
+
+		// Querying.
+		case q := <-net.queryReq:
+			debugLog("<-net.queryReq")
+			if !q.start(net) {
+				q.remote.deferQuery(q)
+			}
+
+		// Interacting with the table.
+		case f := <-net.tableOpReq:
+			debugLog("<-net.tableOpReq")
+			f()
+			net.tableOpResp <- struct{}{}
+
+		// Topic registration stuff.
+		case req := <-net.topicRegisterReq:
+			debugLog("<-net.topicRegisterReq")
+			if !req.add {
+				net.ticketStore.removeRegisterTopic(req.topic)
+				continue
+			}
+			net.ticketStore.addTopic(req.topic, true)
+			// If we're currently waiting idle (nothing to look up), give the ticket store a
+			// chance to start it sooner. This should speed up convergence of the radius
+			// determination for new topics.
+			// if topicRegisterLookupDone == nil {
+			if topicRegisterLookupTarget.target == (common.Hash{}) {
+				debugLog("topicRegisterLookupTarget == null")
+				if topicRegisterLookupTick.Stop() {
+					<-topicRegisterLookupTick.C
+				}
+				target, delay := net.ticketStore.nextRegisterLookup()
+				topicRegisterLookupTarget = target
+				topicRegisterLookupTick.Reset(delay)
+			}
+
+		case nodes := <-topicRegisterLookupDone:
+			debugLog("<-topicRegisterLookupDone")
+			net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte {
+				net.ping(n, n.addr())
+				return n.pingEcho
+			})
+			target, delay := net.ticketStore.nextRegisterLookup()
+			topicRegisterLookupTarget = target
+			topicRegisterLookupTick.Reset(delay)
+			topicRegisterLookupDone = nil
+
+		case <-topicRegisterLookupTick.C:
+			debugLog("<-topicRegisterLookupTick")
+			if (topicRegisterLookupTarget.target == common.Hash{}) {
+				target, delay := net.ticketStore.nextRegisterLookup()
+				topicRegisterLookupTarget = target
+				topicRegisterLookupTick.Reset(delay)
+				topicRegisterLookupDone = nil
+			} else {
+				topicRegisterLookupDone = make(chan []*Node)
+				target := topicRegisterLookupTarget.target
+				go func() { topicRegisterLookupDone <- net.lookup(target, false) }()
+			}
+
+		case <-nextRegisterTime:
+			debugLog("<-nextRegisterTime")
+			net.ticketStore.ticketRegistered(*nextTicket)
+			//fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
+			net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong)
+
+		case req := <-net.topicSearchReq:
+			debugLog("<-net.topicSearchReq")
+			if req.found == nil {
+				net.ticketStore.removeSearchTopic(req.topic)
+				continue
+			}
+			net.ticketStore.addSearchTopic(req.topic, req.found)
+			if (topicSearchLookupTarget.target == common.Hash{}) {
+				topicSearchLookupDone <- nil
+			}
+
+		case nodes := <-topicSearchLookupDone:
+			debugLog("<-topicSearchLookupDone")
+			net.ticketStore.searchLookupDone(topicSearchLookupTarget, nodes, func(n *Node) []byte {
+				net.ping(n, n.addr())
+				return n.pingEcho
+			}, func(n *Node, topic Topic) []byte {
+				return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
+			})
+			topicSearchLookupTarget = net.ticketStore.nextSearchLookup()
+			target := topicSearchLookupTarget.target
+			if (target != common.Hash{}) {
+				go func() { topicSearchLookupDone <- net.lookup(target, false) }()
+			}
+
+		case <-statsDump.C:
+			debugLog("<-statsDump.C")
+			/*r, ok := net.ticketStore.radius[testTopic]
+			if !ok {
+				fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now())
+			} else {
+				topics := len(net.ticketStore.tickets)
+				tickets := len(net.ticketStore.nodes)
+				rad := r.radius / (maxRadius/10000+1)
+				fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ %v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now())
+			}*/
+
+			tm := mclock.Now()
+			for topic, r := range net.ticketStore.radius {
+				if printTestImgLogs {
+					rad := r.radius / (maxRadius/1000000 + 1)
+					minrad := r.minRadius / (maxRadius/1000000 + 1)
+					fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad)
+					fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad)
+				}
+			}
+			for topic, t := range net.topictab.topics {
+				wp := t.wcl.nextWaitPeriod(tm)
+				if printTestImgLogs {
+					fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000)
+				}
+			}
+
+		// Periodic / lookup-initiated bucket refresh.
+		case <-refreshTimer.C:
+			debugLog("<-refreshTimer.C")
+			// TODO: ideally we would start the refresh timer after
+			// fallback nodes have been set for the first time.
+			if refreshDone == nil {
+				refreshDone = make(chan struct{})
+				net.refresh(refreshDone)
+			}
+		case doneChn := <-net.bucketFillChn:
+			debugLog("bucketFill")
+			net.bucketFill(doneChn)
+		case newNursery := <-net.refreshReq:
+			debugLog("<-net.refreshReq")
+			if newNursery != nil {
+				net.nursery = newNursery
+			}
+			if refreshDone == nil {
+				refreshDone = make(chan struct{})
+				net.refresh(refreshDone)
+			}
+			net.refreshResp <- refreshDone
+		case <-refreshDone:
+			debugLog("<-net.refreshDone")
+			refreshDone = nil
+		}
+	}
+	debugLog("loop stopped")
+
+	glog.V(logger.Debug).Infof("shutting down")
+	if net.conn != nil {
+		net.conn.Close()
+	}
+	if refreshDone != nil {
+		// TODO: wait for pending refresh.
+		//<-refreshResults
+	}
+	// Cancel all pending timeouts.
+	for _, timer := range net.timeoutTimers {
+		timer.Stop()
+	}
+	if net.db != nil {
+		net.db.close()
+	}
+	close(net.closed)
+}
+
+// Everything below runs on the Network.loop goroutine
+// and can modify Node, Table and Network at any time without locking.
+
+func (net *Network) refresh(done chan<- struct{}) {
+	var seeds []*Node
+	if net.db != nil {
+		seeds = net.db.querySeeds(seedCount, seedMaxAge)
+	}
+	if len(seeds) == 0 {
+		seeds = net.nursery
+	}
+	if len(seeds) == 0 {
+		glog.V(logger.Detail).Info("no seed nodes found")
+		close(done)
+		return
+	}
+	for _, n := range seeds {
+		if glog.V(logger.Debug) {
+			var age string
+			if net.db != nil {
+				age = time.Since(net.db.lastPong(n.ID)).String()
+			} else {
+				age = "unknown"
+			}
+			glog.Infof("seed node (age %s): %v", age, n)
+		}
+		n = net.internNodeFromDB(n)
+		if n.state == unknown {
+			net.transition(n, verifyinit)
+		}
+		// Force-add the seed node so Lookup does something.
+		// It will be deleted again if verification fails.
+		net.tab.add(n)
+	}
+	// Start self lookup to fill up the buckets.
+	go func() {
+		net.Lookup(net.tab.self.ID)
+		close(done)
+	}()
+}
+
+func (net *Network) bucketFill(done chan<- struct{}) {
+	target := net.tab.chooseBucketFillTarget()
+	go func() {
+		net.lookup(target, false)
+		close(done)
+	}()
+}
+
+func (net *Network) BucketFill() {
+	done := make(chan struct{})
+	select {
+	case net.bucketFillChn <- done:
+		<-done
+	case <-net.closed:
+		close(done)
+	}
+}
+
+// Node Interning.
+
+func (net *Network) internNode(pkt *ingressPacket) *Node {
+	if n := net.nodes[pkt.remoteID]; n != nil {
+		return n
+	}
+	n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port))
+	n.state = unknown
+	net.nodes[pkt.remoteID] = n
+	return n
+}
+
+func (net *Network) internNodeFromDB(dbn *Node) *Node {
+	if n := net.nodes[dbn.ID]; n != nil {
+		return n
+	}
+	n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP)
+	n.state = unknown
+	net.nodes[n.ID] = n
+	return n
+}
+
+func (net *Network) internNodeFromNeighbours(rn rpcNode) (n *Node, err error) {
+	if rn.ID == net.tab.self.ID {
+		return nil, errors.New("is self")
+	}
+	n = net.nodes[rn.ID]
+	if n == nil {
+		// We haven't seen this node before.
+		n, err = nodeFromRPC(rn)
+		n.state = unknown
+		if err == nil {
+			net.nodes[n.ID] = n
+		}
+		return n, err
+	}
+	if !bytes.Equal(n.IP, rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
+		err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
+	}
+	return n, err
+}
+
+// nodeNetGuts is embedded in Node and holds the per-node state used by the network state machine.
+type nodeNetGuts struct {
+	// This is a cached copy of sha3(ID) which is used for node
+	// distance calculations. This is part of Node in order to make it
+	// possible to write tests that need a node at a certain distance.
+	// In those tests, the content of sha will not actually correspond
+	// with ID.
+	sha common.Hash
+
+	// State machine fields. Access to these fields
+	// is restricted to the Network.loop goroutine.
+	state             *nodeState
+	pingEcho          []byte           // hash of last ping sent by us
+	pingTopics        []Topic          // topic set sent by us in last ping
+	deferredQueries   []*findnodeQuery // queries that can't be sent yet
+	pendingNeighbours *findnodeQuery   // current query, waiting for reply
+	queryTimeouts     int
+}
+
+func (n *nodeNetGuts) deferQuery(q *findnodeQuery) {
+	n.deferredQueries = append(n.deferredQueries, q)
+}
+
+func (n *nodeNetGuts) startNextQuery(net *Network) {
+	if len(n.deferredQueries) == 0 {
+		return
+	}
+	nextq := n.deferredQueries[0]
+	if nextq.start(net) {
+		n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...)
+	}
+}
+
+func (q *findnodeQuery) start(net *Network) bool {
+	// Satisfy queries against the local node directly.
+	if q.remote == net.tab.self {
+		closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
+		q.reply <- closest.entries
+		return true
+	}
+	if q.remote.state.canQuery && q.remote.pendingNeighbours == nil {
+		net.conn.sendFindnodeHash(q.remote, q.target)
+		net.timedEvent(respTimeout, q.remote, neighboursTimeout)
+		q.remote.pendingNeighbours = q
+		return true
+	}
+	// If the node is not known yet, it won't accept queries.
+	// Initiate the transition to known.
+	// The request will be sent later when the node reaches known state.
+	if q.remote.state == unknown {
+		net.transition(q.remote, verifyinit)
+	}
+	return false
+}
+
+// Node Events (the input to the state machine).
+
+type nodeEvent uint
+
+//go:generate stringer -type=nodeEvent
+
+const (
+	invalidEvent nodeEvent = iota // zero is reserved
+
+	// Packet type events.
+	// These correspond to packet types in the UDP protocol.
+	pingPacket
+	pongPacket
+	findnodePacket
+	neighborsPacket
+	findnodeHashPacket
+	topicRegisterPacket
+	topicQueryPacket
+	topicNodesPacket
+
+	// Non-packet events.
+	// Event values in this category are allocated outside
+	// the packet type range (packet types are encoded as a single byte).
+	pongTimeout nodeEvent = iota + 256
+	pingTimeout
+	neighboursTimeout
+)
+
+// Node State Machine.
+
+type nodeState struct {
+	name     string
+	handle   func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error)
+	enter    func(*Network, *Node)
+	canQuery bool
+}
+
+func (s *nodeState) String() string {
+	return s.name
+}
+
+var (
+	unknown          *nodeState
+	verifyinit       *nodeState
+	verifywait       *nodeState
+	remoteverifywait *nodeState
+	known            *nodeState
+	contested        *nodeState
+	unresponsive     *nodeState
+)
+
+func init() {
+	unknown = &nodeState{
+		name: "unknown",
+		enter: func(net *Network, n *Node) {
+			net.tab.delete(n)
+			n.pingEcho = nil
+			// Abort active queries.
+			for _, q := range n.deferredQueries {
+				q.reply <- nil
+			}
+			n.deferredQueries = nil
+			if n.pendingNeighbours != nil {
+				n.pendingNeighbours.reply <- nil
+				n.pendingNeighbours = nil
+			}
+			n.queryTimeouts = 0
+		},
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pingPacket:
+				net.handlePing(n, pkt)
+				net.ping(n, pkt.remoteAddr)
+				return verifywait, nil
+			default:
+				return unknown, errInvalidEvent
+			}
+		},
+	}
+
+	verifyinit = &nodeState{
+		name: "verifyinit",
+		enter: func(net *Network, n *Node) {
+			net.ping(n, n.addr())
+		},
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pingPacket:
+				net.handlePing(n, pkt)
+				return verifywait, nil
+			case pongPacket:
+				err := net.handleKnownPong(n, pkt)
+				return remoteverifywait, err
+			case pongTimeout:
+				return unknown, nil
+			default:
+				return verifyinit, errInvalidEvent
+			}
+		},
+	}
+
+	verifywait = &nodeState{
+		name: "verifywait",
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pongPacket:
+				err := net.handleKnownPong(n, pkt)
+				return known, err
+			case pongTimeout:
+				return unknown, nil
+			default:
+				return verifywait, errInvalidEvent
+			}
+		},
+	}
+
+	remoteverifywait = &nodeState{
+		name: "remoteverifywait",
+		enter: func(net *Network, n *Node) {
+			net.timedEvent(respTimeout, n, pingTimeout)
+		},
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pingPacket:
+				net.handlePing(n, pkt)
+				return remoteverifywait, nil
+			case pingTimeout:
+				return known, nil
+			default:
+				return remoteverifywait, errInvalidEvent
+			}
+		},
+	}
+
+	known = &nodeState{
+		name:     "known",
+		canQuery: true,
+		enter: func(net *Network, n *Node) {
+			n.queryTimeouts = 0
+			n.startNextQuery(net)
+			// Insert into the table and start revalidation of the last node
+			// in the bucket if it is full.
+			last := net.tab.add(n)
+			if last != nil && last.state == known {
+				// TODO: do this asynchronously
+				net.transition(last, contested)
+			}
+		},
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pingPacket:
+				net.handlePing(n, pkt)
+				return known, nil
+			case pongPacket:
+				err := net.handleKnownPong(n, pkt)
+				return known, err
+			default:
+				return net.handleQueryEvent(n, ev, pkt)
+			}
+		},
+	}
+
+	contested = &nodeState{
+		name:     "contested",
+		canQuery: true,
+		enter: func(net *Network, n *Node) {
+			net.ping(n, n.addr())
+		},
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pongPacket:
+				// Node is still alive.
+				err := net.handleKnownPong(n, pkt)
+				return known, err
+			case pongTimeout:
+				net.tab.deleteReplace(n)
+				return unresponsive, nil
+			case pingPacket:
+				net.handlePing(n, pkt)
+				return contested, nil
+			default:
+				return net.handleQueryEvent(n, ev, pkt)
+			}
+		},
+	}
+
+	unresponsive = &nodeState{
+		name:     "unresponsive",
+		canQuery: true,
+		handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+			switch ev {
+			case pingPacket:
+				net.handlePing(n, pkt)
+				return known, nil
+			case pongPacket:
+				err := net.handleKnownPong(n, pkt)
+				return known, err
+			default:
+				return net.handleQueryEvent(n, ev, pkt)
+			}
+		},
+	}
+}
+
+// handle processes packets sent by n and events related to n.
+func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error {
+	if pkt != nil {
+		if err := net.checkPacket(n, ev, pkt); err != nil {
+			return err
+		}
+		// Start the background expiration goroutine after the first
+		// successful communication. Subsequent calls have no effect if it
+		// is already running. We do this here instead of somewhere else
+		// so that the search for seed nodes also considers older nodes
+		// that would otherwise be removed by the expirer.
+		if net.db != nil {
+			net.db.ensureExpirer()
+		}
+	}
+	if n.state == nil {
+		n.state = unknown // no state recorded yet; presumably safe to start from unknown — TODO confirm
+	}
+	next, err := n.state.handle(net, n, ev, pkt)
+	net.transition(n, next)
+	return err
+}
+
+func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error {
+	// Replay prevention checks.
+	switch ev {
+	case pingPacket, findnodeHashPacket, neighborsPacket:
+		// TODO: check date is > last date seen
+		// TODO: check ping version
+	case pongPacket:
+		if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) {
+			// fmt.Println("pong reply token mismatch")
+			return fmt.Errorf("pong reply token mismatch")
+		}
+		n.pingEcho = nil
+	}
+	// Address validation.
+	// TODO: Ideally we would do the following:
+	//  - reject all packets with wrong address except ping.
+	//  - for ping with new address, transition to verifywait but keep the
+	//    previous node (with old address) around. if the new one reaches known,
+	//    swap it out.
+	return nil
+}
+
+func (net *Network) transition(n *Node, next *nodeState) {
+	if n.state != next {
+		n.state = next
+		if next.enter != nil {
+			next.enter(net, n)
+		}
+	}
+
+	// TODO: persist/unpersist node
+}
+
+func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) {
+	timeout := timeoutEvent{ev, n}
+	net.timeoutTimers[timeout] = time.AfterFunc(d, func() {
+		select {
+		case net.timeout <- timeout:
+		case <-net.closed:
+		}
+	})
+}
+
+func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) {
+	timer := net.timeoutTimers[timeoutEvent{ev, n}]
+	if timer != nil {
+		timer.Stop()
+		delete(net.timeoutTimers, timeoutEvent{ev, n})
+	}
+}
+
+func (net *Network) ping(n *Node, addr *net.UDPAddr) {
+	debugLog(fmt.Sprintf("ping(node = %x)", n.ID[:8]))
+	n.pingTopics = net.ticketStore.regTopicSet()
+	n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics)
+	net.timedEvent(respTimeout, n, pongTimeout)
+}
+
+func (net *Network) handlePing(n *Node, pkt *ingressPacket) {
+	debugLog(fmt.Sprintf("handlePing(node = %x)", n.ID[:8]))
+	ping := pkt.data.(*ping)
+	n.TCP = ping.From.TCP
+	t := net.topictab.getTicket(n, ping.Topics)
+
+	pong := &pong{
+		To:         makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB
+		ReplyTok:   pkt.hash,
+		Expiration: uint64(time.Now().Add(expiration).Unix()),
+	}
+	ticketToPong(t, pong)
+	net.conn.send(n, pongPacket, pong)
+}
+
+func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error {
+	debugLog(fmt.Sprintf("handleKnownPong(node = %x)", n.ID[:8]))
+	net.abortTimedEvent(n, pongTimeout)
+	now := mclock.Now()
+	ticket, err := pongToTicket(now, n.pingTopics, n, pkt)
+	if err == nil {
+		// fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data)
+		net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket)
+	} else {
+		debugLog(fmt.Sprintf(" error: %v", err))
+	}
+
+	n.pingEcho = nil
+	n.pingTopics = nil
+	return err
+}
+
+func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) {
+	switch ev {
+	case findnodePacket:
+		target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:])
+		results := net.tab.closest(target, bucketSize).entries
+		net.conn.sendNeighbours(n, results)
+		return n.state, nil
+	case neighborsPacket:
+		err := net.handleNeighboursPacket(n, pkt.data.(*neighbors))
+		return n.state, err
+	case neighboursTimeout:
+		if n.pendingNeighbours != nil {
+			n.pendingNeighbours.reply <- nil
+			n.pendingNeighbours = nil
+		}
+		n.queryTimeouts++
+		if n.queryTimeouts > maxFindnodeFailures && n.state == known {
+			return contested, errors.New("too many timeouts")
+		}
+		return n.state, nil
+
+	// v5
+
+	case findnodeHashPacket:
+		results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries
+		net.conn.sendNeighbours(n, results)
+		return n.state, nil
+	case topicRegisterPacket:
+		//fmt.Println("got topicRegisterPacket")
+		regdata := pkt.data.(*topicRegister)
+		pong, err := net.checkTopicRegister(regdata)
+		if err != nil {
+			//fmt.Println(err)
+			return n.state, fmt.Errorf("bad waiting ticket: %v", err)
+		}
+		net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods)
+		return n.state, nil
+	case topicQueryPacket:
+		// TODO: handle expiration
+		topic := pkt.data.(*topicQuery).Topic
+		results := net.topictab.getEntries(topic)
+		if _, ok := net.ticketStore.tickets[topic]; ok {
+			results = append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too
+		}
+		if len(results) > 10 {
+			results = results[:10]
+		}
+		var hash common.Hash
+		copy(hash[:], pkt.hash)
+		net.conn.sendTopicNodes(n, hash, results)
+		return n.state, nil
+	case topicNodesPacket:
+		p := pkt.data.(*topicNodes)
+		if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) {
+			n.queryTimeouts++
+			if n.queryTimeouts > maxFindnodeFailures && n.state == known {
+				return contested, errors.New("too many timeouts")
+			}
+		}
+		return n.state, nil
+
+	default:
+		return n.state, errInvalidEvent
+	}
+}
+
+func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
+	var pongpkt ingressPacket
+	if err := decodePacket(data.Pong, &pongpkt); err != nil {
+		return nil, err
+	}
+	if pongpkt.ev != pongPacket {
+		return nil, errors.New("is not pong packet")
+	}
+	if pongpkt.remoteID != net.tab.self.ID {
+		return nil, errors.New("not signed by us")
+	}
+	// check that we previously authorised all topics
+	// that the other side is trying to register.
+	if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
+		return nil, errors.New("topic hash mismatch")
+	}
+	if data.Idx < 0 || int(data.Idx) >= len(data.Topics) {
+		return nil, errors.New("topic index out of range")
+	}
+	return pongpkt.data.(*pong), nil
+}
+
+func rlpHash(x interface{}) (h common.Hash) {
+	hw := sha3.NewKeccak256()
+	rlp.Encode(hw, x)
+	hw.Sum(h[:0])
+	return h
+}
+
+func (net *Network) handleNeighboursPacket(n *Node, req *neighbors) error {
+	if n.pendingNeighbours == nil {
+		return errNoQuery
+	}
+	net.abortTimedEvent(n, neighboursTimeout)
+
+	nodes := make([]*Node, len(req.Nodes))
+	for i, rn := range req.Nodes {
+		nn, err := net.internNodeFromNeighbours(rn)
+		if err != nil {
+			glog.V(logger.Debug).Infof("invalid neighbour from %x: %v", n.ID[:8], err)
+			continue
+		}
+		nodes[i] = nn
+		// Start validation of query results immediately.
+		// This fills the table quickly.
+		// TODO: generates way too many packets, maybe do it via queue.
+		if nn.state == unknown {
+			net.transition(nn, verifyinit)
+		}
+	}
+	// TODO: don't ignore second packet
+	n.pendingNeighbours.reply <- nodes
+	n.pendingNeighbours = nil
+	// Now that this query is done, start the next one.
+	n.startNextQuery(net)
+	return nil
+}
diff --git a/p2p/discv5/net_test.go b/p2p/discv5/net_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..422daa33bada4336998f40af179a1106ddc5204b
--- /dev/null
+++ b/p2p/discv5/net_test.go
@@ -0,0 +1,371 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"fmt"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+// TestNetwork_Lookup runs a recursive lookup against the premined
+// testnet transport and checks that the result set is full, free of
+// duplicates, and sorted by distance to the lookup target.
+func TestNetwork_Lookup(t *testing.T) {
+	key, _ := crypto.GenerateKey()
+	network, err := newNetwork(lookupTestnet, key.PublicKey, nil, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lookupTestnet.net = network
+	defer network.Close()
+
+	// lookup on empty table returns no nodes
+	// if results := network.Lookup(lookupTestnet.target, false); len(results) > 0 {
+	// 	t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
+	// }
+	// seed table with initial node (otherwise lookup will terminate immediately)
+	// The seed's UDP port (256) encodes its log distance; see sendFindnodeHash.
+	seeds := []*Node{NewNode(lookupTestnet.dists[256][0], net.IP{}, 256, 999)}
+	if err := network.SetFallbackNodes(seeds); err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(3 * time.Second)
+
+	results := network.Lookup(lookupTestnet.target)
+	t.Logf("results:")
+	for _, e := range results {
+		t.Logf("  ld=%d, %x", logdist(lookupTestnet.targetSha, e.sha), e.sha[:])
+	}
+	if len(results) != bucketSize {
+		t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize)
+	}
+	if hasDuplicates(results) {
+		t.Errorf("result set contains duplicate entries")
+	}
+	if !sortedByDistanceTo(lookupTestnet.targetSha, results) {
+		t.Errorf("result set not sorted by distance to target")
+	}
+	// TODO: check result nodes are actually closest
+}
+
+// This is the test network for the Lookup test.
+// The nodes were obtained by running testnet.mine with a random NodeID as target.
+var lookupTestnet = &preminedTestnet{
+	target:    MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
+	targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61},
+	dists: [257][]NodeID{
+		240: []NodeID{
+			MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
+			MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
+		},
+		244: []NodeID{
+			MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
+		},
+		246: []NodeID{
+			MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
+			MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
+			MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
+		},
+		247: []NodeID{
+			MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
+			MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
+			MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
+			MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
+			MustHexID("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"),
+			MustHexID("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"),
+			MustHexID("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"),
+			MustHexID("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"),
+			MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
+			MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
+		},
+		248: []NodeID{
+			MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
+			MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
+			MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
+			MustHexID("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"),
+			MustHexID("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"),
+			MustHexID("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"),
+			MustHexID("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"),
+			MustHexID("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"),
+			MustHexID("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"),
+			MustHexID("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"),
+			MustHexID("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"),
+			MustHexID("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"),
+			MustHexID("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"),
+			MustHexID("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"),
+			MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
+			MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
+		},
+		249: []NodeID{
+			MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
+			MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
+			MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
+			MustHexID("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"),
+			MustHexID("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"),
+			MustHexID("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"),
+			MustHexID("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"),
+			MustHexID("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"),
+			MustHexID("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"),
+			MustHexID("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"),
+			MustHexID("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"),
+			MustHexID("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"),
+			MustHexID("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"),
+			MustHexID("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"),
+			MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
+			MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
+		},
+		250: []NodeID{
+			MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
+			MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
+			MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
+			MustHexID("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"),
+			MustHexID("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"),
+			MustHexID("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"),
+			MustHexID("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"),
+			MustHexID("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"),
+			MustHexID("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"),
+			MustHexID("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"),
+			MustHexID("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"),
+			MustHexID("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"),
+			MustHexID("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"),
+			MustHexID("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"),
+			MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
+			MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
+		},
+		251: []NodeID{
+			MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
+			MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
+			MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
+			MustHexID("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"),
+			MustHexID("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"),
+			MustHexID("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"),
+			MustHexID("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"),
+			MustHexID("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"),
+			MustHexID("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"),
+			MustHexID("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"),
+			MustHexID("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"),
+			MustHexID("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"),
+			MustHexID("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"),
+			MustHexID("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"),
+			MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
+			MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
+		},
+		252: []NodeID{
+			MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
+			MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
+			MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
+			MustHexID("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"),
+			MustHexID("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"),
+			MustHexID("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"),
+			MustHexID("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"),
+			MustHexID("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"),
+			MustHexID("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"),
+			MustHexID("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"),
+			MustHexID("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"),
+			MustHexID("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"),
+			MustHexID("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"),
+			MustHexID("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"),
+			MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
+			MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
+		},
+		253: []NodeID{
+			MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
+			MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
+			MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
+			MustHexID("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"),
+			MustHexID("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"),
+			MustHexID("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"),
+			MustHexID("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"),
+			MustHexID("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"),
+			MustHexID("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"),
+			MustHexID("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"),
+			MustHexID("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"),
+			MustHexID("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"),
+			MustHexID("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"),
+			MustHexID("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"),
+			MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
+			MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
+		},
+		254: []NodeID{
+			MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
+			MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
+			MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
+			MustHexID("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"),
+			MustHexID("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"),
+			MustHexID("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"),
+			MustHexID("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"),
+			MustHexID("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"),
+			MustHexID("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"),
+			MustHexID("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"),
+			MustHexID("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"),
+			MustHexID("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"),
+			MustHexID("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"),
+			MustHexID("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"),
+			MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
+			MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
+		},
+		255: []NodeID{
+			MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
+			MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
+			MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
+			MustHexID("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"),
+			MustHexID("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"),
+			MustHexID("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"),
+			MustHexID("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"),
+			MustHexID("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"),
+			MustHexID("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"),
+			MustHexID("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"),
+			MustHexID("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"),
+			MustHexID("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"),
+			MustHexID("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"),
+			MustHexID("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"),
+			MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
+			MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
+		},
+		256: []NodeID{
+			MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
+			MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
+			MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
+			MustHexID("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"),
+			MustHexID("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"),
+			MustHexID("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"),
+			MustHexID("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"),
+			MustHexID("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"),
+			MustHexID("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"),
+			MustHexID("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"),
+			MustHexID("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"),
+			MustHexID("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"),
+			MustHexID("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"),
+			MustHexID("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"),
+			MustHexID("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"),
+			MustHexID("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"),
+		},
+	},
+}
+
+// preminedTestnet is a fake transport whose node IDs were generated in
+// advance (see mine) so they fall at known log distances from target.
+// It answers findnode queries locally from the dists table instead of
+// using the network.
+type preminedTestnet struct {
+	target    NodeID
+	targetSha common.Hash // sha3(target)
+	dists     [hashBits + 1][]NodeID
+	net       *Network
+}
+
+// sendFindnode (by NodeID) is not expected during the lookup test;
+// panic so an unexpected call is surfaced immediately.
+func (tn *preminedTestnet) sendFindnode(to *Node, target NodeID) {
+	panic("sendFindnode called")
+}
+
+// sendFindnodeHash answers a findnode-by-hash query from the premined
+// table. The queried node's UDP port encodes its log distance to the
+// target; the reply contains the premined IDs at that distance, each
+// given a UDP port one closer so the lookup converges toward 0.
+func (tn *preminedTestnet) sendFindnodeHash(to *Node, target common.Hash) {
+	// current log distance is encoded in port number
+	// fmt.Println("findnode query at dist", toaddr.Port)
+	if to.UDP == 0 {
+		panic("query to node at distance 0")
+	}
+	next := to.UDP - 1
+	var result []rpcNode
+	for i, id := range tn.dists[to.UDP] {
+		result = append(result, nodeToRPC(NewNode(id, net.ParseIP("127.0.0.1"), next, uint16(i)+1)))
+	}
+	injectResponse(tn.net, to, neighborsPacket, &neighbors{Nodes: result})
+}
+
+// sendPing immediately answers its own ping with a canned pong and
+// returns the ping hash that the pong's ReplyTok echoes.
+func (tn *preminedTestnet) sendPing(to *Node, addr *net.UDPAddr, topics []Topic) []byte {
+	injectResponse(tn.net, to, pongPacket, &pong{ReplyTok: []byte{1}})
+	return []byte{1}
+}
+
+// send fakes the wire for the packet types the lookup needs: pings
+// are answered with a canned pong, pongs are dropped, and findnode
+// (by hash) is answered exactly like sendFindnodeHash above. Any
+// other packet type indicates a test bug and panics.
+func (tn *preminedTestnet) send(to *Node, ptype nodeEvent, data interface{}) (hash []byte) {
+	switch ptype {
+	case pingPacket:
+		injectResponse(tn.net, to, pongPacket, &pong{ReplyTok: []byte{1}})
+	case pongPacket:
+		// ignored
+	case findnodeHashPacket:
+		// current log distance is encoded in port number
+		// fmt.Println("findnode query at dist", toaddr.Port)
+		if to.UDP == 0 {
+			panic("query to node at distance 0")
+		}
+		next := to.UDP - 1
+		var result []rpcNode
+		for i, id := range tn.dists[to.UDP] {
+			result = append(result, nodeToRPC(NewNode(id, net.ParseIP("127.0.0.1"), next, uint16(i)+1)))
+		}
+		injectResponse(tn.net, to, neighborsPacket, &neighbors{Nodes: result})
+	default:
+		panic("send(" + ptype.String() + ")")
+	}
+	return []byte{2}
+}
+
+// sendNeighbours is not expected during the lookup test; panic to
+// surface an unexpected call.
+func (tn *preminedTestnet) sendNeighbours(to *Node, nodes []*Node) {
+	panic("sendNeighbours called")
+}
+
+// sendTopicQuery is not expected during the lookup test; panic to
+// surface an unexpected call.
+func (tn *preminedTestnet) sendTopicQuery(to *Node, topic Topic) {
+	panic("sendTopicQuery called")
+}
+
+// sendTopicNodes is not expected during the lookup test; panic to
+// surface an unexpected call.
+func (tn *preminedTestnet) sendTopicNodes(to *Node, queryHash common.Hash, nodes []*Node) {
+	panic("sendTopicNodes called")
+}
+
+// sendTopicRegister is not expected during the lookup test; panic to
+// surface an unexpected call.
+func (tn *preminedTestnet) sendTopicRegister(to *Node, topics []Topic, idx int, pong []byte) {
+	panic("sendTopicRegister called")
+}
+
+// Close and localAddr complete the transport interface. The premined
+// testnet has no real socket: Close is a no-op and localAddr returns
+// a zero UDP address.
+func (*preminedTestnet) Close()                  {}
+func (*preminedTestnet) localAddr() *net.UDPAddr { return new(net.UDPAddr) }
+
+// mine generates a testnet struct literal with nodes at
+// various distances to the given target. It mines random keys until
+// bucketSize*10 IDs have been placed into not-yet-full distance
+// buckets, then prints the literal to stdout for pasting into this
+// file (this is how lookupTestnet above was produced).
+func (n *preminedTestnet) mine(target NodeID) {
+	n.target = target
+	n.targetSha = crypto.Keccak256Hash(n.target[:])
+	found := 0
+	for found < bucketSize*10 {
+		k := newkey()
+		id := PubkeyID(&k.PublicKey)
+		sha := crypto.Keccak256Hash(id[:])
+		ld := logdist(n.targetSha, sha)
+		if len(n.dists[ld]) < bucketSize {
+			n.dists[ld] = append(n.dists[ld], id)
+			fmt.Println("found ID with ld", ld)
+			found++
+		}
+	}
+	// Emit the struct literal, skipping empty distance buckets.
+	fmt.Println("&preminedTestnet{")
+	fmt.Printf("	target: %#v,\n", n.target)
+	fmt.Printf("	targetSha: %#v,\n", n.targetSha)
+	fmt.Printf("	dists: [%d][]NodeID{\n", len(n.dists))
+	for ld, ns := range n.dists {
+		if len(ns) == 0 {
+			continue
+		}
+		fmt.Printf("		%d: []NodeID{\n", ld)
+		for _, n := range ns {
+			fmt.Printf("			MustHexID(\"%x\"),\n", n[:])
+		}
+		fmt.Println("		},")
+	}
+	fmt.Println("	},")
+	fmt.Println("}")
+}
+
+// injectResponse feeds a fake incoming packet from from into the
+// network's read path, as if it had arrived over UDP. It runs in a
+// goroutine so the fake transport's send methods never block the
+// Network loop that called them.
+func injectResponse(net *Network, from *Node, ev nodeEvent, packet interface{}) {
+	go net.reqReadPacket(ingressPacket{remoteID: from.ID, remoteAddr: from.addr(), ev: ev, data: packet})
+}
diff --git a/p2p/discv5/node.go b/p2p/discv5/node.go
new file mode 100644
index 0000000000000000000000000000000000000000..b6b6f149ddfa3b12141e0c1f14f926113c8197d3
--- /dev/null
+++ b/p2p/discv5/node.go
@@ -0,0 +1,423 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"math/big"
+	"math/rand"
+	"net"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+// Node represents a host on the network.
+// The public fields of Node may not be modified.
+type Node struct {
+	IP       net.IP // len 4 for IPv4 or 16 for IPv6
+	UDP, TCP uint16 // port numbers
+	ID       NodeID // the node's public key
+
+	// Network-related fields are contained in nodeNetGuts.
+	// These fields are not supposed to be used off the
+	// Network.loop goroutine.
+	nodeNetGuts
+}
+
+// NewNode creates a new node. It is mostly meant to be used for
+// testing purposes.
+func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
+	if ipv4 := ip.To4(); ipv4 != nil {
+		ip = ipv4
+	}
+	return &Node{
+		IP:          ip,
+		UDP:         udpPort,
+		TCP:         tcpPort,
+		ID:          id,
+		nodeNetGuts: nodeNetGuts{sha: crypto.Keccak256Hash(id[:])},
+	}
+}
+
+func (n *Node) addr() *net.UDPAddr {
+	return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
+}
+
+func (n *Node) setAddr(a *net.UDPAddr) {
+	n.IP = a.IP
+	if ipv4 := a.IP.To4(); ipv4 != nil {
+		n.IP = ipv4
+	}
+	n.UDP = uint16(a.Port)
+}
+
+// compares the given address against the stored values.
+func (n *Node) addrEqual(a *net.UDPAddr) bool {
+	ip := a.IP
+	if ipv4 := a.IP.To4(); ipv4 != nil {
+		ip = ipv4
+	}
+	return n.UDP == uint16(a.Port) && bytes.Equal(n.IP, ip)
+}
+
+// Incomplete returns true for nodes with no IP address.
+func (n *Node) Incomplete() bool {
+	return n.IP == nil
+}
+
+// checks whether n is a valid complete node.
+func (n *Node) validateComplete() error {
+	if n.Incomplete() {
+		return errors.New("incomplete node")
+	}
+	if n.UDP == 0 {
+		return errors.New("missing UDP port")
+	}
+	if n.TCP == 0 {
+		return errors.New("missing TCP port")
+	}
+	if n.IP.IsMulticast() || n.IP.IsUnspecified() {
+		return errors.New("invalid IP (multicast/unspecified)")
+	}
+	_, err := n.ID.Pubkey() // validate the key (on curve, etc.)
+	return err
+}
+
+// The string representation of a Node is a URL.
+// Please see ParseNode for a description of the format.
+func (n *Node) String() string {
+	u := url.URL{Scheme: "enode"}
+	if n.Incomplete() {
+		u.Host = fmt.Sprintf("%x", n.ID[:])
+	} else {
+		addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
+		u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
+		u.Host = addr.String()
+		if n.UDP != n.TCP {
+			u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
+		}
+	}
+	return u.String()
+}
+
+var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
+
+// ParseNode parses a node designator.
+//
+// There are two basic forms of node designators
+//   - incomplete nodes, which only have the public key (node ID)
+//   - complete nodes, which contain the public key and IP/Port information
+//
+// For incomplete nodes, the designator must look like one of these
+//
+//    enode://<hex node id>
+//    <hex node id>
+//
+// For complete nodes, the node ID is encoded in the username portion
+// of the URL, separated from the host by an @ sign. The hostname can
+// only be given as an IP address, DNS domain names are not allowed.
+// The port in the host name section is the TCP listening port. If the
+// TCP and UDP (discovery) ports differ, the UDP port is specified as
+// query parameter "discport".
+//
+// In the following example, the node URL describes
+// a node with IP address 10.3.58.6, TCP listening port 30303
+// and UDP discovery port 30301.
+//
+//    enode://<hex node id>@10.3.58.6:30303?discport=30301
+func ParseNode(rawurl string) (*Node, error) {
+	if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
+		id, err := HexID(m[1])
+		if err != nil {
+			return nil, fmt.Errorf("invalid node ID (%v)", err)
+		}
+		return NewNode(id, nil, 0, 0), nil
+	}
+	return parseComplete(rawurl)
+}
+
+func parseComplete(rawurl string) (*Node, error) {
+	var (
+		id               NodeID
+		ip               net.IP
+		tcpPort, udpPort uint64
+	)
+	u, err := url.Parse(rawurl)
+	if err != nil {
+		return nil, err
+	}
+	if u.Scheme != "enode" {
+		return nil, errors.New("invalid URL scheme, want \"enode\"")
+	}
+	// Parse the Node ID from the user portion.
+	if u.User == nil {
+		return nil, errors.New("does not contain node ID")
+	}
+	if id, err = HexID(u.User.String()); err != nil {
+		return nil, fmt.Errorf("invalid node ID (%v)", err)
+	}
+	// Parse the IP address.
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		return nil, fmt.Errorf("invalid host: %v", err)
+	}
+	if ip = net.ParseIP(host); ip == nil {
+		return nil, errors.New("invalid IP address")
+	}
+	// Ensure the IP is 4 bytes long for IPv4 addresses.
+	if ipv4 := ip.To4(); ipv4 != nil {
+		ip = ipv4
+	}
+	// Parse the port numbers.
+	if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
+		return nil, errors.New("invalid port")
+	}
+	udpPort = tcpPort
+	qv := u.Query()
+	if qv.Get("discport") != "" {
+		udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
+		if err != nil {
+			return nil, errors.New("invalid discport in query")
+		}
+	}
+	return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
+}
+
+// MustParseNode parses a node URL. It panics if the URL is not valid.
+func MustParseNode(rawurl string) *Node {
+	n, err := ParseNode(rawurl)
+	if err != nil {
+		panic("invalid node URL: " + err.Error())
+	}
+	return n
+}
+
+// type nodeQueue []*Node
+//
+// // pushNew adds n to the end if it is not present.
+// func (nl *nodeList) appendNew(n *Node) {
+// 	for _, entry := range n {
+// 		if entry == n {
+// 			return
+// 		}
+// 	}
+// 	*nq = append(*nq, n)
+// }
+//
+// // popRandom removes a random node. Nodes closer to
+// // the head of the list have a slightly higher probability.
+// func (nl *nodeList) popRandom() *Node {
+// 	ix := rand.Intn(len(*nq))
+// 	//TODO: probability as mentioned above.
+// 	nl.removeIndex(ix)
+// }
+//
+// func (nl *nodeList) removeIndex(i int) *Node {
+// 	slice = *nl
+// 	if len(*slice) <= i {
+// 		return nil
+// 	}
+// 	*nl = append(slice[:i], slice[i+1:]...)
+// }
+
+const nodeIDBits = 512
+
+// NodeID is a unique identifier for each node.
+// The node identifier is a marshaled elliptic curve public key.
+type NodeID [nodeIDBits / 8]byte
+
+// NodeID prints as a long hexadecimal number.
+func (n NodeID) String() string {
+	return fmt.Sprintf("%x", n[:])
+}
+
+// The Go syntax representation of a NodeID is a call to HexID.
+func (n NodeID) GoString() string {
+	return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
+}
+
+// HexID converts a hex string to a NodeID.
+// The string may be prefixed with 0x.
+func HexID(in string) (NodeID, error) {
+	if strings.HasPrefix(in, "0x") {
+		in = in[2:]
+	}
+	var id NodeID
+	b, err := hex.DecodeString(in)
+	if err != nil {
+		return id, err
+	} else if len(b) != len(id) {
+		return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
+	}
+	copy(id[:], b)
+	return id, nil
+}
+
+// MustHexID converts a hex string to a NodeID.
+// It panics if the string is not a valid NodeID.
+func MustHexID(in string) NodeID {
+	id, err := HexID(in)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// PubkeyID returns a marshaled representation of the given public key.
+func PubkeyID(pub *ecdsa.PublicKey) NodeID {
+	var id NodeID
+	pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+	if len(pbytes)-1 != len(id) {
+		panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)*8))
+	}
+	copy(id[:], pbytes[1:])
+	return id
+}
+
+// Pubkey returns the public key represented by the node ID.
+// It returns an error if the ID is not a point on the curve.
+func (id NodeID) Pubkey() (*ecdsa.PublicKey, error) {
+	p := &ecdsa.PublicKey{Curve: S256(), X: new(big.Int), Y: new(big.Int)}
+	half := len(id) / 2
+	p.X.SetBytes(id[:half])
+	p.Y.SetBytes(id[half:])
+	if !p.Curve.IsOnCurve(p.X, p.Y) {
+		return nil, errors.New("id is invalid secp256k1 curve point")
+	}
+	return p, nil
+}
+
+func (id NodeID) mustPubkey() ecdsa.PublicKey {
+	pk, err := id.Pubkey()
+	if err != nil {
+		panic(err)
+	}
+	return *pk
+}
+
+// recoverNodeID computes the public key used to sign the
+// given hash from the signature.
+func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
+	pubkey, err := crypto.Ecrecover(hash, sig)
+	if err != nil {
+		return id, err
+	}
+	if len(pubkey)-1 != len(id) {
+		return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8)
+	}
+	for i := range id {
+		id[i] = pubkey[i+1]
+	}
+	return id, nil
+}
+
+// distcmp compares the distances a->target and b->target.
+// Returns -1 if a is closer to target, 1 if b is closer to target
+// and 0 if they are equal.
+func distcmp(target, a, b common.Hash) int {
+	for i := range target {
+		da := a[i] ^ target[i]
+		db := b[i] ^ target[i]
+		if da > db {
+			return 1
+		} else if da < db {
+			return -1
+		}
+	}
+	return 0
+}
+
+// table of leading zero counts for bytes [0..255]
+var lzcount = [256]int{
+	8, 7, 6, 6, 5, 5, 5, 5,
+	4, 4, 4, 4, 4, 4, 4, 4,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+}
+
+// logdist returns the logarithmic distance between a and b, log2(a ^ b).
+func logdist(a, b common.Hash) int {
+	lz := 0
+	for i := range a {
+		x := a[i] ^ b[i]
+		if x == 0 {
+			lz += 8
+		} else {
+			lz += lzcount[x]
+			break
+		}
+	}
+	return len(a)*8 - lz
+}
+
+// hashAtDistance returns a random hash such that logdist(a, b) == n
+func hashAtDistance(a common.Hash, n int) (b common.Hash) {
+	if n == 0 {
+		return a
+	}
+	// flip bit at position n, fill the rest with random bits
+	b = a
+	pos := len(a) - n/8 - 1
+	bit := byte(0x01) << (byte(n%8) - 1)
+	if bit == 0 {
+		pos++
+		bit = 0x80
+	}
+	b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
+	for i := pos + 1; i < len(a); i++ {
+		b[i] = byte(rand.Intn(256)) // Intn(256), not 255: otherwise 0xff could never occur
+	}
+	return b
+}
diff --git a/p2p/discv5/node_test.go b/p2p/discv5/node_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce4ad9e4d48ef945557892c2222a528fb333fda6
--- /dev/null
+++ b/p2p/discv5/node_test.go
@@ -0,0 +1,305 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"fmt"
+	"math/big"
+	"math/rand"
+	"net"
+	"reflect"
+	"strings"
+	"testing"
+	"testing/quick"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+func ExampleNewNode() {
+	id := MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
+
+	// Complete nodes contain UDP and TCP endpoints:
+	n1 := NewNode(id, net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 52150, 30303)
+	fmt.Println("n1:", n1)
+	fmt.Println("n1.Incomplete() ->", n1.Incomplete())
+
+	// An incomplete node can be created by passing zero values
+	// for all parameters except id.
+	n2 := NewNode(id, nil, 0, 0)
+	fmt.Println("n2:", n2)
+	fmt.Println("n2.Incomplete() ->", n2.Incomplete())
+
+	// Output:
+	// n1: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:30303?discport=52150
+	// n1.Incomplete() -> false
+	// n2: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439
+	// n2.Incomplete() -> true
+}
+
+var parseNodeTests = []struct {
+	rawurl     string
+	wantError  string
+	wantResult *Node
+}{
+	{
+		rawurl:    "http://foobar",
+		wantError: `invalid URL scheme, want "enode"`,
+	},
+	{
+		rawurl:    "enode://01010101@123.124.125.126:3",
+		wantError: `invalid node ID (wrong length, want 128 hex chars)`,
+	},
+	// Complete nodes with IP address.
+	{
+		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@hostname:3",
+		wantError: `invalid IP address`,
+	},
+	{
+		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:foo",
+		wantError: `invalid port`,
+	},
+	{
+		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:3?discport=foo",
+		wantError: `invalid discport in query`,
+	},
+	{
+		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{0x7f, 0x0, 0x0, 0x1},
+			52150,
+			52150,
+		),
+	},
+	{
+		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[::]:52150",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.ParseIP("::"),
+			52150,
+			52150,
+		),
+	},
+	{
+		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
+			52150,
+			52150,
+		),
+	},
+	{
+		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150?discport=22334",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			net.IP{0x7f, 0x0, 0x0, 0x1},
+			22334,
+			52150,
+		),
+	},
+	// Incomplete nodes with no address.
+	{
+		rawurl: "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			nil, 0, 0,
+		),
+	},
+	{
+		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439",
+		wantResult: NewNode(
+			MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+			nil, 0, 0,
+		),
+	},
+	// Invalid URLs
+	{
+		rawurl:    "01010101",
+		wantError: `invalid node ID (wrong length, want 128 hex chars)`,
+	},
+	{
+		rawurl:    "enode://01010101",
+		wantError: `invalid node ID (wrong length, want 128 hex chars)`,
+	},
+	{
+		// This test checks that errors from url.Parse are handled.
+		rawurl:    "://foo",
+		wantError: `parse ://foo: missing protocol scheme`,
+	},
+}
+
+func TestParseNode(t *testing.T) {
+	for _, test := range parseNodeTests {
+		n, err := ParseNode(test.rawurl)
+		if test.wantError != "" {
+			if err == nil {
+				t.Errorf("test %q:\n  got nil error, expected %#q", test.rawurl, test.wantError)
+				continue
+			} else if err.Error() != test.wantError {
+				t.Errorf("test %q:\n  got error %#q, expected %#q", test.rawurl, err.Error(), test.wantError)
+				continue
+			}
+		} else {
+			if err != nil {
+				t.Errorf("test %q:\n  unexpected error: %v", test.rawurl, err)
+				continue
+			}
+			if !reflect.DeepEqual(n, test.wantResult) {
+				t.Errorf("test %q:\n  result mismatch:\ngot:  %#v, want: %#v", test.rawurl, n, test.wantResult)
+			}
+		}
+	}
+}
+
+func TestNodeString(t *testing.T) {
+	for i, test := range parseNodeTests {
+		if test.wantError == "" && strings.HasPrefix(test.rawurl, "enode://") {
+			str := test.wantResult.String()
+			if str != test.rawurl {
+				t.Errorf("test %d: Node.String() mismatch:\ngot:  %s\nwant: %s", i, str, test.rawurl)
+			}
+		}
+	}
+}
+
+func TestHexID(t *testing.T) {
+	ref := NodeID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188}
+	id1 := MustHexID("0x000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
+	id2 := MustHexID("000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
+
+	if id1 != ref {
+		t.Errorf("wrong id1\ngot  %v\nwant %v", id1[:], ref[:])
+	}
+	if id2 != ref {
+		t.Errorf("wrong id2\ngot  %v\nwant %v", id2[:], ref[:])
+	}
+}
+
+func TestNodeID_recover(t *testing.T) {
+	prv := newkey()
+	hash := make([]byte, 32)
+	sig, err := crypto.Sign(hash, prv)
+	if err != nil {
+		t.Fatalf("signing error: %v", err)
+	}
+
+	pub := PubkeyID(&prv.PublicKey)
+	recpub, err := recoverNodeID(hash, sig)
+	if err != nil {
+		t.Fatalf("recovery error: %v", err)
+	}
+	if pub != recpub {
+		t.Errorf("recovered wrong pubkey:\ngot:  %v\nwant: %v", recpub, pub)
+	}
+
+	ecdsa, err := pub.Pubkey()
+	if err != nil {
+		t.Errorf("Pubkey error: %v", err)
+	}
+	if !reflect.DeepEqual(ecdsa, &prv.PublicKey) {
+		t.Errorf("Pubkey mismatch:\n  got:  %#v\n  want: %#v", ecdsa, &prv.PublicKey)
+	}
+}
+
+func TestNodeID_pubkeyBad(t *testing.T) {
+	ecdsa, err := NodeID{}.Pubkey()
+	if err == nil {
+		t.Error("expected error for zero ID")
+	}
+	if ecdsa != nil {
+		t.Error("expected nil result")
+	}
+}
+
+func TestNodeID_distcmp(t *testing.T) {
+	distcmpBig := func(target, a, b common.Hash) int {
+		tbig := new(big.Int).SetBytes(target[:])
+		abig := new(big.Int).SetBytes(a[:])
+		bbig := new(big.Int).SetBytes(b[:])
+		return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
+	}
+	if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil {
+		t.Error(err)
+	}
+}
+
+// the random tests are likely to miss the case where the inputs are equal.
+func TestNodeID_distcmpEqual(t *testing.T) {
+	base := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+	x := common.Hash{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
+	if distcmp(base, x, x) != 0 {
+		t.Errorf("distcmp(base, x, x) != 0")
+	}
+}
+
+func TestNodeID_logdist(t *testing.T) {
+	logdistBig := func(a, b common.Hash) int {
+		abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
+		return new(big.Int).Xor(abig, bbig).BitLen()
+	}
+	if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil {
+		t.Error(err)
+	}
+}
+
+// the random tests are likely to miss the case where the inputs are equal.
+func TestNodeID_logdistEqual(t *testing.T) {
+	x := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+	if logdist(x, x) != 0 {
+		t.Errorf("logdist(x, x) != 0")
+	}
+}
+
+func TestNodeID_hashAtDistance(t *testing.T) {
+	// we don't use quick.Check here because its output isn't
+	// very helpful when the test fails.
+	cfg := quickcfg()
+	for i := 0; i < cfg.MaxCount; i++ {
+		a := gen(common.Hash{}, cfg.Rand).(common.Hash)
+		dist := cfg.Rand.Intn(len(common.Hash{}) * 8)
+		result := hashAtDistance(a, dist)
+		actualdist := logdist(result, a)
+
+		if dist != actualdist {
+			t.Log("a:     ", a)
+			t.Log("result:", result)
+			t.Fatalf("#%d: distance of result is %d, want %d", i, actualdist, dist)
+		}
+	}
+}
+
+func quickcfg() *quick.Config {
+	return &quick.Config{
+		MaxCount: 5000,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
+	}
+}
+
+// TODO: The Generate method can be dropped when we require Go >= 1.5
+// because testing/quick learned to generate arrays in 1.5.
+
+func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {
+	var id NodeID
+	m := rand.Intn(len(id))
+	for i := len(id) - 1; i > m; i-- {
+		id[i] = byte(rand.Uint32())
+	}
+	return reflect.ValueOf(id)
+}
diff --git a/p2p/discv5/nodeevent_string.go b/p2p/discv5/nodeevent_string.go
new file mode 100644
index 0000000000000000000000000000000000000000..fde9045c52c318aa93ff1b287cdd6003afc7caed
--- /dev/null
+++ b/p2p/discv5/nodeevent_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type nodeEvent"; DO NOT EDIT
+
+package discv5
+
+import "fmt"
+
+const (
+	_nodeEvent_name_0 = "invalidEventpingPacketpongPacketfindnodePacketneighborsPacketfindnodeHashPackettopicRegisterPackettopicQueryPackettopicNodesPacket"
+	_nodeEvent_name_1 = "pongTimeoutpingTimeoutneighboursTimeout"
+)
+
+var (
+	_nodeEvent_index_0 = [...]uint8{0, 12, 22, 32, 46, 61, 79, 98, 114, 130}
+	_nodeEvent_index_1 = [...]uint8{0, 11, 22, 39}
+)
+
+func (i nodeEvent) String() string {
+	switch {
+	case 0 <= i && i <= 8:
+		return _nodeEvent_name_0[_nodeEvent_index_0[i]:_nodeEvent_index_0[i+1]]
+	case 265 <= i && i <= 267:
+		i -= 265
+		return _nodeEvent_name_1[_nodeEvent_index_1[i]:_nodeEvent_index_1[i+1]]
+	default:
+		return fmt.Sprintf("nodeEvent(%d)", i)
+	}
+}
diff --git a/p2p/discv5/ntp.go b/p2p/discv5/ntp.go
new file mode 100644
index 0000000000000000000000000000000000000000..81c0e63365803999c621c5e4b52de486eb903838
--- /dev/null
+++ b/p2p/discv5/ntp.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the NTP time drift detection via the SNTP protocol:
+//   https://tools.ietf.org/html/rfc4330
+
+package discv5
+
+import (
+	"fmt"
+	"net"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+)
+
+const (
+	ntpPool   = "pool.ntp.org" // ntpPool is the NTP server to query for the current time
+	ntpChecks = 3              // Number of measurements to do against the NTP server
+)
+
+// durationSlice attaches the methods of sort.Interface to []time.Duration,
+// sorting in increasing order.
+type durationSlice []time.Duration
+
+func (s durationSlice) Len() int           { return len(s) }
+func (s durationSlice) Less(i, j int) bool { return s[i] < s[j] }
+func (s durationSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// checkClockDrift queries an NTP server for clock drifts and warns the user if
+// one large enough is detected.
+func checkClockDrift() {
+	drift, err := sntpDrift(ntpChecks)
+	if err != nil {
+		return // drift detection is best-effort; stay silent on lookup failure
+	}
+	if drift < -driftThreshold || drift > driftThreshold {
+		warning := fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift)
+		howtofix := "Please enable network time synchronisation in system settings"
+		separator := strings.Repeat("-", len(warning))
+
+		glog.V(logger.Warn).Info(separator)
+		glog.V(logger.Warn).Info(warning)
+		glog.V(logger.Warn).Info(howtofix)
+		glog.V(logger.Warn).Info(separator)
+	} else {
+		glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift)
+	}
+}
+
+// sntpDrift does a naive time resolution against an NTP server and returns the
+// measured drift. This method uses the simple version of NTP. It's not precise
+// but should be fine for these purposes.
+//
+// Note, it executes two extra measurements compared to the number of requested
+// ones to be able to discard the two extremes as outliers.
+func sntpDrift(measurements int) (time.Duration, error) {
+	// Resolve the address of the NTP server
+	addr, err := net.ResolveUDPAddr("udp", ntpPool+":123")
+	if err != nil {
+		return 0, err
+	}
+	// Construct the time request (empty package with only 2 fields set):
+	//   Bits 3-5: Protocol version, 3
+	//   Bits 6-8: Mode of operation, client, 3
+	request := make([]byte, 48)
+	request[0] = 3<<3 | 3
+
+	// Execute each of the measurements
+	drifts := []time.Duration{}
+	for i := 0; i < measurements+2; i++ {
+		// Dial the NTP server and send the time retrieval request
+		conn, err := net.DialUDP("udp", nil, addr)
+		if err != nil {
+			return 0, err
+		}
+		sent := time.Now()
+		if _, err = conn.Write(request); err != nil {
+			conn.Close()
+			return 0, err
+		}
+		// Retrieve the reply and calculate the elapsed time
+		conn.SetDeadline(time.Now().Add(5 * time.Second))
+		reply := make([]byte, 48)
+		if _, err = conn.Read(reply); err != nil {
+			conn.Close()
+			return 0, err
+		}
+		elapsed := time.Since(sent)
+		conn.Close() // close per iteration; a defer here would leak sockets until return
+
+		// Reconstruct the time from the reply data
+		sec := uint64(reply[43]) | uint64(reply[42])<<8 | uint64(reply[41])<<16 | uint64(reply[40])<<24
+		frac := uint64(reply[47]) | uint64(reply[46])<<8 | uint64(reply[45])<<16 | uint64(reply[44])<<24
+
+		nanosec := sec*1e9 + (frac*1e9)>>32
+
+		t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec)).Local()
+
+		// Calculate the drift based on an assumed answer time of RTT/2
+		drifts = append(drifts, sent.Sub(t)+elapsed/2)
+	}
+	// Calculate average drift (drop two extremities to avoid outliers)
+	sort.Sort(durationSlice(drifts))
+
+	drift := time.Duration(0)
+	for i := 1; i < len(drifts)-1; i++ {
+		drift += drifts[i]
+	}
+	return drift / time.Duration(measurements), nil
+}
diff --git a/p2p/discv5/sim_run_test.go b/p2p/discv5/sim_run_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bded0cc023442a8f551385dbcd5da1a16cfcf0ac
--- /dev/null
+++ b/p2p/discv5/sim_run_test.go
@@ -0,0 +1,126 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+func getnacl() (string, error) {
+	switch runtime.GOARCH {
+	case "amd64":
+		_, err := exec.LookPath("sel_ldr_x86_64")
+		return "amd64p32", err
+	case "i386":
+		_, err := exec.LookPath("sel_ldr_i386")
+		return "i386", err
+	default:
+		return "", errors.New("nacl is not supported on " + runtime.GOARCH)
+	}
+}
+
+// runWithPlaygroundTime executes the caller
+// in the NaCl sandbox with faketime enabled.
+//
+// This function must be called from a Test* function
+// and the caller must skip the actual test when isHost is true.
+func runWithPlaygroundTime(t *testing.T) (isHost bool) {
+	if runtime.GOOS == "nacl" {
+		return false
+	}
+
+	// Get the caller.
+	callerPC, _, _, ok := runtime.Caller(1)
+	if !ok {
+		panic("can't get caller")
+	}
+	callerFunc := runtime.FuncForPC(callerPC)
+	if callerFunc == nil {
+		panic("can't get caller")
+	}
+	callerName := callerFunc.Name()[strings.LastIndexByte(callerFunc.Name(), '.')+1:]
+	if !strings.HasPrefix(callerName, "Test") {
+		panic("must be called from within a Test* function")
+	}
+	testPattern := "^" + callerName + "$"
+
+	// Unfortunately runtime.faketime (playground time mode) only works on NaCl. The NaCl
+	// SDK must be installed and linked into PATH for this to work.
+	arch, err := getnacl()
+	if err != nil {
+		t.Skip(err)
+	}
+
+	// Compile and run the calling test using NaCl.
+	// The extra tag ensures that the TestMain function in sim_main_test.go is used.
+	cmd := exec.Command("go", "test", "-v", "-tags", "faketime_simulation", "-timeout", "100h", "-run", testPattern, ".")
+	cmd.Env = append([]string{"GOOS=nacl", "GOARCH=" + arch}, os.Environ()...)
+	stdout, _ := cmd.StdoutPipe()
+	stderr, _ := cmd.StderrPipe()
+	go skipPlaygroundOutputHeaders(os.Stdout, stdout)
+	go skipPlaygroundOutputHeaders(os.Stderr, stderr)
+	if err := cmd.Run(); err != nil {
+		t.Error(err)
+	}
+
+	// Ensure that the test function doesn't run in the (non-NaCl) host process.
+	return true
+}
+
+func skipPlaygroundOutputHeaders(out io.Writer, in io.Reader) {
+	// Additional output can be printed without the headers
+	// before the NaCl binary starts running (e.g. compiler error messages).
+	bufin := bufio.NewReader(in)
+	output, err := bufin.ReadBytes(0)
+	output = bytes.TrimSuffix(output, []byte{0})
+	if len(output) > 0 {
+		out.Write(output)
+	}
+	if err != nil {
+		return
+	}
+	bufin.UnreadByte()
+
+	// Playback header: 0 0 P B <8-byte time> <4-byte data length>
+	head := make([]byte, 4+8+4)
+	for {
+		if _, err := io.ReadFull(bufin, head); err != nil {
+			if err != io.EOF {
+				fmt.Fprintln(out, "read error:", err)
+			}
+			return
+		}
+		if !bytes.HasPrefix(head, []byte{0x00, 0x00, 'P', 'B'}) {
+			fmt.Fprintf(out, "expected playback header, got %q\n", head)
+			io.Copy(out, bufin)
+			return
+		}
+		// Copy data until next header.
+		size := binary.BigEndian.Uint32(head[12:])
+		io.CopyN(out, bufin, int64(size))
+	}
+}
diff --git a/p2p/discv5/sim_test.go b/p2p/discv5/sim_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e232fbaacf77fb3fdde581105fa467ef3efbcd1
--- /dev/null
+++ b/p2p/discv5/sim_test.go
@@ -0,0 +1,464 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"crypto/ecdsa"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"net"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// In this test, nodes try to randomly resolve each other.
+// The multi-hour sleeps are only practical under runWithPlaygroundTime,
+// which re-runs the test under the NaCl fake-time runtime.
+func TestSimRandomResolve(t *testing.T) {
+	t.Skip("boring")
+	if runWithPlaygroundTime(t) {
+		return
+	}
+
+	sim := newSimulation()
+	bootnode := sim.launchNode(false)
+
+	// A new node joins every 10s.
+	launcher := time.NewTicker(10 * time.Second)
+	go func() {
+		for range launcher.C {
+			net := sim.launchNode(false)
+			go randomResolves(t, sim, net)
+			if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
+				panic(err)
+			}
+			fmt.Printf("launched @ %v: %x\n", time.Now(), net.Self().ID[:16])
+		}
+	}()
+
+	// Let the simulation run for three (simulated) hours, then tear down.
+	time.Sleep(3 * time.Hour)
+	launcher.Stop()
+	sim.shutdown()
+	sim.printStats()
+}
+
+// TestSimTopics launches 1024 nodes and has the first 256 of them register
+// testTopic for a long (simulated) period. Only runnable under the NaCl
+// fake-time environment; the commented-out sections are retained experiment
+// variants.
+func TestSimTopics(t *testing.T) {
+	t.Skip("NaCl test")
+	if runWithPlaygroundTime(t) {
+		return
+	}
+
+	// glog.SetV(6)
+	// glog.SetToStderr(true)
+
+	sim := newSimulation()
+	bootnode := sim.launchNode(false)
+
+	go func() {
+		// Launch one node every 5 simulated seconds, all bootstrapping
+		// from the single bootnode.
+		nets := make([]*Network, 1024)
+		for i, _ := range nets {
+			net := sim.launchNode(false)
+			nets[i] = net
+			if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
+				panic(err)
+			}
+			time.Sleep(time.Second * 5)
+		}
+
+		// The first 256 nodes advertise testTopic and stop after ~40000s.
+		for i, net := range nets {
+			if i < 256 {
+				stop := make(chan struct{})
+				go net.RegisterTopic(testTopic, stop)
+				go func() {
+					//time.Sleep(time.Second * 36000)
+					time.Sleep(time.Second * 40000)
+					close(stop)
+				}()
+				time.Sleep(time.Millisecond * 100)
+			}
+			//			time.Sleep(time.Second * 10)
+			//time.Sleep(time.Second)
+			/*if i%500 == 499 {
+				time.Sleep(time.Second * 9501)
+			} else {
+				time.Sleep(time.Second)
+			}*/
+		}
+	}()
+
+	// A new node joins every 10s.
+	/*	launcher := time.NewTicker(5 * time.Second)
+		cnt := 0
+		var printNet *Network
+		go func() {
+			for range launcher.C {
+				cnt++
+				if cnt <= 1000 {
+					log := false //(cnt == 500)
+					net := sim.launchNode(log)
+					if log {
+						printNet = net
+					}
+					if cnt > 500 {
+						go net.RegisterTopic(testTopic, nil)
+					}
+					if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
+						panic(err)
+					}
+				}
+				//fmt.Printf("launched @ %v: %x\n", time.Now(), net.Self().ID[:16])
+			}
+		}()
+	*/
+	time.Sleep(55000 * time.Second)
+	//launcher.Stop()
+	sim.shutdown()
+	//sim.printStats()
+	//printNet.log.printLogs()
+}
+
+/*func testHierarchicalTopics(i int) []Topic {
+	digits := strconv.FormatInt(int64(256+i/4), 4)
+	res := make([]Topic, 5)
+	for i, _ := range res {
+		res[i] = Topic("foo" + digits[1:i+1])
+	}
+	return res
+}*/
+
+// testHierarchicalTopics generates a chain of 8 nested topic names for node
+// i. Each topic extends the previous one by a single binary digit of
+// (128 + i/8), so nodes with nearby indices share topic prefixes. For
+// i < 1024 the quotient stays below 128 and the digit string is always
+// 8 characters long with a constant leading "1".
+func testHierarchicalTopics(i int) []Topic {
+	digits := strconv.FormatInt(int64(128+i/8), 2)
+	res := make([]Topic, 8)
+	for i := range res {
+		// Skip the constant leading "1"; res[0] is just "foo".
+		res[i] = Topic("foo" + digits[1:i+1])
+	}
+	return res
+}
+
+// TestSimTopicHierarchy launches 1024 nodes which each register the first
+// five topics of their hierarchical topic chain (see testHierarchicalTopics).
+// Only runnable under the NaCl fake-time environment.
+func TestSimTopicHierarchy(t *testing.T) {
+	t.Skip("NaCl test")
+	if runWithPlaygroundTime(t) {
+		return
+	}
+
+	// glog.SetV(6)
+	// glog.SetToStderr(true)
+
+	sim := newSimulation()
+	bootnode := sim.launchNode(false)
+
+	go func() {
+		// Launch one node every 5 simulated seconds.
+		nets := make([]*Network, 1024)
+		for i, _ := range nets {
+			net := sim.launchNode(false)
+			nets[i] = net
+			if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
+				panic(err)
+			}
+			time.Sleep(time.Second * 5)
+		}
+
+		// One shared stop channel cancels all registrations at once.
+		stop := make(chan struct{})
+		for i, net := range nets {
+			//if i < 256 {
+			for _, topic := range testHierarchicalTopics(i)[:5] {
+				//fmt.Println("reg", topic)
+				go net.RegisterTopic(topic, stop)
+			}
+			time.Sleep(time.Millisecond * 100)
+			//}
+		}
+		time.Sleep(time.Second * 90000)
+		close(stop)
+	}()
+
+	time.Sleep(100000 * time.Second)
+	sim.shutdown()
+}
+
+// randomResolves repeatedly resolves random nodes of the simulation at
+// random 20-70s intervals until net is closed, reporting a test error for
+// every failed lookup.
+// NOTE(review): s.randomNode() can return nil (e.g. empty node map), which
+// would panic on .Self() here — confirm callers guarantee a non-empty sim.
+func randomResolves(t *testing.T, s *simulation, net *Network) {
+	randtime := func() time.Duration {
+		return time.Duration(rand.Intn(50)+20) * time.Second
+	}
+	lookup := func(target NodeID) bool {
+		result := net.Resolve(target)
+		return result != nil && result.ID == target
+	}
+
+	// Reuse a single timer instead of allocating one per iteration.
+	timer := time.NewTimer(randtime())
+	for {
+		select {
+		case <-timer.C:
+			target := s.randomNode().Self().ID
+			if !lookup(target) {
+				t.Errorf("node %x: target %x not found", net.Self().ID[:8], target[:8])
+			}
+			timer.Reset(randtime())
+		case <-net.closed:
+			return
+		}
+	}
+}
+
+// simulation tracks all simulated discovery nodes, keyed by node ID.
+type simulation struct {
+	mu      sync.RWMutex
+	nodes   map[NodeID]*Network // all currently alive nodes
+	nodectr uint32              // counts launched nodes; used to derive fake IPs
+}
+
+// newSimulation creates an empty simulation.
+func newSimulation() *simulation {
+	return &simulation{nodes: make(map[NodeID]*Network)}
+}
+
+// shutdown closes all nodes of the simulation. The node set is copied under
+// the read lock and the lock is released *before* any Close call: the
+// previous version used defer, which kept the read lock held through every
+// n.Close() and could deadlock if closing re-enters the simulation (e.g.
+// dropNode takes the write lock).
+func (s *simulation) shutdown() {
+	s.mu.RLock()
+	alive := make([]*Network, 0, len(s.nodes))
+	for _, n := range s.nodes {
+		alive = append(alive, n)
+	}
+	s.mu.RUnlock()
+
+	for _, n := range alive {
+		n.Close()
+	}
+}
+
+// printStats prints a summary of the simulation (total launched nodes and
+// nodes still alive). The commented-out sections are optional per-node
+// debugging dumps kept for experiments.
+func (s *simulation) printStats() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	fmt.Println("node counter:", s.nodectr)
+	fmt.Println("alive nodes:", len(s.nodes))
+
+	// for _, n := range s.nodes {
+	// 	fmt.Printf("%x\n", n.tab.self.ID[:8])
+	// 	transport := n.conn.(*simTransport)
+	// 	fmt.Println("   joined:", transport.joinTime)
+	// 	fmt.Println("   sends:", transport.hashctr)
+	// 	fmt.Println("   table size:", n.tab.count)
+	// }
+
+	/*for _, n := range s.nodes {
+		fmt.Println()
+		fmt.Printf("*** Node %x\n", n.tab.self.ID[:8])
+		n.log.printLogs()
+	}*/
+
+}
+
+// randomNode returns a uniformly chosen alive node, or nil when the
+// simulation has no nodes. The previous version called rand.Intn(0) on an
+// empty map, which panics; it also took the write lock for a read-only
+// operation.
+func (s *simulation) randomNode() *Network {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	if len(s.nodes) == 0 {
+		return nil
+	}
+	n := rand.Intn(len(s.nodes))
+	for _, net := range s.nodes {
+		if n == 0 {
+			return net
+		}
+		n--
+	}
+	return nil
+}
+
+func (s *simulation) launchNode(log bool) *Network {
+	var (
+		num = s.nodectr
+		key = newkey()
+		id  = PubkeyID(&key.PublicKey)
+		ip  = make(net.IP, 4)
+	)
+	s.nodectr++
+	binary.BigEndian.PutUint32(ip, num)
+	ip[0] = 10
+	addr := &net.UDPAddr{IP: ip, Port: 30303}
+
+	transport := &simTransport{joinTime: time.Now(), sender: id, senderAddr: addr, sim: s, priv: key}
+	net, err := newNetwork(transport, key.PublicKey, nil, "<no database>")
+	if err != nil {
+		panic("cannot launch new node: " + err.Error())
+	}
+
+	s.mu.Lock()
+	s.nodes[id] = net
+	s.mu.Unlock()
+
+	return net
+}
+
+// dropNode removes the node with the given ID from the simulation and
+// closes it. Unknown IDs are ignored; the previous version dereferenced the
+// nil map-lookup result and panicked on n.Close().
+func (s *simulation) dropNode(id NodeID) {
+	s.mu.Lock()
+	n := s.nodes[id]
+	delete(s.nodes, id)
+	s.mu.Unlock()
+
+	if n != nil {
+		n.Close()
+	}
+}
+
+// simTransport implements the discovery transport on top of the in-memory
+// simulation instead of a real UDP socket.
+type simTransport struct {
+	joinTime   time.Time
+	sender     NodeID       // ID of the owning node
+	senderAddr *net.UDPAddr // fake address of the owning node
+	sim        *simulation
+	hashctr    uint64 // monotonic counter used to fabricate packet hashes
+	priv       *ecdsa.PrivateKey
+}
+
+// localAddr returns the fake UDP address of the owning node.
+func (st *simTransport) localAddr() *net.UDPAddr {
+	return st.senderAddr
+}
+
+// Close is a no-op; there is no underlying socket to release.
+func (st *simTransport) Close() {}
+
+// send delivers an event packet of type ptype to remote and returns the
+// fabricated packet hash. Only pong packets are actually encoded and signed
+// (rawData is populated for them only — presumably the receiver needs the
+// signed bytes, e.g. for topic tickets; confirm against the net/ticket
+// handling code); all other packet types are delivered as bare data.
+func (st *simTransport) send(remote *Node, ptype nodeEvent, data interface{}) (hash []byte) {
+	hash = st.nextHash()
+	var raw []byte
+	if ptype == pongPacket {
+		var err error
+		raw, _, err = encodePacket(st.priv, byte(ptype), data)
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       hash,
+		ev:         ptype,
+		data:       data,
+		rawData:    raw,
+	})
+	return hash
+}
+
+// sendPing delivers a fake ping packet advertising the given topics to
+// remote and returns the fabricated packet hash.
+func (st *simTransport) sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) []byte {
+	hash := st.nextHash()
+	req := &ping{
+		Version:    4,
+		From:       rpcEndpoint{IP: st.senderAddr.IP, UDP: uint16(st.senderAddr.Port), TCP: 30303},
+		To:         rpcEndpoint{IP: remoteAddr.IP, UDP: uint16(remoteAddr.Port), TCP: 30303},
+		Expiration: uint64(time.Now().Unix() + int64(expiration)),
+		Topics:     topics,
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       hash,
+		ev:         pingPacket,
+		data:       req,
+	})
+	return hash
+}
+
+// sendPong delivers a fake pong answering the ping identified by pingHash.
+func (st *simTransport) sendPong(remote *Node, pingHash []byte) {
+	to := remote.addr()
+	reply := &pong{
+		To:         rpcEndpoint{IP: to.IP, UDP: uint16(to.Port), TCP: 30303},
+		ReplyTok:   pingHash,
+		Expiration: uint64(time.Now().Unix() + int64(expiration)),
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       st.nextHash(),
+		ev:         pongPacket,
+		data:       reply,
+	})
+}
+
+// sendFindnodeHash delivers a findnodeHash query for the given target hash.
+func (st *simTransport) sendFindnodeHash(remote *Node, target common.Hash) {
+	req := &findnodeHash{
+		Target:     target,
+		Expiration: uint64(time.Now().Unix() + int64(expiration)),
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       st.nextHash(),
+		ev:         findnodeHashPacket,
+		data:       req,
+	})
+}
+
+// sendTopicRegister delivers a topic registration request. idx selects the
+// topic within topics being registered and pong carries the ticket bytes.
+func (st *simTransport) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) {
+	req := &topicRegister{
+		Topics: topics,
+		Idx:    uint(idx),
+		Pong:   pong,
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       st.nextHash(),
+		ev:         topicRegisterPacket,
+		data:       req,
+	})
+}
+
+// sendTopicNodes delivers the answer to a topic query, echoing queryHash.
+func (st *simTransport) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
+	rnodes := make([]rpcNode, len(nodes))
+	for i, n := range nodes {
+		rnodes[i] = nodeToRPC(n)
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       st.nextHash(),
+		ev:         topicNodesPacket,
+		data:       &topicNodes{Echo: queryHash, Nodes: rnodes},
+	})
+}
+
+// sendNeighbours delivers a neighbors packet carrying the given nodes.
+// TODO: send multiple packets
+func (st *simTransport) sendNeighbours(remote *Node, nodes []*Node) {
+	rnodes := make([]rpcNode, len(nodes))
+	for i, n := range nodes {
+		rnodes[i] = nodeToRPC(n)
+	}
+	st.sendPacket(remote.ID, ingressPacket{
+		remoteID:   st.sender,
+		remoteAddr: st.senderAddr,
+		hash:       st.nextHash(),
+		ev:         neighborsPacket,
+		data: &neighbors{
+			Nodes:      rnodes,
+			Expiration: uint64(time.Now().Unix() + int64(expiration)),
+		},
+	})
+}
+
+// nextHash fabricates a unique packet hash from an atomic counter.
+func (st *simTransport) nextHash() []byte {
+	var h common.Hash
+	binary.BigEndian.PutUint64(h[:], atomic.AddUint64(&st.hashctr, 1))
+	return h[:]
+}
+
+// packetLoss is the simulated packet loss rate in 1/1000ths.
+const packetLoss = 0 // 1/1000
+
+// sendPacket delivers p to the node identified by remote after a fixed
+// 200ms simulated network delay, subject to the configured loss rate.
+// Packets addressed to unknown or already-dropped nodes are discarded:
+// the previous version scheduled the AfterFunc unconditionally and
+// panicked on a nil recipient.
+func (st *simTransport) sendPacket(remote NodeID, p ingressPacket) {
+	if rand.Int31n(1000) < packetLoss {
+		return // packet lost
+	}
+	st.sim.mu.RLock()
+	recipient := st.sim.nodes[remote]
+	st.sim.mu.RUnlock()
+	if recipient == nil {
+		return
+	}
+	time.AfterFunc(200*time.Millisecond, func() {
+		recipient.reqReadPacket(p)
+	})
+}
diff --git a/p2p/discv5/sim_testmain_test.go b/p2p/discv5/sim_testmain_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..77e751c419fee0cb8dc070e042f7098a2eaa12a1
--- /dev/null
+++ b/p2p/discv5/sim_testmain_test.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build go1.4,nacl,faketime_simulation
+
+package discv5
+
+import (
+	"os"
+	"runtime"
+	"testing"
+	"unsafe"
+)
+
+// Enable fake time mode in the runtime, like on the go playground.
+// There is a slight chance that this won't work because some go code
+// might have executed before the variable is set.
+
+//go:linkname faketime runtime.faketime
+var faketime = 1
+
+// TestMain runs the package tests inside the fake-time environment.
+// runWithPlaygroundTime re-executes the test binary with the
+// faketime_simulation build tag so that this TestMain takes effect.
+func TestMain(m *testing.M) {
+	// We need to use unsafe somehow in order to get access to go:linkname.
+	_ = unsafe.Sizeof(0)
+
+	// Run the actual test. runWithPlaygroundTime ensures that the only test
+	// that runs is the one calling it.
+	runtime.GOMAXPROCS(8)
+	os.Exit(m.Run())
+}
diff --git a/p2p/discv5/table.go b/p2p/discv5/table.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c8c507060d05b47cd9fe43a87be7c6f32819173
--- /dev/null
+++ b/p2p/discv5/table.go
@@ -0,0 +1,305 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package discv5 implements the RLPx v5 Topic Discovery Protocol.
+//
+// The Topic Discovery protocol provides a way to find RLPx nodes that
+// can be connected to. It uses a Kademlia-like protocol to maintain a
+// distributed database of the IDs and endpoints of all listening
+// nodes.
+package discv5
+
+import (
+	"crypto/rand"
+	"encoding/binary"
+	"net"
+	"sort"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+const (
+	alpha      = 3  // Kademlia concurrency factor
+	bucketSize = 16 // Kademlia bucket size
+	hashBits   = len(common.Hash{}) * 8
+	nBuckets   = hashBits + 1 // Number of buckets
+
+	maxBondingPingPongs = 16 // presumably bounds concurrent bonding handshakes — confirm against net.go
+	maxFindnodeFailures = 5  // findnode failures tolerated before dropping a node (TODO confirm)
+)
+
+// Table is the Kademlia-style index of known nodes, bucketed by log
+// distance from the local node. It has no internal locking.
+type Table struct {
+	count         int               // number of nodes
+	buckets       [nBuckets]*bucket // index of known nodes by distance
+	nodeAddedHook func(*Node)       // for testing
+	self          *Node             // metadata of the local node
+}
+
+// bucket contains nodes, ordered by their last activity. the entry
+// that was most recently active is the first element in entries.
+type bucket struct {
+	entries      []*Node
+	replacements []*Node
+}
+
+// newTable creates a table seeded with the local node's metadata and a
+// fresh set of empty buckets.
+func newTable(ourID NodeID, ourAddr *net.UDPAddr) *Table {
+	port := uint16(ourAddr.Port)
+	tab := &Table{self: NewNode(ourID, ourAddr.IP, port, port)}
+	for i := range tab.buckets {
+		tab.buckets[i] = &bucket{}
+	}
+	return tab
+}
+
+// chooseBucketFillTarget picks a random lookup target whose log distance
+// from the local node lands in a randomly selected non-full bucket (or one
+// bucket beyond the deepest non-empty one), so lookups tend to fill sparse
+// regions of the table.
+func (tab *Table) chooseBucketFillTarget() common.Hash {
+	// bucketCount counts buckets from the deepest non-empty one upward.
+	bucketCount := nBuckets
+	for bucketCount > 0 && len(tab.buckets[nBuckets-bucketCount].entries) == 0 {
+		bucketCount--
+	}
+	var bucket int
+	for {
+		// select a target hash that could go into a certain randomly selected bucket
+		// buckets are chosen with an even chance out of the existing ones that contain
+		// fewer than bucketSize entries, plus a potential new one beyond these
+		// NOTE(review): `bucket == bucketCount` compares a bucket index
+		// against a count; it looks intentional (marks the "one beyond"
+		// case) but verify the intended index arithmetic.
+		bucket = nBuckets - 1 - int(randUint(uint32(bucketCount+1)))
+		if bucket == bucketCount || len(tab.buckets[bucket].entries) < bucketSize {
+			break
+		}
+	}
+
+	// calculate target that has the desired log distance from our own address hash
+	target := tab.self.sha.Bytes()
+	prefix := binary.BigEndian.Uint64(target[0:8])
+	shift := uint(nBuckets - 1 - bucket)
+	if bucket != bucketCount {
+		shift++
+	}
+	var b [8]byte
+	rand.Read(b[:])
+	rnd := binary.BigEndian.Uint64(b[:])
+	// Keep the high (shift) bits of our own prefix, flip the distance bit
+	// when targeting an existing bucket, and randomize the rest.
+	rndMask := (^uint64(0)) >> shift
+	addrMask := ^rndMask
+	xorMask := uint64(0)
+	if bucket != bucketCount {
+		xorMask = rndMask + 1
+	}
+	prefix = (prefix&addrMask ^ xorMask) | (rnd & rndMask)
+	binary.BigEndian.PutUint64(target[0:8], prefix)
+	rand.Read(target[8:])
+	return common.BytesToHash(target)
+}
+
+// readRandomNodes fills the given slice with random nodes from the
+// table. It will not write the same node more than once. The nodes in
+// the slice are copies and can be modified by the caller. The return
+// value is the number of entries written to buf.
+func (tab *Table) readRandomNodes(buf []*Node) (n int) {
+	// TODO: tree-based buckets would help here
+	// Find all non-empty buckets and get a fresh slice of their entries.
+	var buckets [][]*Node
+	for _, b := range tab.buckets {
+		if len(b.entries) > 0 {
+			buckets = append(buckets, b.entries[:])
+		}
+	}
+	if len(buckets) == 0 {
+		return 0
+	}
+	// Shuffle the buckets. Note the bound is i+1: the previous code used
+	// randUint(i), which could never leave element i in place and biased
+	// the shuffle.
+	for i := uint32(len(buckets)) - 1; i > 0; i-- {
+		j := randUint(i + 1)
+		buckets[i], buckets[j] = buckets[j], buckets[i]
+	}
+	// Move the head of each bucket into buf, removing buckets that become
+	// empty. n counts the entries actually written. The previous version
+	// returned i+1 after a normal loop exit, overstating the count by one
+	// whenever buf was filled completely, and stored &(*b[0]) — which is
+	// just b[0], not the copy the contract promises.
+	j := 0
+	for n < len(buf) {
+		b := buckets[j]
+		cp := *b[0] // copy, so callers may modify the node freely
+		buf[n] = &cp
+		n++
+		buckets[j] = b[1:]
+		if len(b) == 1 {
+			buckets = append(buckets[:j], buckets[j+1:]...)
+		}
+		if len(buckets) == 0 {
+			break
+		}
+		j = (j + 1) % len(buckets)
+	}
+	return n
+}
+
+func randUint(max uint32) uint32 {
+	if max < 2 {
+		return 0
+	}
+	var b [4]byte
+	rand.Read(b[:])
+	return binary.BigEndian.Uint32(b[:]) % max
+}
+
+func randUint64n(max uint64) uint64 {
+	if max < 2 {
+		return 0
+	}
+	var b [8]byte
+	rand.Read(b[:])
+	return binary.BigEndian.Uint64(b[:]) % max
+}
+
+// closest returns the nresults table entries whose hashes are nearest to
+// target by XOR distance. This scans every entry — wasteful but obviously
+// correct; tree-based buckets would allow an efficient implementation.
+func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
+	result := &nodesByDistance{target: target}
+	for _, b := range tab.buckets {
+		for _, n := range b.entries {
+			result.push(n, nresults)
+		}
+	}
+	return result
+}
+
+// add attempts to add the given node to its corresponding bucket. If the
+// bucket has space available, adding the node succeeds immediately.
+// Otherwise, the node is added to the replacement cache for the bucket.
+// The returned contested node is the least recently active entry of a full
+// bucket; nil means n was stored (or already present).
+func (tab *Table) add(n *Node) (contested *Node) {
+	b := tab.buckets[logdist(tab.self.sha, n.sha)]
+	switch {
+	case b.bump(n):
+		// n exists in b.
+		return nil
+	case len(b.entries) < bucketSize:
+		// b has space available.
+		b.addFront(n)
+		tab.count++
+		if tab.nodeAddedHook != nil {
+			tab.nodeAddedHook(n)
+		}
+		return nil
+	default:
+		// b has no space left, add to replacement cache
+		// and revalidate the last entry.
+		// TODO: drop previous node
+		b.replacements = append(b.replacements, n)
+		if len(b.replacements) > bucketSize {
+			// Cache overflow: shift left to evict the oldest replacement.
+			copy(b.replacements, b.replacements[1:])
+			b.replacements = b.replacements[:len(b.replacements)-1]
+		}
+		return b.entries[len(b.entries)-1]
+	}
+}
+
+// stuff appends the given nodes to their corresponding buckets, skipping
+// the local node, duplicates, and buckets that are already full.
+func (tab *Table) stuff(nodes []*Node) {
+	for _, n := range nodes {
+		if n.ID == tab.self.ID {
+			continue // never add the local node
+		}
+		b := tab.buckets[logdist(tab.self.sha, n.sha)]
+		dup := false
+		for _, e := range b.entries {
+			if e.ID == n.ID {
+				dup = true
+				break
+			}
+		}
+		if dup || len(b.entries) >= bucketSize {
+			continue
+		}
+		b.entries = append(b.entries, n)
+		tab.count++
+		if tab.nodeAddedHook != nil {
+			tab.nodeAddedHook(n)
+		}
+	}
+}
+
+// delete removes an entry from the node table (used to evacuate
+// failed/non-bonded discovery peers).
+func (tab *Table) delete(node *Node) {
+	b := tab.buckets[logdist(tab.self.sha, node.sha)]
+	for i, e := range b.entries {
+		if e.ID == node.ID {
+			b.entries = append(b.entries[:i], b.entries[i+1:]...)
+			tab.count--
+			return
+		}
+	}
+}
+
+// deleteReplace removes every entry matching node's ID from its bucket and
+// promotes the newest entry of the replacement cache into the freed slot.
+func (tab *Table) deleteReplace(node *Node) {
+	b := tab.buckets[logdist(tab.self.sha, node.sha)]
+	// i is only advanced when nothing was removed, because removal shifts
+	// the next element into position i.
+	i := 0
+	for i < len(b.entries) {
+		if b.entries[i].ID == node.ID {
+			b.entries = append(b.entries[:i], b.entries[i+1:]...)
+			tab.count--
+		} else {
+			i++
+		}
+	}
+	// refill from replacement cache
+	// TODO: maybe use random index
+	if len(b.entries) < bucketSize && len(b.replacements) > 0 {
+		ri := len(b.replacements) - 1
+		b.addFront(b.replacements[ri])
+		tab.count++
+		// Clear the vacated slot so the node is not retained by the cache.
+		b.replacements[ri] = nil
+		b.replacements = b.replacements[:ri]
+	}
+}
+
+// addFront inserts n as the first (most recently active) entry.
+func (b *bucket) addFront(n *Node) {
+	old := b.entries
+	b.entries = append(old, nil)
+	copy(b.entries[1:], old) // copy has memmove semantics, overlap is safe
+	b.entries[0] = n
+}
+
+// bump moves the entry matching n's ID to the front of the bucket and
+// reports whether it was found.
+func (b *bucket) bump(n *Node) bool {
+	for i := range b.entries {
+		if b.entries[i].ID != n.ID {
+			continue
+		}
+		// Shift the entries before position i back by one slot and
+		// place n at the front.
+		copy(b.entries[1:i+1], b.entries[:i])
+		b.entries[0] = n
+		return true
+	}
+	return false
+}
+
+// nodesByDistance is a list of nodes, ordered by
+// distance to target.
+type nodesByDistance struct {
+	entries []*Node
+	target  common.Hash
+}
+
+// push adds the given node to the list, keeping the total size below maxElems.
+// When the list is full, a node farther away than all current entries is
+// dropped; otherwise it displaces the farthest entry.
+func (h *nodesByDistance) push(n *Node, maxElems int) {
+	// ix is the insertion point that keeps entries sorted by distance.
+	ix := sort.Search(len(h.entries), func(i int) bool {
+		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
+	})
+	if len(h.entries) < maxElems {
+		h.entries = append(h.entries, n)
+	}
+	if ix == len(h.entries) {
+		// farther away than all nodes we already have.
+		// if there was room for it, the node is now the last element.
+	} else {
+		// slide existing entries down to make room
+		// this will overwrite the entry we just appended.
+		copy(h.entries[ix+1:], h.entries[ix:])
+		h.entries[ix] = n
+	}
+}
diff --git a/p2p/discv5/table_test.go b/p2p/discv5/table_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a29943dab9b3274706a938773d835d28e445d75d
--- /dev/null
+++ b/p2p/discv5/table_test.go
@@ -0,0 +1,337 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+	"math/rand"
+
+	"net"
+	"reflect"
+	"testing"
+	"testing/quick"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+// nullTransport is a do-nothing transport stub for table tests; sendPing
+// returns a fixed one-byte hash and everything else is a no-op.
+type nullTransport struct{}
+
+func (nullTransport) sendPing(remote *Node, remoteAddr *net.UDPAddr) []byte { return []byte{1} }
+func (nullTransport) sendPong(remote *Node, pingHash []byte)                {}
+func (nullTransport) sendFindnode(remote *Node, target NodeID)              {}
+func (nullTransport) sendNeighbours(remote *Node, nodes []*Node)            {}
+func (nullTransport) localAddr() *net.UDPAddr                               { return new(net.UDPAddr) }
+func (nullTransport) Close()                                                {}
+
+// func TestTable_pingReplace(t *testing.T) {
+// 	doit := func(newNodeIsResponding, lastInBucketIsResponding bool) {
+// 		transport := newPingRecorder()
+// 		tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{})
+// 		defer tab.Close()
+// 		pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
+//
+// 		// fill up the sender's bucket.
+// 		last := fillBucket(tab, 253)
+//
+// 		// this call to bond should replace the last node
+// 		// in its bucket if the node is not responding.
+// 		transport.responding[last.ID] = lastInBucketIsResponding
+// 		transport.responding[pingSender.ID] = newNodeIsResponding
+// 		tab.bond(true, pingSender.ID, &net.UDPAddr{}, 0)
+//
+// 		// first ping goes to sender (bonding pingback)
+// 		if !transport.pinged[pingSender.ID] {
+// 			t.Error("table did not ping back sender")
+// 		}
+// 		if newNodeIsResponding {
+// 			// second ping goes to oldest node in bucket
+// 			// to see whether it is still alive.
+// 			if !transport.pinged[last.ID] {
+// 				t.Error("table did not ping last node in bucket")
+// 			}
+// 		}
+//
+// 		tab.mutex.Lock()
+// 		defer tab.mutex.Unlock()
+// 		if l := len(tab.buckets[253].entries); l != bucketSize {
+// 			t.Errorf("wrong bucket size after bond: got %d, want %d", l, bucketSize)
+// 		}
+//
+// 		if lastInBucketIsResponding || !newNodeIsResponding {
+// 			if !contains(tab.buckets[253].entries, last.ID) {
+// 				t.Error("last entry was removed")
+// 			}
+// 			if contains(tab.buckets[253].entries, pingSender.ID) {
+// 				t.Error("new entry was added")
+// 			}
+// 		} else {
+// 			if contains(tab.buckets[253].entries, last.ID) {
+// 				t.Error("last entry was not removed")
+// 			}
+// 			if !contains(tab.buckets[253].entries, pingSender.ID) {
+// 				t.Error("new entry was not added")
+// 			}
+// 		}
+// 	}
+//
+// 	doit(true, true)
+// 	doit(false, true)
+// 	doit(true, false)
+// 	doit(false, false)
+// }
+
+// TestBucket_bumpNoDuplicates checks, via testing/quick, that repeatedly
+// bumping random bucket entries never introduces duplicate entries.
+func TestBucket_bumpNoDuplicates(t *testing.T) {
+	t.Parallel()
+	cfg := &quick.Config{
+		MaxCount: 1000,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
+		Values: func(args []reflect.Value, rand *rand.Rand) {
+			// generate a random list of nodes. this will be the content of the bucket.
+			n := rand.Intn(bucketSize-1) + 1
+			nodes := make([]*Node, n)
+			for i := range nodes {
+				nodes[i] = nodeAtDistance(common.Hash{}, 200)
+			}
+			args[0] = reflect.ValueOf(nodes)
+			// generate random bump positions.
+			bumps := make([]int, rand.Intn(100))
+			for i := range bumps {
+				bumps[i] = rand.Intn(len(nodes))
+			}
+			args[1] = reflect.ValueOf(bumps)
+		},
+	}
+
+	prop := func(nodes []*Node, bumps []int) (ok bool) {
+		b := &bucket{entries: make([]*Node, len(nodes))}
+		copy(b.entries, nodes)
+		for i, pos := range bumps {
+			b.bump(b.entries[pos])
+			if hasDuplicates(b.entries) {
+				t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps))
+				for _, n := range b.entries {
+					t.Logf("  %p", n)
+				}
+				return false
+			}
+		}
+		return true
+	}
+	if err := quick.Check(prop, cfg); err != nil {
+		t.Error(err)
+	}
+}
+
+// fillBucket inserts nodes into the bucket at log distance ld until
+// it is full, and returns the bucket's last entry. The node IDs don't
+// correspond to their hashes.
+func fillBucket(tab *Table, ld int) (last *Node) {
+	b := tab.buckets[ld]
+	for len(b.entries) < bucketSize {
+		b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
+	}
+	return b.entries[bucketSize-1]
+}
+
+// nodeAtDistance creates a node for which logdist(base, n.sha) == ld.
+// The node's ID does not correspond to n.sha.
+func nodeAtDistance(base common.Hash, ld int) *Node {
+	n := &Node{sha: hashAtDistance(base, ld)}
+	copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID
+	return n
+}
+
+// pingRecorder is a transport stub that records which nodes were pinged
+// and answers according to the responding map.
+type pingRecorder struct{ responding, pinged map[NodeID]bool }
+
+func newPingRecorder() *pingRecorder {
+	return &pingRecorder{make(map[NodeID]bool), make(map[NodeID]bool)}
+}
+
+// findnode is not supported by this stub.
+func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
+	panic("findnode called on pingRecorder")
+}
+func (t *pingRecorder) close() {}
+func (t *pingRecorder) waitping(from NodeID) error {
+	return nil // remote always pings
+}
+// ping records the attempt and reports a timeout unless the target was
+// marked as responding.
+func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
+	t.pinged[toid] = true
+	if !t.responding[toid] {
+		return errTimeout
+	}
+	return nil
+}
+
+// TestTable_closest checks, via testing/quick, that Table.closest returns
+// a duplicate-free, distance-sorted result of the right size containing
+// the genuinely nearest table entries.
+func TestTable_closest(t *testing.T) {
+	t.Parallel()
+
+	test := func(test *closeTest) bool {
+		// for any node table, Target and N
+		tab := newTable(test.Self, &net.UDPAddr{})
+		tab.stuff(test.All)
+
+		// check that doClosest(Target, N) returns nodes
+		result := tab.closest(test.Target, test.N).entries
+		if hasDuplicates(result) {
+			t.Errorf("result contains duplicates")
+			return false
+		}
+		if !sortedByDistanceTo(test.Target, result) {
+			t.Errorf("result is not sorted by distance to target")
+			return false
+		}
+
+		// check that the number of results is min(N, tablen)
+		wantN := test.N
+		if tab.count < test.N {
+			wantN = tab.count
+		}
+		if len(result) != wantN {
+			t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN)
+			return false
+		} else if len(result) == 0 {
+			return true // no need to check distance
+		}
+
+		// check that the result nodes have minimum distance to target.
+		for _, b := range tab.buckets {
+			for _, n := range b.entries {
+				if contains(result, n.ID) {
+					continue // don't run the check below for nodes in result
+				}
+				farthestResult := result[len(result)-1].sha
+				if distcmp(test.Target, n.sha, farthestResult) < 0 {
+					t.Errorf("table contains node that is closer to target but it's not in result")
+					t.Logf("  Target:          %v", test.Target)
+					t.Logf("  Farthest Result: %v", farthestResult)
+					t.Logf("  ID:              %v", n.ID)
+					return false
+				}
+			}
+		}
+		return true
+	}
+	if err := quick.Check(test, quickcfg()); err != nil {
+		t.Error(err)
+	}
+}
+
+// TestTable_ReadRandomNodesGetAll checks that readRandomNodes reports the
+// full table content (no duplicates, count matching tab.count) when the
+// output buffer is at least as large as the table.
+func TestTable_ReadRandomNodesGetAll(t *testing.T) {
+	cfg := &quick.Config{
+		MaxCount: 200,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
+		Values: func(args []reflect.Value, rand *rand.Rand) {
+			args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
+		},
+	}
+	test := func(buf []*Node) bool {
+		tab := newTable(NodeID{}, &net.UDPAddr{})
+		for i := 0; i < len(buf); i++ {
+			ld := cfg.Rand.Intn(len(tab.buckets))
+			tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)})
+		}
+		gotN := tab.readRandomNodes(buf)
+		if gotN != tab.count {
+			t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.count)
+			return false
+		}
+		if hasDuplicates(buf[:gotN]) {
+			t.Errorf("result contains duplicates")
+			return false
+		}
+		return true
+	}
+	if err := quick.Check(test, cfg); err != nil {
+		t.Error(err)
+	}
+}
+
+// closeTest is the quick.Check input for TestTable_closest.
+type closeTest struct {
+	Self   NodeID
+	Target common.Hash
+	All    []*Node
+	N      int
+}
+
+// Generate implements quick.Generator, producing a random table population
+// plus a random target and result count.
+func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
+	t := &closeTest{
+		Self:   gen(NodeID{}, rand).(NodeID),
+		Target: gen(common.Hash{}, rand).(common.Hash),
+		N:      rand.Intn(bucketSize),
+	}
+	for _, id := range gen([]NodeID{}, rand).([]NodeID) {
+		t.All = append(t.All, &Node{ID: id})
+	}
+	return reflect.ValueOf(t)
+}
+
+// hasDuplicates reports whether slice contains two nodes with the same ID.
+// It panics on nil entries, which always indicate a test bug.
+func hasDuplicates(slice []*Node) bool {
+	seen := make(map[NodeID]struct{}, len(slice))
+	for i, n := range slice {
+		if n == nil {
+			panic(fmt.Sprintf("nil *Node at %d", i))
+		}
+		if _, ok := seen[n.ID]; ok {
+			return true
+		}
+		seen[n.ID] = struct{}{}
+	}
+	return false
+}
+
+func sortedByDistanceTo(distbase common.Hash, slice []*Node) bool {
+	var last common.Hash
+	for i, e := range slice {
+		if i > 0 && distcmp(distbase, e.sha, last) < 0 {
+			return false
+		}
+		last = e.sha
+	}
+	return true
+}
+
+// contains reports whether ns includes a node with the given ID.
+func contains(ns []*Node, id NodeID) bool {
+	for i := range ns {
+		if ns[i].ID == id {
+			return true
+		}
+	}
+	return false
+}
+
+// gen wraps quick.Value so it's easier to use.
+// it generates a random value of the given value's type and panics if
+// testing/quick cannot produce one.
+func gen(typ interface{}, rand *rand.Rand) interface{} {
+	v, ok := quick.Value(reflect.TypeOf(typ), rand)
+	if !ok {
+		panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
+	}
+	return v.Interface()
+}
+
+// newkey generates a fresh ECDSA node key, panicking on failure
+// (test-only helper).
+func newkey() *ecdsa.PrivateKey {
+	k, err := crypto.GenerateKey()
+	if err != nil {
+		panic("couldn't generate key: " + err.Error())
+	}
+	return k
+}
diff --git a/p2p/discv5/ticket.go b/p2p/discv5/ticket.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ee2f7fc4633802b6bbc3be0edc9283d2ec33d16
--- /dev/null
+++ b/p2p/discv5/ticket.go
@@ -0,0 +1,969 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+const (
+	ticketTimeBucketLen = time.Minute
+	timeWindow          = 10 // * ticketTimeBucketLen
+	wantTicketsInWindow = 10
+	collectFrequency    = time.Second * 30
+	registerFrequency   = time.Second * 60
+	maxCollectDebt      = 10
+	maxRegisterDebt     = 5
+	keepTicketConst     = time.Minute * 10
+	keepTicketExp       = time.Minute * 5
+	targetWaitTime      = time.Minute * 10
+	topicQueryTimeout   = time.Second * 5
+	topicQueryResend    = time.Minute
+	// topic radius detection
+	maxRadius           = 0xffffffffffffffff
+	radiusTC            = time.Minute * 20
+	radiusBucketsPerBit = 8
+	minSlope            = 1
+	minPeakSize         = 40
+	maxNoAdjust         = 20
+	lookupWidth         = 8
+	minRightSum         = 20
+	searchForceQuery    = 4
+)
+
+// timeBucket represents absolute monotonic time in minutes.
+// It is used as the index into the per-topic ticket buckets.
+type timeBucket int
+
+type ticket struct {
+	topics  []Topic
+	regTime []mclock.AbsTime // Per-topic local absolute time when the ticket can be used.
+
+	// The serial number that was issued by the server.
+	serial uint32
+	// Used by registrar, tracks absolute time when the ticket was created.
+	issueTime mclock.AbsTime
+
+	// Fields used only by registrants
+	node   *Node  // the registrar node that signed this ticket
+	refCnt int    // tracks number of topics that will be registered using this ticket
+	pong   []byte // encoded pong packet signed by the registrar
+}
+
+// ticketRef refers to a single topic in a ticket.
+type ticketRef struct {
+	t   *ticket
+	idx int // index of the topic in t.topics and t.regTime
+}
+
+func (ref ticketRef) topic() Topic {
+	return ref.t.topics[ref.idx]
+}
+
+func (ref ticketRef) topicRegTime() mclock.AbsTime {
+	return ref.t.regTime[ref.idx]
+}
+
+// pongToTicket creates a ticket from a pong packet received from node.
+// It checks that the registrar echoed exactly one wait period per
+// requested topic and that the topic hash matches, then converts the
+// relative wait periods into local absolute registration times.
+func pongToTicket(localTime mclock.AbsTime, topics []Topic, node *Node, p *ingressPacket) (*ticket, error) {
+	wps := p.data.(*pong).WaitPeriods
+	if len(topics) != len(wps) {
+		return nil, fmt.Errorf("bad wait period list: got %d values, want %d", len(topics), len(wps))
+	}
+	if rlpHash(topics) != p.data.(*pong).TopicHash {
+		return nil, fmt.Errorf("bad topic hash")
+	}
+	t := &ticket{
+		issueTime: localTime,
+		node:      node,
+		topics:    topics,
+		pong:      p.rawData,
+		regTime:   make([]mclock.AbsTime, len(wps)),
+	}
+	// Convert wait periods to local absolute time.
+	for i, wp := range wps {
+		t.regTime[i] = localTime + mclock.AbsTime(time.Second*time.Duration(wp))
+	}
+	return t, nil
+}
+
+// ticketToPong encodes ticket t into the given pong packet, converting
+// the absolute per-topic registration times back into wait periods
+// relative to the ticket's issue time.
+func ticketToPong(t *ticket, pong *pong) {
+	pong.Expiration = uint64(t.issueTime / mclock.AbsTime(time.Second))
+	pong.TopicHash = rlpHash(t.topics)
+	pong.TicketSerial = t.serial
+	pong.WaitPeriods = make([]uint32, len(t.regTime))
+	for i, regTime := range t.regTime {
+		pong.WaitPeriods[i] = uint32(time.Duration(regTime-t.issueTime) / time.Second)
+	}
+}
+
+type ticketStore struct {
+	// radius detector and target address generator
+	// exists for both searched and registered topics
+	radius map[Topic]*topicRadius
+
+	// Contains buckets (for each absolute minute) of tickets
+	// that can be used in that minute.
+	// This is only set if the topic is being registered.
+	tickets     map[Topic]topicTickets
+	regtopics   []Topic
+	nodes       map[*Node]*ticket
+	nodeLastReq map[*Node]reqInfo
+
+	lastBucketFetched timeBucket
+	nextTicketCached  *ticketRef
+	nextTicketReg     mclock.AbsTime
+
+	searchTopicMap        map[Topic]searchTopic
+	searchTopicList       []Topic
+	searchTopicPtr        int
+	nextTopicQueryCleanup mclock.AbsTime
+	queriesSent           map[*Node]map[common.Hash]sentQuery
+	radiusLookupCnt       int
+}
+
+type searchTopic struct {
+	foundChn chan<- string
+	listIdx  int
+}
+
+type sentQuery struct {
+	sent   mclock.AbsTime
+	lookup lookupInfo
+}
+
+type topicTickets struct {
+	buckets             map[timeBucket][]ticketRef
+	nextLookup, nextReg mclock.AbsTime
+}
+
+// newTicketStore creates an empty ticket store with all of its
+// lookup maps initialized.
+func newTicketStore() *ticketStore {
+	return &ticketStore{
+		radius:         make(map[Topic]*topicRadius),
+		tickets:        make(map[Topic]topicTickets),
+		nodes:          make(map[*Node]*ticket),
+		nodeLastReq:    make(map[*Node]reqInfo),
+		searchTopicMap: make(map[Topic]searchTopic),
+		queriesSent:    make(map[*Node]map[common.Hash]sentQuery),
+	}
+}
+
+// addTopic starts tracking a topic. If register is true,
+// the local node will register the topic and tickets will be collected.
+func (s *ticketStore) addTopic(t Topic, register bool) {
+	debugLog(fmt.Sprintf(" addTopic(%v, %v)", t, register))
+	if s.radius[t] == nil {
+		s.radius[t] = newTopicRadius(t)
+	}
+	if register && s.tickets[t].buckets == nil {
+		s.tickets[t] = topicTickets{buckets: make(map[timeBucket][]ticketRef)}
+	}
+}
+
+func (s *ticketStore) addSearchTopic(t Topic, foundChn chan<- string) {
+	s.addTopic(t, false)
+	if s.searchTopicMap[t].foundChn == nil {
+		s.searchTopicList = append(s.searchTopicList, t)
+		s.searchTopicMap[t] = searchTopic{foundChn: foundChn, listIdx: len(s.searchTopicList) - 1}
+	}
+}
+
+// removeSearchTopic stops searching for topic t. The topic is removed
+// from searchTopicList with a swap-with-last, so list order is not
+// preserved; the moved topic's listIdx is updated accordingly.
+func (s *ticketStore) removeSearchTopic(t Topic) {
+	if st := s.searchTopicMap[t]; st.foundChn != nil {
+		lastIdx := len(s.searchTopicList) - 1
+		lastTopic := s.searchTopicList[lastIdx]
+		s.searchTopicList[st.listIdx] = lastTopic
+		sl := s.searchTopicMap[lastTopic]
+		sl.listIdx = st.listIdx
+		s.searchTopicMap[lastTopic] = sl
+		s.searchTopicList = s.searchTopicList[:lastIdx]
+		delete(s.searchTopicMap, t)
+	}
+}
+
+// removeRegisterTopic deletes all tickets for the given topic.
+func (s *ticketStore) removeRegisterTopic(topic Topic) {
+	debugLog(fmt.Sprintf(" removeRegisterTopic(%v)", topic))
+	for _, list := range s.tickets[topic].buckets {
+		for _, ref := range list {
+			ref.t.refCnt--
+			if ref.t.refCnt == 0 {
+				delete(s.nodes, ref.t.node)
+				delete(s.nodeLastReq, ref.t.node)
+			}
+		}
+	}
+	delete(s.tickets, topic)
+}
+
+func (s *ticketStore) regTopicSet() []Topic {
+	topics := make([]Topic, 0, len(s.tickets))
+	for topic := range s.tickets {
+		topics = append(topics, topic)
+	}
+	return topics
+}
+
+// nextRegisterLookup returns the target of the next lookup for ticket collection.
+func (s *ticketStore) nextRegisterLookup() (lookup lookupInfo, delay time.Duration) {
+	debugLog("nextRegisterLookup()")
+	firstTopic, ok := s.iterRegTopics()
+	for topic := firstTopic; ok; {
+		debugLog(fmt.Sprintf(" checking topic %v, len(s.tickets[topic]) = %d", topic, len(s.tickets[topic].buckets)))
+		if s.tickets[topic].buckets != nil && s.needMoreTickets(topic) {
+			next := s.radius[topic].nextTarget(false)
+			debugLog(fmt.Sprintf(" %x 1s", next.target[:8]))
+			return next, 100 * time.Millisecond
+		}
+		topic, ok = s.iterRegTopics()
+		if topic == firstTopic {
+			break // We have checked all topics.
+		}
+	}
+	debugLog(" null, 40s")
+	return lookupInfo{}, 40 * time.Second
+}
+
+func (s *ticketStore) nextSearchLookup() lookupInfo {
+	if len(s.searchTopicList) == 0 {
+		return lookupInfo{}
+	}
+	if s.searchTopicPtr >= len(s.searchTopicList) {
+		s.searchTopicPtr = 0
+	}
+	topic := s.searchTopicList[s.searchTopicPtr]
+	s.searchTopicPtr++
+	target := s.radius[topic].nextTarget(s.radiusLookupCnt >= searchForceQuery)
+	if target.radiusLookup {
+		s.radiusLookupCnt++
+	} else {
+		s.radiusLookupCnt = 0
+	}
+	return target
+}
+
+// iterRegTopics returns topics to register in arbitrary order.
+// The second return value is false if there are no topics.
+func (s *ticketStore) iterRegTopics() (Topic, bool) {
+	debugLog("iterRegTopics()")
+	if len(s.regtopics) == 0 {
+		if len(s.tickets) == 0 {
+			debugLog(" false")
+			return "", false
+		}
+		// Refill register list.
+		for t := range s.tickets {
+			s.regtopics = append(s.regtopics, t)
+		}
+	}
+	topic := s.regtopics[len(s.regtopics)-1]
+	s.regtopics = s.regtopics[:len(s.regtopics)-1]
+	debugLog(" " + string(topic) + " true")
+	return topic, true
+}
+
+func (s *ticketStore) needMoreTickets(t Topic) bool {
+	return s.tickets[t].nextLookup < mclock.Now()
+}
+
+// ticketsInWindow returns the tickets of a given topic in the registration window.
+func (s *ticketStore) ticketsInWindow(t Topic) []ticketRef {
+	ltBucket := s.lastBucketFetched
+	var res []ticketRef
+	tickets := s.tickets[t].buckets
+	for g := ltBucket; g < ltBucket+timeWindow; g++ {
+		res = append(res, tickets[g]...)
+	}
+	debugLog(fmt.Sprintf("ticketsInWindow(%v) = %v", t, len(res)))
+	return res
+}
+
+func (s *ticketStore) removeExcessTickets(t Topic) {
+	tickets := s.ticketsInWindow(t)
+	if len(tickets) <= wantTicketsInWindow {
+		return
+	}
+	sort.Sort(ticketRefByWaitTime(tickets))
+	for _, r := range tickets[wantTicketsInWindow:] {
+		s.removeTicketRef(r)
+	}
+}
+
+type ticketRefByWaitTime []ticketRef
+
+// Len is the number of elements in the collection.
+func (s ticketRefByWaitTime) Len() int {
+	return len(s)
+}
+
+func (r ticketRef) waitTime() mclock.AbsTime {
+	return r.t.regTime[r.idx] - r.t.issueTime
+}
+
+// Less reports whether the element with
+// index i should sort before the element with index j.
+func (s ticketRefByWaitTime) Less(i, j int) bool {
+	return s[i].waitTime() < s[j].waitTime()
+}
+
+// Swap swaps the elements with indexes i and j.
+func (s ticketRefByWaitTime) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// addTicketRef stores a single topic reference of a ticket in the time
+// bucket derived from its registration time. It is a no-op if the topic
+// is not being registered. Adding a ticket pushes back the topic's next
+// lookup time by collectFrequency.
+func (s *ticketStore) addTicketRef(r ticketRef) {
+	topic := r.t.topics[r.idx]
+	t := s.tickets[topic]
+	if t.buckets == nil {
+		return
+	}
+	bucket := timeBucket(r.t.regTime[r.idx] / mclock.AbsTime(ticketTimeBucketLen))
+	t.buckets[bucket] = append(t.buckets[bucket], r)
+	r.t.refCnt++
+
+	// Bound the accumulated lookup "debt" so that a long pause does not
+	// cause a burst of back-to-back lookups afterwards.
+	min := mclock.Now() - mclock.AbsTime(collectFrequency)*maxCollectDebt
+	if t.nextLookup < min {
+		t.nextLookup = min
+	}
+	t.nextLookup += mclock.AbsTime(collectFrequency)
+	s.tickets[topic] = t
+
+	//s.removeExcessTickets(topic)
+}
+
+// nextFilteredTicket returns the next registerable ticket whose
+// registration time is not earlier than its topic's nextReg limit.
+// Tickets that would register too early are removed from the store.
+func (s *ticketStore) nextFilteredTicket() (t *ticketRef, wait time.Duration) {
+	now := mclock.Now()
+	for {
+		t, wait = s.nextRegisterableTicket()
+		if t == nil {
+			return
+		}
+		regTime := now + mclock.AbsTime(wait)
+		topic := t.t.topics[t.idx]
+		if regTime >= s.tickets[topic].nextReg {
+			return
+		}
+		s.removeTicketRef(*t)
+	}
+}
+
+// ticketRegistered marks ticket reference t as used for registration.
+// It pushes back the topic's next allowed registration time by
+// registerFrequency (the accumulated debt is bounded by maxRegisterDebt)
+// and removes the reference from the store.
+func (s *ticketStore) ticketRegistered(t ticketRef) {
+	now := mclock.Now()
+
+	topic := t.t.topics[t.idx]
+	tt := s.tickets[topic]
+	min := now - mclock.AbsTime(registerFrequency)*maxRegisterDebt
+	if min > tt.nextReg {
+		tt.nextReg = min
+	}
+	tt.nextReg += mclock.AbsTime(registerFrequency)
+	s.tickets[topic] = tt
+
+	s.removeTicketRef(t)
+}
+
+// nextRegisterableTicket returns the next ticket that can be used
+// to register.
+//
+// If the returned wait time <= zero the ticket can be used. For a positive
+// wait time, the caller should requery the next ticket later.
+//
+// A ticket can be returned more than once with <= zero wait time in case
+// the ticket contains multiple topics.
+func (s *ticketStore) nextRegisterableTicket() (t *ticketRef, wait time.Duration) {
+	defer func() {
+		if t == nil {
+			debugLog(" nil")
+		} else {
+			debugLog(fmt.Sprintf(" node = %x sn = %v wait = %v", t.t.node.ID[:8], t.t.serial, wait))
+		}
+	}()
+
+	debugLog("nextRegisterableTicket()")
+	now := mclock.Now()
+	if s.nextTicketCached != nil {
+		return s.nextTicketCached, time.Duration(s.nextTicketCached.topicRegTime() - now)
+	}
+
+	for bucket := s.lastBucketFetched; ; bucket++ {
+		var (
+			empty      = true    // true if there are no tickets
+			nextTicket ticketRef // uninitialized if this bucket is empty
+		)
+		for _, tickets := range s.tickets {
+			//s.removeExcessTickets(topic)
+			if len(tickets.buckets) != 0 {
+				empty = false
+				if list := tickets.buckets[bucket]; list != nil {
+					for _, ref := range list {
+						//debugLog(fmt.Sprintf(" nrt bucket = %d node = %x sn = %v wait = %v", bucket, ref.t.node.ID[:8], ref.t.serial, time.Duration(ref.topicRegTime()-now)))
+						if nextTicket.t == nil || ref.topicRegTime() < nextTicket.topicRegTime() {
+							nextTicket = ref
+						}
+					}
+				}
+			}
+		}
+		if empty {
+			return nil, 0
+		}
+		if nextTicket.t != nil {
+			wait = time.Duration(nextTicket.topicRegTime() - now)
+			s.nextTicketCached = &nextTicket
+			return &nextTicket, wait
+		}
+		s.lastBucketFetched = bucket
+	}
+}
+
+// removeTicketRef removes a single topic reference from the ticket
+// store. When the last reference of a ticket is removed, the ticket's
+// node is forgotten as well. The cached next ticket is invalidated so
+// that nextRegisterableTicket re-scans the buckets.
+func (s *ticketStore) removeTicketRef(ref ticketRef) {
+	debugLog(fmt.Sprintf("removeTicketRef(node = %x sn = %v)", ref.t.node.ID[:8], ref.t.serial))
+	topic := ref.topic()
+	tickets := s.tickets[topic].buckets
+	if tickets == nil {
+		return
+	}
+	bucket := timeBucket(ref.t.regTime[ref.idx] / mclock.AbsTime(ticketTimeBucketLen))
+	list := tickets[bucket]
+	idx := -1
+	for i, bt := range list {
+		if bt.t == ref.t {
+			idx = i
+			break
+		}
+	}
+	if idx == -1 {
+		// The reference must be present in its bucket; a miss indicates a
+		// programming error. panic(nil) is useless to recovering callers,
+		// so panic with a descriptive message instead.
+		panic("removeTicketRef: ticket not found in bucket")
+	}
+	list = append(list[:idx], list[idx+1:]...)
+	if len(list) != 0 {
+		tickets[bucket] = list
+	} else {
+		delete(tickets, bucket)
+	}
+	ref.t.refCnt--
+	if ref.t.refCnt == 0 {
+		delete(s.nodes, ref.t.node)
+		delete(s.nodeLastReq, ref.t.node)
+	}
+
+	// Make nextRegisterableTicket return the next available ticket.
+	s.nextTicketCached = nil
+}
+
+type lookupInfo struct {
+	target       common.Hash
+	topic        Topic
+	radiusLookup bool
+}
+
+type reqInfo struct {
+	pingHash []byte
+	lookup   lookupInfo
+	time     mclock.AbsTime
+}
+
+// returns -1 if not found
+func (t *ticket) findIdx(topic Topic) int {
+	for i, tt := range t.topics {
+		if tt == topic {
+			return i
+		}
+	}
+	return -1
+}
+
+func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte) {
+	now := mclock.Now()
+	for i, n := range nodes {
+		if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
+			if lookup.radiusLookup {
+				if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
+					s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
+				}
+			} else {
+				if s.nodes[n] == nil {
+					s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
+				}
+			}
+		}
+	}
+}
+
+func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte, query func(n *Node, topic Topic) []byte) {
+	now := mclock.Now()
+	for i, n := range nodes {
+		if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
+			if lookup.radiusLookup {
+				if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
+					s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
+				}
+			} // else {
+			if s.canQueryTopic(n, lookup.topic) {
+				hash := query(n, lookup.topic)
+				s.addTopicQuery(common.BytesToHash(hash), n, lookup)
+			}
+			//}
+		}
+	}
+}
+
+func (s *ticketStore) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t *ticket) {
+	for i, topic := range t.topics {
+		if tt, ok := s.radius[topic]; ok {
+			tt.adjustWithTicket(now, targetHash, ticketRef{t, i})
+		}
+	}
+}
+
+// addTicket processes a ticket received in response to one of our pings.
+// The ticket is only accepted if pingHash matches the hash of the last
+// ping we sent to the node. It always feeds the ticket into the radius
+// estimator; for non-radius (registration) lookups it may also keep the
+// ticket for registering the looked-up topic.
+func (s *ticketStore) addTicket(localTime mclock.AbsTime, pingHash []byte, t *ticket) {
+	debugLog(fmt.Sprintf("add(node = %x sn = %v)", t.node.ID[:8], t.serial))
+
+	lastReq, ok := s.nodeLastReq[t.node]
+	if !(ok && bytes.Equal(pingHash, lastReq.pingHash)) {
+		return
+	}
+	s.adjustWithTicket(localTime, lastReq.lookup.target, t)
+
+	if lastReq.lookup.radiusLookup || s.nodes[t.node] != nil {
+		return
+	}
+
+	topic := lastReq.lookup.topic
+	topicIdx := t.findIdx(topic)
+	if topicIdx == -1 {
+		return
+	}
+
+	bucket := timeBucket(localTime / mclock.AbsTime(ticketTimeBucketLen))
+	if s.lastBucketFetched == 0 || bucket < s.lastBucketFetched {
+		s.lastBucketFetched = bucket
+	}
+
+	if _, ok := s.tickets[topic]; ok {
+		wait := t.regTime[topicIdx] - localTime
+		// Keep tickets whose wait time is below a randomized threshold;
+		// the capped exponential draw spreads registrations over time.
+		rnd := rand.ExpFloat64()
+		if rnd > 10 {
+			rnd = 10
+		}
+		if float64(wait) < float64(keepTicketConst)+float64(keepTicketExp)*rnd {
+			// use the ticket to register this topic
+			//fmt.Println("addTicket", t.node.ID[:8], t.node.addr().String(), t.serial, t.pong)
+			s.addTicketRef(ticketRef{t, topicIdx})
+		}
+	}
+
+	if t.refCnt > 0 {
+		s.nextTicketCached = nil
+		s.nodes[t.node] = t
+	}
+}
+
+func (s *ticketStore) getNodeTicket(node *Node) *ticket {
+	if s.nodes[node] == nil {
+		debugLog(fmt.Sprintf("getNodeTicket(%x) sn = nil", node.ID[:8]))
+	} else {
+		debugLog(fmt.Sprintf("getNodeTicket(%x) sn = %v", node.ID[:8], s.nodes[node].serial))
+	}
+	return s.nodes[node]
+}
+
+func (s *ticketStore) canQueryTopic(node *Node, topic Topic) bool {
+	qq := s.queriesSent[node]
+	if qq != nil {
+		now := mclock.Now()
+		for _, sq := range qq {
+			if sq.lookup.topic == topic && sq.sent > now-mclock.AbsTime(topicQueryResend) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (s *ticketStore) addTopicQuery(hash common.Hash, node *Node, lookup lookupInfo) {
+	now := mclock.Now()
+	qq := s.queriesSent[node]
+	if qq == nil {
+		qq = make(map[common.Hash]sentQuery)
+		s.queriesSent[node] = qq
+	}
+	qq[hash] = sentQuery{sent: now, lookup: lookup}
+	s.cleanupTopicQueries(now)
+}
+
+// cleanupTopicQueries drops sent-query records that are older than
+// topicQueryResend, removing per-node maps that become empty. The scan
+// itself is rate-limited through nextTopicQueryCleanup.
+func (s *ticketStore) cleanupTopicQueries(now mclock.AbsTime) {
+	if s.nextTopicQueryCleanup > now {
+		return
+	}
+	exp := now - mclock.AbsTime(topicQueryResend)
+	for n, qq := range s.queriesSent {
+		for h, q := range qq {
+			if q.sent < exp {
+				delete(qq, h)
+			}
+		}
+		if len(qq) == 0 {
+			delete(s.queriesSent, n)
+		}
+	}
+	s.nextTopicQueryCleanup = now + mclock.AbsTime(topicQueryTimeout)
+}
+
+// gotTopicNodes handles a topic query reply from a node. It returns true
+// if the reply arrived after the query timed out (or no matching query
+// was sent). Otherwise the result is fed into the radius estimator and
+// any received nodes are delivered to the topic's search channel with a
+// non-blocking send; delivery stops as soon as the channel is full.
+func (s *ticketStore) gotTopicNodes(from *Node, hash common.Hash, nodes []rpcNode) (timeout bool) {
+	now := mclock.Now()
+	//fmt.Println("got", from.addr().String(), hash, len(nodes))
+	qq := s.queriesSent[from]
+	if qq == nil {
+		return true
+	}
+	q, ok := qq[hash]
+	if !ok || now > q.sent+mclock.AbsTime(topicQueryTimeout) {
+		return true
+	}
+	inside := float64(0)
+	if len(nodes) > 0 {
+		inside = 1
+	}
+	s.radius[q.lookup.topic].adjust(now, q.lookup.target, from.sha, inside)
+	chn := s.searchTopicMap[q.lookup.topic].foundChn
+	if chn == nil {
+		//fmt.Println("no channel")
+		return false
+	}
+	for _, node := range nodes {
+		ip := node.IP
+		if ip.IsUnspecified() || ip.IsLoopback() {
+			ip = from.IP
+		}
+		enode := NewNode(node.ID, ip, node.UDP-1, node.TCP-1).String() // subtract one from port while discv5 is running in test mode on UDPport+1
+		select {
+		case chn <- enode:
+		default:
+			return false
+		}
+	}
+	return false
+}
+
+type topicRadius struct {
+	topic             Topic
+	topicHashPrefix   uint64
+	radius, minRadius uint64
+	buckets           []topicRadiusBucket
+}
+
+type topicRadiusEvent int
+
+const (
+	trOutside topicRadiusEvent = iota
+	trInside
+	trNoAdjust
+	trCount
+)
+
+type topicRadiusBucket struct {
+	weights    [trCount]float64
+	lastTime   mclock.AbsTime
+	value      float64
+	lookupSent map[common.Hash]mclock.AbsTime
+}
+
+// update applies exponential decay (time constant radiusTC) to the
+// bucket weights, and converts lookups that have been pending longer
+// than pingTimeout into trNoAdjust weight.
+func (b *topicRadiusBucket) update(now mclock.AbsTime) {
+	if now == b.lastTime {
+		return
+	}
+	exp := math.Exp(-float64(now-b.lastTime) / float64(radiusTC))
+	for i, w := range b.weights {
+		b.weights[i] = w * exp
+	}
+	b.lastTime = now
+
+	for target, tm := range b.lookupSent {
+		if now-tm > mclock.AbsTime(pingTimeout) {
+			b.weights[trNoAdjust] += 1
+			delete(b.lookupSent, target)
+		}
+	}
+}
+
+func (b *topicRadiusBucket) adjust(now mclock.AbsTime, inside float64) {
+	b.update(now)
+	if inside <= 0 {
+		b.weights[trOutside] += 1
+	} else {
+		if inside >= 1 {
+			b.weights[trInside] += 1
+		} else {
+			b.weights[trInside] += inside
+			b.weights[trOutside] += 1 - inside
+		}
+	}
+}
+
+func newTopicRadius(t Topic) *topicRadius {
+	topicHash := crypto.Keccak256Hash([]byte(t))
+	topicHashPrefix := binary.BigEndian.Uint64(topicHash[0:8])
+
+	return &topicRadius{
+		topic:           t,
+		topicHashPrefix: topicHashPrefix,
+		radius:          maxRadius,
+		minRadius:       maxRadius,
+	}
+}
+
+// getBucketIdx maps an address hash to a radius bucket index, based on
+// the base-2 logarithm of its XOR distance from the topic hash prefix.
+// Closer addresses map to higher indices; the result is clamped into
+// the valid bucket range.
+func (r *topicRadius) getBucketIdx(addrHash common.Hash) int {
+	prefix := binary.BigEndian.Uint64(addrHash[0:8])
+	var log2 float64
+	if prefix != r.topicHashPrefix {
+		log2 = math.Log2(float64(prefix ^ r.topicHashPrefix))
+	}
+	bucket := int((64 - log2) * radiusBucketsPerBit)
+	max := 64*radiusBucketsPerBit - 1
+	if bucket > max {
+		return max
+	}
+	if bucket < 0 {
+		return 0
+	}
+	return bucket
+}
+
+// targetForBucket generates a random lookup target whose XOR distance
+// from the topic hash prefix falls into the given bucket. Only the
+// first 8 bytes encode the distance; the remaining bytes are random.
+func (r *topicRadius) targetForBucket(bucket int) common.Hash {
+	min := math.Pow(2, 64-float64(bucket+1)/radiusBucketsPerBit)
+	max := math.Pow(2, 64-float64(bucket)/radiusBucketsPerBit)
+	a := uint64(min)
+	b := randUint64n(uint64(max - min))
+	xor := a + b
+	// Saturate on overflow so the distance stays in range.
+	if xor < a {
+		xor = ^uint64(0)
+	}
+	prefix := r.topicHashPrefix ^ xor
+	var target common.Hash
+	binary.BigEndian.PutUint64(target[0:8], prefix)
+	globalRandRead(target[8:])
+	return target
+}
+
+// globalRandRead fills b with pseudo-random bytes.
+// package rand provides a Read function in Go 1.6 and later, but
+// we can't use it yet because we still support Go 1.5.
+// Only 7 bytes are consumed per rand.Int() call, which is safe since
+// Int returns at least 63 random bits.
+func globalRandRead(b []byte) {
+	pos := 0
+	val := 0
+	for n := 0; n < len(b); n++ {
+		if pos == 0 {
+			val = rand.Int()
+			pos = 7
+		}
+		b[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+}
+
+func (r *topicRadius) isInRadius(addrHash common.Hash) bool {
+	nodePrefix := binary.BigEndian.Uint64(addrHash[0:8])
+	dist := nodePrefix ^ r.topicHashPrefix
+	return dist < r.radius
+}
+
+// chooseLookupBucket picks a uniformly random bucket index in [a, b]
+// among those that are still eligible for adjustment (trNoAdjust weight
+// below maxNoAdjust) or that do not exist yet. It returns -1 if the
+// range is empty or no bucket is eligible.
+func (r *topicRadius) chooseLookupBucket(a, b int) int {
+	if a < 0 {
+		a = 0
+	}
+	if a > b {
+		return -1
+	}
+	c := 0
+	for i := a; i <= b; i++ {
+		if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust {
+			c++
+		}
+	}
+	if c == 0 {
+		return -1
+	}
+	rnd := randUint(uint32(c))
+	for i := a; i <= b; i++ {
+		if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust {
+			if rnd == 0 {
+				return i
+			}
+			rnd--
+		}
+	}
+	// c > 0 guarantees the loop above returns; reaching this is a bug.
+	panic("chooseLookupBucket: no eligible bucket found on second pass")
+}
+
+// needMoreLookups reports whether the peak at maxValue is not yet
+// pronounced enough: it returns true when the difference between
+// maxValue and the largest bucket value in [a, b] is below minPeakSize.
+// NOTE(review): when b >= len(r.buckets) this reads r.buckets[len-1];
+// presumably callers guarantee non-empty buckets in that case — verify.
+func (r *topicRadius) needMoreLookups(a, b int, maxValue float64) bool {
+	var max float64
+	if a < 0 {
+		a = 0
+	}
+	if b >= len(r.buckets) {
+		b = len(r.buckets) - 1
+		if r.buckets[b].value > max {
+			max = r.buckets[b].value
+		}
+	}
+	if b >= a {
+		for i := a; i <= b; i++ {
+			if r.buckets[i].value > max {
+				max = r.buckets[i].value
+			}
+		}
+	}
+	return maxValue-max < minPeakSize
+}
+
+// recalcRadius updates the bucket statistics and recomputes the topic
+// radius estimate. It returns the new radius and, when more probing is
+// needed, the index of a bucket to target with a radius lookup
+// (radiusLookup is -1 when no lookup is required; only then is
+// r.radius updated).
+func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) {
+	maxBucket := 0
+	maxValue := float64(0)
+	now := mclock.Now()
+	v := float64(0)
+	// Age all bucket weights and compute the running outside-inside sum.
+	for i := range r.buckets {
+		r.buckets[i].update(now)
+		v += r.buckets[i].weights[trOutside] - r.buckets[i].weights[trInside]
+		r.buckets[i].value = v
+		//fmt.Printf("%v %v | ", v, r.buckets[i].weights[trNoAdjust])
+	}
+	//fmt.Println()
+	// Find the peak of the cumulative value and where it crosses the
+	// minimum slope line.
+	slopeCross := -1
+	for i, b := range r.buckets {
+		v := b.value
+		if v < float64(i)*minSlope {
+			slopeCross = i
+			break
+		}
+		if v > maxValue {
+			maxValue = v
+			maxBucket = i + 1
+		}
+	}
+
+	// Walk from the far end until enough sample weight has accumulated
+	// to derive the minimum radius.
+	minRadBucket := len(r.buckets)
+	sum := float64(0)
+	for minRadBucket > 0 && sum < minRightSum {
+		minRadBucket--
+		b := r.buckets[minRadBucket]
+		sum += b.weights[trInside] + b.weights[trOutside]
+	}
+	r.minRadius = uint64(math.Pow(2, 64-float64(minRadBucket)/radiusBucketsPerBit))
+
+	lookupLeft := -1
+	if r.needMoreLookups(0, maxBucket-lookupWidth-1, maxValue) {
+		lookupLeft = r.chooseLookupBucket(maxBucket-lookupWidth, maxBucket-1)
+	}
+	lookupRight := -1
+	if slopeCross != maxBucket && (minRadBucket <= maxBucket || r.needMoreLookups(maxBucket+lookupWidth, len(r.buckets)-1, maxValue)) {
+		for len(r.buckets) <= maxBucket+lookupWidth {
+			r.buckets = append(r.buckets, topicRadiusBucket{lookupSent: make(map[common.Hash]mclock.AbsTime)})
+		}
+		lookupRight = r.chooseLookupBucket(maxBucket, maxBucket+lookupWidth-1)
+	}
+	// Pick one side at random when lookups are wanted on both sides.
+	if lookupLeft == -1 {
+		radiusLookup = lookupRight
+	} else {
+		if lookupRight == -1 {
+			radiusLookup = lookupLeft
+		} else {
+			if randUint(2) == 0 {
+				radiusLookup = lookupLeft
+			} else {
+				radiusLookup = lookupRight
+			}
+		}
+	}
+
+	//fmt.Println("mb", maxBucket, "sc", slopeCross, "mrb", minRadBucket, "ll", lookupLeft, "lr", lookupRight, "mv", maxValue)
+
+	if radiusLookup == -1 {
+		// no more radius lookups needed at the moment, return a radius
+		rad := maxBucket
+		if minRadBucket < rad {
+			rad = minRadBucket
+		}
+		radius = ^uint64(0)
+		if rad > 0 {
+			radius = uint64(math.Pow(2, 64-float64(rad)/radiusBucketsPerBit))
+		}
+		r.radius = radius
+	}
+
+	return
+}
+
+// nextTarget returns the next lookup target for this topic. Unless
+// forceRegular is set, a radius-detection target is preferred whenever
+// the estimator requests one. Otherwise a regular target is drawn from
+// a roughly triangular distribution around the current radius.
+func (r *topicRadius) nextTarget(forceRegular bool) lookupInfo {
+	if !forceRegular {
+		_, radiusLookup := r.recalcRadius()
+		if radiusLookup != -1 {
+			target := r.targetForBucket(radiusLookup)
+			r.buckets[radiusLookup].lookupSent[target] = mclock.Now()
+			return lookupInfo{target: target, topic: r.topic, radiusLookup: true}
+		}
+	}
+
+	radExt := r.radius / 2
+	if radExt > maxRadius-r.radius {
+		radExt = maxRadius - r.radius
+	}
+	rnd := randUint64n(r.radius) + randUint64n(2*radExt)
+	if rnd > radExt {
+		rnd -= radExt
+	} else {
+		rnd = radExt - rnd
+	}
+
+	prefix := r.topicHashPrefix ^ rnd
+	var target common.Hash
+	binary.BigEndian.PutUint64(target[0:8], prefix)
+	globalRandRead(target[8:])
+	return lookupInfo{target: target, topic: r.topic, radiusLookup: false}
+}
+
+func (r *topicRadius) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t ticketRef) {
+	wait := t.t.regTime[t.idx] - t.t.issueTime
+	inside := float64(wait)/float64(targetWaitTime) - 0.5
+	if inside > 1 {
+		inside = 1
+	}
+	if inside < 0 {
+		inside = 0
+	}
+	r.adjust(now, targetHash, t.t.node.sha, inside)
+}
+
+func (r *topicRadius) adjust(now mclock.AbsTime, targetHash, addrHash common.Hash, inside float64) {
+	bucket := r.getBucketIdx(addrHash)
+	//fmt.Println("adjust", bucket, len(r.buckets), inside)
+	if bucket >= len(r.buckets) {
+		return
+	}
+	r.buckets[bucket].adjust(now, inside)
+	delete(r.buckets[bucket].lookupSent, targetHash)
+}
diff --git a/p2p/discv5/topic.go b/p2p/discv5/topic.go
new file mode 100644
index 0000000000000000000000000000000000000000..625921e84c116d016aa6ecc5cee1efc931c45d3e
--- /dev/null
+++ b/p2p/discv5/topic.go
@@ -0,0 +1,406 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"container/heap"
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+)
+
+const (
+	maxEntries         = 10000
+	maxEntriesPerTopic = 50
+
+	fallbackRegistrationExpiry = 1 * time.Hour
+)
+
+type Topic string
+
+type topicEntry struct {
+	topic   Topic
+	fifoIdx uint64
+	node    *Node
+	expire  mclock.AbsTime
+}
+
+type topicInfo struct {
+	entries            map[uint64]*topicEntry
+	fifoHead, fifoTail uint64
+	rqItem             *topicRequestQueueItem
+	wcl                waitControlLoop
+}
+
+// getFifoTail removes and returns the oldest surviving entry of the
+// topic fifo, skipping indices whose entries have already been deleted.
+// It must only be called when the topic has at least one entry.
+func (t *topicInfo) getFifoTail() *topicEntry {
+	for t.entries[t.fifoTail] == nil {
+		t.fifoTail++
+	}
+	tail := t.entries[t.fifoTail]
+	t.fifoTail++
+	return tail
+}
+
+type nodeInfo struct {
+	entries                          map[Topic]*topicEntry
+	lastIssuedTicket, lastUsedTicket uint32
+	// you can't register a ticket newer than lastUsedTicket before noRegUntil (absolute time)
+	noRegUntil mclock.AbsTime
+}
+
+type topicTable struct {
+	db                    *nodeDB
+	self                  *Node
+	nodes                 map[*Node]*nodeInfo
+	topics                map[Topic]*topicInfo
+	globalEntries         uint64
+	requested             topicRequestQueue
+	requestCnt            uint64
+	lastGarbageCollection mclock.AbsTime
+}
+
+func newTopicTable(db *nodeDB, self *Node) *topicTable {
+	if printTestImgLogs {
+		fmt.Printf("*N %016x\n", self.sha[:8])
+	}
+	return &topicTable{
+		db:     db,
+		nodes:  make(map[*Node]*nodeInfo),
+		topics: make(map[Topic]*topicInfo),
+		self:   self,
+	}
+}
+
+func (t *topicTable) getOrNewTopic(topic Topic) *topicInfo {
+	ti := t.topics[topic]
+	if ti == nil {
+		rqItem := &topicRequestQueueItem{
+			topic:    topic,
+			priority: t.requestCnt,
+		}
+		ti = &topicInfo{
+			entries: make(map[uint64]*topicEntry),
+			rqItem:  rqItem,
+		}
+		t.topics[topic] = ti
+		heap.Push(&t.requested, rqItem)
+	}
+	return ti
+}
+
+func (t *topicTable) checkDeleteTopic(topic Topic) {
+	ti := t.topics[topic]
+	if ti == nil {
+		return
+	}
+	if len(ti.entries) == 0 && ti.wcl.hasMinimumWaitPeriod() {
+		delete(t.topics, topic)
+		heap.Remove(&t.requested, ti.rqItem.index)
+	}
+}
+
+func (t *topicTable) getOrNewNode(node *Node) *nodeInfo {
+	n := t.nodes[node]
+	if n == nil {
+		//fmt.Printf("newNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
+		var issued, used uint32
+		if t.db != nil {
+			issued, used = t.db.fetchTopicRegTickets(node.ID)
+		}
+		n = &nodeInfo{
+			entries:          make(map[Topic]*topicEntry),
+			lastIssuedTicket: issued,
+			lastUsedTicket:   used,
+		}
+		t.nodes[node] = n
+	}
+	return n
+}
+
+func (t *topicTable) checkDeleteNode(node *Node) {
+	if n, ok := t.nodes[node]; ok && len(n.entries) == 0 && n.noRegUntil < mclock.Now() {
+		//fmt.Printf("deleteNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
+		delete(t.nodes, node)
+	}
+}
+
+func (t *topicTable) storeTicketCounters(node *Node) {
+	n := t.getOrNewNode(node)
+	if t.db != nil {
+		t.db.updateTopicRegTickets(node.ID, n.lastIssuedTicket, n.lastUsedTicket)
+	}
+}
+
+// getEntries returns the nodes currently registered for topic, after a
+// garbage-collection pass. It also bumps the topic's request priority
+// so that frequently requested topics are evicted last.
+func (t *topicTable) getEntries(topic Topic) []*Node {
+	t.collectGarbage()
+
+	te := t.topics[topic]
+	if te == nil {
+		return nil
+	}
+	nodes := make([]*Node, len(te.entries))
+	i := 0
+	for _, e := range te.entries {
+		nodes[i] = e.node
+		i++
+	}
+	t.requestCnt++
+	t.requested.update(te.rqItem, t.requestCnt)
+	return nodes
+}
+
+// addEntry registers node under topic. Any previous entries by the same
+// node are removed first. Per-topic and global capacity limits are
+// enforced by evicting the topic's fifo tail or the globally least
+// requested entry, respectively.
+func (t *topicTable) addEntry(node *Node, topic Topic) {
+	n := t.getOrNewNode(node)
+	// clear previous entries by the same node
+	for _, e := range n.entries {
+		t.deleteEntry(e)
+	}
+	// deleteEntry may have dropped the node's info record; re-fetch it.
+	n = t.getOrNewNode(node)
+
+	tm := mclock.Now()
+	te := t.getOrNewTopic(topic)
+
+	if len(te.entries) == maxEntriesPerTopic {
+		t.deleteEntry(te.getFifoTail())
+	}
+
+	if t.globalEntries == maxEntries {
+		t.deleteEntry(t.leastRequested()) // not empty, no need to check for nil
+	}
+
+	fifoIdx := te.fifoHead
+	te.fifoHead++
+	entry := &topicEntry{
+		topic:   topic,
+		fifoIdx: fifoIdx,
+		node:    node,
+		expire:  tm + mclock.AbsTime(fallbackRegistrationExpiry),
+	}
+	if printTestImgLogs {
+		fmt.Printf("*+ %d %v %016x %016x\n", tm/1000000, topic, t.self.sha[:8], node.sha[:8])
+	}
+	te.entries[fifoIdx] = entry
+	n.entries[topic] = entry
+	t.globalEntries++
+	te.wcl.registered(tm)
+}
+
+// removes least requested element from the fifo
+func (t *topicTable) leastRequested() *topicEntry {
+	for t.requested.Len() > 0 && t.topics[t.requested[0].topic] == nil {
+		heap.Pop(&t.requested)
+	}
+	if t.requested.Len() == 0 {
+		return nil
+	}
+	return t.topics[t.requested[0].topic].getFifoTail()
+}
+
+// entry should exist
+func (t *topicTable) deleteEntry(e *topicEntry) {
+	if printTestImgLogs {
+		fmt.Printf("*- %d %v %016x %016x\n", mclock.Now()/1000000, e.topic, t.self.sha[:8], e.node.sha[:8])
+	}
+	ne := t.nodes[e.node].entries
+	delete(ne, e.topic)
+	if len(ne) == 0 {
+		t.checkDeleteNode(e.node)
+	}
+	te := t.topics[e.topic]
+	delete(te.entries, e.fifoIdx)
+	if len(te.entries) == 0 {
+		t.checkDeleteTopic(e.topic)
+	}
+	t.globalEntries--
+}
+
+// It is assumed that topics and waitPeriods have the same length.
+func (t *topicTable) useTicket(node *Node, serialNo uint32, topics []Topic, idx int, issueTime uint64, waitPeriods []uint32) (registered bool) {
+	debugLog(fmt.Sprintf("useTicket %v %v %v", serialNo, topics, waitPeriods))
+	//fmt.Println("useTicket", serialNo, topics, waitPeriods)
+	t.collectGarbage()
+
+	n := t.getOrNewNode(node)
+	if serialNo < n.lastUsedTicket {
+		return false
+	}
+
+	tm := mclock.Now()
+	if serialNo > n.lastUsedTicket && tm < n.noRegUntil {
+		return false
+	}
+	if serialNo != n.lastUsedTicket {
+		n.lastUsedTicket = serialNo
+		n.noRegUntil = tm + mclock.AbsTime(noRegTimeout())
+		t.storeTicketCounters(node)
+	}
+
+	currTime := uint64(tm / mclock.AbsTime(time.Second))
+	regTime := issueTime + uint64(waitPeriods[idx])
+	relTime := int64(currTime - regTime)
+	if relTime >= -1 && relTime <= regTimeWindow+1 { // give clients a little security margin on both ends
+		if e := n.entries[topics[idx]]; e == nil {
+			t.addEntry(node, topics[idx])
+		} else {
+			// if there is an active entry, don't move to the front of the FIFO but prolong expire time
+			e.expire = tm + mclock.AbsTime(fallbackRegistrationExpiry)
+		}
+		return true
+	}
+
+	return false
+}
+
+func (topictab *topicTable) getTicket(node *Node, topics []Topic) *ticket {
+	topictab.collectGarbage()
+
+	now := mclock.Now()
+	n := topictab.getOrNewNode(node)
+	n.lastIssuedTicket++
+	topictab.storeTicketCounters(node)
+
+	t := &ticket{
+		issueTime: now,
+		topics:    topics,
+		serial:    n.lastIssuedTicket,
+		regTime:   make([]mclock.AbsTime, len(topics)),
+	}
+	for i, topic := range topics {
+		var waitPeriod time.Duration
+		if topic := topictab.topics[topic]; topic != nil {
+			waitPeriod = topic.wcl.waitPeriod
+		} else {
+			waitPeriod = minWaitPeriod
+		}
+
+		t.regTime[i] = now + mclock.AbsTime(waitPeriod)
+	}
+	return t
+}
+
+const gcInterval = time.Minute
+
+func (t *topicTable) collectGarbage() {
+	tm := mclock.Now()
+	if time.Duration(tm-t.lastGarbageCollection) < gcInterval {
+		return
+	}
+	t.lastGarbageCollection = tm
+
+	for node, n := range t.nodes {
+		for _, e := range n.entries {
+			if e.expire <= tm {
+				t.deleteEntry(e)
+			}
+		}
+
+		t.checkDeleteNode(node)
+	}
+
+	for topic, _ := range t.topics {
+		t.checkDeleteTopic(topic)
+	}
+}
+
+const (
+	minWaitPeriod   = time.Minute
+	regTimeWindow   = 10 // seconds
+	avgnoRegTimeout = time.Minute * 10
+	// target average interval between two incoming ad requests
+	wcTargetRegInterval = time.Minute * 10 / maxEntriesPerTopic
+	//
+	wcTimeConst = time.Minute * 10
+)
+
+// initialization is not required, will set to minWaitPeriod at first registration
+type waitControlLoop struct {
+	lastIncoming mclock.AbsTime
+	waitPeriod   time.Duration
+}
+
+func (w *waitControlLoop) registered(tm mclock.AbsTime) {
+	w.waitPeriod = w.nextWaitPeriod(tm)
+	w.lastIncoming = tm
+}
+
+func (w *waitControlLoop) nextWaitPeriod(tm mclock.AbsTime) time.Duration {
+	period := tm - w.lastIncoming
+	wp := time.Duration(float64(w.waitPeriod) * math.Exp((float64(wcTargetRegInterval)-float64(period))/float64(wcTimeConst)))
+	if wp < minWaitPeriod {
+		wp = minWaitPeriod
+	}
+	return wp
+}
+
+func (w *waitControlLoop) hasMinimumWaitPeriod() bool {
+	return w.nextWaitPeriod(mclock.Now()) == minWaitPeriod
+}
+
+func noRegTimeout() time.Duration {
+	e := rand.ExpFloat64()
+	if e > 100 {
+		e = 100
+	}
+	return time.Duration(float64(avgnoRegTimeout) * e)
+}
+
+type topicRequestQueueItem struct {
+	topic    Topic
+	priority uint64
+	index    int
+}
+
+// A topicRequestQueue implements heap.Interface and holds topicRequestQueueItems.
+type topicRequestQueue []*topicRequestQueueItem
+
+func (tq topicRequestQueue) Len() int { return len(tq) }
+
+func (tq topicRequestQueue) Less(i, j int) bool {
+	return tq[i].priority < tq[j].priority
+}
+
+func (tq topicRequestQueue) Swap(i, j int) {
+	tq[i], tq[j] = tq[j], tq[i]
+	tq[i].index = i
+	tq[j].index = j
+}
+
+func (tq *topicRequestQueue) Push(x interface{}) {
+	n := len(*tq)
+	item := x.(*topicRequestQueueItem)
+	item.index = n
+	*tq = append(*tq, item)
+}
+
+func (tq *topicRequestQueue) Pop() interface{} {
+	old := *tq
+	n := len(old)
+	item := old[n-1]
+	item.index = -1
+	*tq = old[0 : n-1]
+	return item
+}
+
+func (tq *topicRequestQueue) update(item *topicRequestQueueItem, priority uint64) {
+	item.priority = priority
+	heap.Fix(tq, item.index)
+}
diff --git a/p2p/discv5/topic_test.go b/p2p/discv5/topic_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba79993f29e96500dcf080dacfb75b1fb2e731e8
--- /dev/null
+++ b/p2p/discv5/topic_test.go
@@ -0,0 +1,71 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"encoding/binary"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+)
+
+func TestTopicRadius(t *testing.T) {
+	now := mclock.Now()
+	topic := Topic("qwerty")
+	rad := newTopicRadius(topic)
+	targetRad := (^uint64(0)) / 100
+
+	waitFn := func(addr common.Hash) time.Duration {
+		prefix := binary.BigEndian.Uint64(addr[0:8])
+		dist := prefix ^ rad.topicHashPrefix
+		relDist := float64(dist) / float64(targetRad)
+		relTime := (1 - relDist/2) * 2
+		if relTime < 0 {
+			relTime = 0
+		}
+		return time.Duration(float64(targetWaitTime) * relTime)
+	}
+
+	bcnt := 0
+	cnt := 0
+	var sum float64
+	for cnt < 100 {
+		addr := rad.nextTarget(false).target
+		wait := waitFn(addr)
+		ticket := &ticket{
+			topics:  []Topic{topic},
+			regTime: []mclock.AbsTime{mclock.AbsTime(wait)},
+			node:    &Node{nodeNetGuts: nodeNetGuts{sha: addr}},
+		}
+		rad.adjustWithTicket(now, addr, ticketRef{ticket, 0})
+		if rad.radius != maxRadius {
+			cnt++
+			sum += float64(rad.radius)
+		} else {
+			bcnt++
+			if bcnt > 500 {
+				t.Errorf("Radius did not converge in 500 iterations")
+			}
+		}
+	}
+	avgRel := sum / float64(cnt) / float64(targetRad)
+	if avgRel > 1.05 || avgRel < 0.95 {
+		t.Errorf("Average/target ratio is too far from 1 (%v)", avgRel)
+	}
+}
diff --git a/p2p/discv5/udp.go b/p2p/discv5/udp.go
new file mode 100644
index 0000000000000000000000000000000000000000..af961984cabff7e9d13583da842fb859ab47c019
--- /dev/null
+++ b/p2p/discv5/udp.go
@@ -0,0 +1,456 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/p2p/nat"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+const Version = 4
+
+// Errors
+var (
+	errPacketTooSmall   = errors.New("too small")
+	errBadHash          = errors.New("bad hash")
+	errExpired          = errors.New("expired")
+	errUnsolicitedReply = errors.New("unsolicited reply")
+	errUnknownNode      = errors.New("unknown node")
+	errTimeout          = errors.New("RPC timeout")
+	errClockWarp        = errors.New("reply deadline too far in the future")
+	errClosed           = errors.New("socket closed")
+)
+
+// Timeouts
+const (
+	respTimeout = 500 * time.Millisecond
+	sendTimeout = 500 * time.Millisecond
+	expiration  = 20 * time.Second
+
+	ntpFailureThreshold = 32               // Continuous timeouts after which to check NTP
+	ntpWarningCooldown  = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
+	driftThreshold      = 10 * time.Second // Allowed clock drift before warning user
+)
+
+// RPC request structures
+type (
+	ping struct {
+		Version    uint
+		From, To   rpcEndpoint
+		Expiration uint64
+
+		// v5
+		Topics []Topic
+
+		// Ignore additional fields (for forward compatibility).
+		Rest []rlp.RawValue `rlp:"tail"`
+	}
+
+	// pong is the reply to ping.
+	pong struct {
+		// This field should mirror the UDP envelope address
+		// of the ping packet, which provides a way to discover
+		// the external address (after NAT).
+		To rpcEndpoint
+
+		ReplyTok   []byte // This contains the hash of the ping packet.
+		Expiration uint64 // Absolute timestamp at which the packet becomes invalid.
+
+		// v5
+		TopicHash    common.Hash
+		TicketSerial uint32
+		WaitPeriods  []uint32
+
+		// Ignore additional fields (for forward compatibility).
+		Rest []rlp.RawValue `rlp:"tail"`
+	}
+
+	// findnode is a query for nodes close to the given target.
+	findnode struct {
+		Target     NodeID // doesn't need to be an actual public key
+		Expiration uint64
+		// Ignore additional fields (for forward compatibility).
+		Rest []rlp.RawValue `rlp:"tail"`
+	}
+
+	// findnodeHash is a query for nodes close to the given target hash.
+	findnodeHash struct {
+		Target     common.Hash
+		Expiration uint64
+		// Ignore additional fields (for forward compatibility).
+		Rest []rlp.RawValue `rlp:"tail"`
+	}
+
+	// reply to findnode
+	neighbors struct {
+		Nodes      []rpcNode
+		Expiration uint64
+		// Ignore additional fields (for forward compatibility).
+		Rest []rlp.RawValue `rlp:"tail"`
+	}
+
+	topicRegister struct {
+		Topics []Topic
+		Idx    uint
+		Pong   []byte
+	}
+
+	topicQuery struct {
+		Topic      Topic
+		Expiration uint64
+	}
+
+	// reply to topicQuery
+	topicNodes struct {
+		Echo  common.Hash
+		Nodes []rpcNode
+	}
+
+	rpcNode struct {
+		IP  net.IP // len 4 for IPv4 or 16 for IPv6
+		UDP uint16 // for discovery protocol
+		TCP uint16 // for RLPx protocol
+		ID  NodeID
+	}
+
+	rpcEndpoint struct {
+		IP  net.IP // len 4 for IPv4 or 16 for IPv6
+		UDP uint16 // for discovery protocol
+		TCP uint16 // for RLPx protocol
+	}
+)
+
+const (
+	macSize  = 256 / 8
+	sigSize  = 520 / 8
+	headSize = macSize + sigSize // space of packet frame data
+)
+
+// Neighbors replies are sent across multiple packets to
+// stay below the 1280 byte limit. We compute the maximum number
+// of entries by stuffing a packet until it grows too large.
+var maxNeighbors = func() int {
+	p := neighbors{Expiration: ^uint64(0)}
+	maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
+	for n := 0; ; n++ {
+		p.Nodes = append(p.Nodes, maxSizeNode)
+		size, _, err := rlp.EncodeToReader(p)
+		if err != nil {
+			// If this ever happens, it will be caught by the unit tests.
+			panic("cannot encode: " + err.Error())
+		}
+		if headSize+size+1 >= 1280 {
+			return n
+		}
+	}
+}()
+
+var maxTopicNodes = func() int {
+	p := topicNodes{}
+	maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
+	for n := 0; ; n++ {
+		p.Nodes = append(p.Nodes, maxSizeNode)
+		size, _, err := rlp.EncodeToReader(p)
+		if err != nil {
+			// If this ever happens, it will be caught by the unit tests.
+			panic("cannot encode: " + err.Error())
+		}
+		if headSize+size+1 >= 1280 {
+			return n
+		}
+	}
+}()
+
+func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
+	ip := addr.IP.To4()
+	if ip == nil {
+		ip = addr.IP.To16()
+	}
+	return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
+}
+
+func (e1 rpcEndpoint) equal(e2 rpcEndpoint) bool {
+	return e1.UDP == e2.UDP && e1.TCP == e2.TCP && bytes.Equal(e1.IP, e2.IP)
+}
+
+func nodeFromRPC(rn rpcNode) (*Node, error) {
+	// TODO: don't accept localhost, LAN addresses from internet hosts
+	n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP)
+	err := n.validateComplete()
+	return n, err
+}
+
+func nodeToRPC(n *Node) rpcNode {
+	return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP}
+}
+
+type ingressPacket struct {
+	remoteID   NodeID
+	remoteAddr *net.UDPAddr
+	ev         nodeEvent
+	hash       []byte
+	data       interface{} // one of the RPC structs
+	rawData    []byte
+}
+
+type conn interface {
+	ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error)
+	WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error)
+	Close() error
+	LocalAddr() net.Addr
+}
+
+// udp implements the RPC protocol.
+type udp struct {
+	conn        conn
+	priv        *ecdsa.PrivateKey
+	ourEndpoint rpcEndpoint
+	nat         nat.Interface
+	net         *Network
+}
+
+// ListenUDP returns a new table that listens for UDP packets on laddr.
+func ListenUDP(priv *ecdsa.PrivateKey, laddr string, natm nat.Interface, nodeDBPath string) (*Network, error) {
+	transport, err := listenUDP(priv, laddr)
+	if err != nil {
+		return nil, err
+	}
+	net, err := newNetwork(transport, priv.PublicKey, natm, nodeDBPath)
+	if err != nil {
+		return nil, err
+	}
+	transport.net = net
+	go transport.readLoop()
+	return net, nil
+}
+
+func listenUDP(priv *ecdsa.PrivateKey, laddr string) (*udp, error) {
+	addr, err := net.ResolveUDPAddr("udp", laddr)
+	if err != nil {
+		return nil, err
+	}
+	conn, err := net.ListenUDP("udp", addr)
+	if err != nil {
+		return nil, err
+	}
+	return &udp{conn: conn, priv: priv, ourEndpoint: makeEndpoint(addr, uint16(addr.Port))}, nil
+}
+
+func (t *udp) localAddr() *net.UDPAddr {
+	return t.conn.LocalAddr().(*net.UDPAddr)
+}
+
+func (t *udp) Close() {
+	t.conn.Close()
+}
+
+func (t *udp) send(remote *Node, ptype nodeEvent, data interface{}) (hash []byte) {
+	hash, _ = t.sendPacket(remote.ID, remote.addr(), byte(ptype), data)
+	return hash
+}
+
+func (t *udp) sendPing(remote *Node, toaddr *net.UDPAddr, topics []Topic) (hash []byte) {
+	hash, _ = t.sendPacket(remote.ID, toaddr, byte(pingPacket), ping{
+		Version:    Version,
+		From:       t.ourEndpoint,
+		To:         makeEndpoint(toaddr, uint16(toaddr.Port)), // TODO: maybe use known TCP port from DB
+		Expiration: uint64(time.Now().Add(expiration).Unix()),
+		Topics:     topics,
+	})
+	return hash
+}
+
+func (t *udp) sendFindnode(remote *Node, target NodeID) {
+	t.sendPacket(remote.ID, remote.addr(), byte(findnodePacket), findnode{
+		Target:     target,
+		Expiration: uint64(time.Now().Add(expiration).Unix()),
+	})
+}
+
+func (t *udp) sendNeighbours(remote *Node, results []*Node) {
+	// Send neighbors in chunks with at most maxNeighbors per packet
+	// to stay below the 1280 byte limit.
+	p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
+	for i, result := range results {
+		p.Nodes = append(p.Nodes, nodeToRPC(result))
+		if len(p.Nodes) == maxNeighbors || i == len(results)-1 {
+			t.sendPacket(remote.ID, remote.addr(), byte(neighborsPacket), p)
+			p.Nodes = p.Nodes[:0]
+		}
+	}
+}
+
+func (t *udp) sendFindnodeHash(remote *Node, target common.Hash) {
+	t.sendPacket(remote.ID, remote.addr(), byte(findnodeHashPacket), findnodeHash{
+		Target:     target,
+		Expiration: uint64(time.Now().Add(expiration).Unix()),
+	})
+}
+
+func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) {
+	t.sendPacket(remote.ID, remote.addr(), byte(topicRegisterPacket), topicRegister{
+		Topics: topics,
+		Idx:    uint(idx),
+		Pong:   pong,
+	})
+}
+
+func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
+	p := topicNodes{Echo: queryHash}
+	if len(nodes) == 0 {
+		t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
+		return
+	}
+	for i, result := range nodes {
+		p.Nodes = append(p.Nodes, nodeToRPC(result))
+		if len(p.Nodes) == maxTopicNodes || i == len(nodes)-1 {
+			t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
+			p.Nodes = p.Nodes[:0]
+		}
+	}
+}
+
+func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
+	packet, hash, err := encodePacket(t.priv, ptype, req)
+	if err != nil {
+		return hash, err
+	}
+	glog.V(logger.Detail).Infof(">>> %v to %x@%v\n", nodeEvent(ptype), toid[:8], toaddr)
+	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
+		glog.V(logger.Detail).Infoln("UDP send failed:", err)
+	}
+	return hash, err
+}
+
+// zeroed padding space for encodePacket.
+var headSpace = make([]byte, headSize)
+
+func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash []byte, err error) {
+	b := new(bytes.Buffer)
+	b.Write(headSpace)
+	b.WriteByte(ptype)
+	if err := rlp.Encode(b, req); err != nil {
+		glog.V(logger.Error).Infoln("error encoding packet:", err)
+		return nil, nil, err
+	}
+	packet := b.Bytes()
+	sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
+	if err != nil {
+		glog.V(logger.Error).Infoln("could not sign packet:", err)
+		return nil, nil, err
+	}
+	copy(packet[macSize:], sig)
+	// add the hash to the front. Note: this doesn't protect the
+	// packet in any way.
+	hash = crypto.Keccak256(packet[macSize:])
+	copy(packet, hash)
+	return packet, hash, nil
+}
+
+// readLoop runs in its own goroutine. it injects ingress UDP packets
+// into the network loop.
+func (t *udp) readLoop() {
+	defer t.conn.Close()
+	// Discovery packets are defined to be no larger than 1280 bytes.
+	// Packets larger than this size will be cut at the end and treated
+	// as invalid because their hash won't match.
+	buf := make([]byte, 1280)
+	for {
+		nbytes, from, err := t.conn.ReadFromUDP(buf)
+		if isTemporaryError(err) {
+			// Ignore temporary read errors.
+			glog.V(logger.Debug).Infof("Temporary read error: %v", err)
+			continue
+		} else if err != nil {
+			// Shut down the loop for permanent errors.
+			glog.V(logger.Debug).Infof("Read error: %v", err)
+			return
+		}
+		t.handlePacket(from, buf[:nbytes])
+	}
+}
+
+func isTemporaryError(err error) bool {
+	tempErr, ok := err.(interface {
+		Temporary() bool
+	})
+	return ok && tempErr.Temporary() || isPacketTooBig(err)
+}
+
+func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
+	pkt := ingressPacket{remoteAddr: from}
+	if err := decodePacket(buf, &pkt); err != nil {
+		glog.V(logger.Debug).Infof("Bad packet from %v: %v\n", from, err)
+		return err
+	}
+	t.net.reqReadPacket(pkt)
+	return nil
+}
+
+func decodePacket(buffer []byte, pkt *ingressPacket) error {
+	if len(buffer) < headSize+1 {
+		return errPacketTooSmall
+	}
+	buf := make([]byte, len(buffer))
+	copy(buf, buffer)
+	hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
+	shouldhash := crypto.Keccak256(buf[macSize:])
+	if !bytes.Equal(hash, shouldhash) {
+		return errBadHash
+	}
+	fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
+	if err != nil {
+		return err
+	}
+	pkt.rawData = buf
+	pkt.hash = hash
+	pkt.remoteID = fromID
+	switch pkt.ev = nodeEvent(sigdata[0]); pkt.ev {
+	case pingPacket:
+		pkt.data = new(ping)
+	case pongPacket:
+		pkt.data = new(pong)
+	case findnodePacket:
+		pkt.data = new(findnode)
+	case neighborsPacket:
+		pkt.data = new(neighbors)
+	case findnodeHashPacket:
+		pkt.data = new(findnodeHash)
+	case topicRegisterPacket:
+		pkt.data = new(topicRegister)
+	case topicQueryPacket:
+		pkt.data = new(topicQuery)
+	case topicNodesPacket:
+		pkt.data = new(topicNodes)
+	default:
+		return fmt.Errorf("unknown packet type: %d", sigdata[0])
+	}
+	s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
+	err = s.Decode(pkt.data)
+	return err
+}
diff --git a/p2p/discv5/udp_notwindows.go b/p2p/discv5/udp_notwindows.go
new file mode 100644
index 0000000000000000000000000000000000000000..4da18d0f64a9663c968b0838b45471d5b232a3b2
--- /dev/null
+++ b/p2p/discv5/udp_notwindows.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//+build !windows
+
+package discv5
+
+// reports whether err indicates that a UDP packet didn't
+// fit the receive buffer. There is no such error on
+// non-Windows platforms.
+func isPacketTooBig(err error) bool {
+	return false
+}
diff --git a/p2p/discv5/udp_test.go b/p2p/discv5/udp_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..cacc0f00468ad95672b4de9c88f09a8caafb7704
--- /dev/null
+++ b/p2p/discv5/udp_test.go
@@ -0,0 +1,505 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discv5
+
+import (
+	"encoding/hex"
+	"errors"
+	"io"
+	"net"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+func init() {
+	spew.Config.DisableMethods = true
+}
+
+// This test checks that isPacketTooBig correctly identifies
+// errors that result from receiving a UDP packet larger
+// than the supplied receive buffer.
+func TestIsPacketTooBig(t *testing.T) {
+	listener, err := net.ListenPacket("udp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer listener.Close()
+	sender, err := net.Dial("udp", listener.LocalAddr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer sender.Close()
+
+	sendN := 1800
+	recvN := 300
+	for i := 0; i < 20; i++ {
+		go func() {
+			buf := make([]byte, sendN)
+			for i := range buf {
+				buf[i] = byte(i)
+			}
+			sender.Write(buf)
+		}()
+
+		buf := make([]byte, recvN)
+		listener.SetDeadline(time.Now().Add(1 * time.Second))
+		n, _, err := listener.ReadFrom(buf)
+		if err != nil {
+			if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+				continue
+			}
+			if !isPacketTooBig(err) {
+				t.Fatal("unexpected read error:", spew.Sdump(err))
+			}
+			continue
+		}
+		if n != recvN {
+			t.Fatalf("short read: %d, want %d", n, recvN)
+		}
+		for i := range buf {
+			if buf[i] != byte(i) {
+				t.Fatalf("error in pattern")
+				break
+			}
+		}
+	}
+}
+
+// shared test variables
+var (
+	futureExp          = uint64(time.Now().Add(10 * time.Hour).Unix())
+	testTarget         = NodeID{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}
+	testRemote         = rpcEndpoint{IP: net.ParseIP("1.1.1.1").To4(), UDP: 1, TCP: 2}
+	testLocalAnnounced = rpcEndpoint{IP: net.ParseIP("2.2.2.2").To4(), UDP: 3, TCP: 4}
+	testLocal          = rpcEndpoint{IP: net.ParseIP("3.3.3.3").To4(), UDP: 5, TCP: 6}
+)
+
+// type udpTest struct {
+// 	t                   *testing.T
+// 	pipe                *dgramPipe
+// 	table               *Table
+// 	udp                 *udp
+// 	sent                [][]byte
+// 	localkey, remotekey *ecdsa.PrivateKey
+// 	remoteaddr          *net.UDPAddr
+// }
+//
+// func newUDPTest(t *testing.T) *udpTest {
+// 	test := &udpTest{
+// 		t:          t,
+// 		pipe:       newpipe(),
+// 		localkey:   newkey(),
+// 		remotekey:  newkey(),
+// 		remoteaddr: &net.UDPAddr{IP: net.IP{1, 2, 3, 4}, Port: 30303},
+// 	}
+// 	test.table, test.udp, _ = newUDP(test.localkey, test.pipe, nil, "")
+// 	return test
+// }
+//
+// // handles a packet as if it had been sent to the transport.
+// func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {
+// 	enc, err := encodePacket(test.remotekey, ptype, data)
+// 	if err != nil {
+// 		return test.errorf("packet (%d) encode error: %v", ptype, err)
+// 	}
+// 	test.sent = append(test.sent, enc)
+// 	if err = test.udp.handlePacket(test.remoteaddr, enc); err != wantError {
+// 		return test.errorf("error mismatch: got %q, want %q", err, wantError)
+// 	}
+// 	return nil
+// }
+//
+// // waits for a packet to be sent by the transport.
+// // validate should have type func(*udpTest, X) error, where X is a packet type.
+// func (test *udpTest) waitPacketOut(validate interface{}) error {
+// 	dgram := test.pipe.waitPacketOut()
+// 	p, _, _, err := decodePacket(dgram)
+// 	if err != nil {
+// 		return test.errorf("sent packet decode error: %v", err)
+// 	}
+// 	fn := reflect.ValueOf(validate)
+// 	exptype := fn.Type().In(0)
+// 	if reflect.TypeOf(p) != exptype {
+// 		return test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
+// 	}
+// 	fn.Call([]reflect.Value{reflect.ValueOf(p)})
+// 	return nil
+// }
+//
+// func (test *udpTest) errorf(format string, args ...interface{}) error {
+// 	_, file, line, ok := runtime.Caller(2) // errorf + waitPacketOut
+// 	if ok {
+// 		file = filepath.Base(file)
+// 	} else {
+// 		file = "???"
+// 		line = 1
+// 	}
+// 	err := fmt.Errorf(format, args...)
+// 	fmt.Printf("\t%s:%d: %v\n", file, line, err)
+// 	test.t.Fail()
+// 	return err
+// }
+//
+// func TestUDP_packetErrors(t *testing.T) {
+// 	test := newUDPTest(t)
+// 	defer test.table.Close()
+//
+// 	test.packetIn(errExpired, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version})
+// 	test.packetIn(errUnsolicitedReply, pongPacket, &pong{ReplyTok: []byte{}, Expiration: futureExp})
+// 	test.packetIn(errUnknownNode, findnodePacket, &findnode{Expiration: futureExp})
+// 	test.packetIn(errUnsolicitedReply, neighborsPacket, &neighbors{Expiration: futureExp})
+// }
+//
+// func TestUDP_findnode(t *testing.T) {
+// 	test := newUDPTest(t)
+// 	defer test.table.Close()
+//
+// 	// put a few nodes into the table. their exact
+// 	// distribution shouldn't matter much, altough we need to
+// 	// take care not to overflow any bucket.
+// 	targetHash := crypto.Keccak256Hash(testTarget[:])
+// 	nodes := &nodesByDistance{target: targetHash}
+// 	for i := 0; i < bucketSize; i++ {
+// 		nodes.push(nodeAtDistance(test.table.self.sha, i+2), bucketSize)
+// 	}
+// 	test.table.stuff(nodes.entries)
+//
+// 	// ensure there's a bond with the test node,
+// 	// findnode won't be accepted otherwise.
+// 	test.table.db.updateNode(NewNode(
+// 		PubkeyID(&test.remotekey.PublicKey),
+// 		test.remoteaddr.IP,
+// 		uint16(test.remoteaddr.Port),
+// 		99,
+// 	))
+// 	// check that closest neighbors are returned.
+// 	test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
+// 	expected := test.table.closest(targetHash, bucketSize)
+//
+// 	waitNeighbors := func(want []*Node) {
+// 		test.waitPacketOut(func(p *neighbors) {
+// 			if len(p.Nodes) != len(want) {
+// 				t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
+// 			}
+// 			for i := range p.Nodes {
+// 				if p.Nodes[i].ID != want[i].ID {
+// 					t.Errorf("result mismatch at %d:\n  got:  %v\n  want: %v", i, p.Nodes[i], expected.entries[i])
+// 				}
+// 			}
+// 		})
+// 	}
+// 	waitNeighbors(expected.entries[:maxNeighbors])
+// 	waitNeighbors(expected.entries[maxNeighbors:])
+// }
+//
+// func TestUDP_findnodeMultiReply(t *testing.T) {
+// 	test := newUDPTest(t)
+// 	defer test.table.Close()
+//
+// 	// queue a pending findnode request
+// 	resultc, errc := make(chan []*Node), make(chan error)
+// 	go func() {
+// 		rid := PubkeyID(&test.remotekey.PublicKey)
+// 		ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget)
+// 		if err != nil && len(ns) == 0 {
+// 			errc <- err
+// 		} else {
+// 			resultc <- ns
+// 		}
+// 	}()
+//
+// 	// wait for the findnode to be sent.
+// 	// after it is sent, the transport is waiting for a reply
+// 	test.waitPacketOut(func(p *findnode) {
+// 		if p.Target != testTarget {
+// 			t.Errorf("wrong target: got %v, want %v", p.Target, testTarget)
+// 		}
+// 	})
+//
+// 	// send the reply as two packets.
+// 	list := []*Node{
+// 		MustParseNode("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"),
+// 		MustParseNode("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"),
+// 		MustParseNode("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"),
+// 		MustParseNode("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"),
+// 	}
+// 	rpclist := make([]rpcNode, len(list))
+// 	for i := range list {
+// 		rpclist[i] = nodeToRPC(list[i])
+// 	}
+// 	test.packetIn(nil, neighborsPacket, &neighbors{Expiration: futureExp, Nodes: rpclist[:2]})
+// 	test.packetIn(nil, neighborsPacket, &neighbors{Expiration: futureExp, Nodes: rpclist[2:]})
+//
+// 	// check that the sent neighbors are all returned by findnode
+// 	select {
+// 	case result := <-resultc:
+// 		if !reflect.DeepEqual(result, list) {
+// 			t.Errorf("neighbors mismatch:\n  got:  %v\n  want: %v", result, list)
+// 		}
+// 	case err := <-errc:
+// 		t.Errorf("findnode error: %v", err)
+// 	case <-time.After(5 * time.Second):
+// 		t.Error("findnode did not return within 5 seconds")
+// 	}
+// }
+//
+// func TestUDP_successfulPing(t *testing.T) {
+// 	test := newUDPTest(t)
+// 	added := make(chan *Node, 1)
+// 	test.table.nodeAddedHook = func(n *Node) { added <- n }
+// 	defer test.table.Close()
+//
+// 	// The remote side sends a ping packet to initiate the exchange.
+// 	go test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp})
+//
+// 	// the ping is replied to.
+// 	test.waitPacketOut(func(p *pong) {
+// 		pinghash := test.sent[0][:macSize]
+// 		if !bytes.Equal(p.ReplyTok, pinghash) {
+// 			t.Errorf("got pong.ReplyTok %x, want %x", p.ReplyTok, pinghash)
+// 		}
+// 		wantTo := rpcEndpoint{
+// 			// The mirrored UDP address is the UDP packet sender
+// 			IP: test.remoteaddr.IP, UDP: uint16(test.remoteaddr.Port),
+// 			// The mirrored TCP port is the one from the ping packet
+// 			TCP: testRemote.TCP,
+// 		}
+// 		if !reflect.DeepEqual(p.To, wantTo) {
+// 			t.Errorf("got pong.To %v, want %v", p.To, wantTo)
+// 		}
+// 	})
+//
+// 	// remote is unknown, the table pings back.
+// 	test.waitPacketOut(func(p *ping) error {
+// 		if !reflect.DeepEqual(p.From, test.udp.ourEndpoint) {
+// 			t.Errorf("got ping.From %v, want %v", p.From, test.udp.ourEndpoint)
+// 		}
+// 		wantTo := rpcEndpoint{
+// 			// The mirrored UDP address is the UDP packet sender.
+// 			IP: test.remoteaddr.IP, UDP: uint16(test.remoteaddr.Port),
+// 			TCP: 0,
+// 		}
+// 		if !reflect.DeepEqual(p.To, wantTo) {
+// 			t.Errorf("got ping.To %v, want %v", p.To, wantTo)
+// 		}
+// 		return nil
+// 	})
+// 	test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})
+//
+// 	// the node should be added to the table shortly after getting the
+// 	// pong packet.
+// 	select {
+// 	case n := <-added:
+// 		rid := PubkeyID(&test.remotekey.PublicKey)
+// 		if n.ID != rid {
+// 			t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid)
+// 		}
+// 		if !bytes.Equal(n.IP, test.remoteaddr.IP) {
+// 			t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP)
+// 		}
+// 		if int(n.UDP) != test.remoteaddr.Port {
+// 			t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port)
+// 		}
+// 		if n.TCP != testRemote.TCP {
+// 			t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP)
+// 		}
+// 	case <-time.After(2 * time.Second):
+// 		t.Errorf("node was not added within 2 seconds")
+// 	}
+// }
+
+var testPackets = []struct {
+	input      string
+	wantPacket interface{}
+}{
+	{
+		input: "71dbda3a79554728d4f94411e42ee1f8b0d561c10e1e5f5893367948c6a7d70bb87b235fa28a77070271b6c164a2dce8c7e13a5739b53b5e96f2e5acb0e458a02902f5965d55ecbeb2ebb6cabb8b2b232896a36b737666c55265ad0a68412f250001ea04cb847f000001820cfa8215a8d790000000000000000000000000000000018208ae820d058443b9a355",
+		wantPacket: &ping{
+			Version:    4,
+			From:       rpcEndpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544},
+			To:         rpcEndpoint{net.ParseIP("::1"), 2222, 3333},
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{},
+		},
+	},
+	{
+		input: "e9614ccfd9fc3e74360018522d30e1419a143407ffcce748de3e22116b7e8dc92ff74788c0b6663aaa3d67d641936511c8f8d6ad8698b820a7cf9e1be7155e9a241f556658c55428ec0563514365799a4be2be5a685a80971ddcfa80cb422cdd0101ec04cb847f000001820cfa8215a8d790000000000000000000000000000000018208ae820d058443b9a3550102",
+		wantPacket: &ping{
+			Version:    4,
+			From:       rpcEndpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544},
+			To:         rpcEndpoint{net.ParseIP("::1"), 2222, 3333},
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{{0x01}, {0x02}},
+		},
+	},
+	{
+		input: "577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba76023fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee1917084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c76d922dc3",
+		wantPacket: &ping{
+			Version:    555,
+			From:       rpcEndpoint{net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 3322, 5544},
+			To:         rpcEndpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338},
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{{0xC5, 0x01, 0x02, 0x03, 0x04, 0x05}},
+		},
+	},
+	{
+		input: "09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b2069869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f055542124e",
+		wantPacket: &pong{
+			To:         rpcEndpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338},
+			ReplyTok:   common.Hex2Bytes("fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c954"),
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{{0xC6, 0x01, 0x02, 0x03, 0xC2, 0x04, 0x05}, {0x06}},
+		},
+	},
+	{
+		input: "c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396",
+		wantPacket: &findnode{
+			Target:     MustHexID("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{{0x82, 0x99, 0x99}, {0x83, 0x99, 0x99, 0x99}},
+		},
+	},
+	{
+		input: "c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db8403155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d313198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df738443b9a355010203b525a138aa34383fec3d2719a0",
+		wantPacket: &neighbors{
+			Nodes: []rpcNode{
+				{
+					ID:  MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
+					IP:  net.ParseIP("99.33.22.55").To4(),
+					UDP: 4444,
+					TCP: 4445,
+				},
+				{
+					ID:  MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
+					IP:  net.ParseIP("1.2.3.4").To4(),
+					UDP: 1,
+					TCP: 1,
+				},
+				{
+					ID:  MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
+					IP:  net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
+					UDP: 3333,
+					TCP: 3333,
+				},
+				{
+					ID:  MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
+					IP:  net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"),
+					UDP: 999,
+					TCP: 1000,
+				},
+			},
+			Expiration: 1136239445,
+			Rest:       []rlp.RawValue{{0x01}, {0x02}, {0x03}},
+		},
+	},
+}
+
+func TestForwardCompatibility(t *testing.T) {
+	t.Skip("skipped while working on discovery v5")
+
+	testkey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	wantNodeID := PubkeyID(&testkey.PublicKey)
+
+	for _, test := range testPackets {
+		input, err := hex.DecodeString(test.input)
+		if err != nil {
+			t.Fatalf("invalid hex: %s", test.input)
+		}
+		var pkt ingressPacket
+		if err := decodePacket(input, &pkt); err != nil {
+			t.Errorf("did not accept packet %s\n%v", test.input, err)
+			continue
+		}
+		if !reflect.DeepEqual(pkt.data, test.wantPacket) {
+			t.Errorf("got %s\nwant %s", spew.Sdump(pkt.data), spew.Sdump(test.wantPacket))
+		}
+		if pkt.remoteID != wantNodeID {
+			t.Errorf("got id %v\nwant id %v", pkt.remoteID, wantNodeID)
+		}
+	}
+}
+
+// dgramPipe is a fake UDP socket. It queues all sent datagrams.
+type dgramPipe struct {
+	mu      *sync.Mutex
+	cond    *sync.Cond
+	closing chan struct{}
+	closed  bool
+	queue   [][]byte
+}
+
+func newpipe() *dgramPipe {
+	mu := new(sync.Mutex)
+	return &dgramPipe{
+		closing: make(chan struct{}),
+		cond:    &sync.Cond{L: mu},
+		mu:      mu,
+	}
+}
+
+// WriteToUDP queues a datagram.
+func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) {
+	msg := make([]byte, len(b))
+	copy(msg, b)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.closed {
+		return 0, errors.New("closed")
+	}
+	c.queue = append(c.queue, msg)
+	c.cond.Signal()
+	return len(b), nil
+}
+
+// ReadFromUDP just hangs until the pipe is closed.
+func (c *dgramPipe) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
+	<-c.closing
+	return 0, nil, io.EOF
+}
+
+func (c *dgramPipe) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.closed {
+		close(c.closing)
+		c.closed = true
+	}
+	return nil
+}
+
+func (c *dgramPipe) LocalAddr() net.Addr {
+	return &net.UDPAddr{IP: testLocal.IP, Port: int(testLocal.UDP)}
+}
+
+func (c *dgramPipe) waitPacketOut() []byte {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	for len(c.queue) == 0 {
+		c.cond.Wait()
+	}
+	p := c.queue[0]
+	copy(c.queue, c.queue[1:])
+	c.queue = c.queue[:len(c.queue)-1]
+	return p
+}
diff --git a/p2p/discv5/udp_windows.go b/p2p/discv5/udp_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ab9d655ec623b7fa04c11a8581be1f95f26c264
--- /dev/null
+++ b/p2p/discv5/udp_windows.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build windows
+
+package discv5
+
+import (
+	"net"
+	"os"
+	"syscall"
+)
+
+const _WSAEMSGSIZE = syscall.Errno(10040)
+
+// isPacketTooBig reports whether err indicates that a UDP packet didn't
+// fit the receive buffer. On Windows, WSARecvFrom returns
+// code WSAEMSGSIZE and no data if this happens.
+func isPacketTooBig(err error) bool {
+	if opErr, ok := err.(*net.OpError); ok {
+		if scErr, ok := opErr.Err.(*os.SyscallError); ok {
+			return scErr.Err == _WSAEMSGSIZE
+		}
+		return opErr.Err == _WSAEMSGSIZE
+	}
+	return false
+}
diff --git a/p2p/server.go b/p2p/server.go
index 8e3cd93f99b79c0906a6a8fbe2953aad0533a142..649fbfb82d1e93f69f86426d7a35d3a8a5013456 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/nat"
 )
 
@@ -72,6 +73,8 @@ type Config struct {
 	// or not. Disabling is usually useful for protocol debugging (manual topology).
 	Discovery bool
 
+	DiscoveryV5 bool
+
 	// Name sets the node name of this server.
 	// Use common.MakeName to create a name that follows existing conventions.
 	Name string
@@ -105,6 +108,8 @@ type Config struct {
 	// the server is started.
 	ListenAddr string
 
+	ListenAddrV5 string
+
 	// If set to a non-nil value, the given NAT port mapper
 	// is used to make the listening port available to the
 	// Internet.
@@ -135,6 +140,7 @@ type Server struct {
 	listener     net.Listener
 	ourHandshake *protoHandshake
 	lastLookup   time.Time
+	DiscV5       *discv5.Network
 
 	// These are for Peers, PeerCount (and nothing else).
 	peerOp     chan peerOpFunc
@@ -352,6 +358,17 @@ func (srv *Server) Start() (err error) {
 		srv.ntab = ntab
 	}
 
+	if srv.DiscoveryV5 {
+		ntab, err := discv5.ListenUDP(srv.PrivateKey, srv.ListenAddrV5, srv.NAT, "") //srv.NodeDatabase)
+		if err != nil {
+			return err
+		}
+		if err := ntab.SetFallbackNodes(discv5.BootNodes); err != nil {
+			return err
+		}
+		srv.DiscV5 = ntab
+	}
+
 	dynPeers := (srv.MaxPeers + 1) / 2
 	if !srv.Discovery {
 		dynPeers = 0
@@ -527,6 +544,9 @@ running:
 	if srv.ntab != nil {
 		srv.ntab.Close()
 	}
+	if srv.DiscV5 != nil {
+		srv.DiscV5.Close()
+	}
 	// Disconnect all peers.
 	for _, p := range peers {
 		p.Disconnect(DiscQuitting)
diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go
index 91dea8cac3c1f83dd41478804c4c341c154f45e1..988d0ac923c978340565153e537fcd2c82867e15 100644
--- a/swarm/network/protocol_test.go
+++ b/swarm/network/protocol_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2014 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
diff --git a/vendor.conf b/vendor.conf
index b3a5a9d5ed5b9ea08d74965e0717bbe9d91357cf..30c96e81bb835fdc0393b9ebcce773d90ac65dd8 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -3,6 +3,7 @@ github.com/ethereum/go-ethereum
 
 # import
 github.com/Azure/azure-sdk-for-go	v5.0.0-beta-5-gbd73d95
+github.com/aristanetworks/goarista	ockafka-v0.0.2-7-g306a19f
 github.com/cespare/cp	165db2f
 github.com/davecgh/go-spew	v1.0.0-9-g346938d
 github.com/ethereum/ethash	v23.1-249-g214d4c0
diff --git a/vendor/github.com/aristanetworks/goarista/.travis.yml b/vendor/github.com/aristanetworks/goarista/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..534f444cae124364a6d2b44600420a05ff945544
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+- 1.6.2
+- tip
+before_install:
+- go get -v github.com/golang/lint/golint
+- go get -v -t -d ./...
+after_success:
+- make coverdata
+- bash <(curl -s https://codecov.io/bash)
+script:
+- make -j4 check GOTEST_FLAGS=-v
+notifications:
+  slack:
+    secure: MO/3LqbyALbi9vAY3pZetp/LfRuKEPAYEUya7XKmTWA3OFHYkTGqJWNosVkFJd6eSKwnc3HP4jlKADEBNVxADHzcA3uMPUQi1mIcNk/Ps1WWMNDv1liE2XOoOmHSHZ/8ksk6TNq83x+d17ZffYq8KAH6iKNKvllO1JzQPgJJdf+cNXQQlg6uPSe+ggMpjqVLkKcHqA4L3/BWo6fNcyvkqaN3uXcEzYPi7Nb2q9tl0ja6ToyZV4H6SinwitZmpedN3RkBcm4fKmGyw5ikzH93ycA5SvWrnXTh1dJvq6DU0FV7iwI6oqPTbAUc3FE5g7aEkK0qVR21s2j+KNaOLnuX10ZGQFwj2r3SW2REHq4j+qqFla/2EmSFZJt3GXYS+plmGCxqCgyjSw6tTi7LaGZ/mWBJEA9/EaXG1NkwlQYx5tdUMeGj77OczjXClynpb2hJ7MM2b32Rnp0JmNaXAh01SmClo+8nDWuksAsIdPtWsbF0/XHmEJiqpu8ojvVXOQIbPt43bjG7PS1t5jaRAU/N1n56SiCGgCSGd3Ui5eX5vmgWdpZMl8NG05G4LFsgmkdphRT5fru0C2PrhNZYRDGWs63XKapBxsvfqGzdHxTtYuaDjHjrI+9w0BC/8kEzSWoPmabQ5ci4wf4DeplcIay4tDMgMSo8pGAf52vrne4rmUo=
diff --git a/vendor/github.com/aristanetworks/goarista/AUTHORS b/vendor/github.com/aristanetworks/goarista/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..5bb93cb3fa6b6dff7e62690b699b0c87c75e6ec4
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/AUTHORS
@@ -0,0 +1,25 @@
+All contributors are required to sign a "Contributor License Agreement" at
+  <TBD>
+
+The following organizations and people have contributed code to this library.
+(Please keep both lists sorted alphabetically.)
+
+
+Arista Networks, Inc.
+
+
+Benoit Sigoure
+Fabrice Rabaute
+
+
+
+The list of individual contributors for code currently in HEAD can be obtained
+at any time with the following script:
+
+find . -type f \
+| while read i; do \
+  git blame -t $i 2>/dev/null; \
+  done \
+| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
+| awk '{a[$0]++; t++} END{for(n in a) print n}' \
+| sort
diff --git a/vendor/github.com/aristanetworks/goarista/COPYING b/vendor/github.com/aristanetworks/goarista/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..f433b1a53f5b830a205fd2df78e2b34974656c7b
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/COPYING
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/aristanetworks/goarista/Dockerfile b/vendor/github.com/aristanetworks/goarista/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e8a0f417931e76b08f13c809bda90157f9c4bde1
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/Dockerfile
@@ -0,0 +1,14 @@
+# Copyright (C) 2016  Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+# TODO: move this to cmd/ockafka (https://github.com/docker/hub-feedback/issues/292)
+FROM golang:1.6
+
+RUN mkdir -p /go/src/github.com/aristanetworks/goarista/cmd
+WORKDIR /go/src/github.com/aristanetworks/goarista
+COPY ./ .
+RUN go get -d ./cmd/ockafka/... \
+  && go install ./cmd/ockafka
+
+ENTRYPOINT ["/go/bin/ockafka"]
diff --git a/vendor/github.com/aristanetworks/goarista/Makefile b/vendor/github.com/aristanetworks/goarista/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..4ef2468dd65e43492f881c833faba06f16469f88
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/Makefile
@@ -0,0 +1,58 @@
+# Copyright (C) 2015  Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+GO := go
+TEST_TIMEOUT := 30s
+GOTEST_FLAGS :=
+
+DEFAULT_GOPATH := $${GOPATH%%:*}
+GOPATH_BIN := $(DEFAULT_GOPATH)/bin
+GOPATH_PKG := $(DEFAULT_GOPATH)/pkg
+GOLINT := $(GOPATH_BIN)/golint
+GOFOLDERS := find . -type d ! -path "./.git/*"
+
+all: install
+
+install:
+	$(GO) install ./...
+
+check: vet test fmtcheck lint
+
+COVER_PKGS := key test
+COVER_MODE := count
+coverdata:
+	echo 'mode: $(COVER_MODE)' >coverage.out
+	for dir in $(COVER_PKGS); do \
+	  $(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t ./$$dir || exit; \
+	  tail -n +2 cov.out-t >> coverage.out && \
+	  rm cov.out-t; \
+	done;
+
+coverage: coverdata
+	$(GO) tool cover -html=coverage.out
+	rm -f coverage.out
+
+fmtcheck:
+	errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi
+	find . -name '*.go' ! -name '*.pb.go' -exec ./check_line_len.awk {} +
+
+vet:
+	$(GO) vet ./...
+
+lint:
+	lint=`$(GOFOLDERS) | xargs -L 1 $(GOLINT) | fgrep -v .pb.go`; if test -n "$$lint"; then echo "$$lint"; exit 1; fi
+# The above is ugly, but unfortunately golint doesn't exit 1 when it finds
+# lint.  See https://github.com/golang/lint/issues/65
+
+test:
+	$(GO) test $(GOTEST_FLAGS) -timeout=$(TEST_TIMEOUT) ./...
+
+docker:
+	docker build -f cmd/occlient/Dockerfile .
+
+clean:
+	rm -rf $(GOPATH_PKG)/*/github.com/aristanetworks/goarista
+	$(GO) clean ./...
+
+.PHONY: all check clean coverage coverdata docker fmtcheck install lint test vet
diff --git a/vendor/github.com/aristanetworks/goarista/README.md b/vendor/github.com/aristanetworks/goarista/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..316d711ad212e3cf44e5cb592c42a14dfa66a5c4
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/README.md
@@ -0,0 +1,62 @@
+# Arista Go library [![Build Status](https://travis-ci.org/aristanetworks/goarista.svg?branch=master)](https://travis-ci.org/aristanetworks/goarista) [![codecov.io](http://codecov.io/github/aristanetworks/goarista/coverage.svg?branch=master)](http://codecov.io/github/aristanetworks/goarista?branch=master) [![GoDoc](https://godoc.org/github.com/aristanetworks/goarista?status.png)](https://godoc.org/github.com/aristanetworks/goarista) [![Go Report Card](https://goreportcard.com/badge/github.com/aristanetworks/goarista)](https://goreportcard.com/report/github.com/aristanetworks/goarista)
+
+## areflect
+
+Helper functions to work with the `reflect` package.  Contains
+`ForceExport()`, which bypasses the check in `reflect.Value` that
+prevents accessing unexported attributes.
+
+## monotime
+
+Provides access to a fast monotonic clock source, to fill in the gap in the
+[Go standard library, which lacks one](https://github.com/golang/go/issues/12914).
+Don't use `time.Now()` in code that needs to time things or otherwise assume
+that time passes at a constant rate, instead use `monotime.Now()`.
+
+## cmd
+
+See the [cmd](cmd) directory.
+
+## dscp
+
+Provides `ListenTCPWithTOS()`, which is a replacement for `net.ListenTCP()`
+that allows specifying the ToS (Type of Service), to specify DSCP / ECN /
+class of service flags to use for incoming connections.
+
+## key
+
+Provides a common type used across various Arista projects, named `key.Key`,
+which is used to work around the fact that Go doesn't allow using
+a non-hashable type as a key to a `map`, and we sometimes need to use
+a `map[string]interface{}` (or something containing one) as a key to maps.
+As a result, we frequently use `map[key.Key]interface{}` instead of just
+`map[interface{}]interface{}` when we need a generic key-value collection.
+
+## lanz
+A client for [LANZ](https://eos.arista.com/latency-analyzer-lanz-architectures-and-configuration/)
+streaming servers. It connects to a LANZ streaming server,
+listens for notifications, decodes them and sends the LANZ protobuf on the
+provided channel.
+
+## monitor
+
+A library to help expose monitoring metrics on top of the
+[`expvar`](https://golang.org/pkg/expvar/) infrastructure.
+
+## netns
+
+`netns.Do(namespace, cb)` provides a handy mechanism to execute the given
+callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/).
+
+## pathmap
+
+A data structure for mapping keys of type string slice to values. It
+allows for some fuzzy matching.
+
+## test
+
+This is a [Go](http://golang.org/) library to help in writing unit tests.
+
+## Examples
+
+TBD
diff --git a/vendor/github.com/aristanetworks/goarista/check_line_len.awk b/vendor/github.com/aristanetworks/goarista/check_line_len.awk
new file mode 100755
index 0000000000000000000000000000000000000000..a9db535505482337806b9e88ffc7e2a43cff0fb0
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/check_line_len.awk
@@ -0,0 +1,25 @@
+#!/usr/bin/awk -f
+# Copyright (C) 2015  Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+BEGIN {
+  max = 100;  # maximum allowed line width, in tab-expanded columns
+}
+
+# Expand tabs to 4 spaces.
+{
+  gsub(/\t/, "    ");
+}
+
+length() > max {  # report every line wider than max
+  errors++;
+  print FILENAME ":" FNR ": Line too long (" length() "/" max ")";
+}
+
+END {
+  if (errors >= 125) {  # exit status is 8-bit; cap below the shell-reserved 126+
+    errors = 125;
+  }
+  exit errors;  # non-zero (number of violations, capped) fails the caller
+}
diff --git a/vendor/github.com/aristanetworks/goarista/iptables.sh b/vendor/github.com/aristanetworks/goarista/iptables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f118ff493e2e65248201327fa8dc9e702123adf0
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/iptables.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+DEFAULT_PORT=6042  # port opened when no second argument is given
+
+set -e  # abort on the first failing command
+
+if [ "$#" -lt 1 ]
+then
+   echo "usage: $0 <host> [<port>]"
+   exit 1
+fi
+
+host=$1  # remote host to ssh into
+port=$DEFAULT_PORT
+if [ "$#" -gt 1 ]
+then
+   port=$2  # optional override of the port to open
+fi
+iptables="bash sudo iptables -A INPUT -p tcp --dport $port -j ACCEPT"  # NOTE(review): "bash sudo" runs the sudo binary as a bash script; likely should be just "sudo" -- confirm upstream
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $host "$iptables"  # host-key checking intentionally disabled
+echo "opened TCP port $port on $host"
diff --git a/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s b/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s
new file mode 100644
index 0000000000000000000000000000000000000000..66109f4f3171920fd6d535a64d098c9372cdfcd2
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s
@@ -0,0 +1,6 @@
+// Copyright (C) 2016  Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// This file is intentionally empty.
+// It's a workaround for https://github.com/golang/go/issues/15006
diff --git a/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go b/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go
new file mode 100644
index 0000000000000000000000000000000000000000..efc1b92a606c28c56f2d21c703603e0bfabe4c36
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go
@@ -0,0 +1,24 @@
+// Copyright (C) 2016  Arista Networks, Inc.
+// Use of this source code is governed by the Apache License 2.0
+// that can be found in the COPYING file.
+
+// Package monotime provides a fast monotonic clock source.
+package monotime
+
+import (
+	_ "unsafe" // required to use //go:linkname
+)
+
+//go:noescape
+//go:linkname nanotime runtime.nanotime
+func nanotime() int64 // implemented in package runtime; bound via the linkname directive above
+
+// Now returns the current time in nanoseconds from a monotonic clock.
+// The time returned is based on some arbitrary platform-specific point in the
+// past.  The time returned is guaranteed to increase monotonically at a
+// constant rate, unlike time.Now() from the Go standard library, which may
+// slow down, speed up, jump forward or backward, due to NTP activity or leap
+// seconds.
+func Now() uint64 {
+	return uint64(nanotime()) // widen the runtime's int64 reading to the exported uint64
+}
diff --git a/vendor/github.com/aristanetworks/goarista/rpmbuild.sh b/vendor/github.com/aristanetworks/goarista/rpmbuild.sh
new file mode 100755
index 0000000000000000000000000000000000000000..52b691bd9ac5bd3a27c12318d725660bf9ef4b4e
--- /dev/null
+++ b/vendor/github.com/aristanetworks/goarista/rpmbuild.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+# Copyright (C) 2016  Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+if [ "$#" -lt 1 ]
+then
+   echo "usage: $0 <binary>"
+   exit 1
+fi
+binary=$1  # name of the installed binary to package as an RPM
+
+if [ -z "$GOPATH" ] || [ -z "$GOOS" ] || [ -z "$GOARCH" ]
+then
+    echo "Please set \$GOPATH, \$GOOS and \$GOARCH"
+    exit 1
+fi
+
+set -e
+
+version=$(git rev-parse --short=7 HEAD)  # RPM version = short commit hash
+pwd=$(pwd)
+cd "$GOPATH/bin"
+if [ -d "${GOOS}_${GOARCH}" ]  # fix: was $GOOS_$GOARCH, which expands the empty variable $GOOS_
+then
+   cd "${GOOS}_${GOARCH}"      # fix: was $GOOS_GOARCH, a single undefined variable name
+fi
+os=$GOOS
+arch=$GOARCH
+if [ "$arch" = "386" ]         # fix: '==' is a bashism; '=' is the POSIX test operator under /bin/sh
+then
+   arch="i686"                 # rpm's name for 32-bit x86
+fi
+cmd="fpm -n $binary -v $version -s dir -t rpm --rpm-os $os -a $arch --epoch 0 --prefix /usr/bin $binary"
+echo $cmd
+$cmd
+mv $binary-$version-1.$arch.rpm "$pwd"