diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7ac7915f225c70b516aa45b7b368927147bcf5cc..58c1a4a62eae2d48f11beb00f9cf5da177539afb 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,21 +3,20 @@
 
 accounts/usbwallet              @karalabe
 accounts/scwallet               @gballet
-accounts/abi                    @gballet
+accounts/abi                    @gballet @MariusVanDerWijden
 cmd/clef                        @holiman
 cmd/puppeth                     @karalabe
 consensus                       @karalabe
 core/                           @karalabe @holiman @rjl493456442
-dashboard/                      @kurkomisi
 eth/                            @karalabe @holiman @rjl493456442
 graphql/                        @gballet
 les/                            @zsfelfoldi @rjl493456442
 light/                          @zsfelfoldi @rjl493456442
 mobile/                         @karalabe @ligi
+node/                           @fjl @renaynay
 p2p/                            @fjl @zsfelfoldi
 rpc/                            @fjl @holiman
-p2p/simulations                 @zelig @janos @justelad
-p2p/protocols                   @zelig @janos @justelad
-p2p/testing                     @zelig @janos @justelad
+p2p/simulations                 @fjl
+p2p/protocols                   @fjl
+p2p/testing                     @fjl
 signer/                         @holiman
-whisper/                        @gballet @gluk256
diff --git a/.travis.yml b/.travis.yml
index 1b61667c88d29424c4f6194d6b514b02802e0aac..36673ffab3701bf9379c0fb4c6c2a3e1f86f352b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ jobs:
     - stage: lint
       os: linux
       dist: xenial
-      go: 1.14.x
+      go: 1.15.x
       env:
         - lint
       git:
@@ -24,65 +24,12 @@ jobs:
       script:
         - go run build/ci.go lint
 
-    - stage: build
-      os: linux
-      dist: xenial
-      go: 1.13.x
-      env:
-        - GO111MODULE=on
-      script:
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
-    # These are the latest Go versions.
-    - stage: build
-      os: linux
-      arch: amd64
-      dist: xenial
-      go: 1.14.x
-      env:
-        - GO111MODULE=on
-      script:
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
-    - stage: build
-      if: type = pull_request
-      os: linux
-      arch: arm64
-      dist: xenial
-      go: 1.14.x
-      env:
-        - GO111MODULE=on
-      script:
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
-    - stage: build
-      os: osx
-      osx_image: xcode11.3
-      go: 1.14.x
-      env:
-        - GO111MODULE=on
-      script:
-        - echo "Increase the maximum number of open file descriptors on macOS"
-        - NOFILE=20480
-        - sudo sysctl -w kern.maxfiles=$NOFILE
-        - sudo sysctl -w kern.maxfilesperproc=$NOFILE
-        - sudo launchctl limit maxfiles $NOFILE $NOFILE
-        - sudo launchctl limit maxfiles
-        - ulimit -S -n $NOFILE
-        - ulimit -n
-        - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
-        - go run build/ci.go install
-        - go run build/ci.go test -coverage $TEST_PACKAGES
-
     # This builder does the Ubuntu PPA upload
     - stage: build
       if: type = push
       os: linux
       dist: xenial
-      go: 1.14.x
+      go: 1.15.x
       env:
         - ubuntu-ppa
         - GO111MODULE=on
@@ -107,7 +54,7 @@ jobs:
       os: linux
       dist: xenial
       sudo: required
-      go: 1.14.x
+      go: 1.15.x
       env:
         - azure-linux
         - GO111MODULE=on
@@ -119,22 +66,22 @@ jobs:
             - gcc-multilib
       script:
         # Build for the primary platforms that Trusty can manage
-        - go run build/ci.go install
+        - go run build/ci.go install -dlgo
         - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
-        - go run build/ci.go install -arch 386
+        - go run build/ci.go install -dlgo -arch 386
         - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
 
         # Switch over GCC to cross compilation (breaks 386, hence why do it here only)
         - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
         - sudo ln -s /usr/include/asm-generic /usr/include/asm
 
-        - GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
+        - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
         - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
-        - GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
+        - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
         - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
-        - GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc
+        - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc
         - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
-        - go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc
+        - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
         - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
 
     # This builder does the Linux Azure MIPS xgo uploads
@@ -144,7 +91,7 @@ jobs:
       dist: xenial
       services:
         - docker
-      go: 1.14.x
+      go: 1.15.x
       env:
         - azure-linux-mips
         - GO111MODULE=on
@@ -192,7 +139,7 @@ jobs:
       git:
         submodules: false # avoid cloning ethereum/tests
       before_install:
-        - curl https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz | tar -xz
+        - curl https://dl.google.com/go/go1.15.5.linux-amd64.tar.gz | tar -xz
         - export PATH=`pwd`/go/bin:$PATH
         - export GOROOT=`pwd`/go
         - export GOPATH=$HOME/go
@@ -210,7 +157,7 @@ jobs:
     - stage: build
       if: type = push
       os: osx
-      go: 1.14.x
+      go: 1.15.x
       env:
         - azure-osx
         - azure-ios
@@ -219,7 +166,7 @@ jobs:
       git:
         submodules: false # avoid cloning ethereum/tests
       script:
-        - go run build/ci.go install
+        - go run build/ci.go install -dlgo
         - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
 
         # Build the iOS framework and upload it to CocoaPods and Azure
@@ -237,12 +184,43 @@ jobs:
         - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
         - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
 
+    # These builders run the tests
+    - stage: build
+      os: linux
+      arch: amd64
+      dist: xenial
+      go: 1.15.x
+      env:
+        - GO111MODULE=on
+      script:
+        - go run build/ci.go test -coverage $TEST_PACKAGES
+
+    - stage: build
+      if: type = pull_request
+      os: linux
+      arch: arm64
+      dist: xenial
+      go: 1.15.x
+      env:
+        - GO111MODULE=on
+      script:
+        - go run build/ci.go test -coverage $TEST_PACKAGES
+
+    - stage: build
+      os: linux
+      dist: xenial
+      go: 1.14.x
+      env:
+        - GO111MODULE=on
+      script:
+        - go run build/ci.go test -coverage $TEST_PACKAGES
+
     # This builder does the Azure archive purges to avoid accumulating junk
     - stage: build
       if: type = cron
       os: linux
       dist: xenial
-      go: 1.14.x
+      go: 1.15.x
       env:
         - azure-purge
         - GO111MODULE=on
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 0216f6b544aecfd036dc122098215ccb0cbcadf4..9e6d898eaf233678bc55d30cd0f6fee1a147fbf3 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -152,7 +152,10 @@ func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method stri
 		}
 	} else {
 		output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
-		if err == nil && len(output) == 0 {
+		if err != nil {
+			return err
+		}
+		if len(output) == 0 {
 			// Make sure we have a contract to operate on, and bail out otherwise.
 			if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
 				return err
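Aside: the hunk above makes `BoundContract.Call` return a `CallContract` failure immediately instead of letting it fall through to the empty-output check. A minimal sketch of the caller-visible difference (the `checkOwner` helper and the `owner()` method are illustrative, not part of this diff):

```go
package example

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// checkOwner shows the two error paths after the fix: an RPC/transport
// failure from CallContract is now returned as-is, while an address with
// no code still surfaces as bind.ErrNoCode.
func checkOwner(c *bind.BoundContract) {
	var out []interface{}
	err := c.Call(&bind.CallOpts{BlockNumber: big.NewInt(100)}, &out, "owner")
	switch {
	case errors.Is(err, bind.ErrNoCode):
		fmt.Println("no contract code at the address for this block")
	case err != nil:
		fmt.Println("call failed:", err) // no longer misreported as missing code
	default:
		fmt.Println("owner:", out)
	}
}
```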
diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go
index 64eae64f689aa87764462b85c8100fd080ad6394..71f0f9392fc41870ce1a55c72c2e76b4622457e8 100644
--- a/accounts/usbwallet/ledger.go
+++ b/accounts/usbwallet/ledger.go
@@ -162,7 +162,7 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
 		return common.Address{}, nil, accounts.ErrWalletClosed
 	}
 	// Ensure the wallet is capable of signing the given transaction
-	if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
+	if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
 		//lint:ignore ST1005 brand name displayed on the console
 		return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
 	}
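To see why the added `w.version[1]` term matters: the old check rejected any 1.x.y firmware with a patch level of 2 or lower (for example v1.9.2), even though only releases before v1.0.3 lack EIP-155 signing. A hedged sketch of the intended predicate (the helper name is illustrative):

```go
package example

// supportsEIP155Signing reports whether a Ledger app can sign
// chainID-protected (EIP-155) transactions; support shipped in v1.0.3.
// For 1.x versions this is the negation of the fixed check above.
func supportsEIP155Signing(version [3]byte) bool {
	major, minor, patch := version[0], version[1], version[2]
	if major != 1 {
		return major > 1
	}
	return minor > 0 || patch >= 3
}
```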
diff --git a/appveyor.yml b/appveyor.yml
index 7d6bf87639a16eccc352af293a6416306c73f814..2bf67d45684da4c341f444c60330459f0d574ded 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -24,13 +24,13 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://dl.google.com/go/go1.15.windows-%GETH_ARCH%.zip
-  - 7z x go1.15.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://dl.google.com/go/go1.15.5.windows-%GETH_ARCH%.zip
+  - 7z x go1.15.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
 
 build_script:
-  - go run build\ci.go install
+  - go run build\ci.go install -dlgo
 
 after_build:
   - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
diff --git a/build/checksums.txt b/build/checksums.txt
index 39f855cd0cf4b0ceb9464e664b8a470374ef946d..32b376519f343ef9513f9aaee0cb38090910471b 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,6 +1,13 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a  go1.15.src.tar.gz
+c1076b90cf94b73ebed62a81d802cd84d43d02dea8c07abdc922c57a071c84f1  go1.15.5.src.tar.gz
+359a4334b8c8f5e3067e5a76f16419791ac3fef4613d8e8e1eac0b9719915f6d  go1.15.5.darwin-amd64.tar.gz
+4c8179d406136979724c71732009c7e2e7c794dbeaaa2a043c00da34d4be0559  go1.15.5.linux-386.tar.gz
+9a58494e8da722c3aef248c9227b0e9c528c7318309827780f16220998180a0d  go1.15.5.linux-amd64.tar.gz
+a72a0b036beb4193a0214bca3fca4c5d68a38a4ccf098c909f7ce8bf08567c48  go1.15.5.linux-arm64.tar.gz
+5ea6456620d3efed5dda99238c7f23866eafdd915e5348736e631bc283c0238a  go1.15.5.linux-armv6l.tar.gz
+d812436c7e3482ba3c97172edf26afaf35aca60a5621ff4a5f6a08386505ab9c  go1.15.5.windows-386.zip
+1d24be3a200201a74be25e4134fbec467750e834e84e9c7789a9fc13248c5507  go1.15.5.windows-amd64.zip
 
 d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35  golangci-lint-1.27.0-linux-armv7.tar.gz
 bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4  golangci-lint.exe-1.27.0-windows-amd64.zip
diff --git a/build/ci.go b/build/ci.go
index ab153eb85eae62c2b0f5a8a546fec7a221d6e0ec..0cffb903aadb6a99b149d3676488584f19cdefa0 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -46,12 +46,11 @@ import (
 	"encoding/base64"
 	"flag"
 	"fmt"
-	"go/parser"
-	"go/token"
 	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
+	"path"
 	"path/filepath"
 	"regexp"
 	"runtime"
@@ -135,11 +134,11 @@ var (
 	// Note: artful is unsupported because it was officially deprecated on Launchpad.
 	// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
 	// Note: disco is unsupported because it was officially deprecated on Launchpad.
+	// Note: eoan is unsupported because it was officially deprecated on Launchpad.
 	debDistroGoBoots = map[string]string{
 		"trusty": "golang-1.11",
 		"xenial": "golang-go",
 		"bionic": "golang-go",
-		"eoan":   "golang-go",
 		"focal":  "golang-go",
 		"groovy": "golang-go",
 	}
@@ -148,6 +147,11 @@ var (
 		"golang-1.11": "/usr/lib/go-1.11",
 		"golang-go":   "/usr/lib/go",
 	}
+
+	// This is the version of go that will be downloaded by
+	//
+	//     go run ci.go install -dlgo
+	dlgoVersion = "1.15.5"
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -198,19 +202,19 @@ func main() {
 
 func doInstall(cmdline []string) {
 	var (
+		dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
 		arch = flag.String("arch", "", "Architecture to cross build for")
 		cc   = flag.String("cc", "", "C compiler to cross build with")
 	)
 	flag.CommandLine.Parse(cmdline)
 	env := build.Env()
 
-	// Check Go version. People regularly open issues about compilation
+	// Check local Go version. People regularly open issues about compilation
 	// failure with outdated Go. This should save them the trouble.
 	if !strings.Contains(runtime.Version(), "devel") {
 		// Figure out the minor version number since we can't textually compare (1.10 < 1.9)
 		var minor int
 		fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
-
 		if minor < 13 {
 			log.Println("You have Go version", runtime.Version())
 			log.Println("go-ethereum requires at least Go version 1.13 and cannot")
@@ -218,90 +222,110 @@ func doInstall(cmdline []string) {
 			os.Exit(1)
 		}
 	}
-	// Compile packages given as arguments, or everything if there are no arguments.
-	packages := []string{"./..."}
-	if flag.NArg() > 0 {
-		packages = flag.Args()
+
+	// Choose which go command we're going to use.
+	var gobuild *exec.Cmd
+	if !*dlgo {
+		// Default behavior: use the go version which runs ci.go right now.
+		gobuild = goTool("build")
+	} else {
+		// Download of Go requested. This is for build environments where the
+		// installed version is too old and cannot be upgraded easily.
+		cachedir := filepath.Join("build", "cache")
+		goroot := downloadGo(runtime.GOARCH, runtime.GOOS, cachedir)
+		gobuild = localGoTool(goroot, "build")
 	}
 
-	if *arch == "" || *arch == runtime.GOARCH {
-		goinstall := goTool("install", buildFlags(env)...)
-		if runtime.GOARCH == "arm64" {
-			goinstall.Args = append(goinstall.Args, "-p", "1")
-		}
-		goinstall.Args = append(goinstall.Args, "-trimpath")
-		goinstall.Args = append(goinstall.Args, "-v")
-		goinstall.Args = append(goinstall.Args, packages...)
-		build.MustRun(goinstall)
-		return
+	// Configure environment for cross build.
+	if *arch != "" || *arch != runtime.GOARCH {
+		gobuild.Env = append(gobuild.Env, "CGO_ENABLED=1")
+		gobuild.Env = append(gobuild.Env, "GOARCH="+*arch)
 	}
 
-	// Seems we are cross compiling, work around forbidden GOBIN
-	goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
-	goinstall.Args = append(goinstall.Args, "-trimpath")
-	goinstall.Args = append(goinstall.Args, "-v")
-	goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
-	goinstall.Args = append(goinstall.Args, packages...)
-	build.MustRun(goinstall)
+	// Configure C compiler.
+	if *cc != "" {
+		gobuild.Env = append(gobuild.Env, "CC="+*cc)
+	} else if os.Getenv("CC") != "" {
+		gobuild.Env = append(gobuild.Env, "CC="+os.Getenv("CC"))
+	}
 
-	if cmds, err := ioutil.ReadDir("cmd"); err == nil {
-		for _, cmd := range cmds {
-			pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly)
-			if err != nil {
-				log.Fatal(err)
-			}
-			for name := range pkgs {
-				if name == "main" {
-					gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...)
-					gobuild.Args = append(gobuild.Args, "-v")
-					gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...)
-					gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name()))
-					build.MustRun(gobuild)
-					break
-				}
-			}
-		}
+	// arm64 CI builders are memory-constrained and can't handle concurrent builds,
+	// so parallel compilation is disabled there. This check isn't ideal; it
+	// should probably look at something in the environment instead.
+	if runtime.GOARCH == "arm64" {
+		gobuild.Args = append(gobuild.Args, "-p", "1")
+	}
+
+	// Put the default settings in.
+	gobuild.Args = append(gobuild.Args, buildFlags(env)...)
+
+	// We use -trimpath to avoid leaking local paths into the built executables.
+	gobuild.Args = append(gobuild.Args, "-trimpath")
+
+	// Show packages during build.
+	gobuild.Args = append(gobuild.Args, "-v")
+
+	// Now we choose what we're even building.
+	// Default: collect all 'main' packages in cmd/ and build those.
+	packages := flag.Args()
+	if len(packages) == 0 {
+		packages = build.FindMainPackages("./cmd")
+	}
+
+	// Do the build!
+	for _, pkg := range packages {
+		args := make([]string, len(gobuild.Args))
+		copy(args, gobuild.Args)
+		args = append(args, "-o", executablePath(path.Base(pkg)))
+		args = append(args, pkg)
+		build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
 	}
 }
 
+// buildFlags returns the go tool flags for building.
 func buildFlags(env build.Environment) (flags []string) {
 	var ld []string
 	if env.Commit != "" {
 		ld = append(ld, "-X", "main.gitCommit="+env.Commit)
 		ld = append(ld, "-X", "main.gitDate="+env.Date)
 	}
+	// Strip DWARF on darwin. This used to be required for certain things,
+	// and there is no downside to this, so we just keep doing it.
 	if runtime.GOOS == "darwin" {
 		ld = append(ld, "-s")
 	}
-
 	if len(ld) > 0 {
 		flags = append(flags, "-ldflags", strings.Join(ld, " "))
 	}
 	return flags
 }
 
+// goTool returns the go tool. This uses the Go version which runs ci.go.
 func goTool(subcmd string, args ...string) *exec.Cmd {
-	return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...)
+	cmd := build.GoTool(subcmd, args...)
+	goToolSetEnv(cmd)
+	return cmd
 }
 
-func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
-	cmd := build.GoTool(subcmd, args...)
-	if arch == "" || arch == runtime.GOARCH {
-		cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
-	} else {
-		cmd.Env = append(cmd.Env, "CGO_ENABLED=1")
-		cmd.Env = append(cmd.Env, "GOARCH="+arch)
-	}
-	if cc != "" {
-		cmd.Env = append(cmd.Env, "CC="+cc)
-	}
+// localGoTool returns the go tool from the given GOROOT.
+func localGoTool(goroot string, subcmd string, args ...string) *exec.Cmd {
+	gotool := filepath.Join(goroot, "bin", "go")
+	cmd := exec.Command(gotool, subcmd)
+	goToolSetEnv(cmd)
+	cmd.Env = append(cmd.Env, "GOROOT="+goroot)
+	cmd.Args = append(cmd.Args, args...)
+	return cmd
+}
+
+// goToolSetEnv forwards the build environment to the go tool.
+func goToolSetEnv(cmd *exec.Cmd) {
+	cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
 	for _, e := range os.Environ() {
-		if strings.HasPrefix(e, "GOBIN=") {
+		if strings.HasPrefix(e, "GOBIN=") || strings.HasPrefix(e, "CC=") {
 			continue
 		}
 		cmd.Env = append(cmd.Env, e)
 	}
-	return cmd
 }
 
 // Running The Tests
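Note: the inline `go/parser` scan over `cmd/` that `doInstall` used to carry is replaced above by a call to `build.FindMainPackages`, whose implementation is not part of this diff. A rough sketch of what such a helper does, reconstructed from the removed loop (an assumption, not the actual internal/build source):

```go
package example

import (
	"go/parser"
	"go/token"
	"io/ioutil"
	"log"
	"path"
	"path/filepath"
)

// findMainPackages returns the package paths of all 'main' packages
// below dir (e.g. "./cmd/geth"), mirroring the removed go/parser loop.
func findMainPackages(dir string) []string {
	var packages []string
	cmds, err := ioutil.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	for _, cmd := range cmds {
		pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(dir, cmd.Name()), nil, parser.PackageClauseOnly)
		if err != nil {
			log.Fatal(err)
		}
		for name := range pkgs {
			if name == "main" {
				packages = append(packages, "./"+path.Join(dir, cmd.Name()))
			}
		}
	}
	return packages
}
```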
@@ -363,7 +387,7 @@ func downloadLinter(cachedir string) string {
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
 		log.Fatal(err)
 	}
-	if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil {
+	if err := build.ExtractArchive(archivePath, cachedir); err != nil {
 		log.Fatal(err)
 	}
 	return filepath.Join(cachedir, base, "golangci-lint")
@@ -469,13 +493,12 @@ func maybeSkipArchive(env build.Environment) {
 // Debian Packaging
 func doDebianSource(cmdline []string) {
 	var (
-		goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`)
-		cachedir  = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`)
-		signer    = flag.String("signer", "", `Signing key name, also used as package author`)
-		upload    = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
-		sshUser   = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
-		workdir   = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
-		now       = time.Now()
+		cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`)
+		signer   = flag.String("signer", "", `Signing key name, also used as package author`)
+		upload   = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
+		sshUser  = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
+		workdir  = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
+		now      = time.Now()
 	)
 	flag.CommandLine.Parse(cmdline)
 	*workdir = makeWorkdir(*workdir)
@@ -490,10 +513,10 @@ func doDebianSource(cmdline []string) {
 	}
 
 	// Download and verify the Go source package.
-	gobundle := downloadGoSources(*goversion, *cachedir)
+	gobundle := downloadGoSources(*cachedir)
 
 	// Download all the dependencies needed to build the sources and run the ci script
-	srcdepfetch := goTool("install", "-n", "./...")
+	srcdepfetch := goTool("mod", "download")
 	srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
 	build.MustRun(srcdepfetch)
 
@@ -509,7 +532,7 @@ func doDebianSource(cmdline []string) {
 			pkgdir := stageDebianSource(*workdir, meta)
 
 			// Add Go source code
-			if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil {
+			if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
 				log.Fatalf("Failed to extract Go sources: %v", err)
 			}
 			if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
@@ -541,9 +564,10 @@ func doDebianSource(cmdline []string) {
 	}
 }
 
-func downloadGoSources(version string, cachedir string) string {
+// downloadGoSources downloads the Go source tarball.
+func downloadGoSources(cachedir string) string {
 	csdb := build.MustLoadChecksums("build/checksums.txt")
-	file := fmt.Sprintf("go%s.src.tar.gz", version)
+	file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion)
 	url := "https://dl.google.com/go/" + file
 	dst := filepath.Join(cachedir, file)
 	if err := csdb.DownloadFile(url, dst); err != nil {
@@ -552,6 +576,41 @@ func downloadGoSources(version string, cachedir string) string {
 	return dst
 }
 
+// downloadGo downloads the Go binary distribution and unpacks it into the
+// user cache directory. It returns the GOROOT of the unpacked toolchain.
+func downloadGo(goarch, goos, cachedir string) string {
+	if goarch == "arm" {
+		goarch = "armv6l"
+	}
+
+	csdb := build.MustLoadChecksums("build/checksums.txt")
+	file := fmt.Sprintf("go%s.%s-%s", dlgoVersion, goos, goarch)
+	if goos == "windows" {
+		file += ".zip"
+	} else {
+		file += ".tar.gz"
+	}
+	url := "https://golang.org/dl/" + file
+	dst := filepath.Join(cachedir, file)
+	if err := csdb.DownloadFile(url, dst); err != nil {
+		log.Fatal(err)
+	}
+
+	ucache, err := os.UserCacheDir()
+	if err != nil {
+		log.Fatal(err)
+	}
+	godir := filepath.Join(ucache, fmt.Sprintf("geth-go-%s-%s-%s", dlgoVersion, goos, goarch))
+	if err := build.ExtractArchive(dst, godir); err != nil {
+		log.Fatal(err)
+	}
+	goroot, err := filepath.Abs(filepath.Join(godir, "go"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	return goroot
+}
+
 func ppaUpload(workdir, ppa, sshUser string, files []string) {
 	p := strings.Split(ppa, "/")
 	if len(p) != 2 {
@@ -980,7 +1039,7 @@ func doXCodeFramework(cmdline []string) {
 	if *deploy != "" {
 		meta := newPodMetadata(env, archive)
 		build.Render("build/pod.podspec", "Geth.podspec", 0755, meta)
-		build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings", "--verbose")
+		build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings")
 	}
 }
 
diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md
index 2763c75085a70edec443c2c61629157ebb8e305f..e1372d015899954f22a21de0ca759bb4e90c391f 100644
--- a/cmd/devp2p/README.md
+++ b/cmd/devp2p/README.md
@@ -81,6 +81,25 @@ Now get the ENR of your node and store it in the `NODE` environment variable.
 
 Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.0.2 $NODE`.
 
+### Eth Protocol Test Suite
+
+The Eth Protocol test suite is a conformance test suite for the [eth protocol][eth].
+
+To run the eth protocol test suite against your implementation, set the node up as follows:
+
+1. initialize the geth node with the `genesis.json` file contained in the `testdata` directory
+2. import the `halfchain.rlp` file in the `testdata` directory
+3. run geth with the following flags:
+```
+geth --datadir <datadir> --nodiscover --nat=none --networkid 19763 --verbosity 5
+```
+
+Then run the following command, replacing `<enode ID>` with the enode of the geth node:
+```
+devp2p rlpx eth-test <enode ID> cmd/devp2p/internal/ethtest/testdata/fullchain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json
+```
+
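+Individual tests can be selected with the `--run <pattern>` flag, and `--tap` switches the output to TAP format; both flags are attached to the `eth-test` command elsewhere in this change.
+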
+[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md
 [dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup
 [discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md
 [discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md
diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go
index 467c20deb5be7f5b2350dc42bcbc55aae350f26c..3b6dc09a1cc827ca47c16beadac406476b15b5d1 100644
--- a/cmd/devp2p/discv4cmd.go
+++ b/cmd/devp2p/discv4cmd.go
@@ -19,14 +19,12 @@ package main
 import (
 	"fmt"
 	"net"
-	"os"
 	"strings"
 	"time"
 
 	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/params"
@@ -82,7 +80,13 @@ var (
 		Name:   "test",
 		Usage:  "Runs tests against a node",
 		Action: discv4Test,
-		Flags:  []cli.Flag{remoteEnodeFlag, testPatternFlag, testListen1Flag, testListen2Flag},
+		Flags: []cli.Flag{
+			remoteEnodeFlag,
+			testPatternFlag,
+			testTAPFlag,
+			testListen1Flag,
+			testListen2Flag,
+		},
 	}
 )
 
@@ -113,20 +117,6 @@ var (
 		Usage:  "Enode of the remote node under test",
 		EnvVar: "REMOTE_ENODE",
 	}
-	testPatternFlag = cli.StringFlag{
-		Name:  "run",
-		Usage: "Pattern of test suite(s) to run",
-	}
-	testListen1Flag = cli.StringFlag{
-		Name:  "listen1",
-		Usage: "IP address of the first tester",
-		Value: v4test.Listen1,
-	}
-	testListen2Flag = cli.StringFlag{
-		Name:  "listen2",
-		Usage: "IP address of the second tester",
-		Value: v4test.Listen2,
-	}
 )
 
 func discv4Ping(ctx *cli.Context) error {
@@ -213,6 +203,7 @@ func discv4Crawl(ctx *cli.Context) error {
 	return nil
 }
 
+// discv4Test runs the protocol test suite.
 func discv4Test(ctx *cli.Context) error {
 	// Configure test package globals.
 	if !ctx.IsSet(remoteEnodeFlag.Name) {
@@ -221,18 +212,7 @@ func discv4Test(ctx *cli.Context) error {
 	v4test.Remote = ctx.String(remoteEnodeFlag.Name)
 	v4test.Listen1 = ctx.String(testListen1Flag.Name)
 	v4test.Listen2 = ctx.String(testListen2Flag.Name)
-
-	// Filter and run test cases.
-	tests := v4test.AllTests
-	if ctx.IsSet(testPatternFlag.Name) {
-		tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
-	}
-	results := utesting.RunTests(tests, os.Stdout)
-	if fails := utesting.CountFailures(results); fails > 0 {
-		return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests))
-	}
-	fmt.Printf("%v/%v passed\n", len(tests), len(tests))
-	return nil
+	return runTests(ctx, v4test.AllTests)
 }
 
 // startV4 starts an ephemeral discovery V4 node.
diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go
index 1d7442144fd4cbf21e6622a60847898357ce58e7..e20d7c9cfae6e344358bebf161367400a74aed12 100644
--- a/cmd/devp2p/discv5cmd.go
+++ b/cmd/devp2p/discv5cmd.go
@@ -18,13 +18,10 @@ package main
 
 import (
 	"fmt"
-	"os"
 	"time"
 
 	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/internal/utesting"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"gopkg.in/urfave/cli.v1"
 )
@@ -62,7 +59,12 @@ var (
 		Name:   "test",
 		Usage:  "Runs protocol tests against a node",
 		Action: discv5Test,
-		Flags:  []cli.Flag{testPatternFlag, testListen1Flag, testListen2Flag},
+		Flags: []cli.Flag{
+			testPatternFlag,
+			testTAPFlag,
+			testListen1Flag,
+			testListen2Flag,
+		},
 	}
 	discv5ListenCommand = cli.Command{
 		Name:   "listen",
@@ -114,28 +116,14 @@ func discv5Crawl(ctx *cli.Context) error {
 	return nil
 }
 
+// discv5Test runs the protocol test suite.
 func discv5Test(ctx *cli.Context) error {
-	// Disable logging unless explicitly enabled.
-	if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") {
-		log.Root().SetHandler(log.DiscardHandler())
-	}
-
-	// Filter and run test cases.
 	suite := &v5test.Suite{
 		Dest:    getNodeArg(ctx),
 		Listen1: ctx.String(testListen1Flag.Name),
 		Listen2: ctx.String(testListen2Flag.Name),
 	}
-	tests := suite.AllTests()
-	if ctx.IsSet(testPatternFlag.Name) {
-		tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
-	}
-	results := utesting.RunTests(tests, os.Stdout)
-	if fails := utesting.CountFailures(results); fails > 0 {
-		return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests))
-	}
-	fmt.Printf("%v/%v passed\n", len(tests), len(tests))
-	return nil
+	return runTests(ctx, suite.AllTests())
 }
 
 func discv5Listen(ctx *cli.Context) error {
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 654888a4caed1dd19bf51c085fe1dc5a2d0993f0..d67387e80bf46535769bb5c0dd26cadd9e10ac66 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -124,13 +124,22 @@ func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) {
 // loadChain takes the given chain.rlp file, and decodes and returns
 // the blocks from the file.
 func loadChain(chainfile string, genesis string) (*Chain, error) {
-	// Open the file handle and potentially unwrap the gzip stream
+	chainConfig, err := ioutil.ReadFile(genesis)
+	if err != nil {
+		return nil, err
+	}
+	var gen core.Genesis
+	if err := json.Unmarshal(chainConfig, &gen); err != nil {
+		return nil, err
+	}
+	gblock := gen.ToBlock(nil)
+
+	// Load chain.rlp.
 	fh, err := os.Open(chainfile)
 	if err != nil {
 		return nil, err
 	}
 	defer fh.Close()
-
 	var reader io.Reader = fh
 	if strings.HasSuffix(chainfile, ".gz") {
 		if reader, err = gzip.NewReader(reader); err != nil {
@@ -138,29 +147,21 @@ func loadChain(chainfile string, genesis string) (*Chain, error) {
 		}
 	}
 	stream := rlp.NewStream(reader, 0)
-	var blocks []*types.Block
+	var blocks = make([]*types.Block, 1)
+	blocks[0] = gblock
 	for i := 0; ; i++ {
 		var b types.Block
 		if err := stream.Decode(&b); err == io.EOF {
 			break
 		} else if err != nil {
-			return nil, fmt.Errorf("at block %d: %v", i, err)
+			return nil, fmt.Errorf("at block index %d: %v", i, err)
+		}
+		if b.NumberU64() != uint64(i+1) {
+			return nil, fmt.Errorf("block at index %d has wrong number %d", i, b.NumberU64())
 		}
 		blocks = append(blocks, &b)
 	}
 
-	// Open the file handle and potentially unwrap the gzip stream
-	chainConfig, err := ioutil.ReadFile(genesis)
-	if err != nil {
-		return nil, err
-	}
-	var gen core.Genesis
-	if err := json.Unmarshal(chainConfig, &gen); err != nil {
-		return nil, err
-	}
-
-	return &Chain{
-		blocks:      blocks,
-		chainConfig: gen.Config,
-	}, nil
+	c := &Chain{blocks: blocks, chainConfig: gen.Config}
+	return c, nil
 }
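Putting the genesis block at index 0 makes slice indexes coincide with block numbers, which the `i+1` check in the decode loop enforces for the rest of the chain. An illustrative statement of the resulting invariant (this helper is not part of the diff):

```go
package ethtest

import "fmt"

// checkIndexInvariant verifies chain.blocks[i].NumberU64() == i, the
// property the i+1 check above establishes once the genesis block sits
// at index 0. GetHeaders relies on it when resolving origins by number.
func checkIndexInvariant(c *Chain) error {
	for i, b := range c.blocks {
		if b.NumberU64() != uint64(i) {
			return fmt.Errorf("block at index %d has number %d", i, b.NumberU64())
		}
	}
	return nil
}
```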
diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go
index c8b977d2375016cfe49f41434f96b350ce8d8345..604b9086874907e19f823eb8d1fb84a1ad6c0fb8 100644
--- a/cmd/devp2p/internal/ethtest/chain_test.go
+++ b/cmd/devp2p/internal/ethtest/chain_test.go
@@ -73,7 +73,7 @@ func TestEthProtocolNegotiation(t *testing.T) {
 // TestChain_GetHeaders tests whether the test suite can correctly
 // respond to a GetBlockHeaders request from a node.
 func TestChain_GetHeaders(t *testing.T) {
-	chainFile, err := filepath.Abs("./testdata/chain.rlp.gz")
+	chainFile, err := filepath.Abs("./testdata/fullchain.rlp.gz")
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index f70bc43efa949d4ea951910dd0b60081f4fc2661..d5928bede44ff08604f8f73c3ce9fbdcbced2369 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -19,7 +19,9 @@ package ethtest
 import (
 	"fmt"
 	"net"
+	"time"
 
+	"github.com/davecgh/go-spew/spew"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -27,6 +29,13 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
+var pretty = spew.ConfigState{
+	Indent:                  "  ",
+	DisableCapacities:       true,
+	DisablePointerAddresses: true,
+	SortKeys:                true,
+}
+
 // Suite represents a structure used to test the eth
 // protocol of a node(s).
 type Suite struct {
@@ -73,9 +82,9 @@ func (s *Suite) TestStatus(t *utesting.T) {
 	// get status
 	switch msg := conn.statusExchange(t, s.chain).(type) {
 	case *Status:
-		t.Logf("%+v\n", msg)
+		t.Logf("got status message: %s", pretty.Sdump(msg))
 	default:
-		t.Fatalf("unexpected: %#v", msg)
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
 }
 
@@ -104,16 +113,17 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
 		t.Fatalf("could not write to connection: %v", err)
 	}
 
-	switch msg := conn.ReadAndServe(s.chain).(type) {
+	timeout := 20 * time.Second
+	switch msg := conn.ReadAndServe(s.chain, timeout).(type) {
 	case *BlockHeaders:
 		headers := msg
 		for _, header := range *headers {
 			num := header.Number.Uint64()
+			t.Logf("received header (%d): %s", num, pretty.Sdump(header))
 			assert.Equal(t, s.chain.blocks[int(num)].Header(), header)
-			t.Logf("\nHEADER FOR BLOCK NUMBER %d: %+v\n", header.Number, header)
 		}
 	default:
-		t.Fatalf("unexpected: %#v", msg)
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
 }
 
@@ -133,14 +143,12 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 		t.Fatalf("could not write to connection: %v", err)
 	}
 
-	switch msg := conn.ReadAndServe(s.chain).(type) {
+	timeout := 20 * time.Second
+	switch msg := conn.ReadAndServe(s.chain, timeout).(type) {
 	case *BlockBodies:
-		bodies := msg
-		for _, body := range *bodies {
-			t.Logf("\nBODY: %+v\n", body)
-		}
+		t.Logf("received %d block bodies", len(*msg))
 	default:
-		t.Fatalf("unexpected: %#v", msg)
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
 }
 
@@ -173,18 +181,27 @@ func (s *Suite) TestBroadcast(t *utesting.T) {
 		t.Fatalf("could not write to connection: %v", err)
 	}
 
-	switch msg := receiveConn.ReadAndServe(s.chain).(type) {
+	timeout := 20 * time.Second
+	switch msg := receiveConn.ReadAndServe(s.chain, timeout).(type) {
 	case *NewBlock:
-		assert.Equal(t, blockAnnouncement.Block.Header(), msg.Block.Header(),
-			"wrong block header in announcement")
-		assert.Equal(t, blockAnnouncement.TD, msg.TD,
-			"wrong TD in announcement")
+		t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block))
+		assert.Equal(t,
+			blockAnnouncement.Block.Header(), msg.Block.Header(),
+			"wrong block header in announcement",
+		)
+		assert.Equal(t,
+			blockAnnouncement.TD, msg.TD,
+			"wrong TD in announcement",
+		)
 	case *NewBlockHashes:
 		hashes := *msg
-		assert.Equal(t, blockAnnouncement.Block.Hash(), hashes[0].Hash,
-			"wrong block hash in announcement")
+		t.Logf("received NewBlockHashes message: %s", pretty.Sdump(hashes))
+		assert.Equal(t,
+			blockAnnouncement.Block.Hash(), hashes[0].Hash,
+			"wrong block hash in announcement",
+		)
 	default:
-		t.Fatalf("unexpected: %#v", msg)
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
 	// update test suite chain
 	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[1000])
diff --git a/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz b/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz
new file mode 100644
index 0000000000000000000000000000000000000000..50f52eafa2539c9c8ba98a2685f89907d48a3629
Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz differ
diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json
index ea5e2725b569251b678d4c54d727269f45bb8c93..ed78488b67d6a16f52ee94fa11dc5164bafcdcb2 100644
--- a/cmd/devp2p/internal/ethtest/testdata/genesis.json
+++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json
@@ -1,6 +1,6 @@
 {
     "config": {
-        "chainId": 1,
+        "chainId": 19763,
         "homesteadBlock": 0,
         "eip150Block": 0,
         "eip155Block": 0,
@@ -11,16 +11,16 @@
     "nonce": "0xdeadbeefdeadbeef",
     "timestamp": "0x0",
     "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000",
-    "gasLimit": "0x8000000",
-    "difficulty": "0x10",
+    "gasLimit": "0x80000000",
+    "difficulty": "0x20000",
     "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
     "coinbase": "0x0000000000000000000000000000000000000000",
     "alloc": {
         "71562b71999873db5b286df957af199ec94617f7": {
-            "balance": "0xf4240"
+            "balance": "0xffffffff"
         }
     },
     "number": "0x0",
     "gasUsed": "0x0",
     "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
-}
\ No newline at end of file
+}
diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz
new file mode 100644
index 0000000000000000000000000000000000000000..82d5271361e21f5094b58ada0c83e203b81e3d48
Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz differ
diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go
index b6298e808327535dd7b0c8563dced13a015fb2c1..69367cb6cd4870a0651440fff7f83172f24146e7 100644
--- a/cmd/devp2p/internal/ethtest/types.go
+++ b/cmd/devp2p/internal/ethtest/types.go
@@ -42,10 +42,14 @@ type Error struct {
 	err error
 }
 
-func (e *Error) Unwrap() error    { return e.err }
-func (e *Error) Error() string    { return e.err.Error() }
-func (e *Error) Code() int        { return -1 }
-func (e *Error) GoString() string { return e.Error() }
+func (e *Error) Unwrap() error  { return e.err }
+func (e *Error) Error() string  { return e.err.Error() }
+func (e *Error) Code() int      { return -1 }
+func (e *Error) String() string { return e.Error() }
+
+func errorf(format string, args ...interface{}) *Error {
+	return &Error{fmt.Errorf(format, args...)}
+}
 
 // Hello is the RLP structure of the protocol handshake.
 type Hello struct {
@@ -174,7 +178,7 @@ type Conn struct {
 func (c *Conn) Read() Message {
 	code, rawData, _, err := c.Conn.Read()
 	if err != nil {
-		return &Error{fmt.Errorf("could not read from connection: %v", err)}
+		return errorf("could not read from connection: %v", err)
 	}
 
 	var msg Message
@@ -202,20 +206,22 @@ func (c *Conn) Read() Message {
 	case (NewBlockHashes{}).Code():
 		msg = new(NewBlockHashes)
 	default:
-		return &Error{fmt.Errorf("invalid message code: %d", code)}
+		return errorf("invalid message code: %d", code)
 	}
 
 	if err := rlp.DecodeBytes(rawData, msg); err != nil {
-		return &Error{fmt.Errorf("could not rlp decode message: %v", err)}
+		return errorf("could not rlp decode message: %v", err)
 	}
-
 	return msg
 }
 
 // ReadAndServe serves GetBlockHeaders requests while waiting
 // on another message from the node.
-func (c *Conn) ReadAndServe(chain *Chain) Message {
-	for {
+func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message {
+	start := time.Now()
+	for time.Since(start) < timeout {
+		// Apply a shorter read deadline per attempt; the loop enforces `timeout`.
+		c.SetReadDeadline(time.Now().Add(10 * time.Second))
 		switch msg := c.Read().(type) {
 		case *Ping:
 			c.Write(&Pong{})
@@ -223,16 +229,17 @@ func (c *Conn) ReadAndServe(chain *Chain) Message {
 			req := *msg
 			headers, err := chain.GetHeaders(req)
 			if err != nil {
-				return &Error{fmt.Errorf("could not get headers for inbound header request: %v", err)}
+				return errorf("could not get headers for inbound header request: %v", err)
 			}
 
 			if err := c.Write(headers); err != nil {
-				return &Error{fmt.Errorf("could not write to connection: %v", err)}
+				return errorf("could not write to connection: %v", err)
 			}
 		default:
 			return msg
 		}
 	}
+	return errorf("no message received within %v", timeout)
 }
 
 func (c *Conn) Write(msg Message) error {
@@ -242,12 +249,14 @@ func (c *Conn) Write(msg Message) error {
 	}
 	_, err = c.Conn.Write(uint64(msg.Code()), payload)
 	return err
-
 }
 
 // handshake checks to make sure a `HELLO` is received.
 func (c *Conn) handshake(t *utesting.T) Message {
-	// write protoHandshake to client
+	defer c.SetDeadline(time.Time{})
+	c.SetDeadline(time.Now().Add(10 * time.Second))
+
+	// write hello to client
 	pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:]
 	ourHandshake := &Hello{
 		Version: 5,
@@ -260,14 +269,13 @@ func (c *Conn) handshake(t *utesting.T) Message {
 	if err := c.Write(ourHandshake); err != nil {
 		t.Fatalf("could not write to connection: %v", err)
 	}
-	// read protoHandshake from client
+	// read hello from client
 	switch msg := c.Read().(type) {
 	case *Hello:
 		// set snappy if version is at least 5
 		if msg.Version >= 5 {
 			c.SetSnappy(true)
 		}
-
 		c.negotiateEthProtocol(msg.Caps)
 		if c.ethProtocolVersion == 0 {
 			t.Fatalf("unexpected eth protocol version")
@@ -297,15 +305,17 @@ func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
 // statusExchange performs a `Status` message exchange with the given
 // node.
 func (c *Conn) statusExchange(t *utesting.T, chain *Chain) Message {
+	defer c.SetDeadline(time.Time{})
+	c.SetDeadline(time.Now().Add(20 * time.Second))
+
 	// read status message from client
 	var message Message
-
 loop:
 	for {
 		switch msg := c.Read().(type) {
 		case *Status:
 			if msg.Head != chain.blocks[chain.Len()-1].Hash() {
-				t.Fatalf("wrong head in status: %v", msg.Head)
+				t.Fatalf("wrong head block in status: %s", msg.Head.String())
 			}
 			if msg.TD.Cmp(chain.TD(chain.Len())) != 0 {
 				t.Fatalf("wrong TD in status: %v", msg.TD)
@@ -321,7 +331,7 @@ loop:
 			c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error
 			// (PINGs should not be a response upon fresh connection)
 		default:
-			t.Fatalf("bad status message: %#v", msg)
+			t.Fatalf("bad status message: %s", pretty.Sdump(msg))
 		}
 	}
 	// make sure eth protocol version is set for negotiation
@@ -331,7 +341,7 @@ loop:
 	// write status message to client
 	status := Status{
 		ProtocolVersion: uint32(c.ethProtocolVersion),
-		NetworkID:       1,
+		NetworkID:       chain.chainConfig.ChainID.Uint64(),
 		TD:              chain.TD(chain.Len()),
 		Head:            chain.blocks[chain.Len()-1].Hash(),
 		Genesis:         chain.blocks[0].Hash(),
@@ -347,12 +357,15 @@ loop:
 // waitForBlock waits for confirmation from the client that it has
 // imported the given block.
 func (c *Conn) waitForBlock(block *types.Block) error {
+	defer c.SetReadDeadline(time.Time{})
+
+	timeout := time.Now().Add(20 * time.Second)
+	c.SetReadDeadline(timeout)
 	for {
 		req := &GetBlockHeaders{Origin: hashOrNumber{Hash: block.Hash()}, Amount: 1}
 		if err := c.Write(req); err != nil {
 			return err
 		}
-
 		switch msg := c.Read().(type) {
 		case *BlockHeaders:
 			if len(*msg) > 0 {
@@ -360,7 +373,7 @@ func (c *Conn) waitForBlock(block *types.Block) error {
 			}
 			time.Sleep(100 * time.Millisecond)
 		default:
-			return fmt.Errorf("invalid message: %v", msg)
+			return fmt.Errorf("invalid message: %s", pretty.Sdump(msg))
 		}
 	}
 }
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go
index 17019aee004461c919c6d8b06d49f63f664313ce..d90eb4687cada3bfe69843714839df6d2023392b 100644
--- a/cmd/devp2p/rlpxcmd.go
+++ b/cmd/devp2p/rlpxcmd.go
@@ -19,11 +19,9 @@ package main
 import (
 	"fmt"
 	"net"
-	"os"
 
 	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/rlpx"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -47,9 +45,12 @@ var (
 	rlpxEthTestCommand = cli.Command{
 		Name:      "eth-test",
 		Usage:     "Runs tests against a node",
-		ArgsUsage: "<node> <path_to_chain.rlp_file>",
+		ArgsUsage: "<node> <chain.rlp> <genesis.json>",
 		Action:    rlpxEthTest,
-		Flags:     []cli.Flag{testPatternFlag},
+		Flags: []cli.Flag{
+			testPatternFlag,
+			testTAPFlag,
+		},
 	}
 )
 
@@ -88,22 +89,11 @@ func rlpxPing(ctx *cli.Context) error {
 	return nil
 }
 
+// rlpxEthTest runs the eth protocol test suite.
 func rlpxEthTest(ctx *cli.Context) error {
 	if ctx.NArg() < 3 {
 		exit("missing path to chain.rlp as command-line argument")
 	}
-
 	suite := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
-
-	// Filter and run test cases.
-	tests := suite.AllTests()
-	if ctx.IsSet(testPatternFlag.Name) {
-		tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
-	}
-	results := utesting.RunTests(tests, os.Stdout)
-	if fails := utesting.CountFailures(results); fails > 0 {
-		return fmt.Errorf("%v of %v tests passed.", len(tests)-fails, len(tests))
-	}
-	fmt.Printf("all tests passed\n")
-	return nil
+	return runTests(ctx, suite.AllTests())
 }
diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go
new file mode 100644
index 0000000000000000000000000000000000000000..4168f8555bfbdd93bdba86e7b32ba38dc3f7b7b5
--- /dev/null
+++ b/cmd/devp2p/runtest.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"os"
+
+	"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
+	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/log"
+	"gopkg.in/urfave/cli.v1"
+)
+
+var (
+	testPatternFlag = cli.StringFlag{
+		Name:  "run",
+		Usage: "Pattern of test suite(s) to run",
+	}
+	testTAPFlag = cli.BoolFlag{
+		Name:  "tap",
+		Usage: "Output TAP",
+	}
+	// These two are specific to the discovery tests.
+	testListen1Flag = cli.StringFlag{
+		Name:  "listen1",
+		Usage: "IP address of the first tester",
+		Value: v4test.Listen1,
+	}
+	testListen2Flag = cli.StringFlag{
+		Name:  "listen2",
+		Usage: "IP address of the second tester",
+		Value: v4test.Listen2,
+	}
+)
+
+func runTests(ctx *cli.Context, tests []utesting.Test) error {
+	// Filter test cases.
+	if ctx.IsSet(testPatternFlag.Name) {
+		tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
+	}
+	// Disable logging unless explicitly enabled.
+	if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") {
+		log.Root().SetHandler(log.DiscardHandler())
+	}
+	// Run the tests.
+	var run = utesting.RunTests
+	if ctx.Bool(testTAPFlag.Name) {
+		run = utesting.RunTAP
+	}
+	results := run(tests, os.Stdout)
+	if utesting.CountFailures(results) > 0 {
+		os.Exit(1)
+	}
+	return nil
+}
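For context, the `utesting.Test` values each suite hands to `runTests` are plain name/function pairs; a minimal sketch (the test body is illustrative):

```go
package example

import "github.com/ethereum/go-ethereum/internal/utesting"

// demoTests is a trivial suite in the shape runTests expects: plain
// name/function pairs. RunTests (or RunTAP when --tap is set) executes
// each Fn and counts failures.
var demoTests = []utesting.Test{
	{
		Name: "DemoPing",
		Fn: func(t *utesting.T) {
			t.Logf("a real test would ping the node here")
		},
	},
}
```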
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 75586d588b0f70b5005aa4a48feb82afe53715b5..d8b93d291aacce82fa4d7895475ac0efadbd6014 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -147,6 +147,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		vmContext.Origin = msg.From()
 
 		evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig)
+		if chainConfig.IsYoloV2(vmContext.BlockNumber) {
+			statedb.AddAddressToAccessList(msg.From())
+			if dst := msg.To(); dst != nil {
+				statedb.AddAddressToAccessList(*dst)
+				// If it's a create-tx, the destination will be added inside evm.create
+			}
+			for _, addr := range evm.ActivePrecompiles() {
+				statedb.AddAddressToAccessList(addr)
+			}
+		}
 		snapshot := statedb.Snapshot()
 		// (ret []byte, usedGas uint64, failed bool, err error)
 		msgResult, err := core.ApplyMessage(evm, msg, gaspool)
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index e9a24412e16220ac228527b2c9874aaad6027acb..0e0835cb7260268675fefcbd63975d60636f4372 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -167,7 +167,7 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
 			utils.RinkebyFlag,
 			utils.TxLookupLimitFlag,
 			utils.GoerliFlag,
-			utils.YoloV1Flag,
+			utils.YoloV2Flag,
 			utils.LegacyTestnetFlag,
 		},
 		Category: "BLOCKCHAIN COMMANDS",
@@ -217,7 +217,7 @@ Use "ethereum dump 0" to dump the genesis block.`,
 			utils.RopstenFlag,
 			utils.RinkebyFlag,
 			utils.GoerliFlag,
-			utils.YoloV1Flag,
+			utils.YoloV2Flag,
 			utils.LegacyTestnetFlag,
 			utils.SyncModeFlag,
 		},
diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go
index e2f733f844a449a24325e204a37d56a7c954abca..cbecbe0a5fa702fc48a8008f663be4a9cd284228 100644
--- a/cmd/geth/consolecmd.go
+++ b/cmd/geth/consolecmd.go
@@ -136,8 +136,8 @@ func remoteConsole(ctx *cli.Context) error {
 				path = filepath.Join(path, "rinkeby")
 			} else if ctx.GlobalBool(utils.GoerliFlag.Name) {
 				path = filepath.Join(path, "goerli")
-			} else if ctx.GlobalBool(utils.YoloV1Flag.Name) {
-				path = filepath.Join(path, "yolo-v1")
+			} else if ctx.GlobalBool(utils.YoloV2Flag.Name) {
+				path = filepath.Join(path, "yolo-v2")
 			}
 		}
 		endpoint = fmt.Sprintf("%s/geth.ipc", path)
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index 6c100e18d9ee33a274ce3811036423d5abfbf19b..913b060361f6043eaa36d5ec3da19ba9f4d411b1 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -66,6 +66,7 @@ at block: 0 ({{niltime}})
  datadir: {{.Datadir}}
  modules: {{apis}}
 
+To exit, press ctrl-d
 > {{.InputLine "exit"}}
 `)
 	geth.ExpectExit()
@@ -159,6 +160,7 @@ at block: 0 ({{niltime}}){{if ipc}}
  datadir: {{datadir}}{{end}}
  modules: {{apis}}
 
+To exit, press ctrl-d
 > {{.InputLine "exit" }}
 `)
 	attach.ExpectExit()
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index a96443bdad582a3c06a5108e157e0abc66acfdf5..26664b630ddbd2fb5e9fdf2b81d9d0b64efb6755 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -145,7 +145,7 @@ var (
 		utils.RopstenFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
-		utils.YoloV1Flag,
+		utils.YoloV2Flag,
 		utils.VMEnableDebugFlag,
 		utils.NetworkIdFlag,
 		utils.EthStatsURLFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 288c4535975230b5011587c06e3371196821c53d..237cb8d5165ddab388fc6065e9c86c912ae3044a 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -42,7 +42,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.NetworkIdFlag,
 			utils.GoerliFlag,
 			utils.RinkebyFlag,
-			utils.YoloV1Flag,
+			utils.YoloV2Flag,
 			utils.RopstenFlag,
 			utils.SyncModeFlag,
 			utils.ExitWhenSyncedFlag,
diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go
index 40327d25d226032d11b6a894931933c9882a2fd4..2d014e83bca4beab885840c6e4cc077c78abf81c 100644
--- a/cmd/puppeth/wizard_genesis.go
+++ b/cmd/puppeth/wizard_genesis.go
@@ -236,8 +236,8 @@ func (w *wizard) manageGenesis() {
 		w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock)
 
 		fmt.Println()
-		fmt.Printf("Which block should YOLOv1 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV1Block)
-		w.conf.Genesis.Config.YoloV1Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV1Block)
+		fmt.Printf("Which block should YOLOv2 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV2Block)
+		w.conf.Genesis.Config.YoloV2Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV2Block)
 
 		out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", "  ")
 		fmt.Printf("Chain configuration updated:\n\n%s\n", out)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 39cc94f0be5aa08cbff53a4563a58a0f0cb06673..f9e6fb16e479c2883c11c7ba1d28263feb0611a5 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -135,9 +135,9 @@ var (
 		Name:  "goerli",
 		Usage: "Görli network: pre-configured proof-of-authority test network",
 	}
-	YoloV1Flag = cli.BoolFlag{
-		Name:  "yolov1",
-		Usage: "YOLOv1 network: pre-configured proof-of-authority shortlived test network.",
+	YoloV2Flag = cli.BoolFlag{
+		Name:  "yolov2",
+		Usage: "YOLOv2 network: pre-configured proof-of-authority shortlived test network.",
 	}
 	RinkebyFlag = cli.BoolFlag{
 		Name:  "rinkeby",
@@ -744,8 +744,8 @@ func MakeDataDir(ctx *cli.Context) string {
 		if ctx.GlobalBool(GoerliFlag.Name) {
 			return filepath.Join(path, "goerli")
 		}
-		if ctx.GlobalBool(YoloV1Flag.Name) {
-			return filepath.Join(path, "yolo-v1")
+		if ctx.GlobalBool(YoloV2Flag.Name) {
+			return filepath.Join(path, "yolo-v2")
 		}
 		return path
 	}
@@ -803,8 +803,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
 		urls = params.RinkebyBootnodes
 	case ctx.GlobalBool(GoerliFlag.Name):
 		urls = params.GoerliBootnodes
-	case ctx.GlobalBool(YoloV1Flag.Name):
-		urls = params.YoloV1Bootnodes
+	case ctx.GlobalBool(YoloV2Flag.Name):
+		urls = params.YoloV2Bootnodes
 	case cfg.BootstrapNodes != nil:
 		return // already set, don't apply defaults.
 	}
@@ -839,8 +839,8 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
 		urls = params.RinkebyBootnodes
 	case ctx.GlobalBool(GoerliFlag.Name):
 		urls = params.GoerliBootnodes
-	case ctx.GlobalBool(YoloV1Flag.Name):
-		urls = params.YoloV1Bootnodes
+	case ctx.GlobalBool(YoloV2Flag.Name):
+		urls = params.YoloV2Bootnodes
 	case cfg.BootstrapNodesV5 != nil:
 		return // already set, don't apply defaults.
 	}
@@ -1269,8 +1269,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
 	case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
-	case ctx.GlobalBool(YoloV1Flag.Name) && cfg.DataDir == node.DefaultDataDir():
-		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v1")
+	case ctx.GlobalBool(YoloV2Flag.Name) && cfg.DataDir == node.DefaultDataDir():
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v2")
 	}
 }
 
@@ -1483,7 +1483,7 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node) {
 // SetEthConfig applies eth-related command line flags to the config.
 func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 	// Avoid conflicting network flags
-	CheckExclusive(ctx, DeveloperFlag, LegacyTestnetFlag, RopstenFlag, RinkebyFlag, GoerliFlag, YoloV1Flag)
+	CheckExclusive(ctx, DeveloperFlag, LegacyTestnetFlag, RopstenFlag, RinkebyFlag, GoerliFlag, YoloV2Flag)
 	CheckExclusive(ctx, LegacyLightServFlag, LightServeFlag, SyncModeFlag, "light")
 	CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
 	CheckExclusive(ctx, GCModeFlag, "archive", TxLookupLimitFlag)
@@ -1603,11 +1603,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 		}
 		cfg.Genesis = core.DefaultGoerliGenesisBlock()
 		SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
-	case ctx.GlobalBool(YoloV1Flag.Name):
+	case ctx.GlobalBool(YoloV2Flag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
-			cfg.NetworkId = 133519467574833 // "yolov1"
+			cfg.NetworkId = 133519467574834 // "yolov2"
 		}
-		cfg.Genesis = core.DefaultYoloV1GenesisBlock()
+		cfg.Genesis = core.DefaultYoloV2GenesisBlock()
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			cfg.NetworkId = 1337
@@ -1791,8 +1791,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
 		genesis = core.DefaultRinkebyGenesisBlock()
 	case ctx.GlobalBool(GoerliFlag.Name):
 		genesis = core.DefaultGoerliGenesisBlock()
-	case ctx.GlobalBool(YoloV1Flag.Name):
-		genesis = core.DefaultYoloV1GenesisBlock()
+	case ctx.GlobalBool(YoloV2Flag.Name):
+		genesis = core.DefaultYoloV2GenesisBlock()
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		Fatalf("Developer chains are ephemeral")
 	}
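
Taken together, the flags.go hunks rename the network selector and thread it through every consumer of the old flag: the data directory suffix, both bootnode lists, the CheckExclusive conflict guard, the default network id and the default genesis. A compressed sketch of that fan-out, using a hypothetical lookup table in place of the real flag plumbing:

package main

import (
	"fmt"
	"path/filepath"
)

// network bundles the per-flag defaults that flags.go selects in several
// separate switch statements (datadir, network id, genesis, bootnodes).
type network struct {
	dirSuffix string
	networkID uint64
}

var networks = map[string]network{
	"rinkeby": {"rinkeby", 4},
	"goerli":  {"goerli", 5},
	"yolov2":  {"yolo-v2", 133519467574834},
}

// dataDir mirrors the MakeDataDir behaviour: named networks get a suffix,
// mainnet uses the base path unchanged.
func dataDir(base, selected string) string {
	if n, ok := networks[selected]; ok {
		return filepath.Join(base, n.dirSuffix)
	}
	return base
}

func main() {
	fmt.Println(dataDir("/home/user/.ethereum", "yolov2")) // /home/user/.ethereum/yolo-v2
}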
diff --git a/common/bytes.go b/common/bytes.go
index 634041804d0bd50ecab0f8d707fbe28ea4909a11..7827bb572e1381e8ac6b21581bf43ea7bc1a8f3e 100644
--- a/common/bytes.go
+++ b/common/bytes.go
@@ -17,28 +17,9 @@
 // Package common contains various helper functions.
 package common
 
-import "encoding/hex"
-
-// ToHex returns the hex representation of b, prefixed with '0x'.
-// For empty slices, the return value is "0x0".
-//
-// Deprecated: use hexutil.Encode instead.
-func ToHex(b []byte) string {
-	hex := Bytes2Hex(b)
-	if len(hex) == 0 {
-		hex = "0"
-	}
-	return "0x" + hex
-}
-
-// ToHexArray creates a array of hex-string based on []byte
-func ToHexArray(b [][]byte) []string {
-	r := make([]string, len(b))
-	for i := range b {
-		r[i] = ToHex(b[i])
-	}
-	return r
-}
+import (
+	"encoding/hex"
+)
 
 // FromHex returns the bytes represented by the hexadecimal string s.
 // s may be prefixed with "0x".
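
The deleted ToHex and ToHexArray helpers were already marked deprecated in favour of hexutil.Encode, so the migration is mechanical; the one behavioural nuance is that ToHex returned "0x0" for empty input while hexutil.Encode returns "0x". A sketch of the replacement, with toHexArray as a local stand-in for the removed helper:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// toHexArray re-implements the removed common.ToHexArray on top of hexutil.
func toHexArray(b [][]byte) []string {
	r := make([]string, len(b))
	for i := range b {
		r[i] = hexutil.Encode(b[i])
	}
	return r
}

func main() {
	fmt.Println(hexutil.Encode([]byte{0xde, 0xad})) // 0xdead
	fmt.Println(toHexArray([][]byte{{0x01}, nil}))  // [0x01 0x]
}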
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index d6c871092ed30e54899f39fcfad7b97787f19b84..47d7e51b595906e59e47f2fdc811a8ed2539a0ec 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -295,7 +295,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 	var pend sync.WaitGroup
 	pend.Add(threads)
 
-	var progress uint32
+	var progress uint64
 	for i := 0; i < threads; i++ {
 		go func(id int) {
 			defer pend.Done()
@@ -304,23 +304,23 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 			keccak512 := makeHasher(sha3.NewLegacyKeccak512())
 
 			// Calculate the data segment this thread should generate
-			batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
-			first := uint32(id) * batch
+			batch := (size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads))
+			first := uint64(id) * batch
 			limit := first + batch
-			if limit > uint32(size/hashBytes) {
-				limit = uint32(size / hashBytes)
+			if limit > size/hashBytes {
+				limit = size / hashBytes
 			}
 			// Calculate the dataset segment
-			percent := uint32(size / hashBytes / 100)
+			percent := size / hashBytes / 100
 			for index := first; index < limit; index++ {
-				item := generateDatasetItem(cache, index, keccak512)
+				item := generateDatasetItem(cache, uint32(index), keccak512)
 				if swapped {
 					swap(item)
 				}
 				copy(dataset[index*hashBytes:], item)
 
-				if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
-					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
+				if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+					logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 				}
 			}
 		}(i)
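
The ethash hunk widens the progress counter and the per-thread index math from uint32 to uint64. The old logging expression multiplied a uint32 progress count by 100 before dividing, so once a DAG held more than 2^32/100 ≈ 43M items (about 2.7 GiB at 64 bytes per item) the product wrapped and the reported percentage went wrong. A small demonstration of the wraparound on a hypothetical 4 GiB dataset:

package main

import "fmt"

func main() {
	const hashBytes = 64
	size := uint64(4) << 30 // hypothetical 4 GiB dataset
	items := size / hashBytes

	status32 := uint32(items) - 1             // near the end of generation
	fmt.Println(uint64(status32*100) / items) // 35: the uint32 product wrapped

	status64 := items - 1
	fmt.Println(status64 * 100 / items) // 99, as expected
}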
diff --git a/console/console.go b/console/console.go
index 1dcad3065e6ed163a2210796784020f437c2fc7a..ae9f28da0486e0247ebe5314eb21955c72472509 100644
--- a/console/console.go
+++ b/console/console.go
@@ -324,6 +324,7 @@ func (c *Console) Welcome() {
 		sort.Strings(modules)
 		message += " modules: " + strings.Join(modules, " ") + "\n"
 	}
+	message += "\nTo exit, press ctrl-d"
 	fmt.Fprintln(c.printer, message)
 }
 
@@ -372,7 +373,7 @@ func (c *Console) Interactive() {
 			return
 
 		case err := <-inputErr:
-			if err == liner.ErrPromptAborted && indents > 0 {
+			if err == liner.ErrPromptAborted {
 				// When prompting for multi-line input, the first Ctrl-C resets
 				// the multi-line state.
 				prompt, indents, input = c.prompt, 0, ""
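
The console change drops the indents > 0 guard, so the first Ctrl-C now clears any pending input even at a fresh prompt rather than only inside a multi-line continuation, and the welcome banner now advertises Ctrl-D as the exit key. A minimal sketch of that input loop, assuming the peterh/liner API the console wraps:

package main

import (
	"fmt"
	"io"

	"github.com/peterh/liner"
)

func main() {
	state := liner.NewLiner()
	defer state.Close()
	state.SetCtrlCAborts(true) // make Ctrl-C surface as ErrPromptAborted

	input := ""
	for {
		line, err := state.Prompt("> ")
		switch {
		case err == io.EOF: // Ctrl-D: exit
			fmt.Println("bye")
			return
		case err == liner.ErrPromptAborted: // Ctrl-C: drop pending input, re-prompt
			input = ""
			continue
		case err != nil:
			return
		}
		input += line
		fmt.Println("got:", input)
		input = ""
	}
}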
diff --git a/core/blockchain.go b/core/blockchain.go
index db61ed7f5d24b24c91488238a5ec20414a3c9876..3a6a3368e6e4f9a630d6b8eb4f459d1b7904fea8 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -207,9 +207,10 @@ type BlockChain struct {
 	processor  Processor  // Block transaction processor interface
 	vmConfig   vm.Config
 
-	badBlocks       *lru.Cache                     // Bad block cache
-	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
-	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+	badBlocks          *lru.Cache                     // Bad block cache
+	shouldPreserve     func(*types.Block) bool        // Function used to determine whether should preserve the given block.
+	terminateInsert    func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+	writeLegacyJournal bool                           // Testing flag used to flush the snapshot journal in legacy format.
 }
 
 // NewBlockChain returns a fully initialised block chain using information
@@ -281,9 +282,29 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	// Make sure the state associated with the block is available
 	head := bc.CurrentBlock()
 	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
-		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
-		if err := bc.SetHead(head.NumberU64()); err != nil {
-			return nil, err
+		// Head state is missing: before attempting state recovery, find the
+		// disk layer point of the snapshot (if it's enabled) and make sure
+		// the rewound point is below the disk layer.
+		var diskRoot common.Hash
+		if bc.cacheConfig.SnapshotLimit > 0 {
+			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+		}
+		if diskRoot != (common.Hash{}) {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
+
+			snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
+			if err != nil {
+				return nil, err
+			}
+			// Chain rewound, persist old snapshot number to indicate recovery procedure
+			if snapDisk != 0 {
+				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
+			}
+		} else {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+			if err := bc.SetHead(head.NumberU64()); err != nil {
+				return nil, err
+			}
 		}
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
@@ -339,12 +360,25 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	if bc.cacheConfig.SnapshotLimit > 0 {
-		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
+		// If the chain was rewound past the snapshot persistent layer (causing
+		// a recovery block number to be persisted to disk), check if we're still
+		// in recovery mode and in that case, don't invalidate the snapshot on a
+		// head mismatch.
+		var recover bool
+
+		head := bc.CurrentBlock()
+		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
+			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
+			recover = true
+		}
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover)
 	}
 	// Take ownership of this particular state
 	go bc.update()
 	if txLookupLimit != nil {
 		bc.txLookupLimit = *txLookupLimit
+
+		bc.wg.Add(1)
 		go bc.maintainTxIndex(txIndexBlock)
 	}
 	// If periodic cache journal is required, spin it up.
@@ -442,9 +476,25 @@ func (bc *BlockChain) loadLastState() error {
 // was fast synced or full synced and in which state, the method will try to
 // delete minimal data from disk whilst retaining chain consistency.
 func (bc *BlockChain) SetHead(head uint64) error {
+	_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
+	return err
+}
+
+// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
+// that the rewind must pass the specified state root. This method is meant to be
+// used when rewinding with snapshots enabled to ensure that we go back further than
+// the persistent disk layer. Depending on whether the node was fast synced or full, and
+// in which state, the method will try to delete minimal data from disk whilst
+// retaining chain consistency.
+//
+// The method returns the block number where the requested root cap was found.
+func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
 
+	// Track the block number of the requested root hash
+	var rootNumber uint64 // (no root == always 0)
+
 	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
 	// current freezer limit to start nuking if underflown
 	pivot := rawdb.ReadLastPivotNumber(bc.db)
@@ -460,8 +510,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
 				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
 				newHeadBlock = bc.genesisBlock
 			} else {
-				// Block exists, keep rewinding until we find one with state
+				// Block exists, keep rewinding until we find one with state,
+				// and keep rewinding further until we also exceed the optional
+				// threshold root hash
+				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
+
 				for {
+					// If a root threshold was requested but not yet crossed, check
+					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
+						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
+					}
 					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
 						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
@@ -472,8 +530,12 @@ func (bc *BlockChain) SetHead(head uint64) error {
 							newHeadBlock = bc.genesisBlock
 						}
 					}
-					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
-					break
+					if beyondRoot || newHeadBlock.NumberU64() == 0 {
+						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+						break
+					}
+					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
+					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
 				}
 			}
 			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -553,7 +615,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	bc.txLookupCache.Purge()
 	bc.futureBlocks.Purge()
 
-	return bc.loadLastState()
+	return rootNumber, bc.loadLastState()
 }
 
 // FastSyncCommitHead sets the current head block to the one defined by the hash
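
SetHeadBeyondRoot generalises SetHead: instead of stopping at the first block whose state is present, it also keeps rewinding until the requested root has been passed, which is what lets NewBlockChain rewind behind the snapshot's persistent disk layer during crash recovery. Stripped of locking, pivot handling and freezer bookkeeping, the loop reduces to roughly the following sketch, where hasState and the block type stand in for the real state.New probes and bc.GetBlock lookups:

package main

import "fmt"

type block struct {
	number uint64
	root   string
	parent *block
}

// setHeadBeyondRoot rewinds from head until a block both has state and sits
// at or below the target root, returning the new head and the root's number.
func setHeadBeyondRoot(head *block, root string, hasState func(*block) bool) (*block, uint64) {
	beyondRoot := root == "" // no target root: stop at the first block with state
	var rootNumber uint64
	for b := head; ; b = b.parent {
		if !beyondRoot && b.root == root {
			beyondRoot, rootNumber = true, b.number
		}
		if (beyondRoot && hasState(b)) || b.number == 0 {
			return b, rootNumber
		}
	}
}

func main() {
	g := &block{number: 0, root: "g"}
	b1 := &block{number: 1, root: "r1", parent: g}
	b2 := &block{number: 2, root: "r2", parent: b1}
	head, n := setHeadBeyondRoot(b2, "r1", func(b *block) bool { return b.number <= 1 })
	fmt.Println(head.number, n) // 1 1
}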
@@ -938,8 +1000,14 @@ func (bc *BlockChain) Stop() {
 	var snapBase common.Hash
 	if bc.snaps != nil {
 		var err error
-		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
-			log.Error("Failed to journal state snapshot", "err", err)
+		if bc.writeLegacyJournal {
+			if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil {
+				log.Error("Failed to journal state snapshot", "err", err)
+			}
+		} else {
+			if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+				log.Error("Failed to journal state snapshot", "err", err)
+			}
 		}
 	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
@@ -2096,6 +2164,20 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 			}
 			return ret
 		}
+		// mergeLogs returns a merged log slice with specified sort order.
+		mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
+			var ret []*types.Log
+			if reverse {
+				for i := len(logs) - 1; i >= 0; i-- {
+					ret = append(ret, logs[i]...)
+				}
+			} else {
+				for i := 0; i < len(logs); i++ {
+					ret = append(ret, logs[i]...)
+				}
+			}
+			return ret
+		}
 	)
 	// Reduce the longer chain to the same number as the shorter one
 	if oldBlock.NumberU64() > newBlock.NumberU64() {
@@ -2230,6 +2312,8 @@ func (bc *BlockChain) update() {
 // sync, Geth will automatically construct the missing indices and delete
 // the extra indices.
 func (bc *BlockChain) maintainTxIndex(ancients uint64) {
+	defer bc.wg.Done()
+
 	// Before starting the actual maintenance, we need to handle a special case,
 	// where the user might init Geth with an external ancient database. If so, we
 	// need to reindex all necessary transactions before starting to process any
@@ -2239,7 +2323,7 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
 		if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
 			from = ancients - bc.txLookupLimit
 		}
-		rawdb.IndexTransactions(bc.db, from, ancients)
+		rawdb.IndexTransactions(bc.db, from, ancients, bc.quit)
 	}
 	// indexBlocks reindexes or unindexes transactions depending on user configuration
 	indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
@@ -2253,24 +2337,24 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
 				rawdb.WriteTxIndexTail(bc.db, 0)
 			} else {
 				// Prune all stale tx indices and record the tx index tail
-				rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1)
+				rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit)
 			}
 			return
 		}
 		// If a previous indexing existed, make sure that we fill in any missing entries
 		if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
 			if *tail > 0 {
-				rawdb.IndexTransactions(bc.db, 0, *tail)
+				rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit)
 			}
 			return
 		}
 		// Update the transaction index to the new chain state
 		if head-bc.txLookupLimit+1 < *tail {
 			// Reindex a part of missing indices and rewind index tail to HEAD-limit
-			rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail)
+			rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
 		} else {
 			// Unindex a part of stale indices and forward index tail to HEAD-limit
-			rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1)
+			rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
 		}
 	}
 	// Any reindexing done, start listening to chain events and moving the index window
@@ -2294,6 +2378,10 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
 		case <-done:
 			done = nil
 		case <-bc.quit:
+			if done != nil {
+				log.Info("Waiting background transaction indexer to exit")
+				<-done
+			}
 			return
 		}
 	}
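
The remaining blockchain.go changes are shutdown hygiene: maintainTxIndex joins the chain's WaitGroup, the rawdb (un)indexing helpers take bc.quit so a long-running range can abort early, and the event loop waits for an in-flight round before returning. The general pattern, as a self-contained sketch:

package main

import (
	"fmt"
	"time"
)

// indexRange stands in for rawdb.IndexTransactions: it checks the quit
// channel between units of work so shutdown needn't wait for the whole range.
func indexRange(from, to uint64, quit chan struct{}) {
	for i := from; i < to; i++ {
		select {
		case <-quit:
			fmt.Println("indexing interrupted at", i)
			return
		default:
			time.Sleep(time.Millisecond) // stand-in for one index write
		}
	}
}

func main() {
	quit := make(chan struct{})
	done := make(chan struct{})
	go func() { indexRange(0, 1_000_000, quit); close(done) }()

	time.Sleep(10 * time.Millisecond)
	close(quit) // Stop(): signal shutdown...
	<-done      // ...then wait for the worker, like the select on bc.quit above
	fmt.Println("clean shutdown")
}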
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index 5685b0a4bdd96b95af6f589ba998e20cb9ea57e6..cb8473c08426cde44f6336dcb18b3677e49bb0aa 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -43,7 +43,7 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
 	// Fetch the timings for the batch
 	var (
 		now     = mclock.Now()
-		elapsed = time.Duration(now) - time.Duration(st.startTime)
+		elapsed = now.Sub(st.startTime)
 	)
 	// If we're at the last block of the batch or report period reached, log
 	if index == len(chain)-1 || elapsed >= statsReportLimit {
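
The insertStats fix replaces manual time.Duration casts with mclock's own subtraction, keeping the arithmetic on the monotonic clock type. In isolation, assuming the common/mclock API used in the hunk above:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	start := mclock.Now() // monotonic timestamp, immune to wall-clock jumps
	time.Sleep(50 * time.Millisecond)
	elapsed := mclock.Now().Sub(start) // replaces time.Duration(now) - time.Duration(start)
	fmt.Println(elapsed >= 50*time.Millisecond)
}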
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 27903dd06b354ea324c051ff561e9fc1dbb596eb..b5cd232a9c4f2a64324100c1123827e4895bee4c 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -25,6 +25,7 @@ import (
 	"math/big"
 	"os"
 	"testing"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -38,7 +39,10 @@ import (
 // committed to disk and then the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left
 // in the database for replaying.
-func TestShortRepair(t *testing.T) {
+func TestShortRepair(t *testing.T)              { testShortRepair(t, false) }
+func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) }
+
+func testShortRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -68,14 +72,17 @@ func TestShortRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain where the fast sync pivot point was
 // already committed, after which the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left in
 // the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) {
+func TestShortFastSyncedRepair(t *testing.T)              { testShortFastSyncedRepair(t, false) }
+func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
+
+func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -105,14 +112,17 @@ func TestShortFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain where the fast sync pivot point was
 // not yet committed, but the process crashed. In this case we expect the chain to
 // detect that it was fast syncing and not delete anything, since we can just pick
 // up directly where we left off.
-func TestShortFastSyncingRepair(t *testing.T) {
+func TestShortFastSyncingRepair(t *testing.T)              { testShortFastSyncingRepair(t, false) }
+func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
+
+func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -142,7 +152,7 @@ func TestShortFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
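
From here on, the repair tests follow one mechanical recipe: each former Test function becomes a testX(t, snapshots bool) body plus two thin named wrappers, so both variants stay individually addressable via go test -run. The same pairing could be written as subtests instead; a sketch of that alternative (illustrative only, with hypothetical names; the diff deliberately keeps flat wrappers):

package core

import "testing"

func testShortRepairSketch(t *testing.T, snapshots bool) { /* body as in the diff */ }

func TestShortRepairSketch(t *testing.T) {
	for name, snapshots := range map[string]bool{"plain": false, "with-snapshots": true} {
		snapshots := snapshots // capture for the closure
		t.Run(name, func(t *testing.T) { testShortRepairSketch(t, snapshots) })
	}
}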
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -150,7 +160,10 @@ func TestShortFastSyncingRepair(t *testing.T) {
 // test scenario the side chain is below the committed block. In this case we expect
 // the canonical chain to be rolled back to the committed block, but the chain data
 // itself left in the database for replaying.
-func TestShortOldForkedRepair(t *testing.T) {
+func TestShortOldForkedRepair(t *testing.T)              { testShortOldForkedRepair(t, false) }
+func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) }
+
+func testShortOldForkedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -182,7 +195,7 @@ func TestShortOldForkedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -191,6 +204,13 @@ func TestShortOldForkedRepair(t *testing.T) {
 // this case we expect the canonical chain to be rolled back to the committed block,
 // but the chain data itself left in the database for replaying.
 func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+	testShortOldForkedFastSyncedRepair(t, false)
+}
+func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncedRepair(t, true)
+}
+
+func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -222,7 +242,7 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -231,6 +251,13 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
 // the chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+	testShortOldForkedFastSyncingRepair(t, false)
+}
+func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncingRepair(t, true)
+}
+
+func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -262,7 +289,7 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -270,7 +297,10 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
 // test scenario the side chain reaches above the committed block. In this case we
 // expect the canonical chain to be rolled back to the committed block, but the
 // chain data itself left in the database for replaying.
-func TestShortNewlyForkedRepair(t *testing.T) {
+func TestShortNewlyForkedRepair(t *testing.T)              { testShortNewlyForkedRepair(t, false) }
+func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) }
+
+func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -302,7 +332,7 @@ func TestShortNewlyForkedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -311,6 +341,13 @@ func TestShortNewlyForkedRepair(t *testing.T) {
 // In this case we expect the canonical chain to be rolled back to the committed
 // block, but the chain data itself left in the database for replaying.
 func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+	testShortNewlyForkedFastSyncedRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncedRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -342,7 +379,7 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -351,6 +388,13 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
 // case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
 func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+	testShortNewlyForkedFastSyncingRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncingRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -382,14 +426,17 @@ func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where a
 // recent block was already committed to disk and then the process crashed. In this
 // case we expect the canonical chain to be rolled back to the committed block, but
 // the chain data itself left in the database for replaying.
-func TestShortReorgedRepair(t *testing.T) {
+func TestShortReorgedRepair(t *testing.T)              { testShortReorgedRepair(t, false) }
+func TestShortReorgedRepairWithSnapshots(t *testing.T) { testShortReorgedRepair(t, true) }
+
+func testShortReorgedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -421,7 +468,7 @@ func TestShortReorgedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -429,6 +476,13 @@ func TestShortReorgedRepair(t *testing.T) {
 // crashed. In this case we expect the canonical chain to be rolled back to the
 // committed block, but the chain data itself left in the database for replaying.
 func TestShortReorgedFastSyncedRepair(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, false)
+}
+func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, true)
+}
+
+func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -460,7 +514,7 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -468,6 +522,13 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 // this case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
 func TestShortReorgedFastSyncingRepair(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, false)
+}
+func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, true)
+}
+
+func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -499,14 +560,17 @@ func TestShortReorgedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - newer than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards kept as fast sync data.
-func TestLongShallowRepair(t *testing.T) {
+func TestLongShallowRepair(t *testing.T)              { testLongShallowRepair(t, false) }
+func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
+
+func testLongShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -541,14 +605,17 @@ func TestLongShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - older than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards deleted.
-func TestLongDeepRepair(t *testing.T) {
+func TestLongDeepRepair(t *testing.T)              { testLongDeepRepair(t, false) }
+func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
+
+func testLongDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -582,7 +649,7 @@ func TestLongDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -590,6 +657,13 @@ func TestLongDeepRepair(t *testing.T) {
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards kept as fast sync data.
 func TestLongFastSyncedShallowRepair(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, false)
+}
+func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, true)
+}
+
+func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -624,14 +698,17 @@ func TestLongFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
 // sync pivot point - older than the ancient limit - was already committed, after
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) {
+func TestLongFastSyncedDeepRepair(t *testing.T)              { testLongFastSyncedDeepRepair(t, false) }
+func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+
+func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -665,7 +742,7 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -674,6 +751,13 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
 func TestLongFastSyncingShallowRepair(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, false)
+}
+func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, true)
+}
+
+func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -708,7 +792,7 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -716,7 +800,10 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
 // process crashed. In this case we expect the chain to detect that it was fast
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) {
+func TestLongFastSyncingDeepRepair(t *testing.T)              { testLongFastSyncingDeepRepair(t, false) }
+func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+
+func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -751,7 +838,7 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -761,6 +848,13 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedShallowRepair(t *testing.T) {
+	testLongOldForkedShallowRepair(t, false)
+}
+func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedShallowRepair(t, true)
+}
+
+func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -796,7 +890,7 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -805,7 +899,10 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
 // chain is below the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongOldForkedDeepRepair(t *testing.T) {
+func TestLongOldForkedDeepRepair(t *testing.T)              { testLongOldForkedDeepRepair(t, false) }
+func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
+
+func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -840,7 +937,7 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -850,6 +947,13 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
 // to be rolled back to the committed block, with everything afterwards kept as
 // fast sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -885,7 +989,7 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -895,6 +999,13 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -929,7 +1040,7 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -939,6 +1050,13 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -974,7 +1092,7 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -984,6 +1102,13 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1019,7 +1144,7 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1029,6 +1154,13 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedShallowRepair(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, false)
+}
+func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, true)
+}
+
+func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1064,7 +1196,7 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1073,7 +1205,10 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
 // chain is above the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongNewerForkedDeepRepair(t *testing.T) {
+func TestLongNewerForkedDeepRepair(t *testing.T)              { testLongNewerForkedDeepRepair(t, false) }
+func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
+
+func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1108,7 +1243,7 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1118,6 +1253,13 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
 // to be rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1153,7 +1295,7 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1163,6 +1305,13 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1197,7 +1346,7 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1207,6 +1356,13 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1242,7 +1398,7 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1252,6 +1408,13 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1287,7 +1450,7 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
@@ -1295,7 +1458,10 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
 // to disk and then the process crashed. In this case we expect the chain to be
 // rolled back to the committed block, with everything afterwards kept as fast sync
 // data. The side chain completely nuked by the freezer.
-func TestLongReorgedShallowRepair(t *testing.T) {
+func TestLongReorgedShallowRepair(t *testing.T)              { testLongReorgedShallowRepair(t, false) }
+func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) }
+
+func testLongReorgedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1331,7 +1497,7 @@ func TestLongReorgedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
@@ -1339,7 +1505,10 @@ func TestLongReorgedShallowRepair(t *testing.T) {
 // to disk and then the process crashed. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted. The
 // side chain completely nuked by the freezer.
-func TestLongReorgedDeepRepair(t *testing.T) {
+func TestLongReorgedDeepRepair(t *testing.T)              { testLongReorgedDeepRepair(t, false) }
+func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) }
+
+func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1374,7 +1543,7 @@ func TestLongReorgedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1384,6 +1553,13 @@ func TestLongReorgedDeepRepair(t *testing.T) {
 // afterwards kept as fast sync data. The side chain completely nuked by the
 // freezer.
 func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1419,7 +1595,7 @@ func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1428,6 +1604,13 @@ func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
 // expect the canonical chain to be rolled back to the committed block, with
 // everything afterwards deleted. The side chain completely nuked by the freezer.
 func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, false)
+}
+func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, true)
+}
+
+func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1462,7 +1645,7 @@ func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1471,6 +1654,13 @@ func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1506,7 +1696,7 @@ func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1515,6 +1705,13 @@ func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+	testLongReorgedFastSyncingDeepRepair(t, false)
+}
+func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingDeepRepair(t, true)
+}
+
+func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1550,13 +1747,13 @@ func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
-func testRepair(t *testing.T, tt *rewindTest) {
+func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	// It's hard to follow the test case; uncomment the lines below to visualize the input
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(true))
+	// fmt.Println(tt.dump(true))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1575,8 +1772,18 @@ func testRepair(t *testing.T, tt *rewindTest) {
 	var (
 		genesis = new(Genesis).MustCommit(db)
 		engine  = ethash.NewFullFaker()
+		config  = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot by default
+		}
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if snapshots {
+		config.SnapshotLimit = 256
+		config.SnapshotWait = true
+	}
+	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
@@ -1599,6 +1806,11 @@ func testRepair(t *testing.T, tt *rewindTest) {
 	}
 	if tt.commitBlock > 0 {
 		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+		if snapshots {
+			if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+				t.Fatalf("Failed to flatten snapshots: %v", err)
+			}
+		}
 	}
 	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
 		t.Fatalf("Failed to import canonical chain tail: %v", err)
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index dc1368ff4b488e1777dff60a9923cf3345867051..45c4073eb4ce6e1e10301b55d09ab9819bf84637 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -26,6 +26,7 @@ import (
 	"os"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -150,7 +151,10 @@ func (tt *rewindTest) dump(crash bool) string {
 // chain to be rolled back to the committed block. Everything above the sethead
 // point should be deleted. In between the committed block and the requested head
 // the data can remain as "fast sync" data to avoid redownloading it.
-func TestShortSetHead(t *testing.T) {
+func TestShortSetHead(t *testing.T)              { testShortSetHead(t, false) }
+func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) }
+
+func testShortSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -181,7 +185,7 @@ func TestShortSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -190,7 +194,10 @@ func TestShortSetHead(t *testing.T) {
 // Everything above the sethead point should be deleted. In between the committed
 // block and the requested head the data can remain as "fast sync" data to avoid
 // redownloading it.
-func TestShortFastSyncedSetHead(t *testing.T) {
+func TestShortFastSyncedSetHead(t *testing.T)              { testShortFastSyncedSetHead(t, false) }
+func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
+
+func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -221,7 +228,7 @@ func TestShortFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -229,7 +236,10 @@ func TestShortFastSyncedSetHead(t *testing.T) {
 // detect that it was fast syncing and delete everything from the new head, since
 // we can just pick up fast syncing from there. The head full block should be set
 // to the genesis.
-func TestShortFastSyncingSetHead(t *testing.T) {
+func TestShortFastSyncingSetHead(t *testing.T)              { testShortFastSyncingSetHead(t, false) }
+func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
+
+func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -260,7 +270,7 @@ func TestShortFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -270,7 +280,10 @@ func TestShortFastSyncingSetHead(t *testing.T) {
 // above the sethead point should be deleted. In between the committed block and
 // the requested head the data can remain as "fast sync" data to avoid redownloading
 // it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedSetHead(t *testing.T) {
+func TestShortOldForkedSetHead(t *testing.T)              { testShortOldForkedSetHead(t, false) }
+func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) }
+
+func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -303,7 +316,7 @@ func TestShortOldForkedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -314,6 +327,13 @@ func TestShortOldForkedSetHead(t *testing.T) {
 // committed block and the requested head the data can remain as "fast sync" data
 // to avoid redownloading it. The side chain should be left alone as it was shorter.
 func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+	testShortOldForkedFastSyncedSetHead(t, false)
+}
+func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncedSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -346,7 +366,7 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -356,6 +376,13 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The head full block
 // should be set to the genesis.
 func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+	testShortOldForkedFastSyncingSetHead(t, false)
+}
+func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncingSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -388,7 +415,7 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -402,7 +429,10 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSetHead(t *testing.T) {
+func TestShortNewlyForkedSetHead(t *testing.T)              { testShortNewlyForkedSetHead(t, false) }
+func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) }
+
+func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -435,7 +465,7 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -449,6 +479,13 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+	testShortNewlyForkedFastSyncedSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncedSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -481,7 +518,7 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -495,6 +532,13 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+	testShortNewlyForkedFastSyncingSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncingSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -527,7 +571,7 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where a
@@ -540,7 +584,10 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedSetHead(t *testing.T) {
+func TestShortReorgedSetHead(t *testing.T)              { testShortReorgedSetHead(t, false) }
+func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) }
+
+func testShortReorgedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -573,7 +620,7 @@ func TestShortReorgedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where
@@ -588,6 +635,13 @@ func TestShortReorgedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortReorgedFastSyncedSetHead(t *testing.T) {
+	testShortReorgedFastSyncedSetHead(t, false)
+}
+func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncedSetHead(t, true)
+}
+
+func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -620,7 +674,7 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where
@@ -633,6 +687,13 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortReorgedFastSyncingSetHead(t *testing.T) {
+	testShortReorgedFastSyncingSetHead(t, false)
+}
+func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncingSetHead(t, true)
+}
+
+func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -665,7 +726,7 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -674,7 +735,10 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) {
 // to the committed block. Everything above the sethead point should be deleted.
 // In between the committed block and the requested head the data can remain as
 // "fast sync" data to avoid redownloading it.
-func TestLongShallowSetHead(t *testing.T) {
+func TestLongShallowSetHead(t *testing.T)              { testLongShallowSetHead(t, false) }
+func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) }
+
+func testLongShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -710,7 +774,7 @@ func TestLongShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -718,7 +782,10 @@ func TestLongShallowSetHead(t *testing.T) {
 // sethead was called. In this case we expect the full chain to be rolled back
 // to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongDeepSetHead(t *testing.T) {
+func TestLongDeepSetHead(t *testing.T)              { testLongDeepSetHead(t, false) }
+func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) }
+
+func testLongDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -753,7 +820,7 @@ func TestLongDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -763,6 +830,13 @@ func TestLongDeepSetHead(t *testing.T) {
 // deleted. In between the committed block and the requested head the data can
 // remain as "fast sync" data to avoid redownloading it.
 func TestLongFastSyncedShallowSetHead(t *testing.T) {
+	testLongFastSyncedShallowSetHead(t, false)
+}
+func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncedShallowSetHead(t, true)
+}
+
+func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -798,7 +872,7 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -806,7 +880,10 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
 // which sethead was called. In this case we expect the full chain to be rolled
 // back to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongFastSyncedDeepSetHead(t *testing.T) {
+func TestLongFastSyncedDeepSetHead(t *testing.T)              { testLongFastSyncedDeepSetHead(t, false) }
+func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
+
+func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -841,7 +918,7 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -850,6 +927,13 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
 func TestLongFastSyncingShallowSetHead(t *testing.T) {
+	testLongFastSyncingShallowSetHead(t, false)
+}
+func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncingShallowSetHead(t, true)
+}
+
+func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -885,7 +969,7 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -894,6 +978,13 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
 func TestLongFastSyncingDeepSetHead(t *testing.T) {
+	testLongFastSyncingDeepSetHead(t, false)
+}
+func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncingDeepSetHead(t, true)
+}
+
+func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -928,7 +1019,7 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -939,6 +1030,13 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
 // can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
 // by the freezer.
 func TestLongOldForkedShallowSetHead(t *testing.T) {
+	testLongOldForkedShallowSetHead(t, false)
+}
+func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedShallowSetHead(t, true)
+}
+
+func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -975,7 +1073,7 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -984,7 +1082,10 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
 // chain to be rolled back to the committed block. Since the ancient limit was
 // underflown, everything needs to be deleted onwards to avoid creating a gap. The
 // side chain is nuked by the freezer.
-func TestLongOldForkedDeepSetHead(t *testing.T) {
+func TestLongOldForkedDeepSetHead(t *testing.T)              { testLongOldForkedDeepSetHead(t, false) }
+func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) }
+
+func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1020,7 +1121,7 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1032,6 +1133,13 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
 // requested head the data can remain as "fast sync" data to avoid redownloading
 // it. The side chain is nuked by the freezer.
 func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+	testLongOldForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -1068,7 +1176,7 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1079,6 +1187,13 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
 // underflown, everything needs to be deleted onwards to avoid creating a gap. The
 // side chain is nuked by the freezer.
 func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+	testLongOldForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1114,7 +1229,7 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1125,6 +1240,13 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
 func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+	testLongOldForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -1161,7 +1283,7 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1172,6 +1294,13 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
 func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+	testLongOldForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1207,7 +1336,7 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1216,6 +1345,13 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongShallowSetHead.
 func TestLongNewerForkedShallowSetHead(t *testing.T) {
+	testLongNewerForkedShallowSetHead(t, false)
+}
+func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1252,7 +1388,7 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1261,6 +1397,13 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongDeepSetHead.
 func TestLongNewerForkedDeepSetHead(t *testing.T) {
+	testLongNewerForkedDeepSetHead(t, false)
+}
+func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1296,7 +1439,7 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1305,6 +1448,13 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
 // the side chain is above the committed block. In this case the freezer will delete
 // the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
 func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1341,7 +1491,7 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1350,6 +1500,13 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
 // the side chain is above the committed block. In this case the freezer will delete
 // the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
 func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1385,7 +1542,7 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1394,6 +1551,13 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
 func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1430,7 +1594,7 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1439,6 +1603,13 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
 func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1474,14 +1645,17 @@ func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - newer than the ancient limit - was already committed
 // to disk and then sethead was called. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongShallowSetHead.
-func TestLongReorgedShallowSetHead(t *testing.T) {
+func TestLongReorgedShallowSetHead(t *testing.T)              { testLongReorgedShallowSetHead(t, false) }
+func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) }
+
+func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1518,14 +1692,17 @@ func TestLongReorgedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - older than the ancient limit - was already committed
 // to disk and then sethead was called. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongDeepSetHead.
-func TestLongReorgedDeepSetHead(t *testing.T) {
+func TestLongReorgedDeepSetHead(t *testing.T)              { testLongReorgedDeepSetHead(t, false) }
+func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) }
+
+func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1561,7 +1738,7 @@ func TestLongReorgedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1570,6 +1747,13 @@ func TestLongReorgedDeepSetHead(t *testing.T) {
 // freezer will delete the sidechain since it's dangling, reverting to
 // TestLongFastSyncedShallowSetHead.
 func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
+	testLongReorgedFastSyncedShallowSetHead(t, false)
+}
+func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1606,7 +1790,7 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1615,6 +1799,13 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
 // freezer will delete the sidechain since it's dangling, reverting to
 // TestLongFastSyncedDeepSetHead.
 func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
+	testLongReorgedFastSyncedDeepSetHead(t, false)
+}
+func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1650,7 +1841,7 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1660,6 +1851,13 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
 func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
+	testLongReorgedFastSyncingShallowSetHead(t, false)
+}
+func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1696,7 +1894,7 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1706,6 +1904,13 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
 func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
+	testLongReorgedFastSyncingDeepSetHead(t, false)
+}
+func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1741,13 +1946,13 @@ func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
-func testSetHead(t *testing.T, tt *rewindTest) {
+func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
 	// It's hard to follow the test case, visualize the input
-	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(false))
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump(false))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1766,8 +1971,18 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 	var (
 		genesis = new(Genesis).MustCommit(db)
 		engine  = ethash.NewFullFaker()
+		config  = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot
+		}
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
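+	// Optionally enable the snapshot tree; SnapshotWait makes NewBlockChain
+	// block until the initial snapshot generation has finished.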
+	if snapshots {
+		config.SnapshotLimit = 256
+		config.SnapshotWait = true
+	}
+	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
@@ -1790,6 +2005,11 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 	}
 	if tt.commitBlock > 0 {
 		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+		if snapshots {
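+			// Flatten the snapshot tree onto the committed block, so the
+			// disk layer matches the state that was just committed.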
+			if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+				t.Fatalf("Failed to flatten snapshots: %v", err)
+			}
+		}
 	}
 	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
 		t.Fatalf("Failed to import canonical chain tail: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8d3b2470a6e1c60ab2912d775871a2880542f9e
--- /dev/null
+++ b/core/blockchain_snapshot_test.go
@@ -0,0 +1,797 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. a crash) and restart can
+// recover the snapshot properly if the snapshot feature is enabled.
+
+package core
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// snapshotTest is a test case for snapshot recovery. It can be used for
+// simulating these scenarios:
+// (i)   Geth restarts normally with valid legacy snapshot
+// (ii)  Geth restarts normally with valid new-format snapshot
+// (iii) Geth restarts after the crash, with broken legacy snapshot
+// (iv)  Geth restarts after the crash, with broken new-format snapshot
+// (v)   Geth restarts normally, but it's requested to be rewound to a lower point via SetHead
+// (vi)  Geth restarts normally with a stale snapshot
+type snapshotTest struct {
+	legacy       bool   // Flag whether the loaded snapshot is in legacy format
+	crash        bool   // Flag whether the Geth restarts from the previous crash
+	restartCrash int    // Number of blocks to insert after the normal stop, then the crash happens
+	gapped       int    // Number of blocks to insert without enabling snapshot
+	setHead      uint64 // Block number to set head back to
+
+	chainBlocks   int    // Number of blocks to generate for the canonical chain
+	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
+	commitBlock   uint64 // Block number for which to commit the state to disk
+
+	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
+	expHeadHeader      uint64 // Block number of the expected head header
+	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
+	expHeadBlock       uint64 // Block number of the expected head full block
+	expSnapshotBottom  uint64 // The block height corresponding to the snapshot disk layer
+}
+
+func (tt *snapshotTest) dump() string {
+	buffer := new(strings.Builder)
+
+	fmt.Fprint(buffer, "Chain:\n  G")
+	for i := 0; i < tt.chainBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprint(buffer, " (HEAD)\n\n")
+
+	fmt.Fprintf(buffer, "Commit:   G")
+	if tt.commitBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	fmt.Fprintf(buffer, "Snapshot: G")
+	if tt.snapshotBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.snapshotBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	if tt.crash {
+		fmt.Fprintf(buffer, "\nCRASH\n\n")
+	} else {
+		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setHead)
+	}
+	fmt.Fprintf(buffer, "------------------------------\n\n")
+
+	fmt.Fprint(buffer, "Expected in leveldb:\n  G")
+	for i := 0; i < tt.expCanonicalBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprintf(buffer, "\n\n")
+	fmt.Fprintf(buffer, "Expected head header    : C%d\n", tt.expHeadHeader)
+	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+	if tt.expHeadBlock == 0 {
+		fmt.Fprintf(buffer, "Expected head block     : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected head block     : C%d\n", tt.expHeadBlock)
+	}
+	if tt.expSnapshotBottom == 0 {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : C%d\n", tt.expSnapshotBottom)
+	}
+	return buffer.String()
+}
+
+// Tests a Geth restart with a valid snapshot. Before the shutdown, the
+// snapshot journal is persisted correctly. In this case no snapshot recovery
+// is required.
+func TestRestartWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth restart with a valid but "legacy" snapshot. Before the
+// shutdown, the snapshot journal is persisted correctly. In this case no
+// snapshot recovery is required.
+func TestRestartWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and the
+// new head must be lower than the disk layer. Since there is no committed
+// point, the chain should be rewound to genesis and the disk layer should be
+// left for recovery.
+func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and the
+// new head must be lower than the disk layer. Since there is only a low
+// committed point, the chain should be rewound to that point and the disk
+// layer should be left for recovery.
+func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and the
+// new head must be lower than the disk layer. Since there is only a high
+// committed point, the chain should be rewound to genesis and the disk layer
+// should be left for recovery.
+func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to the
+// genesis because there is no committed point.
+func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to
+// block-2 because its state is committed to disk.
+func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C2
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  2, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+//
+// The new head here refers to the genesis, because:
+//   - the state of block-6 is committed to disk
+//   - the legacy disk layer of block-4 is committed to disk
+//   - the head is rewound to the genesis in order to find an available
+//     state lower than the disk layer
+func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
+	})
+}
+
+// Tests a Geth that was running with the snapshot enabled, then restarts
+// without the snapshot and after that re-enables it again. In this case
+// the snapshot should be rebuilt from the latest chain head.
+func TestGappedNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests a Geth that was running with the legacy snapshot enabled, then
+// restarts without the snapshot and after that re-enables it again.
+// In this case the snapshot should be rebuilt from the latest chain head.
+func TestGappedLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests a Geth that was running with the snapshot enabled and then SetHead
+// is applied. In this case the head is rewound to the target (with state
+// available). After that the chain is restarted and the original disk layer
+// is kept.
+func TestSetHeadWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
+	})
+}
+
+// Tests a Geth that was running with the snapshot (legacy format) enabled and
+// then SetHead is applied. In this case the head is rewound to the target
+// (with state available). After that the chain is restarted and the original
+// disk layer is kept.
+func TestSetHeadWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
+	})
+}
+
+// Tests a Geth that was running with the snapshot (legacy format) enabled and
+// upgrades the disk layer journal (journal generator) to the latest format.
+// After that Geth is restarted from a crash. In this case Geth will find the
+// new-format disk layer journal but a legacy-format diff journal (the new
+// format was never committed), and the invalid diff journal is expected to
+// be dropped.
+func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C8
+	// Expected snapshot disk  : C10
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		restartCrash:       2,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       8,  // The persisted state in the first run
+		expSnapshotBottom:  10, // The persisted disk layer in the second run
+	})
+}
+
+func testSnapshot(t *testing.T, tt *snapshotTest) {
+	// It's hard to follow the test case, visualize the input
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump())
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
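+	// Drop the directory itself; the database layer below recreates the path.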
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+		gendb   = rawdb.NewMemoryDatabase()
+
+		// The snapshot is enabled: the first snapshot is created from the
+		// genesis. The snapshot memory allowance is 256MB, which means no
+		// snapshot flush will happen during block insertion.
+		cacheConfig = defaultCacheConfig
+	)
+	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, tt.chainBlocks, func(i int, b *BlockGen) {})
+
+	// Insert the blocks with configured settings.
+	var breakpoints []uint64
+	if tt.commitBlock > tt.snapshotBlock {
+		breakpoints = append(breakpoints, tt.snapshotBlock, tt.commitBlock)
+	} else {
+		breakpoints = append(breakpoints, tt.commitBlock, tt.snapshotBlock)
+	}
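+	// Walk the breakpoints in ascending order, pausing chain insertion at
+	// each one to commit state and/or flush the snapshot as configured.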
+	var startPoint uint64
+	for _, point := range breakpoints {
+		if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
+			t.Fatalf("Failed to import canonical chain start: %v", err)
+		}
+		startPoint = point
+
+		if tt.commitBlock > 0 && tt.commitBlock == point {
+			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+		}
+		if tt.snapshotBlock > 0 && tt.snapshotBlock == point {
+			if tt.legacy {
+				// Here we commit the snapshot disk root to simulate
+				// committing the legacy snapshot.
+				rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
+			} else {
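+				// Capping with zero diff layers retained flattens everything
+				// into the disk layer, advancing the persisted snapshot root.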
+				chain.snaps.Cap(blocks[point-1].Root(), 0)
+				diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+				if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+					t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+				}
+			}
+		}
+	}
+	if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Set the flag for writing legacy journal if necessary
+	if tt.legacy {
+		chain.writeLegacyJournal = true
+	}
+	// Pull the plug on the database, simulating a hard crash
+	if tt.crash {
+		db.Close()
+
+		// Start a new blockchain back up and see where the repair leads us
+		db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+		if err != nil {
+			t.Fatalf("Failed to reopen persistent database: %v", err)
+		}
+		defer db.Close()
+
+		// The interesting thing is: instead of starting the blockchain once
+		// after the crash, we restart it twice here: once after the crash
+		// and once after a normal stop. This ensures the broken snapshot is
+		// detected in both cases.
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.gapped > 0 {
+		// Insert blocks without enabling snapshot if gapping is required.
+		chain.Stop()
+		gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {})
+
+		// Insert a few more blocks without enabling snapshot
+		var cacheConfig = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0,
+		}
+		chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.InsertChain(gappedBlocks)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.setHead != 0 {
+		// Rewind the chain if setHead operation is required.
+		chain.SetHead(tt.setHead)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.restartCrash != 0 {
+		// First, stop the chain properly, with the snapshot journal
+		// and state fully committed.
+		chain.Stop()
+
+		// Restart the chain, forcibly flushing the disk layer journal in the new format
+		newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.restartCrash, func(i int, b *BlockGen) {})
+		chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.InsertChain(newBlocks)
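+		// Cap with zero retained diff layers to flatten everything into the
+		// disk layer, persisting the disk root and generator in the new format.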
+		chain.Snapshot().Cap(newBlocks[len(newBlocks)-1].Root(), 0)
+
+		// Simulate a blockchain crash by not calling chain.Stop here,
+		// so that neither the snapshot journal nor the latest state
+		// gets committed
+
+		// Restart the chain after the crash
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else {
+		chain.Stop()
+
+		// Restart the chain normally
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	}
+
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, blocks)
+	verifyCutoff(t, chain, true, blocks, tt.expCanonicalBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	// Check the disk layer, ensure its root matches the expected block root
+	block := chain.GetBlockByNumber(tt.expSnapshotBottom)
+	if block == nil {
+		t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", tt.expSnapshotBottom)
+	} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
+		t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
+	}
+}
diff --git a/core/genesis.go b/core/genesis.go
index 4525b9c17440839122666ff624b110179c4b5016..0535d7ee3a183188eb77cf2b6e8572cfa6bb9045 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -243,8 +243,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
 		return params.RinkebyChainConfig
 	case ghash == params.GoerliGenesisHash:
 		return params.GoerliChainConfig
-	case ghash == params.YoloV1GenesisHash:
-		return params.YoloV1ChainConfig
+	case ghash == params.YoloV2GenesisHash:
+		return params.YoloV2ChainConfig
 	default:
 		return params.AllEthashProtocolChanges
 	}
@@ -380,10 +380,11 @@ func DefaultGoerliGenesisBlock() *Genesis {
 	}
 }
 
-func DefaultYoloV1GenesisBlock() *Genesis {
+func DefaultYoloV2GenesisBlock() *Genesis {
+	// TODO: Update with yolov2 values + regenerate alloc data
 	return &Genesis{
-		Config:     params.YoloV1ChainConfig,
-		Timestamp:  0x5ed754f1,
+		Config:     params.YoloV2ChainConfig,
+		Timestamp:  0x5f91b932,
 		ExtraData:  hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000008a37866fd3627c9205a37c8685666f32ec07bb1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
 		GasLimit:   0x47b760,
 		Difficulty: big.NewInt(1),
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index ecd4e65978eeb4857c094392486192f6f0d785fc..5bd48ad5fad523ffc32809f4243d979821bfd815 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -17,6 +17,8 @@
 package rawdb
 
 import (
+	"encoding/binary"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
@@ -118,3 +120,58 @@ func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
 		log.Crit("Failed to remove snapshot journal", "err", err)
 	}
 }
+
+// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
+// the last shutdown.
+func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
+	data, _ := db.Get(snapshotGeneratorKey)
+	return data
+}
+
+// WriteSnapshotGenerator stores the serialized snapshot generator to save at
+// shutdown.
+func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
+	if err := db.Put(snapshotGeneratorKey, generator); err != nil {
+		log.Crit("Failed to store snapshot generator", "err", err)
+	}
+}
+
+// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
+// the last shutdown.
+func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotGeneratorKey); err != nil {
+		log.Crit("Failed to remove snapshot generator", "err", err)
+	}
+}
+
+// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
+// snapshot layer.
+func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 {
+	data, _ := db.Get(snapshotRecoveryKey)
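+	// The recovery number is stored as an 8-byte big-endian value; any
+	// other length is treated as missing.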
+	if len(data) != 8 {
+		return nil
+	}
+	number := binary.BigEndian.Uint64(data)
+	return &number
+}
+
+// WriteSnapshotRecoveryNumber stores the block number of the last persisted
+// snapshot layer.
+func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) {
+	var buf [8]byte
+	binary.BigEndian.PutUint64(buf[:], number)
+	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
+		log.Crit("Failed to store snapshot recovery number", "err", err)
+	}
+}
+
+// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
+// snapshot layer.
+func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotRecoveryKey); err != nil {
+		log.Crit("Failed to remove snapshot recovery number", "err", err)
+	}
+}
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
index 3130e922e85a1d9951d022888b5f353008fa68b8..393b72c26c1638a4bd59909f8f401ace2f90b4fd 100644
--- a/core/rawdb/chain_iterator.go
+++ b/core/rawdb/chain_iterator.go
@@ -84,15 +84,17 @@ type blockTxHashes struct {
 }
 
 // iterateTransactions iterates over all transactions in the (canon) block
-// number(s) given, and yields the hashes on a channel
-func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) {
+// number(s) given, and yields the hashes on a channel. If a signal is
+// received on the interrupt channel, the iteration is aborted and the
+// result channel is closed.
+func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes {
 	// One thread sequentially reads data from db
 	type numberRlp struct {
 		number uint64
 		rlp    rlp.RawValue
 	}
 	if to == from {
-		return nil, nil
+		return nil
 	}
 	threads := to - from
 	if cpus := runtime.NumCPU(); threads > uint64(cpus) {
@@ -101,7 +103,6 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 	var (
 		rlpCh    = make(chan *numberRlp, threads*2)     // we send raw rlp over this channel
 		hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
-		abortCh  = make(chan struct{})
 	)
 	// lookup runs in one instance
 	lookup := func() {
@@ -115,7 +116,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 			// Feed the block to the aggregator, or abort on interrupt
 			select {
 			case rlpCh <- &numberRlp{n, data}:
-			case <-abortCh:
+			case <-interrupt:
 				return
 			}
 			if reverse {
@@ -168,7 +169,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 			// Feed the block to the aggregator, or abort on interrupt
 			select {
 			case hashesCh <- result:
-			case <-abortCh:
+			case <-interrupt:
 				return
 			}
 		}
@@ -177,25 +178,28 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 	for i := 0; i < int(threads); i++ {
 		go process()
 	}
-	return hashesCh, abortCh
+	return hashesCh
 }
 
-// IndexTransactions creates txlookup indices of the specified block range.
+// indexTransactions creates txlookup indices of the specified block range.
 //
 // This function iterates canonical chain in reverse order, it has one main advantage:
 // We can write tx index tail flag periodically even without the whole indexing
 // procedure is finished. So that we can resume indexing procedure next time quickly.
-func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
+//
+// The whole procedure is interrupted if a signal is received on the passed
+// interrupt channel.
+func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
 	// short circuit for invalid range
 	if from >= to {
 		return
 	}
 	var (
-		hashesCh, abortCh = iterateTransactions(db, from, to, true)
-		batch             = db.NewBatch()
-		start             = time.Now()
-		logged            = start.Add(-7 * time.Second)
-		//  Since we iterate in reverse, we expect the first number to come
+		hashesCh = iterateTransactions(db, from, to, true, interrupt)
+		batch    = db.NewBatch()
+		start    = time.Now()
+		logged   = start.Add(-7 * time.Second)
+		// Since we iterate in reverse, we expect the first number to come
 		// in to be [to-1]. Therefore, setting lastNum to means that the
 		// prqueue gap-evaluation will work correctly
 		lastNum = to
@@ -203,8 +207,6 @@ func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
 		// for stats reporting
 		blocks, txs = 0, 0
 	)
-	defer close(abortCh)
-
 	for chanDelivery := range hashesCh {
 		// Push the delivery into the queue and process contiguous ranges.
 		// Since we iterate in reverse, so lower numbers have lower prio, and
@@ -215,6 +217,10 @@ func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
 			if _, priority := queue.Peek(); priority != int64(lastNum-1) {
 				break
 			}
+			// For testing
+			if hook != nil && !hook(lastNum-1) {
+				break
+			}
 			// Next block available, pop it off and index it
 			delivery := queue.PopItem().(*blockTxHashes)
 			lastNum = delivery.number
@@ -223,8 +229,7 @@ func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
 			txs += len(delivery.hashes)
 			// If enough data was accumulated in memory or we're at the last block, dump to disk
 			if batch.ValueSize() > ethdb.IdealBatchSize {
-				// Also write the tail there
-				WriteTxIndexTail(batch, lastNum)
+				WriteTxIndexTail(batch, lastNum) // Also write the tail here
 				if err := batch.Write(); err != nil {
 					log.Crit("Failed writing batch to db", "error", err)
 					return
@@ -238,67 +243,122 @@ func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
 			}
 		}
 	}
-	if lastNum < to {
-		WriteTxIndexTail(batch, lastNum)
-		// No need to write the batch if we never entered the loop above...
+	// If any uncommitted data remains, flush it.
+	if batch.ValueSize() > 0 {
+		WriteTxIndexTail(batch, lastNum) // Also write the tail here
 		if err := batch.Write(); err != nil {
 			log.Crit("Failed writing batch to db", "error", err)
 			return
 		}
 	}
-	log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+	select {
+	case <-interrupt:
+		log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+	default:
+		log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+	}
 }
 
-// UnindexTransactions removes txlookup indices of the specified block range.
-func UnindexTransactions(db ethdb.Database, from uint64, to uint64) {
+// IndexTransactions creates txlookup indices of the specified block range.
+//
+// This function iterates over the canonical chain in reverse order; its main
+// advantage is that the tx index tail flag can be written periodically even
+// before the whole indexing procedure finishes, so indexing can be resumed
+// quickly next time.
+//
+// The whole procedure is interrupted if a signal is received on the passed
+// interrupt channel.
+func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
+	indexTransactions(db, from, to, interrupt, nil)
+}
+
+// indexTransactionsForTesting is the internal debug version with an additional hook.
+func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+	indexTransactions(db, from, to, interrupt, hook)
+}
+
+// unindexTransactions removes txlookup indices of the specified block range.
+//
+// The whole procedure is interrupted if a signal is received on the passed
+// interrupt channel.
+func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
 	// short circuit for invalid range
 	if from >= to {
 		return
 	}
-	// Write flag first and then unindex the transaction indices. Some indices
-	// will be left in the database if crash happens but it's fine.
-	WriteTxIndexTail(db, to)
-	// If only one block is unindexed, do it directly
-	//if from+1 == to {
-	//	data := ReadCanonicalBodyRLP(db, uint64(from))
-	//	DeleteTxLookupEntries(db, ReadBlock(db, ReadCanonicalHash(db, from), from))
-	//	log.Info("Unindexed transactions", "blocks", 1, "tail", to)
-	//	return
-	//}
-	// TODO @holiman, add this back (if we want it)
 	var (
-		hashesCh, abortCh = iterateTransactions(db, from, to, false)
-		batch             = db.NewBatch()
-		start             = time.Now()
-		logged            = start.Add(-7 * time.Second)
+		hashesCh = iterateTransactions(db, from, to, false, interrupt)
+		batch    = db.NewBatch()
+		start    = time.Now()
+		logged   = start.Add(-7 * time.Second)
+		// we expect the first number to come in to be [from]. Therefore, setting
+		// nextNum to from means that the prqueue gap-evaluation will work correctly
+		nextNum = from
+		queue   = prque.New(nil)
+		// for stats reporting
+		blocks, txs = 0, 0
 	)
-	defer close(abortCh)
 	// Otherwise spin up the concurrent iterator and unindexer
-	blocks, txs := 0, 0
 	for delivery := range hashesCh {
-		DeleteTxLookupEntries(batch, delivery.hashes)
-		txs += len(delivery.hashes)
-		blocks++
+		// Push the delivery into the queue and process contiguous ranges.
+		queue.Push(delivery, -int64(delivery.number))
+		for !queue.Empty() {
+			// If the next available item is gapped, stop processing for now
+			if _, priority := queue.Peek(); -priority != int64(nextNum) {
+				break
+			}
+			// For testing
+			if hook != nil && !hook(nextNum) {
+				break
+			}
+			delivery := queue.PopItem().(*blockTxHashes)
+			nextNum = delivery.number + 1
+			DeleteTxLookupEntries(batch, delivery.hashes)
+			txs += len(delivery.hashes)
+			blocks++
 
-		// If enough data was accumulated in memory or we're at the last block, dump to disk
-		// A batch counts the size of deletion as '1', so we need to flush more
-		// often than that.
-		if blocks%1000 == 0 {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed writing batch to db", "error", err)
-				return
+			// If enough data was accumulated in memory or we're at the last block, dump to disk
+			// A batch counts the size of deletion as '1', so we need to flush more
+			// often than that.
+			if blocks%1000 == 0 {
+				WriteTxIndexTail(batch, nextNum)
+				if err := batch.Write(); err != nil {
+					log.Crit("Failed writing batch to db", "error", err)
+					return
+				}
+				batch.Reset()
+			}
+			// If we've spent too much time already, notify the user of what we're doing
+			if time.Since(logged) > 8*time.Second {
+				log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
 			}
-			batch.Reset()
 		}
-		// If we've spent too much time already, notify the user of what we're doing
-		if time.Since(logged) > 8*time.Second {
-			log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
-			logged = time.Now()
+	}
+	// Commit the last batch if any uncommitted data remains
+	if batch.ValueSize() > 0 {
+		WriteTxIndexTail(batch, nextNum)
+		if err := batch.Write(); err != nil {
+			log.Crit("Failed writing batch to db", "error", err)
+			return
 		}
 	}
-	if err := batch.Write(); err != nil {
-		log.Crit("Failed writing batch to db", "error", err)
-		return
+	select {
+	case <-interrupt:
+		log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+	default:
+		log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
 	}
-	log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+//
+// The whole procedure is interrupted if a signal is received on the passed
+// interrupt channel.
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
+	unindexTransactions(db, from, to, interrupt, nil)
+}
+
+// unindexTransactionsForTesting is the internal debug version with an additional hook.
+func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+	unindexTransactions(db, from, to, interrupt, hook)
 }
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
index c635cd2f12737816a4750c104fb49c33acb0b1d9..90b2639d38cfa1d5c132beb3cb83f6a46fb55293 100644
--- a/core/rawdb/chain_iterator_test.go
+++ b/core/rawdb/chain_iterator_test.go
@@ -20,6 +20,7 @@ import (
 	"math/big"
 	"reflect"
 	"sort"
+	"sync"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -59,7 +60,7 @@ func TestChainIterator(t *testing.T) {
 	}
 	for i, c := range cases {
 		var numbers []int
-		hashCh, _ := iterateTransactions(chainDb, c.from, c.to, c.reverse)
+		hashCh := iterateTransactions(chainDb, c.from, c.to, c.reverse, nil)
 		if hashCh != nil {
 			for h := range hashCh {
 				numbers = append(numbers, int(h.number))
@@ -80,3 +81,85 @@ func TestChainIterator(t *testing.T) {
 		}
 	}
 }
+
+func TestIndexTransactions(t *testing.T) {
+	// Construct test chain db
+	chainDb := NewMemoryDatabase()
+
+	var block *types.Block
+	var txs []*types.Transaction
+	for i := uint64(0); i <= 10; i++ {
+		if i == 0 {
+			block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block
+		} else {
+			tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
+			txs = append(txs, tx)
+			block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+		}
+		WriteBlock(chainDb, block)
+		WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
+	}
+	// verify checks whether the tx indices in the range [from, to)
+	// are as expected.
+	verify := func(from, to int, exist bool, tail uint64) {
+		for i := from; i < to; i++ {
+			if i == 0 {
+				continue
+			}
+			number := ReadTxLookupEntry(chainDb, txs[i-1].Hash())
+			if exist && number == nil {
+				t.Fatalf("Transaction index missing")
+			}
+			if !exist && number != nil {
+				t.Fatalf("Transaction index is not deleted")
+			}
+		}
+		number := ReadTxIndexTail(chainDb)
+		if number == nil || *number != tail {
+			t.Fatalf("Transaction tail mismatch")
+		}
+	}
+	IndexTransactions(chainDb, 5, 11, nil)
+	verify(5, 11, true, 5)
+	verify(0, 5, false, 5)
+
+	IndexTransactions(chainDb, 0, 5, nil)
+	verify(0, 11, true, 0)
+
+	UnindexTransactions(chainDb, 0, 5, nil)
+	verify(5, 11, true, 5)
+	verify(0, 5, false, 5)
+
+	UnindexTransactions(chainDb, 5, 11, nil)
+	verify(0, 11, false, 11)
+
+	// Testing corner cases
+	signal := make(chan struct{})
+	var once sync.Once
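+	// Interrupt the indexer (which walks backwards from block 10) as soon as
+	// it reaches block 8, leaving blocks 9-10 indexed and the tail at 9.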
+	indexTransactionsForTesting(chainDb, 5, 11, signal, func(n uint64) bool {
+		if n <= 8 {
+			once.Do(func() {
+				close(signal)
+			})
+			return false
+		}
+		return true
+	})
+	verify(9, 11, true, 9)
+	verify(0, 9, false, 9)
+	IndexTransactions(chainDb, 0, 9, nil)
+
+	signal = make(chan struct{})
+	var once2 sync.Once
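+	// Interrupt the unindexer (which walks forwards from block 0) as soon as
+	// it reaches block 8, leaving blocks 8-10 still indexed and the tail at 8.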
+	unindexTransactionsForTesting(chainDb, 0, 11, signal, func(n uint64) bool {
+		if n >= 8 {
+			once2.Do(func() {
+				close(signal)
+			})
+			return false
+		}
+		return true
+	})
+	verify(8, 11, true, 8)
+	verify(0, 8, false, 8)
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index e2b093a34a6cc3de3807cbbb1b65a57c8ba7ec60..dbc5025d5d2cdf812133e7a24cb208ec1b480ef5 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -51,6 +51,12 @@ var (
 	// snapshotJournalKey tracks the in-memory diff layers across restarts.
 	snapshotJournalKey = []byte("SnapshotJournal")
 
+	// snapshotGeneratorKey tracks the snapshot generation marker across restarts.
+	snapshotGeneratorKey = []byte("SnapshotGenerator")
+
+	// snapshotRecoveryKey tracks the snapshot recovery marker across restarts.
+	snapshotRecoveryKey = []byte("SnapshotRecovery")
+
 	// txIndexTailKey tracks the oldest block whose transactions have been indexed.
 	txIndexTailKey = []byte("TransactionIndexTail")
 
diff --git a/core/state/access_list.go b/core/state/access_list.go
new file mode 100644
index 0000000000000000000000000000000000000000..4194691345958cf26d488d0d36b0603686755117
--- /dev/null
+++ b/core/state/access_list.go
@@ -0,0 +1,136 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+)
+
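+// accessList tracks the per-transaction access list. Each address maps to an
+// index into the slots slice; an index of -1 marks an address that is present
+// without any storage slots.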
+type accessList struct {
+	addresses map[common.Address]int
+	slots     []map[common.Hash]struct{}
+}
+
+// ContainsAddress returns true if the address is in the access list.
+func (al *accessList) ContainsAddress(address common.Address) bool {
+	_, ok := al.addresses[address]
+	return ok
+}
+
+// Contains checks if a slot within an account is present in the access list, returning
+// separate flags for the presence of the account and the slot respectively.
+func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+	idx, ok := al.addresses[address]
+	if !ok {
+		// no such address (and hence zero slots)
+		return false, false
+	}
+	if idx == -1 {
+		// address yes, but no slots
+		return true, false
+	}
+	_, slotPresent = al.slots[idx][slot]
+	return true, slotPresent
+}
+
+// newAccessList creates a new accessList.
+func newAccessList() *accessList {
+	return &accessList{
+		addresses: make(map[common.Address]int),
+	}
+}
+
+// Copy creates an independent copy of an accessList.
+func (a *accessList) Copy() *accessList {
+	cp := newAccessList()
+	for k, v := range a.addresses {
+		cp.addresses[k] = v
+	}
+	cp.slots = make([]map[common.Hash]struct{}, len(a.slots))
+	for i, slotMap := range a.slots {
+		newSlotmap := make(map[common.Hash]struct{}, len(slotMap))
+		for k := range slotMap {
+			newSlotmap[k] = struct{}{}
+		}
+		cp.slots[i] = newSlotmap
+	}
+	return cp
+}
+
+// AddAddress adds an address to the access list, and returns 'true' if the operation
+// caused a change (addr was not previously in the list).
+func (al *accessList) AddAddress(address common.Address) bool {
+	if _, present := al.addresses[address]; present {
+		return false
+	}
+	al.addresses[address] = -1
+	return true
+}
+
+// AddSlot adds the specified (addr, slot) combo to the access list.
+// Return values are:
+// - address added
+// - slot added
+// For any 'true' value returned, a corresponding journal entry must be made.
+func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
+	idx, addrPresent := al.addresses[address]
+	if !addrPresent || idx == -1 {
+		// Address not present, or addr present but no slots there
+		al.addresses[address] = len(al.slots)
+		slotmap := map[common.Hash]struct{}{slot: {}}
+		al.slots = append(al.slots, slotmap)
+		return !addrPresent, true
+	}
+	// There is already an (address,slot) mapping
+	slotmap := al.slots[idx]
+	if _, ok := slotmap[slot]; !ok {
+		slotmap[slot] = struct{}{}
+		// Journal add slot change
+		return false, true
+	}
+	// No changes required
+	return false, false
+}
+
+// DeleteSlot removes an (address, slot)-tuple from the access list.
+// This operation needs to be performed in the same order as the addition happened.
+// This method is meant to be used by the journal, which maintains ordering of
+// operations.
+func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) {
+	idx, addrOk := al.addresses[address]
+	// There are two ways this can fail
+	if !addrOk {
+		panic("reverting slot change, address not present in list")
+	}
+	slotmap := al.slots[idx]
+	delete(slotmap, slot)
+	// If that was the last (first) slot, remove it
+	// Since additions and rollbacks are always performed in order,
+	// we can delete the item without worrying about screwing up later indices
+	if len(slotmap) == 0 {
+		al.slots = al.slots[:idx]
+		al.addresses[address] = -1
+	}
+}
+
+// DeleteAddress removes an address from the access list. This operation
+// needs to be performed in the same order as the addition happened.
+// This method is meant to be used by the journal, which maintains ordering of
+// operations.
+func (al *accessList) DeleteAddress(address common.Address) {
+	delete(al.addresses, address)
+}
diff --git a/core/state/journal.go b/core/state/journal.go
index f242dac5afb0b32a24f303ccbea101bbaca56a3d..2070f30875541497ca688906703a60bb44c1f820 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -130,6 +130,14 @@ type (
 	touchChange struct {
 		account *common.Address
 	}
+	// Changes to the access list
+	accessListAddAccountChange struct {
+		address *common.Address
+	}
+	accessListAddSlotChange struct {
+		address *common.Address
+		slot    *common.Hash
+	}
 )
 
 func (ch createObjectChange) revert(s *StateDB) {
@@ -234,3 +242,28 @@ func (ch addPreimageChange) revert(s *StateDB) {
 func (ch addPreimageChange) dirtied() *common.Address {
 	return nil
 }
+
+func (ch accessListAddAccountChange) revert(s *StateDB) {
+	/*
+		One important invariant here is that whenever a (addr, slot) is added, if the
+		addr is not already present, the add causes two journal entries:
+		- one for the address,
+		- one for the (address,slot)
+		Therefore, when unrolling the change, we can always blindly delete the
+		(addr) at this point, since no storage adds can remain when we come upon
+		a single (addr) change.
+	*/
+	s.accessList.DeleteAddress(*ch.address)
+}
+
+func (ch accessListAddAccountChange) dirtied() *common.Address {
+	return nil
+}
+
+func (ch accessListAddSlotChange) revert(s *StateDB) {
+	s.accessList.DeleteSlot(*ch.address, *ch.slot)
+}
+
+func (ch accessListAddSlotChange) dirtied() *common.Address {
+	return nil
+}
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 8460cd332f9915f6d46d09bf0aa7178fe60ec588..40ff5ade4c37e0832ef7679a9cf5999db1afb1f2 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // reverse reverses the contents of a byte slice. It's used to update random accs
@@ -429,6 +430,81 @@ func TestDiskPartialMerge(t *testing.T) {
 	}
 }
 
+// Tests that when the bottom-most diff layer is merged into the disk
+// layer, the corresponding generator is persisted correctly.
+func TestDiskGeneratorPersistence(t *testing.T) {
+	var (
+		accOne        = randomHash()
+		accTwo        = randomHash()
+		accOneSlotOne = randomHash()
+		accOneSlotTwo = randomHash()
+
+		accThree     = randomHash()
+		accThreeSlot = randomHash()
+		baseRoot     = randomHash()
+		diffRoot     = randomHash()
+		diffTwoRoot  = randomHash()
+		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
+	)
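+	// genMarker mimics an in-progress generation: an account hash followed
+	// by a storage slot hash.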
+	// Testing scenario 1, the disk layer is still under construction.
+	db := rawdb.NewMemoryDatabase()
+
+	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
+	rawdb.WriteSnapshotRoot(db, baseRoot)
+
+	// Create a disk layer based on all above updates
+	snaps := &Tree{
+		layers: map[common.Hash]snapshot{
+			baseRoot: &diskLayer{
+				diskdb:    db,
+				cache:     fastcache.New(500 * 1024),
+				root:      baseRoot,
+				genMarker: genMarker,
+			},
+		},
+	}
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
+		accTwo: accTwo[:],
+	}, nil); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	if err := snaps.Cap(diffRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob := rawdb.ReadSnapshotGenerator(db)
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if !bytes.Equal(generator.Marker, genMarker) {
+		t.Fatalf("Generator marker mismatch")
+	}
+	// Testing scenario 2, the disk layer is fully generated
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
+		accThree: accThree.Bytes(),
+	}, map[common.Hash]map[common.Hash][]byte{
+		accThree: {accThreeSlot: accThreeSlot.Bytes()},
+	}); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
+	diskLayer.genMarker = nil // Construction finished
+	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob = rawdb.ReadSnapshotGenerator(db)
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if len(generator.Marker) != 0 {
+		t.Fatalf("Failed to update snapshot generator")
+	}
+}
+
 // Tests that merging something into a disk layer persists it into the database
 // and invalidates any previously written and cached values, discarding anything
 // after the in-progress generation marker.
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index a6b3e4420d8c97f9c474907d131589a737bc7dd9..92c7640c40103789925c44656bc818ef8c28b7bc 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -19,6 +19,7 @@ package snapshot
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"math/big"
 	"time"
 
@@ -112,9 +113,42 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 		genAbort:   make(chan chan *generatorStats),
 	}
 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
+	log.Debug("Start snapshot generation", "root", root)
 	return base
 }
 
+// journalProgress persists the generator stats into the database to resume later.
+func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
+	// Write out the generator marker. Note it's a standalone disk layer
+	// generator entry which is not mixed with the journal. It's fine if
+	// the generator is persisted while the journal is not.
+	entry := journalGenerator{
+		Done:   marker == nil,
+		Marker: marker,
+	}
+	if stats != nil {
+		entry.Wiping = (stats.wiping != nil)
+		entry.Accounts = stats.accounts
+		entry.Slots = stats.slots
+		entry.Storage = uint64(stats.storage)
+	}
+	blob, err := rlp.EncodeToBytes(entry)
+	if err != nil {
+		panic(err) // Cannot happen, here to catch dev errors
+	}
+	var logstr string
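+	// An empty marker means generation finished; a bare account hash marks an
+	// account boundary; account hash plus storage key marks a storage slot.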
+	switch len(marker) {
+	case 0:
+		logstr = "done"
+	case common.HashLength:
+		logstr = fmt.Sprintf("%#x", marker)
+	default:
+		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
+	}
+	log.Debug("Journalled generator progress", "progress", logstr)
+	rawdb.WriteSnapshotGenerator(db, blob)
+}
+
 // generate is a background thread that iterates over the state and storage tries,
 // constructing the state snapshot. All the arguments are purely for statistics
 // gethering and logging, since the method surfs the blocks as they arrive, often
@@ -186,11 +220,15 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 		if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
 			// Only write and set the marker if we actually did something useful
 			if batch.ValueSize() > 0 {
+				// Ensure the generator entry is in sync with the data
+				marker := accountHash[:]
+				journalProgress(batch, marker, stats)
+
 				batch.Write()
 				batch.Reset()
 
 				dl.lock.Lock()
-				dl.genMarker = accountHash[:]
+				dl.genMarker = marker
 				dl.lock.Unlock()
 			}
 			if abort != nil {
@@ -227,11 +265,15 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 				if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
 					// Only write and set the marker if we actually did something useful
 					if batch.ValueSize() > 0 {
+						// Ensure the generator entry is in sync with the data
+						marker := append(accountHash[:], storeIt.Key...)
+						journalProgress(batch, marker, stats)
+
 						batch.Write()
 						batch.Reset()
 
 						dl.lock.Lock()
-						dl.genMarker = append(accountHash[:], storeIt.Key...)
+						dl.genMarker = marker
 						dl.lock.Unlock()
 					}
 					if abort != nil {
@@ -263,6 +305,9 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 	}
 	// Snapshot fully generated, set the marker to nil
 	if batch.ValueSize() > 0 {
+		// Ensure the generator entry is in sync with the data
+		journalProgress(batch, nil, stats)
+
 		batch.Write()
 	}
 	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index fc1053f818d634ab14f9114d9413c543f1ed5602..178ba08902763484b85ab9779ffae9c4a033bc21 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,6 +33,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
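+// journalVersion identifies the format of the on-disk diff journal, so that
+// an incompatible journal can be detected and discarded on startup.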
+const journalVersion uint64 = 0
+
 // journalGenerator is a disk layer entry containing the generator progress marker.
 type journalGenerator struct {
 	Wiping   bool // Whether the database was in progress of being wiped
@@ -61,8 +63,91 @@ type journalStorage struct {
 	Vals [][]byte
 }
 
+// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
+func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the journal. The legacy journal must exist, since even with
+	// zero diff layers it records whether the snapshot is already generated
+	// or still in progress.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// Read the snapshot generation progress for the disk layer
+	var generator journalGenerator
+	if err := r.Decode(&generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, generator, err
+	}
+	return snapshot, generator, nil
+}
+
+// loadAndParseJournal tries to parse the snapshot journal in the latest format.
+func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the disk layer generator. It must exist whether or not the
+	// snapshot is fully generated, otherwise the entire disk layer is
+	// invalid.
+	generatorBlob := rawdb.ReadSnapshotGenerator(db)
+	if len(generatorBlob) == 0 {
+		return nil, journalGenerator{}, errors.New("missing snapshot generator")
+	}
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
+	}
+	// Retrieve the diff layer journal. It's possible that the journal is
+	// missing, e.g. Geth crashed without persisting the diff journal while
+	// the disk layer was still generating.
+	// So if there is no journal, or the journal is invalid (e.g. it doesn't
+	// match the disk layer, or it's a legacy-format journal), we just
+	// discard all diffs and try to recover them later.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "missing")
+		return base, generator, nil
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// Firstly, resolve the first element as the journal version
+	version, err := r.Uint()
+	if err != nil {
+		log.Warn("Failed to resolve the journal version", "error", err)
+		return base, generator, nil
+	}
+	if version != journalVersion {
+		log.Warn("Discarded the snapshot journal with wrong version", "required", journalVersion, "got", version)
+		return base, generator, nil
+	}
+	// Secondly, resolve the disk layer root, ensuring it's continuous
+	// with the disk layer. Since the journal version is now known to be
+	// correct, everything should resolve properly.
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		return nil, journalGenerator{}, errors.New("missing disk layer root")
+	}
+	// The diff journal doesn't match the disk layer, discard it.
+	// This can happen when Geth crashes without persisting the
+	// latest diff journal.
+	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
+		log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "unmatched")
+		return base, generator, nil
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, journalGenerator{}, err
+	}
+	log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root())
+	return snapshot, generator, nil
+}
+
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 	// is present in the database (or crashed mid-update).
 	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
@@ -75,28 +160,36 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache:  fastcache.New(cache * 1024 * 1024),
 		root:   baseRoot,
 	}
-	// Retrieve the journal, it must exist since even for 0 layer it stores whether
-	// we've already generated the snapshot or are in progress only
-	journal := rawdb.ReadSnapshotJournal(diskdb)
-	if len(journal) == 0 {
-		return nil, errors.New("missing or corrupted snapshot journal")
-	}
-	r := rlp.NewStream(bytes.NewReader(journal), 0)
-
-	// Read the snapshot generation progress for the disk layer
-	var generator journalGenerator
-	if err := r.Decode(&generator); err != nil {
-		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	var legacy bool
+	snapshot, generator, err := loadAndParseJournal(diskdb, base)
+	if err != nil {
+		log.Warn("Failed to load new-format journal", "error", err)
+		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
+		legacy = true
 	}
-	// Load all the snapshot diffs from the journal
-	snapshot, err := loadDiffLayer(base, r)
 	if err != nil {
 		return nil, err
 	}
-	// Entire snapshot journal loaded, sanity check the head and return
-	// Journal doesn't exist, don't worry if it's not supposed to
+	// Entire snapshot journal loaded, sanity check the head. If the loaded
+	// snapshot doesn't match the current state root, print a warning log
+	// or discard the entire snapshot if it's a legacy one.
+	//
+	// Possible scenario: Geth crashed without persisting the journal and then
+	// restarted; the head is rewound to a point with available state (trie)
+	// below the snapshot. In this case the snapshot can be recovered by
+	// re-executing blocks, but that is not available right now.
 	if head := snapshot.Root(); head != root {
-		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		// If it's a legacy snapshot, or a new-format snapshot not in
+		// recovery mode, return an error here to force a rebuild of
+		// the entire snapshot.
+		if legacy || !recovery {
+			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		}
+		// We're in snapshot recovery, so the assumption holds that
+		// the disk layer is always above the chain head. It can
+		// eventually be recovered once the chain head passes the
+		// disk layer.
+		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {
@@ -183,8 +276,8 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
 }
 
-// Journal writes the persistent layer generator stats into a buffer to be stored
-// in the database as the snapshot journal.
+// Journal terminates any in-progress snapshot generation, also implicitly pushing
+// the progress into the database.
 func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	// If the snapshot is currently being generated, abort it
 	var stats *generatorStats
@@ -200,6 +293,85 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	dl.lock.RLock()
 	defer dl.lock.RUnlock()
 
+	if dl.stale {
+		return common.Hash{}, ErrSnapshotStale
+	}
+	// Ensure the generator stats are written even if no generation ran this cycle
+	journalProgress(dl.diskdb, dl.genMarker, stats)
+
+	log.Debug("Journalled disk layer", "root", dl.root)
+	return dl.root, nil
+}
+
+// Journal writes the memory layer contents into a buffer to be stored in the
+// database as the snapshot journal.
+func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+	// Journal the parent first
+	base, err := dl.parent.Journal(buffer)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	// Ensure the layer didn't get stale
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.Stale() {
+		return common.Hash{}, ErrSnapshotStale
+	}
+	// Everything below was journalled, persist this layer too
+	if err := rlp.Encode(buffer, dl.root); err != nil {
+		return common.Hash{}, err
+	}
+	destructs := make([]journalDestruct, 0, len(dl.destructSet))
+	for hash := range dl.destructSet {
+		destructs = append(destructs, journalDestruct{Hash: hash})
+	}
+	if err := rlp.Encode(buffer, destructs); err != nil {
+		return common.Hash{}, err
+	}
+	accounts := make([]journalAccount, 0, len(dl.accountData))
+	for hash, blob := range dl.accountData {
+		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
+	}
+	if err := rlp.Encode(buffer, accounts); err != nil {
+		return common.Hash{}, err
+	}
+	storage := make([]journalStorage, 0, len(dl.storageData))
+	for hash, slots := range dl.storageData {
+		keys := make([]common.Hash, 0, len(slots))
+		vals := make([][]byte, 0, len(slots))
+		for key, val := range slots {
+			keys = append(keys, key)
+			vals = append(vals, val)
+		}
+		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
+	}
+	if err := rlp.Encode(buffer, storage); err != nil {
+		return common.Hash{}, err
+	}
+	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
+	return base, nil
+}
+
+// LegacyJournal writes the persistent layer generator stats into a buffer
+// to be stored in the database as the snapshot journal.
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+	// If the snapshot is currently being generated, abort it
+	var stats *generatorStats
+	if dl.genAbort != nil {
+		abort := make(chan *generatorStats)
+		dl.genAbort <- abort
+
+		if stats = <-abort; stats != nil {
+			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
+		}
+	}
+	// Ensure the layer didn't get stale
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
 	if dl.stale {
 		return common.Hash{}, ErrSnapshotStale
 	}
@@ -214,6 +386,7 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 		entry.Slots = stats.slots
 		entry.Storage = uint64(stats.storage)
 	}
+	log.Debug("Legacy journalled disk layer", "root", dl.root)
 	if err := rlp.Encode(buffer, entry); err != nil {
 		return common.Hash{}, err
 	}
@@ -222,9 +395,11 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 
 // Journal writes the memory layer contents into a buffer to be stored in the
 // database as the snapshot journal.
-func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
 	// Journal the parent first
-	base, err := dl.parent.Journal(buffer)
+	base, err := dl.parent.LegacyJournal(buffer)
 	if err != nil {
 		return common.Hash{}, err
 	}
@@ -266,5 +441,6 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err := rlp.Encode(buffer, storage); err != nil {
 		return common.Hash{}, err
 	}
+	log.Debug("Legacy journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index f6c5a6a9a8caea58e60da18165338f2bf9d89f0e..60b4158b5642786133d8809ebe576b571bc208a2 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
 
@@ -86,6 +87,10 @@ var (
 	// range of accounts covered.
 	ErrNotCoveredYet = errors.New("not covered yet")
 
+	// ErrNotConstructed is returned if the caller wants to iterate the snapshot
+	// while the generation is not finished yet.
+	ErrNotConstructed = errors.New("snapshot is not constructed")
+
 	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
 	// that forms a cycle in the snapshot tree.
 	errSnapshotCycle = errors.New("snapshot cycle")
@@ -132,6 +137,10 @@ type snapshot interface {
 	// flattening everything down (bad for reorgs).
 	Journal(buffer *bytes.Buffer) (common.Hash, error)
 
+	// LegacyJournal is basically identical to Journal; it's the legacy version for
+	// flushing the legacy journal. Its only remaining purpose is testing.
+	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
+
 	// Stale return whether this layer has become stale (was flattened across) or
 	// if it's still live.
 	Stale() bool
@@ -164,10 +173,12 @@ type Tree struct {
 // store (with a number of memory layers from a journal), ensuring that the head
 // of the snapshot matches the expected one.
 //
-// If the snapshot is missing or inconsistent, the entirety is deleted and will
-// be reconstructed from scratch based on the tries in the key-value store, on a
-// background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
+// If the snapshot is missing or the disk layer is broken, the entire snapshot
+// is deleted and will be reconstructed from scratch based on the tries in the
+// key-value store, on a background thread. If the memory layers from the
+// journal are not continuous with the disk layer, or the journal is missing,
+// all diffs are discarded iff we're in "recovery" mode, otherwise a rebuild
+// is mandatory.
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
 		diskdb: diskdb,
@@ -179,7 +190,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		defer snap.waitBuild()
 	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
-	head, err := loadSnapshot(diskdb, triedb, cache, root)
+	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
 	if err != nil {
 		log.Warn("Failed to load snapshot, regenerating", "err", err)
 		snap.Rebuild(root)
@@ -194,7 +205,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 }
 
 // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to  be used by tests to ensure we're testing what we believe we are.
+// to be used by tests to ensure we're testing what we believe we are.
 func (t *Tree) waitBuild() {
 	// Find the rebuild termination channel
 	var done chan struct{}
@@ -411,6 +422,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
 
 // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
 // it. The method will panic if called onto a non-bottom-most diff layer.
+//
+// The disk layer persistence should be atomic: all updates must be discarded
+// if the whole transition is not finished.
 func diffToDisk(bottom *diffLayer) *diskLayer {
 	var (
 		base  = bottom.parent.(*diskLayer)
@@ -423,8 +437,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.genAbort <- abort
 		stats = <-abort
 	}
-	// Start by temporarily deleting the current snapshot block marker. This
-	// ensures that in the case of a crash, the entire snapshot is invalidated.
+	// Put the deletion in the batch writer, flush all updates in the final step.
 	rawdb.DeleteSnapshotRoot(batch)
 
 	// Mark the original base as stale as we're going to create a new wrapper
@@ -467,12 +480,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.cache.Set(hash[:], data)
 		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
 
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write account snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 		snapshotFlushAccountItemMeter.Mark(1)
 		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
 	}
@@ -501,18 +508,19 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 			snapshotFlushStorageItemMeter.Mark(1)
 			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
 		}
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write storage snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 	}
 	// Update the snapshot block marker and write any remainder data
 	rawdb.WriteSnapshotRoot(batch, bottom.root)
+
+	// Write out the generator progress marker and report
+	journalProgress(batch, base.genMarker, stats)
+
+	// Flush all the updates in a single db operation, ensuring the
+	// disk layer transition is atomic.
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
+	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
 	res := &diskLayer{
 		root:       bottom.root,
 		cache:      base.cache,
@@ -550,7 +558,21 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
+	// Firstly, write out the journal metadata
 	journal := new(bytes.Buffer)
+	if err := rlp.Encode(journal, journalVersion); err != nil {
+		return common.Hash{}, err
+	}
+	diskroot := t.diskRoot()
+	if diskroot == (common.Hash{}) {
+		return common.Hash{}, errors.New("invalid disk root")
+	}
+	// Secondly, write out the disk layer root, ensuring the
+	// diff journal is continuous with the disk layer.
+	if err := rlp.Encode(journal, diskroot); err != nil {
+		return common.Hash{}, err
+	}
+	// Finally write out the journal of each layer in reverse order.
 	base, err := snap.(snapshot).Journal(journal)
 	if err != nil {
 		return common.Hash{}, err
@@ -560,6 +582,29 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	return base, nil
 }
 
+// LegacyJournal is basically identical to Journal; it's the legacy
+// version for flushing the legacy journal. Its only remaining purpose
+// is testing.
+func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
+	// Retrieve the head snapshot to journal
+	snap := t.Snapshot(root)
+	if snap == nil {
+		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
+	}
+	// Run the journaling
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	journal := new(bytes.Buffer)
+	base, err := snap.(snapshot).LegacyJournal(journal)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	// Store the journal into the database and return
+	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
+	return base, nil
+}
+
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
 // generator with the given root hash.
@@ -567,6 +612,10 @@ func (t *Tree) Rebuild(root common.Hash) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
+	// Firstly, delete any recovery flag in the database, since we are now
+	// building a brand new snapshot.
+	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
+
 	// Track whether there's a wipe currently running and keep it alive if so
 	var wiper chan struct{}
 
@@ -609,11 +658,79 @@ func (t *Tree) Rebuild(root common.Hash) {
 // AccountIterator creates a new account iterator for the specified root hash and
 // seeks to a starting account hash.
 func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
+	ok, err := t.generating()
+	if err != nil {
+		return nil, err
+	}
+	if ok {
+		return nil, ErrNotConstructed
+	}
 	return newFastAccountIterator(t, root, seek)
 }
 
 // StorageIterator creates a new storage iterator for the specified root hash and
 // account. The iterator will be move to the specific start position.
 func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+	ok, err := t.generating()
+	if err != nil {
+		return nil, err
+	}
+	if ok {
+		return nil, ErrNotConstructed
+	}
 	return newFastStorageIterator(t, root, account, seek)
 }
+
+// disklayer is an internal helper function to return the disk layer.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) disklayer() *diskLayer {
+	var snap snapshot
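+	// Any layer will do here: the parent chain of every layer ends at the
+	// single disk layer at the bottom of the tree.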
+	for _, s := range t.layers {
+		snap = s
+		break
+	}
+	if snap == nil {
+		return nil
+	}
+	switch layer := snap.(type) {
+	case *diskLayer:
+		return layer
+	case *diffLayer:
+		return layer.origin
+	default:
+		panic(fmt.Sprintf("%T: undefined layer", snap))
+	}
+}
+
+// diskRoot is an internal helper function to return the disk layer root.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) diskRoot() common.Hash {
+	disklayer := t.disklayer()
+	if disklayer == nil {
+		return common.Hash{}
+	}
+	return disklayer.Root()
+}
+
+// generating is an internal helper function which reports whether the snapshot
+// is still under construction.
+func (t *Tree) generating() (bool, error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	layer := t.disklayer()
+	if layer == nil {
+		return false, errors.New("disk layer is missing")
+	}
+	layer.lock.RLock()
+	defer layer.lock.RUnlock()
+	return layer.genMarker != nil, nil
+}
+
+// DiskRoot is an external helper function to return the disk layer root.
+func (t *Tree) DiskRoot() common.Hash {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.diskRoot()
+}
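
Aside: with the generating() guard above, iterator construction now fails with ErrNotConstructed while the snapshot is still being built, since iterating a half-generated snapshot would return incomplete data. A usage sketch (the process callback is hypothetical; Release/Next/Hash/Account are the AccountIterator methods defined elsewhere in this package):

    it, err := snaps.AccountIterator(root, common.Hash{})
    if err != nil {
        return err // likely ErrNotConstructed: still generating, retry later
    }
    defer it.Release()
    for it.Next() {
        process(it.Hash(), it.Account()) // hypothetical consumer
    }
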
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 36f7d863af9ba80c9854aa5d37664e8dfa04d061..fe30f595ed868ff7de088d0bf51a5a53d8ecbc5b 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -93,6 +93,9 @@ type StateDB struct {
 
 	preimages map[common.Hash][]byte
 
+	// Per-transaction access list
+	accessList *accessList
+
 	// Journal of state modifications. This is the backbone of
 	// Snapshot and RevertToSnapshot.
 	journal        *journal
@@ -129,6 +132,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
 		logs:                make(map[common.Hash][]*types.Log),
 		preimages:           make(map[common.Hash][]byte),
 		journal:             newJournal(),
+		accessList:          newAccessList(),
 	}
 	if sdb.snaps != nil {
 		if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
@@ -178,6 +182,7 @@ func (s *StateDB) Reset(root common.Hash) error {
 			s.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
 		}
 	}
+	s.accessList = newAccessList()
 	return nil
 }
 
@@ -697,6 +702,12 @@ func (s *StateDB) Copy() *StateDB {
 	for hash, preimage := range s.preimages {
 		state.preimages[hash] = preimage
 	}
+	// Do we need to copy the access list? In practice: No. At the start of a
+	// transaction, the access list is empty. We only ever copy state
+	// _between_ transactions/blocks, never in the middle of a transaction.
+	// However, it doesn't cost us much to copy an empty list, so we do it anyway
+	// to not blow up if we ever decide to copy it in the middle of a transaction.
+	state.accessList = s.accessList.Copy()
 	return state
 }
 
@@ -798,6 +809,7 @@ func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) {
 	s.thash = thash
 	s.bhash = bhash
 	s.txIndex = ti
+	s.accessList = newAccessList()
 }
 
 func (s *StateDB) clearJournalAndRefund() {
@@ -869,11 +881,50 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 			if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
 				log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
 			}
-			if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
-				log.Warn("Failed to cap snapshot tree", "root", root, "layers", 127, "err", err)
+			// Keep 128 diff layers in the memory, persistent layer is 129th.
+			// - head layer is paired with HEAD state
+			// - head-1 layer is paired with HEAD-1 state
+			// - head-127 layer (bottom-most diff layer) is paired with HEAD-127 state
+			if err := s.snaps.Cap(root, 128); err != nil {
+				log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
 			}
 		}
 		s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
 	}
 	return root, err
 }
+
+// AddAddressToAccessList adds the given address to the access list
+func (s *StateDB) AddAddressToAccessList(addr common.Address) {
+	if s.accessList.AddAddress(addr) {
+		s.journal.append(accessListAddAccountChange{&addr})
+	}
+}
+
+// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
+func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
+	addrMod, slotMod := s.accessList.AddSlot(addr, slot)
+	if addrMod {
+		// In practice, this should not happen, since there is no way to enter the
+		// scope of 'address' without having the 'address' become already added
+		// to the access list (via call-variant, create, etc).
+		// Better safe than sorry, though
+		s.journal.append(accessListAddAccountChange{&addr})
+	}
+	if slotMod {
+		s.journal.append(accessListAddSlotChange{
+			address: &addr,
+			slot:    &slot,
+		})
+	}
+}
+
+// AddressInAccessList returns true if the given address is in the access list.
+func (s *StateDB) AddressInAccessList(addr common.Address) bool {
+	return s.accessList.ContainsAddress(addr)
+}
+
+// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+	return s.accessList.Contains(addr, slot)
+}
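
For orientation: the accessList type driving these methods lives in core/state/access_list.go, which is not part of this hunk. Below is a minimal sketch consistent with how the test file next indexes state.accessList.addresses and state.accessList.slots; details such as the -1 sentinel are assumptions, and Copy plus the delete helpers used by journal reverts are omitted:

    package state

    import "github.com/ethereum/go-ethereum/common"

    // accessList maps each address to an index into slots;
    // -1 marks an address present without tracked storage slots.
    type accessList struct {
        addresses map[common.Address]int
        slots     []map[common.Hash]struct{}
    }

    func newAccessList() *accessList {
        return &accessList{addresses: make(map[common.Address]int)}
    }

    // AddAddress reports whether the address was newly added.
    func (al *accessList) AddAddress(addr common.Address) bool {
        if _, present := al.addresses[addr]; present {
            return false
        }
        al.addresses[addr] = -1
        return true
    }

    // AddSlot adds an (addr, slot) pair, reporting whether the address
    // and/or the slot were newly added.
    func (al *accessList) AddSlot(addr common.Address, slot common.Hash) (addrChange bool, slotChange bool) {
        idx, addrPresent := al.addresses[addr]
        if !addrPresent || idx == -1 {
            // Address unknown, or known without slots: allocate a slot map.
            al.addresses[addr] = len(al.slots)
            al.slots = append(al.slots, map[common.Hash]struct{}{slot: {}})
            return !addrPresent, true
        }
        if _, ok := al.slots[idx][slot]; !ok {
            al.slots[idx][slot] = struct{}{}
            return false, true
        }
        return false, false
    }

    func (al *accessList) ContainsAddress(addr common.Address) bool {
        _, ok := al.addresses[addr]
        return ok
    }

    // Contains reports presence of the address and of the exact pair.
    func (al *accessList) Contains(addr common.Address, slot common.Hash) (addrPresent bool, slotPresent bool) {
        idx, ok := al.addresses[addr]
        if !ok || idx == -1 {
            return ok, false
        }
        _, slotPresent = al.slots[idx][slot]
        return true, slotPresent
    }
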
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 36ff2713318830c40bf9a925480957f4d13e4a23..70d01ff3dd9c892c6cb3f1e16f12bd5893432006 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -328,6 +328,20 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
 			},
 			args: make([]int64, 1),
 		},
+		{
+			name: "AddAddressToAccessList",
+			fn: func(a testAction, s *StateDB) {
+				s.AddAddressToAccessList(addr)
+			},
+		},
+		{
+			name: "AddSlotToAccessList",
+			fn: func(a testAction, s *StateDB) {
+				s.AddSlotToAccessList(addr,
+					common.Hash{byte(a.args[0])})
+			},
+			args: make([]int64, 1),
+		},
 	}
 	action := actions[r.Intn(len(actions))]
 	var nameargs []string
@@ -727,3 +741,177 @@ func TestMissingTrieNodes(t *testing.T) {
 		t.Fatalf("expected error, got root :%x", root)
 	}
 }
+
+func TestStateDBAccessList(t *testing.T) {
+	// Some helpers
+	addr := func(a string) common.Address {
+		return common.HexToAddress(a)
+	}
+	slot := func(a string) common.Hash {
+		return common.HexToHash(a)
+	}
+
+	memDb := rawdb.NewMemoryDatabase()
+	db := NewDatabase(memDb)
+	state, _ := New(common.Hash{}, db, nil)
+	state.accessList = newAccessList()
+
+	verifyAddrs := func(astrings ...string) {
+		t.Helper()
+		// convert to common.Address form
+		var addresses []common.Address
+		var addressMap = make(map[common.Address]struct{})
+		for _, astring := range astrings {
+			address := addr(astring)
+			addresses = append(addresses, address)
+			addressMap[address] = struct{}{}
+		}
+		// Check that the given addresses are in the access list
+		for _, address := range addresses {
+			if !state.AddressInAccessList(address) {
+				t.Fatalf("expected %x to be in access list", address)
+			}
+		}
+		// Check that only the expected addresses are present in the access list
+		for address := range state.accessList.addresses {
+			if _, exist := addressMap[address]; !exist {
+				t.Fatalf("extra address %x in access list", address)
+			}
+		}
+	}
+	verifySlots := func(addrString string, slotStrings ...string) {
+		t.Helper()
+		if !state.AddressInAccessList(addr(addrString)) {
+			t.Fatalf("scope missing address/slots %v", addrString)
+		}
+		var address = addr(addrString)
+		// convert to common.Hash form
+		var slots []common.Hash
+		var slotMap = make(map[common.Hash]struct{})
+		for _, slotString := range slotStrings {
+			s := slot(slotString)
+			slots = append(slots, s)
+			slotMap[s] = struct{}{}
+		}
+		// Check that the expected items are in the access list
+		for i, s := range slots {
+			if _, slotPresent := state.SlotInAccessList(address, s); !slotPresent {
+				t.Fatalf("input %d: scope missing slot %v (address %v)", i, s, addrString)
+			}
+		}
+		// Check that no extra elements are in the access list
+		index := state.accessList.addresses[address]
+		if index >= 0 {
+			stateSlots := state.accessList.slots[index]
+			for s := range stateSlots {
+				if _, slotPresent := slotMap[s]; !slotPresent {
+					t.Fatalf("scope has extra slot %v (address %v)", s, addrString)
+				}
+			}
+		}
+	}
+
+	state.AddAddressToAccessList(addr("aa"))          // 1
+	state.AddSlotToAccessList(addr("bb"), slot("01")) // 2,3
+	state.AddSlotToAccessList(addr("bb"), slot("02")) // 4
+	verifyAddrs("aa", "bb")
+	verifySlots("bb", "01", "02")
+
+	// Make a copy
+	stateCopy1 := state.Copy()
+	if exp, got := 4, state.journal.length(); exp != got {
+		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+	}
+
+	// same again, should cause no journal entries
+	state.AddSlotToAccessList(addr("bb"), slot("01"))
+	state.AddSlotToAccessList(addr("bb"), slot("02"))
+	state.AddAddressToAccessList(addr("aa"))
+	if exp, got := 4, state.journal.length(); exp != got {
+		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+	}
+	// some new ones
+	state.AddSlotToAccessList(addr("bb"), slot("03")) // 5
+	state.AddSlotToAccessList(addr("aa"), slot("01")) // 6
+	state.AddSlotToAccessList(addr("cc"), slot("01")) // 7,8
+	state.AddAddressToAccessList(addr("cc"))
+	if exp, got := 8, state.journal.length(); exp != got {
+		t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
+	}
+
+	verifyAddrs("aa", "bb", "cc")
+	verifySlots("aa", "01")
+	verifySlots("bb", "01", "02", "03")
+	verifySlots("cc", "01")
+
+	// now start rolling back changes
+	state.journal.revert(state, 7)
+	if _, ok := state.SlotInAccessList(addr("cc"), slot("01")); ok {
+		t.Fatalf("slot present, expected missing")
+	}
+	verifyAddrs("aa", "bb", "cc")
+	verifySlots("aa", "01")
+	verifySlots("bb", "01", "02", "03")
+
+	state.journal.revert(state, 6)
+	if state.AddressInAccessList(addr("cc")) {
+		t.Fatalf("addr present, expected missing")
+	}
+	verifyAddrs("aa", "bb")
+	verifySlots("aa", "01")
+	verifySlots("bb", "01", "02", "03")
+
+	state.journal.revert(state, 5)
+	if _, ok := state.SlotInAccessList(addr("aa"), slot("01")); ok {
+		t.Fatalf("slot present, expected missing")
+	}
+	verifyAddrs("aa", "bb")
+	verifySlots("bb", "01", "02", "03")
+
+	state.journal.revert(state, 4)
+	if _, ok := state.SlotInAccessList(addr("bb"), slot("03")); ok {
+		t.Fatalf("slot present, expected missing")
+	}
+	verifyAddrs("aa", "bb")
+	verifySlots("bb", "01", "02")
+
+	state.journal.revert(state, 3)
+	if _, ok := state.SlotInAccessList(addr("bb"), slot("02")); ok {
+		t.Fatalf("slot present, expected missing")
+	}
+	verifyAddrs("aa", "bb")
+	verifySlots("bb", "01")
+
+	state.journal.revert(state, 2)
+	if _, ok := state.SlotInAccessList(addr("bb"), slot("01")); ok {
+		t.Fatalf("slot present, expected missing")
+	}
+	verifyAddrs("aa", "bb")
+
+	state.journal.revert(state, 1)
+	if state.AddressInAccessList(addr("bb")) {
+		t.Fatalf("addr present, expected missing")
+	}
+	verifyAddrs("aa")
+
+	state.journal.revert(state, 0)
+	if state.AddressInAccessList(addr("aa")) {
+		t.Fatalf("addr present, expected missing")
+	}
+	if got, exp := len(state.accessList.addresses), 0; got != exp {
+		t.Fatalf("expected empty, got %d", got)
+	}
+	if got, exp := len(state.accessList.slots), 0; got != exp {
+		t.Fatalf("expected empty, got %d", got)
+	}
+	// Check the copy made earlier: it should be unaffected by the reverts above
+	state = stateCopy1
+	verifyAddrs("aa", "bb")
+	verifySlots("bb", "01", "02")
+	if got, exp := len(state.accessList.addresses), 2; got != exp {
+		t.Fatalf("expected %d, got %d", exp, got)
+	}
+	if got, exp := len(state.accessList.slots), 1; got != exp {
+		t.Fatalf("expected %d, got %d", exp, got)
+	}
+}
diff --git a/core/state_processor.go b/core/state_processor.go
index e655d8f3bfbac17ee2ce0fdc595bddec2c6e2961..ac6046b717c588f808092e70127ac7118bce82e8 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -95,6 +95,18 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
 	// Create a new environment which holds all relevant information
 	// about the transaction and calling mechanisms.
 	vmenv := vm.NewEVM(context, statedb, config, cfg)
+
+	if config.IsYoloV2(header.Number) {
+		statedb.AddAddressToAccessList(msg.From())
+		if dst := msg.To(); dst != nil {
+			statedb.AddAddressToAccessList(*dst)
+			// If it's a create-tx, the destination will be added inside evm.create
+		}
+		for _, addr := range vmenv.ActivePrecompiles() {
+			statedb.AddAddressToAccessList(addr)
+		}
+	}
+
 	// Apply the transaction to the current state (included in the env)
 	result, err := ApplyMessage(vmenv, msg, gp)
 	if err != nil {
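
Worked example: the prewarming above is why, under EIP-2929 (introduced later in this diff), first accesses to the sender, the recipient and the precompiles are charged at the warm rate. A comparison using the new constants (standalone arithmetic, not geth API):

    package main

    import "fmt"

    func main() {
        const (
            coldAccountAccess = 2600 // ColdAccountAccessCostEIP2929
            warmStorageRead   = 100  // WarmStorageReadCostEIP2929
        )
        fmt.Println("BALANCE of an untouched address:", coldAccountAccess) // cold: 2600
        fmt.Println("BALANCE of the prewarmed sender:", warmStorageRead)   // warm: 100
    }
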
diff --git a/core/types/derive_sha.go b/core/types/derive_sha.go
index 51b8506bce4388c117a756526eb8a1eb6311528c..51a10f3f3da7f01dd15d93709f53e26737919ae9 100644
--- a/core/types/derive_sha.go
+++ b/core/types/derive_sha.go
@@ -17,8 +17,6 @@
 package types
 
 import (
-	"bytes"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -37,26 +35,24 @@ type Hasher interface {
 
 func DeriveSha(list DerivableList, hasher Hasher) common.Hash {
 	hasher.Reset()
-	keybuf := new(bytes.Buffer)
 
 	// StackTrie requires values to be inserted in increasing
 	// hash order, which is not the order that `list` provides
 	// hashes in. This insertion sequence ensures that the
 	// order is correct.
+
+	var buf []byte
 	for i := 1; i < list.Len() && i <= 0x7f; i++ {
-		keybuf.Reset()
-		rlp.Encode(keybuf, uint(i))
-		hasher.Update(keybuf.Bytes(), list.GetRlp(i))
+		buf = rlp.AppendUint64(buf[:0], uint64(i))
+		hasher.Update(buf, list.GetRlp(i))
 	}
 	if list.Len() > 0 {
-		keybuf.Reset()
-		rlp.Encode(keybuf, uint(0))
-		hasher.Update(keybuf.Bytes(), list.GetRlp(0))
+		buf = rlp.AppendUint64(buf[:0], 0)
+		hasher.Update(buf, list.GetRlp(0))
 	}
 	for i := 0x80; i < list.Len(); i++ {
-		keybuf.Reset()
-		rlp.Encode(keybuf, uint(i))
-		hasher.Update(keybuf.Bytes(), list.GetRlp(i))
+		buf = rlp.AppendUint64(buf[:0], uint64(i))
+		hasher.Update(buf, list.GetRlp(i))
 	}
 	return hasher.Hash()
 }
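
The three loops mirror the byte ordering of RLP-encoded keys: indices 1..0x7f encode to a single byte 0x01..0x7f, index 0 encodes to 0x80, and larger indices encode to multi-byte strings starting at 0x81, so this insertion sequence feeds the StackTrie its keys in increasing order. A runnable illustration:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        for _, i := range []uint64{0, 1, 0x7f, 0x80, 0x81} {
            fmt.Printf("index %#x -> key %x\n", i, rlp.AppendUint64(nil, i))
        }
        // Output:
        // index 0x0 -> key 80
        // index 0x1 -> key 01
        // index 0x7f -> key 7f
        // index 0x80 -> key 8180
        // index 0x81 -> key 8181
    }
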
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 8930a06266b8ccbcb8b5f3d54c9632b16dcf4fba..35faa7b83dba6d0149eac9b12acfe5cfae8e8fd5 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -78,9 +78,9 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{9}): &blake2F{},
 }
 
-// PrecompiledContractsYoloV1 contains the default set of pre-compiled Ethereum
-// contracts used in the Yolo v1 test release.
-var PrecompiledContractsYoloV1 = map[common.Address]PrecompiledContract{
+// PrecompiledContractsYoloV2 contains the default set of pre-compiled Ethereum
+// contracts used in the Yolo v2 test release.
+var PrecompiledContractsYoloV2 = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{1}):  &ecrecover{},
 	common.BytesToAddress([]byte{2}):  &sha256hash{},
 	common.BytesToAddress([]byte{3}):  &ripemd160hash{},
@@ -101,6 +101,28 @@ var PrecompiledContractsYoloV1 = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{18}): &bls12381MapG2{},
 }
 
+var (
+	PrecompiledAddressesYoloV2    []common.Address
+	PrecompiledAddressesIstanbul  []common.Address
+	PrecompiledAddressesByzantium []common.Address
+	PrecompiledAddressesHomestead []common.Address
+)
+
+func init() {
+	for k := range PrecompiledContractsHomestead {
+		PrecompiledAddressesHomestead = append(PrecompiledAddressesHomestead, k)
+	}
+	for k := range PrecompiledContractsByzantium {
+		PrecompiledAddressesByzantium = append(PrecompiledAddressesByzantium, k)
+	}
+	for k := range PrecompiledContractsIstanbul {
+		PrecompiledAddressesIstanbul = append(PrecompiledAddressesIstanbul, k)
+	}
+	for k := range PrecompiledContractsYoloV2 {
+		PrecompiledAddressesYoloV2 = append(PrecompiledAddressesYoloV2, k)
+	}
+}
+
 // RunPrecompiledContract runs and evaluates the output of a precompiled contract.
 // It returns
 // - the returned bytes,
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index 6320875e1aebc631205493e64a48dc17301610d3..ed0d675a69630ab20561f0d2543978d28e7c52a3 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -43,7 +43,7 @@ type precompiledFailureTest struct {
 	Name          string
 }
 
-var allPrecompiles = PrecompiledContractsYoloV1
+var allPrecompiles = PrecompiledContractsYoloV2
 
 // EIP-152 test vectors
 var blake2FMalformedInputTests = []precompiledFailureTest{
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 6b5ba62aade145bf2b7fc89af0011d4aee142a3c..962c0f14b1624924dd31252ce217c9b06975a4d6 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -25,6 +25,7 @@ import (
 )
 
 var activators = map[int]func(*JumpTable){
+	2929: enable2929,
 	2200: enable2200,
 	1884: enable1884,
 	1344: enable1344,
@@ -134,3 +135,41 @@ func enable2315(jt *JumpTable) {
 		jumps:       true,
 	}
 }
+
+// enable2929 enables "EIP-2929: Gas cost increases for state access opcodes"
+// https://eips.ethereum.org/EIPS/eip-2929
+func enable2929(jt *JumpTable) {
+	jt[SSTORE].dynamicGas = gasSStoreEIP2929
+
+	jt[SLOAD].constantGas = 0
+	jt[SLOAD].dynamicGas = gasSLoadEIP2929
+
+	jt[EXTCODECOPY].constantGas = WarmStorageReadCostEIP2929
+	jt[EXTCODECOPY].dynamicGas = gasExtCodeCopyEIP2929
+
+	jt[EXTCODESIZE].constantGas = WarmStorageReadCostEIP2929
+	jt[EXTCODESIZE].dynamicGas = gasEip2929AccountCheck
+
+	jt[EXTCODEHASH].constantGas = WarmStorageReadCostEIP2929
+	jt[EXTCODEHASH].dynamicGas = gasEip2929AccountCheck
+
+	jt[BALANCE].constantGas = WarmStorageReadCostEIP2929
+	jt[BALANCE].dynamicGas = gasEip2929AccountCheck
+
+	jt[CALL].constantGas = WarmStorageReadCostEIP2929
+	jt[CALL].dynamicGas = gasCallEIP2929
+
+	jt[CALLCODE].constantGas = WarmStorageReadCostEIP2929
+	jt[CALLCODE].dynamicGas = gasCallCodeEIP2929
+
+	jt[STATICCALL].constantGas = WarmStorageReadCostEIP2929
+	jt[STATICCALL].dynamicGas = gasStaticCallEIP2929
+
+	jt[DELEGATECALL].constantGas = WarmStorageReadCostEIP2929
+	jt[DELEGATECALL].dynamicGas = gasDelegateCallEIP2929
+
+	// This was previously part of the dynamic cost, but we're using it as a constantGas
+	// factor here
+	jt[SELFDESTRUCT].constantGas = params.SelfdestructGasEIP150
+	jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP2929
+}
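
With 2929 registered in the activators map, the repricing can also be applied standalone on top of any base rule set; this is what vm.Config{ExtraEips: []int{2929}} does internally, and how the runtime test later in this diff exercises it. A sketch from inside package vm (the helper name is illustrative):

    // istanbulPlus2929 returns the Istanbul jump table with only the
    // EIP-2929 repricing applied.
    func istanbulPlus2929() JumpTable {
        jt := newIstanbulInstructionSet()
        if err := EnableEIP(2929, &jt); err != nil {
            panic(err) // only fails for EIP numbers missing from 'activators'
        }
        return jt
    }
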
diff --git a/core/vm/evm.go b/core/vm/evm.go
index f5469c500c8af19d42ac89e534cb8554a6cfbb36..8f6e603aee77bd0f66677bb2669acea860ee39de 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -42,11 +42,26 @@ type (
 	GetHashFunc func(uint64) common.Hash
 )
 
+// ActivePrecompiles returns the addresses of the precompiles enabled with the current
+// configuration
+func (evm *EVM) ActivePrecompiles() []common.Address {
+	switch {
+	case evm.chainRules.IsYoloV2:
+		return PrecompiledAddressesYoloV2
+	case evm.chainRules.IsIstanbul:
+		return PrecompiledAddressesIstanbul
+	case evm.chainRules.IsByzantium:
+		return PrecompiledAddressesByzantium
+	default:
+		return PrecompiledAddressesHomestead
+	}
+}
+
 func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
 	var precompiles map[common.Address]PrecompiledContract
 	switch {
-	case evm.chainRules.IsYoloV1:
-		precompiles = PrecompiledContractsYoloV1
+	case evm.chainRules.IsYoloV2:
+		precompiles = PrecompiledContractsYoloV2
 	case evm.chainRules.IsIstanbul:
 		precompiles = PrecompiledContractsIstanbul
 	case evm.chainRules.IsByzantium:
@@ -416,7 +431,11 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	}
 	nonce := evm.StateDB.GetNonce(caller.Address())
 	evm.StateDB.SetNonce(caller.Address(), nonce+1)
-
+	// We add this to the access list _before_ taking a snapshot. Even if the creation fails,
+	// the access-list change should not be rolled back
+	if evm.chainRules.IsYoloV2 {
+		evm.StateDB.AddAddressToAccessList(address)
+	}
 	// Ensure there's no existing contract already at the designated address
 	contractHash := evm.StateDB.GetCodeHash(address)
 	if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
diff --git a/core/vm/gen_structlog.go b/core/vm/gen_structlog.go
index ac1a9070c8972b307bfda6314a5ad2d05679bb7c..44da014de919fb91e63eb27071a151884d8cf02f 100644
--- a/core/vm/gen_structlog.go
+++ b/core/vm/gen_structlog.go
@@ -24,7 +24,7 @@ func (s StructLog) MarshalJSON() ([]byte, error) {
 		MemorySize    int                         `json:"memSize"`
 		Stack         []*math.HexOrDecimal256     `json:"stack"`
 		ReturnStack   []math.HexOrDecimal64       `json:"returnStack"`
-		ReturnData    []byte                      `json:"returnData"`
+		ReturnData    hexutil.Bytes               `json:"returnData"`
 		Storage       map[common.Hash]common.Hash `json:"-"`
 		Depth         int                         `json:"depth"`
 		RefundCounter uint64                      `json:"refund"`
@@ -72,7 +72,7 @@ func (s *StructLog) UnmarshalJSON(input []byte) error {
 		MemorySize    *int                        `json:"memSize"`
 		Stack         []*math.HexOrDecimal256     `json:"stack"`
 		ReturnStack   []math.HexOrDecimal64       `json:"returnStack"`
-		ReturnData    []byte                      `json:"returnData"`
+		ReturnData    *hexutil.Bytes              `json:"returnData"`
 		Storage       map[common.Hash]common.Hash `json:"-"`
 		Depth         *int                        `json:"depth"`
 		RefundCounter *uint64                     `json:"refund"`
@@ -113,7 +113,7 @@ func (s *StructLog) UnmarshalJSON(input []byte) error {
 		}
 	}
 	if dec.ReturnData != nil {
-		s.ReturnData = dec.ReturnData
+		s.ReturnData = *dec.ReturnData
 	}
 	if dec.Storage != nil {
 		s.Storage = dec.Storage
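
The motivation for this type change is JSON encoding: a plain []byte marshals to base64, while hexutil.Bytes marshals to a 0x-prefixed hex string, matching the rest of the tracer output. For example:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
        raw, _ := json.Marshal([]byte{0xde, 0xad})
        hex, _ := json.Marshal(hexutil.Bytes{0xde, 0xad})
        fmt.Println(string(raw)) // "3q0=" (base64)
        fmt.Println(string(hex)) // "0xdead"
    }
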
diff --git a/core/vm/interface.go b/core/vm/interface.go
index dd401466adfad2c04ab181681727ccb7927c5e9d..fb5bbca48f6598fc92dfa1982b67ee7e75d1549b 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -57,6 +57,15 @@ type StateDB interface {
 	// is defined according to EIP161 (balance = nonce = code = 0).
 	Empty(common.Address) bool
 
+	AddressInAccessList(addr common.Address) bool
+	SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool)
+	// AddAddressToAccessList adds the given address to the access list. This operation is safe to perform
+	// even if the feature/fork is not active yet
+	AddAddressToAccessList(addr common.Address)
+	// AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
+	// even if the feature/fork is not active yet
+	AddSlotToAccessList(addr common.Address, slot common.Hash)
+
 	RevertToSnapshot(int)
 	Snapshot() int
 
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 1e2a661debd1dda70b9e2c8132c5419f7c5ff683..bffc5013a65a4aab7eb690fddf5edf80a2ea306e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -99,8 +99,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
 	if cfg.JumpTable[STOP] == nil {
 		var jt JumpTable
 		switch {
-		case evm.chainRules.IsYoloV1:
-			jt = yoloV1InstructionSet
+		case evm.chainRules.IsYoloV2:
+			jt = yoloV2InstructionSet
 		case evm.chainRules.IsIstanbul:
 			jt = istanbulInstructionSet
 		case evm.chainRules.IsConstantinople:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 9d9bc12b62e6d12480e8f6318e28e619c00c6130..83fb2c1ed628f09611f4ed60a4dbb3722899f225 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -56,17 +56,19 @@ var (
 	byzantiumInstructionSet        = newByzantiumInstructionSet()
 	constantinopleInstructionSet   = newConstantinopleInstructionSet()
 	istanbulInstructionSet         = newIstanbulInstructionSet()
-	yoloV1InstructionSet           = newYoloV1InstructionSet()
+	yoloV2InstructionSet           = newYoloV2InstructionSet()
 )
 
 // JumpTable contains the EVM opcodes supported at a given fork.
 type JumpTable [256]*operation
 
-func newYoloV1InstructionSet() JumpTable {
+// newYoloV2InstructionSet creates an instructionset containing
+// - "EIP-2315: Simple Subroutines"
+// - "EIP-2929: Gas cost increases for state access opcodes"
+func newYoloV2InstructionSet() JumpTable {
 	instructionSet := newIstanbulInstructionSet()
-
 	enable2315(&instructionSet) // Subroutines - https://eips.ethereum.org/EIPS/eip-2315
-
+	enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929
 	return instructionSet
 }
 
diff --git a/core/vm/logger.go b/core/vm/logger.go
index 3b166b5d2649a5c94bf0348271273cabca4c3276..962be6ec8e0e30c634ecaa6c1513259974e6ddfe 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
 )
 
 var errTraceLimitReached = errors.New("the number of logs reached the specified limit")
@@ -53,6 +54,8 @@ type LogConfig struct {
 	DisableReturnData bool // disable return data capture
 	Debug             bool // print output during capture end
 	Limit             int  // maximum length of output, but zero means unlimited
+	// Chain overrides, can be used to execute a trace using future fork rules
+	Overrides *params.ChainConfig `json:"overrides,omitempty"`
 }
 
 //go:generate gencodec -type StructLog -field-override structLogMarshaling -out gen_structlog.go
@@ -82,6 +85,7 @@ type structLogMarshaling struct {
 	Gas         math.HexOrDecimal64
 	GasCost     math.HexOrDecimal64
 	Memory      hexutil.Bytes
+	ReturnData  hexutil.Bytes
 	OpName      string `json:"opName"` // adds call to OpName() in MarshalJSON
 	ErrorString string `json:"error"`  // adds call to ErrorString() in MarshalJSON
 }
@@ -313,8 +317,8 @@ func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create b
 	}
 
 	fmt.Fprintf(t.out, `
-|  Pc   |      Op     | Cost |   Stack   |   RStack  |
-|-------|-------------|------|-----------|-----------|
+|  Pc   |      Op     | Cost |   Stack   |   RStack  |  Refund |
+|-------|-------------|------|-----------|-----------|---------|
 `)
 	return nil
 }
@@ -326,7 +330,7 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64
 		// format stack
 		var a []string
 		for _, elem := range stack.data {
-			a = append(a, fmt.Sprintf("%d", elem))
+			a = append(a, elem.String())
 		}
 		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
@@ -339,6 +343,7 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64
 		b = fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
 	}
+	fmt.Fprintf(t.out, "%10v |", env.StateDB.GetRefund())
 	fmt.Fprintln(t.out, "")
 	if err != nil {
 		fmt.Fprintf(t.out, "Error: %v\n", err)
@@ -354,11 +359,7 @@ func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64
 }
 
 func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) error {
-	fmt.Fprintf(t.out, `
-Output: 0x%x
-Consumed gas: %d
-Error: %v
-`,
+	fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n",
 		output, gasUsed, err)
 	return nil
 }
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
new file mode 100644
index 0000000000000000000000000000000000000000..41b0549c51cd59dfa008b1daf0c7da57b92037f8
--- /dev/null
+++ b/core/vm/operations_acl.go
@@ -0,0 +1,222 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vm
+
+import (
+	"errors"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+const (
+	ColdAccountAccessCostEIP2929 = uint64(2600) // COLD_ACCOUNT_ACCESS_COST
+	ColdSloadCostEIP2929         = uint64(2100) // COLD_SLOAD_COST
+	WarmStorageReadCostEIP2929   = uint64(100)  // WARM_STORAGE_READ_COST
+)
+
+// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929
+//
+// When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys.
+// If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys.
+// Additionally, modify the parameters defined in EIP 2200 as follows:
+//
+// Parameter           Old value   New value
+// SLOAD_GAS           800         WARM_STORAGE_READ_COST
+// SSTORE_RESET_GAS    5000        5000 - COLD_SLOAD_COST
+//
+// The other parameters defined in EIP 2200 are unchanged.
+// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
+func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	// If we fail the minimum gas availability invariant, fail (0)
+	if contract.Gas <= params.SstoreSentryGasEIP2200 {
+		return 0, errors.New("not enough gas for reentrancy sentry")
+	}
+	// Gas sentry honoured, do the actual gas calculation based on the stored value
+	var (
+		y, x    = stack.Back(1), stack.peek()
+		slot    = common.Hash(x.Bytes32())
+		current = evm.StateDB.GetState(contract.Address(), slot)
+		cost    = uint64(0)
+	)
+	// Check slot presence in the access list
+	if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
+		cost = ColdSloadCostEIP2929
+		// If the caller cannot afford the cost, this change will be rolled back
+		evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
+		if !addrPresent {
+			// Once we're done with YOLOv2 and schedule this for mainnet, it might
+			// be good to remove this panic; it is really just a canary to have
+			// during testing
+			panic("impossible case: address was not present in access list during sstore op")
+		}
+	}
+	value := common.Hash(y.Bytes32())
+
+	if current == value { // noop (1)
+		// EIP 2200 original clause:
+		//		return params.SloadGasEIP2200, nil
+		return cost + WarmStorageReadCostEIP2929, nil // SLOAD_GAS
+	}
+	original := evm.StateDB.GetCommittedState(contract.Address(), common.Hash(x.Bytes32()))
+	if original == current {
+		if original == (common.Hash{}) { // create slot (2.1.1)
+			return cost + params.SstoreSetGasEIP2200, nil
+		}
+		if value == (common.Hash{}) { // delete slot (2.1.2b)
+			evm.StateDB.AddRefund(params.SstoreClearsScheduleRefundEIP2200)
+		}
+		// EIP-2200 original clause:
+		//		return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
+		return cost + (params.SstoreResetGasEIP2200 - ColdSloadCostEIP2929), nil // write existing slot (2.1.2)
+	}
+	if original != (common.Hash{}) {
+		if current == (common.Hash{}) { // recreate slot (2.2.1.1)
+			evm.StateDB.SubRefund(params.SstoreClearsScheduleRefundEIP2200)
+		} else if value == (common.Hash{}) { // delete slot (2.2.1.2)
+			evm.StateDB.AddRefund(params.SstoreClearsScheduleRefundEIP2200)
+		}
+	}
+	if original == value {
+		if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1)
+			// EIP 2200 Original clause:
+			//evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
+			evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - WarmStorageReadCostEIP2929)
+		} else { // reset to original existing slot (2.2.2.2)
+			// EIP 2200 Original clause:
+			//	evm.StateDB.AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200)
+			// - SSTORE_RESET_GAS redefined as (5000 - COLD_SLOAD_COST)
+			// - SLOAD_GAS redefined as WARM_STORAGE_READ_COST
+			// Final: (5000 - COLD_SLOAD_COST) - WARM_STORAGE_READ_COST
+			evm.StateDB.AddRefund((params.SstoreResetGasEIP2200 - ColdSloadCostEIP2929) - WarmStorageReadCostEIP2929)
+		}
+	}
+	// EIP-2200 original clause:
+	//return params.SloadGasEIP2200, nil // dirty update (2.2)
+	return cost + WarmStorageReadCostEIP2929, nil // dirty update (2.2)
+}
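
A worked example of the combined pricing above: a transaction's first SSTORE to an untouched slot that rewrites an existing non-zero value pays the cold surcharge plus the redefined reset cost, which sums back to the familiar 5000; once the slot is warm the same write costs 2900, and a warm no-op write costs 100 (standalone arithmetic, not geth API):

    package main

    import "fmt"

    func main() {
        const (
            coldSload = 2100 // ColdSloadCostEIP2929
            warmRead  = 100  // WarmStorageReadCostEIP2929
            resetGas  = 5000 // params.SstoreResetGasEIP2200
        )
        fmt.Println("cold write, existing slot:", coldSload+(resetGas-coldSload)) // 5000
        fmt.Println("warm write, existing slot:", resetGas-coldSload)             // 2900
        fmt.Println("warm no-op write:", warmRead)                                // 100
    }
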
+
+// gasSLoadEIP2929 calculates dynamic gas for SLOAD according to EIP-2929
+// For SLOAD, if the (address, storage_key) pair (where address is the address of the contract
+// whose storage is being read) is not yet in accessed_storage_keys,
+// charge 2100 gas and add the pair to accessed_storage_keys.
+// If the pair is already in accessed_storage_keys, charge 100 gas.
+func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	loc := stack.peek()
+	slot := common.Hash(loc.Bytes32())
+	// Check slot presence in the access list
+	if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
+		// If the caller cannot afford the cost, this change will be rolled back
+		// If the caller can afford it, we can skip checking the same thing later on, during execution
+		evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
+		return ColdSloadCostEIP2929, nil
+	}
+	return WarmStorageReadCostEIP2929, nil
+}
+
+// gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929
+// EIP spec:
+// > If the target is not in accessed_addresses,
+// > charge COLD_ACCOUNT_ACCESS_COST gas, and add the address to accessed_addresses.
+// > Otherwise, charge WARM_STORAGE_READ_COST gas.
+func gasExtCodeCopyEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	// memory expansion first (dynamic part of pre-2929 implementation)
+	gas, err := gasExtCodeCopy(evm, contract, stack, mem, memorySize)
+	if err != nil {
+		return 0, err
+	}
+	addr := common.Address(stack.peek().Bytes20())
+	// Check address presence in the access list
+	if !evm.StateDB.AddressInAccessList(addr) {
+		evm.StateDB.AddAddressToAccessList(addr)
+		var overflow bool
+		// We charge (cold-warm), since 'warm' is already charged as constantGas
+		if gas, overflow = math.SafeAdd(gas, ColdAccountAccessCostEIP2929-WarmStorageReadCostEIP2929); overflow {
+			return 0, ErrGasUintOverflow
+		}
+		return gas, nil
+	}
+	return gas, nil
+}
+
+// gasEip2929AccountCheck checks whether the first stack item (as address) is present in the access list.
+// If it is, this method returns '0', otherwise 'cold-warm' gas, presuming that the opcode using it
+// is also using 'warm' as constant factor.
+// This method is used by:
+// - extcodehash,
+// - extcodesize,
+// - (ext) balance
+func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	addr := common.Address(stack.peek().Bytes20())
+	// Check address presence in the access list
+	if !evm.StateDB.AddressInAccessList(addr) {
+		// If the caller cannot afford the cost, this change will be rolled back
+		evm.StateDB.AddAddressToAccessList(addr)
+		// The warm storage read cost is already charged as constantGas
+		return ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929, nil
+	}
+	return 0, nil
+}
+
+func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
+	return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+		addr := common.Address(stack.Back(1).Bytes20())
+		// Check address presence in the access list
+		if !evm.StateDB.AddressInAccessList(addr) {
+			evm.StateDB.AddAddressToAccessList(addr)
+			// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost
+			if !contract.UseGas(ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929) {
+				return 0, ErrOutOfGas
+			}
+		}
+		// Now call the old calculator, which takes into account
+		// - create new account
+		// - transfer value
+		// - memory expansion
+		// - 63/64ths rule
+		return oldCalculator(evm, contract, stack, mem, memorySize)
+	}
+}
+
+var (
+	gasCallEIP2929         = makeCallVariantGasCallEIP2929(gasCall)
+	gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall)
+	gasStaticCallEIP2929   = makeCallVariantGasCallEIP2929(gasStaticCall)
+	gasCallCodeEIP2929     = makeCallVariantGasCallEIP2929(gasCallCode)
+)
+
+func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	var (
+		gas     uint64
+		address = common.Address(stack.peek().Bytes20())
+	)
+	if !evm.StateDB.AddressInAccessList(address) {
+		// If the caller cannot afford the cost, this change will be rolled back
+		evm.StateDB.AddAddressToAccessList(address)
+		gas = ColdAccountAccessCostEIP2929
+	}
+	// if empty and transfers value
+	if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
+		gas += params.CreateBySelfdestructGas
+	}
+	if !evm.StateDB.HasSuicided(contract.Address()) {
+		evm.StateDB.AddRefund(params.SelfdestructRefundGas)
+	}
+	return gas, nil
+
+}
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 7ebaa9a7e3865df62197ccc5561a23f720ba40ca..d99e8f3b2bc0a1d9426381ca5fc131a3a9f4eed1 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -65,7 +65,7 @@ func setDefaults(cfg *Config) {
 			PetersburgBlock:     new(big.Int),
 			IstanbulBlock:       new(big.Int),
 			MuirGlacierBlock:    new(big.Int),
-			YoloV1Block:         nil,
+			YoloV2Block:         nil,
 		}
 	}
 
@@ -113,6 +113,13 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
 		vmenv   = NewEnv(cfg)
 		sender  = vm.AccountRef(cfg.Origin)
 	)
+	if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) {
+		cfg.State.AddAddressToAccessList(cfg.Origin)
+		cfg.State.AddAddressToAccessList(address)
+		for _, addr := range vmenv.ActivePrecompiles() {
+			cfg.State.AddAddressToAccessList(addr)
+		}
+	}
 	cfg.State.CreateAccount(address)
 	// set the receiver's (the executing contract) code for execution.
 	cfg.State.SetCode(address, code)
@@ -142,6 +150,12 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
 		vmenv  = NewEnv(cfg)
 		sender = vm.AccountRef(cfg.Origin)
 	)
+	if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) {
+		cfg.State.AddAddressToAccessList(cfg.Origin)
+		for _, addr := range vmenv.ActivePrecompiles() {
+			cfg.State.AddAddressToAccessList(addr)
+		}
+	}
 
 	// Call the code with the given configuration.
 	code, address, leftOverGas, err := vmenv.Create(
@@ -164,6 +178,14 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
 	vmenv := NewEnv(cfg)
 
 	sender := cfg.State.GetOrNewStateObject(cfg.Origin)
+	if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) {
+		cfg.State.AddAddressToAccessList(cfg.Origin)
+		cfg.State.AddAddressToAccessList(address)
+		for _, addr := range vmenv.ActivePrecompiles() {
+			cfg.State.AddAddressToAccessList(addr)
+		}
+	}
+
 	// Call the code with the given configuration.
 	ret, leftOverGas, err := vmenv.Call(
 		sender,
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 108ee80e41e65a64b614b63a629527eba0064e4f..b185258dadb8ecf3ffa87e1d04972fb8e18d5e94 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -722,3 +722,115 @@ func BenchmarkSimpleLoop(b *testing.B) {
 	//benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
 	//benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
 }
+
+// TestEip2929Cases contains various testcases that are used for the
+// EIP-2929 gas repricings of state-access opcodes
+func TestEip2929Cases(t *testing.T) {
+
+	id := 1
+	prettyPrint := func(comment string, code []byte) {
+
+		instrs := make([]string, 0)
+		it := asm.NewInstructionIterator(code)
+		for it.Next() {
+			if it.Arg() != nil && 0 < len(it.Arg()) {
+				instrs = append(instrs, fmt.Sprintf("%v 0x%x", it.Op(), it.Arg()))
+			} else {
+				instrs = append(instrs, fmt.Sprintf("%v", it.Op()))
+			}
+		}
+		ops := strings.Join(instrs, ", ")
+		fmt.Printf("### Case %d\n\n", id)
+		id++
+		fmt.Printf("%v\n\nBytecode: \n```\n0x%x\n```\nOperations: \n```\n%v\n```\n\n",
+			comment,
+			code, ops)
+		Execute(code, nil, &Config{
+			EVMConfig: vm.Config{
+				Debug:     true,
+				Tracer:    vm.NewMarkdownLogger(nil, os.Stdout),
+				ExtraEips: []int{2929},
+			},
+		})
+	}
+
+	{ // First eip testcase
+		code := []byte{
+			// Three checks against a precompile
+			byte(vm.PUSH1), 1, byte(vm.EXTCODEHASH), byte(vm.POP),
+			byte(vm.PUSH1), 2, byte(vm.EXTCODESIZE), byte(vm.POP),
+			byte(vm.PUSH1), 3, byte(vm.BALANCE), byte(vm.POP),
+			// Three checks against a non-precompile
+			byte(vm.PUSH1), 0xf1, byte(vm.EXTCODEHASH), byte(vm.POP),
+			byte(vm.PUSH1), 0xf2, byte(vm.EXTCODESIZE), byte(vm.POP),
+			byte(vm.PUSH1), 0xf3, byte(vm.BALANCE), byte(vm.POP),
+			// Same three checks (should be cheaper)
+			byte(vm.PUSH1), 0xf2, byte(vm.EXTCODEHASH), byte(vm.POP),
+			byte(vm.PUSH1), 0xf3, byte(vm.EXTCODESIZE), byte(vm.POP),
+			byte(vm.PUSH1), 0xf1, byte(vm.BALANCE), byte(vm.POP),
+			// Check the origin, and the 'this'
+			byte(vm.ORIGIN), byte(vm.BALANCE), byte(vm.POP),
+			byte(vm.ADDRESS), byte(vm.BALANCE), byte(vm.POP),
+
+			byte(vm.STOP),
+		}
+		prettyPrint("This checks `EXT`(codehash,codesize,balance) of precompiles, which should be `100`, "+
+			"and later checks the same operations twice against some non-precompiles. "+
+			"Those are cheaper the second time they are accessed. Lastly, it checks the `BALANCE` of `origin` and `this`.", code)
+	}
+
+	{ // EXTCODECOPY
+		code := []byte{
+			// extcodecopy( 0xff,0,0,0,0)
+			byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, //length, codeoffset, memoffset
+			byte(vm.PUSH1), 0xff, byte(vm.EXTCODECOPY),
+			// extcodecopy( 0xff,0,0,0,0)
+			byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, //length, codeoffset, memoffset
+			byte(vm.PUSH1), 0xff, byte(vm.EXTCODECOPY),
+			// extcodecopy( this,0,0,0,0)
+			byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, //length, codeoffset, memoffset
+			byte(vm.ADDRESS), byte(vm.EXTCODECOPY),
+
+			byte(vm.STOP),
+		}
+		prettyPrint("This checks `extcodecopy( 0xff,0,0,0,0)` twice, (should be expensive first time), "+
+			"and then does `extcodecopy( this,0,0,0,0)`.", code)
+	}
+
+	{ // SLOAD + SSTORE
+		code := []byte{
+
+			// Add slot `0x1` to access list
+			byte(vm.PUSH1), 0x01, byte(vm.SLOAD), byte(vm.POP), // SLOAD( 0x1) (add to access list)
+			// Write to `0x1` which is already in access list
+			byte(vm.PUSH1), 0x11, byte(vm.PUSH1), 0x01, byte(vm.SSTORE), // SSTORE( loc: 0x01, val: 0x11)
+			// Write to `0x2` which is not in access list
+			byte(vm.PUSH1), 0x11, byte(vm.PUSH1), 0x02, byte(vm.SSTORE), // SSTORE( loc: 0x02, val: 0x11)
+			// Write again to `0x2`
+			byte(vm.PUSH1), 0x11, byte(vm.PUSH1), 0x02, byte(vm.SSTORE), // SSTORE( loc: 0x02, val: 0x11)
+			// Read slot in access list (0x2)
+			byte(vm.PUSH1), 0x02, byte(vm.SLOAD), // SLOAD( 0x2)
+			// Read slot in access list (0x1)
+			byte(vm.PUSH1), 0x01, byte(vm.SLOAD), // SLOAD( 0x1)
+		}
+		prettyPrint("This checks `sload( 0x1)` followed by `sstore(loc: 0x01, val:0x11)`, then 'naked' sstore:"+
+			"`sstore(loc: 0x02, val:0x11)` twice, and `sload(0x2)`, `sload(0x1)`. ", code)
+	}
+	{ // Call variants
+		code := []byte{
+			// identity precompile
+			byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+			byte(vm.PUSH1), 0x04, byte(vm.PUSH1), 0x0, byte(vm.CALL), byte(vm.POP),
+
+			// random account - call 1
+			byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+			byte(vm.PUSH1), 0xff, byte(vm.PUSH1), 0x0, byte(vm.CALL), byte(vm.POP),
+
+			// random account - call 2
+			byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+			byte(vm.PUSH1), 0xff, byte(vm.PUSH1), 0x0, byte(vm.STATICCALL), byte(vm.POP),
+		}
+		prettyPrint("This calls the `identity`-precompile (cheap), then calls an account (expensive) and `staticcall`s the same "+
+			"account (cheap)", code)
+	}
+}
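
Beyond ExtraEips, the full YoloV2 rule set (EIP-2315, EIP-2929 and the new precompiles) can be exercised through this package by supplying a chain config with YoloV2Block set; the field list mirrors setDefaults above. A sketch (the chain ID and bytecode are placeholders):

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core/vm/runtime"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        yolo := &params.ChainConfig{
            ChainID:             big.NewInt(1),
            HomesteadBlock:      new(big.Int),
            EIP150Block:         new(big.Int),
            EIP155Block:         new(big.Int),
            EIP158Block:         new(big.Int),
            ByzantiumBlock:      new(big.Int),
            ConstantinopleBlock: new(big.Int),
            PetersburgBlock:     new(big.Int),
            IstanbulBlock:       new(big.Int),
            MuirGlacierBlock:    new(big.Int),
            YoloV2Block:         new(big.Int), // active from genesis
        }
        ret, _, err := runtime.Execute([]byte{0x00 /* STOP */}, nil, &runtime.Config{ChainConfig: yolo})
        fmt.Println(ret, err)
    }
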
diff --git a/eth/api.go b/eth/api.go
index 76118e2d7fc683555a995fad795d8dc3b5fde14d..fd3565647647b75da6dbc2575c803509129f7c1f 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -389,6 +389,8 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta
 		if err != nil {
 			return state.IteratorDump{}, err
 		}
+	} else {
+		return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
 	}
 
 	if maxResults > AccountRangeMaxResults || maxResults <= 0 {
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 748280951c4a5809b798ab5f44bc880ec610045b..90d4a95c147b8ea10589c99ba4ee6fdf21a502c3 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -38,6 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/tracers"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/ethereum/go-ethereum/trie"
@@ -561,9 +562,28 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
 
 	// Execute transaction, either tracing all or just the requested one
 	var (
-		signer = types.MakeSigner(api.eth.blockchain.Config(), block.Number())
-		dumps  []string
+		signer      = types.MakeSigner(api.eth.blockchain.Config(), block.Number())
+		dumps       []string
+		chainConfig = api.eth.blockchain.Config()
+		canon       = true
 	)
+	// Check if there are any overrides: the caller may wish to enable a future
+	// fork when executing this block. Note, such overrides are only applicable to the
+	// actual specified block, not any preceding blocks that we have to go through
+	// in order to obtain the state.
+	// Therefore, it's perfectly valid to specify `"futureForkBlock": 0`, to enable `futureFork`
+
+	if config != nil && config.Overrides != nil {
+		// Copy the config, to not screw up the main config
+		// Note: the Clique-part is _not_ deep copied
+		chainConfigCopy := new(params.ChainConfig)
+		*chainConfigCopy = *chainConfig
+		chainConfig = chainConfigCopy
+		if yolov2 := config.Overrides.YoloV2Block; yolov2 != nil {
+			chainConfig.YoloV2Block = yolov2
+			canon = false
+		}
+	}
 	for i, tx := range block.Transactions() {
 		// Prepare the transaction for un-traced execution
 		var (
@@ -579,7 +599,9 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
 		if tx.Hash() == txHash || txHash == (common.Hash{}) {
 			// Generate a unique temporary file to dump it into
 			prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])
-
+			if !canon {
+				prefix = fmt.Sprintf("%valt-", prefix)
+			}
 			dump, err = ioutil.TempFile(os.TempDir(), prefix)
 			if err != nil {
 				return nil, err
@@ -595,7 +617,7 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
 			}
 		}
 		// Execute the transaction and flush any traces to disk
-		vmenv := vm.NewEVM(vmctx, statedb, api.eth.blockchain.Config(), vmConf)
+		vmenv := vm.NewEVM(vmctx, statedb, chainConfig, vmConf)
 		_, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
 		if writer != nil {
 			writer.Flush()
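
The knob carrying these overrides is the Overrides field added to vm.LogConfig earlier in this diff. A sketch of a config that traces a block as though YoloV2 activated at genesis; how the LogConfig reaches standardTraceBlockToFile over RPC is outside this hunk:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core/vm"
        "github.com/ethereum/go-ethereum/params"
    )

    // Only YoloV2Block is read from Overrides by the copy logic above;
    // every other fork block stays on the canonical chain config.
    var traceOverrides = &vm.LogConfig{
        Overrides: &params.ChainConfig{
            YoloV2Block: big.NewInt(0),
        },
    }

    func main() {
        fmt.Println(traceOverrides.Overrides.YoloV2Block)
    }
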
diff --git a/internal/build/archive.go b/internal/build/archive.go
index a00258d999032b2ac2f02c97b501d2d07a62ccee..8b3ac23d1d89999a5b6d90b0464f3ccde1e6b6af 100644
--- a/internal/build/archive.go
+++ b/internal/build/archive.go
@@ -184,24 +184,35 @@ func (a *TarballArchive) Close() error {
 	return a.file.Close()
 }
 
-func ExtractTarballArchive(archive string, dest string) error {
-	// We're only interested in gzipped archives, wrap the reader now
+// ExtractArchive unpacks a .zip or .tar.gz archive to the destination directory.
+func ExtractArchive(archive string, dest string) error {
 	ar, err := os.Open(archive)
 	if err != nil {
 		return err
 	}
 	defer ar.Close()
 
+	switch {
+	case strings.HasSuffix(archive, ".tar.gz"):
+		return extractTarball(ar, dest)
+	case strings.HasSuffix(archive, ".zip"):
+		return extractZip(ar, dest)
+	default:
+		return fmt.Errorf("unhandled archive type %s", archive)
+	}
+}
+
+// extractTarball unpacks a .tar.gz file.
+func extractTarball(ar io.Reader, dest string) error {
 	gzr, err := gzip.NewReader(ar)
 	if err != nil {
 		return err
 	}
 	defer gzr.Close()
 
-	// Iterate over all the files in the tarball
 	tr := tar.NewReader(gzr)
 	for {
-		// Fetch the next tarball header and abort if needed
+		// Move to the next file header.
 		header, err := tr.Next()
 		if err != nil {
 			if err == io.EOF {
@@ -209,22 +220,69 @@ func ExtractTarballArchive(archive string, dest string) error {
 			}
 			return err
 		}
-		// Figure out the target and create it
-		target := filepath.Join(dest, header.Name)
-
-		switch header.Typeflag {
-		case tar.TypeReg:
-			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
-				return err
-			}
-			file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+		// We only care about regular files, directory modes
+		// and special file types are not supported.
+		if header.Typeflag == tar.TypeReg {
+			armode := header.FileInfo().Mode()
+			err := extractFile(header.Name, armode, tr, dest)
 			if err != nil {
-				return err
-			}
-			if _, err := io.Copy(file, tr); err != nil {
-				return err
+				return fmt.Errorf("extract %s: %v", header.Name, err)
 			}
-			file.Close()
 		}
 	}
 }
+
+// extractZip unpacks the given .zip file.
+func extractZip(ar *os.File, dest string) error {
+	info, err := ar.Stat()
+	if err != nil {
+		return err
+	}
+	zr, err := zip.NewReader(ar, info.Size())
+	if err != nil {
+		return err
+	}
+
+	for _, zf := range zr.File {
+		if !zf.Mode().IsRegular() {
+			continue
+		}
+
+		data, err := zf.Open()
+		if err != nil {
+			return err
+		}
+		err = extractFile(zf.Name, zf.Mode(), data, dest)
+		data.Close()
+		if err != nil {
+			return fmt.Errorf("extract %s: %v", zf.Name, err)
+		}
+	}
+	return nil
+}
+
+// extractFile extracts a single file from an archive.
+func extractFile(arpath string, armode os.FileMode, data io.Reader, dest string) error {
+	// Check that path is inside destination directory.
+	target := filepath.Join(dest, filepath.FromSlash(arpath))
+	if !strings.HasPrefix(target, filepath.Clean(dest)+string(os.PathSeparator)) {
+		return fmt.Errorf("path %q escapes archive destination", target)
+	}
+
+	// Ensure the destination directory exists.
+	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+		return err
+	}
+
+	// Copy file data.
+	file, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, armode)
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(file, data); err != nil {
+		file.Close()
+		os.Remove(target)
+		return err
+	}
+	return file.Close()
+}
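
Usage stays a single call for either format, and extractFile's prefix check rejects entries whose names would traverse outside dest (the classic zip-slip vector). A sketch (the archive name and destination are hypothetical):

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/internal/build"
    )

    func main() {
        // Format is chosen by file suffix: .tar.gz or .zip.
        if err := build.ExtractArchive("toolchain.tar.gz", "build/_workspace"); err != nil {
            log.Fatal(err)
        }
        // An entry named "../../etc/passwd" fails the destination-prefix
        // check in extractFile instead of being written outside dest.
    }
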
diff --git a/internal/build/util.go b/internal/build/util.go
index fc559760b26cc66156ef7cd0eeefbfa03f34d3d7..91149926f790584b242aea71536ae1ef312706e0 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -20,6 +20,8 @@ import (
 	"bytes"
 	"flag"
 	"fmt"
+	"go/parser"
+	"go/token"
 	"io"
 	"io/ioutil"
 	"log"
@@ -152,3 +154,28 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
 	stdin.Close()
 	return sftp.Wait()
 }
+
+// FindMainPackages finds all 'main' packages in the given directory and returns their
+// package paths.
+func FindMainPackages(dir string) []string {
+	var commands []string
+	cmds, err := ioutil.ReadDir(dir)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, cmd := range cmds {
+		pkgdir := filepath.Join(dir, cmd.Name())
+		pkgs, err := parser.ParseDir(token.NewFileSet(), pkgdir, nil, parser.PackageClauseOnly)
+		if err != nil {
+			log.Fatal(err)
+		}
+		for name := range pkgs {
+			if name == "main" {
+				path := "./" + filepath.ToSlash(pkgdir)
+				commands = append(commands, path)
+				break
+			}
+		}
+	}
+	return commands
+}
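
A sketch of the intended use from a build script; the cmd directory and the go invocation are assumptions. FindMainPackages returns "./"-prefixed package paths, ready to hand to the go tool:

    package main

    import (
        "fmt"
        "os/exec"

        "github.com/ethereum/go-ethereum/internal/build"
    )

    func main() {
        // Compile every main package under cmd/ in one go-build run,
        // e.g. ["./cmd/geth", "./cmd/clef", ...].
        packages := build.FindMainPackages("cmd")
        args := append([]string{"build"}, packages...)
        out, err := exec.Command("go", args...).CombinedOutput()
        fmt.Println(string(out), err)
    }
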
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index c7d1e0020f77f85485744deec7282474c7641220..0d6ace9b5be603eb54e7b5bd73411687b185fab9 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -599,7 +599,7 @@ func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Addre
 			if storageError != nil {
 				return nil, storageError
 			}
-			storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), common.ToHexArray(proof)}
+			storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), toHexSlice(proof)}
 		} else {
 			storageProof[i] = StorageResult{key, &hexutil.Big{}, []string{}}
 		}
@@ -613,7 +613,7 @@ func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Addre
 
 	return &AccountResult{
 		Address:      address,
-		AccountProof: common.ToHexArray(accountProof),
+		AccountProof: toHexSlice(accountProof),
 		Balance:      (*hexutil.Big)(state.GetBalance(address)),
 		CodeHash:     codeHash,
 		Nonce:        hexutil.Uint64(state.GetNonce(address)),
@@ -1943,3 +1943,12 @@ func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error {
 	}
 	return nil
 }
+
+// toHexSlice creates a slice of hex-strings based on []byte.
+func toHexSlice(b [][]byte) []string {
+	r := make([]string, len(b))
+	for i := range b {
+		r[i] = hexutil.Encode(b[i])
+	}
+	return r
+}
diff --git a/internal/utesting/utesting.go b/internal/utesting/utesting.go
index 4de0ecf99aac14e3c9dd4f127d2ea6f872959fdf..ef05a90e4c57b03ff46393c50206e730a16e2b3e 100644
--- a/internal/utesting/utesting.go
+++ b/internal/utesting/utesting.go
@@ -25,6 +25,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"regexp"
 	"runtime"
 	"sync"
@@ -63,32 +64,165 @@ func MatchTests(tests []Test, expr string) []Test {
 // RunTests executes all given tests in order and returns their results.
 // If the report writer is non-nil, a test report is written to it in real time.
 func RunTests(tests []Test, report io.Writer) []Result {
-	results := make([]Result, len(tests))
+	if report == nil {
+		report = ioutil.Discard
+	}
+	results := run(tests, newConsoleOutput(report))
+	fails := CountFailures(results)
+	fmt.Fprintf(report, "%v/%v tests passed.\n", len(tests)-fails, len(tests))
+	return results
+}
+
+// RunTAP runs the given tests and writes Test Anything Protocol output
+// to the report writer.
+func RunTAP(tests []Test, report io.Writer) []Result {
+	return run(tests, newTAP(report, len(tests)))
+}
+
+func run(tests []Test, output testOutput) []Result {
+	var results = make([]Result, len(tests))
 	for i, test := range tests {
-		var output io.Writer
 		buffer := new(bytes.Buffer)
-		output = buffer
-		if report != nil {
-			output = io.MultiWriter(buffer, report)
-		}
+		logOutput := io.MultiWriter(buffer, output)
+
+		output.testStart(test.Name)
 		start := time.Now()
 		results[i].Name = test.Name
-		results[i].Failed = run(test, output)
+		results[i].Failed = runTest(test, logOutput)
 		results[i].Duration = time.Since(start)
 		results[i].Output = buffer.String()
-		if report != nil {
-			printResult(results[i], report)
-		}
+		output.testResult(results[i])
 	}
 	return results
 }
 
-func printResult(r Result, w io.Writer) {
+// testOutput is implemented by output formats.
+type testOutput interface {
+	testStart(name string)
+	Write([]byte) (int, error)
+	testResult(Result)
+}
+
+// consoleOutput prints test results similarly to go test.
+type consoleOutput struct {
+	out         io.Writer
+	indented    *indentWriter
+	curTest     string
+	wroteHeader bool
+}
+
+func newConsoleOutput(w io.Writer) *consoleOutput {
+	return &consoleOutput{
+		out:      w,
+		indented: newIndentWriter(" ", w),
+	}
+}
+
+// testStart signals the start of a new test.
+func (c *consoleOutput) testStart(name string) {
+	c.curTest = name
+	c.wroteHeader = false
+}
+
+// Write handles test log output.
+func (c *consoleOutput) Write(b []byte) (int, error) {
+	if !c.wroteHeader {
+		// This is the first output line from the test. Print a "-- RUN" header.
+		fmt.Fprintln(c.out, "-- RUN", c.curTest)
+		c.wroteHeader = true
+	}
+	return c.indented.Write(b)
+}
+
+// testResult prints the final test result line.
+func (c *consoleOutput) testResult(r Result) {
+	c.indented.flush()
 	pd := r.Duration.Truncate(100 * time.Microsecond)
 	if r.Failed {
-		fmt.Fprintf(w, "-- FAIL %s (%v)\n", r.Name, pd)
+		fmt.Fprintf(c.out, "-- FAIL %s (%v)\n", r.Name, pd)
 	} else {
-		fmt.Fprintf(w, "-- OK %s (%v)\n", r.Name, pd)
+		fmt.Fprintf(c.out, "-- OK %s (%v)\n", r.Name, pd)
+	}
+}
+
+// tapOutput produces Test Anything Protocol v13 output.
+type tapOutput struct {
+	out      io.Writer
+	indented *indentWriter
+	counter  int
+}
+
+func newTAP(out io.Writer, numTests int) *tapOutput {
+	fmt.Fprintf(out, "1..%d\n", numTests)
+	return &tapOutput{
+		out:      out,
+		indented: newIndentWriter("# ", out),
+	}
+}
+
+func (t *tapOutput) testStart(name string) {
+	t.counter++
+}
+
+// Write does nothing for TAP because there is no real-time output of test logs.
+func (t *tapOutput) Write(b []byte) (int, error) {
+	return len(b), nil
+}
+
+func (t *tapOutput) testResult(r Result) {
+	status := "ok"
+	if r.Failed {
+		status = "not ok"
+	}
+	fmt.Fprintln(t.out, status, t.counter, r.Name)
+	t.indented.Write([]byte(r.Output))
+	t.indented.flush()
+}
+
+// indentWriter indents all written text.
+type indentWriter struct {
+	out    io.Writer
+	indent string
+	inLine bool
+}
+
+func newIndentWriter(indent string, out io.Writer) *indentWriter {
+	return &indentWriter{out: out, indent: indent}
+}
+
+func (w *indentWriter) Write(b []byte) (n int, err error) {
+	for len(b) > 0 {
+		if !w.inLine {
+			if _, err = io.WriteString(w.out, w.indent); err != nil {
+				return n, err
+			}
+			w.inLine = true
+		}
+
+		end := bytes.IndexByte(b, '\n')
+		if end == -1 {
+			nn, err := w.out.Write(b)
+			n += nn
+			return n, err
+		}
+
+		line := b[:end+1]
+		nn, err := w.out.Write(line)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		b = b[end+1:]
+		w.inLine = false
+	}
+	return n, err
+}
+
+// flush ensures the current line is terminated.
+func (w *indentWriter) flush() {
+	if w.inLine {
+		fmt.Fprintln(w.out)
+		w.inLine = false
 	}
 }
 
@@ -106,11 +240,11 @@ func CountFailures(rr []Result) int {
 // Run executes a single test.
 func Run(test Test) (bool, string) {
 	output := new(bytes.Buffer)
-	failed := run(test, output)
+	failed := runTest(test, output)
 	return failed, output.String()
 }
 
-func run(test Test, output io.Writer) bool {
+func runTest(test Test, output io.Writer) bool {
 	t := &T{output: output}
 	done := make(chan struct{})
 	go func() {
@@ -137,6 +271,9 @@ type T struct {
 	output io.Writer
 }
 
+// Helper exists for compatibility with testing.T.
+func (t *T) Helper() {}
+
 // FailNow marks the test as having failed and stops its execution by calling
 // runtime.Goexit (which then runs all deferred calls in the current goroutine).
 func (t *T) FailNow() {
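
For reference, the two output modes added above can be driven as follows. This is a minimal sketch, assuming it is built inside the go-ethereum module (internal packages cannot be imported from outside it); the test names and bodies are illustrative only.

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/internal/utesting"
)

func main() {
	tests := []utesting.Test{
		{Name: "TestOK", Fn: func(t *utesting.T) { t.Log("hello") }},
		{Name: "TestBad", Fn: func(t *utesting.T) { t.Error("boom") }},
	}
	// Console output, similar to `go test`; ends with "1/2 tests passed."
	utesting.RunTests(tests, os.Stdout)
	// TAP v13 output; per-test logs appear as "# " comment lines.
	utesting.RunTAP(tests, os.Stdout)
}
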
diff --git a/internal/utesting/utesting_test.go b/internal/utesting/utesting_test.go
index 1403a5c8f735825cbb931dcbd90d60218258aeb4..31c7911c52f0f73ad1577ad3919ae562bd7b193f 100644
--- a/internal/utesting/utesting_test.go
+++ b/internal/utesting/utesting_test.go
@@ -17,6 +17,8 @@
 package utesting
 
 import (
+	"bytes"
+	"regexp"
 	"strings"
 	"testing"
 )
@@ -53,3 +55,85 @@ func TestTest(t *testing.T) {
 		t.Fatalf("wrong result for panicking test: %#v", results[2])
 	}
 }
+
+var outputTests = []Test{
+	{
+		Name: "TestWithLogs",
+		Fn: func(t *T) {
+			t.Log("output line 1")
+			t.Log("output line 2\noutput line 3")
+		},
+	},
+	{
+		Name: "TestNoLogs",
+		Fn:   func(t *T) {},
+	},
+	{
+		Name: "FailWithLogs",
+		Fn: func(t *T) {
+			t.Log("output line 1")
+			t.Error("failed 1")
+		},
+	},
+	{
+		Name: "FailMessage",
+		Fn: func(t *T) {
+			t.Error("failed 2")
+		},
+	},
+	{
+		Name: "FailNoOutput",
+		Fn: func(t *T) {
+			t.Fail()
+		},
+	},
+}
+
+func TestOutput(t *testing.T) {
+	var buf bytes.Buffer
+	RunTests(outputTests, &buf)
+
+	want := regexp.MustCompile(`
+^-- RUN TestWithLogs
+ output line 1
+ output line 2
+ output line 3
+-- OK TestWithLogs \([^)]+\)
+-- OK TestNoLogs \([^)]+\)
+-- RUN FailWithLogs
+ output line 1
+ failed 1
+-- FAIL FailWithLogs \([^)]+\)
+-- RUN FailMessage
+ failed 2
+-- FAIL FailMessage \([^)]+\)
+-- FAIL FailNoOutput \([^)]+\)
+2/5 tests passed.
+$`[1:])
+	if !want.MatchString(buf.String()) {
+		t.Fatalf("output does not match: %q", buf.String())
+	}
+}
+
+func TestOutputTAP(t *testing.T) {
+	var buf bytes.Buffer
+	RunTAP(outputTests, &buf)
+
+	want := `
+1..5
+ok 1 TestWithLogs
+# output line 1
+# output line 2
+# output line 3
+ok 2 TestNoLogs
+not ok 3 FailWithLogs
+# output line 1
+# failed 1
+not ok 4 FailMessage
+# failed 2
+not ok 5 FailNoOutput
+`
+	if buf.String() != want[1:] {
+		t.Fatalf("output does not match: %q", buf.String())
+	}
+}
diff --git a/les/client_handler.go b/les/client_handler.go
index cfeec7a03cdb5cce4e71f9253fde2689ead59481..77a0ea5c6f87c72bfdd76f6e65dd7ae5c9214c7d 100644
--- a/les/client_handler.go
+++ b/les/client_handler.go
@@ -102,13 +102,7 @@ func (h *clientHandler) handle(p *serverPeer) error {
 	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
 
 	// Execute the LES handshake
-	var (
-		head   = h.backend.blockchain.CurrentHeader()
-		hash   = head.Hash()
-		number = head.Number.Uint64()
-		td     = h.backend.blockchain.GetTd(hash, number)
-	)
-	if err := p.Handshake(td, hash, number, h.backend.blockchain.Genesis().Hash(), nil); err != nil {
+	if err := p.Handshake(h.backend.blockchain.Genesis().Hash()); err != nil {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
 		return err
 	}
diff --git a/les/clientpool.go b/les/clientpool.go
index 4f6e3fafe01d2cf0f1174b7f2b52c8ebf712cb11..da0db6e622957c2daee4da03ec16b232a070a516 100644
--- a/les/clientpool.go
+++ b/les/clientpool.go
@@ -18,7 +18,6 @@ package les
 
 import (
 	"fmt"
-	"reflect"
 	"sync"
 	"time"
 
@@ -46,19 +45,6 @@ const (
 	inactiveTimeout      = time.Second * 10
 )
 
-var (
-	clientPoolSetup     = &nodestate.Setup{}
-	clientField         = clientPoolSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{}))
-	connAddressField    = clientPoolSetup.NewField("connAddr", reflect.TypeOf(""))
-	balanceTrackerSetup = lps.NewBalanceTrackerSetup(clientPoolSetup)
-	priorityPoolSetup   = lps.NewPriorityPoolSetup(clientPoolSetup)
-)
-
-func init() {
-	balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField)
-	priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority
-}
-
 // clientPool implements a client database that assigns a priority to each client
 // based on a positive and negative balance. Positive balance is externally assigned
 // to prioritized clients and is decreased with connection time and processed
@@ -119,8 +105,7 @@ type clientInfo struct {
 }
 
 // newClientPool creates a new client pool
-func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
-	ns := nodestate.NewNodeStateMachine(nil, nil, clock, clientPoolSetup)
+func newClientPool(ns *nodestate.NodeStateMachine, lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
 	pool := &clientPool{
 		ns:                  ns,
 		BalanceTrackerSetup: balanceTrackerSetup,
@@ -147,7 +132,7 @@ func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Du
 	})
 
 	ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
-		c, _ := ns.GetField(node, clientField).(*clientInfo)
+		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
 		if c == nil {
 			return
 		}
@@ -172,7 +157,7 @@ func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Du
 		if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
 			clientDeactivatedMeter.Mark(1)
 			log.Debug("Client deactivated", "id", node.ID())
-			c, _ := ns.GetField(node, clientField).(*clientInfo)
+			c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
 			if c == nil || !c.peer.allowInactive() {
 				pool.removePeer(node.ID())
 			}
@@ -190,13 +175,11 @@ func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Du
 		newCap, _ := newValue.(uint64)
 		totalConnected += newCap - oldCap
 		totalConnectedGauge.Update(int64(totalConnected))
-		c, _ := ns.GetField(node, clientField).(*clientInfo)
+		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
 		if c != nil {
 			c.peer.updateCapacity(newCap)
 		}
 	})
-
-	ns.Start()
 	return pool
 }
 
@@ -210,7 +193,6 @@ func (f *clientPool) stop() {
 		f.disconnectNode(node)
 	})
 	f.bt.Stop()
-	f.ns.Stop()
 }
 
 // connect should be called after a successful handshake. If the connection was
@@ -225,7 +207,7 @@ func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
 	}
 	// Dedup connected peers.
 	node, freeID := peer.Node(), peer.freeClientId()
-	if f.ns.GetField(node, clientField) != nil {
+	if f.ns.GetField(node, clientInfoField) != nil {
 		log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
 		return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String())
 	}
@@ -237,7 +219,7 @@ func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
 		connected:   true,
 		connectedAt: now,
 	}
-	f.ns.SetField(node, clientField, c)
+	f.ns.SetField(node, clientInfoField, c)
 	f.ns.SetField(node, connAddressField, freeID)
 	if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
 		f.disconnect(peer)
@@ -280,7 +262,7 @@ func (f *clientPool) disconnect(p clientPoolPeer) {
 // disconnectNode removes node fields and flags related to connected status
 func (f *clientPool) disconnectNode(node *enode.Node) {
 	f.ns.SetField(node, connAddressField, nil)
-	f.ns.SetField(node, clientField, nil)
+	f.ns.SetField(node, clientInfoField, nil)
 }
 
 // setDefaultFactors sets the default price factors applied to subsequently connected clients
@@ -299,7 +281,8 @@ func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
 	defer f.lock.Unlock()
 
 	// total priority active cap will be supported when the token issuer module is added
-	return f.capLimit, f.pp.ActiveCapacity(), 0
+	_, activeCap := f.pp.Active()
+	return f.capLimit, activeCap, 0
 }
 
 // setLimits sets the maximum number and total capacity of connected clients,
@@ -314,13 +297,13 @@ func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
 
 // setCapacity sets the assigned capacity of a connected client
 func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
-	c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+	c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
 	if c == nil {
 		if setCap {
 			return 0, fmt.Errorf("client %064x is not connected", node.ID())
 		}
 		c = &clientInfo{node: node}
-		f.ns.SetField(node, clientField, c)
+		f.ns.SetField(node, clientInfoField, c)
 		f.ns.SetField(node, connAddressField, freeID)
 		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
 			log.Error("BalanceField is missing", "node", node.ID())
@@ -328,7 +311,7 @@ func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint6
 		}
 		defer func() {
 			f.ns.SetField(node, connAddressField, nil)
-			f.ns.SetField(node, clientField, nil)
+			f.ns.SetField(node, clientInfoField, nil)
 		}()
 	}
 	var (
@@ -370,7 +353,7 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
 
 	if len(ids) == 0 {
 		f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
-			c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
 			if c != nil {
 				cb(c)
 			}
@@ -381,12 +364,12 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
 			if node == nil {
 				node = enode.SignNull(&enr.Record{}, id)
 			}
-			c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
 			if c != nil {
 				cb(c)
 			} else {
 				c = &clientInfo{node: node}
-				f.ns.SetField(node, clientField, c)
+				f.ns.SetField(node, clientInfoField, c)
 				f.ns.SetField(node, connAddressField, "")
 				if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
 					cb(c)
@@ -394,7 +377,7 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
 					log.Error("BalanceField is missing")
 				}
 				f.ns.SetField(node, connAddressField, nil)
-				f.ns.SetField(node, clientField, nil)
+				f.ns.SetField(node, clientInfoField, nil)
 			}
 		}
 	}
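
With the constructor change above, callers now own the NodeStateMachine lifecycle. A minimal sketch of the new wiring, assuming it lives in package les; the helper name is hypothetical, and the stop ordering mirrors LesServer.Stop below.

func newWiredClientPool(db ethdb.Database, minCap uint64, drop func(enode.ID)) (*clientPool, func()) {
	ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
	pool := newClientPool(ns, db, minCap, defaultConnectedBias, mclock.System{}, drop)
	ns.Start() // the pool no longer starts or stops the state machine itself
	stop := func() {
		pool.stop() // stop balance tracking first...
		ns.Stop()   // ...then shut down the state machine
	}
	return pool, stop
}
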
diff --git a/les/clientpool_test.go b/les/clientpool_test.go
index cfd1486b437e281e35aafaab916cee0c8e5f07c1..b1c38d374c8768caa5d785427bce770644c471a5 100644
--- a/les/clientpool_test.go
+++ b/les/clientpool_test.go
@@ -64,6 +64,11 @@ type poolTestPeer struct {
 	inactiveAllowed bool
 }
 
+func testStateMachine() *nodestate.NodeStateMachine {
+	return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
+}
+
 func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
 	return &poolTestPeer{
 		index:     i,
@@ -91,7 +96,7 @@ func (i *poolTestPeer) allowInactive() bool {
 }
 
 func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
-	temp := pool.ns.GetField(p.node, clientField) == nil
+	temp := pool.ns.GetField(p.node, clientInfoField) == nil
 	if temp {
 		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
 	}
@@ -128,8 +133,9 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando
 		disconnFn = func(id enode.ID) {
 			disconnCh <- int(id[0]) + int(id[1])<<8
 		}
-		pool = newClientPool(db, 1, 0, &clock, disconnFn)
+		pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn)
 	)
+	pool.ns.Start()
 
 	pool.setLimits(activeLimit, uint64(activeLimit))
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -233,7 +239,8 @@ func TestConnectPaidClient(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -248,7 +255,8 @@ func TestConnectPaidClientToSmallPool(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -266,7 +274,8 @@ func TestConnectPaidClientToFullPool(t *testing.T) {
 		db    = rawdb.NewMemoryDatabase()
 	)
 	removeFn := func(enode.ID) {} // Noop
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -295,7 +304,8 @@ func TestPaidClientKickedOut(t *testing.T) {
 	removeFn := func(id enode.ID) {
 		kickedCh <- int(id[0])
 	}
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	pool.bt.SetExpirationTCs(0, 0)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
@@ -325,7 +335,8 @@ func TestConnectFreeClient(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -341,7 +352,8 @@ func TestConnectFreeClientToFullPool(t *testing.T) {
 		db    = rawdb.NewMemoryDatabase()
 	)
 	removeFn := func(enode.ID) {} // Noop
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -370,7 +382,8 @@ func TestFreeClientKickedOut(t *testing.T) {
 		kicked = make(chan int, 100)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -411,7 +424,8 @@ func TestPositiveBalanceCalculation(t *testing.T) {
 		kicked = make(chan int, 10)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -434,7 +448,8 @@ func TestDowngradePriorityClient(t *testing.T) {
 		kicked = make(chan int, 10)
 	)
 	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
@@ -468,7 +483,8 @@ func TestNegativeBalanceCalculation(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
@@ -503,7 +519,8 @@ func TestInactiveClient(t *testing.T) {
 		clock mclock.Simulated
 		db    = rawdb.NewMemoryDatabase()
 	)
-	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(2, uint64(2))
 
diff --git a/les/enr_entry.go b/les/enr_entry.go
index 65d0d1fdb412dd6985af2295c58980e580c5c2a3..11e6273be5326ad9bf761d44d7519aa47c2cb3d5 100644
--- a/les/enr_entry.go
+++ b/les/enr_entry.go
@@ -36,7 +36,7 @@ func (e lesEntry) ENRKey() string {
 
 // setupDiscovery creates the node discovery source for the eth protocol.
 func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) {
-	if /*cfg.NoDiscovery || */ len(eth.config.DiscoveryURLs) == 0 {
+	if cfg.NoDiscovery || len(eth.config.DiscoveryURLs) == 0 {
 		return nil, nil
 	}
 	client := dnsdisc.NewClient(dnsdisc.Config{})
diff --git a/les/lespay/server/prioritypool.go b/les/lespay/server/prioritypool.go
index 52224e093e931c13e3ef69211d26794473c1874c..c0c33840ca8d57abe539f7f8a9ffffc52eb72d0d 100644
--- a/les/lespay/server/prioritypool.go
+++ b/les/lespay/server/prioritypool.go
@@ -253,12 +253,12 @@ func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
 	pp.tryActivate()
 }
 
-// ActiveCapacity returns the total capacity of currently active nodes
-func (pp *PriorityPool) ActiveCapacity() uint64 {
+// Active returns the number and total capacity of currently active nodes
+func (pp *PriorityPool) Active() (uint64, uint64) {
 	pp.lock.Lock()
 	defer pp.lock.Unlock()
 
-	return pp.activeCap
+	return pp.activeCount, pp.activeCap
 }
 
 // inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
diff --git a/les/peer.go b/les/peer.go
index 0549daf9a6485355460678f7fc0448046bdcb7e1..25bf446466806eaeed78101058fcd2ec73e83584 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -126,7 +126,7 @@ type peerCommons struct {
 	frozen       uint32    // Flag whether the peer is frozen.
 	announceType uint64    // New block announcement type.
 	serving      uint32    // The status indicates the peer is served.
-	headInfo     blockInfo // Latest block information.
+	headInfo     blockInfo // Last announced block information.
 
 	// Background task queue for caching peer tasks and executing in order.
 	sendQueue *utils.ExecQueue
@@ -255,6 +255,8 @@ func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, g
 	// Add some basic handshake fields
 	send = send.add("protocolVersion", uint64(p.version))
 	send = send.add("networkId", p.network)
+	// Note: the head info announced at handshake is only used by server peers;
+	// clients still announce dummy values for compatibility with older servers.
 	send = send.add("headTd", td)
 	send = send.add("headHash", head)
 	send = send.add("headNum", headNum)
@@ -273,24 +275,14 @@ func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, g
 	if size > allowedUpdateBytes {
 		return errResp(ErrRequestRejected, "")
 	}
-	var rGenesis, rHash common.Hash
-	var rVersion, rNetwork, rNum uint64
-	var rTd *big.Int
+	var rGenesis common.Hash
+	var rVersion, rNetwork uint64
 	if err := recv.get("protocolVersion", &rVersion); err != nil {
 		return err
 	}
 	if err := recv.get("networkId", &rNetwork); err != nil {
 		return err
 	}
-	if err := recv.get("headTd", &rTd); err != nil {
-		return err
-	}
-	if err := recv.get("headHash", &rHash); err != nil {
-		return err
-	}
-	if err := recv.get("headNum", &rNum); err != nil {
-		return err
-	}
 	if err := recv.get("genesisHash", &rGenesis); err != nil {
 		return err
 	}
@@ -303,7 +295,6 @@ func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, g
 	if int(rVersion) != p.version {
 		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
 	}
-	p.headInfo = blockInfo{Hash: rHash, Number: rNum, Td: rTd}
 	if recvCallback != nil {
 		return recvCallback(recv)
 	}
@@ -569,9 +560,11 @@ func (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) {
 }
 
 // Handshake executes the les protocol handshake, negotiating version number,
-// network IDs, difficulties, head and genesis blocks.
-func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
-	return p.handshake(td, head, headNum, genesis, func(lists *keyValueList) {
+// network IDs and genesis blocks.
+func (p *serverPeer) Handshake(genesis common.Hash) error {
+	// Note: there is no need to share the local head with a server, but older
+	// servers still require these fields, so we announce zero values.
+	return p.handshake(common.Big0, common.Hash{}, 0, genesis, func(lists *keyValueList) {
 		// Add some client-specific handshake fields
 		//
 		// Enable signed announcement randomly even the server is not trusted.
@@ -581,6 +574,21 @@ func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
 		}
 		*lists = (*lists).add("announceType", p.announceType)
 	}, func(recv keyValueMap) error {
+		var (
+			rHash common.Hash
+			rNum  uint64
+			rTd   *big.Int
+		)
+		if err := recv.get("headTd", &rTd); err != nil {
+			return err
+		}
+		if err := recv.get("headHash", &rHash); err != nil {
+			return err
+		}
+		if err := recv.get("headNum", &rNum); err != nil {
+			return err
+		}
+		p.headInfo = blockInfo{Hash: rHash, Number: rNum, Td: rTd}
 		if recv.get("serveChainSince", &p.chainSince) != nil {
 			p.onlyAnnounce = true
 		}
@@ -937,6 +945,9 @@ func (p *clientPeer) freezeClient() {
 // Handshake executes the les protocol handshake, negotiating version number,
 // network IDs, difficulties, head and genesis blocks.
 func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
+	// Note: clientPeer.headInfo should contain the last head we announced to the client.
+	// The values sent in the handshake are dummies kept for compatibility and should be ignored.
+	p.headInfo = blockInfo{Hash: head, Number: headNum, Td: td}
 	return p.handshake(td, head, headNum, genesis, func(lists *keyValueList) {
 		// Add some information which services server can offer.
 		if !server.config.UltraLightOnlyAnnounce {
@@ -1009,145 +1020,6 @@ type serverPeerSubscriber interface {
 	unregisterPeer(*serverPeer)
 }
 
-// clientPeerSubscriber is an interface to notify services about added or
-// removed client peers
-type clientPeerSubscriber interface {
-	registerPeer(*clientPeer)
-	unregisterPeer(*clientPeer)
-}
-
-// clientPeerSet represents the set of active client peers currently
-// participating in the Light Ethereum sub-protocol.
-type clientPeerSet struct {
-	peers map[string]*clientPeer
-	// subscribers is a batch of subscribers and peerset will notify
-	// these subscribers when the peerset changes(new client peer is
-	// added or removed)
-	subscribers []clientPeerSubscriber
-	closed      bool
-	lock        sync.RWMutex
-}
-
-// newClientPeerSet creates a new peer set to track the client peers.
-func newClientPeerSet() *clientPeerSet {
-	return &clientPeerSet{peers: make(map[string]*clientPeer)}
-}
-
-// subscribe adds a service to be notified about added or removed
-// peers and also register all active peers into the given service.
-func (ps *clientPeerSet) subscribe(sub clientPeerSubscriber) {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	ps.subscribers = append(ps.subscribers, sub)
-	for _, p := range ps.peers {
-		sub.registerPeer(p)
-	}
-}
-
-// unSubscribe removes the specified service from the subscriber pool.
-func (ps *clientPeerSet) unSubscribe(sub clientPeerSubscriber) {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	for i, s := range ps.subscribers {
-		if s == sub {
-			ps.subscribers = append(ps.subscribers[:i], ps.subscribers[i+1:]...)
-			return
-		}
-	}
-}
-
-// register adds a new peer into the peer set, or returns an error if the
-// peer is already known.
-func (ps *clientPeerSet) register(peer *clientPeer) error {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	if ps.closed {
-		return errClosed
-	}
-	if _, exist := ps.peers[peer.id]; exist {
-		return errAlreadyRegistered
-	}
-	ps.peers[peer.id] = peer
-	for _, sub := range ps.subscribers {
-		sub.registerPeer(peer)
-	}
-	return nil
-}
-
-// unregister removes a remote peer from the peer set, disabling any further
-// actions to/from that particular entity. It also initiates disconnection
-// at the networking layer.
-func (ps *clientPeerSet) unregister(id string) error {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	p, ok := ps.peers[id]
-	if !ok {
-		return errNotRegistered
-	}
-	delete(ps.peers, id)
-	for _, sub := range ps.subscribers {
-		sub.unregisterPeer(p)
-	}
-	p.Peer.Disconnect(p2p.DiscRequested)
-	return nil
-}
-
-// ids returns a list of all registered peer IDs
-func (ps *clientPeerSet) ids() []string {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	var ids []string
-	for id := range ps.peers {
-		ids = append(ids, id)
-	}
-	return ids
-}
-
-// peer retrieves the registered peer with the given id.
-func (ps *clientPeerSet) peer(id string) *clientPeer {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	return ps.peers[id]
-}
-
-// len returns if the current number of peers in the set.
-func (ps *clientPeerSet) len() int {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	return len(ps.peers)
-}
-
-// allClientPeers returns all client peers in a list.
-func (ps *clientPeerSet) allPeers() []*clientPeer {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	list := make([]*clientPeer, 0, len(ps.peers))
-	for _, p := range ps.peers {
-		list = append(list, p)
-	}
-	return list
-}
-
-// close disconnects all peers. No new peers can be registered
-// after close has returned.
-func (ps *clientPeerSet) close() {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	for _, p := range ps.peers {
-		p.Disconnect(p2p.DiscQuitting)
-	}
-	ps.closed = true
-}
-
 // serverPeerSet represents the set of active server peers currently
 // participating in the Light Ethereum sub-protocol.
 type serverPeerSet struct {
diff --git a/les/protocol.go b/les/protocol.go
index 4fd19f9beca1c7ad967af40a4b44b9ffda029569..19a9561ce9e414985c5aee40a131abaab927108d 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -174,12 +174,6 @@ var errorToString = map[int]string{
 	ErrMissingKey:              "Key missing from list",
 }
 
-type announceBlock struct {
-	Hash   common.Hash // Hash of one particular block being announced
-	Number uint64      // Number of one particular block being announced
-	Td     *big.Int    // Total difficulty of one particular block being announced
-}
-
 // announceData is the network packet for the block announcements.
 type announceData struct {
 	Hash       common.Hash // Hash of one particular block being announced
@@ -199,7 +193,7 @@ func (a *announceData) sanityCheck() error {
 
 // sign adds a signature to the block announcement by the given privKey
 func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
-	rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
+	rlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})
 	sig, _ := crypto.Sign(crypto.Keccak256(rlp), privKey)
 	a.Update = a.Update.add("sign", sig)
 }
@@ -210,7 +204,7 @@ func (a *announceData) checkSignature(id enode.ID, update keyValueMap) error {
 	if err := update.get("sign", &sig); err != nil {
 		return err
 	}
-	rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
+	rlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})
 	recPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)
 	if err != nil {
 		return err
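
With announceBlock folded into blockInfo, signed announcements round-trip through the shared struct. A minimal sketch, assuming package les; head and td stand for the current chain head header and total difficulty, and the key is a throwaway one for illustration.

key, _ := crypto.GenerateKey() // real servers sign with the node key
a := announceData{Hash: head.Hash(), Number: head.Number.Uint64(), Td: td}
a.sign(key) // RLP-encodes blockInfo{Hash, Number, Td} and signs its Keccak256 hash
// The signature travels in a.Update under the "sign" key and is verified
// on the receiving side by a.checkSignature.
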
diff --git a/les/server.go b/les/server.go
index 225a7ad1f03301e94b23dd068f2f170f5334d754..cbedce136c35abdb3434c7a5acccc82fe0fce23c 100644
--- a/les/server.go
+++ b/les/server.go
@@ -18,6 +18,7 @@ package les
 
 import (
 	"crypto/ecdsa"
+	"reflect"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common/mclock"
@@ -31,17 +32,32 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
+var (
+	serverSetup         = &nodestate.Setup{}
+	clientPeerField     = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{}))
+	clientInfoField     = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{}))
+	connAddressField    = serverSetup.NewField("connAddr", reflect.TypeOf(""))
+	balanceTrackerSetup = lps.NewBalanceTrackerSetup(serverSetup)
+	priorityPoolSetup   = lps.NewPriorityPoolSetup(serverSetup)
+)
+
+func init() {
+	balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField)
+	priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority
+}
+
 type LesServer struct {
 	lesCommons
 
+	ns          *nodestate.NodeStateMachine
 	archiveMode bool // Flag whether the ethereum node runs in archive mode.
-	peers       *clientPeerSet
-	serverset   *serverSet
 	handler     *serverHandler
+	broadcaster *broadcaster
 	lesTopics   []discv5.Topic
 	privateKey  *ecdsa.PrivateKey
 
@@ -60,6 +76,7 @@ type LesServer struct {
 }
 
 func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
+	ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
 	// Collect les protocol version information supported by local node.
 	lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
 	for i, pv := range AdvertiseProtocolVersions {
@@ -83,9 +100,9 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
 			bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true),
 			closeCh:          make(chan struct{}),
 		},
+		ns:           ns,
 		archiveMode:  e.ArchiveMode(),
-		peers:        newClientPeerSet(),
-		serverset:    newServerSet(),
+		broadcaster:  newBroadcaster(ns),
 		lesTopics:    lesTopics,
 		fcManager:    flowcontrol.NewClientManager(nil, &mclock.System{}),
 		servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
@@ -116,7 +133,7 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
 		srv.maxCapacity = totalRecharge
 	}
 	srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2)
-	srv.clientPool = newClientPool(srv.chainDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) })
+	srv.clientPool = newClientPool(ns, srv.chainDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient)
 	srv.clientPool.setDefaultFactors(lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1})
 
 	checkpoint := srv.latestLocalCheckpoint()
@@ -130,6 +147,13 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
 	node.RegisterAPIs(srv.APIs())
 	node.RegisterLifecycle(srv)
 
+	// Disconnect all peers when the node state machine shuts down.
+	ns.SubscribeField(clientPeerField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		if state.Equals(serverSetup.OfflineFlag()) && oldValue != nil {
+			oldValue.(*clientPeer).Peer.Disconnect(p2p.DiscRequested)
+		}
+	})
+	ns.Start()
 	return srv, nil
 }
 
@@ -158,7 +182,7 @@ func (s *LesServer) APIs() []rpc.API {
 
 func (s *LesServer) Protocols() []p2p.Protocol {
 	ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
-		if p := s.peers.peer(id.String()); p != nil {
+		if p := s.getClient(id); p != nil {
 			return p.Info()
 		}
 		return nil
@@ -173,6 +197,7 @@ func (s *LesServer) Protocols() []p2p.Protocol {
 // Start starts the LES server
 func (s *LesServer) Start() error {
 	s.privateKey = s.p2pSrv.PrivateKey
+	s.broadcaster.setSignerKey(s.privateKey)
 	s.handler.start()
 
 	s.wg.Add(1)
@@ -198,19 +223,11 @@ func (s *LesServer) Start() error {
 func (s *LesServer) Stop() error {
 	close(s.closeCh)
 
-	// Disconnect existing connections with other LES servers.
-	s.serverset.close()
-
-	// Disconnect existing sessions.
-	// This also closes the gate for any new registrations on the peer set.
-	// sessions which are already established but not added to pm.peers yet
-	// will exit when they try to register.
-	s.peers.close()
-
+	s.clientPool.stop()
+	s.ns.Stop()
 	s.fcManager.Stop()
 	s.costTracker.stop()
 	s.handler.stop()
-	s.clientPool.stop() // client pool should be closed after handler.
 	s.servingQueue.stop()
 
 	// Note, bloom trie indexer is closed by parent bloombits indexer.
@@ -279,3 +296,18 @@ func (s *LesServer) capacityManagement() {
 		}
 	}
 }
+
+func (s *LesServer) getClient(id enode.ID) *clientPeer {
+	if node := s.ns.GetNode(id); node != nil {
+		if p, ok := s.ns.GetField(node, clientPeerField).(*clientPeer); ok {
+			return p
+		}
+	}
+	return nil
+}
+
+func (s *LesServer) dropClient(id enode.ID) {
+	if p := s.getClient(id); p != nil {
+		p.Peer.Disconnect(p2p.DiscRequested)
+	}
+}
diff --git a/les/server_handler.go b/les/server_handler.go
index 583df960080c76e0192d4f2e5abe187687ce1eb9..d3e2c956b3ea4fa362390786aa7b6c5426e4d789 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -17,6 +17,7 @@
 package les
 
 import (
+	"crypto/ecdsa"
 	"encoding/binary"
 	"encoding/json"
 	"errors"
@@ -36,6 +37,8 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -91,7 +94,7 @@ func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb et
 // start starts the server handler.
 func (h *serverHandler) start() {
 	h.wg.Add(1)
-	go h.broadcastHeaders()
+	go h.broadcastLoop()
 }
 
 // stop stops the server handler.
@@ -123,47 +126,61 @@ func (h *serverHandler) handle(p *clientPeer) error {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
 		return err
 	}
-	if p.server {
-		if err := h.server.serverset.register(p); err != nil {
-			return err
+	// Reject duplicate peers, otherwise register the peer in the peer set.
+	var registered bool
+	if err := h.server.ns.Operation(func() {
+		if h.server.ns.GetField(p.Node(), clientPeerField) != nil {
+			registered = true
+		} else {
+			h.server.ns.SetFieldSub(p.Node(), clientPeerField, p)
 		}
+	}); err != nil {
+		return err
+	}
+	if registered {
+		return errAlreadyRegistered
+	}
+
+	defer func() {
+		h.server.ns.SetField(p.Node(), clientPeerField, nil)
+		if p.fcClient != nil { // is nil when connecting another server
+			p.fcClient.Disconnect()
+		}
+	}()
+	if p.server {
 		// connected to another server, no messages expected, just wait for disconnection
 		_, err := p.rw.ReadMsg()
 		return err
 	}
 	// Reject light clients if server is not synced.
+	//
+	// The check is done here so that "non-synced" les-server peers are still
+	// allowed to keep their connection.
 	if !h.synced() {
 		p.Log().Debug("Light server not synced, rejecting peer")
 		return p2p.DiscRequested
 	}
-	defer p.fcClient.Disconnect()
-
 	// Disconnect the inbound peer if it's rejected by clientPool
 	if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
 		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
 		return errFullClientPool
 	}
-	p.balance, _ = h.server.clientPool.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
+	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
 	if p.balance == nil {
 		return p2p.DiscRequested
 	}
-	// Register the peer locally
-	if err := h.server.peers.register(p); err != nil {
-		h.server.clientPool.disconnect(p)
-		p.Log().Error("Light Ethereum peer registration failed", "err", err)
-		return err
-	}
-	clientConnectionGauge.Update(int64(h.server.peers.len()))
+	activeCount, _ := h.server.clientPool.pp.Active()
+	clientConnectionGauge.Update(int64(activeCount))
 
 	var wg sync.WaitGroup // Wait group used to track all in-flight task routines.
 
 	connectedAt := mclock.Now()
 	defer func() {
 		wg.Wait() // Ensure all background task routines have exited.
-		h.server.peers.unregister(p.id)
 		h.server.clientPool.disconnect(p)
 		p.balance = nil
-		clientConnectionGauge.Update(int64(h.server.peers.len()))
+		activeCount, _ := h.server.clientPool.pp.Active()
+		clientConnectionGauge.Update(int64(activeCount))
 		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
 	}()
 	// Mark the peer starts to be served.
@@ -334,7 +351,6 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
 						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
 					}
 					if origin == nil {
-						p.bumpInvalid()
 						break
 					}
 					headers = append(headers, origin)
@@ -911,11 +927,11 @@ func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus {
 	return stat
 }
 
-// broadcastHeaders broadcasts new block information to all connected light
+// broadcastLoop broadcasts new block information to all connected light
 // clients. According to the agreement between client and server, server should
 // only broadcast new announcement if the total difficulty is higher than the
 // last one. Besides server will add the signature if client requires.
-func (h *serverHandler) broadcastHeaders() {
+func (h *serverHandler) broadcastLoop() {
 	defer h.wg.Done()
 
 	headCh := make(chan core.ChainHeadEvent, 10)
@@ -929,10 +945,6 @@ func (h *serverHandler) broadcastHeaders() {
 	for {
 		select {
 		case ev := <-headCh:
-			peers := h.server.peers.allPeers()
-			if len(peers) == 0 {
-				continue
-			}
 			header := ev.Block.Header()
 			hash, number := header.Hash(), header.Number.Uint64()
 			td := h.blockchain.GetTd(hash, number)
@@ -944,33 +956,79 @@ func (h *serverHandler) broadcastHeaders() {
 				reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()
 			}
 			lastHead, lastTd = header, td
-
 			log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
-			var (
-				signed         bool
-				signedAnnounce announceData
-			)
-			announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
-			for _, p := range peers {
-				p := p
-				switch p.announceType {
-				case announceTypeSimple:
-					if !p.queueSend(func() { p.sendAnnounce(announce) }) {
-						log.Debug("Drop announcement because queue is full", "number", number, "hash", hash)
-					}
-				case announceTypeSigned:
-					if !signed {
-						signedAnnounce = announce
-						signedAnnounce.sign(h.server.privateKey)
-						signed = true
-					}
-					if !p.queueSend(func() { p.sendAnnounce(signedAnnounce) }) {
-						log.Debug("Drop announcement because queue is full", "number", number, "hash", hash)
-					}
-				}
-			}
+			h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
 		case <-h.closeCh:
 			return
 		}
 	}
 }
+
+// broadcaster sends new header announcements to active client peers
+type broadcaster struct {
+	ns                           *nodestate.NodeStateMachine
+	privateKey                   *ecdsa.PrivateKey
+	lastAnnounce, signedAnnounce announceData
+}
+
+// newBroadcaster creates a new broadcaster
+func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster {
+	b := &broadcaster{ns: ns}
+	ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if newState.Equals(priorityPoolSetup.ActiveFlag) {
+			// send last announcement to activated peers
+			b.sendTo(node)
+		}
+	})
+	return b
+}
+
+// setSignerKey sets the signer key for signed announcements. Should be called before
+// starting the protocol handler.
+func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) {
+	b.privateKey = privateKey
+}
+
+// broadcast sends the given announcement to all active peers
+func (b *broadcaster) broadcast(announce announceData) {
+	b.ns.Operation(func() {
+		// iterate in an Operation to ensure that the active set does not change while iterating
+		b.lastAnnounce = announce
+		b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+			b.sendTo(node)
+		})
+	})
+}
+
+// sendTo sends the most recent announcement to the given node unless an announcement
+// with the same or higher Td has already been sent to it.
+func (b *broadcaster) sendTo(node *enode.Node) {
+	if b.lastAnnounce.Td == nil {
+		return
+	}
+	if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil {
+		if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
+			announce := b.lastAnnounce
+			switch p.announceType {
+			case announceTypeSimple:
+				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
+					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
+				} else {
+					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
+				}
+			case announceTypeSigned:
+				if b.signedAnnounce.Hash != b.lastAnnounce.Hash {
+					b.signedAnnounce = b.lastAnnounce
+					b.signedAnnounce.sign(b.privateKey)
+				}
+				announce := b.signedAnnounce
+				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
+					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
+				} else {
+					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
+				}
+			}
+			p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td}
+		}
+	}
+}
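
Taken together, the server-side announcement path now reduces to the following sketch; ns, privateKey and the header values come from the surrounding server code.

b := newBroadcaster(ns)    // also re-sends the last announcement to newly activated peers
b.setSignerKey(privateKey) // required before signed announcements can be produced
b.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
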
diff --git a/les/test_helper.go b/les/test_helper.go
index 9f9b28721e446bb69a8f49e523b2d614f76d3882..5a8d64f7670426e64842585fe2fd909716e39738 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -46,6 +46,7 @@ import (
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/nodestate"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -227,7 +228,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
 	return client.handler
 }
 
-func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Database, peers *clientPeerSet, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend) {
+func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Database, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend) {
 	var (
 		gspec = core.Genesis{
 			Config:   params.AllEthashProtocolChanges,
@@ -263,6 +264,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
 		}
 		oracle = checkpointoracle.New(checkpointConfig, getLocal)
 	}
+	ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
 	server := &LesServer{
 		lesCommons: lesCommons{
 			genesis:     genesis.Hash(),
@@ -274,7 +276,8 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
 			oracle:      oracle,
 			closeCh:     make(chan struct{}),
 		},
-		peers:        peers,
+		ns:           ns,
+		broadcaster:  newBroadcaster(ns),
 		servingQueue: newServingQueue(int64(time.Millisecond*10), 1),
 		defParams: flowcontrol.ServerParams{
 			BufLimit:    testBufLimit,
@@ -284,13 +287,14 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
 	}
 	server.costTracker, server.minCapacity = newCostTracker(db, server.config)
 	server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism.
-	server.clientPool = newClientPool(db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {})
+	server.clientPool = newClientPool(ns, db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {})
 	server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool
 	server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })
 	if server.oracle != nil {
 		server.oracle.Start(simulation)
 	}
 	server.servingQueue.setThreads(4)
+	ns.Start()
 	server.handler.start()
 	return server.handler, simulation
 }
@@ -463,7 +467,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba
 	if simClock {
 		clock = &mclock.Simulated{}
 	}
-	handler, b := newTestServerHandler(blocks, indexers, db, newClientPeerSet(), clock)
+	handler, b := newTestServerHandler(blocks, indexers, db, clock)
 
 	var peer *testPeer
 	if newPeer {
@@ -502,7 +506,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba
 
 func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallback, ulcServers []string, ulcFraction int, simClock bool, connect bool, disablePruning bool) (*testServer, *testClient, func()) {
 	sdb, cdb := rawdb.NewMemoryDatabase(), rawdb.NewMemoryDatabase()
-	speers, cpeers := newServerPeerSet(), newClientPeerSet()
+	speers := newServerPeerSet()
 
 	var clock mclock.Clock = &mclock.System{}
 	if simClock {
@@ -519,7 +523,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer
 	ccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2]
 	odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer)
 
-	server, b := newTestServerHandler(blocks, sindexers, sdb, cpeers, clock)
+	server, b := newTestServerHandler(blocks, sindexers, sdb, clock)
 	client := newTestClientHandler(b, odr, cIndexers, cdb, speers, ulcServers, ulcFraction)
 
 	scIndexer.Start(server.blockchain)
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 43b31872a498918bcf14316034754df9a0475f5b..127b4c7687aeabad9e1177b604d7b89905c85416 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/consensus/clique"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
@@ -31,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 )
 
@@ -243,26 +242,20 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
 	if err != nil {
 		t.Fatalf("can't create new chain config: %v", err)
 	}
-	// Create event Mux
-	mux := new(event.TypeMux)
 	// Create consensus engine
-	engine := ethash.New(ethash.Config{}, []string{}, false)
-	engine.SetThreads(-1)
-	// Create isLocalBlock
-	isLocalBlock := func(block *types.Block) bool {
-		return true
-	}
+	engine := clique.New(chainConfig.Clique, chainDB)
 	// Create Ethereum backend
-	limit := uint64(1000)
-	bc, err := core.NewBlockChain(chainDB, new(core.CacheConfig), chainConfig, engine, vm.Config{}, isLocalBlock, &limit)
+	bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("can't create new chain %v", err)
 	}
-	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+	statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
 	blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
 
-	pool := core.NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+	pool := core.NewTxPool(testTxPoolConfig, chainConfig, blockchain)
 	backend := NewMockBackend(bc, pool)
+	// Create event Mux
+	mux := new(event.TypeMux)
 	// Create Miner
-	return New(backend, &config, chainConfig, mux, engine, isLocalBlock), mux
+	return New(backend, &config, chainConfig, mux, engine, nil), mux
 }
diff --git a/oss-fuzz.sh b/oss-fuzz.sh
new file mode 100644
index 0000000000000000000000000000000000000000..23fb4dd412ca50b364a9cb5db2a94f7e8c8e2188
--- /dev/null
+++ b/oss-fuzz.sh
@@ -0,0 +1,56 @@
+#!/bin/bash -eu
+# Copyright 2020 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+# This file is for integration with Google OSS-Fuzz.
+# The following ENV variables are available when executing on OSS-fuzz:
+#
+# /out/         $OUT    Directory to store build artifacts (fuzz targets, dictionaries, options files, seed corpus archives).
+# /src/         $SRC    Directory to checkout source files.
+# /work/        $WORK   Directory to store intermediate files.
+#
+# $CC, $CXX, $CCC       The C and C++ compiler binaries.
+# $CFLAGS, $CXXFLAGS    C and C++ compiler flags.
+# $LIB_FUZZING_ENGINE   C++ compiler argument to link fuzz target against the prebuilt engine library (e.g. libFuzzer).
+
+function compile_fuzzer {
+  path=$SRC/go-ethereum/$1
+  func=$2
+  fuzzer=$3
+  echo "Building $fuzzer"
+  (cd $path && \
+        go-fuzz -func $func -o $WORK/$fuzzer.a . && \
+        echo "First stage built OK" && \
+        $CXX $CXXFLAGS $LIB_FUZZING_ENGINE $WORK/$fuzzer.a -o $OUT/$fuzzer && \
+        echo "Second stage built ok" )
+
+}
+
+compile_fuzzer common/bitutil  Fuzz      fuzzBitutilCompress
+compile_fuzzer crypto/bn256    FuzzAdd   fuzzBn256Add
+compile_fuzzer crypto/bn256    FuzzMul   fuzzBn256Mul
+compile_fuzzer crypto/bn256    FuzzPair  fuzzBn256Pair
+compile_fuzzer core/vm/runtime Fuzz      fuzzVmRuntime
+compile_fuzzer crypto/blake2b  Fuzz      fuzzBlake2b
+compile_fuzzer tests/fuzzers/keystore   Fuzz fuzzKeystore
+compile_fuzzer tests/fuzzers/txfetcher  Fuzz fuzzTxfetcher
+compile_fuzzer tests/fuzzers/rlp        Fuzz fuzzRlp
+compile_fuzzer tests/fuzzers/trie       Fuzz fuzzTrie
+compile_fuzzer tests/fuzzers/stacktrie  Fuzz fuzzStackTrie
+
+# TODO: this one doesn't build cleanly yet.
+#compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi
+
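
Each target passed to compile_fuzzer is expected to export the standard go-fuzz entry point. A minimal sketch of such a function; the body is illustrative only.

// Fuzz is the go-fuzz/libFuzzer entry point built by compile_fuzzer.
// Returning 1 marks the input as interesting for the corpus, 0 as not.
func Fuzz(data []byte) int {
	if len(data) == 0 {
		return 0
	}
	// feed data into the code under test here
	return 1
}
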
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index 7ef908814d7961772cbb0261d609ee970e7c995d..0ed3deab38a6863ae58e6054f5eabcfe4b5b61bb 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -184,7 +184,19 @@ func (n *ExecNode) Start(snapshots map[string][]byte) (err error) {
 	if err != nil {
 		return fmt.Errorf("error generating node config: %s", err)
 	}
-
+	// Expose the admin namespace via websocket if it isn't already exposed.
+	exposed := confCopy.Stack.WSExposeAll
+	if !exposed {
+		for _, api := range confCopy.Stack.WSModules {
+			if api == "admin" {
+				exposed = true
+				break
+			}
+		}
+	}
+	if !exposed {
+		confCopy.Stack.WSModules = append(confCopy.Stack.WSModules, "admin")
+	}
 	// start the one-shot server that waits for startup information
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
@@ -362,13 +374,44 @@ type execNodeConfig struct {
 	PeerAddrs map[string]string `json:"peer_addrs,omitempty"`
 }
 
+func initLogging() {
+	// Initialize logging with the default settings first; it is reconfigured
+	// below if a node config is present in the environment.
+	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
+	glogger.Verbosity(log.LvlInfo)
+	log.Root().SetHandler(glogger)
+
+	confEnv := os.Getenv(envNodeConfig)
+	if confEnv == "" {
+		return
+	}
+	var conf execNodeConfig
+	if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
+		return
+	}
+	var writer = os.Stderr
+	if conf.Node.LogFile != "" {
+		logWriter, err := os.Create(conf.Node.LogFile)
+		if err != nil {
+			return
+		}
+		writer = logWriter
+	}
+	var verbosity = log.LvlInfo
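+	// Only accept verbosity values inside the valid LvlCrit..LvlTrace range.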
+	if conf.Node.LogVerbosity <= log.LvlTrace && conf.Node.LogVerbosity >= log.LvlCrit {
+		verbosity = conf.Node.LogVerbosity
+	}
+	// Reinitialize the logger
+	glogger = log.NewGlogHandler(log.StreamHandler(writer, log.TerminalFormat(true)))
+	glogger.Verbosity(verbosity)
+	log.Root().SetHandler(glogger)
+}
+
 // execP2PNode starts a simulation node when the current binary is executed with
 // argv[0] being "p2p-node", reading the service / ID from argv[1] / argv[2]
 // and the node config from an environment variable.
 func execP2PNode() {
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
-	glogger.Verbosity(log.LvlInfo)
-	log.Root().SetHandler(glogger)
+	initLogging()
+
 	statusURL := os.Getenv(envStatusURL)
 	if statusURL == "" {
 		log.Crit("missing " + envStatusURL)
@@ -380,7 +423,7 @@ func execP2PNode() {
 	if stackErr != nil {
 		status.Err = stackErr.Error()
 	} else {
-		status.WSEndpoint = "ws://" + stack.WSEndpoint()
+		status.WSEndpoint = stack.WSEndpoint()
 		status.NodeInfo = stack.Server().NodeInfo()
 	}
 
@@ -454,7 +497,6 @@ func startExecNodeStack() (*node.Node, error) {
 			return nil, err
 		}
 		services[name] = service
-		stack.RegisterLifecycle(service)
 	}
 
 	// Add the snapshot API.
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index fd10da4319298907f29b685b2a6a208119b266c2..4fc7abc06a43f4116b4bb834ba6e25ac998b398c 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -99,8 +99,9 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
 			Dialer:          s,
 			EnableMsgEvents: config.EnableMsgEvents,
 		},
-		NoUSB:  true,
-		Logger: log.New("node.id", id.String()),
+		ExternalSigner: config.ExternalSigner,
+		NoUSB:          true,
+		Logger:         log.New("node.id", id.String()),
 	})
 	if err != nil {
 		return nil, err
@@ -263,7 +264,6 @@ func (sn *SimNode) Start(snapshots map[string][]byte) error {
 				continue
 			}
 			sn.running[name] = service
-			sn.node.RegisterLifecycle(service)
 		}
 	})
 	if regErr != nil {
diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go
index 716cde6a6c7d254ffa7b506777cf0a116a97d26a..1da464a10d41c54d5c60a5768b6cc423e972c56d 100644
--- a/p2p/simulations/adapters/types.go
+++ b/p2p/simulations/adapters/types.go
@@ -107,6 +107,9 @@ type NodeConfig struct {
 	// These values need to be checked and acted upon by node Services
 	Properties []string
 
+	// ExternalSigner specifies an external URI for a clef-type signer
+	ExternalSigner string
+
 	// Enode
 	node *enode.Node
 
@@ -117,6 +120,17 @@ type NodeConfig struct {
 	Reachable func(id enode.ID) bool
 
 	Port uint16
+
+	// LogFile is the log file name of the p2p node at runtime.
+	//
+	// The default value is empty so that logs go to the default
+	// log writer (standard error).
+	LogFile string
+
+	// LogVerbosity is the log verbosity of the p2p node at runtime.
+	//
+	// The default verbosity is INFO.
+	LogVerbosity log.Lvl
 }
 
 // nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding
@@ -125,10 +139,12 @@ type nodeConfigJSON struct {
 	ID              string   `json:"id"`
 	PrivateKey      string   `json:"private_key"`
 	Name            string   `json:"name"`
-	Services        []string `json:"services"`
+	Lifecycles      []string `json:"lifecycles"`
 	Properties      []string `json:"properties"`
 	EnableMsgEvents bool     `json:"enable_msg_events"`
 	Port            uint16   `json:"port"`
+	LogFile         string   `json:"logfile"`
+	LogVerbosity    int      `json:"log_verbosity"`
 }
 
 // MarshalJSON implements the json.Marshaler interface by encoding the config
@@ -137,10 +153,12 @@ func (n *NodeConfig) MarshalJSON() ([]byte, error) {
 	confJSON := nodeConfigJSON{
 		ID:              n.ID.String(),
 		Name:            n.Name,
-		Services:        n.Lifecycles,
+		Lifecycles:      n.Lifecycles,
 		Properties:      n.Properties,
 		Port:            n.Port,
 		EnableMsgEvents: n.EnableMsgEvents,
+		LogFile:         n.LogFile,
+		LogVerbosity:    int(n.LogVerbosity),
 	}
 	if n.PrivateKey != nil {
 		confJSON.PrivateKey = hex.EncodeToString(crypto.FromECDSA(n.PrivateKey))
@@ -175,10 +193,12 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error {
 	}
 
 	n.Name = confJSON.Name
-	n.Lifecycles = confJSON.Services
+	n.Lifecycles = confJSON.Lifecycles
 	n.Properties = confJSON.Properties
 	n.Port = confJSON.Port
 	n.EnableMsgEvents = confJSON.EnableMsgEvents
+	n.LogFile = confJSON.LogFile
+	n.LogVerbosity = log.Lvl(confJSON.LogVerbosity)
 
 	return nil
 }
@@ -208,6 +228,7 @@ func RandomNodeConfig() *NodeConfig {
 		Name:            fmt.Sprintf("node_%s", enodId.String()),
 		Port:            port,
 		EnableMsgEvents: true,
+		LogVerbosity:    log.LvlInfo,
 	}
 }
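For reference, the new fields survive a JSON round-trip through the (un)marshalers above; a small illustrative program (values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

// Round-trip a config through MarshalJSON/UnmarshalJSON to show the new
// "logfile" and "log_verbosity" fields surviving serialization.
func main() {
	cfg := adapters.RandomNodeConfig()
	cfg.LogFile = "/tmp/node.log" // illustrative path
	cfg.LogVerbosity = log.LvlDebug

	blob, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob)) // contains "logfile":"/tmp/node.log","log_verbosity":4

	var back adapters.NodeConfig
	if err := json.Unmarshal(blob, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.LogFile, back.LogVerbosity) // restored: /tmp/node.log, DEBUG
}
```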
 
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 42a6e2ec7ca6a85c3e8d15263c711fbc29cf5d58..d4512bf789a5ed28641eae8f645a9d9a5008e0e3 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -67,10 +67,10 @@ var GoerliBootnodes = []string{
 	"enode://a59e33ccd2b3e52d578f1fbd70c6f9babda2650f0760d6ff3b37742fdcdfdb3defba5d56d315b40c46b70198c7621e63ffa3f987389c7118634b0fefbbdfa7fd@51.15.119.157:40303",
 }
 
-// YoloV1Bootnodes are the enode URLs of the P2P bootstrap nodes running on the
-// YOLOv1 ephemeral test network.
-var YoloV1Bootnodes = []string{
-	"enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@35.178.210.161:30303",
+// YoloV2Bootnodes are the enode URLs of the P2P bootstrap nodes running on the
+// YOLOv2 ephemeral test network.
+var YoloV2Bootnodes = []string{
+	"enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@3.9.20.133:30303",
 }
 
 const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@"
diff --git a/params/config.go b/params/config.go
index e3924df144b972ac1a3c305a8fd05439e1d8e860..bdb7a55e67d723fb5023e08df4a89ddeb2c34668 100644
--- a/params/config.go
+++ b/params/config.go
@@ -31,7 +31,8 @@ var (
 	RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
 	RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
 	GoerliGenesisHash  = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
-	YoloV1GenesisHash  = common.HexToHash("0xc3fd235071f24f93865b0850bd2a2119b30f7224d18a0e34c7bbf549ad7e3d36")
+	// TODO: update with yolov2 values
+	YoloV2GenesisHash = common.HexToHash("0x498a7239036dd2cd09e2bb8a80922b78632017958c332b42044c250d603a8a3e")
 )
 
 // TrustedCheckpoints associates each known checkpoint with the genesis hash of
@@ -213,9 +214,9 @@ var (
 		Threshold: 2,
 	}
 
-	// YoloV1ChainConfig contains the chain parameters to run a node on the YOLOv1 test network.
-	YoloV1ChainConfig = &ChainConfig{
-		ChainID:             big.NewInt(133519467574833),
+	// YoloV2ChainConfig contains the chain parameters to run a node on the YOLOv2 test network.
+	YoloV2ChainConfig = &ChainConfig{
+		ChainID:             big.NewInt(133519467574834),
 		HomesteadBlock:      big.NewInt(0),
 		DAOForkBlock:        nil,
 		DAOForkSupport:      true,
@@ -227,7 +228,7 @@ var (
 		PetersburgBlock:     big.NewInt(0),
 		IstanbulBlock:       big.NewInt(0),
 		MuirGlacierBlock:    nil,
-		YoloV1Block:         big.NewInt(0),
+		YoloV2Block:         big.NewInt(0),
 		Clique: &CliqueConfig{
 			Period: 15,
 			Epoch:  30000,
@@ -320,7 +321,7 @@ type ChainConfig struct {
 	IstanbulBlock       *big.Int `json:"istanbulBlock,omitempty"`       // Istanbul switch block (nil = no fork, 0 = already on istanbul)
 	MuirGlacierBlock    *big.Int `json:"muirGlacierBlock,omitempty"`    // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
 
-	YoloV1Block *big.Int `json:"yoloV1Block,omitempty"` // YOLO v1: https://github.com/ethereum/EIPs/pull/2657 (Ephemeral testnet)
+	YoloV2Block *big.Int `json:"yoloV2Block,omitempty"` // YOLO v2: Gas repricings TODO @holiman add EIP references
 	EWASMBlock  *big.Int `json:"ewasmBlock,omitempty"`  // EWASM switch block (nil = no fork, 0 = already activated)
 
 	// Various consensus engines
@@ -376,7 +377,7 @@ func (c *ChainConfig) String() string {
 	default:
 		engine = "unknown"
 	}
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, YOLO v1: %v, Engine: %v}",
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, YOLO v2: %v, Engine: %v}",
 		c.ChainID,
 		c.HomesteadBlock,
 		c.DAOForkBlock,
@@ -389,7 +390,7 @@ func (c *ChainConfig) String() string {
 		c.PetersburgBlock,
 		c.IstanbulBlock,
 		c.MuirGlacierBlock,
-		c.YoloV1Block,
+		c.YoloV2Block,
 		engine,
 	)
 }
@@ -446,9 +447,9 @@ func (c *ChainConfig) IsIstanbul(num *big.Int) bool {
 	return isForked(c.IstanbulBlock, num)
 }
 
-// IsYoloV1 returns whether num is either equal to the YoloV1 fork block or greater.
-func (c *ChainConfig) IsYoloV1(num *big.Int) bool {
-	return isForked(c.YoloV1Block, num)
+// IsYoloV2 returns whether num is either equal to the YoloV2 fork block or greater.
+func (c *ChainConfig) IsYoloV2(num *big.Int) bool {
+	return isForked(c.YoloV2Block, num)
 }
 
 // IsEWASM returns whether num represents a block number after the EWASM fork
@@ -494,7 +495,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
 		{name: "petersburgBlock", block: c.PetersburgBlock},
 		{name: "istanbulBlock", block: c.IstanbulBlock},
 		{name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true},
-		{name: "yoloV1Block", block: c.YoloV1Block},
+		{name: "yoloV2Block", block: c.YoloV2Block},
 	} {
 		if lastFork.name != "" {
 			// Next one must be higher number
@@ -558,8 +559,8 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
 	if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
 		return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
 	}
-	if isForkIncompatible(c.YoloV1Block, newcfg.YoloV1Block, head) {
-		return newCompatError("YOLOv1 fork block", c.YoloV1Block, newcfg.YoloV1Block)
+	if isForkIncompatible(c.YoloV2Block, newcfg.YoloV2Block, head) {
+		return newCompatError("YOLOv2 fork block", c.YoloV2Block, newcfg.YoloV2Block)
 	}
 	if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
 		return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
@@ -631,7 +632,7 @@ type Rules struct {
 	ChainID                                                 *big.Int
 	IsHomestead, IsEIP150, IsEIP155, IsEIP158               bool
 	IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
-	IsYoloV1                                                bool
+	IsYoloV2                                                bool
 }
 
 // Rules ensures c's ChainID is not nil.
@@ -650,6 +651,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
 		IsConstantinople: c.IsConstantinople(num),
 		IsPetersburg:     c.IsPetersburg(num),
 		IsIstanbul:       c.IsIstanbul(num),
-		IsYoloV1:         c.IsYoloV1(num),
+		IsYoloV2:         c.IsYoloV2(num),
 	}
 }
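IsYoloV2 delegates to isForked, which is not shown in this hunk; in go-ethereum it is essentially the following, where a nil fork block means the fork is never scheduled:

```go
// isForked reports whether a fork scheduled at block s is active at block
// head; a nil fork block (or head) means the fork is not active.
func isForked(s, head *big.Int) bool {
	if s == nil || head == nil {
		return false
	}
	return s.Cmp(head) <= 0
}
```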
diff --git a/params/version.go b/params/version.go
index 77cbbe7800ffbf1ad361b2d1d28d3ce3d8493b0f..c0f356889a65e1cf7c39a9465601f309f54e5d1b 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
 const (
 	VersionMajor = 1        // Major version component of the current release
 	VersionMinor = 9        // Minor version component of the current release
-	VersionPatch = 23       // Patch version component of the current release
+	VersionPatch = 24       // Patch version component of the current release
 	VersionMeta  = "stable" // Version metadata to append to the version string
 )
 
diff --git a/rlp/raw.go b/rlp/raw.go
index c2a8517f62f13d5fa6bafabcaf7083422fda4d8b..3071e99cab1bf27a2c77fa4bfc0faaca622379ce 100644
--- a/rlp/raw.go
+++ b/rlp/raw.go
@@ -180,3 +180,74 @@ func readSize(b []byte, slen byte) (uint64, error) {
 	}
 	return s, nil
 }
+
+// AppendUint64 appends the RLP encoding of i to b, and returns the resulting slice.
+func AppendUint64(b []byte, i uint64) []byte {
+	if i == 0 {
+		return append(b, 0x80)
+	} else if i < 128 {
+		return append(b, byte(i))
+	}
+	switch {
+	case i < (1 << 8):
+		return append(b, 0x81, byte(i))
+	case i < (1 << 16):
+		return append(b, 0x82,
+			byte(i>>8),
+			byte(i),
+		)
+	case i < (1 << 24):
+		return append(b, 0x83,
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+	case i < (1 << 32):
+		return append(b, 0x84,
+			byte(i>>24),
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+	case i < (1 << 40):
+		return append(b, 0x85,
+			byte(i>>32),
+			byte(i>>24),
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+
+	case i < (1 << 48):
+		return append(b, 0x86,
+			byte(i>>40),
+			byte(i>>32),
+			byte(i>>24),
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+	case i < (1 << 56):
+		return append(b, 0x87,
+			byte(i>>48),
+			byte(i>>40),
+			byte(i>>32),
+			byte(i>>24),
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+
+	default:
+		return append(b, 0x88,
+			byte(i>>56),
+			byte(i>>48),
+			byte(i>>40),
+			byte(i>>32),
+			byte(i>>24),
+			byte(i>>16),
+			byte(i>>8),
+			byte(i),
+		)
+	}
+}
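The encodings AppendUint64 produces follow the canonical RLP rules for unsigned integers; a few sample outputs, matching the test vectors added in raw_test.go below:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Sample encodings produced by AppendUint64, per the canonical RLP rules.
func main() {
	fmt.Printf("%x\n", rlp.AppendUint64(nil, 0))        // 80 (zero encodes as the empty string)
	fmt.Printf("%x\n", rlp.AppendUint64(nil, 127))      // 7f (single bytes below 0x80 encode as themselves)
	fmt.Printf("%x\n", rlp.AppendUint64(nil, 128))      // 8180 (one-byte payload with 0x81 length prefix)
	fmt.Printf("%x\n", rlp.AppendUint64(nil, 0xFFFFFF)) // 83ffffff (three-byte big-endian payload)

	// Appending to an existing buffer leaves the existing bytes intact.
	fmt.Printf("%x\n", rlp.AppendUint64([]byte{1, 2, 3}, 127)) // 0102037f
}
```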
diff --git a/rlp/raw_test.go b/rlp/raw_test.go
index cdae4ff088593e3689a3cdaaa944ac13410c2057..c976c4f73429522cde5d85da6f4b7b2149864760 100644
--- a/rlp/raw_test.go
+++ b/rlp/raw_test.go
@@ -21,6 +21,7 @@ import (
 	"io"
 	"reflect"
 	"testing"
+	"testing/quick"
 )
 
 func TestCountValues(t *testing.T) {
@@ -239,3 +240,40 @@ func TestReadSize(t *testing.T) {
 		}
 	}
 }
+
+func TestAppendUint64(t *testing.T) {
+	tests := []struct {
+		input  uint64
+		slice  []byte
+		output string
+	}{
+		{0, nil, "80"},
+		{1, nil, "01"},
+		{2, nil, "02"},
+		{127, nil, "7F"},
+		{128, nil, "8180"},
+		{129, nil, "8181"},
+		{0xFFFFFF, nil, "83FFFFFF"},
+		{127, []byte{1, 2, 3}, "0102037F"},
+		{0xFFFFFF, []byte{1, 2, 3}, "01020383FFFFFF"},
+	}
+
+	for _, test := range tests {
+		x := AppendUint64(test.slice, test.input)
+		if !bytes.Equal(x, unhex(test.output)) {
+			t.Errorf("AppendUint64(%v, %d): got %x, want %s", test.slice, test.input, x, test.output)
+		}
+	}
+}
+
+func TestAppendUint64Random(t *testing.T) {
+	fn := func(i uint64) bool {
+		enc, _ := EncodeToBytes(i)
+		encAppend := AppendUint64(nil, i)
+		return bytes.Equal(enc, encAppend)
+	}
+	config := quick.Config{MaxCountScale: 50}
+	if err := quick.Check(fn, &config); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/tests/fuzzers/stacktrie/debug/main.go b/tests/fuzzers/stacktrie/debug/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ec28a8ef155565db83c4941be72c8897d882a16
--- /dev/null
+++ b/tests/fuzzers/stacktrie/debug/main.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/ethereum/go-ethereum/tests/fuzzers/stacktrie"
+)
+
+func main() {
+	if len(os.Args) != 2 {
+		fmt.Fprintf(os.Stderr, "Usage: debug <file>\n")
+		os.Exit(1)
+	}
+	crasher := os.Args[1]
+	data, err := ioutil.ReadFile(crasher)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error loading crasher %v: %v\n", crasher, err)
+		os.Exit(1)
+	}
+	stacktrie.Debug(data)
+}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
new file mode 100644
index 0000000000000000000000000000000000000000..a072ff772d204004f31c1cfd3cffec269f25a31b
--- /dev/null
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -0,0 +1,197 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stacktrie
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"sort"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/trie"
+	"golang.org/x/crypto/sha3"
+)
+
+type fuzzer struct {
+	input     io.Reader
+	exhausted bool
+	debugging bool
+}
+
+func (f *fuzzer) read(size int) []byte {
+	out := make([]byte, size)
+	if _, err := f.input.Read(out); err != nil {
+		f.exhausted = true
+	}
+	return out
+}
+
+func (f *fuzzer) readSlice(min, max int) []byte {
+	var a uint16
+	binary.Read(f.input, binary.LittleEndian, &a)
+	size := min + int(a)%(max-min)
+	out := make([]byte, size)
+	if _, err := f.input.Read(out); err != nil {
+		f.exhausted = true
+	}
+	return out
+}
+
+// spongeDb is a dummy db backend which accumulates writes in a sponge
+type spongeDb struct {
+	sponge hash.Hash
+	debug  bool
+}
+
+func (s *spongeDb) Has(key []byte) (bool, error)             { panic("implement me") }
+func (s *spongeDb) Get(key []byte) ([]byte, error)           { return nil, errors.New("no such elem") }
+func (s *spongeDb) Delete(key []byte) error                  { panic("implement me") }
+func (s *spongeDb) NewBatch() ethdb.Batch                    { return &spongeBatch{s} }
+func (s *spongeDb) Stat(property string) (string, error)     { panic("implement me") }
+func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
+func (s *spongeDb) Close() error                             { return nil }
+
+func (s *spongeDb) Put(key []byte, value []byte) error {
+	if s.debug {
+		fmt.Printf("db.Put %x : %x\n", key, value)
+	}
+	s.sponge.Write(key)
+	s.sponge.Write(value)
+	return nil
+}
+func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
+
+// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
+type spongeBatch struct {
+	db *spongeDb
+}
+
+func (b *spongeBatch) Put(key, value []byte) error {
+	b.db.Put(key, value)
+	return nil
+}
+func (b *spongeBatch) Delete(key []byte) error             { panic("implement me") }
+func (b *spongeBatch) ValueSize() int                      { return 100 }
+func (b *spongeBatch) Write() error                        { return nil }
+func (b *spongeBatch) Reset()                              {}
+func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
+
+type kv struct {
+	k, v []byte
+}
+type kvs []kv
+
+func (k kvs) Len() int {
+	return len(k)
+}
+
+func (k kvs) Less(i, j int) bool {
+	return bytes.Compare(k[i].k, k[j].k) < 0
+}
+
+func (k kvs) Swap(i, j int) {
+	k[j], k[i] = k[i], k[j]
+}
+
+// The function must return
+// 1 if the fuzzer should increase the priority of the
+//    given input during subsequent fuzzing (for example, the input is lexically
+//    correct and was parsed successfully);
+// -1 if the input must not be added to the corpus even if it gives new coverage; and
+// 0  otherwise;
+// other values are reserved for future use.
+func Fuzz(data []byte) int {
+	f := fuzzer{
+		input:     bytes.NewReader(data),
+		exhausted: false,
+	}
+	return f.fuzz()
+}
+
+func Debug(data []byte) int {
+	f := fuzzer{
+		input:     bytes.NewReader(data),
+		exhausted: false,
+		debugging: true,
+	}
+	return f.fuzz()
+}
+
+func (f *fuzzer) fuzz() int {
+	// The spongeDbs are used to compare the sequence of disk writes
+	// produced by the regular trie and by the stacktrie.
+	var (
+		spongeA     = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
+		dbA         = trie.NewDatabase(spongeA)
+		trieA, _    = trie.New(common.Hash{}, dbA)
+		spongeB     = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
+		trieB       = trie.NewStackTrie(spongeB)
+		vals        kvs
+		useful      bool
+		maxElements = 10000
+	)
+	// Fill the trie with elements
+	for i := 0; !f.exhausted && i < maxElements; i++ {
+		k := f.read(32)
+		v := f.readSlice(1, 500)
+		if f.exhausted {
+			// If the input was exhausted while reading, the value may be all
+			// zeroes, i.e. a 'deletion', which is not supported by the stacktrie
+			break
+		}
+		vals = append(vals, kv{k: k, v: v})
+		trieA.Update(k, v)
+		useful = true
+	}
+	if !useful {
+		return 0
+	}
+	// Flush trie -> database
+	rootA, err := trieA.Commit(nil)
+	if err != nil {
+		panic(err)
+	}
+	// Flush memdb -> disk (sponge)
+	dbA.Commit(rootA, false, nil)
+
+	// Stacktrie requires sorted insertion
+	sort.Sort(vals)
+	for _, kv := range vals {
+		if f.debugging {
+			fmt.Printf("{\"0x%x\" , \"0x%x\"} // stacktrie.Update\n", kv.k, kv.v)
+		}
+		trieB.Update(kv.k, kv.v)
+	}
+	rootB := trieB.Hash()
+	if _, err := trieB.Commit(); err != nil {
+		panic(err)
+	}
+	if rootA != rootB {
+		panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootB))
+	}
+	sumA := spongeA.sponge.Sum(nil)
+	sumB := spongeB.sponge.Sum(nil)
+	if !bytes.Equal(sumA, sumB) {
+		panic(fmt.Sprintf("write sequences differ: (trie) %x != %x (stacktrie)", sumA, sumB))
+	}
+	return 1
+}
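The sponge trick used here generalizes: rather than recording every write, fold each (key, value) pair into a running keccak and compare single digests at the end. A standalone sketch (`writeDigest` is an illustrative helper, not part of this change):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// writeDigest folds an ordered sequence of (key, value) writes into a
// single keccak digest, the same trick spongeDb uses above to compare
// the disk-write sequences of two trie implementations.
func writeDigest(writes [][2][]byte) []byte {
	sponge := sha3.NewLegacyKeccak256()
	for _, w := range writes {
		sponge.Write(w[0])
		sponge.Write(w[1])
	}
	return sponge.Sum(nil)
}

func main() {
	ab := writeDigest([][2][]byte{{[]byte("k1"), []byte("v1")}, {[]byte("k2"), []byte("v2")}})
	ba := writeDigest([][2][]byte{{[]byte("k2"), []byte("v2")}, {[]byte("k1"), []byte("v1")}})
	fmt.Println(bytes.Equal(ab, ba)) // false: the digest is order-sensitive
}
```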
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 98188380535f590e7cd4f36385f871f475f0a2f5..762ab5f3470a2f54d3035e8aa776bc16c86b3b7c 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -122,15 +122,22 @@ func Generate(input []byte) randTest {
 	return steps
 }
 
+// The function must return
+// 1 if the fuzzer should increase the priority of the
+//    given input during subsequent fuzzing (for example, the input is lexically
+//    correct and was parsed successfully);
+// -1 if the input must not be added to the corpus even if it gives new coverage; and
+// 0  otherwise;
+// other values are reserved for future use.
 func Fuzz(input []byte) int {
 	program := Generate(input)
 	if len(program) == 0 {
-		return -1
+		return 0
 	}
 	if err := runRandTest(program); err != nil {
 		panic(err)
 	}
-	return 0
+	return 1
 }
 
 func runRandTest(rt randTest) error {
diff --git a/tests/init.go b/tests/init.go
index d920c70e2e6285429d2a24857f5f8f3c6f895514..607c69ddb3b5a55f2ad98be96bd3664bc3af5770 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -141,7 +141,7 @@ var Forks = map[string]*params.ChainConfig{
 		PetersburgBlock:     big.NewInt(0),
 		IstanbulBlock:       big.NewInt(5),
 	},
-	"YOLOv1": {
+	"YOLOv2": {
 		ChainID:             big.NewInt(1),
 		HomesteadBlock:      big.NewInt(0),
 		EIP150Block:         big.NewInt(0),
@@ -151,9 +151,9 @@ var Forks = map[string]*params.ChainConfig{
 		ConstantinopleBlock: big.NewInt(0),
 		PetersburgBlock:     big.NewInt(0),
 		IstanbulBlock:       big.NewInt(0),
-		YoloV1Block:         big.NewInt(0),
+		YoloV2Block:         big.NewInt(0),
 	},
-	// This specification is subject to change, but is for now identical to YOLOv1
+	// This specification is subject to change, but is for now identical to YOLOv2
 	// for cross-client testing purposes
 	"Berlin": {
 		ChainID:             big.NewInt(1),
@@ -165,7 +165,7 @@ var Forks = map[string]*params.ChainConfig{
 		ConstantinopleBlock: big.NewInt(0),
 		PetersburgBlock:     big.NewInt(0),
 		IstanbulBlock:       big.NewInt(0),
-		YoloV1Block:         big.NewInt(0),
+		YoloV2Block:         big.NewInt(0),
 	},
 }
 
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index a999cba4715c11d9be8b7ec7b055706c63f79f52..28a5313129dfb31c23ec2b64a54c9dba181c0c72 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -186,6 +186,16 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
 	context.GetHash = vmTestBlockHash
 	evm := vm.NewEVM(context, statedb, config, vmconfig)
 
+	if config.IsYoloV2(context.BlockNumber) {
+		statedb.AddAddressToAccessList(msg.From())
+		if dst := msg.To(); dst != nil {
+			statedb.AddAddressToAccessList(*dst)
+			// If it's a create-tx, the destination will be added inside evm.create
+		}
+		for _, addr := range evm.ActivePrecompiles() {
+			statedb.AddAddressToAccessList(addr)
+		}
+	}
 	gaspool := new(core.GasPool)
 	gaspool.AddGas(block.GasLimit())
 	snapshot := statedb.Snapshot()
@@ -225,7 +235,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
 
 	var snaps *snapshot.Tree
 	if snapshotter {
-		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
+		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false)
 	}
 	statedb, _ = state.New(root, sdb, snaps)
 	return snaps, statedb
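The YoloV2 warm-up above pre-populates the access list with the sender, the destination (if any) and all active precompiles before execution. Factored as a helper it would look roughly like this; `prepareAccessList` is a hypothetical name, not part of this diff:

```go
package tests

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/vm"
)

// prepareAccessList warms up the access list the way RunNoVerify does
// above: the sender, the destination (if any) and all active precompiles
// start out in the list.
func prepareAccessList(statedb *state.StateDB, evm *vm.EVM, sender common.Address, dst *common.Address) {
	statedb.AddAddressToAccessList(sender)
	if dst != nil {
		// For create transactions the destination is added inside evm.create.
		statedb.AddAddressToAccessList(*dst)
	}
	for _, addr := range evm.ActivePrecompiles() {
		statedb.AddAddressToAccessList(addr)
	}
}
```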
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index ffccbbf4ace450e132b0d54def5b66ee6f0a0176..575a04022f3a8aad5afacb43ef1e0285a78d8971 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -314,19 +314,22 @@ func (st *StackTrie) hash() {
 			panic(err)
 		}
 	case extNode:
+		st.children[0].hash()
 		h = newHasher(false)
 		defer returnHasherToPool(h)
 		h.tmp.Reset()
-		st.children[0].hash()
-		// This is also possible:
-		//sz := hexToCompactInPlace(st.key)
-		//n := [][]byte{
-		//	st.key[:sz],
-		//	st.children[0].val,
-		//}
-		n := [][]byte{
-			hexToCompact(st.key),
-			st.children[0].val,
+		var valuenode node
+		if len(st.children[0].val) < 32 {
+			valuenode = rawNode(st.children[0].val)
+		} else {
+			valuenode = hashNode(st.children[0].val)
+		}
+		n := struct {
+			Key []byte
+			Val node
+		}{
+			Key: hexToCompact(st.key),
+			Val: valuenode,
 		}
 		if err := rlp.Encode(&h.tmp, n); err != nil {
 			panic(err)
@@ -406,6 +409,18 @@ func (st *StackTrie) Commit() (common.Hash, error) {
 		return common.Hash{}, ErrCommitDisabled
 	}
 	st.hash()
-	h := common.BytesToHash(st.val)
-	return h, nil
+	if len(st.val) != 32 {
+		// If the node's RLP isn't 32 bytes long, the node will not
+		// be hashed (and committed); st.val instead contains the rlp-encoding
+		// of the node itself. For the top-level node, we need to force the
+		// hashing+commit.
+		ret := make([]byte, 32)
+		h := newHasher(false)
+		defer returnHasherToPool(h)
+		h.sha.Reset()
+		h.sha.Write(st.val)
+		h.sha.Read(ret)
+		st.db.Put(ret, st.val)
+		return common.BytesToHash(ret), nil
+	}
+	return common.BytesToHash(st.val), nil
 }
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 26e3bade2717a61bd12621549f2e476ad312834b..d4488b4029c4b3cd37aafcb003d0f7987d03f4f5 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -240,3 +240,52 @@ func TestDerivableList(t *testing.T) {
 		}
 	}
 }
+
+// TestUpdateSmallNodes tests a case where the leaves are small (both key and value),
+// which causes a lot of nodes to be embedded within other nodes. This case was
+// found via fuzzing.
+func TestUpdateSmallNodes(t *testing.T) {
+	st := NewStackTrie(nil)
+	nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+	kvs := []struct {
+		K string
+		V string
+	}{
+		{"63303030", "3041"}, // stacktrie.Update
+		{"65", "3000"},       // stacktrie.Update
+	}
+	for _, kv := range kvs {
+		nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+		st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+	}
+	if nt.Hash() != st.Hash() {
+		t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
+	}
+}
+
+// TestUpdateVariableKeys contains a case in which the stacktrie fails: when keys
+// of different sizes are used, and the second one has the same prefix as the
+// first, the stacktrie fails, since it's unable to 'expand' an already added leaf.
+// For all practical purposes this is fine, since keys have a fixed length
+// in account and storage tries.
+//
+// The test is marked as 'skipped', and exists just to have the behaviour documented.
+// This case was found via fuzzing.
+func TestUpdateVariableKeys(t *testing.T) {
+	t.SkipNow()
+	st := NewStackTrie(nil)
+	nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+	kvs := []struct {
+		K string
+		V string
+	}{
+		{"0x33303534636532393561313031676174", "303030"},
+		{"0x3330353463653239356131303167617430", "313131"},
+	}
+	for _, kv := range kvs {
+		nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+		st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+	}
+	if nt.Hash() != st.Hash() {
+		t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
+	}
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 539451fbf4dd89b38f71fce57fdd17216f0dbc26..682dec157c61a0e786194ccc28b6bdd850041b5d 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -853,6 +853,42 @@ func TestCommitSequenceStackTrie(t *testing.T) {
 	}
 }
 
+// TestCommitSequenceSmallRoot tests that a trie which is essentially only a
+// small (<32 byte) shortnode with an included value is properly committed to a
+// database.
+// This case might not matter in practice, since all keys are 32 bytes, which means
+// that even a small trie containing a single leaf will have an extension whose
+// RLP encoding exceeds 32 bytes. However, it's still the correct thing to do.
+func TestCommitSequenceSmallRoot(t *testing.T) {
+	s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
+	db := NewDatabase(s)
+	trie, _ := New(common.Hash{}, db)
+	// Another sponge is used for the stacktrie commits
+	stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
+	stTrie := NewStackTrie(stackTrieSponge)
+	// Add a single small-element to the trie(s)
+	key := make([]byte, 5)
+	key[0] = 1
+	trie.TryUpdate(key, []byte{0x1})
+	stTrie.TryUpdate(key, []byte{0x1})
+	// Flush trie -> database
+	root, _ := trie.Commit(nil)
+	// Flush memdb -> disk (sponge)
+	db.Commit(root, false, nil)
+	// And flush stacktrie -> disk
+	stRoot, err := stTrie.Commit()
+	if err != nil {
+		t.Fatalf("Failed to commit stack trie %v", err)
+	}
+	if stRoot != root {
+		t.Fatalf("root wrong, got %x exp %x", stRoot, root)
+	}
+	fmt.Printf("root: %x\n", stRoot)
+	if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
+		t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
+	}
+}
+
 // BenchmarkCommitAfterHashFixedSize benchmarks the Commit (after Hash) of a fixed number of updates to a trie.
 // This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
 // storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple