diff --git a/core/blockchain.go b/core/blockchain.go
index 356340ae19df25ad24744e0c22d835a58189ceac..1c8a7fe60a9f1a4cce946822e9507b693186ae11 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -207,9 +207,10 @@ type BlockChain struct {
 	processor  Processor  // Block transaction processor interface
 	vmConfig   vm.Config
 
-	badBlocks       *lru.Cache                     // Bad block cache
-	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
-	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+	badBlocks          *lru.Cache                     // Bad block cache
+	shouldPreserve     func(*types.Block) bool        // Function used to determine whether should preserve the given block.
+	terminateInsert    func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+	writeLegacyJournal bool                           // Testing flag used to flush the snapshot journal in legacy format.
 }
 
 // NewBlockChain returns a fully initialised block chain using information
@@ -281,9 +282,29 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	// Make sure the state associated with the block is available
 	head := bc.CurrentBlock()
 	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
-		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
-		if err := bc.SetHead(head.NumberU64()); err != nil {
-			return nil, err
+		// The head state is missing: before attempting state recovery, find
+		// the disk layer point of the snapshot (if it's enabled) to make sure
+		// the rewound point is lower than the disk layer.
+		var diskRoot common.Hash
+		if bc.cacheConfig.SnapshotLimit > 0 {
+			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+		}
+		if diskRoot != (common.Hash{}) {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
+
+			snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
+			if err != nil {
+				return nil, err
+			}
+			// Chain rewound, persist old snapshot number to indicate recovery procedure
+			if snapDisk != 0 {
+				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
+			}
+		} else {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+			if err := bc.SetHead(head.NumberU64()); err != nil {
+				return nil, err
+			}
 		}
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
@@ -339,7 +360,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	if bc.cacheConfig.SnapshotLimit > 0 {
-		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
+		// If the chain was rewound past the snapshot persistent layer (causing
+		// a recovery block number to be persisted to disk), check if we're still
+		// in recovery mode and in that case, don't invalidate the snapshot on a
+		// head mismatch.
+		var recover bool
+
+		head := bc.CurrentBlock()
+		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
+			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
+			recover = true
+		}
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover)
 	}
 	// Take ownership of this particular state
 	go bc.update()
@@ -444,9 +476,25 @@ func (bc *BlockChain) loadLastState() error {
 // was fast synced or full synced and in which state, the method will try to
 // delete minimal data from disk whilst retaining chain consistency.
 func (bc *BlockChain) SetHead(head uint64) error {
+	_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
+	return err
+}
+
+// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
+// that the rewind must pass the specified state root. This method is meant to be
+// used when rewinding with snapshots enabled to ensure that we go back further
+// than the persistent disk layer. Depending on whether the node was fast synced
+// or full synced, and in which state, the method will try to delete minimal
+// data from disk whilst retaining chain consistency.
+//
+// The method returns the block number where the requested root cap was found.
+func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
 
+	// Track the block number of the requested root hash
+	var rootNumber uint64 // (no root == always 0)
+
 	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
 	// current freezer limit to start nuking if underflown
 	pivot := rawdb.ReadLastPivotNumber(bc.db)
@@ -462,8 +510,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
 				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
 				newHeadBlock = bc.genesisBlock
 			} else {
-				// Block exists, keep rewinding until we find one with state
+				// Block exists, keep rewinding until we find one with state,
+				// and keep going until we also exceed the optional threshold
+				// root hash
+				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
+
 				for {
+					// If a root threshold was requested but not yet crossed, check
+					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
+						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
+					}
 					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
 						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
@@ -474,8 +530,12 @@ func (bc *BlockChain) SetHead(head uint64) error {
 							newHeadBlock = bc.genesisBlock
 						}
 					}
-					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
-					break
+					if beyondRoot || newHeadBlock.NumberU64() == 0 {
+						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+						break
+					}
+					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
+					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
 				}
 			}
 			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
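
(Reviewer sketch.) The loop in the hunk above is the heart of SetHeadBeyondRoot: rewind until a block with state is found, but refuse to stop before the optional threshold root has been crossed. Below is a hedged, self-contained restatement in plain Go; block, hasState and rewind are invented stand-ins for types.Block, state.New and the bc.GetBlock walk, and the pivot/genesis fallbacks are omitted for brevity.

package main

import "fmt"

type block struct {
	number uint64
	root   string
	parent *block
}

// rewind walks back from head until it finds a block whose state is present,
// never resting before the optional threshold root has been crossed. It also
// returns the number at which the root was seen (0 if none was requested or
// it was never found), mirroring rootNumber in the diff.
func rewind(head *block, root string, hasState func(string) bool) (*block, uint64) {
	var rootNumber uint64
	beyondRoot := root == "" // no threshold requested: always beyond
	for {
		// If a root threshold was requested but not yet crossed, check it
		if root != "" && !beyondRoot && head.root == root {
			beyondRoot, rootNumber = true, head.number
		}
		if !hasState(head.root) {
			head = head.parent // state missing, rewind further
			continue
		}
		if beyondRoot || head.number == 0 {
			return head, rootNumber // state present and threshold crossed (or genesis)
		}
		head = head.parent // state present but threshold not crossed: keep rewinding
	}
}

func main() {
	g := &block{number: 0, root: "g"}
	b1 := &block{number: 1, root: "r1", parent: g}
	b2 := &block{number: 2, root: "r2", parent: b1}
	hasState := func(r string) bool { return r == "g" || r == "r1" } // state at G and C1 only
	at, n := rewind(b2, "r2", hasState)
	fmt.Println(at.number, n) // 1 2: rested at block 1, threshold root crossed at block 2
}
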
@@ -555,7 +615,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	bc.txLookupCache.Purge()
 	bc.futureBlocks.Purge()
 
-	return bc.loadLastState()
+	return rootNumber, bc.loadLastState()
 }
 
 // FastSyncCommitHead sets the current head block to the one defined by the hash
@@ -940,8 +1000,14 @@ func (bc *BlockChain) Stop() {
 	var snapBase common.Hash
 	if bc.snaps != nil {
 		var err error
-		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
-			log.Error("Failed to journal state snapshot", "err", err)
+		if bc.writeLegacyJournal {
+			if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil {
+				log.Error("Failed to journal state snapshot", "err", err)
+			}
+		} else {
+			if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+				log.Error("Failed to journal state snapshot", "err", err)
+			}
 		}
 	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
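
(Reviewer sketch.) For the startup side of the change: if the head state is gone but a snapshot disk layer exists, NewBlockChain now rewinds beyond that layer's root and persists the layer's block number, and on the next start the snapshot is opened in recovery mode instead of being invalidated on a head mismatch. A minimal, hedged restatement in plain Go follows; store, repairHead and recoverFlag are invented stand-ins for the rawdb reads/writes and BlockChain methods in the hunks above.

package main

import "fmt"

type hash [32]byte

var zeroHash hash

// store is a toy stand-in for the key-value database (rawdb in the diff).
type store struct {
	snapshotRoot   hash    // persisted disk-layer root, if a snapshot exists
	recoveryNumber *uint64 // block number of the disk layer, set while recovering
}

// repairHead mimics the branch added to NewBlockChain: when the head state is
// missing and a snapshot disk layer exists, rewind beyond that layer's root and
// persist its block number so the next startup knows it is still mid-recovery.
func repairHead(db *store, head uint64,
	setHeadBeyondRoot func(uint64, hash) (uint64, error),
	setHead func(uint64) error) error {

	if db.snapshotRoot != zeroHash {
		snapDisk, err := setHeadBeyondRoot(head, db.snapshotRoot)
		if err != nil {
			return err
		}
		if snapDisk != 0 {
			db.recoveryNumber = &snapDisk // WriteSnapshotRecoveryNumber in the diff
		}
		return nil
	}
	return setHead(head) // no snapshot: plain SetHead, as before
}

// recoverFlag mimics the check added before snapshot.New: recovery mode stays
// enabled only while the persisted disk-layer number is ahead of the chain head.
func recoverFlag(db *store, head uint64) bool {
	return db.recoveryNumber != nil && *db.recoveryNumber > head
}

func main() {
	db := &store{snapshotRoot: hash{1}}
	// Pretend the disk-layer root was found at block 90 during the rewind.
	_ = repairHead(db, 100,
		func(head uint64, root hash) (uint64, error) { return 90, nil },
		func(head uint64) error { return nil },
	)
	fmt.Println("recover:", recoverFlag(db, 80)) // true: disk layer 90 > head 80
}
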
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 27903dd06b354ea324c051ff561e9fc1dbb596eb..b5cd232a9c4f2a64324100c1123827e4895bee4c 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -25,6 +25,7 @@ import (
 	"math/big"
 	"os"
 	"testing"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -38,7 +39,10 @@ import (
 // committed to disk and then the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left
 // in the database for replaying.
-func TestShortRepair(t *testing.T) {
+func TestShortRepair(t *testing.T)              { testShortRepair(t, false) }
+func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) }
+
+func testShortRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -68,14 +72,17 @@ func TestShortRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain where the fast sync pivot point was
 // already committed, after which the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left in
 // the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) {
+func TestShortFastSyncedRepair(t *testing.T)              { testShortFastSyncedRepair(t, false) }
+func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
+
+func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -105,14 +112,17 @@ func TestShortFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain where the fast sync pivot point was
 // not yet committed, but the process crashed. In this case we expect the chain to
 // detect that it was fast syncing and not delete anything, since we can just pick
 // up directly where we left off.
-func TestShortFastSyncingRepair(t *testing.T) {
+func TestShortFastSyncingRepair(t *testing.T)              { testShortFastSyncingRepair(t, false) }
+func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
+
+func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -142,7 +152,7 @@ func TestShortFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -150,7 +160,10 @@ func TestShortFastSyncingRepair(t *testing.T) {
 // test scenario the side chain is below the committed block. In this case we expect
 // the canonical chain to be rolled back to the committed block, but the chain data
 // itself left in the database for replaying.
-func TestShortOldForkedRepair(t *testing.T) {
+func TestShortOldForkedRepair(t *testing.T)              { testShortOldForkedRepair(t, false) }
+func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) }
+
+func testShortOldForkedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -182,7 +195,7 @@ func TestShortOldForkedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -191,6 +204,13 @@ func TestShortOldForkedRepair(t *testing.T) {
 // this case we expect the canonical chain to be rolled back to the committed block,
 // but the chain data itself left in the database for replaying.
 func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+	testShortOldForkedFastSyncedRepair(t, false)
+}
+func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncedRepair(t, true)
+}
+
+func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -222,7 +242,7 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -231,6 +251,13 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
 // the chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+	testShortOldForkedFastSyncingRepair(t, false)
+}
+func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncingRepair(t, true)
+}
+
+func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -262,7 +289,7 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -270,7 +297,10 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
 // test scenario the side chain reaches above the committed block. In this case we
 // expect the canonical chain to be rolled back to the committed block, but the
 // chain data itself left in the database for replaying.
-func TestShortNewlyForkedRepair(t *testing.T) {
+func TestShortNewlyForkedRepair(t *testing.T)              { testShortNewlyForkedRepair(t, false) }
+func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) }
+
+func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -302,7 +332,7 @@ func TestShortNewlyForkedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -311,6 +341,13 @@ func TestShortNewlyForkedRepair(t *testing.T) {
 // In this case we expect the canonical chain to be rolled back to the committed
 // block, but the chain data itself left in the database for replaying.
 func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+	testShortNewlyForkedFastSyncedRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncedRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -342,7 +379,7 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -351,6 +388,13 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
 // case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
 func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+	testShortNewlyForkedFastSyncingRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncingRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6
@@ -382,14 +426,17 @@ func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where a
 // recent block was already committed to disk and then the process crashed. In this
 // case we expect the canonical chain to be rolled back to the committed block, but
 // the chain data itself left in the database for replaying.
-func TestShortReorgedRepair(t *testing.T) {
+func TestShortReorgedRepair(t *testing.T)              { testShortReorgedRepair(t, false) }
+func TestShortReorgedRepairWithSnapshots(t *testing.T) { testShortReorgedRepair(t, true) }
+
+func testShortReorgedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -421,7 +468,7 @@ func TestShortReorgedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -429,6 +476,13 @@ func TestShortReorgedRepair(t *testing.T) {
 // crashed. In this case we expect the canonical chain to be rolled back to the
 // committed block, but the chain data itself left in the database for replaying.
 func TestShortReorgedFastSyncedRepair(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, false)
+}
+func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, true)
+}
+
+func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -460,7 +514,7 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -468,6 +522,13 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 // this case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
 func TestShortReorgedFastSyncingRepair(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, false)
+}
+func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, true)
+}
+
+func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -499,14 +560,17 @@ func TestShortReorgedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - newer than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards kept as fast sync data.
-func TestLongShallowRepair(t *testing.T) {
+func TestLongShallowRepair(t *testing.T)              { testLongShallowRepair(t, false) }
+func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
+
+func testLongShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -541,14 +605,17 @@ func TestLongShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - older than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards deleted.
-func TestLongDeepRepair(t *testing.T) {
+func TestLongDeepRepair(t *testing.T)              { testLongDeepRepair(t, false) }
+func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
+
+func testLongDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -582,7 +649,7 @@ func TestLongDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -590,6 +657,13 @@ func TestLongDeepRepair(t *testing.T) {
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards kept as fast sync data.
 func TestLongFastSyncedShallowRepair(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, false)
+}
+func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, true)
+}
+
+func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -624,14 +698,17 @@ func TestLongFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
 // sync pivot point - older than the ancient limit - was already committed, after
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) {
+func TestLongFastSyncedDeepRepair(t *testing.T)              { testLongFastSyncedDeepRepair(t, false) }
+func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+
+func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -665,7 +742,7 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -674,6 +751,13 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
 func TestLongFastSyncingShallowRepair(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, false)
+}
+func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, true)
+}
+
+func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -708,7 +792,7 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -716,7 +800,10 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
 // process crashed. In this case we expect the chain to detect that it was fast
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) {
+func TestLongFastSyncingDeepRepair(t *testing.T)              { testLongFastSyncingDeepRepair(t, false) }
+func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+
+func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -751,7 +838,7 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -761,6 +848,13 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedShallowRepair(t *testing.T) {
+	testLongOldForkedShallowRepair(t, false)
+}
+func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedShallowRepair(t, true)
+}
+
+func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -796,7 +890,7 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -805,7 +899,10 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
 // chain is below the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongOldForkedDeepRepair(t *testing.T) {
+func TestLongOldForkedDeepRepair(t *testing.T)              { testLongOldForkedDeepRepair(t, false) }
+func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
+
+func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -840,7 +937,7 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -850,6 +947,13 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
 // to be rolled back to the committed block, with everything afterwards kept as
 // fast sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -885,7 +989,7 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -895,6 +999,13 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -929,7 +1040,7 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -939,6 +1050,13 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -974,7 +1092,7 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -984,6 +1102,13 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1019,7 +1144,7 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1029,6 +1154,13 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedShallowRepair(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, false)
+}
+func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, true)
+}
+
+func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1064,7 +1196,7 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1073,7 +1205,10 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
 // chain is above the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongNewerForkedDeepRepair(t *testing.T) {
+func TestLongNewerForkedDeepRepair(t *testing.T)              { testLongNewerForkedDeepRepair(t, false) }
+func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
+
+func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1108,7 +1243,7 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1118,6 +1253,13 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
 // to be rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1153,7 +1295,7 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1163,6 +1305,13 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1197,7 +1346,7 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1207,6 +1356,13 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1242,7 +1398,7 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1252,6 +1408,13 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1287,7 +1450,7 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
@@ -1295,7 +1458,10 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
 // to disk and then the process crashed. In this case we expect the chain to be
 // rolled back to the committed block, with everything afterwards kept as fast sync
 // data. The side chain completely nuked by the freezer.
-func TestLongReorgedShallowRepair(t *testing.T) {
+func TestLongReorgedShallowRepair(t *testing.T)              { testLongReorgedShallowRepair(t, false) }
+func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) }
+
+func testLongReorgedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1331,7 +1497,7 @@ func TestLongReorgedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
@@ -1339,7 +1505,10 @@ func TestLongReorgedShallowRepair(t *testing.T) {
 // to disk and then the process crashed. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted. The
 // side chain completely nuked by the freezer.
-func TestLongReorgedDeepRepair(t *testing.T) {
+func TestLongReorgedDeepRepair(t *testing.T)              { testLongReorgedDeepRepair(t, false) }
+func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) }
+
+func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1374,7 +1543,7 @@ func TestLongReorgedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1384,6 +1553,13 @@ func TestLongReorgedDeepRepair(t *testing.T) {
 // afterwards kept as fast sync data. The side chain completely nuked by the
 // freezer.
 func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1419,7 +1595,7 @@ func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1428,6 +1604,13 @@ func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
 // expect the canonical chain to be rolled back to the committed block, with
 // everything afterwards deleted. The side chain completely nuked by the freezer.
 func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, false)
+}
+func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, true)
+}
+
+func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1462,7 +1645,7 @@ func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1471,6 +1654,13 @@ func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1506,7 +1696,7 @@ func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
@@ -1515,6 +1705,13 @@ func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+	testLongReorgedFastSyncingDeepRepair(t, false)
+}
+func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingDeepRepair(t, true)
+}
+
+func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1550,13 +1747,13 @@ func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
-func testRepair(t *testing.T, tt *rewindTest) {
+func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	// The test case is hard to follow, so uncomment below to visualize the input
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(true))
+	// fmt.Println(tt.dump(true))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1575,8 +1772,18 @@ func testRepair(t *testing.T, tt *rewindTest) {
 	var (
 		genesis = new(Genesis).MustCommit(db)
 		engine  = ethash.NewFullFaker()
+		config  = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot by default
+		}
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if snapshots {
+		config.SnapshotLimit = 256
+		config.SnapshotWait = true
+	}
+	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
@@ -1599,6 +1806,11 @@ func testRepair(t *testing.T, tt *rewindTest) {
 	}
 	if tt.commitBlock > 0 {
 		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+		if snapshots {
+			if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+				t.Fatalf("Failed to flatten snapshots: %v", err)
+			}
+		}
 	}
 	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
 		t.Fatalf("Failed to import canonical chain tail: %v", err)
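
(Reviewer sketch.) All of the renames above funnel into one behavioural switch: the CacheConfig that testRepair hands to NewBlockChain. Below is a hedged condensation of that toggle; the cacheConfig struct is a simplified stand-in for core.CacheConfig, keeping only the fields the hunk above sets.

package main

import (
	"fmt"
	"time"
)

type cacheConfig struct {
	TrieCleanLimit int           // MB of memory allowance for clean trie nodes
	TrieDirtyLimit int           // MB of memory allowance for dirty trie nodes
	TrieTimeLimit  time.Duration // time limit between in-memory trie flushes
	SnapshotLimit  int           // MB for snapshot caching; 0 disables snapshots
	SnapshotWait   bool          // block startup until the snapshot is generated
}

// configFor mirrors the setup in testRepair: snapshots are disabled by default
// and only enabled (256MB cache, synchronous generation) when requested.
func configFor(snapshots bool) *cacheConfig {
	config := &cacheConfig{
		TrieCleanLimit: 256,
		TrieDirtyLimit: 256,
		TrieTimeLimit:  5 * time.Minute,
		SnapshotLimit:  0, // disable snapshot by default
	}
	if snapshots {
		config.SnapshotLimit = 256
		config.SnapshotWait = true
	}
	return config
}

func main() {
	for _, snapshots := range []bool{false, true} {
		fmt.Printf("snapshots=%v -> %+v\n", snapshots, configFor(snapshots))
	}
}
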
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index dc1368ff4b488e1777dff60a9923cf3345867051..45c4073eb4ce6e1e10301b55d09ab9819bf84637 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -26,6 +26,7 @@ import (
 	"os"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -150,7 +151,10 @@ func (tt *rewindTest) dump(crash bool) string {
 // chain to be rolled back to the committed block. Everything above the sethead
 // point should be deleted. In between the committed block and the requested head
 // the data can remain as "fast sync" data to avoid redownloading it.
-func TestShortSetHead(t *testing.T) {
+func TestShortSetHead(t *testing.T)              { testShortSetHead(t, false) }
+func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) }
+
+func testShortSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -181,7 +185,7 @@ func TestShortSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -190,7 +194,10 @@ func TestShortSetHead(t *testing.T) {
 // Everything above the sethead point should be deleted. In between the committed
 // block and the requested head the data can remain as "fast sync" data to avoid
 // redownloading it.
-func TestShortFastSyncedSetHead(t *testing.T) {
+func TestShortFastSyncedSetHead(t *testing.T)              { testShortFastSyncedSetHead(t, false) }
+func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
+
+func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -221,7 +228,7 @@ func TestShortFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -229,7 +236,10 @@ func TestShortFastSyncedSetHead(t *testing.T) {
 // detect that it was fast syncing and delete everything from the new head, since
 // we can just pick up fast syncing from there. The head full block should be set
 // to the genesis.
-func TestShortFastSyncingSetHead(t *testing.T) {
+func TestShortFastSyncingSetHead(t *testing.T)              { testShortFastSyncingSetHead(t, false) }
+func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
+
+func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -260,7 +270,7 @@ func TestShortFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -270,7 +280,10 @@ func TestShortFastSyncingSetHead(t *testing.T) {
 // above the sethead point should be deleted. In between the committed block and
 // the requested head the data can remain as "fast sync" data to avoid redownloading
 // it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedSetHead(t *testing.T) {
+func TestShortOldForkedSetHead(t *testing.T)              { testShortOldForkedSetHead(t, false) }
+func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) }
+
+func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -303,7 +316,7 @@ func TestShortOldForkedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -314,6 +327,13 @@ func TestShortOldForkedSetHead(t *testing.T) {
 // committed block and the requested head the data can remain as "fast sync" data
 // to avoid redownloading it. The side chain should be left alone as it was shorter.
 func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+	testShortOldForkedFastSyncedSetHead(t, false)
+}
+func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncedSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -346,7 +366,7 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -356,6 +376,13 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The head full block
 // should be set to the genesis.
 func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+	testShortOldForkedFastSyncingSetHead(t, false)
+}
+func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortOldForkedFastSyncingSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3
@@ -388,7 +415,7 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -402,7 +429,10 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
 // The side chain could be left alone if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSetHead(t *testing.T) {
+func TestShortNewlyForkedSetHead(t *testing.T)              { testShortNewlyForkedSetHead(t, false) }
+func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) }
+
+func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -435,7 +465,7 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -449,6 +479,13 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+	testShortNewlyForkedFastSyncedSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncedSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -481,7 +518,7 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -495,6 +532,13 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+	testShortNewlyForkedFastSyncingSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortNewlyForkedFastSyncingSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -527,7 +571,7 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where a
@@ -540,7 +584,10 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
 // The side chain could be left alone if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedSetHead(t *testing.T) {
+func TestShortReorgedSetHead(t *testing.T)              { testShortReorgedSetHead(t, false) }
+func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) }
+
+func testShortReorgedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -573,7 +620,7 @@ func TestShortReorgedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where
@@ -588,6 +635,13 @@ func TestShortReorgedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortReorgedFastSyncedSetHead(t *testing.T) {
+	testShortReorgedFastSyncedSetHead(t, false)
+}
+func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncedSetHead(t, true)
+}
+
+func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -620,7 +674,7 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a short canonical chain and a longer side chain, where
@@ -633,6 +687,13 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) {
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
 func TestShortReorgedFastSyncingSetHead(t *testing.T) {
+	testShortReorgedFastSyncingSetHead(t, false)
+}
+func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncingSetHead(t, true)
+}
+
+func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -665,7 +726,7 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) {
 		expHeadHeader:      7,
 		expHeadFastBlock:   7,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -674,7 +735,10 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) {
 // to the committed block. Everything above the sethead point should be deleted.
 // In between the committed block and the requested head the data can remain as
 // "fast sync" data to avoid redownloading it.
-func TestLongShallowSetHead(t *testing.T) {
+func TestLongShallowSetHead(t *testing.T)              { testLongShallowSetHead(t, false) }
+func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) }
+
+func testLongShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -710,7 +774,7 @@ func TestLongShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -718,7 +782,10 @@ func TestLongShallowSetHead(t *testing.T) {
 // sethead was called. In this case we expect the full chain to be rolled back
 // to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongDeepSetHead(t *testing.T) {
+func TestLongDeepSetHead(t *testing.T)              { testLongDeepSetHead(t, false) }
+func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) }
+
+func testLongDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -753,7 +820,7 @@ func TestLongDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -763,6 +830,13 @@ func TestLongDeepSetHead(t *testing.T) {
 // deleted. In between the committed block and the requested head the data can
 // remain as "fast sync" data to avoid redownloading it.
 func TestLongFastSyncedShallowSetHead(t *testing.T) {
+	testLongFastSyncedShallowSetHead(t, false)
+}
+func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncedShallowSetHead(t, true)
+}
+
+func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -798,7 +872,7 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -806,7 +880,10 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
 // which sethead was called. In this case we expect the full chain to be rolled
 // back to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongFastSyncedDeepSetHead(t *testing.T) {
+func TestLongFastSyncedDeepSetHead(t *testing.T)              { testLongFastSyncedDeepSetHead(t, false) }
+func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
+
+func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -841,7 +918,7 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -850,6 +927,13 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
 func TestLongFastSyncingShallowSetHead(t *testing.T) {
+	testLongFastSyncingShallowSetHead(t, false)
+}
+func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncingShallowSetHead(t, true)
+}
+
+func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -885,7 +969,7 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -894,6 +978,13 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
 func TestLongFastSyncingDeepSetHead(t *testing.T) {
+	testLongFastSyncingDeepSetHead(t, false)
+}
+func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongFastSyncingDeepSetHead(t, true)
+}
+
+func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -928,7 +1019,7 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -939,6 +1030,13 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
 // can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
 // by the freezer.
 func TestLongOldForkedShallowSetHead(t *testing.T) {
+	testLongOldForkedShallowSetHead(t, false)
+}
+func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedShallowSetHead(t, true)
+}
+
+func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -975,7 +1073,7 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -984,7 +1082,10 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
 // chain to be rolled back to the committed block. Since the ancient limit was
 // underflown, everything needs to be deleted onwards to avoid creating a gap. The
 // side chain is nuked by the freezer.
-func TestLongOldForkedDeepSetHead(t *testing.T) {
+func TestLongOldForkedDeepSetHead(t *testing.T)              { testLongOldForkedDeepSetHead(t, false) }
+func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) }
+
+func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1020,7 +1121,7 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1032,6 +1133,13 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
 // requested head the data can remain as "fast sync" data to avoid redownloading
 // it. The side chain is nuked by the freezer.
 func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+	testLongOldForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -1068,7 +1176,7 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1079,6 +1187,13 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
 // underflown, everything needs to be deleted onwards to avoid creating a gap. The
 // side chain is nuked by the freezer.
 func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+	testLongOldForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1114,7 +1229,7 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1125,6 +1240,13 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
 func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+	testLongOldForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -1161,7 +1283,7 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1172,6 +1294,13 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
 // just pick up fast syncing from there. The side chain is completely nuked by the
 // freezer.
 func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+	testLongOldForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1207,7 +1336,7 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1216,6 +1345,13 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongShallowSetHead.
 func TestLongNewerForkedShallowSetHead(t *testing.T) {
+	testLongNewerForkedShallowSetHead(t, false)
+}
+func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1252,7 +1388,7 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1261,6 +1397,13 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongDeepSetHead.
 func TestLongNewerForkedDeepSetHead(t *testing.T) {
+	testLongNewerForkedDeepSetHead(t, false)
+}
+func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1296,7 +1439,7 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1305,6 +1448,13 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
 // the side chain is above the committed block. In this case the freezer will delete
 // the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
 func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1341,7 +1491,7 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1350,6 +1500,13 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
 // the side chain is above the committed block. In this case the freezer will delete
 // the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
 func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1385,7 +1542,7 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1394,6 +1551,13 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
 func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1430,7 +1594,7 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1439,6 +1603,13 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
 func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1474,14 +1645,17 @@ func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - newer than the ancient limit - was already committed
 // to disk and then sethead was called. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongShallowSetHead.
-func TestLongReorgedShallowSetHead(t *testing.T) {
+func TestLongReorgedShallowSetHead(t *testing.T)              { testLongReorgedShallowSetHead(t, false) }
+func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) }
+
+func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1518,14 +1692,17 @@ func TestLongReorgedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - older than the ancient limit - was already committed
 // to disk and then sethead was called. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongDeepSetHead.
-func TestLongReorgedDeepSetHead(t *testing.T) {
+func TestLongReorgedDeepSetHead(t *testing.T)              { testLongReorgedDeepSetHead(t, false) }
+func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) }
+
+func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1561,7 +1738,7 @@ func TestLongReorgedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1570,6 +1747,13 @@ func TestLongReorgedDeepSetHead(t *testing.T) {
 // freezer will delete the sidechain since it's dangling, reverting to
 // TestLongFastSyncedShallowSetHead.
 func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
+	testLongReorgedFastSyncedShallowSetHead(t, false)
+}
+func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1606,7 +1790,7 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1615,6 +1799,13 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
 // freezer will delete the sidechain since it's dangling, reverting to
 // TestLongFastSyncedDeepSetHead.
 func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
+	testLongReorgedFastSyncedDeepSetHead(t, false)
+}
+func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1650,7 +1841,7 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1660,6 +1851,13 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
 func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
+	testLongReorgedFastSyncingShallowSetHead(t, false)
+}
+func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1696,7 +1894,7 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a longer
@@ -1706,6 +1904,13 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
 // head, since we can just pick up fast syncing from there. The side chain is
 // completely nuked by the freezer.
 func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
+	testLongReorgedFastSyncingDeepSetHead(t, false)
+}
+func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1741,13 +1946,13 @@ func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
-func testSetHead(t *testing.T, tt *rewindTest) {
+func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
 	// It's hard to follow the test case, visualize the input
-	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(false))
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump(false))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1766,8 +1971,18 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 	var (
 		genesis = new(Genesis).MustCommit(db)
 		engine  = ethash.NewFullFaker()
+		config  = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot
+		}
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if snapshots {
+		config.SnapshotLimit = 256
+		config.SnapshotWait = true
+	}
+	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
@@ -1790,6 +2005,11 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 	}
 	if tt.commitBlock > 0 {
 		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+		if snapshots {
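+			// Cap with zero retained diff layers to flatten the snapshot onto the committed root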
+			if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+				t.Fatalf("Failed to flatten snapshots: %v", err)
+			}
+		}
 	}
 	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
 		t.Fatalf("Failed to import canonical chain tail: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb499593e38a63cf8b4131b8462d493b09ab709e
--- /dev/null
+++ b/core/blockchain_snapshot_test.go
@@ -0,0 +1,732 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. crash) and restart can recover
+// the snapshot properly if the snapshot is enabled.
+
+package core
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// snapshotTest is a test case for snapshot recovery. It can be used for
+// simulating these scenarios:
+// (i)   Geth restarts normally with valid legacy snapshot
+// (ii)  Geth restarts normally with valid new-format snapshot
+// (iii) Geth restarts after a crash, with a broken legacy snapshot
+// (iv)  Geth restarts after a crash, with a broken new-format snapshot
+// (v)   Geth restarts normally, but it's requested to be rewound to a lower point via SetHead
+// (vi)  Geth restarts normally with a stale snapshot
+type snapshotTest struct {
+	legacy  bool   // Flag whether the loaded snapshot is in legacy format
+	crash   bool   // Flag whether Geth restarts from a previous crash
+	gapped  int    // Number of blocks to insert without enabling snapshot
+	setHead uint64 // Block number to set head back to
+
+	chainBlocks   int    // Number of blocks to generate for the canonical chain
+	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
+	commitBlock   uint64 // Block number for which to commit the state to disk
+
+	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
+	expHeadHeader      uint64 // Block number of the expected head header
+	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
+	expHeadBlock       uint64 // Block number of the expected head full block
+	expSnapshotBottom  uint64 // The block height corresponding to the snapshot disk layer
+}
+
+func (tt *snapshotTest) dump() string {
+	buffer := new(strings.Builder)
+
+	fmt.Fprint(buffer, "Chain:\n  G")
+	for i := 0; i < tt.chainBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprint(buffer, " (HEAD)\n\n")
+
+	fmt.Fprintf(buffer, "Commit:   G")
+	if tt.commitBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	fmt.Fprintf(buffer, "Snapshot: G")
+	if tt.snapshotBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.snapshotBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	if tt.crash {
+		fmt.Fprintf(buffer, "\nCRASH\n\n")
+	} else {
+		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setHead)
+	}
+	fmt.Fprintf(buffer, "------------------------------\n\n")
+
+	fmt.Fprint(buffer, "Expected in leveldb:\n  G")
+	for i := 0; i < tt.expCanonicalBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprintf(buffer, "\n\n")
+	fmt.Fprintf(buffer, "Expected head header    : C%d\n", tt.expHeadHeader)
+	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+	if tt.expHeadBlock == 0 {
+		fmt.Fprintf(buffer, "Expected head block     : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected head block     : C%d\n", tt.expHeadBlock)
+	}
+	if tt.expSnapshotBottom == 0 {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : C%d\n", tt.expSnapshotBottom)
+	}
+	return buffer.String()
+}
+
+// Tests a Geth restart with a valid snapshot. Before the shutdown, the whole
+// snapshot journal will be persisted correctly. In this case no snapshot
+// recovery is required.
+func TestRestartWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth restart with a valid but "legacy" snapshot. Before the shutdown,
+// the whole snapshot journal will be persisted correctly. In this case no
+// snapshot recovery is required.
+func TestRestartWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the new
+// head must be lower than the disk layer. Since there is no committed point,
+// the chain should be rewound to genesis and the disk layer should be left
+// for recovery.
+func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the new
+// head must be lower than the disk layer. Since there is only a low committed
+// point, the chain should be rewound to that committed point and the disk
+// layer should be left for recovery.
+func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and
+// the new head must be lower than the disk layer. Since there is only a high
+// committed point, the chain should be rewound to genesis and the disk layer
+// should be left for recovery.
+func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to the
+// genesis because there is no committed point.
+func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to
+// block-2 because it's committed to disk.
+func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C2
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  2, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests a Geth restart after a crash, with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+//
+// The new head here refers to the genesis, because:
+//   - the state of block-6 is committed to disk
+//   - the legacy disk layer of block-4 is committed to disk
+//   - the head is rewound to the genesis in order to find an available
+//     state lower than the disk layer
+func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
+	})
+}
+
+// Tests that Geth was running with snapshot enabled, then restarts without
+// enabling snapshot, and after that re-enables the snapshot again. In this
+// case the snapshot should be rebuilt from the latest chain head.
+func TestGappedNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests that Geth was running with the legacy snapshot enabled, then restarts
+// without enabling snapshot, and after that re-enables the snapshot again.
+// In this case the snapshot should be rebuilt from the latest chain head.
+func TestGappedLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
+	})
+}
+
+// Tests that Geth was running with snapshot enabled and SetHead is applied.
+// In this case the head is rewound to the target (with state available). After
+// that the chain is restarted and the original disk layer is kept.
+func TestSetHeadWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
+	})
+}
+
+// Tests that Geth was running with the snapshot (legacy format) enabled and
+// SetHead is applied. In this case the head is rewound to the target (with
+// state available). After that the chain is restarted and the original disk
+// layer is kept.
+func TestSetHeadWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
+	})
+}
+
+func testSnapshot(t *testing.T, tt *snapshotTest) {
+	// It's hard to follow the test case, visualize the input
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump())
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+		gendb   = rawdb.NewMemoryDatabase()
+
+		// Snapshot is enabled, the first snapshot is created from the Genesis.
+		// The snapshot memory allowance is 256MB, which means no snapshot flush
+		// will happen during block insertion.
+		cacheConfig = defaultCacheConfig
+	)
+	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, tt.chainBlocks, func(i int, b *BlockGen) {})
+
+	// Insert the blocks with configured settings.
+	var breakpoints []uint64
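+	// Order the breakpoints ascending so the chain pauses at the lower block first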
+	if tt.commitBlock > tt.snapshotBlock {
+		breakpoints = append(breakpoints, tt.snapshotBlock, tt.commitBlock)
+	} else {
+		breakpoints = append(breakpoints, tt.commitBlock, tt.snapshotBlock)
+	}
+	var startPoint uint64
+	for _, point := range breakpoints {
+		if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
+			t.Fatalf("Failed to import canonical chain start: %v", err)
+		}
+		startPoint = point
+
+		if tt.commitBlock > 0 && tt.commitBlock == point {
+			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+		}
+		if tt.snapshotBlock > 0 && tt.snapshotBlock == point {
+			if tt.legacy {
+				// Here we commit the snapshot disk root to simulate
+				// committing the legacy snapshot.
+				rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
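+				// No generator entry is written here: legacy snapshots keep
+				// the generator inside the journal rather than under a
+				// dedicated database key.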
+			} else {
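+				// Cap with zero retained diff layers to flatten everything into the disk layer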
+				chain.snaps.Cap(blocks[point-1].Root(), 0)
+				diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+				if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+					t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+				}
+			}
+		}
+	}
+	if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Set the flag for writing the legacy journal if necessary
+	if tt.legacy {
+		chain.writeLegacyJournal = true
+	}
+	// Pull the plug on the database, simulating a hard crash
+	if tt.crash {
+		db.Close()
+
+		// Start a new blockchain back up and see where the repair leads us
+		db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+		if err != nil {
+			t.Fatalf("Failed to reopen persistent database: %v", err)
+		}
+		defer db.Close()
+
+		// The interesting thing is: instead of starting the blockchain after
+		// the crash, we restart it twice here: once after the crash and once
+		// after a normal stop. It's used to ensure the broken snapshot
+		// can be detected in both cases.
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.gapped > 0 {
+		// Insert blocks without enabling snapshot if gapping is required.
+		chain.Stop()
+
+		gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {})
+
+		// Insert a few more blocks without enabling snapshot
+		var cacheConfig = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot
+		}
+		chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.InsertChain(gappedBlocks)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.setHead != 0 {
+		// Rewind the chain if setHead operation is required.
+		chain.SetHead(tt.setHead)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else {
+		chain.Stop()
+		// Restart the chain normally
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	}
+
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, blocks)
+	verifyCutoff(t, chain, true, blocks, tt.expCanonicalBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	// Check the snapshot disk layer, ensure it matches the expected block
+	block := chain.GetBlockByNumber(tt.expSnapshotBottom)
+	if block == nil {
+		t.Errorf("The corresponding block[%d] of the snapshot disk layer is missing", tt.expSnapshotBottom)
+	} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
+		t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
+	}
+}
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index ecd4e65978eeb4857c094392486192f6f0d785fc..5bd48ad5fad523ffc32809f4243d979821bfd815 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -17,6 +17,8 @@
 package rawdb
 
 import (
+	"encoding/binary"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
@@ -118,3 +120,58 @@ func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
 		log.Crit("Failed to remove snapshot journal", "err", err)
 	}
 }
+
+// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
+// the last shutdown.
+func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
+	data, _ := db.Get(snapshotGeneratorKey)
+	return data
+}
+
+// WriteSnapshotGenerator stores the serialized snapshot generator to save at
+// shutdown.
+func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
+	if err := db.Put(snapshotGeneratorKey, generator); err != nil {
+		log.Crit("Failed to store snapshot generator", "err", err)
+	}
+}
+
+// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
+// the last shutdown
+func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotGeneratorKey); err != nil {
+		log.Crit("Failed to remove snapshot generator", "err", err)
+	}
+}
+
+// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
+// snapshot layer.
+func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 {
+	data, _ := db.Get(snapshotRecoveryKey)
+	if len(data) != 8 {
+		// Entry is missing or malformed, treat the recovery marker as unset
+		return nil
+	}
+	number := binary.BigEndian.Uint64(data)
+	return &number
+}
+
+// WriteSnapshotRecoveryNumber stores the block number of the last persisted
+// snapshot layer.
+func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) {
+	var buf [8]byte
+	binary.BigEndian.PutUint64(buf[:], number)
+	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
+		log.Crit("Failed to store snapshot recovery number", "err", err)
+	}
+}
+
+// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
+// snapshot layer.
+func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotRecoveryKey); err != nil {
+		log.Crit("Failed to remove snapshot recovery number", "err", err)
+	}
+}
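+
+// A minimal round-trip sketch of the recovery-number accessors (illustrative
+// only; assumes an in-memory ethdb obtained via NewMemoryDatabase):
+//
+//	db := NewMemoryDatabase()
+//	WriteSnapshotRecoveryNumber(db, 1337)
+//	if n := ReadSnapshotRecoveryNumber(db); n == nil || *n != 1337 {
+//		panic("recovery number round-trip failed")
+//	}
+//	DeleteSnapshotRecoveryNumber(db)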
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index e2b093a34a6cc3de3807cbbb1b65a57c8ba7ec60..dbc5025d5d2cdf812133e7a24cb208ec1b480ef5 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -51,6 +51,12 @@ var (
 	// snapshotJournalKey tracks the in-memory diff layers across restarts.
 	snapshotJournalKey = []byte("SnapshotJournal")
 
+	// snapshotGeneratorKey tracks the snapshot generation marker across restarts.
+	snapshotGeneratorKey = []byte("SnapshotGenerator")
+
+	// snapshotRecoveryKey tracks the snapshot recovery marker across restarts.
+	snapshotRecoveryKey = []byte("SnapshotRecovery")
+
 	// txIndexTailKey tracks the oldest block whose transactions have been indexed.
 	txIndexTailKey = []byte("TransactionIndexTail")
 
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 8460cd332f9915f6d46d09bf0aa7178fe60ec588..40ff5ade4c37e0832ef7679a9cf5999db1afb1f2 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // reverse reverses the contents of a byte slice. It's used to update random accs
@@ -429,6 +430,81 @@ func TestDiskPartialMerge(t *testing.T) {
 	}
 }
 
+// Tests that when the bottom-most diff layer is merged into the disk
+// layer, the corresponding generator is persisted correctly.
+func TestDiskGeneratorPersistence(t *testing.T) {
+	var (
+		accOne        = randomHash()
+		accTwo        = randomHash()
+		accOneSlotOne = randomHash()
+		accOneSlotTwo = randomHash()
+
+		accThree     = randomHash()
+		accThreeSlot = randomHash()
+		baseRoot     = randomHash()
+		diffRoot     = randomHash()
+		diffTwoRoot  = randomHash()
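+		// 64-byte generation marker (account hash + storage slot hash), i.e.
+		// snapshot generation is still in progress, paused mid-account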
+		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
+	)
+	// Testing scenario 1, the disk layer is still under construction.
+	db := rawdb.NewMemoryDatabase()
+
+	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
+	rawdb.WriteSnapshotRoot(db, baseRoot)
+
+	// Create a disk layer based on all above updates
+	snaps := &Tree{
+		layers: map[common.Hash]snapshot{
+			baseRoot: &diskLayer{
+				diskdb:    db,
+				cache:     fastcache.New(500 * 1024),
+				root:      baseRoot,
+				genMarker: genMarker,
+			},
+		},
+	}
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
+		accTwo: accTwo[:],
+	}, nil); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	if err := snaps.Cap(diffRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob := rawdb.ReadSnapshotGenerator(db)
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if !bytes.Equal(generator.Marker, genMarker) {
+		t.Fatalf("Generator marker is not matched")
+	}
+	// Testing scenario 2, the disk layer is fully generated.
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
+		accThree: accThree.Bytes(),
+	}, map[common.Hash]map[common.Hash][]byte{
+		accThree: {accThreeSlot: accThreeSlot.Bytes()},
+	}); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
+	diskLayer.genMarker = nil // Construction finished
+	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob = rawdb.ReadSnapshotGenerator(db)
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if len(generator.Marker) != 0 {
+		t.Fatalf("Failed to update snapshot generator")
+	}
+}
+
 // Tests that merging something into a disk layer persists it into the database
 // and invalidates any previously written and cached values, discarding anything
 // after the in-progress generation marker.
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index a6b3e4420d8c97f9c474907d131589a737bc7dd9..566f7d94a81eff8ce5854d6fe9bf6ede0f3e2ffa 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -112,6 +112,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 		genAbort:   make(chan chan *generatorStats),
 	}
 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
+	log.Debug("Start snapshot generation", "root", root)
 	return base
 }
 
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index fc1053f818d634ab14f9114d9413c543f1ed5602..2821248a726e23d1e69f7548133e7e632be2c1a8 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,6 +33,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
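+// journalVersion is the version number of the new-format snapshot journal;
+// loading fails on a version mismatch.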
+const journalVersion uint64 = 0
+
 // journalGenerator is a disk layer entry containing the generator progress marker.
 type journalGenerator struct {
 	Wiping   bool // Whether the database was in progress of being wiped
@@ -61,8 +63,87 @@ type journalStorage struct {
 	Vals [][]byte
 }
 
+// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
+func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the journal; for a legacy journal it must exist, since even
+	// with zero diff layers it stores whether we've already generated the
+	// snapshot or are still in progress.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
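+	// Legacy journal layout: the generator progress marker first, followed
+	// by the diff layers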
+
+	// Read the snapshot generation progress for the disk layer
+	var generator journalGenerator
+	if err := r.Decode(&generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, generator, err
+	}
+	return snapshot, generator, nil
+}
+
+// loadAndParseJournal tries to parse the snapshot journal in latest format.
+func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the disk layer generator. It must exist no matter whether
+	// the snapshot is fully generated or not. Otherwise the entire disk
+	// layer is invalid.
+	generatorBlob := rawdb.ReadSnapshotGenerator(db)
+	if len(generatorBlob) == 0 {
+		return nil, journalGenerator{}, errors.New("missing snapshot generator")
+	}
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
+	}
+	// Retrieve the diff layer journal. It's possible that the journal doesn't
+	// exist, e.g. the disk layer was generating when Geth crashed without
+	// persisting the diff journal. If there is no journal, or the journal
+	// doesn't match the disk layer, just discard all diffs and try to
+	// recover them later.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "missing")
+		return base, generator, nil
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// First, resolve the first element as the journal version
+	version, err := r.Uint()
+	if err != nil {
+		return nil, journalGenerator{}, err
+	}
+	if version != journalVersion {
+		return nil, journalGenerator{}, fmt.Errorf("journal version mismatch, want %d got %v", journalVersion, version)
+	}
+	// Second, resolve the disk layer root, ensuring the diff journal
+	// is continuous with the disk layer.
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		return nil, journalGenerator{}, errors.New("missing disk layer root")
+	}
+	// The diff journal doesn't match the disk layer, discard it. This can
+	// happen if Geth crashes without persisting the latest diff journal.
+	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
+		log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "unmatched")
+		return base, generator, nil
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, journalGenerator{}, err
+	}
+	log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root())
+	return snapshot, generator, nil
+}
+
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 	// is present in the database (or crashed mid-update).
 	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
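For reference, the journal layout that loadAndParseJournal expects is rlp(version) followed by rlp(diskRoot), followed by one (root, destructs, accounts, storage) tuple per diff layer, written bottom-up. A minimal standalone sketch of the metadata validation, under the assumption that the hypothetical inspectJournal helper lives in the snapshot package:

// inspectJournal is a hypothetical sketch of the metadata checks that
// loadAndParseJournal performs before handing the stream to loadDiffLayer.
func inspectJournal(journal []byte, diskRoot common.Hash) (*rlp.Stream, error) {
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// The first element must be the journal version.
	version, err := r.Uint()
	if err != nil {
		return nil, fmt.Errorf("failed to resolve journal version: %v", err)
	}
	if version != journalVersion {
		return nil, fmt.Errorf("journal version mismatch, want %d got %v", journalVersion, version)
	}
	// The second element must be the disk layer root the diffs build on.
	var root common.Hash
	if err := r.Decode(&root); err != nil {
		return nil, errors.New("missing disk layer root")
	}
	if root != diskRoot {
		return nil, fmt.Errorf("diff journal discontinuous with disk layer: have %x, want %x", root, diskRoot)
	}
	// The remainder of the stream is the per-layer diff payload.
	return r, nil
}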
@@ -75,28 +156,36 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache:  fastcache.New(cache * 1024 * 1024),
 		root:   baseRoot,
 	}
-	// Retrieve the journal, it must exist since even for 0 layer it stores whether
-	// we've already generated the snapshot or are in progress only
-	journal := rawdb.ReadSnapshotJournal(diskdb)
-	if len(journal) == 0 {
-		return nil, errors.New("missing or corrupted snapshot journal")
-	}
-	r := rlp.NewStream(bytes.NewReader(journal), 0)
-
-	// Read the snapshot generation progress for the disk layer
-	var generator journalGenerator
-	if err := r.Decode(&generator); err != nil {
-		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	var legacy bool
+	snapshot, generator, err := loadAndParseJournal(diskdb, base)
+	if err != nil {
+		log.Debug("Failed to load new-format journal", "error", err)
+		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
+		legacy = true
 	}
-	// Load all the snapshot diffs from the journal
-	snapshot, err := loadDiffLayer(base, r)
 	if err != nil {
 		return nil, err
 	}
-	// Entire snapshot journal loaded, sanity check the head and return
-	// Journal doesn't exist, don't worry if it's not supposed to
+	// Entire snapshot journal loaded, sanity check the head. If the loaded
+	// snapshot doesn't match the current state root, print a warning log,
+	// or discard the entire snapshot if it's in the legacy format.
+	//
+	// Possible scenario: Geth crashed without persisting the journal and on
+	// restart the head was rewound to a point with available state (trie)
+	// below the snapshot. In this case the snapshot can be recovered by
+	// re-executing blocks, but right now it's unavailable.
 	if head := snapshot.Root(); head != root {
-		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		// If it's a legacy snapshot, or a new-format snapshot outside of
+		// recovery mode, return the error here to force a rebuild of the
+		// entire snapshot.
+		if legacy || !recovery {
+			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		}
+		// We're in snapshot recovery, where the assumption holds that the
+		// disk layer is always higher than the chain head. The snapshot can
+		// eventually be recovered once the chain head passes the disk layer.
+		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {
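The head-mismatch handling above reduces to a single predicate. As a sketch (hypothetical helper name, same semantics as the branch above):

// acceptMismatchedHead reports whether a snapshot whose head root differs
// from the chain's state root may still be loaded. Only a new-format journal
// loaded in recovery mode qualifies; legacy journals and normal startups
// force a full rebuild.
func acceptMismatchedHead(legacy, recovery bool) bool {
	return !legacy && recovery
}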
@@ -203,7 +292,9 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if dl.stale {
 		return common.Hash{}, ErrSnapshotStale
 	}
-	// Write out the generator marker
+	// Write out the generator marker. Note it's a standalone disk layer
+	// generator which is not mixed into the journal. It's fine if the
+	// generator is persisted while the journal is not.
 	entry := journalGenerator{
 		Done:   dl.genMarker == nil,
 		Marker: dl.genMarker,
@@ -214,9 +305,12 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 		entry.Slots = stats.slots
 		entry.Storage = uint64(stats.storage)
 	}
-	if err := rlp.Encode(buffer, entry); err != nil {
+	blob, err := rlp.EncodeToBytes(entry)
+	if err != nil {
 		return common.Hash{}, err
 	}
+	log.Debug("Journalled disk layer", "root", dl.root, "complete", dl.genMarker == nil)
+	rawdb.WriteSnapshotGenerator(dl.diskdb, blob)
 	return dl.root, nil
 }
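Splitting the generator out of the journal changes what must survive a crash: the standalone generator is mandatory, while the diff journal is best-effort. A sketch of the resulting startup contract (readSnapshotMeta is a hypothetical name; the rawdb accessors are the ones used above):

// readSnapshotMeta sketches the crash-tolerance contract: the standalone
// generator must always be readable, while a missing diff journal merely
// drops the in-memory diff layers.
func readSnapshotMeta(db ethdb.KeyValueStore) (generator, journal []byte, err error) {
	generator = rawdb.ReadSnapshotGenerator(db)
	if len(generator) == 0 {
		return nil, nil, errors.New("missing snapshot generator") // disk layer unusable
	}
	journal = rawdb.ReadSnapshotJournal(db) // may be empty; diffs are recoverable later
	return generator, journal, nil
}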
 
@@ -266,5 +360,97 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err := rlp.Encode(buffer, storage); err != nil {
 		return common.Hash{}, err
 	}
+	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
+	return base, nil
+}
+
+// LegacyJournal writes the persistent layer generator stats into a buffer
+// to be stored in the database as the snapshot journal.
+//
+// Note this is the legacy version, which is currently only used in testing.
+func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+	// If the snapshot is currently being generated, abort it
+	var stats *generatorStats
+	if dl.genAbort != nil {
+		abort := make(chan *generatorStats)
+		dl.genAbort <- abort
+
+		if stats = <-abort; stats != nil {
+			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
+		}
+	}
+	// Ensure the layer didn't get stale
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.stale {
+		return common.Hash{}, ErrSnapshotStale
+	}
+	// Write out the generator marker
+	entry := journalGenerator{
+		Done:   dl.genMarker == nil,
+		Marker: dl.genMarker,
+	}
+	if stats != nil {
+		entry.Wiping = (stats.wiping != nil)
+		entry.Accounts = stats.accounts
+		entry.Slots = stats.slots
+		entry.Storage = uint64(stats.storage)
+	}
+	if err := rlp.Encode(buffer, entry); err != nil {
+		return common.Hash{}, err
+	}
+	return dl.root, nil
+}
+
+// LegacyJournal writes the memory layer contents into a buffer to be stored
+// in the database as the snapshot journal.
+//
+// Note this is the legacy version, which is currently only used in testing.
+func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+	// Journal the parent first
+	base, err := dl.parent.LegacyJournal(buffer)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	// Ensure the layer didn't get stale
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.Stale() {
+		return common.Hash{}, ErrSnapshotStale
+	}
+	// Everything below was journalled, persist this layer too
+	if err := rlp.Encode(buffer, dl.root); err != nil {
+		return common.Hash{}, err
+	}
+	destructs := make([]journalDestruct, 0, len(dl.destructSet))
+	for hash := range dl.destructSet {
+		destructs = append(destructs, journalDestruct{Hash: hash})
+	}
+	if err := rlp.Encode(buffer, destructs); err != nil {
+		return common.Hash{}, err
+	}
+	accounts := make([]journalAccount, 0, len(dl.accountData))
+	for hash, blob := range dl.accountData {
+		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
+	}
+	if err := rlp.Encode(buffer, accounts); err != nil {
+		return common.Hash{}, err
+	}
+	storage := make([]journalStorage, 0, len(dl.storageData))
+	for hash, slots := range dl.storageData {
+		keys := make([]common.Hash, 0, len(slots))
+		vals := make([][]byte, 0, len(slots))
+		for key, val := range slots {
+			keys = append(keys, key)
+			vals = append(vals, val)
+		}
+		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
+	}
+	if err := rlp.Encode(buffer, storage); err != nil {
+		return common.Hash{}, err
+	}
+	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
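Side by side, the two on-disk layouts the loaders have to cope with (an informal sketch; the key names follow the rawdb accessors used in this change):

// On-disk snapshot metadata layouts:
//
//	legacy:  SnapshotJournal   = rlp(journalGenerator) || diff layers...
//	new:     SnapshotGenerator = rlp(journalGenerator)
//	         SnapshotJournal   = rlp(journalVersion) || rlp(diskRoot) || diff layers...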
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 4f496b4ff64ff84f7d143b14c51f9ea5bcc5a2b0..6ad4451ea34fe5a66e62f015c4fc1d1bc0a30b3e 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
 
@@ -136,6 +137,10 @@ type snapshot interface {
 	// flattening everything down (bad for reorgs).
 	Journal(buffer *bytes.Buffer) (common.Hash, error)
 
+	// LegacyJournal is basically identical to Journal. It's the legacy version
+	// for flushing the journal in the legacy format. Currently its only purpose
+	// is testing.
+	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
+
 	// Stale return whether this layer has become stale (was flattened across) or
 	// if it's still live.
 	Stale() bool
@@ -168,10 +173,12 @@ type Tree struct {
 // store (with a number of memory layers from a journal), ensuring that the head
 // of the snapshot matches the expected one.
 //
-// If the snapshot is missing or inconsistent, the entirety is deleted and will
-// be reconstructed from scratch based on the tries in the key-value store, on a
-// background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
+// If the snapshot is missing or the disk layer is broken, the snapshot is
+// deleted in its entirety and reconstructed from scratch based on the tries
+// in the key-value store, on a background thread. If the memory layers from
+// the journal are not continuous with the disk layer, or the journal is
+// missing, all diffs are discarded iff we're in "recovery" mode, otherwise
+// a rebuild is mandatory.
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
 		diskdb: diskdb,
@@ -183,7 +190,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		defer snap.waitBuild()
 	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
-	head, err := loadSnapshot(diskdb, triedb, cache, root)
+	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
 	if err != nil {
 		log.Warn("Failed to load snapshot, regenerating", "err", err)
 		snap.Rebuild(root)
@@ -198,7 +205,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 }
 
 // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to  be used by tests to ensure we're testing what we believe we are.
+// to be used by tests to ensure we're testing what we believe we are.
 func (t *Tree) waitBuild() {
 	// Find the rebuild termination channel
 	var done chan struct{}
@@ -415,6 +422,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
 
 // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
 // it. The method will panic if called onto a non-bottom-most diff layer.
+//
+// The disk layer persistence should be operated in an atomic way. All updates
+// are discarded if the whole transition is not finished.
 func diffToDisk(bottom *diffLayer) *diskLayer {
 	var (
 		base  = bottom.parent.(*diskLayer)
@@ -427,8 +437,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.genAbort <- abort
 		stats = <-abort
 	}
-	// Start by temporarily deleting the current snapshot block marker. This
-	// ensures that in the case of a crash, the entire snapshot is invalidated.
+	// Put the deletion in the batch writer; flush all updates in the final step.
 	rawdb.DeleteSnapshotRoot(batch)
 
 	// Mark the original base as stale as we're going to create a new wrapper
@@ -471,12 +480,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.cache.Set(hash[:], data)
 		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
 
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write account snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 		snapshotFlushAccountItemMeter.Mark(1)
 		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
 	}
@@ -505,18 +508,33 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 			snapshotFlushStorageItemMeter.Mark(1)
 			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
 		}
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write storage snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 	}
 	// Update the snapshot block marker and write any remainder data
 	rawdb.WriteSnapshotRoot(batch, bottom.root)
+
+	// Write out the generator marker
+	entry := journalGenerator{
+		Done:   base.genMarker == nil,
+		Marker: base.genMarker,
+	}
+	if stats != nil {
+		entry.Wiping = (stats.wiping != nil)
+		entry.Accounts = stats.accounts
+		entry.Slots = stats.slots
+		entry.Storage = uint64(stats.storage)
+	}
+	blob, err := rlp.EncodeToBytes(entry)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to RLP encode generator: %v", err))
+	}
+	rawdb.WriteSnapshotGenerator(batch, blob)
+
+	// Flush all the updates in a single db operation, ensuring the
+	// disk layer transition is atomic.
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
+	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
 	res := &diskLayer{
 		root:       bottom.root,
 		cache:      base.cache,
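With the incremental batch flushes removed, the disk-layer transition commits in exactly one place. A condensed sketch of the write ordering (commitDiskLayer is a hypothetical name; the rawdb calls are the ones staged above):

// commitDiskLayer condenses the diffToDisk persistence discipline: every
// mutation is staged on a single batch, so after a crash either the old
// snapshot root or the complete new disk layer (root + generator) is visible.
func commitDiskLayer(db ethdb.KeyValueStore, newRoot common.Hash, generatorBlob []byte) {
	batch := db.NewBatch()
	rawdb.DeleteSnapshotRoot(batch) // staged, not yet visible
	// ... stage all account/storage writes and deletions on the same batch ...
	rawdb.WriteSnapshotRoot(batch, newRoot)
	rawdb.WriteSnapshotGenerator(batch, generatorBlob)
	if err := batch.Write(); err != nil { // the single commit point
		log.Crit("Failed to write snapshot transition", "err", err)
	}
}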
@@ -554,7 +572,21 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
+	// First, write out the journal metadata.
 	journal := new(bytes.Buffer)
+	if err := rlp.Encode(journal, journalVersion); err != nil {
+		return common.Hash{}, err
+	}
+	diskroot := t.diskRoot()
+	if diskroot == (common.Hash{}) {
+		return common.Hash{}, errors.New("invalid disk root")
+	}
+	// Second, write out the disk layer root, ensuring the diff journal
+	// is continuous with the disk layer.
+	if err := rlp.Encode(journal, diskroot); err != nil {
+		return common.Hash{}, err
+	}
+	// Finally, write out the journal of each layer in reverse order.
 	base, err := snap.(snapshot).Journal(journal)
 	if err != nil {
 		return common.Hash{}, err
@@ -564,6 +596,29 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	return base, nil
 }
 
+// LegacyJournal is basically identical to Journal. It's the legacy version
+// for flushing the journal in the legacy format. Currently its only purpose
+// is testing.
+func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
+	// Retrieve the head snapshot to journal from.
+	snap := t.Snapshot(root)
+	if snap == nil {
+		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
+	}
+	// Run the journaling
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	journal := new(bytes.Buffer)
+	base, err := snap.(snapshot).LegacyJournal(journal)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	// Store the journal into the database and return
+	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
+	return base, nil
+}
+
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
 // generator with the given root hash.
@@ -571,6 +626,10 @@ func (t *Tree) Rebuild(root common.Hash) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
+	// First, delete any recovery flag from the database, since we're now
+	// building a brand new snapshot.
+	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
+
 	// Track whether there's a wipe currently running and keep it alive if so
 	var wiper chan struct{}
 
@@ -657,6 +716,16 @@ func (t *Tree) disklayer() *diskLayer {
 	}
 }
 
+// diskRoot is an internal helper function to return the disk layer root.
+// The lock of the snapshot tree is assumed to be held already.
+func (t *Tree) diskRoot() common.Hash {
+	disklayer := t.disklayer()
+	if disklayer == nil {
+		return common.Hash{}
+	}
+	return disklayer.Root()
+}
+
 // generating is an internal helper function which reports whether the snapshot
 // is still under the construction.
 func (t *Tree) generating() (bool, error) {
@@ -671,3 +740,11 @@ func (t *Tree) generating() (bool, error) {
 	defer layer.lock.RUnlock()
 	return layer.genMarker != nil, nil
 }
+
+// DiskRoot is an external helper function to return the disk layer root.
+func (t *Tree) DiskRoot() common.Hash {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.diskRoot()
+}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 238d204745f1bcf5a50df9793f741f02183648d2..28a5313129dfb31c23ec2b64a54c9dba181c0c72 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -235,7 +235,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
 
 	var snaps *snapshot.Tree
 	if snapshotter {
-		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
+		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false)
 	}
 	statedb, _ = state.New(root, sdb, snaps)
 	return snaps, statedb
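Call sites outside the snapshot package only need to thread the extra flag through, as MakePreState does above. A usage sketch (openSnapshotTree and its parameter values are hypothetical; the constructor signature is the one introduced in this change):

// openSnapshotTree opens a snapshot tree with asynchronous generation and a
// caller-computed recovery flag (e.g. derived from a persisted recovery number).
func openSnapshotTree(diskdb ethdb.KeyValueStore, triedb *trie.Database, headRoot common.Hash, recovering bool) *snapshot.Tree {
	return snapshot.New(diskdb, triedb, 256, headRoot, true, recovering)
}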