diff --git a/core/blockchain.go b/core/blockchain.go
index 6acef13c4187a76488a9d0b79c5a9d5d2b3ae073..9dc1fa9c65c4b67cd969844a46f7b011f5c4b88e 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -130,6 +130,16 @@ type CacheConfig struct {
 	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
 }
 
+// defaultCacheConfig are the default caching values if none are specified by the
+// user (also used during testing).
+var defaultCacheConfig = &CacheConfig{
+	TrieCleanLimit: 256,
+	TrieDirtyLimit: 256,
+	TrieTimeLimit:  5 * time.Minute,
+	SnapshotLimit:  256,
+	SnapshotWait:   true,
+}
+
 // BlockChain represents the canonical chain given a database with a genesis
 // block. The Blockchain manages chain imports, reverts, chain reorganisations.
 //
@@ -204,13 +214,7 @@ type BlockChain struct {
 // Processor.
 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
 	if cacheConfig == nil {
-		cacheConfig = &CacheConfig{
-			TrieCleanLimit: 256,
-			TrieDirtyLimit: 256,
-			TrieTimeLimit:  5 * time.Minute,
-			SnapshotLimit:  256,
-			SnapshotWait:   true,
-		}
+		cacheConfig = defaultCacheConfig
 	}
 	bodyCache, _ := lru.New(bodyCacheLimit)
 	bodyRLPCache, _ := lru.New(bodyCacheLimit)
@@ -268,15 +272,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			txIndexBlock = frozen
 		}
 	}
-
 	if err := bc.loadLastState(); err != nil {
 		return nil, err
 	}
-	// The first thing the node will do is reconstruct the verification data for
-	// the head block (ethash cache or clique voting snapshot). Might as well do
-	// it in advance.
-	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
-
+	// Make sure the state associated with the block is available
+	head := bc.CurrentBlock()
+	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
+		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+		if err := bc.SetHead(head.NumberU64()); err != nil {
+			return nil, err
+		}
+	}
+	// Ensure that a previous crash in SetHead doesn't leave extra ancients
 	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
 		var (
 			needRewind bool
@@ -286,7 +293,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		// blockchain repair. If the head full block is even lower than the ancient
 		// chain, truncate the ancient store.
 		fullBlock := bc.CurrentBlock()
-		if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
+		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
 			needRewind = true
 			low = fullBlock.NumberU64()
 		}
@@ -301,15 +308,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			}
 		}
 		if needRewind {
-			var hashes []common.Hash
-			previous := bc.CurrentHeader().Number.Uint64()
-			for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
-				hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
+			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
+			if err := bc.SetHead(low); err != nil {
+				return nil, err
 			}
-			bc.Rollback(hashes)
-			log.Warn("Truncate ancient chain", "from", previous, "to", low)
 		}
 	}
+	// The first thing the node will do is reconstruct the verification data for
+	// the head block (ethash cache or clique voting snapshot). Might as well do
+	// it in advance.
+	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
+
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for hash := range BadHashes {
 		if header := bc.GetHeaderByHash(hash); header != nil {
@@ -318,7 +327,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			// make sure the headerByNumber (if present) is in our current canonical chain
 			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
 				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
-				bc.SetHead(header.Number.Uint64() - 1)
+				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
+					return nil, err
+				}
 				log.Error("Chain rewind was successful, resuming normal operation")
 			}
 		}
@@ -385,15 +396,6 @@ func (bc *BlockChain) loadLastState() error {
 		log.Warn("Head block missing, resetting chain", "hash", head)
 		return bc.Reset()
 	}
-	// Make sure the state associated with the block is available
-	if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil {
-		// Dangling block without a state associated, init from scratch
-		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
-		if err := bc.repair(&currentBlock); err != nil {
-			return err
-		}
-		rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
-	}
 	// Everything seems to be fine, set as the head block
 	bc.currentBlock.Store(currentBlock)
 	headBlockGauge.Update(int64(currentBlock.NumberU64()))
@@ -427,30 +429,48 @@ func (bc *BlockChain) loadLastState() error {
 	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
 	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
 	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
-
+	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
+		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
+	}
 	return nil
 }
 
-// SetHead rewinds the local chain to a new head. In the case of headers, everything
-// above the new head will be deleted and the new one set. In the case of blocks
-// though, the head may be further rewound if block bodies are missing (non-archive
-// nodes after a fast sync).
+// SetHead rewinds the local chain to a new head. Depending on whether the node
+// was fast synced or full synced and in which state, the method will try to
+// delete minimal data from disk whilst retaining chain consistency.
 func (bc *BlockChain) SetHead(head uint64) error {
-	log.Warn("Rewinding blockchain", "target", head)
-
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
 
-	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
-		// Rewind the block chain, ensuring we don't end up with a stateless head block
-		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
+	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
+	// current freezer limit to start nuking if underflowed
+	pivot := rawdb.ReadLastPivotNumber(bc.db)
+	frozen, _ := bc.db.Ancients()
+
+	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+		// Rewind the block chain, ensuring we don't end up with a stateless head
+		// block. Note, depth equality is permitted to allow using SetHead as a
+		// chain reparation mechanism without deleting any data!
+		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
 			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
 			if newHeadBlock == nil {
+				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
 				newHeadBlock = bc.genesisBlock
 			} else {
-				if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
-					// Rewound state missing, rolled back to before pivot, reset to genesis
-					newHeadBlock = bc.genesisBlock
+				// Block exists, keep rewinding until we find one with state
+				for {
+					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
+						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
+							newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
+							continue
+						} else {
+							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
+							newHeadBlock = bc.genesisBlock
+						}
+					}
+					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+					break
 				}
 			}
 			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -462,7 +482,6 @@ func (bc *BlockChain) SetHead(head uint64) error {
 			bc.currentBlock.Store(newHeadBlock)
 			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
 		}
-
 		// Rewind the fast block in a simpleton way to the target head
 		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
 			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
@@ -479,8 +498,17 @@ func (bc *BlockChain) SetHead(head uint64) error {
 			bc.currentFastBlock.Store(newHeadFastBlock)
 			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
 		}
-	}
+		head := bc.CurrentBlock().NumberU64()
 
+		// If SetHead underflowed the freezer threshold and the block processing
+		// intent afterwards is full block importing, delete the chain segment
+		// between the stateful-block and the sethead target.
+		var wipe bool
+		if head+1 < frozen {
+			wipe = pivot == nil || head >= *pivot
+		}
+		return head, wipe // Only force wipe if full synced
+	}
 	// Rewind the header chain, deleting all block bodies until then
 	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
 		// Ignore the error here since light client won't hit this path
@@ -488,10 +516,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
 		if num+1 <= frozen {
 			// Truncate all relative data(header, total difficulty, body, receipt
 			// and canonical hash) from ancient store.
-			if err := bc.db.TruncateAncients(num + 1); err != nil {
+			if err := bc.db.TruncateAncients(num); err != nil {
 				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
 			}
-
 			// Remove the hash <-> number mapping from the active store.
 			rawdb.DeleteHeaderNumber(db, hash)
 		} else {
@@ -503,8 +530,18 @@ func (bc *BlockChain) SetHead(head uint64) error {
 		}
 		// Todo(rjl493456442) txlookup, bloombits, etc
 	}
-	bc.hc.SetHead(head, updateFn, delFn)
-
+	// If SetHead was only called as a chain reparation method, try to skip
+	// touching the header chain altogether, unless the freezer is broken
+	if block := bc.CurrentBlock(); block.NumberU64() == head {
+		if target, force := updateFn(bc.db, block.Header()); force {
+			bc.hc.SetHead(target, updateFn, delFn)
+		}
+	} else {
+		// Rewind the chain to the requested head and keep going backwards until a
+		// block with a state is found or fast sync pivot is passed
+		log.Warn("Rewinding blockchain", "target", head)
+		bc.hc.SetHead(head, updateFn, delFn)
+	}
 	// Clear out any stale content from the caches
 	bc.bodyCache.Purge()
 	bc.bodyRLPCache.Purge()
@@ -627,28 +664,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
 	return nil
 }
 
-// repair tries to repair the current blockchain by rolling back the current block
-// until one with associated state is found. This is needed to fix incomplete db
-// writes caused either by crashes/power outages, or simply non-committed tries.
-//
-// This method only rolls back the current block. The current header and current
-// fast block are left intact.
-func (bc *BlockChain) repair(head **types.Block) error {
-	for {
-		// Abort if we've rewound to a head block that does have associated state
-		if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil {
-			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
-			return nil
-		}
-		// Otherwise rewind one block and recheck state availability there
-		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
-		if block == nil {
-			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
-		}
-		*head = block
-	}
-}
-
 // Export writes the active chain to the given writer.
 func (bc *BlockChain) Export(w io.Writer) error {
 	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
@@ -985,52 +1000,6 @@ const (
 	SideStatTy
 )
 
-// Rollback is designed to remove a chain of links from the database that aren't
-// certain enough to be valid.
-func (bc *BlockChain) Rollback(chain []common.Hash) {
-	bc.chainmu.Lock()
-	defer bc.chainmu.Unlock()
-
-	batch := bc.db.NewBatch()
-	for i := len(chain) - 1; i >= 0; i-- {
-		hash := chain[i]
-
-		// Degrade the chain markers if they are explicitly reverted.
-		// In theory we should update all in-memory markers in the
-		// last step, however the direction of rollback is from high
-		// to low, so it's safe the update in-memory markers directly.
-		currentHeader := bc.hc.CurrentHeader()
-		if currentHeader.Hash() == hash {
-			newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
-			rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
-			bc.hc.SetCurrentHeader(newHeadHeader)
-		}
-		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
-			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
-			rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
-			bc.currentFastBlock.Store(newFastBlock)
-			headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
-		}
-		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
-			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
-			rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
-			bc.currentBlock.Store(newBlock)
-			headBlockGauge.Update(int64(newBlock.NumberU64()))
-		}
-	}
-	if err := batch.Write(); err != nil {
-		log.Crit("Failed to rollback chain markers", "err", err)
-	}
-	// Truncate ancient data which exceeds the current header.
-	//
-	// Notably, it can happen that system crashes without truncating the ancient data
-	// but the head indicator has been updated in the active store. Regarding this issue,
-	// system will self recovery by truncating the extra data during the setup phase.
-	if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
-		log.Crit("Truncate ancient store failed", "err", err)
-	}
-}
-
 // truncateAncient rewinds the blockchain to the specified header and deletes all
 // data in the ancient store that exceeds the specified header.
 func (bc *BlockChain) truncateAncient(head uint64) error {
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..27903dd06b354ea324c051ff561e9fc1dbb596eb
--- /dev/null
+++ b/core/blockchain_repair_test.go
@@ -0,0 +1,1653 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. crash) and restart doesn't leave
+// the database in some strange state with gaps in the chain, nor with block data
+// dangling in the future.
+
+package core
+
+import (
+	"io/ioutil"
+	"math/big"
+	"os"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// Tests a recovery for a short canonical chain where a recent block was already
+// committed to disk and then the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left
+// in the database for replaying.
+func TestShortRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// already committed, after which the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left in
+// the database for replaying.
+func TestShortFastSyncedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// not yet committed, but the process crashed. In this case we expect the chain to
+// detect that it was fast syncing and not delete anything, since we can just pick
+// up directly where we left off.
+func TestShortFastSyncingRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical chain to be rolled back to the committed block, but the chain data
+// itself left in the database for replaying.
+func TestShortOldForkedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical chain to be rolled back to the committed block,
+// but the chain data itself left in the database for replaying.
+func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   â””->S1->S2->S3
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical chain to be rolled back to the committed block, but the
+// chain data itself left in the database for replaying.
+func TestShortNewlyForkedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3->S4->S5->S6
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    6,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 6,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain reaches above the committed block.
+// In this case we expect the canonical chain to be rolled back to the committed
+// block, but the chain data itself left in the database for replaying.
+func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   â””->S1->S2->S3->S4->S5->S6
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    6,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 6,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and not delete
+// anything, since we can just pick up directly where we left off.
+func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3->S4->S5->S6
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    6,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 6,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// case we expect the canonical chain to be rolled back to the committed block, but
+// the chain data itself left in the database for replaying.
+func TestShortReorgedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 10,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this case we expect the canonical chain to be rolled back to the
+// committed block, but the chain data itself left in the database for replaying.
+func TestShortReorgedFastSyncedRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 10,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In
+// this case we expect the chain to detect that it was fast syncing and not delete
+// anything, since we can just pick up directly where we left off.
+func TestShortReorgedFastSyncingRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 8,
+		expSidechainBlocks: 10,
+		expFrozen:          0,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where a recent
+// block - newer than the ancient limit - was already committed to disk and then
+// the process crashed. In this case we expect the chain to be rolled back to the
+// committed block, with everything afterwards kept as fast sync data.
+func TestLongShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where a recent
+// block - older than the ancient limit - was already committed to disk and then
+// the process crashed. In this case we expect the chain to be rolled back to the
+// committed block, with everything afterwards deleted.
+func TestLongDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was already committed, after
+// which the process crashed. In this case we expect the chain to be rolled back
+// to the committed block, with everything afterwards kept as fast sync data.
+func TestLongFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was already committed, after
+// which the process crashed. In this case we expect the chain to be rolled back
+// to the committed block, with everything afterwards deleted.
+func TestLongFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to be
+// rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongOldForkedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the canonical chain
+// to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongOldForkedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is below the committed block. In this case we expect the chain
+// to be rolled back to the committed block, with everything afterwards kept as
+// fast sync data; the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to be
+// rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongNewerForkedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the canonical chain
+// to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongNewerForkedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is above the committed block. In this case we expect the chain
+// to be rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is above the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then the process crashed. In this case we expect the chain to be
+// rolled back to the committed block, with everything afterwards kept as fast sync
+// data. The side chain completely nuked by the freezer.
+func TestLongReorgedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then the process crashed. In this case we expect the canonical chain
+// to be rolled back to the committed block, with everything afterwards deleted. The
+// side chain completely nuked by the freezer.
+func TestLongReorgedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then the process crashed. In this case we
+// expect the chain to be rolled back to the committed block, with everything
+// afterwards kept as fast sync data. The side chain completely nuked by the
+// freezer.
+func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this case we
+// expect the canonical chain to be rolled back to the committed block, with
+// everything afterwards deleted. The side chain completely nuked by the freezer.
+func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this case we expect the
+// chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this case we expect the
+// chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+func testRepair(t *testing.T, tt *rewindTest) {
+	// It's hard to follow the test case, visualize the input
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	//fmt.Println(tt.dump(true))
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+	)
+	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	// If sidechain blocks are needed, make a light chain and import it
+	var sideblocks types.Blocks
+	if tt.sidechainBlocks > 0 {
+		sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+			b.SetCoinbase(common.Address{0x01})
+		})
+		if _, err := chain.InsertChain(sideblocks); err != nil {
+			t.Fatalf("Failed to import side chain: %v", err)
+		}
+	}
+	canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{0x02})
+		b.SetDifficulty(big.NewInt(1000000))
+	})
+	if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	if tt.commitBlock > 0 {
+		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+	}
+	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Force run a freeze cycle
+	type freezer interface {
+		Freeze(threshold uint64)
+		Ancients() (uint64, error)
+	}
+	db.(freezer).Freeze(tt.freezeThreshold)
+
+	// Set the simulated pivot block
+	if tt.pivotBlock != nil {
+		rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+	}
+	// Pull the plug on the database, simulating a hard crash
+	db.Close()
+
+	// Start a new blockchain back up and see where the repair leads us
+	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to reopen persistent database: %v", err)
+	}
+	defer db.Close()
+
+	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to recreate chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, canonblocks)
+	verifyNoGaps(t, chain, false, sideblocks)
+	verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+	verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	if frozen, err := db.(freezer).Ancients(); err != nil {
+		t.Errorf("Failed to retrieve ancient count: %v\n", err)
+	} else if int(frozen) != tt.expFrozen {
+		t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+	}
+}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dc1368ff4b488e1777dff60a9923cf3345867051
--- /dev/null
+++ b/core/blockchain_sethead_test.go
@@ -0,0 +1,1949 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that setting the chain head backwards doesn't leave the database in some
+// strange state with gaps in the chain, nor with block data dangling in the future.
+
+package core
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// rewindTest is a test case for chain rollback upon user request.
+type rewindTest struct {
+	canonicalBlocks int     // Number of blocks to generate for the canonical chain (heavier)
+	sidechainBlocks int     // Number of blocks to generate for the side chain (lighter)
+	freezeThreshold uint64  // Block number until which to move things into the freezer
+	commitBlock     uint64  // Block number for which to commit the state to disk
+	pivotBlock      *uint64 // Pivot block number in case of fast sync
+
+	setheadBlock       uint64 // Block number to set head back to
+	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
+	expSidechainBlocks int    // Number of sidechain blocks expected to remain in the database (excl. genesis)
+	expFrozen          int    // Number of canonical blocks expected to be in the freezer (incl. genesis)
+	expHeadHeader      uint64 // Block number of the expected head header
+	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
+	expHeadBlock       uint64 // Block number of the expected head full block
+}
+
+func (tt *rewindTest) dump(crash bool) string {
+	buffer := new(strings.Builder)
+
+	fmt.Fprint(buffer, "Chain:\n  G")
+	for i := 0; i < tt.canonicalBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprint(buffer, " (HEAD)\n")
+	if tt.sidechainBlocks > 0 {
+		fmt.Fprintf(buffer, "  └")
+		for i := 0; i < tt.sidechainBlocks; i++ {
+			fmt.Fprintf(buffer, "->S%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n")
+	}
+	fmt.Fprintf(buffer, "\n")
+
+	if tt.canonicalBlocks > int(tt.freezeThreshold) {
+		fmt.Fprint(buffer, "Frozen:\n  G")
+		for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n\n")
+	} else {
+		fmt.Fprintf(buffer, "Frozen: none\n")
+	}
+	fmt.Fprintf(buffer, "Commit: G")
+	if tt.commitBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	if tt.pivotBlock == nil {
+		fmt.Fprintf(buffer, "Pivot : none\n")
+	} else {
+		fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
+	}
+	if crash {
+		fmt.Fprintf(buffer, "\nCRASH\n\n")
+	} else {
+		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
+	}
+	fmt.Fprintf(buffer, "------------------------------\n\n")
+
+	if tt.expFrozen > 0 {
+		fmt.Fprint(buffer, "Expected in freezer:\n  G")
+		for i := 0; i < tt.expFrozen-1; i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n\n")
+	}
+	if tt.expFrozen > 0 {
+		if tt.expFrozen >= tt.expCanonicalBlocks {
+			fmt.Fprintf(buffer, "Expected in leveldb: none\n")
+		} else {
+			fmt.Fprintf(buffer, "Expected in leveldb:\n  C%d)", tt.expFrozen-1)
+			for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
+				fmt.Fprintf(buffer, "->C%d", i+1)
+			}
+			fmt.Fprint(buffer, "\n")
+			if tt.expSidechainBlocks > tt.expFrozen {
+				fmt.Fprintf(buffer, "  └")
+				for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
+					fmt.Fprintf(buffer, "->S%d", i+1)
+				}
+				fmt.Fprintf(buffer, "\n")
+			}
+		}
+	} else {
+		fmt.Fprint(buffer, "Expected in leveldb:\n  G")
+		for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprint(buffer, "\n")
+		if tt.expSidechainBlocks > tt.expFrozen {
+			fmt.Fprintf(buffer, "  └")
+			for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
+				fmt.Fprintf(buffer, "->S%d", i+1)
+			}
+			fmt.Fprintf(buffer, "\n")
+		}
+	}
+	fmt.Fprintf(buffer, "\n")
+	fmt.Fprintf(buffer, "Expected head header    : C%d\n", tt.expHeadHeader)
+	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+	if tt.expHeadBlock == 0 {
+		fmt.Fprintf(buffer, "Expected head block     : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected head block     : C%d\n", tt.expHeadBlock)
+	}
+	return buffer.String()
+}
+
+// Tests a sethead for a short canonical chain where a recent block was already
+// committed to disk and then sethead was called. In this case we expect the full
+// chain to be rolled back to the committed block. Everything above the sethead
+// point should be deleted. In between the committed block and the requested head
+// the data can remain as "fast sync" data to avoid redownloading it.
+func TestShortSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// already committed, after which sethead was called. In this case we expect the
+// chain to behave like in full sync mode, rolling back to the committed block
+// Everything above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// redownloading it.
+func TestShortFastSyncedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// not yet committed, but sethead was called. In this case we expect the chain to
+// detect that it was fast syncing and delete everything from the new head, since
+// we can just pick up fast syncing from there. The head full block should be set
+// to the genesis.
+func TestShortFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical full chain to be rolled back to the committed block. Everything
+// above the sethead point should be deleted. In between the committed block and
+// the requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical full chain to be rolled back to the committed
+// block. Everything above the sethead point should be deleted. In between the
+// committed block and the requested head the data can remain as "fast sync" data
+// to avoid redownloading it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The head full block
+// should be set to the genesis.
+func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical full chain to be rolled back to the committed block. All
+// data above the sethead point should be deleted. In between the committed block
+// and the requested head the data can remain as "fast sync" data to avoid having
+// to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    10,
+		sidechainBlocks:    8,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this case we expect the canonical full chain to be rolled back
+// to the committed block; between it and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    10,
+		sidechainBlocks:    8,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and delete
+// everything from the new head, since we can just pick up fast syncing from
+// there.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    10,
+		sidechainBlocks:    8,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// case we expect the canonical full chain to be rolled back to the committed block.
+// All data above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// having to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this case we expect the canonical full chain to be rolled back to
+// the committed block. All data above the sethead point should be deleted. In
+// between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedFastSyncedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In
+// this case we expect the chain to detect that it was fast syncing and delete
+// everything from the new head, since we can just pick up fast syncing from
+// there.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    10,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - newer than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Everything above the sethead point should be deleted.
+// In between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid redownloading it.
+func TestLongShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - older than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Everything above the sethead point should be
+// deleted. In between the committed block and the requested head the data can
+// remain as "fast sync" data to avoid redownloading it.
+func TestLongFastSyncedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongFastSyncedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          7,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Everything above the sethead point
+// should be deleted. In between the committed block and the requested head the data
+// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
+// by the freezer.
+func TestLongOldForkedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Everything above the
+// sethead point should be deleted. In between the committed block and the
+// requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          7,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongShallowSetHead.
+func TestLongNewerForkedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongDeepSetHead.
+func TestLongNewerForkedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
+func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
+func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
+func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          7,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then sethead was called. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongShallowSetHead.
+func TestLongReorgedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then sethead was called. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongDeepSetHead.
+func TestLongReorgedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this case the
+// freezer will delete the sidechain since it's dangling, reverting to
+// TestLongFastSyncedShallowSetHead.
+func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this case the
+// freezer will delete the sidechain since it's dangling, reverting to
+// TestLongFastSyncedDeepSetHead.
+func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this case we expect the
+// chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The side chain is
+// completely nuked by the freezer.
+func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this case we expect the
+// chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The side chain is
+// completely nuked by the freezer.
+func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          7,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+func testSetHead(t *testing.T, tt *rewindTest) {
+	// It's hard to follow the test case, visualize the input
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	//fmt.Println(tt.dump(false))
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close()
+
+	// Initialize a fresh chain
+	var (
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+	)
+	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	// If sidechain blocks are needed, make a light chain and import it
+	var sideblocks types.Blocks
+	if tt.sidechainBlocks > 0 {
+		sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+			b.SetCoinbase(common.Address{0x01})
+		})
+		if _, err := chain.InsertChain(sideblocks); err != nil {
+			t.Fatalf("Failed to import side chain: %v", err)
+		}
+	}
+	canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{0x02})
+		b.SetDifficulty(big.NewInt(1000000))
+	})
+	if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	if tt.commitBlock > 0 {
+		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+	}
+	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Manually dereference anything not committed to not have to work with 128+ tries
+	for _, block := range sideblocks {
+		chain.stateCache.TrieDB().Dereference(block.Root())
+	}
+	for _, block := range canonblocks {
+		chain.stateCache.TrieDB().Dereference(block.Root())
+	}
+	// Force run a freeze cycle
+	type freezer interface {
+		Freeze(threshold uint64)
+		Ancients() (uint64, error)
+	}
+	db.(freezer).Freeze(tt.freezeThreshold)
+
+	// Set the simulated pivot block
+	if tt.pivotBlock != nil {
+		rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+	}
+	// Set the head of the chain back to the requested number
+	chain.SetHead(tt.setheadBlock)
+
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, canonblocks)
+	verifyNoGaps(t, chain, false, sideblocks)
+	verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+	verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	if frozen, err := db.(freezer).Ancients(); err != nil {
+		t.Errorf("Failed to retrieve ancient count: %v\n", err)
+	} else if int(frozen) != tt.expFrozen {
+		t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+	}
+}
+
+// verifyNoGaps checks that there are no gaps after the initial set of blocks in
+// the database and errors if found.
+func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
+	t.Helper()
+
+	var end uint64
+	for i := uint64(0); i <= uint64(len(inserted)); i++ {
+		header := chain.GetHeaderByNumber(i)
+		if header == nil && end == 0 {
+			end = i
+		}
+		if header != nil && end > 0 {
+			if canonical {
+				t.Errorf("Canonical header gap between #%d-#%d", end, i-1)
+			} else {
+				t.Errorf("Sidechain header gap between #%d-#%d", end, i-1)
+			}
+			end = 0 // Reset for further gap detection
+		}
+	}
+	end = 0
+	for i := uint64(0); i <= uint64(len(inserted)); i++ {
+		block := chain.GetBlockByNumber(i)
+		if block == nil && end == 0 {
+			end = i
+		}
+		if block != nil && end > 0 {
+			if canonical {
+				t.Errorf("Canonical block gap between #%d-#%d", end, i-1)
+			} else {
+				t.Errorf("Sidechain block gap between #%d-#%d", end, i-1)
+			}
+			end = 0 // Reset for further gap detection
+		}
+	}
+	end = 0
+	for i := uint64(1); i <= uint64(len(inserted)); i++ {
+		receipts := chain.GetReceiptsByHash(inserted[i-1].Hash())
+		if receipts == nil && end == 0 {
+			end = i
+		}
+		if receipts != nil && end > 0 {
+			if canonical {
+				t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1)
+			} else {
+				t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1)
+			}
+			end = 0 // Reset for further gap detection
+		}
+	}
+}
+
+// verifyCutoff checks that there are no chain data available in the chain after
+// the specified limit, but that it is available before.
+func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
+	t.Helper()
+
+	for i := 1; i <= len(inserted); i++ {
+		if i <= head {
+			if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil {
+				if canonical {
+					t.Errorf("Canonical header   #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain header   #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+			if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil {
+				if canonical {
+					t.Errorf("Canonical block    #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain block    #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+			if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil {
+				if canonical {
+					t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+		} else {
+			if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil {
+				if canonical {
+					t.Errorf("Canonical header   #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain header   #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+			if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil {
+				if canonical {
+					t.Errorf("Canonical block    #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain block    #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+			if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil {
+				if canonical {
+					t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				} else {
+					t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+				}
+			}
+		}
+	}
+}
+
+// uint64ptr is a weird helper to allow 1-line constant pointer creation.
+func uint64ptr(n uint64) *uint64 {
+	return &n
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 0d810699f667c722e2aa25f373bad65792e99ee4..41fc4920ca2d3e4602a9a9b48353f4a8ad202137 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -731,12 +731,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		return db, func() { os.RemoveAll(dir) }
 	}
 	// Configure a subchain to roll back
-	remove := []common.Hash{}
-	for _, block := range blocks[height/2:] {
-		remove = append(remove, block.Hash())
-	}
+	remove := blocks[height/2].NumberU64()
+
 	// Create a small assertion method to check the three heads
 	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+		t.Helper()
+
 		if num := chain.CurrentBlock().NumberU64(); num != block {
 			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
 		}
@@ -750,14 +750,18 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	// Import the chain as an archive node and ensure all pointers are updated
 	archiveDb, delfn := makeDb()
 	defer delfn()
-	archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+
+	archiveCaching := *defaultCacheConfig
+	archiveCaching.TrieDirtyDisabled = true
+
+	archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 	if n, err := archive.InsertChain(blocks); err != nil {
 		t.Fatalf("failed to process block %d: %v", n, err)
 	}
 	defer archive.Stop()
 
 	assert(t, "archive", archive, height, height, height)
-	archive.Rollback(remove)
+	archive.SetHead(remove - 1)
 	assert(t, "archive", archive, height/2, height/2, height/2)
 
 	// Import the chain as a non-archive node and ensure all pointers are updated
@@ -777,7 +781,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 	}
 	assert(t, "fast", fast, height, height, 0)
-	fast.Rollback(remove)
+	fast.SetHead(remove - 1)
 	assert(t, "fast", fast, height/2, height/2, 0)
 
 	// Import the chain as a ancient-first node and ensure all pointers are updated
@@ -793,12 +797,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 	}
 	assert(t, "ancient", ancient, height, height, 0)
-	ancient.Rollback(remove)
-	assert(t, "ancient", ancient, height/2, height/2, 0)
-	if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 {
-		t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen)
-	}
+	ancient.SetHead(remove - 1)
+	assert(t, "ancient", ancient, 0, 0, 0)
 
+	if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
+		t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
+	}
 	// Import the chain as a light node and ensure all pointers are updated
 	lightDb, delfn := makeDb()
 	defer delfn()
@@ -809,7 +813,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	defer light.Stop()
 
 	assert(t, "light", light, height, 0, 0)
-	light.Rollback(remove)
+	light.SetHead(remove - 1)
 	assert(t, "light", light, height/2, 0, 0)
 }
 
@@ -1585,6 +1589,7 @@ func TestBlockchainRecovery(t *testing.T) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(frdir)
+
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
@@ -1602,6 +1607,7 @@ func TestBlockchainRecovery(t *testing.T) {
 	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 	}
+	rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
 	ancient.Stop()
 
 	// Destroy head fast block manually
@@ -1912,11 +1918,9 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 	asserter(t, blocks[len(blocks)-1])
 
 	// Import a long canonical chain with some known data as prefix.
-	var rollback []common.Hash
-	for i := len(blocks) / 2; i < len(blocks); i++ {
-		rollback = append(rollback, blocks[i].Hash())
-	}
-	chain.Rollback(rollback)
+	rollback := blocks[len(blocks)/2].NumberU64()
+
+	chain.SetHead(rollback - 1)
 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 		t.Fatalf("failed to insert chain data: %v", err)
 	}
@@ -1936,11 +1940,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 	asserter(t, blocks3[len(blocks3)-1])
 
 	// Rollback the heavier chain and re-insert the longer chain again
-	for i := 0; i < len(blocks3); i++ {
-		rollback = append(rollback, blocks3[i].Hash())
-	}
-	chain.Rollback(rollback)
-
+	chain.SetHead(rollback - 1)
 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 		t.Fatalf("failed to insert chain data: %v", err)
 	}
diff --git a/core/headerchain.go b/core/headerchain.go
index a6028d8b9a108a4715c9c1d12e0aceed8184abce..f5a8e21cfc6cc67dc709a8280f6b0f6be312cafb 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -488,8 +488,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 
 type (
 	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
-	// before head header is updated.
-	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header)
+	// before head header is updated. The method will return the actual block it
+	// updated the head to (missing state) and a flag whether setHead should
+	// continue rewinding to that block forcefully (exceeded ancient limits)
+	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
 
 	// DeleteBlockContentCallback is a callback function that is called by SetHead
 	// before each header is deleted.
@@ -502,9 +504,10 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 	var (
 		parentHash common.Hash
 		batch      = hc.chainDb.NewBatch()
+		origin     = true
 	)
 	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
-		hash, num := hdr.Hash(), hdr.Number.Uint64()
+		num := hdr.Number.Uint64()
 
 		// Rewind block chain to new head.
 		parent := hc.GetHeader(hdr.ParentHash, num-1)
@@ -512,16 +515,21 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 			parent = hc.genesisHeader
 		}
 		parentHash = hdr.ParentHash
+
 		// Notably, since geth has the possibility for setting the head to a low
 		// height which is even lower than ancient head.
 		// In order to ensure that the head is always no higher than the data in
-		// the database(ancient store or active store), we need to update head
+		// the database (ancient store or active store), we need to update head
 		// first then remove the relative data from the database.
 		//
 		// Update head first(head fast block, head full block) before deleting the data.
 		markerBatch := hc.chainDb.NewBatch()
 		if updateFn != nil {
-			updateFn(markerBatch, parent)
+			newHead, force := updateFn(markerBatch, parent)
+			if force && newHead < head {
+				log.Warn("Force rewinding till ancient limit", "head", newHead)
+				head = newHead
+			}
 		}
 		// Update head header then.
 		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
@@ -532,14 +540,34 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 		hc.currentHeaderHash = parentHash
 		headHeaderGauge.Update(parent.Number.Int64())
 
-		// Remove the relative data from the database.
-		if delFn != nil {
-			delFn(batch, hash, num)
+		// If this is the first iteration, wipe any leftover data upwards too so
+	// we don't end up with dangling gaps in the database
+		var nums []uint64
+		if origin {
+			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
+				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
+			}
+			origin = false
+		}
+		nums = append(nums, num)
+
+		// Remove the related data from the database on all sidechains
+		for _, num := range nums {
+			// Gather all the side fork hashes
+			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
+			if len(hashes) == 0 {
+				// No hashes in the database whatsoever, probably frozen already
+				hashes = append(hashes, hdr.Hash())
+			}
+			for _, hash := range hashes {
+				if delFn != nil {
+					delFn(batch, hash, num)
+				}
+				rawdb.DeleteHeader(batch, hash, num)
+				rawdb.DeleteTd(batch, hash, num)
+			}
+			rawdb.DeleteCanonicalHash(batch, num)
 		}
-		// Rewind header chain to new head.
-		rawdb.DeleteHeader(batch, hash, num)
-		rawdb.DeleteTd(batch, hash, num)
-		rawdb.DeleteCanonicalHash(batch, num)
 	}
 	// Flush all accumulated deletions.
 	if err := batch.Write(); err != nil {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 8dd1f6345ab803d71e93a7b3c0a38e6f7e6d1ba5..c948cdc7c60e0afd23c89146457dfd191f4a231b 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -187,6 +187,32 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
 	}
 }
 
+// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
+// is fully synced, the last pivot will always be nil.
+func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
+	data, _ := db.Get(lastPivotKey)
+	if len(data) == 0 {
+		return nil
+	}
+	var pivot uint64
+	if err := rlp.DecodeBytes(data, &pivot); err != nil {
+		log.Error("Invalid pivot block number in database", "err", err)
+		return nil
+	}
+	return &pivot
+}
+
+// WriteLastPivotNumber stores the number of the last pivot block.
+func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
+	enc, err := rlp.EncodeToBytes(pivot)
+	if err != nil {
+		log.Crit("Failed to encode pivot block number", "err", err)
+	}
+	if err := db.Put(lastPivotKey, enc); err != nil {
+		log.Crit("Failed to store pivot block number", "err", err)
+	}
+}
+
 // ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
 // reporting correct numbers across restarts.
 func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index eb3f86a76e2ae3d041d0da87427600a53b5cd457..d22ca1c529d62f5d43bead98932bfc92d1920ce4 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -53,6 +54,22 @@ func (frdb *freezerdb) Close() error {
 	return nil
 }
 
+// Freeze is a helper method used for external testing to trigger and block until
+// a freeze cycle completes, without having to sleep for a minute to trigger the
+// automatic background run.
+func (frdb *freezerdb) Freeze(threshold uint64) {
+	// Set the freezer threshold to a temporary value
+	defer func(old uint64) {
+		atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old)
+	}(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold))
+	atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold)
+
+	// Trigger a freeze cycle and block until it's done
+	trigger := make(chan struct{}, 1)
+	frdb.AncientStore.(*freezer).trigger <- trigger
+	<-trigger
+}
+
 // nofreezedb is a database wrapper that disables freezer data retrievals.
 type nofreezedb struct {
 	ethdb.KeyValueStore
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 621d35d3f4201eac85c2452cedffef63d083dff0..5744b0cbb3d5ba8c67306f74c150e8ff09f91748 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -70,12 +70,16 @@ type freezer struct {
 	// WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
 	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
 	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
-	frozen uint64 // Number of blocks already frozen
+	frozen    uint64 // Number of blocks already frozen
+	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
 	tables       map[string]*freezerTable // Data tables for storing everything
 	instanceLock fileutil.Releaser        // File-system lock to prevent double opens
-	quit         chan struct{}
-	closeOnce    sync.Once
+
+	trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
+
+	quit      chan struct{}
+	closeOnce sync.Once
 }
 
 // newFreezer creates a chain freezer that moves ancient chain data into
@@ -102,8 +106,10 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 	}
 	// Open all the supported data tables
 	freezer := &freezer{
+		threshold:    params.FullImmutabilityThreshold,
 		tables:       make(map[string]*freezerTable),
 		instanceLock: lock,
+		trigger:      make(chan chan struct{}),
 		quit:         make(chan struct{}),
 	}
 	for name, disableSnappy := range freezerNoSnappy {
@@ -261,7 +267,10 @@ func (f *freezer) Sync() error {
 func (f *freezer) freeze(db ethdb.KeyValueStore) {
 	nfdb := &nofreezedb{KeyValueStore: db}
 
-	backoff := false
+	var (
+		backoff   bool
+		triggered chan struct{} // Used in tests
+	)
 	for {
 		select {
 		case <-f.quit:
@@ -270,9 +279,16 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 		default:
 		}
 		if backoff {
+			// If we were doing a manual trigger, notify it
+			if triggered != nil {
+				triggered <- struct{}{}
+				triggered = nil
+			}
 			select {
 			case <-time.NewTimer(freezerRecheckInterval).C:
 				backoff = false
+			case triggered = <-f.trigger:
+				backoff = false
 			case <-f.quit:
 				return
 			}
@@ -285,18 +301,20 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 			continue
 		}
 		number := ReadHeaderNumber(nfdb, hash)
+		threshold := atomic.LoadUint64(&f.threshold)
+
 		switch {
 		case number == nil:
 			log.Error("Current full block number unavailable", "hash", hash)
 			backoff = true
 			continue
 
-		case *number < params.FullImmutabilityThreshold:
-			log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.FullImmutabilityThreshold)
+		case *number < threshold:
+			log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
 			backoff = true
 			continue
 
-		case *number-params.FullImmutabilityThreshold <= f.frozen:
+		case *number-threshold <= f.frozen:
 			log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
 			backoff = true
 			continue
@@ -308,7 +326,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 			continue
 		}
 		// Seems we have data ready to be frozen, process in usable batches
-		limit := *number - params.FullImmutabilityThreshold
+		limit := *number - threshold
 		if limit-f.frozen > freezerBatchLimit {
 			limit = f.frozen + freezerBatchLimit
 		}
@@ -317,7 +335,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 			first    = f.frozen
 			ancients = make([]common.Hash, 0, limit-f.frozen)
 		)
-		for f.frozen < limit {
+		for f.frozen <= limit {
 			// Retrieves all the components of the canonical block
 			hash := ReadCanonicalHash(nfdb, f.frozen)
 			if hash == (common.Hash{}) {
@@ -368,11 +386,15 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 			log.Crit("Failed to delete frozen canonical blocks", "err", err)
 		}
 		batch.Reset()
-		// Wipe out side chain also.
+
+		// Wipe out side chains also and track dangling side chains
+		var dangling []common.Hash
 		for number := first; number < f.frozen; number++ {
 			// Always keep the genesis block in active database
 			if number != 0 {
-				for _, hash := range ReadAllHashes(db, number) {
+				dangling = ReadAllHashes(db, number)
+				for _, hash := range dangling {
+					log.Trace("Deleting side chain", "number", number, "hash", hash)
 					DeleteBlock(batch, hash, number)
 				}
 			}
@@ -380,6 +402,41 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 		if err := batch.Write(); err != nil {
 			log.Crit("Failed to delete frozen side blocks", "err", err)
 		}
+		batch.Reset()
+
+		// Step into the future and delete any dangling side chains
+		if f.frozen > 0 {
+			tip := f.frozen
+			for len(dangling) > 0 {
+				drop := make(map[common.Hash]struct{})
+				for _, hash := range dangling {
+					log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
+					drop[hash] = struct{}{}
+				}
+				children := ReadAllHashes(db, tip)
+				for i := 0; i < len(children); i++ {
+					// Dig up the child and ensure it's dangling
+					child := ReadHeader(nfdb, children[i], tip)
+					if child == nil {
+						log.Error("Missing dangling header", "number", tip, "hash", children[i])
+						continue
+					}
+					if _, ok := drop[child.ParentHash]; !ok {
+						children = append(children[:i], children[i+1:]...)
+						i--
+						continue
+					}
+					// Delete all block data associated with the child
+					log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+					DeleteBlock(batch, children[i], tip)
+				}
+				dangling = children
+				tip++
+			}
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed to delete dangling side blocks", "err", err)
+			}
+		}
 		// Log something friendly for the user
 		context := []interface{}{
 			"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 5a41199a7ccb0ad82af734dccbb249c9893a033c..b87e7888cce4d4b61440abbbec3ac807657086cf 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -38,6 +38,9 @@ var (
 	// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
 	headFastBlockKey = []byte("LastFast")
 
+	// lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead).
+	lastPivotKey = []byte("LastPivot")
+
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")
 
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 8edcdc6222fc90b9f1fbd7f53838d378ffe7494e..83b757fa17ccec6db9a1e251e76ada71a3df98d8 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -176,8 +176,8 @@ type LightChain interface {
 	// InsertHeaderChain inserts a batch of headers into the local chain.
 	InsertHeaderChain([]*types.Header, int) (int, error)
 
-	// Rollback removes a few recently added elements from the local chain.
-	Rollback([]common.Hash)
+	// SetHead rewinds the local chain to a new head.
+	SetHead(uint64) error
 }
 
 // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
@@ -469,6 +469,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			if pivot <= origin {
 				origin = pivot - 1
 			}
+			// Write out the pivot into the database so a rollback beyond it will
+			// reenable fast sync
+			rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 		}
 	}
 	d.committed = 1
@@ -496,6 +499,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			d.ancientLimit = height - fullMaxForkAncestry - 1
 		}
 		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
+
 		// If a part of blockchain data has already been written into active store,
 		// disable the ancient style insertion explicitly.
 		if origin >= frozen && frozen != 0 {
@@ -506,11 +510,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 		}
 		// Rewind the ancient store and blockchain if reorg happens.
 		if origin+1 < frozen {
-			var hashes []common.Hash
-			for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
-				hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
+			if err := d.lightchain.SetHead(origin + 1); err != nil {
+				return err
 			}
-			d.lightchain.Rollback(hashes)
 		}
 	}
 	// Initiate the sync using a concurrent header and content retrieval algorithm
@@ -1382,35 +1384,32 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 	// Keep a count of uncertain headers to roll back
 	var (
-		rollback    []*types.Header
+		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
 		rollbackErr error
 		mode        = d.getMode()
 	)
 	defer func() {
-		if len(rollback) > 0 {
-			// Flatten the headers and roll them back
-			hashes := make([]common.Hash, len(rollback))
-			for i, header := range rollback {
-				hashes[i] = header.Hash()
-			}
+		if rollback > 0 {
 			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
 			if mode != LightSync {
 				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
 				lastBlock = d.blockchain.CurrentBlock().Number()
 			}
-			d.lightchain.Rollback(hashes)
+			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+				// We're already unwinding the stack, only print the error to make it more visible
+				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+			}
 			curFastBlock, curBlock := common.Big0, common.Big0
 			if mode != LightSync {
 				curFastBlock = d.blockchain.CurrentFastBlock().Number()
 				curBlock = d.blockchain.CurrentBlock().Number()
 			}
-			log.Warn("Rolled back headers", "count", len(hashes),
+			log.Warn("Rolled back chain segment",
 				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
 				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
 				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
 		}
 	}()
-
 	// Wait for batches of headers to process
 	gotHeaders := false
 
@@ -1462,7 +1461,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 					}
 				}
 				// Disable any rollback and return
-				rollback = nil
+				rollback = 0
 				return nil
 			}
 			// Otherwise split the chunk of headers into batches and process them
@@ -1481,15 +1480,9 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 					limit = len(headers)
 				}
 				chunk := headers[:limit]
+
 				// In case of header only syncing, validate the chunk immediately
 				if mode == FastSync || mode == LightSync {
-					// Collect the yet unknown headers to mark them as uncertain
-					unknown := make([]*types.Header, 0, len(chunk))
-					for _, header := range chunk {
-						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
-							unknown = append(unknown, header)
-						}
-					}
 					// If we're importing pure headers, verify based on their recentness
 					frequency := fsHeaderCheckFrequency
 					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
@@ -1497,17 +1490,18 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 					}
 					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
 						rollbackErr = err
-						// If some headers were inserted, add them too to the rollback list
-						if n > 0 {
-							rollback = append(rollback, chunk[:n]...)
+
+						// If some headers were inserted, track them as uncertain
+						if n > 0 && rollback == 0 {
+							rollback = chunk[0].Number.Uint64()
 						}
 						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
 						return fmt.Errorf("%w: %v", errInvalidChain, err)
 					}
-					// All verifications passed, store newly found uncertain headers
-					rollback = append(rollback, unknown...)
-					if len(rollback) > fsHeaderSafetyNet {
-						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
+					// All verifications passed, track all headers within the allotted limits
+					head := chunk[len(chunk)-1].Number.Uint64()
+					if head-rollback > uint64(fsHeaderSafetyNet) {
+						rollback = head - uint64(fsHeaderSafetyNet)
 					}
 				}
 				// Unless we're doing light chains, schedule the headers for associated content retrieval
@@ -1613,6 +1607,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 		}
 	}
 	go closeOnErr(sync)
+
 	// Figure out the ideal pivot block. Note, that this goalpost may move if the
 	// sync takes long enough for the chain head to move significantly.
 	pivot := uint64(0)
@@ -1654,6 +1649,10 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
 				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
 				pivot = height - uint64(fsMinFullBlocks)
+
+				// Write out the pivot into the database so a rollback beyond it will
+				// reenable fast sync
+				rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 			}
 		}
 		P, beforeP, afterP := splitAroundPivot(pivot, results)
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index e774b2b89d2f5a08ff8f727cdd785f1f0b6f7efc..7c165c63c3a6b2057cbc688c7b0d47e1410ecd4c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -341,25 +341,52 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
 	return len(blocks), nil
 }
 
-// Rollback removes some recently added elements from the chain.
-func (dl *downloadTester) Rollback(hashes []common.Hash) {
+// SetHead rewinds the local chain to a new head.
+func (dl *downloadTester) SetHead(head uint64) error {
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	for i := len(hashes) - 1; i >= 0; i-- {
-		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
-			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
+	// Find the hash of the head to reset to
+	var hash common.Hash
+	for h, header := range dl.ownHeaders {
+		if header.Number.Uint64() == head {
+			hash = h
+		}
+	}
+	for h, header := range dl.ancientHeaders {
+		if header.Number.Uint64() == head {
+			hash = h
+		}
+	}
+	if hash == (common.Hash{}) {
+		return fmt.Errorf("unknown head to set: %d", head)
+	}
+	// Find the offset in the header chain
+	var offset int
+	for o, h := range dl.ownHashes {
+		if h == hash {
+			offset = o
+			break
 		}
-		delete(dl.ownChainTd, hashes[i])
-		delete(dl.ownHeaders, hashes[i])
-		delete(dl.ownReceipts, hashes[i])
-		delete(dl.ownBlocks, hashes[i])
+	}
+	// Remove all the hashes and associated data afterwards
+	for i := offset + 1; i < len(dl.ownHashes); i++ {
+		delete(dl.ownChainTd, dl.ownHashes[i])
+		delete(dl.ownHeaders, dl.ownHashes[i])
+		delete(dl.ownReceipts, dl.ownHashes[i])
+		delete(dl.ownBlocks, dl.ownHashes[i])
 
-		delete(dl.ancientChainTd, hashes[i])
-		delete(dl.ancientHeaders, hashes[i])
-		delete(dl.ancientReceipts, hashes[i])
-		delete(dl.ancientBlocks, hashes[i])
+		delete(dl.ancientChainTd, dl.ownHashes[i])
+		delete(dl.ancientHeaders, dl.ownHashes[i])
+		delete(dl.ancientReceipts, dl.ownHashes[i])
+		delete(dl.ancientBlocks, dl.ownHashes[i])
 	}
+	dl.ownHashes = dl.ownHashes[:offset+1]
+	return nil
+}
+
+// Rollback removes some recently added elements from the chain.
+func (dl *downloadTester) Rollback(hashes []common.Hash) {
 }
 
 // newPeer registers a new block download source into the downloader.
diff --git a/eth/sync.go b/eth/sync.go
index 0982a9702df985da5be14ad2f0857f0fbb34046b..26badd1e21c2755b0666f40eb5944b81773c2c17 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -271,15 +271,25 @@ func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp {
 }
 
 func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
+	// If we're in fast sync mode, return that directly
 	if atomic.LoadUint32(&cs.pm.fastSync) == 1 {
 		block := cs.pm.blockchain.CurrentFastBlock()
 		td := cs.pm.blockchain.GetTdByHash(block.Hash())
 		return downloader.FastSync, td
-	} else {
-		head := cs.pm.blockchain.CurrentHeader()
-		td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
-		return downloader.FullSync, td
 	}
+	// We are probably in full sync, but we might have rewound to before the
+	// fast sync pivot, check if we should reenable
+	if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil {
+		if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot {
+			block := cs.pm.blockchain.CurrentFastBlock()
+			td := cs.pm.blockchain.GetTdByHash(block.Hash())
+			return downloader.FastSync, td
+		}
+	}
+	// Nope, we're really full syncing
+	head := cs.pm.blockchain.CurrentHeader()
+	td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
+	return downloader.FullSync, td
 }
 
 // startSync launches doSync in a new goroutine.
diff --git a/trie/sync.go b/trie/sync.go
index 978e76799a10c0cffd99ab83db49895582feefc2..620e97fa30b7374cbbbbc481900554ccb61cfe58 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -99,7 +99,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
 	if _, ok := s.membatch.batch[root]; ok {
 		return
 	}
-	if s.bloom.Contains(root[:]) {
+	if s.bloom == nil || s.bloom.Contains(root[:]) {
 		// Bloom filter says this might be a duplicate, double check
 		blob, _ := s.database.Get(root[:])
 		if local, err := decodeNode(root[:], blob); local != nil && err == nil {
@@ -138,7 +138,7 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
 	if _, ok := s.membatch.batch[hash]; ok {
 		return
 	}
-	if s.bloom.Contains(hash[:]) {
+	if s.bloom == nil || s.bloom.Contains(hash[:]) {
 		// Bloom filter says this might be a duplicate, double check
 		if ok, _ := s.database.Has(hash[:]); ok {
 			return
@@ -300,7 +300,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
 			if _, ok := s.membatch.batch[hash]; ok {
 				continue
 			}
-			if s.bloom.Contains(node) {
+			if s.bloom == nil || s.bloom.Contains(node) {
 				// Bloom filter says this might be a duplicate, double check
 				if ok, _ := s.database.Has(node); ok {
 					continue