From 178fd1931d7650edcfefb1c23a07130fd1f6d427 Mon Sep 17 00:00:00 2001
From: Alex Sharov <AskAlexSharov@gmail.com>
Date: Sun, 5 Dec 2021 09:03:08 +0700
Subject: [PATCH] Genesis sync from existing snapshots (#3087)

---
 cmd/hack/hack.go                              |  84 +++-
 cmd/integration/commands/snapshot_check.go    |   3 -
 cmd/integration/commands/stages.go            |  38 +-
 cmd/integration/commands/state_stages.go      |   9 +-
 cmd/rpcdaemon/README.md                       |  10 +-
 cmd/rpcdaemon/cli/config.go                   |  22 +-
 cmd/rpcdaemon/interfaces/interfaces.go        |   8 +-
 cmd/state/commands/regenerate_txlookup.go     |  19 -
 cmd/state/generate/regenerate_tx_lookup.go    |  59 ---
 cmd/utils/flags.go                            |   2 +-
 common/hasher.go                              |  16 +
 consensus/clique/clique.go                    |   8 +-
 core/rawdb/accessors_chain.go                 |   8 +-
 eth/backend.go                                |   4 +-
 eth/ethconfig/config.go                       |  17 +-
 eth/stagedsync/stage_blockhashes.go           |   5 +-
 eth/stagedsync/stage_bodies.go                |  22 +-
 eth/stagedsync/stage_execute.go               |   2 +-
 eth/stagedsync/stage_headers.go               | 166 ++++++-
 eth/stagedsync/stage_senders.go               |  12 +-
 eth/stagedsync/stage_senders_test.go          |   2 +-
 eth/stagedsync/stage_txlookup.go              |  21 +-
 go.mod                                        |   4 +-
 go.sum                                        |  67 ++-
 p2p/enode/nodedb_test.go                      |   2 +-
 params/config.go                              |   6 +-
 params/snapshots.go                           |  23 +
 rpc/service.go                                |   4 +-
 turbo/snapshotsync/block_reader.go            |  45 +-
 turbo/snapshotsync/block_snapshots.go         | 429 +++++++++++++++---
 turbo/snapshotsync/block_snapshots_test.go    |  33 +-
 turbo/stages/bodydownload/body_algos.go       |  17 +-
 .../stages/headerdownload/header_algo_test.go |   5 +-
 turbo/stages/headerdownload/header_algos.go   |  58 ++-
 turbo/stages/mock_sentry.go                   |  11 +-
 turbo/stages/stageloop.go                     |  17 +-
 36 files changed, 929 insertions(+), 329 deletions(-)
 delete mode 100644 cmd/integration/commands/snapshot_check.go
 delete mode 100644 cmd/state/commands/regenerate_txlookup.go
 delete mode 100644 cmd/state/generate/regenerate_tx_lookup.go
 create mode 100644 params/snapshots.go

diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index abad2b7b90..e04d86fdd0 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -2589,58 +2589,102 @@ func recsplitWholeChain(chaindata string) error {
 
 	log.Info("Last body number", "last", last)
 	for i := uint64(*block); i < last; i += blocksPerFile {
-		fileName := snapshotsync.FileName(i, i+blocksPerFile, snapshotsync.Transactions)
-		segmentFile := path.Join(snapshotDir, fileName) + ".seg"
+		fileName := snapshotsync.FileName(i, i+blocksPerFile, snapshotsync.Bodies)
+
 		log.Info("Creating", "file", fileName+".seg")
 		db := mdbx.MustOpen(chaindata)
-		firstTxID, err := snapshotsync.DumpTxs(db, "", i, int(blocksPerFile))
-		if err != nil {
+		if err := snapshotsync.DumpBodies(db, "", i, int(blocksPerFile)); err != nil {
 			panic(err)
 		}
 		db.Close()
+		segmentFile := path.Join(snapshotDir, fileName) + ".seg"
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-		if err := snapshotsync.TransactionsHashIdx(*chainID, firstTxID, segmentFile); err != nil {
-			panic(err)
-		}
+		//if err := snapshotsync.BodiesIdx(segmentFile, i); err != nil {
+		//	panic(err)
+		//}
 		_ = os.Remove(fileName + ".dat")
 
 		fileName = snapshotsync.FileName(i, i+blocksPerFile, snapshotsync.Headers)
-		segmentFile = path.Join(snapshotDir, fileName) + ".seg"
 		log.Info("Creating", "file", fileName+".seg")
 		db = mdbx.MustOpen(chaindata)
 		if err := snapshotsync.DumpHeaders(db, "", i, int(blocksPerFile)); err != nil {
 			panic(err)
 		}
 		db.Close()
+		segmentFile = path.Join(snapshotDir, fileName) + ".seg"
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-
-		if err := snapshotsync.HeadersHashIdx(segmentFile, i); err != nil {
-			panic(err)
-		}
+		//if err := snapshotsync.HeadersHashIdx(segmentFile, i); err != nil {
+		//	panic(err)
+		//}
 		_ = os.Remove(fileName + ".dat")
 
-		fileName = snapshotsync.FileName(i, i+blocksPerFile, snapshotsync.Bodies)
-		segmentFile = path.Join(snapshotDir, fileName) + ".seg"
+		fileName = snapshotsync.FileName(i, i+blocksPerFile, snapshotsync.Transactions)
 		log.Info("Creating", "file", fileName+".seg")
 		db = mdbx.MustOpen(chaindata)
-		if err := snapshotsync.DumpBodies(db, "", i, int(blocksPerFile)); err != nil {
+		firstTxID, err := snapshotsync.DumpTxs(db, "", i, int(blocksPerFile))
+		if err != nil {
 			panic(err)
 		}
 		db.Close()
+		segmentFile = path.Join(snapshotDir, fileName) + ".seg"
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-		if err := snapshotsync.BodiesIdx(segmentFile, i); err != nil {
-			panic(err)
-		}
+		_ = firstTxID
+		//if err := snapshotsync.TransactionsHashIdx(*chainID, firstTxID, segmentFile); err != nil {
+		//	panic(err)
+		//}
 		_ = os.Remove(fileName + ".dat")
 
 		//nolint
-		break // TODO: remove me - useful for tests
+		//break // TODO: remove me - useful for tests
+	}
+	return nil
+}
+
+func checkBlockSnapshot(chaindata string) error {
+	database := mdbx.MustOpen(chaindata)
+	defer database.Close()
+	dataDir := path.Dir(chaindata)
+	chainConfig := tool.ChainConfigFromDB(database)
+	chainID, _ := uint256.FromBig(chainConfig.ChainID)
+	_ = chainID
+
+	snapshots := snapshotsync.NewAllSnapshots(path.Join(dataDir, "snapshots"), params.KnownSnapshots(chainConfig.ChainName))
+	snapshots.ReopenSegments()
+	snapshots.ReopenIndices()
+	//if err := snapshots.BuildIndices(context.Background(), *chainID); err != nil {
+	//	panic(err)
+	//}
+
+	snBlockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots)
+	tx, err := database.BeginRo(context.Background())
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	for i := uint64(0); i < snapshots.BlocksAvailable(); i++ {
+		hash, err := rawdb.ReadCanonicalHash(tx, i)
+		if err != nil {
+			return err
+		}
+		blockFromDB := rawdb.ReadBlock(tx, hash, i)
+		blockFromSnapshot, _, err := snBlockReader.BlockWithSenders(context.Background(), tx, hash, i)
+		if err != nil {
+			return err
+		}
+
+		if blockFromSnapshot.Hash() != blockFromDB.Hash() {
+			panic(i)
+		}
+		if i%1_000 == 0 {
+			log.Info(fmt.Sprintf("Block Num: %dK", i/1_000))
+		}
 	}
 	return nil
 }
@@ -3928,6 +3972,8 @@ func main() {
 		err = compress1(*chaindata, *name, *name)
 	case "recsplitWholeChain":
 		err = recsplitWholeChain(*chaindata)
+	case "checkBlockSnapshot":
+		err = checkBlockSnapshot(*chaindata)
 	case "decompress":
 		err = decompress(*name)
 	case "genstate":
diff --git a/cmd/integration/commands/snapshot_check.go b/cmd/integration/commands/snapshot_check.go
deleted file mode 100644
index bd4fb20267..0000000000
--- a/cmd/integration/commands/snapshot_check.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package commands
-
-var tmpDBPath string
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 4bb7b78ce8..25d57dad28 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -459,7 +459,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error {
 			}
 
 			u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber)
-			if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, 0), ctx); err != nil {
+			if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, 0, allSnapshots(chainConfig), getBlockReader(chainConfig)), ctx); err != nil {
 				return err
 			}
 
@@ -538,7 +538,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir, pm)
+	cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir, pm, allSnapshots(chainConfig))
 	if unwind > 0 {
 		u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber)
 		err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx)
@@ -558,6 +558,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error {
 func stageExec(db kv.RwDB, ctx context.Context) error {
 	pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil)
 	must(sync.SetCurrentStage(stages.Execution))
+	tmpdir := path.Join(datadir, etl.TmpDirName)
 
 	if reset {
 		genesis, _ := byChain()
@@ -585,7 +586,7 @@ func stageExec(db kv.RwDB, ctx context.Context) error {
 		pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
 	}
 
-	cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDBPath, getBlockReader())
+	cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpdir, getBlockReader(chainConfig))
 	if unwind > 0 {
 		u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber)
 		err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, false)
@@ -615,7 +616,7 @@ func stageExec(db kv.RwDB, ctx context.Context) error {
 }
 
 func stageTrie(db kv.RwDB, ctx context.Context) error {
-	pm, _, _, _, sync, _, _ := newSync(ctx, db, nil)
+	pm, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil)
 	must(sync.SetCurrentStage(stages.IntermediateHashes))
 	tmpdir := path.Join(datadir, etl.TmpDirName)
 
@@ -644,7 +645,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error {
 
 	log.Info("StageExec", "progress", execStage.BlockNumber)
 	log.Info("StageTrie", "progress", s.BlockNumber)
-	cfg := stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader())
+	cfg := stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader(chainConfig))
 	if unwind > 0 {
 		u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber)
 		if err := stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil {
@@ -911,7 +912,7 @@ func stageHistory(db kv.RwDB, ctx context.Context) error {
 func stageTxLookup(db kv.RwDB, ctx context.Context) error {
 	tmpdir := path.Join(datadir, etl.TmpDirName)
 
-	pm, _, _, _, sync, _, _ := newSync(ctx, db, nil)
+	pm, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil)
 	must(sync.SetCurrentStage(stages.TxLookup))
 
 	tx, err := db.BeginRw(ctx)
@@ -936,7 +937,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context) error {
 	}
 	log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
 
-	cfg := stagedsync.StageTxLookupCfg(db, pm, tmpdir)
+	cfg := stagedsync.StageTxLookupCfg(db, pm, tmpdir, allSnapshots(chainConfig))
 	if unwind > 0 {
 		u := sync.NewUnwindState(stages.TxLookup, s.BlockNumber-unwind, s.BlockNumber)
 		err = stagedsync.UnwindTxLookup(u, s, tx, cfg, ctx)
@@ -1022,21 +1023,28 @@ func byChain() (*core.Genesis, *params.ChainConfig) {
 var openSnapshotOnce sync.Once
 var _allSnapshotsSingleton *snapshotsync.AllSnapshots
 
-func allSnapshots() *snapshotsync.AllSnapshots {
+func allSnapshots(cc *params.ChainConfig) *snapshotsync.AllSnapshots {
 	openSnapshotOnce.Do(func() {
-		snapshotCfg := ethconfig.Snapshot{}
 		if enableSnapshot {
-			snapshotCfg.Enabled = true
-			snapshotCfg.Dir = path.Join(datadir, "snapshots")
-			_allSnapshotsSingleton = snapshotsync.MustOpenAll(snapshotCfg.Dir)
+			snapshotCfg := ethconfig.Snapshot{
+				Enabled: true,
+				Dir:     path.Join(datadir, "snapshots"),
+			}
+			_allSnapshotsSingleton = snapshotsync.NewAllSnapshots(snapshotCfg.Dir, params.KnownSnapshots(cc.ChainName))
+			if err := _allSnapshotsSingleton.ReopenSegments(); err != nil {
+				panic(err)
+			}
+			if err := _allSnapshotsSingleton.ReopenIndices(); err != nil {
+				panic(err)
+			}
 		}
 	})
 	return _allSnapshotsSingleton
 }
 
-func getBlockReader() (blockReader interfaces.FullBlockReader) {
+func getBlockReader(cc *params.ChainConfig) (blockReader interfaces.FullBlockReader) {
 	blockReader = snapshotsync.NewBlockReader()
-	if sn := allSnapshots(); sn != nil {
+	if sn := allSnapshots(cc); sn != nil {
 		blockReader = snapshotsync.NewBlockReaderWithSnapshots(sn)
 	}
 	return blockReader
@@ -1110,7 +1118,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig)
 			stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, tmpdir),
 			stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir),
 			stagedsync.StageHashStateCfg(db, tmpdir),
-			stagedsync.StageTrieCfg(db, false, true, tmpdir, getBlockReader()),
+			stagedsync.StageTrieCfg(db, false, true, tmpdir, getBlockReader(chainConfig)),
 			stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()),
 		),
 		stagedsync.MiningUnwindOrder,
diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go
index 0923330a50..8fbd698685 100644
--- a/cmd/integration/commands/state_stages.go
+++ b/cmd/integration/commands/state_stages.go
@@ -180,7 +180,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
 	stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders,
 		stages.Finish)
 
-	execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, tmpDir, getBlockReader())
+	execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, tmpDir, getBlockReader(chainConfig))
 
 	execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error {
 		return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error {
@@ -404,7 +404,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *params.ChainConfig) {
 }
 
 func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error {
-	_, _, _, _, sync, _, _ := newSync(ctx, db, nil)
+	_, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil)
 	tmpdir := path.Join(datadir, etl.TmpDirName)
 	tx, err := db.BeginRw(ctx)
 	if err != nil {
@@ -425,7 +425,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error {
 	}
 	_ = sync.SetCurrentStage(stages.IntermediateHashes)
 	u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to}
-	if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader()), ctx); err != nil {
+	if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader(chainConfig)), ctx); err != nil {
 		return err
 	}
 	must(tx.Commit())
@@ -470,6 +470,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error {
 
 func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error {
 	pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil)
+	tmpdir := path.Join(datadir, etl.TmpDirName)
 
 	tx, err := db.BeginRw(ctx)
 	if err != nil {
@@ -489,7 +490,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error {
 	from := progress(tx, stages.Execution)
 	to := from + unwind
 
-	cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDBPath, getBlockReader())
+	cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpdir, getBlockReader(chainConfig))
 
 	// set block limit of execute stage
 	sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error {
diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md
index 77ed2e1124..0d12c13f6a 100644
--- a/cmd/rpcdaemon/README.md
+++ b/cmd/rpcdaemon/README.md
@@ -64,11 +64,11 @@ The daemon should respond with something like:
 INFO [date-time] HTTP endpoint opened url=localhost:8545...
 ```
 
-When RPC daemon runs remotely, by default it maintains a state cache, which is updated every time when Erigon
-imports a new block. When state cache is reasonably warm, it allows such remote RPC daemon to execute queries
-related to `latest` block (i.e. to current state) with comparable performance to a local RPC daemon
-(around 2x slower vs 10x slower without state cache). Since there can be multiple such RPC daemons per one
-Erigon node, it may scale well for some workloads that are heavy on the current state queries.
+When RPC daemon runs remotely, by default it maintains a state cache, which is updated every time when Erigon imports a
+new block. When state cache is reasonably warm, it allows such remote RPC daemon to execute queries related to `latest`
+block (i.e. to current state) with comparable performance to a local RPC daemon
+(around 2x slower vs 10x slower without state cache). Since there can be multiple such RPC daemons per one Erigon node,
+it may scale well for some workloads that are heavy on the current state queries.
 
 ### Healthcheck
 
diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go
index 8281f6a7dd..25b7a69396 100644
--- a/cmd/rpcdaemon/cli/config.go
+++ b/cmd/rpcdaemon/cli/config.go
@@ -23,9 +23,11 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/services"
 	"github.com/ledgerwatch/erigon/cmd/utils"
 	"github.com/ledgerwatch/erigon/common/paths"
+	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/internal/debug"
 	"github.com/ledgerwatch/erigon/node"
+	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/rpc"
 	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/log/v3"
@@ -246,7 +248,25 @@ func RemoteServices(ctx context.Context, cfg Flags, logger log.Logger, rootCance
 
 	if cfg.SingleNodeMode {
 		if cfg.Snapshot.Enabled {
-			allSnapshots, err := snapshotsync.OpenAll(cfg.Snapshot.Dir)
+			var cc *params.ChainConfig
+			if err := db.View(context.Background(), func(tx kv.Tx) error {
+				genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0)
+				if err != nil {
+					return err
+				}
+				cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash())
+				if err != nil {
+					return err
+				}
+				return nil
+			}); err != nil {
+				return nil, nil, nil, nil, nil, nil, err
+			}
+			if cc == nil {
+				return nil, nil, nil, nil, nil, nil, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db")
+			}
+
+			allSnapshots := snapshotsync.NewAllSnapshots(cfg.Snapshot.Dir, params.KnownSnapshots(cc.ChainName))
 			if err != nil {
 				return nil, nil, nil, nil, nil, nil, err
 			}
diff --git a/cmd/rpcdaemon/interfaces/interfaces.go b/cmd/rpcdaemon/interfaces/interfaces.go
index 17e33ab2c7..7b3346f8b4 100644
--- a/cmd/rpcdaemon/interfaces/interfaces.go
+++ b/cmd/rpcdaemon/interfaces/interfaces.go
@@ -12,8 +12,12 @@ type BlockReader interface {
 	BlockWithSenders(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error)
 }
 
+type HeaderReader interface {
+	Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error)
+	HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error)
+}
+
 type FullBlockReader interface {
 	BlockReader
-	Header(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (*types.Header, error)
-	HeaderByNumber(ctx context.Context, tx kv.Tx, blockHeight uint64) (*types.Header, error)
+	HeaderReader
 }
diff --git a/cmd/state/commands/regenerate_txlookup.go b/cmd/state/commands/regenerate_txlookup.go
deleted file mode 100644
index da23e92739..0000000000
--- a/cmd/state/commands/regenerate_txlookup.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package commands
-
-import (
-	"github.com/ledgerwatch/erigon/cmd/state/generate"
-	"github.com/spf13/cobra"
-)
-
-func init() {
-	withDatadir(regenerateTxLookupCmd)
-	rootCmd.AddCommand(regenerateTxLookupCmd)
-}
-
-var regenerateTxLookupCmd = &cobra.Command{
-	Use:   "regenerateTxLookup",
-	Short: "Generate tx lookup index",
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return generate.RegenerateTxLookup(chaindata)
-	},
-}
diff --git a/cmd/state/generate/regenerate_tx_lookup.go b/cmd/state/generate/regenerate_tx_lookup.go
deleted file mode 100644
index 89b388a9f5..0000000000
--- a/cmd/state/generate/regenerate_tx_lookup.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package generate
-
-import (
-	"context"
-	"os"
-	"os/signal"
-	"time"
-
-	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
-	"github.com/ledgerwatch/erigon/common/dbutils"
-	"github.com/ledgerwatch/erigon/eth/stagedsync"
-	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
-	"github.com/ledgerwatch/erigon/ethdb/prune"
-	"github.com/ledgerwatch/log/v3"
-)
-
-func RegenerateTxLookup(chaindata string) error {
-	db := mdbx.MustOpen(chaindata)
-	defer db.Close()
-	tx, err := db.BeginRw(context.Background())
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
-	if err := tx.ClearBucket(kv.TxLookup); err != nil {
-		return err
-	}
-
-	startTime := time.Now()
-	ch := make(chan os.Signal, 1)
-	quitCh := make(chan struct{})
-	signal.Notify(ch, os.Interrupt)
-	go func() {
-		<-ch
-		close(quitCh)
-	}()
-
-	pm, err := prune.Get(tx)
-	if err != nil {
-		return err
-	}
-	lastExecutedBlock, err := stages.GetStageProgress(tx, stages.Execution)
-	if err != nil {
-		//There could be headers without block in the end
-		log.Error("Cant get last executed block", "err", err)
-	}
-	log.Info("TxLookup generation started", "start time", startTime)
-	err = stagedsync.TxLookupTransform("txlookup", tx,
-		dbutils.EncodeBlockNumber(pm.TxIndex.PruneTo(lastExecutedBlock)),
-		dbutils.EncodeBlockNumber(lastExecutedBlock+1),
-		quitCh,
-		stagedsync.StageTxLookupCfg(db, pm, os.TempDir()))
-	if err != nil {
-		return err
-	}
-	log.Info("TxLookup index is successfully regenerated", "it took", time.Since(startTime))
-	return nil
-}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 973cbc6972..71cc039e6b 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1099,7 +1099,7 @@ func SetupMinerCobra(cmd *cobra.Command, cfg *params.MiningConfig) {
 	cfg.Etherbase = common.HexToAddress(etherbase)
 }
 
-func setClique(ctx *cli.Context, cfg *params.SnapshotConfig, datadir string) {
+func setClique(ctx *cli.Context, cfg *params.ConsensusSnapshotConfig, datadir string) {
 	cfg.CheckpointInterval = ctx.GlobalUint64(CliqueSnapshotCheckpointIntervalFlag.Name)
 	cfg.InmemorySnapshots = ctx.GlobalInt(CliqueSnapshotInmemorySnapshotsFlag.Name)
 	cfg.InmemorySignatures = ctx.GlobalInt(CliqueSnapshotInmemorySignaturesFlag.Name)
diff --git a/common/hasher.go b/common/hasher.go
index 87cb09a309..168c21b675 100644
--- a/common/hasher.go
+++ b/common/hasher.go
@@ -56,3 +56,19 @@ func HashData(data []byte) (Hash, error) {
 	}
 	return buf, nil
 }
+
+func HashTo(data []byte, to []byte) {
+	h := NewHasher()
+	defer ReturnHasherToPool(h)
+	h.Sha.Reset()
+
+	_, err := h.Sha.Write(data)
+	if err != nil {
+		panic(err)
+	}
+
+	_, err = h.Sha.Read(to)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 37c9abde17..e9ddd78d10 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -174,9 +174,9 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, er
 // Ethereum testnet following the Ropsten attacks.
 type Clique struct {
 	chainConfig    *params.ChainConfig
-	config         *params.CliqueConfig   // Consensus engine configuration parameters
-	snapshotConfig *params.SnapshotConfig // Consensus engine configuration parameters
-	db             kv.RwDB                // Database to store and retrieve snapshot checkpoints
+	config         *params.CliqueConfig            // Consensus engine configuration parameters
+	snapshotConfig *params.ConsensusSnapshotConfig // Consensus engine configuration parameters
+	db             kv.RwDB                         // Database to store and retrieve snapshot checkpoints
 
 	signatures *lru.ARCCache // Signatures of recent blocks to speed up mining
 	recents    *lru.ARCCache // Snapshots for recent block to speed up reorgs
@@ -195,7 +195,7 @@ type Clique struct {
 
 // New creates a Clique proof-of-authority consensus engine with the initial
 // signers set to the ones provided by the user.
-func New(cfg *params.ChainConfig, snapshotConfig *params.SnapshotConfig, cliqueDB kv.RwDB) *Clique {
+func New(cfg *params.ChainConfig, snapshotConfig *params.ConsensusSnapshotConfig, cliqueDB kv.RwDB) *Clique {
 	config := cfg.Clique
 
 	// Set any missing consensus parameters to their defaults
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 85ca431bb4..ab8b297704 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -326,12 +326,6 @@ func NonCanonicalTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]t
 func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) error {
 	txId := baseTxId
 	buf := bytes.NewBuffer(nil)
-	c, err := db.RwCursor(kv.EthTx)
-	if err != nil {
-		return err
-	}
-	defer c.Close()
-
 	for _, tx := range txs {
 		txIdKey := make([]byte, 8)
 		binary.BigEndian.PutUint64(txIdKey, txId)
@@ -343,7 +337,7 @@ func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) err
 		}
 
 		// If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine.
-		if err := c.Append(txIdKey, common.CopyBytes(buf.Bytes())); err != nil {
+		if err := db.Append(kv.EthTx, txIdKey, common.CopyBytes(buf.Bytes())); err != nil {
 			return err
 		}
 	}
diff --git a/eth/backend.go b/eth/backend.go
index 07fa9240b0..73d927e48f 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -352,7 +352,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 
 	var blockReader interfaces.FullBlockReader
 	if config.Snapshot.Enabled {
-		allSnapshots, err := snapshotsync.OpenAll(config.Snapshot.Dir)
+		allSnapshots := snapshotsync.NewAllSnapshots(config.Snapshot.Dir, params.KnownSnapshots(chainConfig.ChainName))
 		if err != nil {
 			return nil, err
 		}
@@ -423,7 +423,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 				if err := miningRPC.(*privateapi.MiningServer).BroadcastMinedBlock(b); err != nil {
 					log.Error("txpool rpc mined block broadcast", "err", err)
 				}
-				if err := backend.downloadServer.Hd.AddMinedBlock(b); err != nil {
+				if err := backend.downloadServer.Hd.AddMinedHeader(b.Header()); err != nil {
 					log.Error("add mined block to header downloader", "err", err)
 				}
 				if err := backend.downloadServer.Bd.AddMinedBlock(b); err != nil {
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 1e86f8285d..bee8809d21 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -115,9 +115,9 @@ func init() {
 //go:generate gencodec -type Config -formats toml -out gen_config.go
 
 type Snapshot struct {
-	Enabled bool
-	Dir     string
-	Seeding bool
+	Enabled             bool
+	Dir                 string
+	ChainSnapshotConfig *params.SnapshotsConfig
 }
 
 // Config contains configuration options for ETH protocol.
@@ -135,9 +135,10 @@ type Config struct {
 
 	P2PEnabled bool
 
-	Prune        prune.Mode
-	BatchSize    datasize.ByteSize // Batch size for execution stage
-	BadBlockHash common.Hash       // hash of the block marked as bad
+	Prune     prune.Mode
+	BatchSize datasize.ByteSize // Batch size for execution stage
+
+	BadBlockHash common.Hash // hash of the block marked as bad
 
 	Snapshot Snapshot
 
@@ -156,7 +157,7 @@ type Config struct {
 	// Ethash options
 	Ethash ethash.Config
 
-	Clique params.SnapshotConfig
+	Clique params.ConsensusSnapshotConfig
 	Aura   params.AuRaConfig
 
 	// Transaction pool options
@@ -205,7 +206,7 @@ func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, c
 			}, notify, noverify)
 			eng = engine
 		}
-	case *params.SnapshotConfig:
+	case *params.ConsensusSnapshotConfig:
 		if chainConfig.Clique != nil {
 			eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory))
 		}
diff --git a/eth/stagedsync/stage_blockhashes.go b/eth/stagedsync/stage_blockhashes.go
index a48e6c7f7c..844180ab0d 100644
--- a/eth/stagedsync/stage_blockhashes.go
+++ b/eth/stagedsync/stage_blockhashes.go
@@ -11,6 +11,7 @@ import (
 	"github.com/ledgerwatch/erigon/common/dbutils"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
+	"github.com/ledgerwatch/erigon/params"
 )
 
 func extractHeaders(k []byte, v []byte, next etl.ExtractNextFunc) error {
@@ -24,12 +25,14 @@ func extractHeaders(k []byte, v []byte, next etl.ExtractNextFunc) error {
 type BlockHashesCfg struct {
 	db     kv.RwDB
 	tmpDir string
+	cc     *params.ChainConfig
 }
 
-func StageBlockHashesCfg(db kv.RwDB, tmpDir string) BlockHashesCfg {
+func StageBlockHashesCfg(db kv.RwDB, tmpDir string, cc *params.ChainConfig) BlockHashesCfg {
 	return BlockHashesCfg{
 		db:     db,
 		tmpDir: tmpDir,
+		cc:     cc,
 	}
 }
 
diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go
index c067dce27a..74fdf2cfd6 100644
--- a/eth/stagedsync/stage_bodies.go
+++ b/eth/stagedsync/stage_bodies.go
@@ -9,12 +9,14 @@ import (
 	"github.com/c2h5oh/datasize"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/p2p/enode"
 	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/turbo/adapter"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/erigon/turbo/stages/bodydownload"
 	"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
 	"github.com/ledgerwatch/log/v3"
@@ -29,6 +31,8 @@ type BodiesCfg struct {
 	timeout         int
 	chanConfig      params.ChainConfig
 	batchSize       datasize.ByteSize
+	snapshots       *snapshotsync.AllSnapshots
+	blockReader     interfaces.FullBlockReader
 }
 
 func StageBodiesCfg(
@@ -40,8 +44,10 @@ func StageBodiesCfg(
 	timeout int,
 	chanConfig params.ChainConfig,
 	batchSize datasize.ByteSize,
+	snapshots *snapshotsync.AllSnapshots,
+	blockReader interfaces.FullBlockReader,
 ) BodiesCfg {
-	return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, batchSize: batchSize}
+	return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, batchSize: batchSize, snapshots: snapshots, blockReader: blockReader}
 }
 
 // BodiesForward progresses Bodies stage in the forward direction
@@ -55,7 +61,6 @@ func BodiesForward(
 ) error {
 
 	var d1, d2, d3, d4, d5, d6 time.Duration
-
 	var err error
 	useExternalTx := tx != nil
 	if !useExternalTx {
@@ -66,6 +71,15 @@ func BodiesForward(
 		defer tx.Rollback()
 	}
 	timeout := cfg.timeout
+
+	if cfg.snapshots != nil {
+		if s.BlockNumber < cfg.snapshots.BlocksAvailable() {
+			if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil {
+				return err
+			}
+			s.BlockNumber = cfg.snapshots.BlocksAvailable()
+		}
+	}
 	// This will update bd.maxProgress
 	if _, _, _, err = cfg.bd.UpdateFromDb(tx); err != nil {
 		return err
@@ -114,7 +128,7 @@ Loop:
 		if req == nil {
 			start := time.Now()
 			currentTime := uint64(time.Now().Unix())
-			req, blockNum, err = cfg.bd.RequestMoreBodies(tx, blockNum, currentTime, cfg.blockPropagator)
+			req, blockNum, err = cfg.bd.RequestMoreBodies(tx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator)
 			if err != nil {
 				return fmt.Errorf("request more bodies: %w", err)
 			}
@@ -136,7 +150,7 @@ Loop:
 		for req != nil && sentToPeer {
 			start := time.Now()
 			currentTime := uint64(time.Now().Unix())
-			req, blockNum, err = cfg.bd.RequestMoreBodies(tx, blockNum, currentTime, cfg.blockPropagator)
+			req, blockNum, err = cfg.bd.RequestMoreBodies(tx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator)
 			if err != nil {
 				return fmt.Errorf("request more bodies: %w", err)
 			}
diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go
index b26b9668ec..e2e99dfad0 100644
--- a/eth/stagedsync/stage_execute.go
+++ b/eth/stagedsync/stage_execute.go
@@ -112,7 +112,7 @@ func executeBlock(
 	callTracer := calltracer.NewCallTracer(contractHasTEVM)
 	vmConfig.Debug = true
 	vmConfig.Tracer = callTracer
-	receipts, err := core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx}, contractHasTEVM)
+	receipts, err := core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM)
 	if err != nil {
 		return err
 	}
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go
index 8bc6cd29f3..4e1749776a 100644
--- a/eth/stagedsync/stage_headers.go
+++ b/eth/stagedsync/stage_headers.go
@@ -9,8 +9,10 @@ import (
 	"time"
 
 	"github.com/c2h5oh/datasize"
+	"github.com/holiman/uint256"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/dbutils"
 	"github.com/ledgerwatch/erigon/core/rawdb"
@@ -19,6 +21,7 @@ import (
 	"github.com/ledgerwatch/erigon/p2p/enode"
 	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/rlp"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
 
 	"github.com/ledgerwatch/log/v3"
@@ -35,6 +38,8 @@ type HeadersCfg struct {
 	noP2PDiscovery    bool
 	reverseDownloadCh chan types.Header
 	waitingPosHeaders *bool
+	snapshots         *snapshotsync.AllSnapshots
+	blockReader       interfaces.FullBlockReader
 }
 
 func StageHeadersCfg(
@@ -48,6 +53,8 @@ func StageHeadersCfg(
 	noP2PDiscovery bool,
 	reverseDownloadCh chan types.Header,
 	waitingPosHeaders *bool,
+	snapshots *snapshotsync.AllSnapshots,
+	blockReader interfaces.FullBlockReader,
 ) HeadersCfg {
 	return HeadersCfg{
 		db:                db,
@@ -60,6 +67,8 @@ func StageHeadersCfg(
 		noP2PDiscovery:    noP2PDiscovery,
 		reverseDownloadCh: reverseDownloadCh,
 		waitingPosHeaders: waitingPosHeaders,
+		snapshots:         snapshots,
+		blockReader:       blockReader,
 	}
 }
 
@@ -103,7 +112,7 @@ func SpawnStageHeaders(
 	}
 }
 
-// HeadersDownwards progresses Headers stage in the downward direction
+// HeadersDownward progresses Headers stage in the downward direction
 func HeadersDownward(
 	s *StageState,
 	u Unwinder,
@@ -149,6 +158,123 @@ func HeadersForward(
 	useExternalTx bool,
 ) error {
 	var headerProgress uint64
+
+	if cfg.snapshots != nil {
+		if !cfg.snapshots.AllSegmentsAvailable() {
+			// wait for Downloader service to download all expected snapshots
+			logEvery := time.NewTicker(logInterval)
+			defer logEvery.Stop()
+			for {
+				headers, bodies, txs, err := cfg.snapshots.SegmentsAvailability()
+				if err != nil {
+					return err
+				}
+				expect := cfg.snapshots.ChainSnapshotConfig().ExpectBlocks
+				if headers >= expect && bodies >= expect && txs >= expect {
+					if err := cfg.snapshots.ReopenSegments(); err != nil {
+						return err
+					}
+					if expect > cfg.snapshots.BlocksAvailable() {
+						return fmt.Errorf("not enough snapshots available: %d > %d", expect, cfg.snapshots.BlocksAvailable())
+					}
+					cfg.snapshots.SetAllSegmentsAvailable(true)
+
+					break
+				}
+				log.Info(fmt.Sprintf("[%s] Waiting for snapshots up to block %d...", s.LogPrefix(), expect), "headers", headers, "bodies", bodies, "txs", txs)
+				time.Sleep(10 * time.Second)
+
+				select {
+				case <-ctx.Done():
+					return ctx.Err()
+				case <-logEvery.C:
+					log.Info(fmt.Sprintf("[%s] Waiting for snapshots up to block %d...", s.LogPrefix(), expect), "headers", headers, "bodies", bodies, "txs", txs)
+				default:
+				}
+			}
+		}
+
+		if !cfg.snapshots.AllIdxAvailable() {
+			if !cfg.snapshots.AllSegmentsAvailable() {
+				return fmt.Errorf("not all snapshot segments are available")
+			}
+
+			// build missing snapshot indices (.idx) for the already-downloaded segments
+			logEvery := time.NewTicker(logInterval)
+			defer logEvery.Stop()
+			headers, bodies, txs, err := cfg.snapshots.IdxAvailability()
+			if err != nil {
+				return err
+			}
+			expect := cfg.snapshots.ChainSnapshotConfig().ExpectBlocks
+			if headers < expect || bodies < expect || txs < expect {
+				chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID)
+				if err := cfg.snapshots.BuildIndices(ctx, *chainID); err != nil {
+					return err
+				}
+			}
+
+			if err := cfg.snapshots.ReopenIndices(); err != nil {
+				return err
+			}
+			if expect > cfg.snapshots.IndicesAvailable() {
+				return fmt.Errorf("not enough snapshot indices available: %d > %d", expect, cfg.snapshots.IndicesAvailable())
+			}
+			cfg.snapshots.SetAllIdxAvailable(true)
+		}
+
+		c, _ := tx.Cursor(kv.HeaderTD)
+		count, _ := c.Count()
+		if count == 0 || count == 1 { // genesis writes exactly 1 record
+			logEvery := time.NewTicker(logInterval)
+			defer logEvery.Stop()
+
+			tx.ClearBucket(kv.HeaderTD)
+			var lastHeader *types.Header
+			// accumulate and write total difficulty for each snapshot header
+			td := big.NewInt(0)
+			if err := snapshotsync.ForEachHeader(cfg.snapshots, func(header *types.Header) error {
+				td.Add(td, header.Difficulty)
+				/*
+					if header.Eip3675 {
+						return nil
+					}
+
+					if td.Cmp(cfg.terminalTotalDifficulty) > 0 {
+						return rawdb.MarkTransition(tx, blockNum)
+					}
+				*/
+				// TODO: append
+				rawdb.WriteTd(tx, header.Hash(), header.Number.Uint64(), td)
+				lastHeader = header
+				select {
+				case <-ctx.Done():
+					return ctx.Err()
+				case <-logEvery.C:
+					log.Info(fmt.Sprintf("[%s] Writing total difficulty index for snapshots", s.LogPrefix()), "block_num", header.Number.Uint64())
+				default:
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+			tx.ClearBucket(kv.HeaderCanonical)
+			if err := fixCanonicalChain(s.LogPrefix(), logEvery, lastHeader.Number.Uint64(), lastHeader.Hash(), tx, cfg.blockReader); err != nil {
+				return err
+			}
+		}
+
+		if s.BlockNumber < cfg.snapshots.BlocksAvailable() {
+			if err := cfg.hd.AddHeaderFromSnapshot(cfg.snapshots.BlocksAvailable(), cfg.blockReader); err != nil {
+				return err
+			}
+			if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil {
+				return err
+			}
+			s.BlockNumber = cfg.snapshots.BlocksAvailable()
+		}
+	}
+
 	var err error
 
 	if !useExternalTx {
@@ -170,7 +296,7 @@ func HeadersForward(
 	defer logEvery.Stop()
 	if hash == (common.Hash{}) {
 		headHash := rawdb.ReadHeadHeaderHash(tx)
-		if err = fixCanonicalChain(logPrefix, logEvery, headerProgress, headHash, tx); err != nil {
+		if err = fixCanonicalChain(logPrefix, logEvery, headerProgress, headHash, tx, cfg.blockReader); err != nil {
 			return err
 		}
 		if !useExternalTx {
@@ -193,7 +319,7 @@ func HeadersForward(
 		return err
 	}
 	headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, headerProgress)
-	cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, tx: tx})
+	cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader})
 
 	var sentToPeer bool
 	stopped := false
@@ -246,8 +372,7 @@ Loop:
 		}
 		// Load headers into the database
 		var inSync bool
-
-		if inSync, err = cfg.hd.InsertHeaders(headerInserter.FeedHeaderFunc(tx), cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C); err != nil {
+		if inSync, err = cfg.hd.InsertHeaders(headerInserter.FeedHeaderFunc(tx, cfg.blockReader), cfg.chainConfig.TerminalTotalDifficulty, logPrefix, logEvery.C); err != nil {
 			return err
 		}
 
@@ -288,7 +413,7 @@ Loop:
 	if headerInserter.Unwind() {
 		u.UnwindTo(headerInserter.UnwindPoint(), common.Hash{})
 	} else if headerInserter.GetHighest() != 0 {
-		if err := fixCanonicalChain(logPrefix, logEvery, headerInserter.GetHighest(), headerInserter.GetHighestHash(), tx); err != nil {
+		if err := fixCanonicalChain(logPrefix, logEvery, headerInserter.GetHighest(), headerInserter.GetHighestHash(), tx, cfg.blockReader); err != nil {
 			return fmt.Errorf("fix canonical chain: %w", err)
 		}
 	}
@@ -306,7 +431,7 @@ Loop:
 	return nil
 }
 
-func fixCanonicalChain(logPrefix string, logEvery *time.Ticker, height uint64, hash common.Hash, tx kv.StatelessRwTx) error {
+func fixCanonicalChain(logPrefix string, logEvery *time.Ticker, height uint64, hash common.Hash, tx kv.StatelessRwTx, headerReader interfaces.FullBlockReader) error {
 	if height == 0 {
 		return nil
 	}
@@ -319,7 +444,11 @@ func fixCanonicalChain(logPrefix string, logEvery *time.Ticker, height uint64, h
 		if err = rawdb.WriteCanonicalHash(tx, ancestorHash, ancestorHeight); err != nil {
 			return fmt.Errorf("marking canonical header %d %x: %w", ancestorHeight, ancestorHash, err)
 		}
-		ancestor := rawdb.ReadHeader(tx, ancestorHash, ancestorHeight)
+
+		ancestor, err := headerReader.Header(context.Background(), tx, ancestorHash, ancestorHeight)
+		if err != nil {
+			return err
+		}
 		if ancestor == nil {
 			return fmt.Errorf("ancestor is nil. height %d, hash %x", ancestorHeight, ancestorHash)
 		}
@@ -464,19 +593,36 @@ func logProgressHeaders(logPrefix string, prev, now uint64) uint64 {
 }
 
 type chainReader struct {
-	config *params.ChainConfig
-	tx     kv.RwTx
+	config      *params.ChainConfig
+	tx          kv.RwTx
+	blockReader interfaces.FullBlockReader
 }
 
 func (cr chainReader) Config() *params.ChainConfig  { return cr.config }
 func (cr chainReader) CurrentHeader() *types.Header { panic("") }
 func (cr chainReader) GetHeader(hash common.Hash, number uint64) *types.Header {
+	if cr.blockReader != nil {
+		h, _ := cr.blockReader.Header(context.Background(), cr.tx, hash, number)
+		return h
+	}
 	return rawdb.ReadHeader(cr.tx, hash, number)
 }
 func (cr chainReader) GetHeaderByNumber(number uint64) *types.Header {
+	if cr.blockReader != nil {
+		h, _ := cr.blockReader.HeaderByNumber(context.Background(), cr.tx, number)
+		return h
+	}
 	return rawdb.ReadHeaderByNumber(cr.tx, number)
+
 }
 func (cr chainReader) GetHeaderByHash(hash common.Hash) *types.Header {
+	if cr.blockReader != nil {
+		number := rawdb.ReadHeaderNumber(cr.tx, hash)
+		if number == nil {
+			return nil
+		}
+		return cr.GetHeader(hash, *number)
+	}
 	h, _ := rawdb.ReadHeaderByHash(cr.tx, hash)
 	return h
 }
diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go
index 2b0c72cd40..d322cd2fe4 100644
--- a/eth/stagedsync/stage_senders.go
+++ b/eth/stagedsync/stage_senders.go
@@ -21,6 +21,7 @@ import (
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/ethdb/prune"
 	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/log/v3"
 	"github.com/ledgerwatch/secp256k1"
 )
@@ -35,9 +36,10 @@ type SendersCfg struct {
 	tmpdir          string
 	prune           prune.Mode
 	chainConfig     *params.ChainConfig
+	snapshots       *snapshotsync.AllSnapshots
 }
 
-func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, prune prune.Mode) SendersCfg {
+func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, prune prune.Mode, snapshots *snapshotsync.AllSnapshots) SendersCfg {
 	const sendersBatchSize = 10000
 	const sendersBlockSize = 4096
 
@@ -51,6 +53,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, pr
 		tmpdir:          tmpdir,
 		chainConfig:     chainCfg,
 		prune:           prune,
+		snapshots:       snapshots,
 	}
 }
 
@@ -95,7 +98,12 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R
 	}
 	defer canonicalC.Close()
 
-	for k, v, err := canonicalC.Seek(dbutils.EncodeBlockNumber(s.BlockNumber + 1)); k != nil; k, v, err = canonicalC.Next() {
+	startFrom := s.BlockNumber + 1
+	if cfg.snapshots != nil && startFrom < cfg.snapshots.BlocksAvailable() {
+		startFrom = cfg.snapshots.BlocksAvailable()
+	}
+
+	for k, v, err := canonicalC.Seek(dbutils.EncodeBlockNumber(startFrom)); k != nil; k, v, err = canonicalC.Next() {
 		if err != nil {
 			return err
 		}
diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go
index 93a6c22ad3..1d39e38ade 100644
--- a/eth/stagedsync/stage_senders_test.go
+++ b/eth/stagedsync/stage_senders_test.go
@@ -108,7 +108,7 @@ func TestSenders(t *testing.T) {
 
 	require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3))
 
-	cfg := StageSendersCfg(db, params.TestChainConfig, "", prune.Mode{})
+	cfg := StageSendersCfg(db, params.TestChainConfig, "", prune.Mode{}, nil)
 	err := SpawnRecoverSendersStage(cfg, &StageState{ID: stages.Senders}, nil, tx, 3, ctx)
 	assert.NoError(t, err)
 
diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go
index c3016410b8..f878c570f7 100644
--- a/eth/stagedsync/stage_txlookup.go
+++ b/eth/stagedsync/stage_txlookup.go
@@ -15,23 +15,27 @@ import (
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/ethdb/prune"
 	"github.com/ledgerwatch/erigon/rlp"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 )
 
 type TxLookupCfg struct {
-	db     kv.RwDB
-	prune  prune.Mode
-	tmpdir string
+	db        kv.RwDB
+	prune     prune.Mode
+	tmpdir    string
+	snapshots *snapshotsync.AllSnapshots
 }
 
 func StageTxLookupCfg(
 	db kv.RwDB,
 	prune prune.Mode,
 	tmpdir string,
+	snapshots *snapshotsync.AllSnapshots,
 ) TxLookupCfg {
 	return TxLookupCfg{
-		db:     db,
-		prune:  prune,
-		tmpdir: tmpdir,
+		db:        db,
+		prune:     prune,
+		tmpdir:    tmpdir,
+		snapshots: snapshots,
 	}
 }
 
@@ -56,6 +60,11 @@ func SpawnTxLookup(s *StageState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Conte
 	if startBlock < pruneTo {
 		startBlock = pruneTo
 	}
+
+	// Snapshot .idx files already contain the TxLookup index - no reason to iterate over those blocks here
+	if cfg.snapshots != nil && cfg.snapshots.BlocksAvailable() > startBlock {
+		startBlock = cfg.snapshots.BlocksAvailable()
+	}
 	if startBlock > 0 {
 		startBlock++
 	}
diff --git a/go.mod b/go.mod
index ad9b663f35..4e388ff874 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/VictoriaMetrics/fastcache v1.7.0
 	github.com/VictoriaMetrics/metrics v1.18.1
 	github.com/anacrolix/log v0.10.0
-	github.com/anacrolix/torrent v1.35.0
+	github.com/anacrolix/torrent v1.38.0
 	github.com/btcsuite/btcd v0.21.0-beta
 	github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2
 	github.com/consensys/gnark-crypto v0.4.0
@@ -42,7 +42,7 @@ require (
 	github.com/pelletier/go-toml v1.9.4
 	github.com/quasilyte/go-ruleguard/dsl v0.3.6
 	github.com/rs/cors v1.8.0
-	github.com/shirou/gopsutil/v3 v3.21.9
+	github.com/shirou/gopsutil/v3 v3.21.11
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.7.0
diff --git a/go.sum b/go.sum
index 79872288a7..4bd24add25 100644
--- a/go.sum
+++ b/go.sum
@@ -51,9 +51,12 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
+filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
 github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
@@ -66,8 +69,6 @@ github.com/RoaringBitmap/roaring v0.9.4 h1:ckvZSX5gwCRaJYBNe7syNawCU5oruY9gQmjXl
 github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
-github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/VictoriaMetrics/fastcache v1.7.0 h1:E6GibaGI685TafrI7E/QqZPkMsOzRw+3gpICQx08ISg=
 github.com/VictoriaMetrics/fastcache v1.7.0/go.mod h1:n7Sl+ioh/HlWeYHLSIBIE8TcZFHg/+xgvomWSS5xuEE=
 github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
@@ -89,15 +90,20 @@ github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxj
 github.com/alexflint/go-scalar v1.1.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
-github.com/anacrolix/args v0.1.1-0.20211020052733-53ed238acbd4/go.mod h1:41JBnF8sKExNVLHPkCdL74jkZc3dSxAkGsk1TuKOUFI=
+github.com/anacrolix/args v0.3.0/go.mod h1:41JBnF8sKExNVLHPkCdL74jkZc3dSxAkGsk1TuKOUFI=
+github.com/anacrolix/args v0.4.1-0.20211104085705-59f0fe94eb8f/go.mod h1:41JBnF8sKExNVLHPkCdL74jkZc3dSxAkGsk1TuKOUFI=
 github.com/anacrolix/chansync v0.0.0-20210524073341-a336ebc2de92/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
-github.com/anacrolix/chansync v0.3.0-0.0.20211007004133-3f72684c4a93 h1:sQ8igc3anitrtKPEHRK+RBvuNZP0+DRAa6jskKlq4+k=
+github.com/anacrolix/chansync v0.1.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
+github.com/anacrolix/chansync v0.2.1-0.20210910114620-14955c95ded9/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
 github.com/anacrolix/chansync v0.3.0-0.0.20211007004133-3f72684c4a93/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
+github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U=
+github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
 github.com/anacrolix/confluence v1.7.1-0.20210221224747-9cb14aa2c53a/go.mod h1:T0JHvSaf9UfoiUdCtCOUuRroHm/tauUJTbLc6/vd5YA=
 github.com/anacrolix/confluence v1.7.1-0.20210221225853-90405640e928/go.mod h1:NoLcfoRet+kYttjLXJRmh4qBVrylJsfIItik5GGj21A=
 github.com/anacrolix/confluence v1.7.1-0.20210311004351-d642adb8546c/go.mod h1:KCZ3eObqKECNeZg0ekAoJVakHMP3gAdR8i0bQ26IkzM=
-github.com/anacrolix/confluence v1.8.0 h1:JRLJ+7AjqpquyAkVmlLrhrfRCwc5G1qSvUQOAfzd0ps=
 github.com/anacrolix/confluence v1.8.0/go.mod h1:GsPP6ikA8h/CU7ExbuMOswpzZpPdf1efDPu4rVXL43g=
+github.com/anacrolix/confluence v1.9.0 h1:7WrWktoDw7P4uo1bzgaA8FFesvc7NsTp37sAsG54XlE=
+github.com/anacrolix/confluence v1.9.0/go.mod h1:O5uS+WVgip+3SOcV1K7E/jE3m4DtK7Jk6QJTnU2VS5s=
 github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4 h1:0yHJvFiGQhJ1gSHJOR8xzmnx45orEt7uiIB6guf0+zc=
 github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI=
 github.com/anacrolix/dht/v2 v2.0.1/go.mod h1:GbTT8BaEtfqab/LPd5tY41f3GvYeii3mmDUK300Ycyo=
@@ -109,14 +115,17 @@ github.com/anacrolix/dht/v2 v2.8.1-0.20210221225335-7a6713a749f9/go.mod h1:p7fLH
 github.com/anacrolix/dht/v2 v2.8.1-0.20210311003418-13622df072ae/go.mod h1:wLmYr78fBu4KfUUkFZyGFFwDPDw9EHL5x8c632XCZzs=
 github.com/anacrolix/dht/v2 v2.9.1/go.mod h1:ZyYcIQinN/TE3oKONCchQOLjhYR786Jaxz3jsBtih4A=
 github.com/anacrolix/dht/v2 v2.10.0/go.mod h1:KC51tqylRYBu82RM5pEYf+g1n7db+F0tOJqSbCjjZWc=
-github.com/anacrolix/dht/v2 v2.10.6-0.20211007004332-99263ec9c1c8 h1:udjd5Gu0/e5sUL93uHORcodq6T7CSx5OjtBjC2ZXRVg=
+github.com/anacrolix/dht/v2 v2.10.5-0.20210902001729-06cc4fe90e53/go.mod h1:zHjijcebN+L7JbzxW0mOraHis+I81EIgsJAAtiw8bQ8=
 github.com/anacrolix/dht/v2 v2.10.6-0.20211007004332-99263ec9c1c8/go.mod h1:WID4DexLrucfnwzv1OV8REzgoCpyVDwEczxIOrUeFrY=
+github.com/anacrolix/dht/v2 v2.13.0 h1:nhEXbbwVL2fFEDqWJby+lSD0LEB06CW/Tgj74O5Ty9g=
+github.com/anacrolix/dht/v2 v2.13.0/go.mod h1:zJgaiAU2yhzmchZE2mY8WyZ64LK/F/D9MAeN0ct73qQ=
 github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.0.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
 github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
 github.com/anacrolix/envpprof v1.1.1 h1:sHQCyj7HtiSfaZAzL2rJrQdyS7odLqlwO6nhk/tG/j8=
 github.com/anacrolix/envpprof v1.1.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
+github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ=
 github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E=
 github.com/anacrolix/go-libutp v1.0.2/go.mod h1:uIH0A72V++j0D1nnmTjjZUiH/ujPkFxYWkxQ02+7S0U=
 github.com/anacrolix/go-libutp v1.0.4 h1:95sv09MoNQbgEJqJLrotglFnVBAiMx1tyl6xMAmnAgg=
@@ -162,8 +171,11 @@ github.com/anacrolix/multiless v0.0.0-20191223025854-070b7994e841/go.mod h1:TrCL
 github.com/anacrolix/multiless v0.0.0-20200413040533-acfd16f65d5d/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4=
 github.com/anacrolix/multiless v0.0.0-20210222022749-ef43011a77ec/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4=
 github.com/anacrolix/multiless v0.1.1-0.20210520040635-10ee7b5f3cff/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4=
-github.com/anacrolix/multiless v0.1.1-0.20210529082330-de2f6cf29619 h1:ZkusP2EHxvxm+IymiKJ8DBVE/E6fJkb8K/2+GXZpjAY=
 github.com/anacrolix/multiless v0.1.1-0.20210529082330-de2f6cf29619/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4=
+github.com/anacrolix/multiless v0.2.0 h1:HtGBBOQcHaJM59RP3ysITId7AMIgiNF4xJucaFh14Ms=
+github.com/anacrolix/multiless v0.2.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4=
+github.com/anacrolix/publicip v0.2.0/go.mod h1:67G1lVkLo8UjdEcJkwScWVTvlJ35OCDsRJoWXl/wi4g=
+github.com/anacrolix/squirrel v0.1.0/go.mod h1:YzgVvikMdFD441oTWlNG189bpKabO9Sbf3uCSVgca04=
 github.com/anacrolix/squirrel v0.1.1-0.20210914065657-81bc5ecdc43a/go.mod h1:YzgVvikMdFD441oTWlNG189bpKabO9Sbf3uCSVgca04=
 github.com/anacrolix/stm v0.1.0/go.mod h1:ZKz7e7ERWvP0KgL7WXfRjBXHNRhlVRlbBQecqFtPq+A=
 github.com/anacrolix/stm v0.1.1-0.20191106051447-e749ba3531cf/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg=
@@ -201,8 +213,10 @@ github.com/anacrolix/torrent v1.25.1-0.20210224024805-693c30dd889e/go.mod h1:d4V
 github.com/anacrolix/torrent v1.28.1-0.20210622065255-582f0ccd48a0/go.mod h1:15VRIA5/DwqbqETbKo3fzlC4aSbB0iMoo10ng3mzAbE=
 github.com/anacrolix/torrent v1.29.0/go.mod h1:40Hf2bWxFqTbTWbrdig57JnmYLCjShbWWjdbB3VN5n4=
 github.com/anacrolix/torrent v1.30.2/go.mod h1:vbNxKfaGiNq8edcCaQI1oSNJwh4GMqtMUMF9qOdZ6C0=
-github.com/anacrolix/torrent v1.35.0 h1:XuTuufcAC0jLV0bEMkFJ9yHZ9y9isE6GmK5Gw17pNno=
-github.com/anacrolix/torrent v1.35.0/go.mod h1:eNmTGUF2tnLB4adDjYgx2+hAZhCYW8r8vjF6ZITO2pU=
+github.com/anacrolix/torrent v1.31.1-0.20210910222643-d957502528e0/go.mod h1:akZJHHFN8aWH2lcPZQ0I3etujnenwYpUvj36HV9uvAI=
+github.com/anacrolix/torrent v1.35.1-0.20211104090255-eaeb38b18c6a/go.mod h1:97nxJW8NIeUyGdBvMOAl9cmcxi8xPez3nlE0RwSZcL0=
+github.com/anacrolix/torrent v1.38.0 h1:HxSErXlfxjSnPRT9LJT4v7c6hGEFp9JLSS7eRcs3G14=
+github.com/anacrolix/torrent v1.38.0/go.mod h1:QC5d3J5OQMZC67Ni2nKkvwph/vc/MA7ikHqtdiGSq7M=
 github.com/anacrolix/upnp v0.1.1/go.mod h1:LXsbsp5h+WGN7YR+0A7iVXm5BL1LYryDev1zuJMWYQo=
 github.com/anacrolix/upnp v0.1.2-0.20200416075019-5e9378ed1425 h1:/Wi6l2ONI1FUFWN4cBwHOO90V4ylp4ud/eov6GUcVFk=
 github.com/anacrolix/upnp v0.1.2-0.20200416075019-5e9378ed1425/go.mod h1:Pz94W3kl8rf+wxH3IbCa9Sq+DTJr8OSbV2Q3/y51vYs=
@@ -213,6 +227,7 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/arl/statsviz v0.4.0/go.mod h1:+5inUy/dxy11x/KSmicG3ZrEEy0Yr81AFm3dn4QC04M=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -301,12 +316,14 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb
 github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
 github.com/elliotchance/orderedmap v1.2.0/go.mod h1:8hdSl6jmveQw8ScByd3AaNHNk51RhbTazdqtTty+NFw=
 github.com/elliotchance/orderedmap v1.3.0/go.mod h1:8hdSl6jmveQw8ScByd3AaNHNk51RhbTazdqtTty+NFw=
 github.com/elliotchance/orderedmap v1.4.0 h1:wZtfeEONCbx6in1CZyE6bELEt/vFayMvsxqI5SgsR+A=
@@ -365,11 +382,12 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
-github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
 github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
 github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
@@ -610,6 +628,8 @@ github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxA
 github.com/lucas-clemente/quic-go v0.7.1-0.20190401152353-907071221cf9/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
 github.com/lucas-clemente/quic-go v0.18.0/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg=
 github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
 github.com/lukechampine/stm v0.0.0-20191022212748-05486c32d236/go.mod h1:wTLsd5FC9rts7GkMpsPGk64CIuea+03yaLAp19Jmlg8=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@@ -660,6 +680,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
 github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
 github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
@@ -799,6 +820,8 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
@@ -807,6 +830,7 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
 github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -820,6 +844,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -829,9 +855,12 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/quasilyte/go-ruleguard/dsl v0.3.6 h1:W2wnISJifyda0x/RXq15Qjrsu9iOhX2gy4+Ku+owylw=
 github.com/quasilyte/go-ruleguard/dsl v0.3.6/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -854,8 +883,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
 github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil/v3 v3.21.9 h1:Vn4MUz2uXhqLSiCbGFRc0DILbMVLAY92DSkT8bsYrHg=
-github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
+github.com/shirou/gopsutil/v3 v3.21.11 h1:d5tOAP5+bmJ8Hf2+4bxOSkQ/64+sjEbjU9nSW9nJgG0=
+github.com/shirou/gopsutil/v3 v3.21.11/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA=
 github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
 github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
 github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
@@ -905,6 +934,7 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -960,10 +990,8 @@ github.com/wcharczuk/go-chart/v2 v2.1.0/go.mod h1:yx7MvAVNcP/kN9lKXM/NTce4au4DFN
 github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
 github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
-github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
 github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -972,6 +1000,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -1129,6 +1159,7 @@ golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96b
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210420210106-798c2154c571/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
@@ -1147,6 +1178,7 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1223,6 +1255,7 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1240,12 +1273,15 @@ golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 h1:giLT+HuUP/gXYrG2Plg9WTjj4qhfgaW424ZIFog3rlk=
 golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -1309,6 +1345,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK
 golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
 golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
 golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
diff --git a/p2p/enode/nodedb_test.go b/p2p/enode/nodedb_test.go
index 0ca44575f9..89b2d91d2c 100644
--- a/p2p/enode/nodedb_test.go
+++ b/p2p/enode/nodedb_test.go
@@ -323,7 +323,7 @@ func TestDBPersistency(t *testing.T) {
 	}
 	db.Close()
 
-	// Reopen the database and check the value
+	// Reopen the database and check the value
 	db, err = OpenDB(filepath.Join(root, "database"))
 	if err != nil {
 		t.Fatalf("failed to open persistent database: %v", err)
diff --git a/params/config.go b/params/config.go
index 5631f7868f..ab61e995c9 100644
--- a/params/config.go
+++ b/params/config.go
@@ -490,7 +490,7 @@ func (c *ChainConfig) IsHeaderWithSeal() bool {
 	return c.Consensus == AuRaConsensus
 }
 
-type SnapshotConfig struct {
+type ConsensusSnapshotConfig struct {
 	CheckpointInterval uint64 // Number of blocks after which to save the vote snapshot to the database
 	InmemorySnapshots  int    // Number of recent vote snapshots to keep in memory
 	InmemorySignatures int    // Number of recent block signatures to keep in memory
@@ -500,12 +500,12 @@ type SnapshotConfig struct {
 
 const cliquePath = "clique"
 
-func NewSnapshotConfig(checkpointInterval uint64, inmemorySnapshots int, inmemorySignatures int, inmemory bool, dbPath string) *SnapshotConfig {
+func NewSnapshotConfig(checkpointInterval uint64, inmemorySnapshots int, inmemorySignatures int, inmemory bool, dbPath string) *ConsensusSnapshotConfig {
 	if len(dbPath) == 0 {
 		dbPath = paths.DefaultDataDir()
 	}
 
-	return &SnapshotConfig{
+	return &ConsensusSnapshotConfig{
 		checkpointInterval,
 		inmemorySnapshots,
 		inmemorySignatures,
diff --git a/params/snapshots.go b/params/snapshots.go
new file mode 100644
index 0000000000..3edca1a112
--- /dev/null
+++ b/params/snapshots.go
@@ -0,0 +1,23 @@
+package params
+
+var (
+	MainnetChainSnapshotConfig = &SnapshotsConfig{}
+	GoerliChainSnapshotConfig  = &SnapshotsConfig{
+		ExpectBlocks: 5_900_000 - 1,
+	}
+)
+
+type SnapshotsConfig struct {
+	ExpectBlocks uint64
+}
+
+func KnownSnapshots(networkName string) *SnapshotsConfig {
+	switch networkName {
+	case MainnetChainName:
+		return MainnetChainSnapshotConfig
+	case GoerliChainName:
+		return GoerliChainSnapshotConfig
+	default:
+		return nil
+	}
+}
diff --git a/rpc/service.go b/rpc/service.go
index 25b01a9e72..e04225f82a 100644
--- a/rpc/service.go
+++ b/rpc/service.go
@@ -21,12 +21,12 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"runtime/debug"
 	"strings"
 	"sync"
 	"unicode"
 
 	jsoniter "github.com/json-iterator/go"
+	"github.com/ledgerwatch/erigon-lib/common/dbg"
 	"github.com/ledgerwatch/log/v3"
 )
 
@@ -214,7 +214,7 @@ func (c *callback) call(ctx context.Context, method string, args []reflect.Value
 	// Catch panic while running the callback.
 	defer func() {
 		if err := recover(); err != nil {
-			log.Error("RPC method " + method + " crashed: " + fmt.Sprintf("%v\n%s", err, debug.Stack()))
+			log.Error("RPC method " + method + " crashed: " + fmt.Sprintf("%v\n%s", err, dbg.Stack()))
 			errRes = errors.New("method handler crashed")
 		}
 	}()
diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go
index dda234f97c..8efe49d15c 100644
--- a/turbo/snapshotsync/block_reader.go
+++ b/turbo/snapshotsync/block_reader.go
@@ -23,12 +23,12 @@ func NewBlockReader() *BlockReader {
 	return &BlockReader{}
 }
 
-func (back *BlockReader) Header(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (*types.Header, error) {
+func (back *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) {
 	h := rawdb.ReadHeader(tx, hash, blockHeight)
 	return h, nil
 }
 
-func (back *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Tx, blockHeight uint64) (*types.Header, error) {
+func (back *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error) {
 	h := rawdb.ReadHeaderByNumber(tx, blockHeight)
 	return h, nil
 }
@@ -98,7 +98,7 @@ type BlockReaderWithSnapshots struct {
 func NewBlockReaderWithSnapshots(snapshots *AllSnapshots) *BlockReaderWithSnapshots {
 	return &BlockReaderWithSnapshots{sn: snapshots}
 }
-func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv.Tx, blockHeight uint64) (*types.Header, error) {
+func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error) {
 	sn, ok := back.sn.Blocks(blockHeight)
 	if !ok {
 		h := rawdb.ReadHeaderByNumber(tx, blockHeight)
@@ -106,7 +106,7 @@ func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv.
 	}
 	return back.headerFromSnapshot(blockHeight, sn)
 }
-func (back *BlockReaderWithSnapshots) Header(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (*types.Header, error) {
+func (back *BlockReaderWithSnapshots) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) {
 	sn, ok := back.sn.Blocks(blockHeight)
 	if !ok {
 		h := rawdb.ReadHeader(tx, hash, blockHeight)
@@ -116,7 +116,7 @@ func (back *BlockReaderWithSnapshots) Header(ctx context.Context, tx kv.Tx, hash
 	return back.headerFromSnapshot(blockHeight, sn)
 }
 
-func (back *BlockReaderWithSnapshots) ReadHeaderByNumber(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (*types.Header, error) {
+func (back *BlockReaderWithSnapshots) ReadHeaderByNumber(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) {
 	sn, ok := back.sn.Blocks(blockHeight)
 	if !ok {
 		h := rawdb.ReadHeader(tx, hash, blockHeight)
@@ -163,29 +163,32 @@ func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx k
 	gg.Reset(bodyOffset)
 	buf, _ = gg.Next(buf[:0])
 	b := &types.BodyForStorage{}
-	if err = rlp.DecodeBytes(buf, b); err != nil {
+	reader := bytes.NewReader(buf)
+	if err = rlp.Decode(reader, b); err != nil {
 		return nil, nil, err
 	}
 
 	if b.BaseTxId < sn.Transactions.Idx.BaseDataID() {
-		return nil, nil, fmt.Errorf(".idx file has wrong baseDataID? %d<%d\n", b.BaseTxId, sn.Transactions.Idx.BaseDataID())
+		return nil, nil, fmt.Errorf(".idx file has wrong baseDataID? %d<%d, %s", b.BaseTxId, sn.Transactions.Idx.BaseDataID(), sn.Transactions.File)
 	}
-	txnOffset := sn.Transactions.Idx.Lookup2(b.BaseTxId - sn.Transactions.Idx.BaseDataID()) // need subtract baseID of indexFile
-	gg = sn.Transactions.Segment.MakeGetter()
-	gg.Reset(txnOffset)
-	reader := bytes.NewReader(nil)
-	stream := rlp.NewStream(reader, 0)
+
 	txs := make([]types.Transaction, b.TxAmount)
 	senders = make([]common.Address, b.TxAmount)
-	for i := uint32(0); i < b.TxAmount; i++ {
-		buf, _ = gg.Next(buf[:0])
-		senders[i].SetBytes(buf[1 : 1+20])
-		txRlp := buf[1+20:]
-		reader.Reset(txRlp)
-		stream.Reset(reader, 0)
-		txs[i], err = types.DecodeTransaction(stream)
-		if err != nil {
-			return nil, nil, err
+	if b.TxAmount > 0 {
+		txnOffset := sn.Transactions.Idx.Lookup2(b.BaseTxId - sn.Transactions.Idx.BaseDataID()) // need subtract baseID of indexFile
+		gg = sn.Transactions.Segment.MakeGetter()
+		gg.Reset(txnOffset)
+		stream := rlp.NewStream(reader, 0)
+		for i := uint32(0); i < b.TxAmount; i++ {
+			buf, _ = gg.Next(buf[:0])
+			senders[i].SetBytes(buf[1 : 1+20])
+			txRlp := buf[1+20:]
+			reader.Reset(txRlp)
+			stream.Reset(reader, 0)
+			txs[i], err = types.DecodeTransaction(stream)
+			if err != nil {
+				return nil, nil, err
+			}
 		}
 	}
 
diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go
index b341f62c12..4f7c941110 100644
--- a/turbo/snapshotsync/block_snapshots.go
+++ b/turbo/snapshotsync/block_snapshots.go
@@ -2,6 +2,9 @@ package snapshotsync
 
 import (
 	"bufio"
+	"bytes"
+	"context"
+	"crypto/sha256"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -27,6 +30,7 @@ import (
 	"github.com/ledgerwatch/erigon/common/dbutils"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/rlp"
 	"github.com/ledgerwatch/log/v3"
 )
@@ -80,38 +84,109 @@ type BlocksSnapshot struct {
 func (s BlocksSnapshot) Has(block uint64) bool { return block >= s.From && block < s.To }
 
 type AllSnapshots struct {
-	dir             string
-	blocksAvailable uint64
-	blocks          []*BlocksSnapshot
+	dir                  string
+	allSegmentsAvailable bool
+	allIdxAvailable      bool
+	segmentsAvailable    uint64
+	idxAvailable         uint64
+	blocks               []*BlocksSnapshot
+	cfg                  *params.SnapshotsConfig
 }
 
-func MustOpenAll(dir string) *AllSnapshots {
-	res, err := OpenAll(dir)
-	if err != nil {
-		panic(err)
-	}
-	return res
-}
-
-// OpenAll - opens all snapshots. But to simplify everything:
+// NewAllSnapshots - opens all snapshots. But to simplify everything:
 //  - it opens snapshots only on App start and immutable after
 //  - all snapshots of given blocks range must exist - to make this blocks range available
 //  - gaps are not allowed
 //  - segment have [from:to) semantic
-func OpenAll(dir string) (*AllSnapshots, error) {
-	all := &AllSnapshots{dir: dir}
-	files, err := headersSegments(dir)
+func NewAllSnapshots(dir string, cfg *params.SnapshotsConfig) *AllSnapshots {
+	if err := os.MkdirAll(dir, 0755); err != nil {
+		panic(err)
+	}
+	return &AllSnapshots{dir: dir, cfg: cfg}
+}
+
+func (s *AllSnapshots) ChainSnapshotConfig() *params.SnapshotsConfig { return s.cfg }
+func (s *AllSnapshots) AllSegmentsAvailable() bool                   { return s.allSegmentsAvailable }
+func (s *AllSnapshots) SetAllSegmentsAvailable(v bool)               { s.allSegmentsAvailable = v }
+func (s *AllSnapshots) BlocksAvailable() uint64                      { return s.segmentsAvailable }
+func (s *AllSnapshots) AllIdxAvailable() bool                        { return s.allIdxAvailable }
+func (s *AllSnapshots) SetAllIdxAvailable(v bool)                    { s.allIdxAvailable = v }
+func (s *AllSnapshots) IndicesAvailable() uint64                     { return s.idxAvailable }
+
+func (s *AllSnapshots) SegmentsAvailability() (headers, bodies, txs uint64, err error) {
+	if headers, err = latestSegment(s.dir, Headers); err != nil {
+		return
+	}
+	if bodies, err = latestSegment(s.dir, Bodies); err != nil {
+		return
+	}
+	if txs, err = latestSegment(s.dir, Transactions); err != nil {
+		return
+	}
+	return
+}
+func (s *AllSnapshots) IdxAvailability() (headers, bodies, txs uint64, err error) {
+	if headers, err = latestIdx(s.dir, Headers); err != nil {
+		return
+	}
+	if bodies, err = latestIdx(s.dir, Bodies); err != nil {
+		return
+	}
+	if txs, err = latestIdx(s.dir, Transactions); err != nil {
+		return
+	}
+	return
+}
+func (s *AllSnapshots) ReopenIndices() error {
+	for _, bs := range s.blocks {
+		if bs.Headers.Idx != nil {
+			bs.Headers.Idx.Close()
+			bs.Headers.Idx = nil
+		}
+		idx, err := recsplit.OpenIndex(path.Join(s.dir, IdxFileName(bs.Headers.From, bs.Headers.To, Headers)))
+		if err != nil {
+			return err
+		}
+		bs.Headers.Idx = idx
+
+		if bs.Bodies.Idx != nil {
+			bs.Bodies.Idx.Close()
+			bs.Bodies.Idx = nil
+		}
+		idx, err = recsplit.OpenIndex(path.Join(s.dir, IdxFileName(bs.Bodies.From, bs.Bodies.To, Bodies)))
+		if err != nil {
+			return err
+		}
+		bs.Bodies.Idx = idx
+
+		if bs.Transactions.Idx != nil {
+			bs.Transactions.Idx.Close()
+			bs.Transactions.Idx = nil
+		}
+		idx, err = recsplit.OpenIndex(path.Join(s.dir, IdxFileName(bs.Transactions.From, bs.Transactions.To, Transactions)))
+		if err != nil {
+			return err
+		}
+		bs.Transactions.Idx = idx
+		s.idxAvailable = bs.Transactions.To - 1
+	}
+	return nil
+}
+
+func (s *AllSnapshots) ReopenSegments() error {
+	dir := s.dir
+	files, err := segments(dir, Headers)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	var prevTo uint64
 	for _, f := range files {
-		from, to, _, err := ParseCompressedFileName(f)
+		from, to, _, err := ParseFileName(f, ".seg")
 		if err != nil {
 			if errors.Is(ErrInvalidCompressedFileName, err) {
 				continue
 			}
-			return nil, err
+			return err
 		}
 		if to == prevTo {
 			continue
@@ -130,17 +205,9 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 				if errors.Is(err, os.ErrNotExist) {
 					break
 				}
-				return nil, err
-			}
-
-			idx, err := recsplit.OpenIndex(path.Join(dir, IdxFileName(from, to, Bodies)))
-			if err != nil {
-				if errors.Is(err, os.ErrNotExist) {
-					break
-				}
-				return nil, err
+				return err
 			}
-			blocksSnapshot.Bodies = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
+			blocksSnapshot.Bodies = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d}
 		}
 		{
 			fileName := SegmentFileName(from, to, Headers)
@@ -149,17 +216,9 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 				if errors.Is(err, os.ErrNotExist) {
 					break
 				}
-				return nil, err
-			}
-			idx, err := recsplit.OpenIndex(path.Join(dir, IdxFileName(from, to, Headers)))
-			if err != nil {
-				if errors.Is(err, os.ErrNotExist) {
-					break
-				}
-				return nil, err
+				return err
 			}
-
-			blocksSnapshot.Headers = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
+			blocksSnapshot.Headers = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d}
 		}
 		{
 			fileName := SegmentFileName(from, to, Transactions)
@@ -168,37 +227,42 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 				if errors.Is(err, os.ErrNotExist) {
 					break
 				}
-				return nil, err
-			}
-			idx, err := recsplit.OpenIndex(path.Join(dir, IdxFileName(from, to, Transactions)))
-			if err != nil {
-				if errors.Is(err, os.ErrNotExist) {
-					break
-				}
-				return nil, err
+				return err
 			}
-			blocksSnapshot.Transactions = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
+			blocksSnapshot.Transactions = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d}
 		}
 
-		all.blocks = append(all.blocks, blocksSnapshot)
-		all.blocksAvailable = blocksSnapshot.To
+		s.blocks = append(s.blocks, blocksSnapshot)
+		s.segmentsAvailable = blocksSnapshot.To - 1
 	}
-	return all, nil
+	return nil
 }
 
-func (s AllSnapshots) Close() {
+func (s *AllSnapshots) Close() {
 	for _, s := range s.blocks {
-		s.Headers.Idx.Close()
-		s.Headers.Segment.Close()
-		s.Bodies.Idx.Close()
-		s.Bodies.Segment.Close()
-		s.Transactions.Idx.Close()
-		s.Transactions.Segment.Close()
+		if s.Headers.Idx != nil {
+			s.Headers.Idx.Close()
+		}
+		if s.Headers.Segment != nil {
+			s.Headers.Segment.Close()
+		}
+		if s.Bodies.Idx != nil {
+			s.Bodies.Idx.Close()
+		}
+		if s.Bodies.Segment != nil {
+			s.Bodies.Segment.Close()
+		}
+		if s.Transactions.Idx != nil {
+			s.Transactions.Idx.Close()
+		}
+		if s.Transactions.Segment != nil {
+			s.Transactions.Segment.Close()
+		}
 	}
 }
 
-func (s AllSnapshots) Blocks(blockNumber uint64) (snapshot *BlocksSnapshot, found bool) {
-	if blockNumber > s.blocksAvailable {
+func (s *AllSnapshots) Blocks(blockNumber uint64) (snapshot *BlocksSnapshot, found bool) {
+	if blockNumber > s.segmentsAvailable {
 		return snapshot, false
 	}
 	for _, blocksSnapshot := range s.blocks {
@@ -209,7 +273,111 @@ func (s AllSnapshots) Blocks(blockNumber uint64) (snapshot *BlocksSnapshot, foun
 	return snapshot, false
 }
 
-func headersSegments(dir string) ([]string, error) {
+func (s *AllSnapshots) BuildIndices(ctx context.Context, chainID uint256.Int) error {
+	fmt.Printf("build!\n")
+	for _, sn := range s.blocks {
+		f := path.Join(s.dir, SegmentFileName(sn.Headers.From, sn.Headers.To, Headers))
+		if err := HeadersHashIdx(f, sn.Headers.From); err != nil {
+			return err
+		}
+
+		f = path.Join(s.dir, SegmentFileName(sn.Bodies.From, sn.Bodies.To, Bodies))
+		if err := BodiesIdx(f, sn.Bodies.From); err != nil {
+			return err
+		}
+	}
+
+	// hack to read first block body - to get baseTxId from there
+	_ = s.ReopenIndices()
+	for _, sn := range s.blocks {
+		gg := sn.Bodies.Segment.MakeGetter()
+		buf, _ := gg.Next(nil)
+		b := &types.BodyForStorage{}
+		if err := rlp.DecodeBytes(buf, b); err != nil {
+			return err
+		}
+
+		var expectedTxsAmount uint64
+		{
+			off := sn.Bodies.Idx.Lookup2(sn.To - 1)
+			gg.Reset(off)
+
+			buf, _ = gg.Next(nil)
+			bodyForStorage := new(types.BodyForStorage)
+			err := rlp.DecodeBytes(buf, bodyForStorage)
+			if err != nil {
+				panic(err)
+			}
+			expectedTxsAmount = bodyForStorage.BaseTxId + uint64(bodyForStorage.TxAmount) - b.BaseTxId
+		}
+		f := path.Join(s.dir, SegmentFileName(sn.Transactions.From, sn.Transactions.To, Transactions))
+		fmt.Printf("create: %s\n", f)
+		if err := TransactionsHashIdx(chainID, b.BaseTxId, f, expectedTxsAmount); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func latestSegment(dir string, ofType SnapshotType) (uint64, error) {
+	files, err := segments(dir, ofType)
+	if err != nil {
+		return 0, err
+	}
+	var maxBlock, prevTo uint64
+	for _, f := range files {
+		from, to, _, err := ParseFileName(f, ".seg")
+		if err != nil {
+			if errors.Is(ErrInvalidCompressedFileName, err) {
+				continue
+			}
+			return 0, err
+		}
+		if from != prevTo { // no gaps
+			log.Warn("[open snapshots] snapshot missed", "type", ofType, "from", prevTo, "to", from)
+			break
+		}
+		prevTo = to
+		if maxBlock < to {
+			maxBlock = to
+		}
+	}
+	if maxBlock == 0 {
+		return 0, nil
+	}
+	return maxBlock - 1, nil
+}
+func latestIdx(dir string, ofType SnapshotType) (uint64, error) {
+	files, err := idxFiles(dir, ofType)
+	if err != nil {
+		return 0, err
+	}
+	var maxBlock, prevTo uint64
+	for _, f := range files {
+		from, to, _, err := ParseFileName(f, ".idx")
+		if err != nil {
+			if errors.Is(ErrInvalidCompressedFileName, err) {
+				continue
+			}
+			return 0, err
+		}
+		if from != prevTo { // no gaps
+			log.Warn("[open snapshots] snapshot missed", "type", ofType, "from", prevTo, "to", from)
+			break
+		}
+		prevTo = to
+		if maxBlock < to {
+			maxBlock = to
+		}
+	}
+	if maxBlock == 0 {
+		return 0, nil
+	}
+	return maxBlock - 1, nil
+}
+
+func segments(dir string, ofType SnapshotType) ([]string, error) {
 	files, err := ioutil.ReadDir(dir)
 	if err != nil {
 		return nil, err
@@ -225,7 +393,31 @@ func headersSegments(dir string) ([]string, error) {
 		if filepath.Ext(f.Name()) != ".seg" { // filter out only compressed files
 			continue
 		}
-		if !strings.Contains(f.Name(), string(Headers)) {
+		if !strings.Contains(f.Name(), string(ofType)) {
+			continue
+		}
+		res = append(res, f.Name())
+	}
+	sort.Strings(res)
+	return res, nil
+}
+func idxFiles(dir string, ofType SnapshotType) ([]string, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	var res []string
+	for _, f := range files {
+		if !IsCorrectFileName(f.Name()) {
+			continue
+		}
+		if f.Size() == 0 {
+			continue
+		}
+		if filepath.Ext(f.Name()) != ".idx" { // keep only index files
+			continue
+		}
+		if !strings.Contains(f.Name(), string(ofType)) {
 			continue
 		}
 		res = append(res, f.Name())
@@ -239,10 +431,10 @@ func IsCorrectFileName(name string) bool {
 	return len(parts) == 4 && parts[3] != "v1"
 }
 
-func ParseCompressedFileName(name string) (from, to uint64, snapshotType SnapshotType, err error) {
+func ParseFileName(name, expectedExt string) (from, to uint64, snapshotType SnapshotType, err error) {
 	_, fileName := filepath.Split(name)
 	ext := filepath.Ext(fileName)
-	if ext != ".seg" {
+	if ext != expectedExt {
 		return 0, 0, "", fmt.Errorf("%w. Ext: %s", ErrInvalidCompressedFileName, ext)
 	}
 	onlyName := fileName[:len(fileName)-len(ext)]
@@ -292,7 +484,7 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 	}
 	defer f.Close()
 
-	i := 0
+	var count, prevTxID uint64
 	numBuf := make([]byte, binary.MaxVarintLen64)
 	parseCtx := txpool.NewTxParseContext(*chainID)
 	parseCtx.WithSender(false)
@@ -302,6 +494,7 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 	firstIDSaved := false
 
 	from := dbutils.EncodeBlockNumber(fromBlock)
+	var lastBody types.BodyForStorage
 	if err := kv.BigChunks(db, kv.HeaderCanonical, from, func(tx kv.Tx, k, v []byte) (bool, error) {
 		blockNum := binary.BigEndian.Uint64(k)
 		if blockNum >= fromBlock+uint64(blocksAmount) {
@@ -314,6 +507,7 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 		if e := rlp.DecodeBytes(dataRLP, &body); e != nil {
 			return false, e
 		}
+		lastBody = body
 		if body.TxAmount == 0 {
 			return true, nil
 		}
@@ -323,13 +517,18 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 		}
 
 		binary.BigEndian.PutUint64(numBuf, body.BaseTxId)
-		j := 0
 
 		if !firstIDSaved {
 			firstIDSaved = true
 			firstTxID = body.BaseTxId
 		}
+		j := 0
 		if err := tx.ForAmount(kv.EthTx, numBuf[:8], body.TxAmount, func(tk, tv []byte) error {
+			id := binary.BigEndian.Uint64(tk)
+			if prevTxID != 0 && id != prevTxID+1 {
+				panic(fmt.Sprintf("no gaps in tx ids are allowed: block %d does jump from %d to %d", blockNum, prevTxID, id))
+			}
+			prevTxID = id
 			if _, err := parseCtx.ParseTransaction(tv, 0, &slot, nil); err != nil {
 				return err
 			}
@@ -337,8 +536,8 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 			if len(senders) > 0 {
 				sender = senders[j][:]
 			} else {
-				sender = make([]byte, 20) // TODO: return error here
-				//panic("not implemented")
+				//sender = make([]byte, 20) // TODO: return error here
+				panic("not implemented")
 			}
 			_ = sender
 			valueBuf = valueBuf[:0]
@@ -348,7 +547,7 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 			if err := f.Append(valueBuf); err != nil {
 				return err
 			}
-			i++
+			count++
 			j++
 
 			select {
@@ -356,7 +555,7 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 			case <-logEvery.C:
 				var m runtime.MemStats
 				runtime.ReadMemStats(&m)
-				log.Info("Dumping txs", "million txs", i/1_000_000, "block num", blockNum,
+				log.Info("Dumping txs", "processed", count/1_000_000, "block num", blockNum,
 					"alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys),
 				)
 			}
@@ -368,7 +567,10 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) (fir
 	}); err != nil {
 		return 0, err
 	}
-
+	if lastBody.BaseTxId+uint64(lastBody.TxAmount)-firstTxID != count {
+		fmt.Printf("prevTxID: %d\n", prevTxID)
+		return 0, fmt.Errorf("incorrect tx count: %d, expected: %d", count, lastBody.BaseTxId+uint64(lastBody.TxAmount)-firstTxID)
+	}
 	return firstTxID, nil
 }
 
@@ -483,11 +685,14 @@ func DumpBodies(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) e
 	return nil
 }
 
-func TransactionsHashIdx(chainID uint256.Int, firstTxID uint64, segmentFileName string) error {
+func TransactionsHashIdx(chainID uint256.Int, firstTxID uint64, segmentFileName string, expectedCount uint64) error {
+	logEvery := time.NewTicker(20 * time.Second)
+	defer logEvery.Stop()
 	parseCtx := txpool.NewTxParseContext(chainID)
 	parseCtx.WithSender(false)
 	slot := txpool.TxSlot{}
 	var sender [20]byte
+	var j uint64
 	if err := Idx(segmentFileName, firstTxID, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
 		if _, err := parseCtx.ParseTransaction(word[1+20:], 0, &slot, sender[:]); err != nil {
 			return err
@@ -495,21 +700,48 @@ func TransactionsHashIdx(chainID uint256.Int, firstTxID uint64, segmentFileName
 		if err := idx.AddKey(slot.IdHash[:], offset); err != nil {
 			return err
 		}
+
+		select {
+		default:
+		case <-logEvery.C:
+			log.Info("[Snapshots] TransactionsHashIdx", "millions", i/1_000_000)
+		}
+		j++
 		return nil
 	}); err != nil {
 		return fmt.Errorf("TransactionsHashIdx: %w", err)
 	}
+	if j != expectedCount {
+		panic(fmt.Errorf("expect: %d, got %d\n", expectedCount, j))
+	}
 	return nil
 }
 
 // HeadersHashIdx - headerHash -> offset (analog of kv.HeaderNumber)
 func HeadersHashIdx(segmentFileName string, firstBlockNumInSegment uint64) error {
+	logEvery := time.NewTicker(5 * time.Second)
+	defer logEvery.Stop()
 	if err := Idx(segmentFileName, firstBlockNumInSegment, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
 		h := types.Header{}
 		if err := rlp.DecodeBytes(word, &h); err != nil {
 			return err
 		}
-		return idx.AddKey(h.Hash().Bytes(), offset)
+
+		if err := idx.AddKey(h.Hash().Bytes(), offset); err != nil {
+			return err
+		}
+		// hashBuf := make([]byte, 32)
+		// common.HashTo(word, hashBuf)
+		//if err := idx.AddKey(types.RawRlpHash(word).Bytes(), offset); err != nil {
+		//	return err
+		//}
+
+		select {
+		default:
+		case <-logEvery.C:
+			log.Info("[Snapshots] HeadersHashIdx", "block num", h.Number.Uint64())
+		}
+		return nil
 	}); err != nil {
 		return fmt.Errorf("HeadersHashIdx: %w", err)
 	}
@@ -517,10 +749,21 @@ func HeadersHashIdx(segmentFileName string, firstBlockNumInSegment uint64) error
 }
 
 func BodiesIdx(segmentFileName string, firstBlockNumInSegment uint64) error {
+	logEvery := time.NewTicker(5 * time.Second)
+	defer logEvery.Stop()
 	num := make([]byte, 8)
 	if err := Idx(segmentFileName, firstBlockNumInSegment, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
 		n := binary.PutUvarint(num, i)
-		return idx.AddKey(num[:n], offset)
+		if err := idx.AddKey(num[:n], offset); err != nil {
+			return err
+		}
+
+		select {
+		default:
+		case <-logEvery.C:
+			log.Info("[Snapshots] BodiesIdx", "millions", i/1_000_000)
+		}
+		return nil
 	}); err != nil {
 		return fmt.Errorf("BodiesIdx: %w", err)
 	}
@@ -585,6 +828,27 @@ RETRY:
 	return nil
 }
 
+// ForEachHeader walks every header word of every block segment in s, in
+// segment order, decoding each word as an RLP-encoded types.Header and passing
+// it to walker. Iteration stops at the first decode or walker error, which is
+// returned; nil is returned after all segments are exhausted.
+//
+// NOTE(review): a single *types.Header is reused across iterations — walker
+// must not retain the pointer past its call, and a field absent from a later
+// header may keep a stale value from an earlier decode; confirm rlp.Decode
+// fully resets the struct.
+func ForEachHeader(s *AllSnapshots, walker func(header *types.Header) error) error {
+	for _, sn := range s.blocks {
+		d := sn.Headers.Segment
+		g := d.MakeGetter()
+		word := make([]byte, 0, 4096) // reusable decompression buffer, grown on demand
+		header := new(types.Header)   // reused for every header (see NOTE above)
+		r := bytes.NewReader(nil)     // reusable reader wrapping each word for rlp.Decode
+		for g.HasNext() {
+			word, _ = g.Next(word[:0])
+			r.Reset(word)
+			if err := rlp.Decode(r, header); err != nil {
+				return err
+			}
+			if err := walker(header); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 type SimpleFile struct {
 	name  string
 	f     *os.File
@@ -645,3 +909,22 @@ func ReadSimpleFile(fileName string, walker func(v []byte) error) error {
 	}
 	return nil
 }
+
+// SnapshotInfo describes what block range snapshot data can serve.
+type SnapshotInfo struct {
+	// MinimumAvailability — presumably the lowest block number obtainable
+	// from snapshots; TODO(review) confirm semantics against callers.
+	MinimumAvailability uint64
+}
+
+// FileCheckSum returns the SHA-256 digest of the named file's contents as a
+// common.Hash. The file is streamed through io.Copy, so arbitrarily large
+// files are hashed in constant memory. On any open or read error the zero
+// hash is returned together with the error.
+func FileCheckSum(file string) (common.Hash, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	defer f.Close()
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return common.Hash{}, err
+	}
+
+	hash := h.Sum(nil) // 32 bytes, exactly the size of common.Hash
+	return common.BytesToHash(hash), nil
+}
diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go
index 42b19c35f3..84f8e4632b 100644
--- a/turbo/snapshotsync/block_snapshots_test.go
+++ b/turbo/snapshotsync/block_snapshots_test.go
@@ -6,11 +6,13 @@ import (
 
 	"github.com/ledgerwatch/erigon-lib/compress"
 	"github.com/ledgerwatch/erigon-lib/recsplit"
+	"github.com/ledgerwatch/erigon/params"
 	"github.com/stretchr/testify/require"
 )
 
 func TestOpenAllSnapshot(t *testing.T) {
 	dir, require := t.TempDir(), require.New(t)
+	cfg := params.KnownSnapshots(params.MainnetChainName)
 	createFile := func(from, to uint64, name SnapshotType) {
 		c, err := compress.NewCompressor("test", path.Join(dir, SegmentFileName(from, to, name)), dir, 100)
 		require.NoError(err)
@@ -32,26 +34,31 @@ func TestOpenAllSnapshot(t *testing.T) {
 		err = idx.Build()
 		require.NoError(err)
 	}
-	s, err := OpenAll(dir)
+	s := NewAllSnapshots(dir, cfg)
+	err := s.ReopenSegments()
 	require.NoError(err)
 	require.Equal(0, len(s.blocks))
 	s.Close()
 
 	createFile(500_000, 1_000_000, Bodies)
-	s = MustOpenAll(dir)
+	s = NewAllSnapshots(dir, cfg)
 	require.Equal(0, len(s.blocks)) //because, no headers and transactions snapshot files are created
 	s.Close()
 
 	createFile(500_000, 1_000_000, Headers)
 	createFile(500_000, 1_000_000, Transactions)
-	s = MustOpenAll(dir)
+	s = NewAllSnapshots(dir, cfg)
+	err = s.ReopenSegments()
+	require.NoError(err)
 	require.Equal(0, len(s.blocks)) //because, no gaps are allowed (expect snapshots from block 0)
 	s.Close()
 
 	createFile(0, 500_000, Bodies)
 	createFile(0, 500_000, Headers)
 	createFile(0, 500_000, Transactions)
-	s = MustOpenAll(dir)
+	s = NewAllSnapshots(dir, cfg)
+	err = s.ReopenSegments()
+	require.NoError(err)
 	defer s.Close()
 	require.Equal(2, len(s.blocks))
 
@@ -69,24 +76,24 @@ func TestOpenAllSnapshot(t *testing.T) {
 
 func TestParseCompressedFileName(t *testing.T) {
 	require := require.New(t)
-	_, _, _, err := ParseCompressedFileName("a")
+	_, _, _, err := ParseFileName("a", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("1-a")
+	_, _, _, err = ParseFileName("1-a", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("1-2-a")
+	_, _, _, err = ParseFileName("1-2-a", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("1-2-bodies.info")
+	_, _, _, err = ParseFileName("1-2-bodies.info", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("1-2-bodies.idx")
+	_, _, _, err = ParseFileName("1-2-bodies.idx", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("1-2-bodies.seg")
+	_, _, _, err = ParseFileName("1-2-bodies.seg", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("v2-1-2-bodies.seg")
+	_, _, _, err = ParseFileName("v2-1-2-bodies.seg", ".seg")
 	require.Error(err)
-	_, _, _, err = ParseCompressedFileName("v0-1-2-bodies.seg")
+	_, _, _, err = ParseFileName("v0-1-2-bodies.seg", ".seg")
 	require.Error(err)
 
-	from, to, tt, err := ParseCompressedFileName("v1-1-2-bodies.seg")
+	from, to, tt, err := ParseFileName("v1-1-2-bodies.seg", ".seg")
 	require.NoError(err)
 	require.Equal(tt, Bodies)
 	require.Equal(1_000, int(from))
diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go
index 44082725a3..f234b9aaa8 100644
--- a/turbo/stages/bodydownload/body_algos.go
+++ b/turbo/stages/bodydownload/body_algos.go
@@ -7,9 +7,10 @@ import (
 	"math/big"
 
 	"github.com/holiman/uint256"
+	"github.com/ledgerwatch/erigon-lib/common/dbg"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
-	"github.com/ledgerwatch/erigon/common/debug"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
@@ -77,7 +78,7 @@ func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight uint64, headHash comm
 }
 
 // RequestMoreBodies - returns nil if nothing to request
-func (bd *BodyDownload) RequestMoreBodies(db kv.Tx, blockNum uint64, currentTime uint64, blockPropagator adapter.BlockPropagator) (*BodyRequest, uint64, error) {
+func (bd *BodyDownload) RequestMoreBodies(db kv.Tx, blockReader interfaces.FullBlockReader, blockNum uint64, currentTime uint64, blockPropagator adapter.BlockPropagator) (*BodyRequest, uint64, error) {
 	if blockNum < bd.requestedLow {
 		blockNum = bd.requestedLow
 	}
@@ -110,17 +111,21 @@ func (bd *BodyDownload) RequestMoreBodies(db kv.Tx, blockNum uint64, currentTime
 			// If this block was requested before, we don't need to fetch the headers from the database the second time
 			header = bd.deliveriesH[blockNum-bd.requestedLow]
 			if header == nil {
-				return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, debug.Callers(7))
+				return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack())
 			}
 			hash = header.Hash()
 		} else {
 			hash, err = rawdb.ReadCanonicalHash(db, blockNum)
 			if err != nil {
-				return nil, 0, fmt.Errorf("could not find canonical header: %w, blockNum=%d, trace=%s", err, blockNum, debug.Callers(7))
+				return nil, 0, fmt.Errorf("could not find canonical header: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack())
+			}
+
+			header, err = blockReader.Header(context.Background(), db, hash, blockNum)
+			if err != nil {
+				return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack())
 			}
-			header = rawdb.ReadHeader(db, hash, blockNum)
 			if header == nil {
-				return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, debug.Callers(7))
+				return nil, 0, fmt.Errorf("header not found: blockNum=%d, trace=%s", blockNum, dbg.Stack())
 			}
 
 			if block := bd.prefetchedBlocks.Pop(hash); block != nil {
diff --git a/turbo/stages/headerdownload/header_algo_test.go b/turbo/stages/headerdownload/header_algo_test.go
index dfd4a469bc..167e21cf9b 100644
--- a/turbo/stages/headerdownload/header_algo_test.go
+++ b/turbo/stages/headerdownload/header_algo_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 )
 
 func TestInserter1(t *testing.T) {
@@ -48,10 +49,10 @@ func TestInserter1(t *testing.T) {
 		ParentHash: h1Hash,
 	}
 	h2Hash := h2.Hash()
-	if _, err = hi.FeedHeader(tx, &h1, h1Hash, 1, nil); err != nil {
+	if _, err = hi.FeedHeader(tx, snapshotsync.NewBlockReader(), &h1, h1Hash, 1, nil); err != nil {
 		t.Errorf("feed empty header 1: %v", err)
 	}
-	if _, err = hi.FeedHeader(tx, &h2, h2Hash, 2, nil); err != nil {
+	if _, err = hi.FeedHeader(tx, snapshotsync.NewBlockReader(), &h2, h2Hash, 2, nil); err != nil {
 		t.Errorf("feed empty header 2: %v", err)
 	}
 }
diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go
index e3461bad95..185d07bd0f 100644
--- a/turbo/stages/headerdownload/header_algos.go
+++ b/turbo/stages/headerdownload/header_algos.go
@@ -16,6 +16,7 @@ import (
 	"time"
 
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/dbutils"
 	"github.com/ledgerwatch/erigon/consensus"
@@ -777,14 +778,13 @@ func (hd *HeaderDownload) addHeaderAsLink(h ChainSegmentHeader, persisted bool)
 	return link
 }
 
+// FeedHeaderFunc adapts FeedHeader to the single-header callback signature
+// used by the insertion loop, closing over the db transaction and the
+// headerReader that FeedHeader now needs to resolve parent headers.
-func (hi *HeaderInserter) FeedHeaderFunc(db kv.StatelessRwTx) func(header *types.Header, hash common.Hash, blockHeight uint64, terminalTotalDifficulty *big.Int) (bool, error) {
+func (hi *HeaderInserter) FeedHeaderFunc(db kv.StatelessRwTx, headerReader interfaces.HeaderReader) func(header *types.Header, hash common.Hash, blockHeight uint64, terminalTotalDifficulty *big.Int) (bool, error) {
 	return func(header *types.Header, hash common.Hash, blockHeight uint64, terminalTotalDifficulty *big.Int) (bool, error) {
-		return hi.FeedHeader(db, header, hash, blockHeight, terminalTotalDifficulty)
+		return hi.FeedHeader(db, headerReader, header, hash, blockHeight, terminalTotalDifficulty)
 	}
 }
 
-func (hi *HeaderInserter) FeedHeader(db kv.StatelessRwTx, header *types.Header, hash common.Hash, blockHeight uint64, terminalTotalDifficulty *big.Int) (isTrans bool, err error) {
-
+func (hi *HeaderInserter) FeedHeader(db kv.StatelessRwTx, headerReader interfaces.HeaderReader, header *types.Header, hash common.Hash, blockHeight uint64, terminalTotalDifficulty *big.Int) (isTrans bool, err error) {
 	if hash == hi.prevHash {
 		// Skip duplicates
 		return false, nil
@@ -794,7 +794,10 @@ func (hi *HeaderInserter) FeedHeader(db kv.StatelessRwTx, header *types.Header,
 		return false, nil
 	}
 	// Load parent header
-	parent := rawdb.ReadHeader(db, header.ParentHash, blockHeight-1)
+	parent, err := headerReader.Header(context.Background(), db, header.ParentHash, blockHeight-1)
+	if err != nil {
+		return false, err
+	}
 	if parent == nil {
 		// Fail on headers without parent
 		return false, fmt.Errorf("could not find parent with hash %x and height %d for header %x %d", header.ParentHash, blockHeight-1, hash, blockHeight)
@@ -1034,8 +1037,7 @@ func (hd *HeaderDownload) Fetching() bool {
 	return hd.fetching
 }
 
-func (hd *HeaderDownload) AddMinedBlock(block *types.Block) error {
-	header := block.Header()
+func (hd *HeaderDownload) AddMinedHeader(header *types.Header) error {
 	buf := bytes.NewBuffer(nil)
 	if err := header.EncodeRLP(buf); err != nil {
 		return err
@@ -1053,6 +1055,48 @@ func (hd *HeaderDownload) AddMinedBlock(block *types.Block) error {
 	return nil
 }
 
+// AddHeaderFromSnapshot seeds the header-download state from snapshot files:
+// it walks backwards from block n, re-encodes each header to RLP, and adds it
+// to the persisted link queue (marked preverified) until either block 1 is
+// reached or persistedLinkLimit is hit. If the preverified-hash set was empty
+// on entry, each added header's hash is recorded there too. Finally
+// highestInDb and preverifiedHeight are raised to n. The method takes hd.lock
+// for its whole duration.
+//
+// NOTE(review): the loop condition i > 0 never visits block 0, so the genesis
+// header is not added — confirm this is intentional. A nil tx is passed to
+// r.HeaderByNumber, presumably because a snapshot-backed reader needs no db
+// access — verify that assumption holds for all FullBlockReader
+// implementations used here.
+func (hd *HeaderDownload) AddHeaderFromSnapshot(n uint64, r interfaces.FullBlockReader) error {
+	hd.lock.Lock()
+	defer hd.lock.Unlock()
+	// Only populate the preverified set if nobody has done so yet.
+	addPreVerifiedHashes := len(hd.preverifiedHashes) == 0
+	if addPreVerifiedHashes && hd.preverifiedHashes == nil {
+		hd.preverifiedHashes = map[common.Hash]struct{}{}
+	}
+
+	for i := n; i > 0 && hd.persistedLinkQueue.Len() < hd.persistedLinkLimit; i-- {
+		header, err := r.HeaderByNumber(context.Background(), nil, i)
+		if err != nil {
+			return err
+		}
+		if header == nil {
+			// Gap in the snapshot — skip silently and keep walking down.
+			continue
+		}
+		v, err := rlp.EncodeToBytes(header)
+		if err != nil {
+			return err
+		}
+		h := ChainSegmentHeader{
+			HeaderRaw: v,
+			Header:    header,
+			Hash:      types.RawRlpHash(v), // hash of the raw RLP, avoids a second encode
+			Number:    header.Number.Uint64(),
+		}
+		link := hd.addHeaderAsLink(h, true /* persisted */)
+		link.preverified = true
+		if addPreVerifiedHashes {
+			hd.preverifiedHashes[h.Hash] = struct{}{}
+		}
+	}
+	// Advance the watermarks even when fewer than n headers were added.
+	if hd.highestInDb < n {
+		hd.highestInDb = n
+	}
+	if hd.preverifiedHeight < n {
+		hd.preverifiedHeight = n
+	}
+
+	return nil
+}
+
 func DecodeTips(encodings []string) (map[common.Hash]HeaderRecord, error) {
 	hardTips := make(map[common.Hash]HeaderRecord, len(encodings))
 
diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go
index dc768eceba..8eeeb6eff7 100644
--- a/turbo/stages/mock_sentry.go
+++ b/turbo/stages/mock_sentry.go
@@ -270,6 +270,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
 	}
 
 	blockReader := snapshotsync.NewBlockReader()
+	var allSnapshots *snapshotsync.AllSnapshots
 	mock.Sync = stagedsync.New(
 		stagedsync.DefaultStages(mock.Ctx, prune, stagedsync.StageHeadersCfg(
 			mock.DB,
@@ -282,7 +283,9 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
 			false,
 			nil,
 			nil,
-		), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir), stagedsync.StageBodiesCfg(
+			allSnapshots,
+			blockReader,
+		), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), stagedsync.StageBodiesCfg(
 			mock.DB,
 			mock.downloader.Bd,
 			sendBodyRequest,
@@ -291,8 +294,10 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
 			cfg.BodyDownloadTimeoutSeconds,
 			*mock.ChainConfig,
 			cfg.BatchSize,
+			allSnapshots,
+			blockReader,
 		), stagedsync.StageDifficultyCfg(mock.DB, mock.tmpdir, nil, blockReader),
-			stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, mock.tmpdir, prune),
+			stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, mock.tmpdir, prune, allSnapshots),
 			stagedsync.StageExecuteBlocksCfg(
 				mock.DB,
 				prune,
@@ -309,7 +314,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
 				mock.DB,
 				cfg.BatchSize,
 				mock.ChainConfig,
-			), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, true, true, mock.tmpdir, blockReader), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log), true),
+			), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, true, true, mock.tmpdir, blockReader), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir, allSnapshots), stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log), true),
 		stagedsync.DefaultUnwindOrder,
 		stagedsync.DefaultPruneOrder,
 	)
diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go
index ae1c72c3b8..81309b0139 100644
--- a/turbo/stages/stageloop.go
+++ b/turbo/stages/stageloop.go
@@ -23,6 +23,7 @@ import (
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/ethdb/privateapi"
 	"github.com/ledgerwatch/erigon/p2p"
+	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/turbo/shards"
 	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
@@ -231,11 +232,9 @@ func NewStagedSync(
 	waitingForPOSHeaders *bool,
 ) (*stagedsync.Sync, error) {
 	var blockReader interfaces.FullBlockReader
+	var allSnapshots *snapshotsync.AllSnapshots
 	if cfg.Snapshot.Enabled {
-		allSnapshots, err := snapshotsync.OpenAll(cfg.Snapshot.Dir)
-		if err != nil {
-			return nil, err
-		}
+		allSnapshots = snapshotsync.NewAllSnapshots(cfg.Snapshot.Dir, params.KnownSnapshots(controlServer.ChainConfig.ChainName))
 		blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
 	} else {
 		blockReader = snapshotsync.NewBlockReader()
@@ -253,7 +252,9 @@ func NewStagedSync(
 			p2pCfg.NoDiscovery,
 			reverseDownloadCh,
 			waitingForPOSHeaders,
-		), stagedsync.StageBlockHashesCfg(db, tmpdir), stagedsync.StageBodiesCfg(
+			allSnapshots,
+			blockReader,
+		), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageBodiesCfg(
 			db,
 			controlServer.Bd,
 			controlServer.SendBodyRequest,
@@ -262,7 +263,9 @@ func NewStagedSync(
 			cfg.BodyDownloadTimeoutSeconds,
 			*controlServer.ChainConfig,
 			cfg.BatchSize,
-		), stagedsync.StageDifficultyCfg(db, tmpdir, terminalTotalDifficulty, blockReader), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune), stagedsync.StageExecuteBlocksCfg(
+			allSnapshots,
+			blockReader,
+		), stagedsync.StageDifficultyCfg(db, tmpdir, terminalTotalDifficulty, blockReader), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune, allSnapshots), stagedsync.StageExecuteBlocksCfg(
 			db,
 			cfg.Prune,
 			cfg.BatchSize,
@@ -283,7 +286,7 @@ func NewStagedSync(
 			stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir),
 			stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir),
 			stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir),
-			stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir),
+			stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir, allSnapshots),
 			stagedsync.StageFinishCfg(db, tmpdir, logger), false),
 		stagedsync.DefaultUnwindOrder,
 		stagedsync.DefaultPruneOrder,
-- 
GitLab