diff --git a/.golangci.yml b/.golangci.yml
index 166cda180bb115870ddc8f62411602df31bc9e30..7d49626ca991ee5f238163ade52d90c6d2cfd5ef 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -4,6 +4,7 @@ run:
 linters:
   disable-all: true
   enable:
+    - gofmt
     - deadcode
     - errcheck
     - gosimple
diff --git a/Makefile b/Makefile
index e7ecb3e492333b59c071e57f4cd468117deb4e4c..eca90db8cccbea563ac43f4927e3ba434308d70e 100644
--- a/Makefile
+++ b/Makefile
@@ -37,7 +37,7 @@ docker-compose:
 dbg: mdbx-dbg
 	$(GO_DBG_BUILD) -o $(GOBIN)/ ./cmd/...
 
-geth:
+geth: mdbx
 	$(GOBUILD) -o $(GOBIN)/tg ./cmd/tg
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/tg\" to launch turbo-geth."
@@ -94,6 +94,21 @@ evm:
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/evm\" to run EVM"
 
+seeder:
+	$(GOBUILD) -o $(GOBIN)/seeder ./cmd/snapshots/seeder
+	@echo "Done building."
+	@echo "Run \"$(GOBIN)/seeder\" to seed snapshots."
+
+sndownloader:
+	$(GOBUILD) -o $(GOBIN)/sndownloader ./cmd/snapshots/downloader
+	@echo "Done building."
+	@echo "Run \"$(GOBIN)/sndownloader\" to download snapshots."
+
+tracker:
+	$(GOBUILD) -o $(GOBIN)/tracker ./cmd/snapshots/tracker
+	@echo "Done building."
+	@echo "Run \"$(GOBIN)/tracker\" to run snapshots tracker."
+
 db-tools: mdbx
 	@echo "Building bb-tools"
 	go mod vendor; cd vendor/github.com/ledgerwatch/lmdb-go/dist; make clean mdb_stat mdb_copy mdb_dump mdb_drop mdb_load; cp mdb_stat $(GOBIN); cp mdb_copy $(GOBIN); cp mdb_dump $(GOBIN); cp mdb_drop $(GOBIN); cp mdb_load $(GOBIN); cd ../../../../..; rm -rf vendor
@@ -199,4 +214,3 @@ prometheus:
 
 escape:
 	cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out
-
diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index c0e94095ef4f122f6cb0093f627087ed0596773c..effb303778d5d1bba28b401db9783e23353739dd 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -27,7 +27,6 @@ import (
 	"github.com/wcharczuk/go-chart/util"
 
 	"github.com/ledgerwatch/lmdb-go/lmdb"
-
 	"github.com/ledgerwatch/turbo-geth/cmd/hack/db"
 	"github.com/ledgerwatch/turbo-geth/cmd/hack/flow"
 	"github.com/ledgerwatch/turbo-geth/cmd/hack/tool"
diff --git a/cmd/integration/commands/snapshot_check.go b/cmd/integration/commands/snapshot_check.go
index 792b0667129401b72ebe98c425b1c39a7f8b7f06..b5dab590d707bee2c0ac4b63ec78768973e5ebc9 100644
--- a/cmd/integration/commands/snapshot_check.go
+++ b/cmd/integration/commands/snapshot_check.go
@@ -80,11 +80,11 @@ var cmdSnapshotCheck = &cobra.Command{
 		}()
 		tmpDb := ethdb.NewLMDB().Path(path).MustOpen()
 
-		kv := ethdb.NewSnapshot2KV().
+		kv := ethdb.NewSnapshotKV().
 			DB(tmpDb).
 			SnapshotDB([]string{dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, dbutils.HeaderTDBucket, dbutils.BlockBodyPrefix, dbutils.Senders, dbutils.HeadBlockKey, dbutils.HeaderNumberBucket}, mainDB.RwKV()).
 			SnapshotDB([]string{dbutils.PlainStateBucket, dbutils.CodeBucket, dbutils.PlainContractCodeBucket}, stateSnapshot).
-			MustOpen()
+			Open()
 
 		db := ethdb.NewObjectDatabase(kv)
 		defer db.Close()
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 200b196ee222ed26a34531273427d4cdfee85990..1c8bbd9f46948d6b1dea67ee50c1469d6d055d9f 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -856,6 +856,7 @@ func newSync(quitCh <-chan struct{}, db ethdb.Database, tx ethdb.Database, minin
 
 func SetSnapshotKV(db ethdb.Database, snapshotDir string, mode snapshotsync.SnapshotMode) error {
 	if len(snapshotDir) > 0 {
+		// TODO: migrate this snapshot wrapping to the new SnapshotKV format
 		snapshotKV := db.(ethdb.HasRwKV).RwKV()
 		var err error
 		snapshotKV, err = snapshotsync.WrapBySnapshotsFromDir(snapshotKV, snapshotDir, mode)
diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go
index 8cf848e96b05a954dc0b7ce019f81216cb23ce83..457f2907f99277484a168ac457dbcb7292ef9297 100644
--- a/cmd/rpcdaemon/commands/trace_adhoc.go
+++ b/cmd/rpcdaemon/commands/trace_adhoc.go
@@ -730,7 +730,6 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx ethdb.Tx, callPara
 			return nil, fmt.Errorf("vmTrace not implemented yet")
 		}
 		results = append(results, traceResult)
-		txIndex++ //nolint
 	}
 	return results, nil
 }
diff --git a/cmd/rpcdaemon/commands/trace_types.go b/cmd/rpcdaemon/commands/trace_types.go
index af76bb316aa0588a72b85190da8040c718ef7254..c1c54a52613f8271a226a80390168759a07f0df8 100644
--- a/cmd/rpcdaemon/commands/trace_types.go
+++ b/cmd/rpcdaemon/commands/trace_types.go
@@ -154,8 +154,7 @@ func (t ParityTrace) String() string {
 
 // Takes a hierarchical Geth trace with fields of different meaning stored in the same named fields depending on 'type'. Parity traces
 // are flattened depth first and each field is put in its proper place
-//nolint
-func (api *TraceAPIImpl) convertToParityTrace(gethTrace GethTrace, blockHash common.Hash, blockNumber uint64, tx types.Transaction, txIndex uint64, depth []int) ParityTraces {
+func (api *TraceAPIImpl) convertToParityTrace(gethTrace GethTrace, blockHash common.Hash, blockNumber uint64, tx types.Transaction, txIndex uint64, depth []int) ParityTraces { //nolint: unused
 	var traces ParityTraces // nolint prealloc
 	return traces
 }
diff --git a/cmd/rpcdaemon/filters/filters.go b/cmd/rpcdaemon/filters/filters.go
index 642c68b7211f2df479ae0ebedeb97a84281c1e64..530f8af46582d08416ec55a724dc3dfc5bc06f29 100644
--- a/cmd/rpcdaemon/filters/filters.go
+++ b/cmd/rpcdaemon/filters/filters.go
@@ -62,7 +62,7 @@ func New(ctx context.Context, ethBackend core.ApiBackend, txPool txpool.TxpoolCl
 			return
 		}
 		if err := ff.subscribeToPendingTransactions(ctx, txPool); err != nil {
-			log.Warn("rpc filters: error subscribing to events", "err", err)
+			log.Warn("rpc filters: error subscribing to pending transactions", "err", err)
 			time.Sleep(time.Second)
 		}
 	}()
diff --git a/cmd/snapshots/debug/debug_test.go b/cmd/snapshots/debug/debug_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f85dd73b174aa03c915231e703a7008ddf35c8e0
--- /dev/null
+++ b/cmd/snapshots/debug/debug_test.go
@@ -0,0 +1,368 @@
+package debug
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/holiman/uint256"
+	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/consensus/ethash"
+	"github.com/ledgerwatch/turbo-geth/core"
+	"github.com/ledgerwatch/turbo-geth/core/rawdb"
+	"github.com/ledgerwatch/turbo-geth/core/state"
+	"github.com/ledgerwatch/turbo-geth/core/types"
+	"github.com/ledgerwatch/turbo-geth/core/types/accounts"
+	"github.com/ledgerwatch/turbo-geth/core/vm"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/rlp"
+)
+
+const (
+	AccountDiff  = "accdiff"
+	StorageDiff  = "stdiff"
+	ContractDiff = "contractdiff"
+	Deleted      = "it is deleted"
+)
+
+func WithBlock(block uint64, key []byte) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, block)
+	return append(b, key...)
+}
+func TestMatreshkaStream(t *testing.T) {
+	t.Skip()
+	chaindataDir := "/media/b00ris/nvme/fresh_sync/tg/chaindata"
+	tmpDbDir := "/home/b00ris/event_stream"
+
+	chaindata, err := ethdb.Open(chaindataDir, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	//tmpDb:=ethdb.NewMemDatabase()
+	os.RemoveAll(tmpDbDir)
+
+	kv, err := ethdb.NewMDBX().Path(tmpDbDir).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
+		defaultBuckets[AccountDiff] = dbutils.BucketConfigItem{}
+		defaultBuckets[StorageDiff] = dbutils.BucketConfigItem{}
+		defaultBuckets[ContractDiff] = dbutils.BucketConfigItem{}
+		return defaultBuckets
+	}).Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tmpDb := ethdb.NewObjectDatabase(kv)
+	chainConfig, _, genesisErr := core.SetupGenesisBlock(tmpDb, core.DefaultGenesisBlock(), true, false /* overwrite */)
+	if genesisErr != nil {
+		t.Fatal(err)
+	}
+	tmpDb.ClearBuckets(dbutils.HeadHeaderKey)
+
+	snkv := ethdb.NewSnapshotKV().DB(tmpDb.RwKV()).SnapshotDB([]string{dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, dbutils.HeaderTDBucket, dbutils.HeaderNumberBucket, dbutils.BlockBodyPrefix, dbutils.HeadHeaderKey, dbutils.Senders}, chaindata.RwKV()).Open()
+	defer snkv.Close()
+	db := ethdb.NewObjectDatabase(snkv)
+
+	tx, err := snkv.BeginRw(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer tx.Rollback()
+	//
+	//tx, err := db.Begin(context.Background(), ethdb.RW)
+	//if err != nil {
+	//	t.Fatal(err)
+	//}
+	psCursor, err := tx.Cursor(dbutils.PlainStateBucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	i := 5
+	err = ethdb.Walk(psCursor, []byte{}, 0, func(k, v []byte) (bool, error) {
+		fmt.Println(common.Bytes2Hex(k))
+		i--
+		if i == 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	currentBlock := rawdb.ReadCurrentHeader(tx)
+	fmt.Println("currentBlock", currentBlock.Number.Uint64())
+	blockNum := uint64(1)
+	limit := currentBlock.Number.Uint64()
+	blockchain, err := core.NewBlockChain(db, chainConfig, ethash.NewFaker(), vm.Config{
+		NoReceipts: true,
+	}, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) }
+
+	stateReaderWriter := NewDebugReaderWriter(state.NewPlainStateReader(tx), state.NewPlainStateWriter(db, tx, blockNum))
+	tt := time.Now()
+	ttt := time.Now()
+	for currentBlock := blockNum; currentBlock < blockNum+limit; currentBlock++ {
+		stateReaderWriter.UpdateWriter(state.NewPlainStateWriter(db, tx, currentBlock))
+		block, err := rawdb.ReadBlockByNumber(tx, currentBlock)
+		if err != nil {
+			t.Fatal(err, currentBlock)
+		}
+
+		_, err = core.ExecuteBlockEphemerally(blockchain.Config(), blockchain.GetVMConfig(), getHeader, ethash.NewFaker(), block, stateReaderWriter, stateReaderWriter)
+		if err != nil {
+			t.Fatal(err, currentBlock)
+		}
+		cs := stateReaderWriter.UpdatedAccouts()
+		accDiffLen := len(cs)
+		for i := range cs {
+			if len(cs[i].Value) == 0 {
+				cs[i].Value = []byte(Deleted)
+			}
+			err = tx.Put(AccountDiff, WithBlock(currentBlock, cs[i].Key), cs[i].Value)
+			if err != nil {
+				t.Fatal(err, cs[i].Key, currentBlock)
+			}
+		}
+		cs = stateReaderWriter.UpdatedStorage()
+		stDiffLen := len(cs)
+		for i := range cs {
+			if len(cs[i].Value) == 0 {
+				cs[i].Value = []byte(Deleted)
+			}
+			err = tx.Put(StorageDiff, WithBlock(currentBlock, cs[i].Key), cs[i].Value)
+			if err != nil {
+				t.Fatal(err, cs[i].Key, currentBlock)
+			}
+		}
+		cs = stateReaderWriter.UpdatedCodes()
+		codesDiffLen := len(cs)
+		for i := range cs {
+			if len(cs[i].Value) == 0 {
+				cs[i].Value = []byte(Deleted)
+			}
+			err = tx.Put(ContractDiff, WithBlock(currentBlock, cs[i].Key), cs[i].Value)
+			if err != nil {
+				t.Fatal(err, cs[i].Key, currentBlock)
+			}
+		}
+
+		stateReaderWriter.Reset()
+		if currentBlock%10000 == 0 {
+			err = tx.Commit()
+			if err != nil {
+				t.Fatal(err, currentBlock)
+			}
+			tx, err = snkv.BeginRw(context.Background())
+			if err != nil {
+				t.Fatal(err, currentBlock)
+			}
+
+			dr := time.Since(ttt)
+			fmt.Println(currentBlock, "finished", "acc-", accDiffLen, "st-", stDiffLen, "codes - ", codesDiffLen, "all -", time.Since(tt), "chunk - ", dr, "blocks/s", 10000/dr.Seconds())
+			ttt = time.Now()
+		}
+	}
+	err = tx.Commit()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fmt.Println("End")
+	//spew.Dump("readAcc",len(stateReaderWriter.readAcc))
+	//spew.Dump("readStr",len(stateReaderWriter.readStorage))
+	//spew.Dump("createdContracts", len(stateReaderWriter.createdContracts))
+	//spew.Dump("deleted",len(stateReaderWriter.deletedAcc))
+
+}
+
+var _ state.StateReader = &DebugReaderWriter{}
+var _ state.WriterWithChangeSets = &DebugReaderWriter{}
+
+func NewDebugReaderWriter(r state.StateReader, w state.WriterWithChangeSets) *DebugReaderWriter {
+	return &DebugReaderWriter{
+		r: r,
+		w: w,
+		//readAcc: make(map[common.Address]struct{}),
+		//readStorage: make(map[string]struct{}),
+		//readCodes: make(map[common.Hash]struct{}),
+		//readIncarnations: make(map[common.Address]struct{}),
+
+		updatedAcc:     make(map[common.Address][]byte),
+		updatedStorage: make(map[string][]byte),
+		updatedCodes:   make(map[common.Hash][]byte),
+		//deletedAcc: make(map[common.Address]struct{}),
+		//createdContracts: make(map[common.Address]struct{}),
+
+	}
+}
+
+type DebugReaderWriter struct {
+	r state.StateReader
+	w state.WriterWithChangeSets
+	//readAcc map[common.Address]struct{}
+	//readStorage map[string]struct{}
+	//readCodes map[common.Hash] struct{}
+	//readIncarnations map[common.Address] struct{}
+	updatedAcc     map[common.Address][]byte
+	updatedStorage map[string][]byte
+	updatedCodes   map[common.Hash][]byte
+	//deletedAcc map[common.Address]struct{}
+	//createdContracts map[common.Address]struct{}
+}
+
+func (d *DebugReaderWriter) Reset() {
+	d.updatedAcc = map[common.Address][]byte{}
+	d.updatedStorage = map[string][]byte{}
+	d.updatedCodes = map[common.Hash][]byte{}
+}
+func (d *DebugReaderWriter) UpdateWriter(w state.WriterWithChangeSets) {
+	d.w = w
+}
+
+func (d *DebugReaderWriter) ReadAccountData(address common.Address) (*accounts.Account, error) {
+	//d.readAcc[address] = struct{}{}
+	return d.r.ReadAccountData(address)
+}
+
+func (d *DebugReaderWriter) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) {
+	//d.readStorage[string(dbutils.PlainGenerateCompositeStorageKey(address.Bytes(),incarnation, key.Bytes()))] = struct{}{}
+	return d.r.ReadAccountStorage(address, incarnation, key)
+}
+
+func (d *DebugReaderWriter) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) {
+	//d.readCodes[codeHash] = struct{}{}
+	return d.r.ReadAccountCode(address, incarnation, codeHash)
+}
+
+func (d *DebugReaderWriter) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) {
+	return d.r.ReadAccountCodeSize(address, incarnation, codeHash)
+}
+
+func (d *DebugReaderWriter) ReadAccountIncarnation(address common.Address) (uint64, error) {
+	//d.readIncarnations[address] = struct{}{}
+	return d.r.ReadAccountIncarnation(address)
+}
+
+func (d *DebugReaderWriter) WriteChangeSets() error {
+	return d.w.WriteChangeSets()
+}
+
+func (d *DebugReaderWriter) WriteHistory() error {
+	return d.w.WriteHistory()
+}
+
+func (d *DebugReaderWriter) UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error {
+	b, err := rlp.EncodeToBytes(account)
+	if err != nil {
+		return err
+	}
+	d.updatedAcc[address] = b
+	return d.w.UpdateAccountData(ctx, address, original, account)
+}
+
+func (d *DebugReaderWriter) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error {
+	d.updatedCodes[codeHash] = code
+	return d.w.UpdateAccountCode(address, incarnation, codeHash, code)
+}
+
+func (d *DebugReaderWriter) DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error {
+	d.updatedAcc[address] = nil
+	//d.deletedAcc[address]= struct{}{}
+	return d.w.DeleteAccount(ctx, address, original)
+}
+
+func (d *DebugReaderWriter) WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error {
+	d.updatedStorage[string(dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes()))] = value.Bytes()
+	return d.w.WriteAccountStorage(ctx, address, incarnation, key, original, value)
+}
+
+func (d *DebugReaderWriter) CreateContract(address common.Address) error {
+	//d.createdContracts[address] = struct{}{}
+	return d.w.CreateContract(address)
+}
+
+type Change struct {
+	Key   []byte
+	Value []byte
+}
+
+func (d *DebugReaderWriter) UpdatedAccouts() []Change {
+	ch := make([]Change, 0, len(d.updatedAcc))
+	for k, v := range d.updatedAcc {
+		ch = append(ch, Change{
+			Key:   common.CopyBytes(k.Bytes()),
+			Value: common.CopyBytes(v),
+		})
+	}
+	return ch
+}
+func (d *DebugReaderWriter) UpdatedStorage() []Change {
+	ch := make([]Change, 0, len(d.updatedStorage))
+	for k, v := range d.updatedStorage {
+		ch = append(ch, Change{
+			Key:   common.CopyBytes([]byte(k)),
+			Value: common.CopyBytes(v),
+		})
+	}
+	return ch
+
+}
+func (d *DebugReaderWriter) UpdatedCodes() []Change {
+	ch := make([]Change, 0, len(d.updatedCodes))
+	for k, v := range d.updatedCodes {
+		ch = append(ch, Change{
+			Key:   common.CopyBytes(k.Bytes()),
+			Value: common.CopyBytes(v),
+		})
+	}
+	return ch
+}
+
+//func (d *DebugReaderWriter) AllAccounts() map[common.Address]struct{}  {
+//	accs:=make(map[common.Address]struct{})
+//	for i:=range d.readAcc {
+//		accs[i]=struct{}{}
+//	}
+//	for i:=range d.updatedAcc {
+//		accs[i]=struct{}{}
+//	}
+//	for i:=range d.readIncarnations {
+//		accs[i]=struct{}{}
+//	}
+//	for i:=range d.deletedAcc {
+//		accs[i]=struct{}{}
+//	}
+//	for i:=range d.createdContracts {
+//		accs[i]=struct{}{}
+//	}
+//	return accs
+//}
+//func (d *DebugReaderWriter) AllStorage() map[string]struct{}  {
+//	st:=make(map[string]struct{})
+//	for i:=range d.readStorage {
+//		st[i]=struct{}{}
+//	}
+//	for i:=range d.updatedStorage {
+//		st[i]=struct{}{}
+//	}
+//	return st
+//}
+//func (d *DebugReaderWriter) AllCodes() map[common.Hash]struct{}  {
+//	c:=make(map[common.Hash]struct{})
+//	for i:=range d.readCodes {
+//		c[i]=struct{}{}
+//	}
+//	for i:=range d.updatedCodes {
+//		c[i]=struct{}{}
+//	}
+//	return c
+//}
diff --git a/cmd/snapshots/downloader/commands/root.go b/cmd/snapshots/downloader/commands/root.go
index c1a74e6ff0d1aadb763982c3a5ce726e8c6f4520..fd6512b587b20ca27c18b085f572952da7efb516 100644
--- a/cmd/snapshots/downloader/commands/root.go
+++ b/cmd/snapshots/downloader/commands/root.go
@@ -17,7 +17,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/log"
 	"github.com/ledgerwatch/turbo-geth/params"
 	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync/bittorrent"
 	"github.com/spf13/cobra"
 	"github.com/urfave/cli"
 	"google.golang.org/grpc"
@@ -26,7 +25,7 @@ import (
 
 func init() {
 	flags := append(debug.Flags, utils.MetricFlags...)
-	flags = append(flags, PreDownloadMainnetFlag, Addr, Dir)
+	flags = append(flags, PreDownloadMainnetFlag, Addr, Dir, HttpApi)
 	utils.CobraFlags(rootCmd, flags)
 
 	rootCmd.PersistentFlags().Bool("seeding", true, "Seed snapshots")
@@ -47,6 +46,10 @@ var (
 		Name:  "predownload.mainnet",
 		Usage: "add all available mainnet snapshots for seeding",
 	}
+	HttpApi = cli.BoolFlag{
+		Name:  "http",
+		Usage: "Enable http",
+	}
 )
 
 type Config struct {
@@ -83,7 +86,7 @@ func rootContext() context.Context {
 var rootCmd = &cobra.Command{
 	Use:     "",
 	Short:   "run snapshot downloader",
-	Example: "go run ./cmd/snapshots/downloader/main.go --dir /tmp --addr 127.0.0.1:9191",
+	Example: "go run ./cmd/snapshots/downloader/main.go --dir /tmp --addr 127.0.0.1:9191 --predownload.mainnet",
 	PersistentPreRun: func(cmd *cobra.Command, args []string) {
 		if err := debug.SetupCobra(cmd); err != nil {
 			panic(err)
@@ -133,31 +136,55 @@ func runDownloader(cmd *cobra.Command, args []string) error {
 		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)),
 	}
 	grpcServer := grpc.NewServer(opts...)
-	bittorrentServer, err := bittorrent.NewServer(cfg.Dir, cfg.Seeding)
+	bittorrentServer, err := snapshotsync.NewServer(cfg.Dir, cfg.Seeding)
 	if err != nil {
-		return err
+		return fmt.Errorf("new server: %w", err)
 	}
+	log.Info("Load")
 	err = bittorrentServer.Load()
 	if err != nil {
-		return err
+		return fmt.Errorf("load: %w", err)
 	}
 
 	mainNetPreDownload, err := cmd.Flags().GetBool(PreDownloadMainnetFlag.Name)
 	if err != nil {
-		return err
+		return fmt.Errorf("get bool: %w", err)
 	}
 	if mainNetPreDownload {
 		log.Info("Predownload mainnet snapshots")
 		go func() {
 			_, err := bittorrentServer.Download(context.Background(), &snapshotsync.DownloadSnapshotRequest{
 				NetworkId: params.MainnetChainConfig.ChainID.Uint64(),
-				Type:      bittorrent.GetAvailableSnapshotTypes(params.MainnetChainConfig.ChainID.Uint64()),
+				Type:      snapshotsync.GetAvailableSnapshotTypes(params.MainnetChainConfig.ChainID.Uint64()),
 			})
 			if err != nil {
 				log.Error("Predownload failed", "err", err, "networkID", params.MainnetChainConfig.ChainID.Uint64())
 			}
 		}()
 	}
+	go func() {
+		for {
+			select {
+			case <-cmd.Context().Done():
+				return
+			default:
+			}
+
+			snapshots, err := bittorrentServer.Snapshots(context.Background(), &snapshotsync.SnapshotsRequest{
+				NetworkId: params.MainnetChainConfig.ChainID.Uint64(),
+			})
+			if err != nil {
+				log.Error("get snapshots", "err", err)
+				time.Sleep(time.Minute)
+				continue
+			}
+			stats := bittorrentServer.Stats(context.Background())
+			for _, v := range snapshots.Info {
+				log.Info("Snapshot "+v.Type.String(), "%", v.Readiness, "peers", stats[v.Type.String()].ConnectedSeeders)
+			}
+			time.Sleep(time.Minute)
+		}
+	}()
 	snapshotsync.RegisterDownloaderServer(grpcServer, bittorrentServer)
 	go func() {
 		log.Info("Starting grpc")
diff --git a/cmd/snapshots/generator/commands/copy_from_state.go b/cmd/snapshots/generator/commands/copy_from_state.go
index 329c86362b3789ca535b15f8d9354e36f8d44196..8c51e337aa6d4c9f0877911d40966b22903c37bc 100644
--- a/cmd/snapshots/generator/commands/copy_from_state.go
+++ b/cmd/snapshots/generator/commands/copy_from_state.go
@@ -9,14 +9,12 @@ import (
 	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"github.com/spf13/cobra"
 )
 
 func init() {
 	withDatadir(copyFromStateSnapshotCmd)
 	withSnapshotFile(copyFromStateSnapshotCmd)
-	withSnapshotData(copyFromStateSnapshotCmd)
 	withBlock(copyFromStateSnapshotCmd)
 	rootCmd.AddCommand(copyFromStateSnapshotCmd)
 
@@ -38,20 +36,6 @@ func CopyFromState(ctx context.Context, dbpath string, snapshotPath string, bloc
 		return err
 	}
 
-	kv := db.RwKV()
-	if snapshotDir != "" {
-		var mode snapshotsync.SnapshotMode
-		mode, err = snapshotsync.SnapshotModeFromString(snapshotMode)
-		if err != nil {
-			return err
-		}
-		kv, err = snapshotsync.WrapBySnapshotsFromDir(kv, snapshotDir, mode)
-		if err != nil {
-			return err
-		}
-	}
-	db.SetRwKV(kv)
-
 	err = os.RemoveAll(snapshotPath)
 	if err != nil {
 		return err
diff --git a/cmd/snapshots/generator/commands/generate_body_snapshot.go b/cmd/snapshots/generator/commands/generate_body_snapshot.go
index c1d02a34741a6d81f0cbd570c4bfcb791c218bb3..af6bddd1db176c7647f24b79a62bc0789d9e61a7 100644
--- a/cmd/snapshots/generator/commands/generate_body_snapshot.go
+++ b/cmd/snapshots/generator/commands/generate_body_snapshot.go
@@ -14,13 +14,11 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/rawdb"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 )
 
 func init() {
 	withDatadir(generateBodiesSnapshotCmd)
 	withSnapshotFile(generateBodiesSnapshotCmd)
-	withSnapshotData(generateBodiesSnapshotCmd)
 	withBlock(generateBodiesSnapshotCmd)
 	rootCmd.AddCommand(generateBodiesSnapshotCmd)
 
@@ -38,18 +36,6 @@ var generateBodiesSnapshotCmd = &cobra.Command{
 func BodySnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock uint64, snapshotDir string, snapshotMode string) error {
 	kv := ethdb.NewLMDB().Path(dbPath).MustOpen()
 	var err error
-	if snapshotDir != "" {
-		var mode snapshotsync.SnapshotMode
-		mode, err = snapshotsync.SnapshotModeFromString(snapshotMode)
-		if err != nil {
-			return err
-		}
-
-		kv, err = snapshotsync.WrapBySnapshotsFromDir(kv, snapshotDir, mode)
-		if err != nil {
-			return err
-		}
-	}
 
 	snKV := ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 		return dbutils.BucketsCfg{
diff --git a/cmd/snapshots/generator/commands/generate_header_snapshot.go b/cmd/snapshots/generator/commands/generate_header_snapshot.go
index 829020b3fd6cc0bc07439cbc3fe89a563d2aec52..2ed27718533538087793e8c2196c4dbab8c3bf31 100644
--- a/cmd/snapshots/generator/commands/generate_header_snapshot.go
+++ b/cmd/snapshots/generator/commands/generate_header_snapshot.go
@@ -15,13 +15,11 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/rawdb"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 )
 
 func init() {
 	withDatadir(generateHeadersSnapshotCmd)
 	withSnapshotFile(generateHeadersSnapshotCmd)
-	withSnapshotData(generateHeadersSnapshotCmd)
 	withBlock(generateHeadersSnapshotCmd)
 
 	rootCmd.AddCommand(generateHeadersSnapshotCmd)
@@ -46,18 +44,6 @@ func HeaderSnapshot(ctx context.Context, dbPath, snapshotPath string, toBlock ui
 	}
 	kv := ethdb.NewLMDB().Path(dbPath).MustOpen()
 
-	if snapshotDir != "" {
-		var mode snapshotsync.SnapshotMode
-		mode, err = snapshotsync.SnapshotModeFromString(snapshotMode)
-		if err != nil {
-			return err
-		}
-
-		kv, err = snapshotsync.WrapBySnapshotsFromDir(kv, snapshotDir, mode)
-		if err != nil {
-			return err
-		}
-	}
 	snKV := ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 		return dbutils.BucketsCfg{
 			dbutils.HeadersBucket: dbutils.BucketConfigItem{},
diff --git a/cmd/snapshots/generator/commands/generate_state_snapshot.go b/cmd/snapshots/generator/commands/generate_state_snapshot.go
index c59e9a28eed2419219935b4ffc6b9f40be3354a6..fe1f282fe256bc631555b20500e76c2133ac9567 100644
--- a/cmd/snapshots/generator/commands/generate_state_snapshot.go
+++ b/cmd/snapshots/generator/commands/generate_state_snapshot.go
@@ -12,7 +12,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/state"
 	"github.com/ledgerwatch/turbo-geth/core/types/accounts"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"github.com/ledgerwatch/turbo-geth/turbo/trie"
 	"github.com/spf13/cobra"
 )
@@ -20,7 +19,6 @@ import (
 func init() {
 	withDatadir(generateStateSnapshotCmd)
 	withSnapshotFile(generateStateSnapshotCmd)
-	withSnapshotData(generateStateSnapshotCmd)
 	withBlock(generateStateSnapshotCmd)
 	rootCmd.AddCommand(generateStateSnapshotCmd)
 
@@ -47,19 +45,6 @@ func GenerateStateSnapshot(ctx context.Context, dbPath, snapshotPath string, toB
 	}
 	kv := ethdb.NewLMDB().Path(dbPath).MustOpen()
 
-	if snapshotDir != "" {
-		var mode snapshotsync.SnapshotMode
-		mode, err = snapshotsync.SnapshotModeFromString(snapshotMode)
-		if err != nil {
-			return err
-		}
-
-		kv, err = snapshotsync.WrapBySnapshotsFromDir(kv, snapshotDir, mode)
-		if err != nil {
-			return err
-		}
-	}
-
 	snkv := ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 		return dbutils.BucketsCfg{
 			dbutils.PlainStateBucket:        dbutils.BucketConfigItem{},
diff --git a/cmd/snapshots/generator/commands/metainfo_hash.go b/cmd/snapshots/generator/commands/metainfo_hash.go
new file mode 100644
index 0000000000000000000000000000000000000000..26893847e69917b1a20fa16bba129755ff5add87
--- /dev/null
+++ b/cmd/snapshots/generator/commands/metainfo_hash.go
@@ -0,0 +1,46 @@
+package commands
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
+	"github.com/spf13/cobra"
+)
+
+func init() {
+	rootCmd.AddCommand(snapshotMetainfoCmd)
+}
+
+func PrintMetaInfoHash(path string) error {
+	t := time.Now()
+	mi := metainfo.MetaInfo{}
+	info, err := snapshotsync.BuildInfoBytesForSnapshot(path, snapshotsync.MdbxFilename)
+	if err != nil {
+		return err
+	}
+	mi.InfoBytes, err = bencode.Marshal(info)
+	if err != nil {
+		return err
+	}
+
+	fmt.Println("infohash:", mi.HashInfoBytes().String())
+	fmt.Println("infobytes:", common.Bytes2Hex(mi.InfoBytes))
+	fmt.Println("It took", time.Since(t))
+	return nil
+}
+
+var snapshotMetainfoCmd = &cobra.Command{
+	Use:   "snapshotMetainfo",
+	Short: "Calculate snapshot metainfo",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		if len(args) == 0 {
+			return errors.New("empty path")
+		}
+		return PrintMetaInfoHash(args[0])
+	},
+}
diff --git a/cmd/snapshots/generator/commands/root.go b/cmd/snapshots/generator/commands/root.go
index a5d6d5c55ffd6e2ea8e1d6883ee755b737d68168..d769de93cc4688e41fa32b030ebabcffcd232162 100644
--- a/cmd/snapshots/generator/commands/root.go
+++ b/cmd/snapshots/generator/commands/root.go
@@ -79,10 +79,6 @@ func must(err error) {
 func withBlock(cmd *cobra.Command) {
 	cmd.Flags().Uint64Var(&block, "block", 1, "specifies a block number for operation")
 }
-func withSnapshotData(cmd *cobra.Command) {
-	cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use")
-	cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir")
-}
 
 func withDatadir(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&datadir, "datadir", paths.DefaultDataDir(), "data directory for temporary ELT files")
diff --git a/cmd/snapshots/generator/commands/verify_headers.go b/cmd/snapshots/generator/commands/verify_headers.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf254e3a9d39481c7106160416264b3242c24770
--- /dev/null
+++ b/cmd/snapshots/generator/commands/verify_headers.go
@@ -0,0 +1,110 @@
+package commands
+
+import (
+	"context"
+	"errors"
+	"sync/atomic"
+	"time"
+
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/core/types"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/log"
+	"github.com/ledgerwatch/turbo-geth/rlp"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
+	"github.com/spf13/cobra"
+)
+
+func init() {
+	withSnapshotFile(verifyHeadersSnapshotCmd)
+	withBlock(verifyHeadersSnapshotCmd)
+	rootCmd.AddCommand(verifyHeadersSnapshotCmd)
+
+}
+
+//go run cmd/snapshots/generator/main.go verify_headers --block 11000000 --snapshot /media/b00ris/nvme/snapshots/headers
+var verifyHeadersSnapshotCmd = &cobra.Command{
+	Use:     "verify_headers",
+	Short:   "Verify headers snapshot",
+	Example: "go run cmd/snapshots/generator/main.go verify_headers --block 11000000 --snapshot /media/b00ris/nvme/snapshots/headers",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return VerifyHeadersSnapshot(cmd.Context(), snapshotFile)
+	},
+}
+
+func VerifyHeadersSnapshot(ctx context.Context, snapshotPath string) error {
+	tt := time.Now()
+	log.Info("Start validation")
+	var prevHeader *types.Header
+	var lastHeader uint64
+
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				log.Info("Verifying", "t", time.Since(tt), "block", atomic.LoadUint64(&lastHeader))
+			}
+			time.Sleep(time.Second * 10)
+		}
+	}()
+	snKV, err := snapshotsync.OpenHeadersSnapshot(snapshotPath)
+	if err != nil {
+		return err
+	}
+	err = snKV.View(ctx, func(tx ethdb.Tx) error {
+		c, err := tx.Cursor(dbutils.HeadersBucket)
+		if err != nil {
+			return err
+		}
+		k, v, innerErr := c.First()
+		for {
+			if len(k) == 0 && len(v) == 0 {
+				break
+			}
+			if innerErr != nil {
+				return innerErr
+			}
+
+			header := new(types.Header)
+			innerErr := rlp.DecodeBytes(v, header)
+			if innerErr != nil {
+				return innerErr
+			}
+
+			if prevHeader != nil {
+				if prevHeader.Number.Uint64()+1 != header.Number.Uint64() {
+					log.Error("invalid header number", "p", prevHeader.Number.Uint64(), "c", header.Number.Uint64())
+					return errors.New("invalid header number")
+				}
+				if prevHeader.Hash() != header.ParentHash {
+					log.Error("invalid parent hash", "p", prevHeader.Hash(), "c", header.ParentHash)
+					return errors.New("invalid parent hash")
+				}
+			}
+			k, v, innerErr = c.Next()
+			if innerErr != nil {
+				return innerErr
+			}
+
+			prevHeader = header
+
+			atomic.StoreUint64(&lastHeader, header.Number.Uint64())
+		}
+		if innerErr != nil {
+			return innerErr
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	if block != 0 {
+		if lastHeader != block {
+			return errors.New("incorrect last block")
+		}
+	}
+	log.Info("Success", "t", time.Since(tt))
+	return nil
+}
diff --git a/cmd/snapshots/generator/commands/verify_state_snapshot.go b/cmd/snapshots/generator/commands/verify_state_snapshot.go
index 3cb73bd9e8b8fd557d97b1455cc3048ed1e5c288..48d72ddb19a455132c87f86a32cf1cf12469447a 100644
--- a/cmd/snapshots/generator/commands/verify_state_snapshot.go
+++ b/cmd/snapshots/generator/commands/verify_state_snapshot.go
@@ -12,7 +12,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/rawdb"
 	"github.com/ledgerwatch/turbo-geth/eth/stagedsync"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"github.com/spf13/cobra"
 )
 
@@ -35,25 +34,6 @@ var verifyStateSnapshotCmd = &cobra.Command{
 }
 
 func VerifyStateSnapshot(ctx context.Context, dbPath, snapshotPath string, block uint64) error {
-	db, err := ethdb.Open(dbPath, true)
-	if err != nil {
-		return fmt.Errorf("open err: %w", err)
-	}
-
-	kv := db.RwKV()
-	if snapshotDir != "" {
-		var mode snapshotsync.SnapshotMode
-		mode, err = snapshotsync.SnapshotModeFromString(snapshotMode)
-		if err != nil {
-			return err
-		}
-		kv, err = snapshotsync.WrapBySnapshotsFromDir(kv, snapshotDir, mode)
-		if err != nil {
-			return err
-		}
-	}
-	db.SetRwKV(kv)
-
 	snkv := ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 		return dbutils.BucketsCfg{
 			dbutils.PlainStateBucket:        dbutils.BucketsConfigs[dbutils.PlainStateBucket],
@@ -69,7 +49,7 @@ func VerifyStateSnapshot(ctx context.Context, dbPath, snapshotPath string, block
 	tmpDB := ethdb.NewLMDB().Path(tmpPath).MustOpen()
 	defer os.RemoveAll(tmpPath)
 	defer tmpDB.Close()
-	snkv = ethdb.NewSnapshot2KV().SnapshotDB([]string{dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.CodeBucket}, snkv).DB(tmpDB).MustOpen()
+	snkv = ethdb.NewSnapshotKV().SnapshotDB([]string{dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.CodeBucket}, snkv).DB(tmpDB).Open()
 	sndb := ethdb.NewObjectDatabase(snkv)
 	tx, err := sndb.Begin(context.Background(), ethdb.RW)
 	if err != nil {
@@ -87,13 +67,13 @@ func VerifyStateSnapshot(ctx context.Context, dbPath, snapshotPath string, block
 	}
 	expectedRootHash := syncHeadHeader.Root
 	tt := time.Now()
-	err = stagedsync.PromoteHashedStateCleanly("", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), stagedsync.StageHashStateCfg(db.RwKV(), os.TempDir()), ctx.Done())
+	err = stagedsync.PromoteHashedStateCleanly("", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), stagedsync.StageHashStateCfg(sndb.RwKV(), os.TempDir()), ctx.Done())
 	fmt.Println("Promote took", time.Since(tt))
 	if err != nil {
 		return fmt.Errorf("promote state err: %w", err)
 	}
 
-	_, err = stagedsync.RegenerateIntermediateHashes("", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), stagedsync.StageTrieCfg(db.RwKV(), true, true, os.TempDir()), expectedRootHash, ctx.Done())
+	_, err = stagedsync.RegenerateIntermediateHashes("", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), stagedsync.StageTrieCfg(sndb.RwKV(), true, true, os.TempDir()), expectedRootHash, ctx.Done())
 	if err != nil {
 		return fmt.Errorf("regenerateIntermediateHashes err: %w", err)
 	}
diff --git a/cmd/snapshots/seeder/commands/seeder.go b/cmd/snapshots/seeder/commands/seeder.go
index 458d32b9bbbdd57b29a7250b367e61a2386ba54d..626884dd8da4a0e3f0999327be830a0e0086204b 100644
--- a/cmd/snapshots/seeder/commands/seeder.go
+++ b/cmd/snapshots/seeder/commands/seeder.go
@@ -13,10 +13,14 @@ import (
 	"github.com/anacrolix/torrent/metainfo"
 	"github.com/ledgerwatch/turbo-geth/common"
 	"github.com/ledgerwatch/turbo-geth/log"
-	trnt "github.com/ledgerwatch/turbo-geth/turbo/snapshotsync/bittorrent"
+	trnt "github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 )
 
 func Seed(ctx context.Context, datadir string) error {
+	defer func() {
+	// hack: the origin lib doesn't have proper close handling
+		time.Sleep(time.Second * 5)
+	}()
 	datadir = filepath.Dir(datadir)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -33,7 +37,6 @@ func Seed(ctx context.Context, datadir string) error {
 		cfg.DataDir + "/headers",
 		cfg.DataDir + "/bodies",
 		cfg.DataDir + "/state",
-		//cfg.DataDir+"/receipts",
 	}
 
 	cl, err := torrent.NewClient(cfg)
@@ -61,7 +64,7 @@ func Seed(ctx context.Context, datadir string) error {
 		if common.IsCanceled(ctx) {
 			return common.ErrStopped
 		}
-		info, err := trnt.BuildInfoBytesForLMDBSnapshot(v)
+		info, err := trnt.BuildInfoBytesForSnapshot(v, trnt.LmdbFilename)
 		if err != nil {
 			return err
 		}
@@ -90,8 +93,6 @@ func Seed(ctx context.Context, datadir string) error {
 		if common.IsCanceled(ctx) {
 			return common.ErrStopped
 		}
-
-		torrents[i].VerifyData()
 	}
 
 	go func() {
diff --git a/cmd/snapshots/tracker/commands/root.go b/cmd/snapshots/tracker/commands/root.go
new file mode 100644
index 0000000000000000000000000000000000000000..a61779daa30033a953813333c6fd3ec6c2c7ae97
--- /dev/null
+++ b/cmd/snapshots/tracker/commands/root.go
@@ -0,0 +1,375 @@
+package commands
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/tracker"
+	"github.com/ledgerwatch/turbo-geth/cmd/utils"
+	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/internal/debug"
+	"github.com/ledgerwatch/turbo-geth/log"
+	"github.com/spf13/cobra"
+)
+
+const DefaultInterval = 60             //in seconds
+const SoftLimit = 5                    //in seconds
+const DisconnectInterval = time.Minute //peer is considered disconnected after a multiple of this duration
+var trackerID = "tg snapshot tracker"
+
+func init() {
+	utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...))
+}
+
+func Execute() {
+	if err := rootCmd.ExecuteContext(rootContext()); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+func rootContext() context.Context {
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		ch := make(chan os.Signal, 1)
+		signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
+		defer signal.Stop(ch)
+
+		select {
+		case <-ch:
+			log.Info("Got interrupt, shutting down...")
+		case <-ctx.Done():
+		}
+
+		cancel()
+	}()
+	return ctx
+}
+
+var rootCmd = &cobra.Command{
+	Use:   "start",
+	Short: "start tracker",
+	PersistentPreRun: func(cmd *cobra.Command, args []string) {
+		if err := debug.SetupCobra(cmd); err != nil {
+			panic(err)
+		}
+	},
+	PersistentPostRun: func(cmd *cobra.Command, args []string) {
+		debug.Exit()
+	},
+	Args:       cobra.ExactArgs(1),
+	ArgAliases: []string{"snapshots dir"},
+	RunE: func(cmd *cobra.Command, args []string) error {
+		db := ethdb.MustOpen(args[0])
+		m := http.NewServeMux()
+		m.Handle("/announce", &Tracker{db: db})
+		m.HandleFunc("/scrape", func(writer http.ResponseWriter, request *http.Request) {
+			log.Warn("scrape", "url", request.RequestURI)
+			ih := request.URL.Query().Get("info_hash")
+			if len(ih) != 20 {
+				log.Error("wrong infohash", "ih", ih, "l", len(ih))
+				WriteResp(writer, ErrResponse{FailureReason: "incorrect infohash"}, false)
+				return
+			}
+			resp := ScrapeResponse{Files: map[string]*ScrapeData{
+				ih: {},
+			}}
+
+			err := db.Walk(dbutils.SnapshotInfoBucket, append([]byte(ih), make([]byte, 20)...), 20*8, func(k, v []byte) (bool, error) {
+				a := AnnounceReqWithTime{}
+				err := json.Unmarshal(v, &a)
+				if err != nil {
+					log.Error("Failed to unmarshal", "k", common.Bytes2Hex(k), "err", err)
+					//skip failed
+					return true, nil
+				}
+				if time.Since(a.UpdatedAt) > 24*time.Hour {
+					log.Debug("Skipped", "k", common.Bytes2Hex(k), "last updated", a.UpdatedAt)
+					return true, nil
+				}
+				if a.Left == 0 {
+					resp.Files[ih].Downloaded++
+					resp.Files[ih].Complete++
+				} else {
+					resp.Files[ih].Incomplete++
+				}
+				return true, nil
+			})
+			if err != nil {
+				log.Error("Walk", "err", err)
+				WriteResp(writer, ErrResponse{FailureReason: err.Error()}, false)
+				return
+			}
+			jsonResp, err := json.Marshal(resp)
+			if err == nil {
+				log.Info("scrape resp", "v", string(jsonResp))
+			} else {
+				log.Info("marshall scrape resp", "err", err)
+			}
+
+			WriteResp(writer, resp, false)
+		})
+		m.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
+			log.Warn("404", "url", request.RequestURI)
+		})
+
+		log.Info("Listen1")
+		go func() {
+			err := http.ListenAndServe(":80", m)
+			log.Error("error", "err", err)
+		}()
+		<-cmd.Context().Done()
+		return nil
+	},
+}
+
+type Tracker struct {
+	db ethdb.Database
+}
+
+/*
+/announce?compact=1
+&downloaded=0
+&event="started"
+&info_hash=D%22%5C%80%F7%FD%12Z%EA%9B%F0%A5z%DA%AF%1F%A4%E1je
+&left=0
+&peer_id=-GT0002-9%EA%FB+%BF%B3%AD%DE%8Ae%D0%B7
+&port=53631
+&supportcrypto=1
+&uploaded=0"
+*/
+type AnnounceReqWithTime struct {
+	AnnounceReq
+	UpdatedAt time.Time
+}
+type AnnounceReq struct {
+	InfoHash      []byte
+	PeerID        []byte
+	RemoteAddr    net.IP
+	Port          int
+	Event         string
+	Uploaded      int64
+	Downloaded    int64
+	SupportCrypto bool
+	Left          int64
+	Compact       bool
+}
+
+type Peer struct {
+	IP     string
+	Port   int
+	PeerID []byte
+}
+
+func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	log.Info("call", "url", r.RequestURI)
+
+	req, err := ParseRequest(r)
+	if err != nil {
+		log.Error("Parse request", "err", err)
+		WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+		return
+	}
+	if err = ValidateReq(req); err != nil {
+		log.Error("Validate failed", "err", err)
+		WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+		return
+	}
+
+	toSave := AnnounceReqWithTime{
+		req,
+		time.Now(),
+	}
+	peerBytes, err := json.Marshal(toSave)
+	if err != nil {
+		log.Error("Json marshal", "err", err)
+		WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+		return
+	}
+
+	key := append(req.InfoHash, req.PeerID...)
+	if req.Event == tracker.Stopped.String() {
+		err = t.db.Delete(dbutils.SnapshotInfoBucket, key, nil)
+		if err != nil {
+			log.Error("Json marshal", "err", err)
+			WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+			return
+		}
+	} else {
+		if prevBytes, err := t.db.Get(dbutils.SnapshotInfoBucket, key); err == nil && len(prevBytes) > 0 {
+			prev := new(AnnounceReqWithTime)
+			err = json.Unmarshal(prevBytes, prev)
+			if err != nil {
+				log.Error("Unable to unmarshal", "err", err)
+			}
+			if time.Since(prev.UpdatedAt) < time.Second*SoftLimit {
+				//too early to update
+				WriteResp(w, ErrResponse{FailureReason: "too early to update"}, req.Compact)
+				return
+
+			}
+		} else if !errors.Is(err, ethdb.ErrKeyNotFound) && err != nil {
+			log.Error("get from db returned error", "err", err)
+			WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+			return
+		}
+		err = t.db.Put(dbutils.SnapshotInfoBucket, key, peerBytes)
+		if err != nil {
+			log.Error("db.Put", "err", err)
+			WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+			return
+		}
+	}
+
+	resp := HttpResponse{
+		Interval:  DefaultInterval,
+		TrackerId: trackerID,
+	}
+
+	err = t.db.Walk(dbutils.SnapshotInfoBucket, append(req.InfoHash, make([]byte, 20)...), 20*8, func(k, v []byte) (bool, error) {
+		a := AnnounceReqWithTime{}
+		err = json.Unmarshal(v, &a)
+		if err != nil {
+			log.Error("Failed to unmarshal", "k", common.Bytes2Hex(k), "err", err)
+			//skip failed
+			return true, nil
+		}
+		if time.Since(a.UpdatedAt) > 5*DisconnectInterval {
+			log.Debug("Skipped request", "peer", common.Bytes2Hex(a.PeerID), "last updated", a.UpdatedAt, "now", time.Now())
+			return true, nil
+		}
+		if a.Left == 0 {
+			resp.Complete++
+		} else {
+			resp.Incomplete++
+		}
+		resp.Peers = append(resp.Peers, map[string]interface{}{
+			"ip":      a.RemoteAddr.String(),
+			"peer id": a.PeerID,
+			"port":    a.Port,
+		})
+		return true, nil
+	})
+	if err != nil {
+		log.Error("Walk", "err", err)
+		WriteResp(w, ErrResponse{FailureReason: err.Error()}, req.Compact)
+		return
+	}
+	jsonResp, err := json.Marshal(resp)
+	if err == nil {
+		log.Info("announce resp", "v", string(jsonResp))
+	} else {
+		log.Info("marshall announce resp", "err", err)
+	}
+
+	WriteResp(w, resp, req.Compact)
+}
+
+func WriteResp(w http.ResponseWriter, res interface{}, compact bool) {
+	if _, ok := res.(ErrResponse); ok {
+		log.Error("Err", "err", res)
+	}
+	if compact {
+		err := bencode.NewEncoder(w).Encode(res)
+		if err != nil {
+			log.Error("Bencode encode", "err", err)
+		}
+	} else {
+		err := json.NewEncoder(w).Encode(res)
+		if err != nil {
+			log.Error("Json marshal", "err", err)
+			return
+		}
+	}
+}
+
+func ParseRequest(r *http.Request) (AnnounceReq, error) {
+	q := r.URL.Query()
+
+	var remoteAddr net.IP
+	if strings.Contains(r.RemoteAddr, ":") {
+		remoteAddr = net.ParseIP(strings.Split(r.RemoteAddr, ":")[0])
+	} else {
+		remoteAddr = net.ParseIP(r.RemoteAddr)
+	}
+
+	downloaded, err := strconv.ParseInt(q.Get("downloaded"), 10, 64)
+	if err != nil {
+		log.Warn("downloaded", "err", err)
+		return AnnounceReq{}, fmt.Errorf("downloaded %v - %w", q.Get("downloaded"), err)
+	}
+	uploaded, err := strconv.ParseInt(q.Get("uploaded"), 10, 64)
+	if err != nil {
+		log.Warn("uploaded", "err", err)
+		return AnnounceReq{}, fmt.Errorf("uploaded %v - %w", q.Get("uploaded"), err)
+	}
+	left, err := strconv.ParseInt(q.Get("left"), 10, 64)
+	if err != nil {
+		log.Warn("left", "err", err)
+		return AnnounceReq{}, fmt.Errorf("left: %v - %w", q.Get("left"), err)
+	}
+	port, err := strconv.Atoi(q.Get("port"))
+	if err != nil {
+		return AnnounceReq{}, fmt.Errorf("port: %v -  %w", q.Get("port"), err)
+	}
+
+	res := AnnounceReq{
+		InfoHash:      []byte(q.Get("info_hash")),
+		PeerID:        []byte(q.Get("peer_id")),
+		RemoteAddr:    remoteAddr,
+		Event:         q.Get("event"),
+		Compact:       q.Get("compact") == "1",
+		SupportCrypto: q.Get("supportcrypto") == "1",
+		Downloaded:    downloaded,
+		Uploaded:      uploaded,
+		Left:          left,
+		Port:          port,
+	}
+	return res, nil
+}
+
+func ValidateReq(req AnnounceReq) error {
+	if len(req.InfoHash) != 20 {
+		return errors.New("invalid infohash")
+	}
+	if len(req.PeerID) != 20 {
+		return errors.New("invalid peer id")
+	}
+	if req.Port == 0 {
+		return errors.New("invalid port")
+	}
+	return nil
+}
+
+type HttpResponse struct {
+	Interval   int32                    `bencode:"interval" json:"interval"`
+	TrackerId  string                   `bencode:"tracker id" json:"tracker_id"`
+	Complete   int32                    `bencode:"complete" json:"complete"`
+	Incomplete int32                    `bencode:"incomplete" json:"incomplete"`
+	Peers      []map[string]interface{} `bencode:"peers" json:"peers"`
+}
+type ErrResponse struct {
+	FailureReason string `bencode:"failure reason" json:"failure_reason"`
+}
+type ScrapeResponse struct {
+	Files map[string]*ScrapeData `json:"files" bencode:"files"`
+}
+
+type ScrapeData struct {
+	Complete   int32 `bencode:"complete" json:"complete"`
+	Downloaded int32 `json:"downloaded" bencode:"downloaded"`
+	Incomplete int32 `json:"incomplete" bencode:"incomplete"`
+}
diff --git a/cmd/snapshots/tracker/main.go b/cmd/snapshots/tracker/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..18f600f0ba6ed78cadf20ba89d5c9b6ce5c848e1
--- /dev/null
+++ b/cmd/snapshots/tracker/main.go
@@ -0,0 +1,7 @@
+package main
+
+import "github.com/ledgerwatch/turbo-geth/cmd/snapshots/tracker/commands"
+
+func main() {
+	commands.Execute()
+}
diff --git a/cmd/snapshots/utils/utils.go b/cmd/snapshots/utils/utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..68a5c849fd5e893aa3a3ee85b705443b3aac5b6d
--- /dev/null
+++ b/cmd/snapshots/utils/utils.go
@@ -0,0 +1,45 @@
+package utils
+
+import (
+	"errors"
+	"os"
+
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+)
+
+const (
+	TypeLMDB = "lmdb"
+	TypeMDBX = "mdbx"
+)
+
+var ErrUnsupported error = errors.New("unsupported KV type")
+
+func RmTmpFiles(dbType string, snapshotPath string) error {
+	switch dbType {
+	case TypeLMDB:
+		return rmLmdbLock(snapshotPath)
+	case TypeMDBX:
+		return rmMdbxLock(snapshotPath)
+	default:
+		return ErrUnsupported
+	}
+}
+func rmLmdbLock(snapshotPath string) error {
+	err := os.Remove(snapshotPath + "/lock.mdb")
+	if err != nil {
+		return err
+	}
+	return os.Remove(snapshotPath + "/LOCK")
+}
+func rmMdbxLock(path string) error {
+	return os.Remove(path + "/mdbx.lck")
+}
+
+func OpenSnapshotKV(dbType string, configsFunc ethdb.BucketConfigsFunc, path string) ethdb.RwKV {
+	if dbType == TypeLMDB {
+		return ethdb.NewLMDB().WithBucketsConfig(configsFunc).Path(path).MustOpen()
+	} else if dbType == TypeMDBX {
+		return ethdb.NewMDBX().WithBucketsConfig(configsFunc).Path(path).MustOpen()
+	}
+	panic(ErrUnsupported.Error())
+}
diff --git a/cmd/state/commands/snapshot_metainfo.go b/cmd/state/commands/snapshot_metainfo.go
deleted file mode 100644
index 80583360186a0f000483b2337db549d0a4d405ef..0000000000000000000000000000000000000000
--- a/cmd/state/commands/snapshot_metainfo.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package commands
-
-import (
-	"errors"
-
-	"github.com/ledgerwatch/turbo-geth/cmd/state/generate"
-	"github.com/spf13/cobra"
-)
-
-func init() {
-	rootCmd.AddCommand(snapshotMetainfoCmd)
-}
-
-var snapshotMetainfoCmd = &cobra.Command{
-	Use:   "snapshotMetainfo",
-	Short: "Calculate snapshot metainfo",
-	RunE: func(cmd *cobra.Command, args []string) error {
-		if len(args) == 0 {
-			return errors.New("empty path")
-		}
-		return generate.MetaInfoHash(args[0])
-	},
-}
diff --git a/cmd/state/generate/snapshot_metainfo_hash.go b/cmd/state/generate/snapshot_metainfo_hash.go
deleted file mode 100644
index 1d3267c828a5de1c3527d83bee6e23020be9a1f8..0000000000000000000000000000000000000000
--- a/cmd/state/generate/snapshot_metainfo_hash.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package generate
-
-import (
-	"fmt"
-	"github.com/anacrolix/torrent/bencode"
-	"github.com/anacrolix/torrent/metainfo"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync/bittorrent"
-	"time"
-)
-
-func MetaInfoHash(path string) error {
-	t := time.Now()
-	mi := metainfo.MetaInfo{}
-	info, err := bittorrent.BuildInfoBytesForLMDBSnapshot(path)
-	if err != nil {
-		return err
-	}
-	mi.InfoBytes, err = bencode.Marshal(info)
-	if err != nil {
-		return err
-	}
-
-	fmt.Println(mi.HashInfoBytes())
-	fmt.Println("It took", time.Since(t))
-	return nil
-}
diff --git a/cmd/tg/main.go b/cmd/tg/main.go
index 8ab9184cd7cd7d73c5eb50846fb09cc33b0f40f3..e96dd0a6781fc490b81dba2c2d643a5df963f1f2 100644
--- a/cmd/tg/main.go
+++ b/cmd/tg/main.go
@@ -41,12 +41,21 @@ func runTurboGeth(cliCtx *cli.Context) {
 		}
 	}
 
-	// creating staged sync with all default parameters
-	sync := stagedsync.New(
-		stagedsync.DefaultStages(),
-		stagedsync.DefaultUnwindOrder(),
-		stagedsync.OptionalParameters{SilkwormExecutionFunc: silkwormExecutionFunc},
-	)
+	var sync *stagedsync.StagedSync
+	if cliCtx.Bool(turbocli.SnapshotDatabaseLayoutFlag.Name) {
+		sync = stagedsync.New(
+			stagedsync.WithSnapshotsStages(),
+			stagedsync.UnwindOrderWithSnapshots(),
+			stagedsync.OptionalParameters{SilkwormExecutionFunc: silkwormExecutionFunc},
+		)
+	} else {
+		// creating staged sync with all default parameters
+		sync = stagedsync.New(
+			stagedsync.DefaultStages(),
+			stagedsync.DefaultUnwindOrder(),
+			stagedsync.OptionalParameters{SilkwormExecutionFunc: silkwormExecutionFunc},
+		)
+	}
 
 	ctx, _ := utils.RootContext()
 
diff --git a/common/dbutils/bucket.go b/common/dbutils/bucket.go
index 59d6e7f18a545cfc53a69641715c0813100e1bf1..d74fa1e3a381b0e69d417d6cb56f6276946d2e8e 100644
--- a/common/dbutils/bucket.go
+++ b/common/dbutils/bucket.go
@@ -142,6 +142,7 @@ const (
 	// DatabaseInfoBucket is used to store information about data layout.
 	DatabaseInfoBucket        = "DBINFO"
 	SnapshotInfoBucket        = "SNINFO"
+	BittorrentInfoBucket      = "BTINFO"
 	HeadersSnapshotInfoBucket = "hSNINFO"
 	BodiesSnapshotInfoBucket  = "bSNINFO"
 	StateSnapshotInfoBucket   = "sSNINFO"
@@ -238,6 +239,10 @@ var (
 	SnapshotHeadersHeadHash   = "SnapshotLastHeaderHash"
 	SnapshotBodyHeadNumber    = "SnapshotLastBodyNumber"
 	SnapshotBodyHeadHash      = "SnapshotLastBodyHash"
+
+	BittorrentPeerID            = "peerID"
+	CurrentHeadersSnapshotHash  = []byte("CurrentHeadersSnapshotHash")
+	CurrentHeadersSnapshotBlock = []byte("CurrentHeadersSnapshotBlock")
 )
 
 // Buckets - list of all buckets. App will panic if some bucket is not in this list.
@@ -288,7 +293,7 @@ var Buckets = []string{
 	HashedAccountsBucket,
 	HashedStorageBucket,
 	IntermediateTrieHashBucketOld2,
-
+	BittorrentInfoBucket,
 	HeaderCanonicalBucket,
 	HeadersBucket,
 	HeaderTDBucket,
diff --git a/consensus/clique/keys.go b/consensus/clique/keys.go
index 7f18133eceb9d1ca565c382004d0c3f1cb1b3fec..b7abc0d5bbb0e77098469c828fa4713fc356e433 100644
--- a/consensus/clique/keys.go
+++ b/consensus/clique/keys.go
@@ -29,4 +29,3 @@ func EncodeBlockNumber(number uint64) []byte {
 	binary.BigEndian.PutUint64(enc, number)
 	return enc
 }
-
diff --git a/core/state/history.go b/core/state/history.go
index 275d85de584614d8b3480631e8b76106e5e0cf38..4478aae4fd04aad2db9685ea62054d65c76b1464 100644
--- a/core/state/history.go
+++ b/core/state/history.go
@@ -15,9 +15,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/ethdb/bitmapdb"
 )
 
-//MaxChangesetsSearch -
-const MaxChangesetsSearch = 256
-
 func GetAsOf(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]byte, error) {
 	var dat []byte
 	v, err := FindByHistory(tx, storage, key, timestamp)
diff --git a/core/vm/analysis.go b/core/vm/analysis.go
index 27afbcf65e639cd3c19b904eed09dcfe42ca3a81..a8520d6f6d05f8b16b17963e2f76374cff3d0eda 100644
--- a/core/vm/analysis.go
+++ b/core/vm/analysis.go
@@ -16,9 +16,6 @@
 
 package vm
 
-import (
-)
-
 // codeBitmap collects data locations in code.
 func codeBitmap(code []byte) []uint64 {
 	// The bitmap is 4 bytes longer than necessary, in case the code
diff --git a/eth/backend.go b/eth/backend.go
index 2742afcbf30cea7ac1fa0fd2e81b3af81da28ec8..de9ef2d349a333a7fafd75ec21bb13e426534cc2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -36,6 +36,7 @@ import (
 	"github.com/holiman/uint256"
 	"github.com/ledgerwatch/turbo-geth/cmd/headers/download"
 	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/common/etl"
 	"github.com/ledgerwatch/turbo-geth/consensus"
 	"github.com/ledgerwatch/turbo-geth/consensus/clique"
@@ -62,7 +63,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/params"
 	"github.com/ledgerwatch/turbo-geth/rpc"
 	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync/bittorrent"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 )
@@ -95,7 +95,7 @@ type Ethereum struct {
 
 	p2pServer *p2p.Server
 
-	torrentClient *bittorrent.Client
+	torrentClient *snapshotsync.Client
 
 	lock        sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
 	events      *remotedbserver.Events
@@ -134,139 +134,41 @@ func New(stack *node.Node, config *ethconfig.Config, gitCommit string) (*Ethereu
 		return nil, err
 	}
 
-	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis, config.StorageMode.History, false /* overwrite */)
-	if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
-		return nil, genesisErr
-	}
-	log.Info("Initialised chain configuration", "config", chainConfig)
-
-	var torrentClient *bittorrent.Client
-	if config.SnapshotMode != (snapshotsync.SnapshotMode{}) && config.NetworkID == params.MainnetChainConfig.ChainID.Uint64() {
-		if config.ExternalSnapshotDownloaderAddr != "" {
-			cli, cl, innerErr := snapshotsync.NewClient(config.ExternalSnapshotDownloaderAddr)
-			if innerErr != nil {
-				return nil, innerErr
-			}
-			defer cl() //nolint
-
-			_, innerErr = cli.Download(context.Background(), &snapshotsync.DownloadSnapshotRequest{
-				NetworkId: config.NetworkID,
-				Type:      config.SnapshotMode.ToSnapshotTypes(),
-			})
-			if innerErr != nil {
-				return nil, innerErr
-			}
-
-			waitDownload := func() (map[snapshotsync.SnapshotType]*snapshotsync.SnapshotsInfo, error) {
-				snapshotReadinessCheck := func(mp map[snapshotsync.SnapshotType]*snapshotsync.SnapshotsInfo, tp snapshotsync.SnapshotType) bool {
-					if mp[tp].Readiness != int32(100) {
-						log.Info("Downloading", "snapshot", tp, "%", mp[tp].Readiness)
-						return false
-					}
-					return true
-				}
-				for {
-					mp := make(map[snapshotsync.SnapshotType]*snapshotsync.SnapshotsInfo)
-					snapshots, err1 := cli.Snapshots(context.Background(), &snapshotsync.SnapshotsRequest{NetworkId: config.NetworkID})
-					if err1 != nil {
-						return nil, err1
-					}
-					for i := range snapshots.Info {
-						if mp[snapshots.Info[i].Type].SnapshotBlock < snapshots.Info[i].SnapshotBlock && snapshots.Info[i] != nil {
-							mp[snapshots.Info[i].Type] = snapshots.Info[i]
-						}
-					}
-
-					downloaded := true
-					if config.SnapshotMode.Headers {
-						if !snapshotReadinessCheck(mp, snapshotsync.SnapshotType_headers) {
-							downloaded = false
-						}
-					}
-					if config.SnapshotMode.Bodies {
-						if !snapshotReadinessCheck(mp, snapshotsync.SnapshotType_bodies) {
-							downloaded = false
-						}
-					}
-					if config.SnapshotMode.State {
-						if !snapshotReadinessCheck(mp, snapshotsync.SnapshotType_state) {
-							downloaded = false
-						}
-					}
-					if config.SnapshotMode.Receipts {
-						if !snapshotReadinessCheck(mp, snapshotsync.SnapshotType_receipts) {
-							downloaded = false
-						}
-					}
-					if downloaded {
-						return mp, nil
-					}
-					time.Sleep(time.Second * 10)
-				}
-			}
-			downloadedSnapshots, innerErr := waitDownload()
-			if innerErr != nil {
-				return nil, innerErr
-			}
-			snapshotKV := chainDb.(ethdb.HasRwKV).RwKV()
-
-			snapshotKV, innerErr = snapshotsync.WrapBySnapshotsFromDownloader(snapshotKV, downloadedSnapshots)
-			if innerErr != nil {
-				return nil, innerErr
-			}
-			chainDb.(ethdb.HasRwKV).SetRwKV(snapshotKV)
-
-			innerErr = snapshotsync.PostProcessing(chainDb, config.SnapshotMode, downloadedSnapshots)
-			if innerErr != nil {
-				return nil, innerErr
-			}
-		} else {
-			var dbPath string
-			dbPath, err = stack.Config().ResolvePath("snapshots")
-			if err != nil {
-				return nil, err
-			}
-			torrentClient, err = bittorrent.New(dbPath, config.SnapshotSeeding)
-			if err != nil {
-				return nil, err
-			}
-
-			err = torrentClient.Load(chainDb)
+	var torrentClient *snapshotsync.Client
+	snapshotsDir := stack.Config().ResolvePath("snapshots")
+	if config.SnapshotLayout {
+		v, err := chainDb.Get(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID))
+		if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+			log.Error("Get bittorrent peer", "err", err)
+		}
+		torrentClient, err = snapshotsync.New(snapshotsDir, config.SnapshotSeeding, string(v))
+		if err != nil {
+			return nil, err
+		}
+		if len(v) == 0 {
+			log.Info("Generated new bittorrent peerID", "id", common.Bytes2Hex(torrentClient.PeerID()))
+			err = torrentClient.SavePeerID(chainDb)
 			if err != nil {
-				return nil, err
+				log.Error("Bittorrent peerID wasn't saved", "err", err)
 			}
-			err = torrentClient.AddSnapshotsTorrents(context.Background(), chainDb, config.NetworkID, config.SnapshotMode)
-			if err == nil {
-				torrentClient.Download()
-				snapshotKV := chainDb.(ethdb.HasRwKV).RwKV()
-				mp, innerErr := torrentClient.GetSnapshots(chainDb, config.NetworkID)
-				if innerErr != nil {
-					return nil, innerErr
-				}
+		}
 
-				snapshotKV, innerErr = snapshotsync.WrapBySnapshotsFromDownloader(snapshotKV, mp)
-				if innerErr != nil {
-					return nil, innerErr
-				}
-				chainDb.(ethdb.HasRwKV).SetRwKV(snapshotKV)
-				tx, err := chainDb.Begin(context.Background(), ethdb.RW)
-				if err != nil {
-					return nil, err
-				}
-				defer tx.Rollback()
-				innerErr = snapshotsync.PostProcessing(chainDb, config.SnapshotMode, mp)
-				if err = tx.Commit(); err != nil {
-					return nil, err
-				}
-				if innerErr != nil {
-					return nil, innerErr
-				}
-			} else {
-				log.Error("There was an error in snapshot init. Swithing to regular sync", "err", err)
-			}
+		err = snapshotsync.WrapSnapshots(chainDb, snapshotsDir)
+		if err != nil {
+			return nil, err
+		}
+		err = snapshotsync.SnapshotSeeding(chainDb, torrentClient, "headers", snapshotsDir)
+		if err != nil {
+			return nil, err
 		}
 	}
 
+	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis, config.StorageMode.History, false /* overwrite */)
+	if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
+		return nil, genesisErr
+	}
+	log.Info("Initialised chain configuration", "config", chainConfig)
+
 	eth := &Ethereum{
 		config:        config,
 		chainDB:       chainDb,
@@ -313,10 +215,7 @@ func New(stack *node.Node, config *ethconfig.Config, gitCommit string) (*Ethereu
 	txCacher := core.NewTxSenderCacher(runtime.NumCPU())
 
 	if config.TxPool.Journal != "" {
-		config.TxPool.Journal, err = stack.ResolvePath(config.TxPool.Journal)
-		if err != nil {
-			return nil, err
-		}
+		config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
 	}
 
 	eth.txPool = core.NewTxPool(config.TxPool, chainConfig, chainDb, txCacher)
@@ -325,14 +224,34 @@ func New(stack *node.Node, config *ethconfig.Config, gitCommit string) (*Ethereu
 
 	// setting notifier to support streaming events to rpc daemon
 	eth.events = remotedbserver.NewEvents()
+	var mg *snapshotsync.SnapshotMigrator
+	if config.SnapshotLayout {
+		currentSnapshotBlock, currentInfohash, err := snapshotsync.GetSnapshotInfo(chainDb)
+		if err != nil {
+			return nil, err
+		}
+		mg = snapshotsync.NewMigrator(snapshotsDir, currentSnapshotBlock, currentInfohash)
+		err = mg.RemoveNonCurrentSnapshots()
+		if err != nil {
+			log.Error("Remove non current snapshot", "err", err)
+		}
+	}
 	if stagedSync == nil {
 		// if there is not stagedsync, we create one with the custom notifier
-		stagedSync = stagedsync.New(stagedsync.DefaultStages(), stagedsync.DefaultUnwindOrder(), stagedsync.OptionalParameters{Notifier: eth.events})
+		if config.SnapshotLayout {
+			stagedSync = stagedsync.New(stagedsync.WithSnapshotsStages(), stagedsync.UnwindOrderWithSnapshots(), stagedsync.OptionalParameters{Notifier: eth.events, SnapshotDir: snapshotsDir, TorrnetClient: torrentClient, SnapshotMigrator: mg})
+		} else {
+			stagedSync = stagedsync.New(stagedsync.DefaultStages(), stagedsync.DefaultUnwindOrder(), stagedsync.OptionalParameters{Notifier: eth.events})
+		}
 	} else {
 		// otherwise we add one if needed
 		if stagedSync.Notifier == nil {
 			stagedSync.Notifier = eth.events
 		}
+		if config.SnapshotLayout {
+			stagedSync.SetTorrentParams(torrentClient, snapshotsDir, mg)
+			log.Info("Set torrent params", "snapshotsDir", snapshotsDir)
+		}
 	}
 
 	mining := stagedsync.New(stagedsync.MiningStages(), stagedsync.MiningUnwindOrder(), stagedsync.OptionalParameters{})
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 8bb6e2c24a3974793bd66e52ad0e002681e4bdcf..c477ee0e043cc0285e0e9e79aed011edfcf28e86 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -127,6 +127,7 @@ type Config struct {
 	BatchSize       datasize.ByteSize // Batch size for execution stage
 	SnapshotMode    snapshotsync.SnapshotMode
 	SnapshotSeeding bool
+	SnapshotLayout  bool
 
 	// Address to connect to external snapshot downloader
 	// empty if you want to use internal bittorrent snapshot downloader
diff --git a/eth/stagedsync/all_stages.go b/eth/stagedsync/all_stages.go
index 0dcdc707521d91690d0b45b3fc39cdc1174c5b7e..7fcfca4f1ed4010cafb68fe32909bf6a121463bb 100644
--- a/eth/stagedsync/all_stages.go
+++ b/eth/stagedsync/all_stages.go
@@ -283,7 +283,7 @@ func createStageBuilders(blocks []*types.Block, blockNum uint64, checkRoot bool)
 					ID:          stages.Finish,
 					Description: "Final: update current block for the RPC API",
 					ExecFunc: func(s *StageState, _ Unwinder) error {
-						return FinishForward(s, world.TX, world.notifier)
+						return FinishForward(s, world.DB, world.notifier, world.TX, world.btClient, world.SnapshotBuilder)
 					},
 					UnwindFunc: func(u *UnwindState, s *StageState) error {
 						return UnwindFinish(u, s, world.TX)
diff --git a/eth/stagedsync/replacement_stages.go.go b/eth/stagedsync/replacement_stages.go.go
index a7221b4bba311a4b02881b431e450e89b72770c1..89f25fa5115e9bb6898da193554ca1785682ef3b 100644
--- a/eth/stagedsync/replacement_stages.go.go
+++ b/eth/stagedsync/replacement_stages.go.go
@@ -327,7 +327,7 @@ func ReplacementStages(ctx context.Context,
 					ID:          stages.Finish,
 					Description: "Final: update current block for the RPC API",
 					ExecFunc: func(s *StageState, _ Unwinder) error {
-						return FinishForward(s, world.DB, world.notifier)
+						return FinishForward(s, world.DB, world.notifier, world.TX, world.btClient, world.SnapshotBuilder)
 					},
 					UnwindFunc: func(u *UnwindState, s *StageState) error {
 						return UnwindFinish(u, s, world.DB)
diff --git a/eth/stagedsync/stage_bodies_snapshot.go b/eth/stagedsync/stage_bodies_snapshot.go
new file mode 100644
index 0000000000000000000000000000000000000000..5118b0d71a82d1b212267bafe4531ccfd80b0afd
--- /dev/null
+++ b/eth/stagedsync/stage_bodies_snapshot.go
@@ -0,0 +1,11 @@
+package stagedsync
+
+import (
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
+)
+
+func SpawnBodiesSnapshotGenerationStage(s *StageState, db ethdb.Database, snapshotDir string, torrentClient *snapshotsync.Client, quit <-chan struct{}) error {
+	s.Done()
+	return nil
+}
diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go
index c75b2782465942da941a8afa7104687fd209b72f..7717b8d5c011ca12292ecd481f5863469b772ba1 100644
--- a/eth/stagedsync/stage_finish.go
+++ b/eth/stagedsync/stage_finish.go
@@ -2,13 +2,14 @@ package stagedsync
 
 import (
 	"fmt"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 
 	"github.com/ledgerwatch/turbo-geth/core/rawdb"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
 )
 
-func FinishForward(s *StageState, db ethdb.Database, notifier ChainEventNotifier) error {
+func FinishForward(s *StageState, db ethdb.Database, notifier ChainEventNotifier, tx ethdb.Database, btClient *snapshotsync.Client, snBuilder *snapshotsync.SnapshotMigrator) error {
 	var executionAt uint64
 	var err error
 	if executionAt, err = s.ExecutionAt(db); err != nil {
@@ -27,6 +28,10 @@ func FinishForward(s *StageState, db ethdb.Database, notifier ChainEventNotifier
 		return err
 	}
 
+	err = MigrateSnapshot(executionAt, tx, db, btClient, snBuilder)
+	if err != nil {
+		return err
+	}
 	return s.DoneAndUpdate(db, executionAt)
 }
 
@@ -46,5 +51,15 @@ func NotifyNewHeaders(from, to uint64, notifier ChainEventNotifier, db ethdb.Dat
 		}
 		notifier.OnNewHeader(header)
 	}
+
 	return nil
 }
+
+func MigrateSnapshot(to uint64, tx ethdb.Database, db ethdb.Database, btClient *snapshotsync.Client, mg *snapshotsync.SnapshotMigrator) error {
+	if mg == nil {
+		return nil
+	}
+
+	snBlock := snapshotsync.CalculateEpoch(to, snapshotsync.EpochSize)
+	return mg.Migrate(db, tx, snBlock, btClient)
+}
diff --git a/eth/stagedsync/stage_headers_snapshot.go b/eth/stagedsync/stage_headers_snapshot.go
new file mode 100644
index 0000000000000000000000000000000000000000..35184895e62f7b7adb6c55befc4c69cd3c17c4ac
--- /dev/null
+++ b/eth/stagedsync/stage_headers_snapshot.go
@@ -0,0 +1,65 @@
+package stagedsync
+
+import (
+	"fmt"
+	"github.com/ledgerwatch/turbo-geth/eth/stagedsync/stages"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/log"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
+	"time"
+)
+
+func SpawnHeadersSnapshotGenerationStage(s *StageState, db ethdb.Database, sm *snapshotsync.SnapshotMigrator, snapshotDir string, torrentClient *snapshotsync.Client, quit <-chan struct{}) error {
+	to, err := stages.GetStageProgress(db, stages.Headers)
+	if err != nil {
+		return fmt.Errorf("%w", err)
+	}
+
+	currentSnapshotBlock, err := stages.GetStageProgress(db, stages.CreateHeadersSnapshot)
+	if err != nil {
+		return fmt.Errorf("%w", err)
+	}
+
+	//Problem: we must inject this stage, because it's not possible to compact mdbx after sync.
+	//So we have to move headers into the snapshot right after the headers stage,
+	//but we don't want to block non-initial sync
+	if to < currentSnapshotBlock+snapshotsync.EpochSize {
+		s.Done()
+		return nil
+	}
+
+	if to < snapshotsync.EpochSize {
+		s.Done()
+		return nil
+	}
+	if s.BlockNumber > to {
+		return fmt.Errorf("headers snapshot is higher canonical. snapshot %d headers %d", s.BlockNumber, to)
+	}
+
+	snapshotBlock := snapshotsync.CalculateEpoch(to, snapshotsync.EpochSize)
+
+	if s.BlockNumber == snapshotBlock {
+		// we already did snapshot creation for this block
+		s.Done()
+		return nil
+	}
+
+	err = sm.Migrate(db, db, snapshotBlock, torrentClient)
+	if err != nil {
+		return err
+	}
+	for !sm.Finished(snapshotBlock) {
+		select {
+		case <-quit:
+			break
+		default:
+			log.Info("Migrating to new snapshot", "stage", sm.GetStage())
+			err = sm.Migrate(db, db, snapshotBlock, torrentClient)
+			if err != nil {
+				return err
+			}
+		}
+		time.Sleep(time.Second * 10)
+	}
+	return s.DoneAndUpdate(db, snapshotBlock)
+}
diff --git a/eth/stagedsync/stage_state_snapshot.go b/eth/stagedsync/stage_state_snapshot.go
new file mode 100644
index 0000000000000000000000000000000000000000..396e132535b92d4cb56bc75f40784da455c64c6e
--- /dev/null
+++ b/eth/stagedsync/stage_state_snapshot.go
@@ -0,0 +1,10 @@
+package stagedsync
+
+import (
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
+)
+
+func SpawnStateSnapshotGenerationStage(s *StageState, db ethdb.Database, snapshotDir string, torrentClient *snapshotsync.Client, quit <-chan struct{}) error {
+	return s.DoneAndUpdate(db, 0)
+}
diff --git a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go
index 9db649098166de2fdb625fd4a88eeb1113faf008..e290af5d19255730ec4100a10a9eb14fb0ac07b6 100644
--- a/eth/stagedsync/stagebuilder.go
+++ b/eth/stagedsync/stagebuilder.go
@@ -12,7 +12,9 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/vm"
 	"github.com/ledgerwatch/turbo-geth/eth/stagedsync/stages"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/log"
 	"github.com/ledgerwatch/turbo-geth/params"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"github.com/ledgerwatch/turbo-geth/turbo/stages/bodydownload"
 )
 
@@ -50,6 +52,10 @@ type StageParameters struct {
 	silkwormExecutionFunc unsafe.Pointer
 	InitialCycle          bool
 	mining                *MiningCfg
+
+	snapshotsDir    string
+	btClient        *snapshotsync.Client
+	SnapshotBuilder *snapshotsync.SnapshotMigrator
 }
 
 type MiningCfg struct {
@@ -422,7 +428,7 @@ func DefaultStages() StageBuilders {
 					ID:          stages.Finish,
 					Description: "Final: update current block for the RPC API",
 					ExecFunc: func(s *StageState, _ Unwinder) error {
-						return FinishForward(s, world.DB, world.notifier)
+						return FinishForward(s, world.DB, world.notifier, world.TX, world.btClient, world.SnapshotBuilder)
 					},
 					UnwindFunc: func(u *UnwindState, s *StageState) error {
 						return UnwindFinish(u, s, world.DB)
@@ -569,6 +575,92 @@ func DefaultUnwindOrder() UnwindOrder {
 	}
 }
 
+func WithSnapshotsStages() StageBuilders {
+	defaultStages := DefaultStages()
+	blockHashesStageIndex := -1
+	sendersStageIndex := -1
+	hashedStateStageIndex := -1
+	for i := range defaultStages {
+		if defaultStages[i].ID == stages.Bodies {
+			blockHashesStageIndex = i
+		}
+		if defaultStages[i].ID == stages.Senders {
+			sendersStageIndex = i
+		}
+		if defaultStages[i].ID == stages.HashState {
+			hashedStateStageIndex = i
+		}
+	}
+	if blockHashesStageIndex < 0 || sendersStageIndex < 0 || hashedStateStageIndex < 0 {
+		log.Error("Unrecognized block hashes stage", "blockHashesStageIndex < 0", blockHashesStageIndex < 0, "sendersStageIndex < 0", sendersStageIndex < 0, "hashedStateStageIndex < 0", hashedStateStageIndex < 0)
+		return DefaultStages()
+	}
+
+	stagesWithSnapshots := make(StageBuilders, 0, len(defaultStages)+1)
+	stagesWithSnapshots = append(stagesWithSnapshots, defaultStages[:blockHashesStageIndex]...)
+	stagesWithSnapshots = append(stagesWithSnapshots, StageBuilder{
+		ID: stages.CreateHeadersSnapshot,
+		Build: func(world StageParameters) *Stage {
+			return &Stage{
+				ID:          stages.CreateHeadersSnapshot,
+				Description: "Create headers snapshot",
+				ExecFunc: func(s *StageState, u Unwinder) error {
+					return SpawnHeadersSnapshotGenerationStage(s, world.DB, world.SnapshotBuilder, world.snapshotsDir, world.btClient, world.QuitCh)
+				},
+				UnwindFunc: func(u *UnwindState, s *StageState) error {
+					return u.Done(world.DB)
+				},
+			}
+		},
+	})
+	stagesWithSnapshots = append(stagesWithSnapshots, defaultStages[blockHashesStageIndex:sendersStageIndex]...)
+	stagesWithSnapshots = append(stagesWithSnapshots, StageBuilder{
+		ID: stages.CreateBodiesSnapshot,
+		Build: func(world StageParameters) *Stage {
+			return &Stage{
+				ID:          stages.CreateBodiesSnapshot,
+				Description: "Create bodies snapshot",
+				ExecFunc: func(s *StageState, u Unwinder) error {
+					return SpawnBodiesSnapshotGenerationStage(s, world.DB, world.snapshotsDir, world.btClient, world.QuitCh)
+				},
+				UnwindFunc: func(u *UnwindState, s *StageState) error {
+					return u.Done(world.DB)
+				},
+			}
+		},
+	})
+	stagesWithSnapshots = append(stagesWithSnapshots, defaultStages[sendersStageIndex:hashedStateStageIndex]...)
+	stagesWithSnapshots = append(stagesWithSnapshots, StageBuilder{
+		ID: stages.CreateStateSnapshot,
+		Build: func(world StageParameters) *Stage {
+			return &Stage{
+				ID:          stages.CreateStateSnapshot,
+				Description: "Create state snapshot",
+				ExecFunc: func(s *StageState, u Unwinder) error {
+					return SpawnStateSnapshotGenerationStage(s, world.DB, world.snapshotsDir, world.btClient, world.QuitCh)
+				},
+				UnwindFunc: func(u *UnwindState, s *StageState) error {
+					return u.Done(world.DB)
+				},
+			}
+		},
+	})
+	stagesWithSnapshots = append(stagesWithSnapshots, defaultStages[hashedStateStageIndex:]...)
+	return stagesWithSnapshots
+}
+
+func UnwindOrderWithSnapshots() UnwindOrder {
+	return []int{
+		0, 1, 2,
+		// Unwinding of the tx pool (reinjecting transactions into the pool) needs to happen after unwinding execution;
+		// tx pool also comes before senders because the senders unwind runs inside the cycle transaction
+		15,
+		// Unwinding of IHashes needs to happen after unwinding HashState
+		3, 4, 6, 5,
+		7, 9, 10, 12, 14,
+	}
+}
+
 func MiningUnwindOrder() UnwindOrder {
 	return []int{0, 1, 2, 3, 4}
 }
diff --git a/eth/stagedsync/stagedsync.go b/eth/stagedsync/stagedsync.go
index fddde38277132164e6855c69b955be7ae3915175..bd162b0ade723bc88e89ed6bf2494cb5d53b654e 100644
--- a/eth/stagedsync/stagedsync.go
+++ b/eth/stagedsync/stagedsync.go
@@ -9,6 +9,7 @@ import (
 	"github.com/ledgerwatch/turbo-geth/core/vm"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/params"
+	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"github.com/ledgerwatch/turbo-geth/turbo/stages/bodydownload"
 )
 
@@ -35,6 +36,10 @@ type OptionalParameters struct {
 	Notifier ChainEventNotifier
 
 	SilkwormExecutionFunc unsafe.Pointer
+
+	SnapshotDir      string
+	TorrnetClient    *snapshotsync.Client
+	SnapshotMigrator *snapshotsync.SnapshotMigrator
 }
 
 func New(stages StageBuilders, unwindOrder UnwindOrder, params OptionalParameters) *StagedSync {
@@ -99,6 +104,9 @@ func (stagedSync *StagedSync) Prepare(
 			silkwormExecutionFunc: stagedSync.params.SilkwormExecutionFunc,
 			InitialCycle:          initialCycle,
 			mining:                miningConfig,
+			snapshotsDir:          stagedSync.params.SnapshotDir,
+			btClient:              stagedSync.params.TorrnetClient,
+			SnapshotBuilder:       stagedSync.params.SnapshotMigrator,
 		},
 	)
 	state := NewState(stages)
@@ -117,3 +125,9 @@ func (stagedSync *StagedSync) Prepare(
 	}
 	return state, nil
 }
+
+func (stagedSync *StagedSync) SetTorrentParams(client *snapshotsync.Client, snapshotsDir string, snapshotMigrator *snapshotsync.SnapshotMigrator) {
+	stagedSync.params.TorrnetClient = client
+	stagedSync.params.SnapshotDir = snapshotsDir
+	stagedSync.params.SnapshotMigrator = snapshotMigrator
+}
diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go
index c48868ba0d20f2248a60845c2cb8d3f1c3a86a77..55dbe7419afe8f96bc7000c3d2663ee4b184a348 100644
--- a/eth/stagedsync/stages/stages.go
+++ b/eth/stagedsync/stages/stages.go
@@ -48,6 +48,10 @@ var (
 	MiningCreateBlock SyncStage = "MiningCreateBlock"
 	MiningExecution   SyncStage = "MiningExecution"
 	MiningFinish      SyncStage = "MiningFinish"
+
+	CreateHeadersSnapshot SyncStage = "CreateHeadersSnapshot"
+	CreateBodiesSnapshot  SyncStage = "CreateBodiesSnapshot"
+	CreateStateSnapshot   SyncStage = "CreateStateSnapshot"
 )
 
 var AllStages = []SyncStage{
diff --git a/eth/stagedsync/state_test.go b/eth/stagedsync/state_test.go
index 8402f13ccf49212682c64ea656dc53d62ed2db80..f84b8ea46d654a860938570df0446d55c0b78559 100644
--- a/eth/stagedsync/state_test.go
+++ b/eth/stagedsync/state_test.go
@@ -2,6 +2,7 @@ package stagedsync
 
 import (
 	"errors"
+	"github.com/stretchr/testify/require"
 	"testing"
 
 	"github.com/ledgerwatch/turbo-geth/eth/stagedsync/stages"
@@ -693,3 +694,20 @@ func TestStateSyncInterruptLongUnwind(t *testing.T) {
 func unwindOf(s stages.SyncStage) stages.SyncStage {
 	return stages.SyncStage(append([]byte(s), 0xF0))
 }
+
+func TestSnapshotUnwindOrderEqualDefault(t *testing.T) {
+	stagesWithSnapshots := WithSnapshotsStages()
+	defaultStages := DefaultStages()
+	snUnwindOrder := UnwindOrderWithSnapshots()
+	unwindOrder := DefaultUnwindOrder()
+	snUnwindIDs := make([]stages.SyncStage, 0)
+	unwindIDs := make([]stages.SyncStage, 0)
+	for _, i := range snUnwindOrder {
+		snUnwindIDs = append(snUnwindIDs, stagesWithSnapshots[len(stagesWithSnapshots)-i-2].ID)
+	}
+	for _, i := range unwindOrder {
+		unwindIDs = append(unwindIDs, defaultStages[len(defaultStages)-i-2].ID)
+	}
+
+	require.Equal(t, snUnwindIDs, unwindIDs)
+}
diff --git a/ethdb/kv_lmdb.go b/ethdb/kv_lmdb.go
index b279461e5b606d0a5251e3f640cffb00ef7285b2..4b26e62b82d1914caebb94019f2b00fcab298e56 100644
--- a/ethdb/kv_lmdb.go
+++ b/ethdb/kv_lmdb.go
@@ -9,6 +9,8 @@ import (
 	"os"
 	"path"
 	"runtime"
+	"sort"
+	"strings"
 	"sync"
 	"time"
 	"unsafe"
@@ -185,14 +187,15 @@ func (opts LmdbOpts) Open() (kv RwKV, err error) {
 		db.buckets[name] = cfg
 	}
 
+	buckets := bucketSlice(db.buckets)
 	// Open or create buckets
 	if opts.flags&lmdb.Readonly != 0 {
 		tx, innerErr := db.BeginRo(context.Background())
 		if innerErr != nil {
 			return nil, innerErr
 		}
-		for name, cfg := range db.buckets {
-			if cfg.IsDeprecated {
+		for _, name := range buckets {
+			if db.buckets[name].IsDeprecated {
 				continue
 			}
 			if err = tx.(BucketMigrator).CreateBucket(name); err != nil {
@@ -205,8 +208,8 @@ func (opts LmdbOpts) Open() (kv RwKV, err error) {
 		}
 	} else {
 		if err := db.Update(context.Background(), func(tx RwTx) error {
-			for name, cfg := range db.buckets {
-				if cfg.IsDeprecated {
+			for _, name := range buckets {
+				if db.buckets[name].IsDeprecated {
 					continue
 				}
 				if err := tx.(BucketMigrator).CreateBucket(name); err != nil {
@@ -221,9 +224,9 @@ func (opts LmdbOpts) Open() (kv RwKV, err error) {
 
 	// Configure buckets and open deprecated buckets
 	if err := env.View(func(tx *lmdb.Txn) error {
-		for name, cfg := range db.buckets {
+		for _, name := range buckets {
 			// Open deprecated buckets if they exist, don't create
-			if !cfg.IsDeprecated {
+			if !db.buckets[name].IsDeprecated {
 				continue
 			}
 			dbi, createErr := tx.OpenDBI(name, 0)
@@ -1449,3 +1452,14 @@ func (c *LmdbDupSortCursor) CountDuplicates() (uint64, error) {
 	}
 	return res, nil
 }
+
+func bucketSlice(b dbutils.BucketsCfg) []string {
+	buckets := make([]string, 0, len(b))
+	for name := range b {
+		buckets = append(buckets, name)
+	}
+	sort.Slice(buckets, func(i, j int) bool {
+		return strings.Compare(buckets[i], buckets[j]) < 0
+	})
+	return buckets
+}
diff --git a/ethdb/kv_mdbx.go b/ethdb/kv_mdbx.go
index 8a0cc652e3450e802b7a9c5ae70563e937f75a80..af9f5f2d311e1f2a22a994f455dcbded60cee96e 100644
--- a/ethdb/kv_mdbx.go
+++ b/ethdb/kv_mdbx.go
@@ -189,14 +189,15 @@ func (opts MdbxOpts) Open() (RwKV, error) {
 		db.buckets[name] = cfg
 	}
 
+	buckets := bucketSlice(db.buckets)
 	// Open or create buckets
 	if opts.flags&mdbx.Readonly != 0 {
 		tx, innerErr := db.BeginRo(context.Background())
 		if innerErr != nil {
 			return nil, innerErr
 		}
-		for name, cfg := range db.buckets {
-			if cfg.IsDeprecated {
+		for _, name := range buckets {
+			if db.buckets[name].IsDeprecated {
 				continue
 			}
 			if err = tx.(BucketMigrator).CreateBucket(name); err != nil {
@@ -209,8 +210,8 @@ func (opts MdbxOpts) Open() (RwKV, error) {
 		}
 	} else {
 		if err := db.Update(context.Background(), func(tx RwTx) error {
-			for name, cfg := range db.buckets {
-				if cfg.IsDeprecated {
+			for _, name := range buckets {
+				if db.buckets[name].IsDeprecated {
 					continue
 				}
 				if err := tx.(BucketMigrator).CreateBucket(name); err != nil {
@@ -225,9 +226,9 @@ func (opts MdbxOpts) Open() (RwKV, error) {
 
 	// Configure buckets and open deprecated buckets
 	if err := env.View(func(tx *mdbx.Txn) error {
-		for name, cfg := range db.buckets {
+		for _, name := range buckets {
 			// Open deprecated buckets if they exist, don't create
-			if !cfg.IsDeprecated {
+			if !db.buckets[name].IsDeprecated {
 				continue
 			}
 			cnfCopy := db.buckets[name]
diff --git a/ethdb/kv_snapshot.go b/ethdb/kv_snapshot.go
index 75ccb7605e18554ad250c621b9d388b3722c7fbe..11c06b9aed91b0006e02a0ffff8c25f6d175a818 100644
--- a/ethdb/kv_snapshot.go
+++ b/ethdb/kv_snapshot.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync"
 	"unsafe"
 
 	"github.com/ledgerwatch/turbo-geth/common"
@@ -12,26 +13,34 @@ import (
 )
 
 var (
-	_ RwKV           = &SnapshotKV2{}
-	_ Tx             = &sn2TX{}
-	_ BucketMigrator = &sn2TX{}
-	_ Cursor         = &snCursor2{}
+	_ RwKV           = &SnapshotKV{}
+	_ RoKV           = &SnapshotKV{}
+	_ Tx             = &snTX{}
+	_ BucketMigrator = &snTX{}
+	_ RwCursor       = &snCursor{}
+	_ Cursor         = &snCursor{}
 )
 
-func NewSnapshot2KV() snapshotOpts2 {
-	return snapshotOpts2{}
+type SnapshotUpdater interface {
+	UpdateSnapshots(buckets []string, snapshotKV RoKV, done chan struct{})
+	WriteDB() RwKV
+	SnapshotKV(bucket string) RoKV
+}
+
+func NewSnapshotKV() snapshotOpts {
+	return snapshotOpts{}
 }
 
 type snapshotData struct {
 	buckets  []string
-	snapshot RwKV
+	snapshot RoKV
 }
-type snapshotOpts2 struct {
+type snapshotOpts struct {
 	db        RwKV
 	snapshots []snapshotData
 }
 
-func (opts snapshotOpts2) SnapshotDB(buckets []string, db RwKV) snapshotOpts2 {
+func (opts snapshotOpts) SnapshotDB(buckets []string, db RoKV) snapshotOpts {
 	opts.snapshots = append(opts.snapshots, snapshotData{
 		buckets:  buckets,
 		snapshot: db,
@@ -39,30 +48,31 @@ func (opts snapshotOpts2) SnapshotDB(buckets []string, db RwKV) snapshotOpts2 {
 	return opts
 }
 
-func (opts snapshotOpts2) DB(db RwKV) snapshotOpts2 {
+func (opts snapshotOpts) DB(db RwKV) snapshotOpts {
 	opts.db = db
 	return opts
 }
 
-func (opts snapshotOpts2) MustOpen() RwKV {
+func (opts snapshotOpts) Open() RwKV {
 	snapshots := make(map[string]snapshotData)
 	for i, v := range opts.snapshots {
 		for _, bucket := range v.buckets {
 			snapshots[bucket] = opts.snapshots[i]
 		}
 	}
-	return &SnapshotKV2{
+	return &SnapshotKV{
 		snapshots: snapshots,
 		db:        opts.db,
 	}
 }
 
-type SnapshotKV2 struct {
+type SnapshotKV struct {
 	db        RwKV
+	mtx       sync.RWMutex
 	snapshots map[string]snapshotData
 }
 
-func (s *SnapshotKV2) View(ctx context.Context, f func(tx Tx) error) error {
+func (s *SnapshotKV) View(ctx context.Context, f func(tx Tx) error) error {
 	snTX, err := s.BeginRo(ctx)
 	if err != nil {
 		return err
@@ -71,7 +81,7 @@ func (s *SnapshotKV2) View(ctx context.Context, f func(tx Tx) error) error {
 	return f(snTX)
 }
 
-func (s *SnapshotKV2) Update(ctx context.Context, f func(tx RwTx) error) error {
+func (s *SnapshotKV) Update(ctx context.Context, f func(tx RwTx) error) error {
 	tx, err := s.BeginRw(ctx)
 	if err != nil {
 		return err
@@ -85,75 +95,158 @@ func (s *SnapshotKV2) Update(ctx context.Context, f func(tx RwTx) error) error {
 	return err
 }
 
-func (s *SnapshotKV2) Close() {
+func (s *SnapshotKV) Close() {
 	s.db.Close()
 	for i := range s.snapshots {
 		s.snapshots[i].snapshot.Close()
 	}
 }
 
-func (s *SnapshotKV2) CollectMetrics() {
+func (s *SnapshotKV) UpdateSnapshots(buckets []string, snapshotKV RoKV, done chan struct{}) {
+	sd := snapshotData{
+		buckets:  buckets,
+		snapshot: snapshotKV,
+	}
+
+	toClose := []RoKV{}
+	var (
+		snData snapshotData
+		ok     bool
+	)
+	s.mtx.Lock()
+	for _, bucket := range buckets {
+		snData, ok = s.snapshots[bucket]
+		if ok {
+			toClose = append(toClose, snData.snapshot)
+		}
+		s.snapshots[bucket] = sd
+	}
+	s.mtx.Unlock()
+
+	go func() {
+		wg := sync.WaitGroup{}
+		wg.Add(len(toClose))
+
+		for i := range toClose {
+			i := i
+			go func() {
+				defer wg.Done()
+				toClose[i].Close()
+			}()
+		}
+		wg.Wait()
+		close(done)
+	}()
+}
+
+func (s *SnapshotKV) WriteDB() RwKV {
+	return s.db
+}
+func (s *SnapshotKV) SnapshotKV(bucket string) RoKV {
+	return s.snapshots[bucket].snapshot
+}
+
+func (s *SnapshotKV) CollectMetrics() {
 	s.db.CollectMetrics()
 }
 
-func (s *SnapshotKV2) BeginRo(ctx context.Context) (Tx, error) {
+func (s *SnapshotKV) BeginRo(ctx context.Context) (Tx, error) {
 	dbTx, err := s.db.BeginRo(ctx)
 	if err != nil {
 		return nil, err
 	}
-	return &sn2TX{
+	return &snTX{
 		dbTX:      dbTx,
-		snapshots: s.snapshots,
+		snapshots: s.copySnapshots(),
 		snTX:      map[string]Tx{},
 	}, nil
 }
+func (s *SnapshotKV) copySnapshots() map[string]snapshotData {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+	mp := make(map[string]snapshotData, len(s.snapshots))
+	for i := range s.snapshots {
+		mp[i] = s.snapshots[i]
+	}
+	return mp
+}
 
-func (s *SnapshotKV2) BeginRw(ctx context.Context) (RwTx, error) {
+func (s *SnapshotKV) BeginRw(ctx context.Context) (RwTx, error) {
 	dbTx, err := s.db.BeginRw(ctx)
 	if err != nil {
 		return nil, err
 	}
-	return &sn2TX{
+	return &snTX{
 		dbTX:      dbTx,
-		snapshots: s.snapshots,
+		snapshots: s.copySnapshots(),
 		snTX:      map[string]Tx{},
 	}, nil
 }
 
-func (s *SnapshotKV2) AllBuckets() dbutils.BucketsCfg {
+func (s *SnapshotKV) AllBuckets() dbutils.BucketsCfg {
 	return s.db.AllBuckets()
 }
 
 var ErrUnavailableSnapshot = errors.New("unavailable snapshot")
 
-type sn2TX struct {
+type snTX struct {
 	dbTX      Tx
 	snapshots map[string]snapshotData
 	snTX      map[string]Tx
 }
 
-func (s *sn2TX) DropBucket(bucket string) error {
+type DBTX interface {
+	DBTX() RwTx
+}
+
+func (s *snTX) DBTX() RwTx {
+	return s.dbTX.(RwTx)
+}
+func (s *snTX) RwCursor(bucket string) (RwCursor, error) {
+	tx, err := s.getSnapshotTX(bucket)
+	if err != nil && !errors.Is(err, ErrUnavailableSnapshot) {
+		panic(err.Error())
+	}
+	//process only db buckets
+	if errors.Is(err, ErrUnavailableSnapshot) {
+		return s.dbTX.(RwTx).RwCursor(bucket)
+	}
+	dbCursor, err := s.dbTX.(RwTx).RwCursor(bucket)
+	if err != nil {
+		return nil, err
+	}
+	snCursor2, err := tx.Cursor(bucket)
+	if err != nil {
+		return nil, err
+	}
+	return &snCursor{
+		dbCursor: dbCursor,
+		snCursor: snCursor2,
+	}, nil
+
+}
+
+func (s *snTX) DropBucket(bucket string) error {
 	return s.dbTX.(BucketMigrator).DropBucket(bucket)
 }
 
-func (s *sn2TX) CreateBucket(bucket string) error {
+func (s *snTX) CreateBucket(bucket string) error {
 	return s.dbTX.(BucketMigrator).CreateBucket(bucket)
 }
 
-func (s *sn2TX) ExistsBucket(bucket string) bool {
-	//todo snapshot check?
+func (s *snTX) ExistsBucket(bucket string) bool {
 	return s.dbTX.(BucketMigrator).ExistsBucket(bucket)
 }
 
-func (s *sn2TX) ClearBucket(bucket string) error {
+func (s *snTX) ClearBucket(bucket string) error {
 	return s.dbTX.(BucketMigrator).ClearBucket(bucket)
 }
 
-func (s *sn2TX) ExistingBuckets() ([]string, error) {
-	panic("implement me")
+func (s *snTX) ExistingBuckets() ([]string, error) {
+	return s.dbTX.(BucketMigrator).ExistingBuckets()
 }
 
-func (s *sn2TX) Cursor(bucket string) (Cursor, error) {
+func (s *snTX) Cursor(bucket string) (Cursor, error) {
 	tx, err := s.getSnapshotTX(bucket)
 	if err != nil && !errors.Is(err, ErrUnavailableSnapshot) {
 		panic(err.Error())
@@ -166,25 +259,17 @@ func (s *sn2TX) Cursor(bucket string) (Cursor, error) {
 	if err != nil {
 		return nil, err
 	}
-	snCursor, err := tx.Cursor(bucket)
+	snCursor2, err := tx.Cursor(bucket)
 	if err != nil {
 		return nil, err
 	}
-	return &snCursor2{
+	return &snCursor{
 		dbCursor: dbCursor,
-		snCursor: snCursor,
+		snCursor: snCursor2,
 	}, nil
 }
 
-func (s *sn2TX) RwCursor(bucket string) (RwCursor, error) {
-	c, err := s.Cursor(bucket)
-	if err != nil {
-		return nil, err
-	}
-	return c.(RwCursor), nil
-}
-
-func (s *sn2TX) CursorDupSort(bucket string) (CursorDupSort, error) {
+func (s *snTX) CursorDupSort(bucket string) (CursorDupSort, error) {
 	tx, err := s.getSnapshotTX(bucket)
 	if err != nil && !errors.Is(err, ErrUnavailableSnapshot) {
 		panic(err.Error())
@@ -201,8 +286,8 @@ func (s *sn2TX) CursorDupSort(bucket string) (CursorDupSort, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &snCursor2Dup{
-		snCursor2{
+	return &snCursorDup{
+		snCursor{
 			dbCursor: dbc,
 			snCursor: sncbc,
 		},
@@ -211,7 +296,7 @@ func (s *sn2TX) CursorDupSort(bucket string) (CursorDupSort, error) {
 	}, nil
 }
 
-func (s *sn2TX) RwCursorDupSort(bucket string) (RwCursorDupSort, error) {
+func (s *snTX) RwCursorDupSort(bucket string) (RwCursorDupSort, error) {
 	c, err := s.CursorDupSort(bucket)
 	if err != nil {
 		return nil, err
@@ -219,7 +304,7 @@ func (s *sn2TX) RwCursorDupSort(bucket string) (RwCursorDupSort, error) {
 	return c.(RwCursorDupSort), nil
 }
 
-func (s *sn2TX) GetOne(bucket string, key []byte) (val []byte, err error) {
+func (s *snTX) GetOne(bucket string, key []byte) (val []byte, err error) {
 	v, err := s.dbTX.GetOne(bucket, key)
 	if err != nil {
 		return nil, err
@@ -245,19 +330,19 @@ func (s *sn2TX) GetOne(bucket string, key []byte) (val []byte, err error) {
 	return v, nil
 }
 
-func (s *sn2TX) Put(bucket string, k, v []byte) error {
+func (s *snTX) Put(bucket string, k, v []byte) error {
 	return s.dbTX.(RwTx).Put(bucket, k, v)
 }
 
-func (s *sn2TX) Delete(bucket string, k, v []byte) error {
-	return s.dbTX.(RwTx).Delete(bucket, k, v)
+func (s *snTX) Delete(bucket string, k, v []byte) error {
+	return s.dbTX.(RwTx).Put(bucket, k, DeletedValue)
 }
 
-func (s *sn2TX) CollectMetrics() {
+func (s *snTX) CollectMetrics() {
 	s.dbTX.CollectMetrics()
 }
 
-func (s *sn2TX) getSnapshotTX(bucket string) (Tx, error) {
+func (s *snTX) getSnapshotTX(bucket string) (Tx, error) {
 	tx, ok := s.snTX[bucket]
 	if ok {
 		return tx, nil
@@ -271,12 +356,11 @@ func (s *sn2TX) getSnapshotTX(bucket string) (Tx, error) {
 	if err != nil {
 		return nil, err
 	}
-
 	s.snTX[bucket] = tx
 	return tx, nil
 }
 
-func (s *sn2TX) Has(bucket string, key []byte) (bool, error) {
+func (s *snTX) Has(bucket string, key []byte) (bool, error) {
 	v, err := s.dbTX.Has(bucket, key)
 	if err != nil {
 		return false, err
@@ -304,14 +388,14 @@ func (s *sn2TX) Has(bucket string, key []byte) (bool, error) {
 	return v, nil
 }
 
-func (s *sn2TX) Commit() error {
+func (s *snTX) Commit() error {
 	for i := range s.snTX {
 		defer s.snTX[i].Rollback()
 	}
 	return s.dbTX.Commit()
 }
 
-func (s *sn2TX) Rollback() {
+func (s *snTX) Rollback() {
 	for i := range s.snTX {
 		defer s.snTX[i].Rollback()
 	}
@@ -319,40 +403,48 @@ func (s *sn2TX) Rollback() {
 
 }
 
-func (s *sn2TX) BucketSize(name string) (uint64, error) {
+func (s *snTX) BucketSize(name string) (uint64, error) {
 	panic("implement me")
 }
 
-func (s *sn2TX) Comparator(bucket string) dbutils.CmpFunc {
+func (s *snTX) Comparator(bucket string) dbutils.CmpFunc {
 	return s.dbTX.Comparator(bucket)
 }
 
-func (s *sn2TX) IncrementSequence(bucket string, amount uint64) (uint64, error) {
-	panic("implement me")
+func (s *snTX) IncrementSequence(bucket string, amount uint64) (uint64, error) {
+	return s.dbTX.(RwTx).IncrementSequence(bucket, amount)
 }
 
-func (s *sn2TX) ReadSequence(bucket string) (uint64, error) {
+func (s *snTX) ReadSequence(bucket string) (uint64, error) {
 	panic("implement me")
 }
 
-func (s *sn2TX) CHandle() unsafe.Pointer {
+func (s *snTX) CHandle() unsafe.Pointer {
 	return s.dbTX.CHandle()
 }
 
-var DeletedValue = []byte("it is deleted value")
+func (s *snTX) BucketExists(bucket string) (bool, error) {
+	return s.dbTX.(BucketsMigrator).BucketExists(bucket)
+}
+
+func (s *snTX) ClearBuckets(buckets ...string) error {
+	return s.dbTX.(BucketsMigrator).ClearBuckets(buckets...)
+}
 
-type snCursor2 struct {
+func (s *snTX) DropBuckets(buckets ...string) error {
+	return s.dbTX.(BucketsMigrator).DropBuckets(buckets...)
+}
+
+var DeletedValue = []byte{0}
+
+type snCursor struct {
 	dbCursor Cursor
 	snCursor Cursor
 
 	currentKey []byte
 }
 
-func (s *snCursor2) Prefetch(v uint) Cursor {
-	panic("implement me")
-}
-
-func (s *snCursor2) First() ([]byte, []byte, error) {
+func (s *snCursor) First() ([]byte, []byte, error) {
 	var err error
 	lastDBKey, lastDBVal, err := s.dbCursor.First()
 	if err != nil {
@@ -376,7 +468,7 @@ func (s *snCursor2) First() ([]byte, []byte, error) {
 	return lastSNDBKey, lastSNDBVal, nil
 }
 
-func (s *snCursor2) Seek(seek []byte) ([]byte, []byte, error) {
+func (s *snCursor) Seek(seek []byte) ([]byte, []byte, error) {
 	dbKey, dbVal, err := s.dbCursor.Seek(seek)
 	if err != nil && !errors.Is(err, ErrKeyNotFound) {
 		return nil, nil, err
@@ -401,7 +493,7 @@ func (s *snCursor2) Seek(seek []byte) ([]byte, []byte, error) {
 	return sndbKey, sndbVal, nil
 }
 
-func (s *snCursor2) SeekExact(key []byte) ([]byte, []byte, error) {
+func (s *snCursor) SeekExact(key []byte) ([]byte, []byte, error) {
 	k, v, err := s.dbCursor.SeekExact(key)
 	if err != nil {
 		return nil, nil, err
@@ -418,7 +510,7 @@ func (s *snCursor2) SeekExact(key []byte) ([]byte, []byte, error) {
 	return k, v, err
 }
 
-func (s *snCursor2) iteration(dbNextElement func() ([]byte, []byte, error), sndbNextElement func() ([]byte, []byte, error), cmpFunc func(kdb, ksndb []byte) (int, bool)) ([]byte, []byte, error) {
+func (s *snCursor) iteration(dbNextElement func() ([]byte, []byte, error), sndbNextElement func() ([]byte, []byte, error), cmpFunc func(kdb, ksndb []byte) (int, bool)) ([]byte, []byte, error) {
 	var err error
 	//current returns error on empty bucket
 	lastDBKey, lastDBVal, err := s.dbCursor.Current()
@@ -490,7 +582,7 @@ func (s *snCursor2) iteration(dbNextElement func() ([]byte, []byte, error), sndb
 	return lastSNDBKey, lastSNDBVal, nil
 }
 
-func (s *snCursor2) Next() ([]byte, []byte, error) {
+func (s *snCursor) Next() ([]byte, []byte, error) {
 	k, v, err := s.iteration(s.dbCursor.Next, s.snCursor.Next, common.KeyCmp) //f(s.dbCursor.Next, s.snCursor.Next)
 	if err != nil {
 		return nil, nil, err
@@ -506,7 +598,7 @@ func (s *snCursor2) Next() ([]byte, []byte, error) {
 	return k, v, nil
 }
 
-func (s *snCursor2) Prev() ([]byte, []byte, error) {
+func (s *snCursor) Prev() ([]byte, []byte, error) {
 	k, v, err := s.iteration(s.dbCursor.Prev, s.snCursor.Prev, func(kdb, ksndb []byte) (int, bool) {
 		cmp, br := KeyCmpBackward(kdb, ksndb)
 		return -1 * cmp, br
@@ -527,7 +619,7 @@ func (s *snCursor2) Prev() ([]byte, []byte, error) {
 	return k, v, nil
 }
 
-func (s *snCursor2) Last() ([]byte, []byte, error) {
+func (s *snCursor) Last() ([]byte, []byte, error) {
 	var err error
 	lastSNDBKey, lastSNDBVal, err := s.snCursor.Last()
 	if err != nil {
@@ -550,7 +642,7 @@ func (s *snCursor2) Last() ([]byte, []byte, error) {
 	return lastSNDBKey, lastSNDBVal, nil
 }
 
-func (s *snCursor2) Current() ([]byte, []byte, error) {
+func (s *snCursor) Current() ([]byte, []byte, error) {
 	k, v, err := s.dbCursor.Current()
 	if bytes.Equal(k, s.currentKey) {
 		return k, v, err
@@ -558,38 +650,38 @@ func (s *snCursor2) Current() ([]byte, []byte, error) {
 	return s.snCursor.Current()
 }
 
-func (s *snCursor2) Put(k, v []byte) error {
+func (s *snCursor) Put(k, v []byte) error {
 	return s.dbCursor.(RwCursor).Put(k, v)
 }
 
-func (s *snCursor2) Append(k []byte, v []byte) error {
+func (s *snCursor) Append(k []byte, v []byte) error {
 	return s.dbCursor.(RwCursor).Append(k, v)
 }
 
-func (s *snCursor2) Delete(k, v []byte) error {
+func (s *snCursor) Delete(k, v []byte) error {
 	return s.dbCursor.(RwCursor).Put(k, DeletedValue)
 }
 
-func (s *snCursor2) DeleteCurrent() error {
+func (s *snCursor) DeleteCurrent() error {
 	panic("implement me")
 }
 
-func (s *snCursor2) Count() (uint64, error) {
+func (s *snCursor) Count() (uint64, error) {
 	panic("implement me")
 }
 
-func (s *snCursor2) Close() {
+func (s *snCursor) Close() {
 	s.dbCursor.Close()
 	s.snCursor.Close()
 }
 
-type snCursor2Dup struct {
-	snCursor2
+type snCursorDup struct {
+	snCursor
 	dbCursorDup   CursorDupSort
 	sndbCursorDup CursorDupSort
 }
 
-func (c *snCursor2Dup) SeekBothExact(key, value []byte) ([]byte, []byte, error) {
+func (c *snCursorDup) SeekBothExact(key, value []byte) ([]byte, []byte, error) {
 	k, v, err := c.dbCursorDup.SeekBothExact(key, value)
 	if err != nil {
 		return nil, nil, err
@@ -604,7 +696,7 @@ func (c *snCursor2Dup) SeekBothExact(key, value []byte) ([]byte, []byte, error)
 
 }
 
-func (c *snCursor2Dup) SeekBothRange(key, value []byte) ([]byte, error) {
+func (c *snCursorDup) SeekBothRange(key, value []byte) ([]byte, error) {
 	dbVal, err := c.dbCursorDup.SeekBothRange(key, value)
 	if err != nil {
 		return nil, err
@@ -622,35 +714,35 @@ func (c *snCursor2Dup) SeekBothRange(key, value []byte) ([]byte, error) {
 	return snDBVal, nil
 }
 
-func (c *snCursor2Dup) FirstDup() ([]byte, error) {
+func (c *snCursorDup) FirstDup() ([]byte, error) {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) NextDup() ([]byte, []byte, error) {
+func (c *snCursorDup) NextDup() ([]byte, []byte, error) {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) NextNoDup() ([]byte, []byte, error) {
+func (c *snCursorDup) NextNoDup() ([]byte, []byte, error) {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) LastDup() ([]byte, error) {
+func (c *snCursorDup) LastDup() ([]byte, error) {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) CountDuplicates() (uint64, error) {
+func (c *snCursorDup) CountDuplicates() (uint64, error) {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) DeleteCurrentDuplicates() error {
+func (c *snCursorDup) DeleteCurrentDuplicates() error {
 	panic("implement me")
 }
 
-func (c *snCursor2Dup) AppendDup(key, value []byte) error {
+func (c *snCursorDup) AppendDup(key, value []byte) error {
 	panic("implement me")
 }
 
-func (s *snCursor2) saveCurrent(k []byte) {
+func (s *snCursor) saveCurrent(k []byte) {
 	if k != nil {
 		s.currentKey = common.CopyBytes(k)
 	}
diff --git a/ethdb/kv_snapshot_test.go b/ethdb/kv_snapshot_test.go
index d3d4e9d5bdfed51e7e2d400f48e6fca4db435b85..a40fb1732fb0736f16e2e6d5e4b4b63e51fc0596 100644
--- a/ethdb/kv_snapshot_test.go
+++ b/ethdb/kv_snapshot_test.go
@@ -4,332 +4,14 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"testing"
-
 	"github.com/ledgerwatch/turbo-geth/common"
 	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/stretchr/testify/require"
+	"pgregory.net/rapid"
+	"testing"
+	"time"
 )
 
-//func TestSnapshotGet(t *testing.T) {
-//	sn1 := NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-//		return dbutils.BucketsCfg{
-//			dbutils.HeaderPrefix: dbutils.BucketConfigItem{},
-//		}
-//	}).InMem().MustOpen()
-//	err := sn1.Update(context.Background(), func(tx Tx) error {
-//		bucket := tx.Cursor(dbutils.HeaderPrefix)
-//		innerErr := bucket.Put(dbutils.HeaderKey(1, common.Hash{1}), []byte{1})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.HeaderKey(2, common.Hash{2}), []byte{2})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		return nil
-//	})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	sn2 := NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-//		return dbutils.BucketsCfg{
-//			dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{},
-//		}
-//	}).InMem().MustOpen()
-//	err = sn2.Update(context.Background(), func(tx Tx) error {
-//		bucket := tx.Cursor(dbutils.BlockBodyPrefix)
-//		innerErr := bucket.Put(dbutils.BlockBodyKey(1, common.Hash{1}), []byte{1})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.BlockBodyKey(2, common.Hash{2}), []byte{2})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		return nil
-//	})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	mainDB := NewLMDB().InMem().MustOpen()
-//	err = mainDB.Update(context.Background(), func(tx Tx) error {
-//		bucket := tx.Cursor(dbutils.HeaderPrefix)
-//		innerErr := bucket.Put(dbutils.HeaderKey(2, common.Hash{2}), []byte{22})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.HeaderKey(3, common.Hash{3}), []byte{33})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		bucket = tx.Cursor(dbutils.BlockBodyPrefix)
-//		innerErr = bucket.Put(dbutils.BlockBodyKey(2, common.Hash{2}), []byte{22})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.BlockBodyKey(3, common.Hash{3}), []byte{33})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		return nil
-//	})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	kv := NewSnapshotKV().For(dbutils.HeaderPrefix).SnapshotDB(sn1).DB(mainDB).MustOpen()
-//	kv = NewSnapshotKV().For(dbutils.BlockBodyPrefix).SnapshotDB(sn2).DB(kv).MustOpen()
-//
-//	tx, err := kv.Begin(context.Background(), RO)
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	v, err := tx.GetOne(dbutils.HeaderPrefix, dbutils.HeaderKey(1, common.Hash{1}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{1}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.HeaderPrefix, dbutils.HeaderKey(2, common.Hash{2}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{22}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.HeaderPrefix, dbutils.HeaderKey(3, common.Hash{3}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{33}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(1, common.Hash{1}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{1}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(2, common.Hash{2}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{22}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(3, common.Hash{3}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{33}) {
-//		t.Fatal(v)
-//	}
-//
-//	headerCursor := tx.Cursor(dbutils.HeaderPrefix)
-//	k, v, err := headerCursor.Last()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !(bytes.Equal(dbutils.HeaderKey(3, common.Hash{3}), k) && bytes.Equal(v, []byte{33})) {
-//		t.Fatal(k, v)
-//	}
-//	k, v, err = headerCursor.First()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !(bytes.Equal(dbutils.HeaderKey(1, common.Hash{1}), k) && bytes.Equal(v, []byte{1})) {
-//		t.Fatal(k, v)
-//	}
-//
-//	k, v, err = headerCursor.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	if !(bytes.Equal(dbutils.HeaderKey(2, common.Hash{2}), k) && bytes.Equal(v, []byte{22})) {
-//		t.Fatal(k, v)
-//	}
-//
-//	k, v, err = headerCursor.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	if !(bytes.Equal(dbutils.HeaderKey(3, common.Hash{3}), k) && bytes.Equal(v, []byte{33})) {
-//		t.Fatal(k, v)
-//	}
-//
-//	k, v, err = headerCursor.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	if !(bytes.Equal([]byte{}, k) && bytes.Equal(v, []byte{})) {
-//		t.Fatal(k, v)
-//	}
-//}
-//
-//func TestSnapshotWritableTxAndGet(t *testing.T) {
-//	sn1 := NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-//		return dbutils.BucketsCfg{
-//			dbutils.HeaderPrefix: dbutils.BucketConfigItem{},
-//		}
-//	}).InMem().MustOpen()
-//	err := sn1.Update(context.Background(), func(tx Tx) error {
-//		bucket := tx.Cursor(dbutils.HeaderPrefix)
-//		innerErr := bucket.Put(dbutils.HeaderKey(1, common.Hash{1}), []byte{1})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.HeaderKey(2, common.Hash{2}), []byte{2})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		return nil
-//	})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	sn2 := NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-//		return dbutils.BucketsCfg{
-//			dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{},
-//		}
-//	}).InMem().MustOpen()
-//	err = sn2.Update(context.Background(), func(tx Tx) error {
-//		bucket := tx.Cursor(dbutils.BlockBodyPrefix)
-//		innerErr := bucket.Put(dbutils.BlockBodyKey(1, common.Hash{1}), []byte{1})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//		innerErr = bucket.Put(dbutils.BlockBodyKey(2, common.Hash{2}), []byte{2})
-//		if innerErr != nil {
-//			return innerErr
-//		}
-//
-//		return nil
-//	})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	mainDB := NewLMDB().InMem().MustOpen()
-//
-//	kv := NewSnapshotKV().For(dbutils.HeaderPrefix).SnapshotDB(sn1).DB(mainDB).MustOpen()
-//	kv = NewSnapshotKV().For(dbutils.BlockBodyPrefix).SnapshotDB(sn2).DB(kv).MustOpen()
-//
-//	tx, err := kv.Begin(context.Background(), RW)
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//
-//	v, err := tx.GetOne(dbutils.HeaderPrefix, dbutils.HeaderKey(1, common.Hash{1}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{1}) {
-//		t.Fatal(v)
-//	}
-//
-//	v, err = tx.GetOne(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(1, common.Hash{1}))
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(v, []byte{1}) {
-//		t.Fatal(v)
-//	}
-//
-//	err = tx.Cursor(dbutils.BlockBodyPrefix).Put(dbutils.BlockBodyKey(4, common.Hash{4}), []byte{4})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	err = tx.Cursor(dbutils.HeaderPrefix).Put(dbutils.HeaderKey(4, common.Hash{4}), []byte{4})
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	err = tx.Commit(context.Background())
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	tx, err = kv.Begin(context.Background(), RO)
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	c := tx.Cursor(dbutils.HeaderPrefix)
-//	k, v, err := c.First()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.HeaderKey(1, common.Hash{1})) {
-//		t.Fatal(k, v)
-//	}
-//
-//	k, v, err = c.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.HeaderKey(2, common.Hash{2})) {
-//		t.Fatal()
-//	}
-//
-//	k, v, err = c.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.HeaderKey(4, common.Hash{4})) {
-//		t.Fatal()
-//	}
-//	k, v, err = c.Next()
-//	if k != nil || v != nil || err != nil {
-//		t.Fatal(k, v, err)
-//	}
-//
-//	c = tx.Cursor(dbutils.BlockBodyPrefix)
-//	k, v, err = c.First()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.BlockBodyKey(1, common.Hash{1})) {
-//		t.Fatal(k, v)
-//	}
-//
-//	k, v, err = c.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.BlockBodyKey(2, common.Hash{2})) {
-//		t.Fatal()
-//	}
-//
-//	k, v, err = c.Next()
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	if !bytes.Equal(k, dbutils.BlockBodyKey(4, common.Hash{4})) {
-//		t.Fatal()
-//	}
-//	k, v, err = c.Next()
-//	if k != nil || v != nil || err != nil {
-//		t.Fatal(k, v, err)
-//	}
-//}
-
 func TestSnapshot2Get(t *testing.T) {
 	sn1 := NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 		return dbutils.BucketsCfg{
@@ -414,8 +96,8 @@ func TestSnapshot2Get(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.HeadersBucket}, sn1).
-		SnapshotDB([]string{dbutils.BlockBodyPrefix}, sn2).MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.HeadersBucket}, sn1).
+		SnapshotDB([]string{dbutils.BlockBodyPrefix}, sn2).Open()
 
 	tx, err := kv.BeginRo(context.Background())
 	if err != nil {
@@ -556,8 +238,8 @@ func TestSnapshot2WritableTxAndGet(t *testing.T) {
 
 	mainDB := NewLMDB().InMem().MustOpen()
 
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.HeadersBucket}, sn1).
-		SnapshotDB([]string{dbutils.BlockBodyPrefix}, sn2).MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.HeadersBucket}, sn1).
+		SnapshotDB([]string{dbutils.BlockBodyPrefix}, sn2).Open()
 	{
 		tx, err := kv.BeginRw(context.Background())
 		require.NoError(t, err)
@@ -662,8 +344,8 @@ func TestSnapshot2WritableTxWalkReplaceAndCreateNewKey(t *testing.T) {
 	}
 	mainDB := NewLMDB().InMem().MustOpen()
 
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -731,8 +413,8 @@ func TestSnapshot2WritableTxWalkAndDeleteKey(t *testing.T) {
 	}
 
 	mainDB := NewLMDB().InMem().MustOpen()
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -806,8 +488,8 @@ func TestSnapshot2WritableTxNextAndPrevAndDeleteKey(t *testing.T) {
 	}
 
 	mainDB := NewLMDB().InMem().MustOpen()
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -912,8 +594,8 @@ func TestSnapshot2WritableTxWalkLastElementIsSnapshot(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -996,8 +678,8 @@ func TestSnapshot2WritableTxWalkForwardAndBackward(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -1069,7 +751,6 @@ func TestSnapshot2WritableTxWalkForwardAndBackward(t *testing.T) {
 
 	i := 0
 	err = Walk(c, []byte{}, 0, func(k, v []byte) (bool, error) {
-		fmt.Println(common.Bytes2Hex(k), " => ", common.Bytes2Hex(v))
 		checkKV(t, k, v, data[i].K, data[i].V)
 		i++
 		return true, nil
@@ -1093,8 +774,8 @@ func TestSnapshot2WalkByEmptyDB(t *testing.T) {
 	}
 
 	mainDB := NewLMDB().InMem().MustOpen()
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	if err != nil {
@@ -1106,7 +787,6 @@ func TestSnapshot2WalkByEmptyDB(t *testing.T) {
 
 	i := 0
 	err = Walk(c, []byte{}, 0, func(k, v []byte) (bool, error) {
-		fmt.Println(common.Bytes2Hex(k), " => ", common.Bytes2Hex(v))
 		checkKV(t, k, v, data[i].K, data[i].V)
 		i++
 		return true, nil
@@ -1131,8 +811,8 @@ func TestSnapshot2WritablePrevAndDeleteKey(t *testing.T) {
 	}
 
 	mainDB := NewLMDB().InMem().MustOpen()
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	require.NoError(t, err)
@@ -1193,8 +873,8 @@ func TestSnapshot2WritableTxNextAndPrevWithDeleteAndPutKeys(t *testing.T) {
 	}
 
 	mainDB := NewLMDB().InMem().MustOpen()
-	kv := NewSnapshot2KV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
-		MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
 
 	tx, err := kv.BeginRw(context.Background())
 	require.NoError(t, err)
@@ -1269,7 +949,112 @@ func TestSnapshot2WritableTxNextAndPrevWithDeleteAndPutKeys(t *testing.T) {
 
 }
 
-func printBucket(kv RwKV, bucket string) {
+func TestSnapshotUpdateSnapshot(t *testing.T) {
+	data := []KvData{
+		{K: []byte{1}, V: []byte{1}},
+		{K: []byte{2}, V: []byte{2}},
+		{K: []byte{3}, V: []byte{3}},
+		{K: []byte{4}, V: []byte{4}},
+		{K: []byte{5}, V: []byte{5}},
+	}
+	snapshotDB, err := GenStateData(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	data2 := append(data, []KvData{
+		{K: []byte{6}, V: []byte{6}},
+		{K: []byte{7}, V: []byte{7}},
+	}...)
+	snapshotDB2, err := GenStateData(data2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	mainDB := NewLMDB().InMem().MustOpen()
+	kv := NewSnapshotKV().DB(mainDB).SnapshotDB([]string{dbutils.PlainStateBucket}, snapshotDB).
+		Open()
+
+	tx, err := kv.BeginRo(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	c, err := tx.Cursor(dbutils.PlainStateBucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k, v, err := c.First()
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkKVErr(t, k, v, err, []byte{1}, []byte{1})
+
+	done := make(chan struct{})
+	kv.(*SnapshotKV).UpdateSnapshots([]string{dbutils.PlainStateBucket}, snapshotDB2, done)
+
+	tx2, err := kv.BeginRo(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c2, err := tx2.Cursor(dbutils.PlainStateBucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	k2, v2, err2 := c2.First()
+	if err2 != nil {
+		t.Fatal(err2)
+	}
+	checkKVErr(t, k2, v2, err2, []byte{1}, []byte{1})
+
+	i := 2
+	for {
+		k, v, err = c.Next()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if k == nil {
+			break
+		}
+		checkKVErr(t, k, v, err, []byte{uint8(i)}, []byte{uint8(i)})
+		i++
+	}
+	//data[maxK]+1
+	if i != 6 {
+		t.Fatal("incorrect last key", i)
+	}
+	tx.Rollback()
+
+	i = 2
+	for {
+		k2, v2, err2 = c2.Next()
+		if err2 != nil {
+			t.Fatal(err2)
+		}
+		if k2 == nil {
+			break
+		}
+		checkKVErr(t, k2, v2, err2, []byte{uint8(i)}, []byte{uint8(i)})
+		i++
+	}
+	//data2[maxK]+1
+	if i != 8 {
+		t.Fatal("incorrect last key", i)
+	}
+
+	//a short delay to close
+	time.Sleep(time.Second)
+	select {
+	case <-done:
+	default:
+		t.Fatal("Hasn't closed database")
+
+	}
+}
+
+func printBucket(kv RoKV, bucket string) {
 	fmt.Println("+Print bucket", bucket)
 	defer func() {
 		fmt.Println("-Print bucket", bucket)
@@ -1295,6 +1080,19 @@ func printBucket(kv RwKV, bucket string) {
 	fmt.Println("Print err", err)
 }
 
+func checkKVErr(t *testing.T, k, v []byte, err error, expectedK, expectedV []byte) {
+	t.Helper()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(k, expectedK) {
+		t.Error("k!= expected", k, expectedK)
+	}
+	if !bytes.Equal(v, expectedV) {
+		t.Error("v!= expected", v, expectedV)
+	}
+}
+
 func checkKV(t *testing.T, key, val, expectedKey, expectedVal []byte) {
 	t.Helper()
 	if !bytes.Equal(key, expectedKey) {
@@ -1308,3 +1106,97 @@ func checkKV(t *testing.T, key, val, expectedKey, expectedVal []byte) {
 		t.Fatal("wrong value for key", common.Bytes2Hex(key))
 	}
 }
+
+type getKVMachine struct {
+	bucket        string
+	snKV          RwKV
+	modelKV       RwKV
+	overWriteKeys [][20]byte
+	snKeys        [][20]byte
+	newKeys       [][20]byte
+	allKeys       [][20]byte
+}
+
+func (m *getKVMachine) Init(t *rapid.T) {
+	m.bucket = dbutils.PlainStateBucket
+	m.snKV = NewMemKV()
+	m.modelKV = NewMemKV()
+	m.snKeys = rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Filter(func(_v [][20]byte) bool {
+		return len(_v) > 0
+	}).Draw(t, "generate keys").([][20]byte)
+	m.overWriteKeys = rapid.SliceOf(rapid.SampledFrom(m.snKeys)).Draw(t, "get snKeys").([][20]byte)
+	m.newKeys = rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Draw(t, "generate new keys").([][20]byte)
+	m.allKeys = append(m.snKeys, m.overWriteKeys...)
+	m.allKeys = append(m.allKeys, m.newKeys...)
+	notExistingKeys := rapid.SliceOf(rapid.ArrayOf(20, rapid.Byte())).Draw(t, "generate not existing keys").([][20]byte)
+	m.allKeys = append(m.allKeys, notExistingKeys...)
+
+	txSn, err := m.snKV.BeginRw(context.Background())
+	require.NoError(t, err)
+
+	txModel, err := m.modelKV.BeginRw(context.Background())
+	require.NoError(t, err)
+	defer txModel.Rollback()
+	for _, key := range m.snKeys {
+		innerErr := txSn.Put(m.bucket, key[:], []byte("sn_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+		innerErr = txModel.Put(m.bucket, key[:], []byte("sn_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+	}
+
+	//save snapshot and wrap new write db
+	err = txSn.Commit()
+	require.NoError(t, err)
+	m.snKV = NewSnapshotKV().SnapshotDB([]string{m.bucket}, m.snKV).DB(NewMemKV()).Open()
+	txSn, err = m.snKV.BeginRw(context.Background())
+	require.NoError(t, err)
+	defer txSn.Rollback()
+
+	for _, key := range m.overWriteKeys {
+		innerErr := txSn.Put(m.bucket, key[:], []byte("overwrite_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+		innerErr = txModel.Put(m.bucket, key[:], []byte("overwrite_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+	}
+	for _, key := range m.newKeys {
+		innerErr := txSn.Put(m.bucket, key[:], []byte("new_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+		innerErr = txModel.Put(m.bucket, key[:], []byte("new_"+common.Bytes2Hex(key[:])))
+		require.NoError(t, innerErr)
+	}
+	err = txSn.Commit()
+	require.NoError(t, err)
+	err = txModel.Commit()
+	require.NoError(t, err)
+}
+func (m *getKVMachine) Cleanup() {
+	m.snKV.Close()
+	m.modelKV.Close()
+}
+
+func (m *getKVMachine) Check(t *rapid.T) {
+}
+
+func (m *getKVMachine) Get(t *rapid.T) {
+	key := rapid.SampledFrom(m.allKeys).Draw(t, "get a key").([20]byte)
+	var (
+		v1, v2     []byte
+		err1, err2 error
+	)
+	err := m.snKV.View(context.Background(), func(tx Tx) error {
+		v1, err1 = tx.GetOne(m.bucket, key[:])
+		return nil
+	})
+	require.NoError(t, err)
+	err = m.modelKV.View(context.Background(), func(tx Tx) error {
+		v2, err2 = tx.GetOne(m.bucket, key[:])
+		return nil
+	})
+	require.NoError(t, err)
+	require.Equal(t, err1, err2)
+	require.Equal(t, v1, v2)
+}
+
+func TestGet(t *testing.T) {
+	rapid.Check(t, rapid.Run(&getKVMachine{}))
+}
diff --git a/go.mod b/go.mod
index 1995962ef45f8c05347714aabcd643bd462fb555..cf5047a2dfb868de7ea96ee2c1328dc9b422592a 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
 	github.com/anacrolix/log v0.8.0
 	github.com/anacrolix/torrent v1.25.1
 	github.com/aws/aws-sdk-go v1.34.21
-	github.com/blend/go-sdk v1.1.1 // indirect
+	github.com/blend/go-sdk v0.0.0-20190205012150-4a150f307fcb // indirect
 	github.com/btcsuite/btcd v0.21.0-beta
 	github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2
 	github.com/cloudflare/cloudflare-go v0.13.2
@@ -70,4 +70,5 @@ require (
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
 	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
+	pgregory.net/rapid v0.4.6
 )
diff --git a/go.sum b/go.sum
index 114e37a399a959e3fe5873876f22251706053979..77bbb725460de16fde989d41d3963ed45e246f76 100644
--- a/go.sum
+++ b/go.sum
@@ -189,8 +189,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/blend/go-sdk v1.1.1 h1:R7PcwuIxYvrGc/r9TLLfMpajIboTjqs/HyQouzgJ7mQ=
-github.com/blend/go-sdk v1.1.1/go.mod h1:IP1XHXFveOXHRnojRJO7XvqWGqyzevtXND9AdSztAe8=
+github.com/blend/go-sdk v0.0.0-20190205012150-4a150f307fcb h1:OORR/M4OxYBUb1YIOMz7d73bJMHWrm04igFo/0kaCLk=
+github.com/blend/go-sdk v0.0.0-20190205012150-4a150f307fcb/go.mod h1:IP1XHXFveOXHRnojRJO7XvqWGqyzevtXND9AdSztAe8=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
 github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
@@ -1243,6 +1243,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+pgregory.net/rapid v0.4.6 h1:0z4eYXX8FaxgeFLAaPce6zMAiQYKLMN9dqKzZgtpv4w=
+pgregory.net/rapid v0.4.6/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/migrations/remove_clique.go b/migrations/remove_clique.go
index 46b40e3d3ec5d969a83b620998fd3cfa514b4e44..30a2557fe4a1e01bb511507ca413c9f4af23f52f 100644
--- a/migrations/remove_clique.go
+++ b/migrations/remove_clique.go
@@ -22,5 +22,4 @@ var removeCliqueBucket = Migration{
 
 		return CommitProgress(db, nil, true)
 	},
-
 }
diff --git a/node/config.go b/node/config.go
index fdc98144283fc79c06cb1a695fbddd2394c58b3f..fc428f7f53b9e83952061d64782c8180ae33c3a1 100644
--- a/node/config.go
+++ b/node/config.go
@@ -191,7 +191,7 @@ func (c *Config) IPCEndpoint() string {
 }
 
 // NodeDB returns the path to the discovery node database.
-func (c *Config) NodeDB() (string, error) {
+func (c *Config) NodeDB() string {
 	return c.ResolvePath(datadirNodeDatabase)
 }
 
@@ -269,14 +269,14 @@ func (c *Config) name() string {
 }
 
 // ResolvePath resolves path in the instance directory.
-func (c *Config) ResolvePath(path string) (string, error) {
+func (c *Config) ResolvePath(path string) string {
 	if filepath.IsAbs(path) {
-		return path, nil
+		return path
 	}
 	if c.DataDir == "" {
-		return "", nil
+		return ""
 	}
-	return filepath.Join(c.instanceDir(), path), nil
+	return filepath.Join(c.instanceDir(), path)
 }
 
 func (c *Config) instanceDir() string {
@@ -307,10 +307,7 @@ func (c *Config) NodeKey() (*ecdsa.PrivateKey, error) {
 		return key, nil
 	}
 
-	keyfile, err := c.ResolvePath(datadirPrivateKey)
-	if err != nil {
-		return nil, err
-	}
+	keyfile := c.ResolvePath(datadirPrivateKey)
 	if key, err := crypto.LoadECDSA(keyfile); err == nil {
 		return key, nil
 	}
@@ -333,19 +330,13 @@ func (c *Config) NodeKey() (*ecdsa.PrivateKey, error) {
 
 // StaticNodes returns a list of node enode URLs configured as static nodes.
 func (c *Config) StaticNodes() ([]*enode.Node, error) {
-	dbPath, err := c.ResolvePath(datadirStaticNodes)
-	if err != nil {
-		return nil, err
-	}
+	dbPath := c.ResolvePath(datadirStaticNodes)
 	return c.parsePersistentNodes(&c.staticNodesWarning, dbPath), nil
 }
 
 // TrustedNodes returns a list of node enode URLs configured as trusted nodes.
 func (c *Config) TrustedNodes() ([]*enode.Node, error) {
-	dbPath, err := c.ResolvePath(datadirTrustedNodes)
-	if err != nil {
-		return nil, err
-	}
+	dbPath := c.ResolvePath(datadirTrustedNodes)
 	return c.parsePersistentNodes(&c.trustedNodesWarning, dbPath), nil
 }
 
diff --git a/node/defaults.go b/node/defaults.go
index c15243eb62095743282cd69c9e34dc7352d114f0..80c386e54bd2d14d347c912536ceb25f4eb89cc3 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -32,13 +32,13 @@ const (
 
 // DefaultConfig contains reasonable default settings.
 var DefaultConfig = Config{
-	DataDir:             paths.DefaultDataDir(),
-	HTTPPort:            DefaultHTTPPort,
-	HTTPModules:         []string{"net", "web3"},
-	HTTPVirtualHosts:    []string{"localhost"},
-	HTTPTimeouts:        rpc.DefaultHTTPTimeouts,
-	WSPort:              DefaultWSPort,
-	WSModules:           []string{"net", "web3"},
+	DataDir:          paths.DefaultDataDir(),
+	HTTPPort:         DefaultHTTPPort,
+	HTTPModules:      []string{"net", "web3"},
+	HTTPVirtualHosts: []string{"localhost"},
+	HTTPTimeouts:     rpc.DefaultHTTPTimeouts,
+	WSPort:           DefaultWSPort,
+	WSModules:        []string{"net", "web3"},
 	P2P: p2p.Config{
 		ListenAddr: ":30303",
 		MaxPeers:   50,
diff --git a/node/node.go b/node/node.go
index 6eaafdc7f15c91cf2e594915f290b0d1368f7c7a..bb0f1982175e9b2e610350c16e35212fcc3a9a59 100644
--- a/node/node.go
+++ b/node/node.go
@@ -127,10 +127,7 @@ func New(conf *Config) (*Node, error) {
 		}
 	}
 	if node.server.Config.NodeDatabase == "" {
-		node.server.Config.NodeDatabase, err = node.config.NodeDB()
-		if err != nil {
-			return nil, err
-		}
+		node.server.Config.NodeDatabase = node.config.NodeDB()
 	}
 
 	// Check HTTP/WS prefixes are valid.
@@ -567,10 +564,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, datadir string) (*ethdb.Obje
 		fmt.Printf("Opening In-memory Database (LMDB): %s\n", name)
 		db = ethdb.NewMemDatabase()
 	} else {
-		dbPath, err := n.config.ResolvePath(name)
-		if err != nil {
-			return nil, err
-		}
+		dbPath := n.config.ResolvePath(name)
 
 		var openFunc func(exclusive bool) (*ethdb.ObjectDatabase, error)
 		if n.config.MDBX {
@@ -600,7 +594,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, datadir string) (*ethdb.Obje
 				return ethdb.NewObjectDatabase(kv), nil
 			}
 		}
-
+		var err error
 		db, err = openFunc(false)
 		if err != nil {
 			return nil, err
@@ -633,6 +627,6 @@ func (n *Node) OpenDatabaseWithFreezer(name string, datadir string) (*ethdb.Obje
 }
 
 // ResolvePath returns the absolute path of a resource in the instance directory.
-func (n *Node) ResolvePath(x string) (string, error) {
+func (n *Node) ResolvePath(x string) string {
 	return n.config.ResolvePath(x)
 }
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index 3ab0f102944ee0d016ceec4be6064b78f159f157..90a5271cd9e2b7f6c6be7a2f222818ef071d6efb 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -25,6 +25,7 @@ var DefaultFlags = []cli.Flag{
 	StorageModeFlag,
 	SnapshotModeFlag,
 	SeedSnapshotsFlag,
+	SnapshotDatabaseLayoutFlag,
 	ExternalSnapshotDownloaderAddrFlag,
 	BatchSizeFlag,
 	DatabaseFlag,
diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go
index ddd8bbce4714f207b16bf526cd31399bc75fca8f..f3a5a8de771c18ecc5f82bf77f78be82f448bef7 100644
--- a/turbo/cli/flags.go
+++ b/turbo/cli/flags.go
@@ -82,6 +82,11 @@ var (
 		Name:  "snapshot.seed",
 		Usage: `Seed snapshot seeding(default: true)`,
 	}
+	//todo replace to BoolT
+	SnapshotDatabaseLayoutFlag = cli.BoolFlag{
+		Name:  "snapshot.layout",
+		Usage: `Enable snapshot db layout(default: false)`,
+	}
 
 	ExternalSnapshotDownloaderAddrFlag = cli.StringFlag{
 		Name:  "snapshot.downloader.addr",
@@ -141,6 +146,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 	}
 	cfg.SnapshotMode = snMode
 	cfg.SnapshotSeeding = ctx.GlobalBool(SeedSnapshotsFlag.Name)
+	cfg.SnapshotLayout = ctx.GlobalBool(SnapshotDatabaseLayoutFlag.Name)
 
 	if ctx.GlobalString(BatchSizeFlag.Name) != "" {
 		err := cfg.BatchSize.UnmarshalText([]byte(ctx.GlobalString(BatchSizeFlag.Name)))
diff --git a/turbo/snapshotsync/bittorrent/server.go b/turbo/snapshotsync/bittorrent/server.go
deleted file mode 100644
index d44900012d2bd1c6adb3d94743ace956b5ea7e6f..0000000000000000000000000000000000000000
--- a/turbo/snapshotsync/bittorrent/server.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package bittorrent
-
-import (
-	"context"
-	"errors"
-	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/ledgerwatch/turbo-geth/ethdb"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
-)
-
-var (
-	ErrNotSupportedNetworkID = errors.New("not supported network id")
-	ErrNotSupportedSnapshot  = errors.New("not supported snapshot for this network id")
-)
-var (
-	_ snapshotsync.DownloaderServer = &SNDownloaderServer{}
-)
-
-func NewServer(dir string, seeding bool) (*SNDownloaderServer, error) {
-	downloader, err := New(dir, seeding)
-	if err != nil {
-		return nil, err
-	}
-	return &SNDownloaderServer{
-		t:  downloader,
-		db: ethdb.MustOpen(dir + "/db"),
-	}, nil
-}
-
-type SNDownloaderServer struct {
-	snapshotsync.DownloaderServer
-	t  *Client
-	db ethdb.Database
-}
-
-func (S *SNDownloaderServer) Download(ctx context.Context, request *snapshotsync.DownloadSnapshotRequest) (*empty.Empty, error) {
-	err := S.t.AddSnapshotsTorrents(ctx, S.db, request.NetworkId, snapshotsync.FromSnapshotTypes(request.Type))
-	if err != nil {
-		return nil, err
-	}
-	return &empty.Empty{}, nil
-}
-func (S *SNDownloaderServer) Load() error {
-	return S.t.Load(S.db)
-}
-
-func (S *SNDownloaderServer) Snapshots(ctx context.Context, request *snapshotsync.SnapshotsRequest) (*snapshotsync.SnapshotsInfoReply, error) {
-	reply := snapshotsync.SnapshotsInfoReply{}
-	resp, err := S.t.GetSnapshots(S.db, request.NetworkId)
-	if err != nil {
-		return nil, err
-	}
-	for i := range resp {
-		reply.Info = append(reply.Info, resp[i])
-	}
-	return &reply, nil
-}
diff --git a/turbo/snapshotsync/bittorrent/build_infobytes.go b/turbo/snapshotsync/build_infobytes.go
similarity index 86%
rename from turbo/snapshotsync/bittorrent/build_infobytes.go
rename to turbo/snapshotsync/build_infobytes.go
index a453dc3d4ce66759782f5c280582d06edb6076e5..2f8b49e262bc28e76d87231becf2cd1028013f70 100644
--- a/turbo/snapshotsync/bittorrent/build_infobytes.go
+++ b/turbo/snapshotsync/build_infobytes.go
@@ -1,4 +1,4 @@
-package bittorrent
+package snapshotsync
 
 import (
 	"fmt"
@@ -10,8 +10,8 @@ import (
 	"github.com/anacrolix/torrent/metainfo"
 )
 
-func BuildInfoBytesForLMDBSnapshot(root string) (metainfo.Info, error) {
-	path := root + "/" + LmdbFilename
+func BuildInfoBytesForSnapshot(root string, fileName string) (metainfo.Info, error) {
+	path := root + "/" + fileName
 	fi, err := os.Stat(path)
 	if err != nil {
 		return metainfo.Info{}, err
diff --git a/turbo/snapshotsync/bittorrent/const.go b/turbo/snapshotsync/const.go
similarity index 93%
rename from turbo/snapshotsync/bittorrent/const.go
rename to turbo/snapshotsync/const.go
index ab5f2ee96a372cc006a9a86686a2d48950f64e00..fe80c690fd9239b4c85905baeb696b905474ec08 100644
--- a/turbo/snapshotsync/bittorrent/const.go
+++ b/turbo/snapshotsync/const.go
@@ -1,47 +1,50 @@
-package bittorrent
+package snapshotsync
 
 import (
 	"errors"
 
 	"github.com/anacrolix/torrent/metainfo"
 	"github.com/ledgerwatch/turbo-geth/params"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 )
 
 const (
 	DefaultChunkSize = 1024 * 1024
-	SnapshotBlock    = 11_000_000
 	LmdbFilename     = "data.mdb"
+	MdbxFilename     = "mdbx.dat"
+	EpochSize        = 500_000
 
-	HeadersSnapshotHash  = "460da4ffbc2b77f6662a8a7c15e21f4c5981656d" //11кk block 1mb chunk
-	BlocksSnapshotHash   = "6353d013d614f1f8145d71e1479de9b4361d273f" //11кk block 1mb chunk
-	StateSnapshotHash    = "fed1ef2b4d2cd8ea32eda24559b4d7eedaeb1b78"
-	ReceiptsSnapshotHash = ""
+	//todo It'll be changed after enabling new snapshot generation mechanism
+	HeadersSnapshotHash = "0000000000000000000000000000000000000000"
+	BlocksSnapshotHash  = "0000000000000000000000000000000000000000"
+	StateSnapshotHash   = "0000000000000000000000000000000000000000"
 
 	SnapshotInfoHashPrefix  = "ih"
 	SnapshotInfoBytesPrefix = "ib"
 )
 
 var (
-	TorrentHashes = map[uint64]map[snapshotsync.SnapshotType]metainfo.Hash{
+	TorrentHashes = map[uint64]map[SnapshotType]metainfo.Hash{
 		params.MainnetChainConfig.ChainID.Uint64(): {
-			snapshotsync.SnapshotType_headers: metainfo.NewHashFromHex(HeadersSnapshotHash),
-			snapshotsync.SnapshotType_bodies:  metainfo.NewHashFromHex(BlocksSnapshotHash),
-			snapshotsync.SnapshotType_state:   metainfo.NewHashFromHex(StateSnapshotHash),
+			SnapshotType_headers: metainfo.NewHashFromHex(HeadersSnapshotHash),
+			SnapshotType_bodies:  metainfo.NewHashFromHex(BlocksSnapshotHash),
+			SnapshotType_state:   metainfo.NewHashFromHex(StateSnapshotHash),
 		},
 	}
 	ErrInvalidSnapshot = errors.New("this snapshot for this chainID not supported ")
 )
 
-func GetAvailableSnapshotTypes(networkID uint64) []snapshotsync.SnapshotType {
-	types := make([]snapshotsync.SnapshotType, 0, len(TorrentHashes[networkID]))
-	for k := range TorrentHashes[networkID] {
-		types = append(types, k)
+func GetAvailableSnapshotTypes(chainID uint64) []SnapshotType {
+	v := TorrentHashes[chainID]
+	res := make([]SnapshotType, 0, len(v))
+	for i := range v {
+		res = append(res, i)
 	}
-	return types
+	return res
 }
 
 var Trackers = [][]string{{
+	"http://35.189.110.210:80/announce",
+}, {
 	"udp://tracker.openbittorrent.com:80",
 	"udp://tracker.openbittorrent.com:80",
 	"udp://tracker.publicbt.com:80",
diff --git a/turbo/snapshotsync/bittorrent/downloader.go b/turbo/snapshotsync/downloader.go
similarity index 71%
rename from turbo/snapshotsync/bittorrent/downloader.go
rename to turbo/snapshotsync/downloader.go
index 7cae8aff0660528fa2797fe070fb1e6985570fe7..e24392ca618ca0df9614313562c621b2da9ae134 100644
--- a/turbo/snapshotsync/bittorrent/downloader.go
+++ b/turbo/snapshotsync/downloader.go
@@ -1,4 +1,4 @@
-package bittorrent
+package snapshotsync
 
 import (
 	"bytes"
@@ -9,6 +9,8 @@ import (
 	"path/filepath"
 	"time"
 
+	"github.com/anacrolix/torrent/bencode"
+
 	lg "github.com/anacrolix/log"
 	"github.com/anacrolix/torrent"
 	"github.com/anacrolix/torrent/metainfo"
@@ -16,29 +18,32 @@ import (
 	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
-	"github.com/ledgerwatch/turbo-geth/turbo/snapshotsync"
 	"golang.org/x/sync/errgroup"
 )
 
 type Client struct {
 	Cli          *torrent.Client
 	snapshotsDir string
+	trackers     [][]string
 }
 
-func New(snapshotsDir string, seeding bool) (*Client, error) {
+func New(snapshotsDir string, seeding bool, peerID string) (*Client, error) {
 	torrentConfig := DefaultTorrentConfig()
 	torrentConfig.Seed = seeding
 	torrentConfig.DataDir = snapshotsDir
 	torrentConfig.UpnpID = torrentConfig.UpnpID + "leecher"
+	torrentConfig.PeerID = peerID
 
 	torrentClient, err := torrent.NewClient(torrentConfig)
 	if err != nil {
 		log.Error("Fail to start torrnet client", "err", err)
+		return nil, fmt.Errorf("fail to start: %w", err)
 	}
 
 	return &Client{
 		Cli:          torrentClient,
 		snapshotsDir: snapshotsDir,
+		trackers:     Trackers,
 	}, nil
 }
 
@@ -53,6 +58,14 @@ func DefaultTorrentConfig() *torrent.ClientConfig {
 	return torrentConfig
 }
 
+func (cli *Client) Torrents() []metainfo.Hash {
+	t := cli.Cli.Torrents()
+	hashes := make([]metainfo.Hash, 0, len(t))
+	for _, v := range t {
+		hashes = append(hashes, v.InfoHash())
+	}
+	return hashes
+}
 func (cli *Client) Load(db ethdb.Database) error {
 	log.Info("Load added torrents")
 	return db.Walk(dbutils.SnapshotInfoBucket, []byte{}, 0, func(k, infoHashBytes []byte) (bool, error) {
@@ -77,6 +90,18 @@ func (cli *Client) Load(db ethdb.Database) error {
 	})
 }
 
+func (cli *Client) SavePeerID(db ethdb.Putter) error {
+	return db.Put(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID), cli.PeerID())
+}
+
+func (cli *Client) Close() {
+	cli.Cli.Close()
+}
+
+func (cli *Client) PeerID() []byte {
+	peerID := cli.Cli.PeerID()
+	return peerID[:]
+}
 func (cli *Client) AddTorrentSpec(snapshotName string, snapshotHash metainfo.Hash, infoBytes []byte) (*torrent.Torrent, error) {
 	t, ok := cli.Cli.Torrent(snapshotHash)
 	if ok {
@@ -91,7 +116,7 @@ func (cli *Client) AddTorrentSpec(snapshotName string, snapshotHash metainfo.Has
 	return t, err
 }
 
-func (cli *Client) AddTorrent(ctx context.Context, db ethdb.Database, snapshotType snapshotsync.SnapshotType, networkID uint64) error { //nolint: interfacer
+func (cli *Client) AddTorrent(ctx context.Context, db ethdb.Database, snapshotType SnapshotType, networkID uint64) error { //nolint: interfacer
 	infoHashBytes, infoBytes, err := getTorrentSpec(db, snapshotType.String(), networkID)
 	if err != nil {
 		return err
@@ -122,7 +147,7 @@ func (cli *Client) AddTorrent(ctx context.Context, db ethdb.Database, snapshotTy
 	}
 	t.AllowDataDownload()
 	t.DownloadAll()
-	log.Info("Got infobytes", "snapshot", snapshotType.String())
+	log.Info("Got infobytes", "snapshot", snapshotType.String(), "file", t.Files()[0].Path())
 
 	if newTorrent {
 		log.Info("Save spec", "snapshot", snapshotType.String())
@@ -152,32 +177,32 @@ func (cli *Client) GetInfoBytes(ctx context.Context, snapshotHash metainfo.Hash)
 	}
 }
 
-func (cli *Client) AddSnapshotsTorrents(ctx context.Context, db ethdb.Database, networkId uint64, mode snapshotsync.SnapshotMode) error {
+func (cli *Client) AddSnapshotsTorrents(ctx context.Context, db ethdb.Database, networkId uint64, mode SnapshotMode) error {
 	ctx, cancel := context.WithTimeout(ctx, time.Minute*10)
 	defer cancel()
 	eg := errgroup.Group{}
 
 	if mode.Headers {
 		eg.Go(func() error {
-			return cli.AddTorrent(ctx, db, snapshotsync.SnapshotType_headers, networkId)
+			return cli.AddTorrent(ctx, db, SnapshotType_headers, networkId)
 		})
 	}
 
 	if mode.Bodies {
 		eg.Go(func() error {
-			return cli.AddTorrent(ctx, db, snapshotsync.SnapshotType_bodies, networkId)
+			return cli.AddTorrent(ctx, db, SnapshotType_bodies, networkId)
 		})
 	}
 
 	if mode.State {
 		eg.Go(func() error {
-			return cli.AddTorrent(ctx, db, snapshotsync.SnapshotType_state, networkId)
+			return cli.AddTorrent(ctx, db, SnapshotType_state, networkId)
 		})
 	}
 
 	if mode.Receipts {
 		eg.Go(func() error {
-			return cli.AddTorrent(ctx, db, snapshotsync.SnapshotType_receipts, networkId)
+			return cli.AddTorrent(ctx, db, SnapshotType_receipts, networkId)
 		})
 	}
 	err := eg.Wait()
@@ -198,6 +223,7 @@ func (cli *Client) Download() {
 			t.DownloadAll()
 
 			tt := time.Now()
+			prev := t.BytesCompleted()
 		dwn:
 			for {
 				if t.Info().TotalLength()-t.BytesCompleted() == 0 {
@@ -205,8 +231,17 @@ func (cli *Client) Download() {
 					break dwn
 				} else {
 					stats := t.Stats()
-					log.Info("Downloading snapshot", "snapshot", t.Name(), "%", int(100*(float64(t.BytesCompleted())/float64(t.Info().TotalLength()))), "seeders", stats.ConnectedSeeders)
-					time.Sleep(time.Minute)
+					log.Info("Downloading snapshot",
+						"snapshot", t.Name(),
+						"%", int(100*(float64(t.BytesCompleted())/float64(t.Info().TotalLength()))),
+						"mb", t.BytesCompleted()/1024/1024,
+						"diff(kb)", (t.BytesCompleted()-prev)/1024,
+						"seeders", stats.ConnectedSeeders,
+						"active", stats.ActivePeers,
+						"total", stats.TotalPeers)
+					prev = t.BytesCompleted()
+					time.Sleep(time.Second * 10)
+
 				}
 
 			}
@@ -219,8 +254,8 @@ func (cli *Client) Download() {
 	}
 }
 
-func (cli *Client) GetSnapshots(db ethdb.Database, networkID uint64) (map[snapshotsync.SnapshotType]*snapshotsync.SnapshotsInfo, error) {
-	mp := make(map[snapshotsync.SnapshotType]*snapshotsync.SnapshotsInfo)
+func (cli *Client) GetSnapshots(db ethdb.Database, networkID uint64) (map[SnapshotType]*SnapshotsInfo, error) {
+	mp := make(map[SnapshotType]*SnapshotsInfo)
 	networkIDBytes := make([]byte, 8)
 	binary.BigEndian.PutUint64(networkIDBytes, networkID)
 	err := db.Walk(dbutils.SnapshotInfoBucket, append(networkIDBytes, []byte(SnapshotInfoHashPrefix)...), 8*8+16, func(k, v []byte) (bool, error) {
@@ -244,19 +279,19 @@ func (cli *Client) GetSnapshots(db ethdb.Database, networkID uint64) (map[snapsh
 		}
 
 		_, tpStr := ParseInfoHashKey(k)
-		tp, ok := snapshotsync.SnapshotType_value[tpStr]
+		tp, ok := SnapshotType_value[tpStr]
 		if !ok {
 			return false, fmt.Errorf("incorrect type: %v", tpStr)
 		}
 
-		val := &snapshotsync.SnapshotsInfo{
-			Type:          snapshotsync.SnapshotType(tp),
+		val := &SnapshotsInfo{
+			Type:          SnapshotType(tp),
 			GotInfoByte:   gotInfo,
 			Readiness:     readiness,
 			SnapshotBlock: SnapshotBlock,
 			Dbpath:        filepath.Join(cli.snapshotsDir, t.Files()[0].Path()),
 		}
-		mp[snapshotsync.SnapshotType(tp)] = val
+		mp[SnapshotType(tp)] = val
 		return true, nil
 	})
 	if err != nil {
@@ -266,6 +301,34 @@ func (cli *Client) GetSnapshots(db ethdb.Database, networkID uint64) (map[snapsh
 	return mp, nil
 }
 
+func (cli *Client) SeedSnapshot(name string, path string) (metainfo.Hash, error) {
+	info, err := BuildInfoBytesForSnapshot(path, LmdbFilename)
+	if err != nil {
+		return [20]byte{}, err
+	}
+
+	infoBytes, err := bencode.Marshal(info)
+	if err != nil {
+		return [20]byte{}, err
+	}
+
+	t, err := cli.AddTorrentSpec(name, metainfo.HashBytes(infoBytes), infoBytes)
+	if err != nil {
+		return [20]byte{}, err
+	}
+	return t.InfoHash(), nil
+}
+func (cli *Client) StopSeeding(hash metainfo.Hash) error {
+	t, ok := cli.Cli.Torrent(hash)
+	if !ok {
+		return nil
+	}
+	ch := t.Closed()
+	t.Drop()
+	<-ch
+	return nil
+}
+
 func getTorrentSpec(db ethdb.Database, snapshotName string, networkID uint64) ([]byte, []byte, error) {
 	var infohash, infobytes []byte
 	var err error
@@ -308,3 +371,21 @@ func MakeInfoBytesKey(snapshotName string, networkID uint64) []byte {
 func ParseInfoHashKey(k []byte) (uint64, string) {
 	return binary.BigEndian.Uint64(k), string(bytes.TrimPrefix(k[8:], []byte(SnapshotInfoHashPrefix)))
 }
+
+func SnapshotSeeding(chainDB ethdb.Database, cli *Client, name string, snapshotsDir string) error {
+	snapshotBlock, err := chainDB.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+	if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+		return err
+	}
+
+	if len(snapshotBlock) == 8 {
+		hash, err := cli.SeedSnapshot(name, SnapshotName(snapshotsDir, name, binary.BigEndian.Uint64(snapshotBlock)))
+		if err != nil {
+			return err
+		}
+		log.Info("Start seeding", "snapshot", name, "hash", hash.String())
+	} else {
+		log.Warn("Snapshot block unknown", "snapshot", name, "v", common.Bytes2Hex(snapshotBlock))
+	}
+	return nil
+}
diff --git a/turbo/snapshotsync/external_downloader.pb.go b/turbo/snapshotsync/external_downloader.pb.go
index a1d2b780cc7b7deff41de730e65562e4770a4b20..1b78fb93100a633f3d512cd2a1f57f9ca20a403e 100644
--- a/turbo/snapshotsync/external_downloader.pb.go
+++ b/turbo/snapshotsync/external_downloader.pb.go
@@ -7,12 +7,13 @@
 package snapshotsync
 
 import (
+	reflect "reflect"
+	sync "sync"
+
 	proto "github.com/golang/protobuf/proto"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	emptypb "google.golang.org/protobuf/types/known/emptypb"
-	reflect "reflect"
-	sync "sync"
 )
 
 const (
diff --git a/turbo/snapshotsync/external_downloader_grpc.pb.go b/turbo/snapshotsync/external_downloader_grpc.pb.go
index e6f4f93628461f4255a6bb70190306652a645397..814d31e01c2fad5fb6f70b326098aff236096dee 100644
--- a/turbo/snapshotsync/external_downloader_grpc.pb.go
+++ b/turbo/snapshotsync/external_downloader_grpc.pb.go
@@ -4,6 +4,7 @@ package snapshotsync
 
 import (
 	context "context"
+
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
diff --git a/turbo/snapshotsync/bittorrent/logger.go b/turbo/snapshotsync/logger.go
similarity index 93%
rename from turbo/snapshotsync/bittorrent/logger.go
rename to turbo/snapshotsync/logger.go
index b78d4b75ef22515bd2da7c0ab6a91c586198f6b3..f9d1ddcf3a78952e14bca96d205c8e641db4d577 100644
--- a/turbo/snapshotsync/bittorrent/logger.go
+++ b/turbo/snapshotsync/logger.go
@@ -1,4 +1,4 @@
-package bittorrent
+package snapshotsync
 
 import (
 	lg "github.com/anacrolix/log"
@@ -24,7 +24,7 @@ func (b adapterLogger) Log(msg lg.Msg) {
 
 	switch lvl {
 	case lg.Debug:
-		log.Debug(msg.String())
+		log.Info(msg.String())
 	case lg.Info:
 		log.Info(msg.String())
 	case lg.Warning:
diff --git a/turbo/snapshotsync/postprocessing.go b/turbo/snapshotsync/postprocessing.go
index 9cf7a8f2b87d170f519ca86ec46a7637a2531f8d..fba0a4509b23b78f8f3e816951575c1626def0d2 100644
--- a/turbo/snapshotsync/postprocessing.go
+++ b/turbo/snapshotsync/postprocessing.go
@@ -3,6 +3,7 @@ package snapshotsync
 import (
 	"context"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math/big"
 	"os"
@@ -18,32 +19,48 @@ import (
 	"github.com/ledgerwatch/turbo-geth/rlp"
 )
 
+const (
+	SnapshotBlock  = 11_500_000
+	HeaderHash11kk = "0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e"
+	Header11kk     = "f90211a01cb6a590440a9ed02e8762ac35faa04ec30cdbcaff0b276fa1ab5e2339033a6aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347944bb96091ee9d802ed039c4d1a5f6216f90f81b01a08b2258fc3693f6ed1102f3142839a174b27f215841d2f542b586682898981c6da07d63a1ceded7864e95f09fe65b6bd17fb3f02a3644b1340bb0ab8a7267251e62a04cb5cf79c8a58a4787ec1ed0af51bcce19e6ad701dd40a45086244b933104cf2b901002438522b194b05881a7d976aa8c45ff47193ba8adc6fe2cc85eb68c66503558fa0ba43cebbd2327cfa297a87228511374ed3a2f66f3999426dced224c464840303de108b8604dcafce84d678b589cbe8a74aa2c540668a9a9acfa1eb94c6569918d819063600c000f3c060d649129f8327cad2c7ba1f9495531224b34a1ad8ca0810ab2d2d43a18877484dc33d220c0531024f1dc7448f8a6c016340ae143efd87c5e681d40a34e6be5803ea696038d3ad090048cb267a2ae72e7290da6b385f9874c002302c85e96005aa08031e30ac2a8a9a021bdc2a7a39a1089a08586cefcb937700ff03e4acaa37448c00f4ad02116216437bc52846ebd205869231e574870bf465887ac96883a7d8c083bdfd7483bde3a8845f7befc090505059452d657468706f6f6c2d757331a07a1a8c57afdf3be769e0f6a54e92900374cc207c7cf01b9da6ccca80a8b4006c88d495a5d800490fad"
+	Body11kk       = "f99a34f99a30f8ac8214268541314cf0008301d4c0946b0359f95796327475ad4f12ae4e1047c3a67fa380b844a9059cbb000000000000000000000000fa1dfa4588664928951778b355b3de1f703ade4a00000000000000000000000000000000000000000000000000000000001e0b8725a0b3855a81e5f5ab38eae3b2c1bcd9ce72e143803ef4e47d9845c177aa156137f1a05993fea2c5b3e4995ca84aac5a11dd87ea1dd9a70f6cfc52f88a4b932edec56df8ac8214278541314cf0008301d4c0946b0359f95796327475ad4f12ae4e1047c3a67fa380b844a9059cbb000000000000000000000000def1b9aaedeeb0dbeba57773fad640aa7f30e735000000000000000000000000000000000000000000000000000000000057f29b26a0dca35e61c625a15c23579b346e032598a2ae33086ef1146330ae59335b28b90aa0162e3d461d1ecd5a57ef92ec582aab7daa6d7a402bb162963f37e1af9b0a8eb7f8ad830109f9853a35294400830138809412d79c345cac7b050a5ff0797b5a607e254c73f580b844a9059cbb000000000000000000000000e7e7a94e68be242a0db0c525dc3acefb755ae271000000000000000000000000000000000000000000000000000000000041696725a06f44e89a311d35682d93f81599bbb23c7a3477682c0ee66436688eca14ead9a5a00465af62a83af5135904c7953fcbb4821cc977a912b9e9055afc978224014a30f86d5785131794b40083021fdb9456710f7e591fa0559003831e041dd67789d2af4a881bc16d674ec800008025a08286e05045a922f5cb264de212d0fcd9bae59dfdc98a9a9d30b8cfc90baaa273a05945cb96da53d2ac56063d5a7273892e946f74ab371771ba75a725c223626820f88b825259851298879d29830249f0940000000071e801062eb0544403f66176bba42dc080a400000019000000000000000000000000000000000000000000000000000000000000079d25a0d704c1f7f9480b5f8f7e74c9dc5cf77391f4eb8abd901e5d5331af8272f4820ba026c7b701f8a24c6ce84d86c02fcdfd116b9a590bccb4848e012b6baefded1e99f8678211cc851229298c0083030d4094e5fd3dc48656dfed32601468426fe57595d8938080801ba0d1e94509c4ac498235452be50481fc49fb4037f37d2686a13c52280fcec15815a064fa69a8fb3bc49599be90dcd293f72a2575da8e991285c2a4923aa6042c1c45f86d82f06285120b5c270082627094ad978dc888e1a2590244f8bf9e716eb18b3b68d787354a6ba7a180008026a0ed667359fcecb1d62418dfd5535fa5ecc0abf7dcf314b1a59222f8e87a1af8a0a00755cf05e3dd4e9f95d9f9240cf5e66d8320fe3e64f7ed2ecc
f879c2db546ca7f8708302e5a28510c388d000830186a094dfda58c58d9d2e862d386706efdf6153e147ae8b8821db0ba827b770008026a07c7bbca5548e94b3da64473eb35b287d802a00433b5e67815da4765803b076a0a00c4c1bd0ba655f8c82cc4880bd4a1821de03bba379644685e6d8ceb3252d22def8708302e5a38510c388d000830186a0941afd9659259df405dd3f507ef77887aa0cb751b98843966d18b91b80008025a0c345c9fc5217f79b2e9375916020162f10f0505298ce5c074b15ad14495c8e81a00292440b70d3a6455902013ba251caa8578a22b76d59b14a668d839e2947f608f8ab818f851087ee06008301d4c0946bc1f3a1ae56231dbb64d3e82e070857eae8604580b844a9059cbb00000000000000000000000024434ebf296c2f9cd59b14412aae5c4ca1d5aad2000000000000000000000000000000000000000000000a9e566513a4910d400025a0db1575b0bb6f9ff489eceda148be18c2914680dd61d93e94b272d25919536feba0539db0b71bfd18077a63b7cec6624459bfb2cc1b5ec157bd540d7e323b833bf7f8aa1d851087ee06008301d4c0946bc1f3a1ae56231dbb64d3e82e070857eae8604580b844a9059cbb00000000000000000000000024434ebf296c2f9cd59b14412aae5c4ca1d5aad2000000000000000000000000000000000000000000002a4c2b736cdf768d400026a06abb41e737220b9f918e351d4e1106c7f4ab1c76e9f71dcf1313e34056c855e4a02e5737341566322cf7049561d9c62bebbb2d9fe681a9672a98b76d27b3525cc4f86c44851087ee060082520894efb2e870b14d7e555a31b392541acf002dae6ae98808a9fd000b5d10008026a0a3a8ce710f12d38d704b748aac579095fa76a2d4335fddc507c2b320ddadb341a048b3889aeb79855224b84c89aca69ef15716a965b1a3d4a416a2b932e53617d3f8aa3d851087ee06008301d4c0946bc1f3a1ae56231dbb64d3e82e070857eae8604580b844a9059cbb00000000000000000000000024434ebf296c2f9cd59b14412aae5c4ca1d5aad200000000000000000000000000000000000000000000386992ac084d52b9a40026a0a30a1900def92ba2d630ce8851016f200a6f0ab7cbb3726e856b80c8c7c2cdb9a046b2ebc2062888b195266b27c24b832d16c38a681097661e7c85dc918c659f74f8aa61851087ee06008301d4c0946bc1f3a1ae56231dbb64d3e82e070857eae8604580b844a9059cbb00000000000000000000000024434ebf296c2f9cd59b14412aae5c4ca1d5aad200000000000000000000000000000000000000000000d3998a945d035620e20025a00aef9d9d942121f0ec78f855f5b41a8f5012e9ad245adf3b19bd25a5e60d40
cca07119f902bc2bf5ef3a30788ee545e4dbedbc52054c970409fc6a742281802831f8aa37851087ee06008301d4c0946bc1f3a1ae56231dbb64d3e82e070857eae8604580b844a9059cbb00000000000000000000000024434ebf296c2f9cd59b14412aae5c4ca1d5aad200000000000000000000000000000000000000000000135d111acd67564fac0026a0f1d8a696da6547e491e1fb89c642493f3ae1b80c1f2b7f48857940117b10e83da040b4b8557111d4fbfa45e8711e1856dacf6d5aacdfd83975d91f8507f6d2491af88a82238285104fe73d7882ea60940000000071e801062eb0544403f66176bba42dc080a400000039000000000000000000000000000000000000000000000000000000000000fdeb25a08fb55bfd8459411b7bb69bc388a2f1fb4ccf2e28a52739ba6e7246b8dfdd282ca06924a9f7387c9405b0ebe379a69a3fab0e5606254213b67646470d0d9ae36fdbf8ac82b747851010b872008302bf209410086399dd8c1e3de736724af52587a2044c9fa280b844a9059cbb000000000000000000000000444705e7ccdc6bf014f3e956e5c4eda78b59f79b0000000000000000000000000000000000000000000013ad784a9942cb78980026a0b2074cb4a3d2954203fb6dfdd75dbe2b4e581452c8111691b2a8903d64c741baa014edfd2a96f05c667482fb098da4df0d323ceaa03c1120fa8c3b11b1e6752cd9f86c03850ee6b28000825208946cbdaad85cf5f084c25d349972178e12e0f3811b8801cd3fc4be2751008026a08cce1042c48eac631c16b870576577bc6837ea9fc8aa8be4fe6085115fc1072fa01e5e0c39bb23d8a844d3fbb15a2b803db00d19f16db962c51aca9efac4735be2f8aa11850ee6b280008301117094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000f2b293e9c1cbef2316ba8a4eccfecc8ba7af123c0000000000000000000000000000000000000000000000000000000013fae61025a0fbdaa8990cf1562bc687fba0919bee234fc4943ba536959b5d53199b08e645eda03610f59eb4a9f1b264a41da1f4b75e6ddcfdddc25e51d319e8aeae9f1866e2c6f86f820109850ee6b28000830111709429bd07c50a2a2a843b309264361c9a91950ec650880a47e9a2ff200c008025a098d7a79510ec8088e40332b6c17eb825c85b0f7f5169b38f32b4176804ea798ba02103aa7c8c18724bd2266f56f6eb2982e59ce66fc7e815b96258f1733000680df8aa1d850ee6b280008301117094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000f2b293e9c1cbef2316ba8a4eccfecc8ba7af123c000000000000000000000000
000000000000000000000000000000001141445026a0304b4214474adb7c012af51f66a557bd5e4dd5ca6384181b75718b695cc0ca58a028e921c3e0bde2b373d3327575493f56f2c38e938db5a3af79d9cf73ee5bf41df8aa07850ee6b280008301117094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000f2b293e9c1cbef2316ba8a4eccfecc8ba7af123c0000000000000000000000000000000000000000000000000000000015bb0ca026a05817ff739574af40bb6a2a1e3521c81d9feeac9e1581ea1862e1d1310e992f25a04062cbda0d6c9cfd1ec650ab2f566ad443a32ea185460ba7b848e0c707222bd5f8aa04850ee6b280008301117094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000f2b293e9c1cbef2316ba8a4eccfecc8ba7af123c000000000000000000000000000000000000000000000000000000002b181ac025a01db94ba187623de31c790f335151b554f6edc8c3d3ae31c4bd1ff87f76b494c1a02f2d366e315e0243f2c00127f5bb00e3306e6c56fd93d415588381e2636912d3f8ab820ee7850ee6b2800082a0f994dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000030c6ce4559c9e40f79a21be12d8ac6a7cab5fbf400000000000000000000000000000000000000000000000000000000115987401ca0760fc54c11cf12f3a7cccb5340b4a625a926dbde65d482400de8eb9c2b21eb26a0725adba5a62c12842b6167cff941048910ac3bb0aa3502924d713b5648819d15f86c09850ed81dd19882520894f47b8ff418b81d929b1e2ab079ca96f9bae00ada888ef0f36da28600008025a0f3c3e987f3474dd3b8d298f9b127ac5dcd0d7728ce040da6cf148170b61b9008a06cd26c1e56d27009833ecebee4dcc7b69791476a64cddb9f660b39e9a79bf19bf9015103850e6f7cec008302a5e6947a250d5630b4cf539739df2c5dacb4c659f2488d87943afad18a21f4b8e4fb3bdb4100000000000000000000000000000000000000000000000038e62046fb1a00000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000c279a165c11340ff6262bd74ce7b328ca7d738d5000000000000000000000000000000000000000000000000000000005f7bf4210000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000bd301be09eb78df47019aa833d29edc5d815d83825a02f07ebc6959fac30dcd3d8
9d9a102f919086a2641afb6be871b021a956fc9cb7a0195f7d7e0b59d32190c2fd10c7a448d9a2ea40b7c72762a99c4613b690089073f901ac818c850e6f7cec0083048968947a250d5630b4cf539739df2c5dacb4c659f2488d80b901448803dbee0000000000000000000000000000000000000000000000003782dace9d90000000000000000000000000000000000000000000000000000001d690b82191f53800000000000000000000000000000000000000000000000000000000000000a00000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f000000000000000000000000000000000000000000000000000000005f7bf45c00000000000000000000000000000000000000000000000000000000000000040000000000000000000000005befbb272290dd5b8521d4a938f6c4757742c430000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000001712aad2c773ee04bdc9114b32163c058321cd8526a064c6e05031df9eb138b78b0e4d6578d17d2b17fda9f05d8ac921ed707390fa5ca06a826a830e4e740cca349d89f1afce04cd48cdf527780401ce28d235497734c0f8ab820c6b850e6f7cec0082715e94a1d0e215a23d7030842fc67ce582a6afa3ccab8380b844095ea7b30000000000000000000000009748d39b0b0949c0d5214094045fb7e2222a26ca000000000000000000000000000000000000000000000000000000000000000025a038e6bd86c77f10475337e7cd32c88043465d9cdfde5d1fdce4e778ae55383b3ea0767b31439035cc8499be4c407d7c1db7f38755b7cc8cbbc09065b689de0716bdf9016b34850e6f7cec00830290bc947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe500000000000000000000000000000000000000000000000007492cb7eb148000000000000000000000000000000000000000000000000000075567ee0995c7e400000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000ddaaef5d33f009bef641ee9a8054d0fa7c1e8d0e000000000000000000000000000000000000000000000000000000005f7bf45c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000028cb7e841ee97947a86b06fa4090c8451f64c0be000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a02daaf93f114bbe4209f1c082ead858afdc91ed29f2d5fa7866457e14a114d99ca022e100caeb5c78e4
45a014875c10b9bdbeb2828e1d7f2b35b5f0558f3d230981f9016d820274850e6f7cec008302878c947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe50000000000000000000000000000000000000000000006d3c17ae6ef8aafbcc90000000000000000000000000000000000000000000000000e718f3360c35d6800000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000e53699169e2ce5c49dbb85d86de834ad4a4a9d6a000000000000000000000000000000000000000000000000000000005f7bf45c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000087c00817abe35ed4c093e59043fae488238d2f74000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a0747216c39ceee5fcfcf7981317eba8108952819d6d0cf1fc4301e2882d3d54d7a068555b7b379ae6a66f31f64616860db5be895ee5fb5e6e1045d6217769a80d00f9026e830efef8850e6f7cec0083017c4094baf075545c3a56ecbaf219e4a1b69bc2b94b0b7580b9020464887334000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000a7d8bd0000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000331005a675e1d000118e351dc07f3000000000000000000000000000000000000526b143b1737372ff3262608cdf5000000000000000000000000000000000000e37003f2f90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003d600e2aeaa0800fc011eb527f90f000000000000000000000000000000000000bfa0eaf4ebd0c9d80c09da01490d00000000000000000000000000000000000017a1fd0b0900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000021ba0e8b0c6d27b12a2cd8506f694a874547161c2ef5fb3134fec6415010124624e32a018c3b32b8c7fbd563c33025b712f4b
311cfa10ae2b7d4e6a8df76721b0087b97f8cc822bf2850e6f7cec00830249f0940000000071e801062eb0544403f66176bba42dc080b86400000022000086fef14c27c78deaeb4349fd959caa11fc5b5d75000000000000000000000000000000000025948db5a78975a28a00000000000000000000000000000001000000000000000000000000fca59cd816ab1ead66534d82bc21e7515ce441cf25a049cbfb1f56c0a1555daf81f30121a791d149d1f4b5c9bfb278a070255d5bad65a0669c9a99b922a620ef5295e6f0b88bdc2619eb3ea9043d9ba784967e7c796721f9015482025b850e6f7cec008302aa06947a250d5630b4cf539739df2c5dacb4c659f2488d88011c640541017538b8e4fb3bdb41000000000000000000000000000000000000000000000001158e460913d0000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000003c8e49a61e91e6fdf7a56bfc4d6e347dd863ac27000000000000000000000000000000000000000000000000000000005f7bf45b0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000002a7f709ee001069771ceb6d42e85035f7d18e73626a02068b8d08937830226e99f3751ac9d21c381177b46f3cd7db65402655d4d8a0fa01a67a20e628a491743bee70f0dd3a573fca70c754d4b5b117052df173160b32df9015278850e6f7cec008302ad2f947a250d5630b4cf539739df2c5dacb4c659f2488d8801859bf17ded6942b8e4fb3bdb410000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000934831793baa281838c1e09e665a4f1406bc7560000000000000000000000000000000000000000000000000000000005f7bf45c0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000de201daec04ba73166d9917fdf08e1728e270f0625a0b9eee4c06f560fa62718be16a004561f8822346db521eff8b2454e8768bc0f07a07e4161e5898bab99feb09b44cf6737fb7c5f1e3f48138d599a22fc00ed867c0ff8ac820758850e6f7cec00830102cc94990f341946a3fdb507ae7e52d17851b87168017c80b844095ea7b30000000000000000000000002c6a9c15e7f13de8ae55302bd29608c9eed78f08000000000000000000000000000000000000
0000000000008ac7230489e8000026a06feeea2d9505a20d7b2d10e1936d5bf9117d4b88e632c786322c3217dd916ae5a02e618e1517a44cb023837b64f1e902f0ba4a9a6a7d2d9806175a83f5d9ec826cf8ab81a6850e6f7cec00830381f9944d23c0537d7ee782cfaf9bbecdd60840e3d0ffe880b844441a3e7000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000045be7ad5bf65491d325a0a2b643fdec2ee570d216e2fce4f6030e7f7d162157808fc0e07c22da253e96b1a0752e21f128a8a64f7b79beadb3c65714505e9df52cf99068c7acf83a21e2d928f86b02850e6f7cec00825208947264c9f085858c607733daac1eef2625237fb333870b4fcc9ecd62008026a053b3489fc1005069296c19ee693c6e0ad78367ba818b930558b006beed0fa479a003adaa5da3158347c5141ca120dc4f2623e0fb0a90fab1129093fa665893e11ff8aa04850e6f7cec0083013d8c943fef5a74bcd2b8d2d44885b144df7f9d149b4b4580b844a9059cbb00000000000000000000000044fa1f6a271c6eb84227304e92813142758e133300000000000000000000000000000000000000000000001226788055013fc00026a0e2e6d5433b941dd559ff46677069c3689ac3dd5e28d12ee0287cb6ff3cb48bc5a010ae409467d096bc0c07fb1c4b2a3d9afca1e69d4842f329ccf987966aaceccef86c18850e6f7cec0082520894dfc105318661fc8241eb4d50275ad49bfd99903d88014a6701dc1c80008025a0c6301d39576a4f65741d63644701dcf63fb0dd8b8faaf5dbec353f3f64197d7ba0011557cc13411b8664590e2bc18517cbec0683bf4da982cad2f85cf3eed68fa4f8ab8201b4850e6f7cec0082914d9432ce7e48debdccbfe0cd037cc89526e4382cb81b80b844095ea7b300000000000000000000000073282a63f0e3d7e9604575420f777361eca3c86a0000000000000000000000000000000000000000000000001e3c5e6d667f9c2e25a09d6afbad3bdf0216cba142d92e752fd8c22956ba61fa642d2665e507e7a1192fa047403cec0c3d6bf90fa1c933907d80ec4668a605f2a18dfbc87255c8dda92ea8f8d240850e6f7cec00830454ff9473282a63f0e3d7e9604575420f777361eca3c86a880de0b6b3a7640000b864aed351470000000000000000000000000000000000000000000000000000000000000cfc0000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000000000000000000000000000000000000000000025a042d718cb1b80c7615c6ed21a5cf00b8b174f70eb1004005e0dfffc4fb2a4a090a07ceb
1139767232185f78e822b78649d0d4123861205f8236b8e6268750048180f8aa80850e6f7cec008301117094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000040c266a51ced55caf390cb6e00b5b38c5736a79200000000000000000000000000000000000000000000000000000000b06d6f9026a0493eb28fb2ad6ed89ddcf5f2cc30f6432d503c69dd91d5ff0c990c9f4f188886a045bc2d33815b1765150e63ef106e5b4c5ad877b25c1419a122304b089572f15ff86d820229850e6f7cec0082520894bd7de91133e32ced22820aeb44a864b2ec25c7188711c37937e080008025a07d0a9d5cdb3f59f1bfb3df400acd0aa3112ab5948018e49024571d3254f7aee9a02f0a90d0eb973cf22f86fc9c4d15a1e6862f2e9cf2817d63a12380866938f93cf9013201850e6f7cec008302fbca947a250d5630b4cf539739df2c5dacb4c659f2488d888684fd111d7566bbb8c4f305d71900000000000000000000000080fb784b7ed66730e8b1dbd9820afd29931aab0300000000000000000000000000000000000000000000016cabe0ead394bc000000000000000000000000000000000000000000000000016ad91964884878800000000000000000000000000000000000000000000000000085d8cdb8b5a19d41000000000000000000000000e62d82f95e7fb1848b1a01a5972d4f7a0507a55a000000000000000000000000000000000000000000000000000000005f7bf43925a0305fb0ab4a1a1b73a4dbfb4ad525ca66b9fa64e737361343cf0881dfdf489024a0749ceb292f716bbf44ad3e8c5100da848a456086e45fb273d77accf232b8fd74f8a977850e6f7cec0082ad46946b175474e89094c44da98b954eedeac495271d0f80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff25a096609f8ab69bbb36e2862e2d6cef3fd7cf38535243737431aac84dcb7ee0507ba00dc3a059327e9248c684828dbce2b482843dd2b066058ba81cb3bb4a4fd5af16f8a93e850e6f7cec0082beb29487047986e8e4961c11d2edcd94285e3a1331d97b80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff25a0d2e07ce83d0c82fa351da9765e8a9ba3cb6c48694e40f39cfd5adaf10a114aa9a03281be0d89b856c3b5615af37ea71e8a8860b9b626d71d7bccf082b99fc2861bf9016c819c850e6f7cec008301f961947a250d5630b4cf539739df2c5dacb4c659f2488d
80b9010438ed173900000000000000000000000000000000000000000000001080bca471a87251120000000000000000000000000000000000000000000000000fd6f74af9fc2ee000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000f13f7bf69a5e57ea3367222c65dd3380096d3fbf000000000000000000000000000000000000000000000000000000005f7bf45800000000000000000000000000000000000000000000000000000000000000020000000000000000000000002a7f709ee001069771ceb6d42e85035f7d18e736000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a0e901cfdeae3ac58d894863e2c810c32c3dd9d3c93c2a5c205a1dd1f860d51d8aa07454fb37616bb4897a4ff1aae0690c678b4039fc88cbb24b942767444027c3cff86c09850e6f7cec00825208947fd97df8b1bd5b29e97ccf09197fa86196b6a79e880de0b6b3a76400008025a09f2c471e5f9372d9d1d67ff2ad43a2a290cef47624e71c0d41e28a6e4b7ce7aaa05601a15d106d6d9ebbfcaa8d755c88d7ab4144f689d394b712a30ddeb4794d78f8cc82525a850e6f7cebff830249f0940000000071e801062eb0544403f66176bba42dc080b86400000022000086fef14c27c78deaeb4349fd959caa11fc5b5d75000000000000000000000000000000000025948db5a78975a28a00000000000000000000000000000001000000000000000000000000fca59cd816ab1ead66534d82bc21e7515ce441cf26a0dae920ba1886fd25c3749ebe20b67e39de3fce33227c1205a9fbd389652f8674a069100b5700683f1f58729cf795837c93a1ffaf8095368a82c477e287b8530b8cf87083032eb4850df847580083015f90940ea6bf3bf79c21703f34d021c2b7dcf76c44fa888804820fbd9230bc008026a0128468f13235769729c71c3929fb11127d400455ff314782c1508c3c6c273163a025cdb5a37aa93a72b1848940336010df7e27d89396a3e6d56dfd44dd373d4538f8ac824268850dd4841200830119ed94dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000008d8478f898907e6251d98601f32a0c1a94d315580000000000000000000000000000000000000000000000000000000001a35a541ba092cf05068392bebbe1e7fb04c7afc1e8781b5dfa4aa8d09bce894e5d9353406ba00dd9f708bd64cebc57b26596c553d05620084aebab5c219bbf5db14ee071a0ddf901323f850dbcac94198302ea14947a250d5630b4cf539739df2c5dacb4c659f2488d882f24f966aa302b2eb8c4f305d719000000000000000000000000dac17f
958d2ee523a2206206994597c13d831ec70000000000000000000000000000000000000000000000000000000047820d4100000000000000000000000000000000000000000000000000000000472685810000000000000000000000000000000000000000000000002ee8a11d3104686700000000000000000000000015fdaab0c0e130c7f0e72d7efab5e96f103f143c000000000000000000000000000000000000000000000000000000005f7bf02025a01199f616db8a6af136eff302e4dea17713ab7a1f82f4ca3967dcdd7124626098a01f83463019dadf0b2f4d6d60808be65f59a02169c0a267adf8adcb2eb644bed6f86f83010d82850dbcac8e00830186a0945dee759699e83cbc165fa0bf12f80a9ad11c6a0087470de4df820000801ba072ca22aa17cf7a155c6be34f2591ff378832d8db9331009272d5edbfb3b38453a010fd777ad7ccfee732efc0b9309f917495eabf8c59d6425c5719af0cdd42654cf9018c8192850d92f367e98302d223947a250d5630b4cf539739df2c5dacb4c659f2488d80b901248803dbee0000000000000000000000000000000000000000000000d8d726b7177a800000000000000000000000000000000000000000000000000000000000004186672900000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000834d399905c86b993354777ab858e08f6d5b996c000000000000000000000000000000000000000000000000000000005f7bf45c0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000ff6ffcfda92c53f615a4a75d982f399c989366b26a0b0f7ae019099233e1a7a4fee1bcc0dc0e21cbe61e49f110cf582169f2808dfeda0663acd0f4b34b6d4471748bc1f1bd3bc39d615d725592816d5a57866b6170a07f8ad8314be13850d4576fa00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000009a6cc6e8a82f6f4e10972300d0b1094072007dc000000000000000000000000000000000000000000000000000000002625a00026a02f21d1dfe66cacbd2d5558809ec77b7c2fa772bf02012f823bfe860f112fa672a056a28ff19d10debc6eba7b0ec39a5c61e88deb0e0258380421bb48b1f270c3baf86d81a0850d4576fa0082520894104ff25f04d2ebcc602495fc5b4f4d422eb7975088016345785d8a00008025a08c8ec39beb6ff5136625b1d07d364bbcdb8f408ffbc65f
01951a7b6491601508a0425709fcd2cdd064dd1196044283e0b8667635dd72bd606015301f95c3baf0c0f8ad83147438850d4576fa00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000eb845a45e0dc707164b55f04e6a15e967705ff2500000000000000000000000000000000000000000000000000000000743b3c6526a01f72ff693b4ccf22e1e091643d9b1434e90297e8164560c6047844e0e18f1c0ca02886161bdc412246a540b082900dbddfb7448f1227405b9b52f9b67a858f1e01f8ad83147439850d4576fa00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000003e8d29f9ef0b3b4bdc997c7e644d1e44eac5a185000000000000000000000000000000000000000000000000000000003d648d8026a00748de85240f9a86b2584f9c905f7239a9cb6e136a7e495f19f3c009a62912d9a04cc16a5a7497b42d45368e01d0d6348edad9f8dfc62b8f80c6d23a30fa96115df870830115f1850d4576fa0083015f909433f543df44521a4777f9b65756922b4a51b189598801bea209329540008025a09500efe32941004db60b82da2478379add6ed9cd9592e48ebd63dac7f68dd42ba05977faec04d591e3bb8c6be36e7cffad810e8fdc614f9d0c0a877ee4fec8822cf870830115f2850d4576fa0083015f9094b44e83a8d9aaddc4864f5105822dd5ff234741f7880b4d876ab52f80008025a037d89eb746412bb23c7bcb9090736bc1ec8494c4094227676065f48089d1f4f1a005d6a3a8def1ae1c671ab50f172cf0ea4c744a8a40a8d926a693578e16044949f8ad83128a50850d4576fa00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000000556c4e74997b8c2aa9fab24bcfa7671714124ac000000000000000000000000000000000000000000000000000000001e0a6e0026a039497e16a9268eae2309470719234e0d0585d8c98c61e4ba97a2a10f5a586e1ca070f0a295cda26024665e80966761bcf4c9d489ca8a8211c83763edbd6abfd615f8ac821801850c92a69c00830141da94dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000097870b2951805078c826dec929a0a4e39c1e3ec40000000000000000000000000000000000000000000000000000000005b8d80025a0e40cbd82ebe816bd45396bc46035bbef4062358f71a6f808b6a13c775644b0a1a028b30761ba0479db9a2dcc965b9d6868eff9975f24dd9129aa6bc4f5b480e00df86e820e41850c92a69c00830186a0949469805c2450c797e94db7b6844378
83af5b26df87d529ae9e8600008025a0766d5f402b9cb8b770574d56a0a18e31a2858d953c02634035540e67f816e67ea0660cf0f8727caa2fff314e153021f34f08940f71af0300051eb9be94334cdeb6f8ac82013c850bdfd63e008301075094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000ea68673a2aa2d7845ebda48f68fece2233a3b90e00000000000000000000000000000000000000000000000000000000042c1d8026a03ddea161a13234d724e4d468935d97c898c1ac1d25498e32ccb37173ec12a9baa022f1e426f03dd02b67cea7068830628ea16df0a853e6a33a772d1b1a5b5215adf8b9829ae2850bdfd63e008304f58894000000000000006f6502b7f2bbac8c30a3f67e9a80b8510000d9c58600140051000000000a68000000000000c12d099be31567add4e4e4d0d45691c3f58f566337134075f5b5a0a94ac891c7b5ec5db5cfcf392cc04744ab87a4c37afd91680ef280b96ee21a026e26a091f853150fbed3d6ef89aa0eefa97ce96374ce267ce44e48b8e355aaae52450ea06d2ac99c554c8d922523541dbe7e961c87cd58621f90bc6d95b72f3bf103044af9029182122a850bdfd63e00830bc315947ee8ab2a8d890c000acc87bf6e22e2ad383e23ce80b90228865a6b4f00000000000000000000000063faee0a2cbcedc0102575c6ff17da38b1565c2200000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001847ecc63970000000000000000000000000000000000000000000000001c9eb028e55c1af0000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c12d099be31567add4e4e4d0d45691c3f58f5663000000000000000000000000164a1229f4826c9dd70ee3d9f4f3d7b68a172153000000000000000000000000c04744ab87a4c37afd91680ef280b96ee21a026e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000078d65bce7dd600000000000000000000000000c12d099be31567add4e4e4d0d45691c3f58f5663000000000000000000000000c04744ab87a4c37afd91680ef280b96ee21a026e000000000000000000000000000000000000000000003b3558c3d643af
5e8324000000000000000000000000000000000000000000000000000000000000000126a0aa58ab93b2fc330ed5fd4d5e9e5165877fc517bdc944e03f9b107de1d558fd9aa010272681e6e57c3aa74f719f526873c81e01d01222157448e913f62ab9dcd06df8f6827823850bdfd63e00830a4cb894000000000000006f6502b7f2bbac8c30a3f67e9a80b88e0000bb6bee0051008e0000000014d1000000000000c12d099be31567add4e4e4d0d45691c3f58f566300000000000000000000000000000000000000000000000000000000000000000000000000000000003b0116363e435d9e4ef24eca6282a21b7cc662df164a1229f4826c9dd70ee3d9f4f3d7b68a172153c04744ab87a4c37afd91680ef280b96ee21a026e26a0c4e7826ef9776170a23747941d8e2031e1d632cf4bd0860d00c5ecf14f0fac4aa03a4ab912aa4b0bafeb1251e62a686c05d01856725c2bc9fcb9d514b16e8c3efcf86c01850bdfd63e00825208947a3b3a48da48dfd7171064c43efca3eb88dd83bd880a804988f69b5c008025a07ab7ce5668879c2a8b2271e9899006d1f955755bcca0e84c7aacc87f7384d8f5a001c09a314305066a7a99b0757c3fd34306fd740c3fe676ce9b779989027002def9021f83017e4a850bdfd63e008304f58894e33c8e3a0d14a81f0dd7e174830089e82f65fc8580b901b5000b060d0713abadabced27fd1eaeaeaaf0fd72adb0120000000000000000000007fc95945eaa14e7a2954052a4c9bfbaa79d170ae000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c12d099be31567add4e4e4d0d45691c3f58f56630000000000000000000000000000000000000000000003643f0f256d9376e9c40000000000000000000000000000000000000000000003520346458fd696cc0e00000000000000000000000000000000000000000000000018a893fdda355e00012000000000000000000000c04744ab87a4c37afd91680ef280b96ee21a026e000000000000000000000000c12d099be31567add4e4e4d0d45691c3f58f5663000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000e33c8e3a0d14a81f0dd7e174830089e82f65fc850000000000000000000000000000000000000000000000000000f539c0ca6be80000000000000000000000000000000000000000000000000000e92936feea3900000000000000000000000000000000000000000000017cd98b0be009d000001ca05422af299139186ec365f14c427205bc76e7b41d7d6eaf14183a282d41fe90b2a00cc85b86c76ce5d6f9cdcad085624d82476baf34d6c38afb99c8
db0ee02a906ef8d48260ee850bdfd63e0083061a8094000000000000006f6502b7f2bbac8c30a3f67e9a80b86c00000bb276002f006c000000001bc1000000000000c12d099be31567add4e4e4d0d45691c3f58f5663017fc95945eaa14e7a2954052a4c9bfbaa79d170ae00071afd498d0000008ac7230489e80000022b1c8c1227a00000c04744ab87a4c37afd91680ef280b96ee21a026e25a00e84a9a46e23d619837c594d3192cea4d9fa388066a4d5b100f79a17a9b45928a06f82d747d441daa4de0c573baf56c442c53c615021b5e9e23d25a3f985d5cc10f9015233850bdfd63e0083026b7d947a250d5630b4cf539739df2c5dacb4c659f2488d88243ea5ce72ec5d47b8e4fb3bdb4100000000000000000000000000000000000000000000021e19e0c9bab24000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000c281e970225cf3bce07d9024b4423b1fd3df0e29000000000000000000000000000000000000000000000000000000005f7bf4390000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c12d099be31567add4e4e4d0d45691c3f58f566325a053005935343bd6b8355c3559ffcd855b30936a0386fbaa36b04aecb6b6fd51c8a0670215bcb30cb573ab8b4c424323039b00fccd611437dc99708c4a93769590f0f8f8821662850bdfd63e00831050879478a55b9b3bbeffb36a43d9905f654d2769dc55e880b890c89e4361bdb09816eaff92a339048a9119bfdb7f0b848b42628642fe0abf20dcc2c82f28fe4f3e406928c22938f493207e57277a8b483b3b122dcdc4000000005016c281e970225cf3bce07d9024b4423b1fd3df0e29470002b67d020000000c427fc95945eaa14e7a2954052a4c9bfbaa79d170ae000000c12d099be31567add4e4e4d0d45691c3f58f5663550000011ba074a4ce83493116995c8eb5746c1068cdd6422514ad83ef16f9e113c69bdb2b93a03002e2ca1e6e7ad945af6eb93d7a4b1d66c8e24efcee9923a565a4619f5640e6f8ab821a47850bcdf49b0082c7bc94a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb000000000000000000000000948045ebbc5fe5ded30f2ace84f99ce1b9fc30a900000000000000000000000000000000000000000000000000000000001bd4e325a083ebfa18687b06c006e7db126cc80855081a2b9954aa2ef84614c467276a1e08a01be36d428ed259ef55ef0cf93515fba796518b62c2a101bb3dcda2fbef6e20a6f901ed820182850ba43b740183061a80
94a57bd00134b2850b2a1c55860c9e9ea100fdd6cf80b901841cff79cd000000000000000000000000728258295b1ccbc61d72c4e86c5b44345ac790ce000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001042fdc7315000000000000000000000000c2adda861f89bbb333c90c492cb837741916a2250000000000000000000000009f8f72aa9304c8b593d555f12ef6589cc3a579a200000000000000000000000056178a0d5f301baf6cf3e1cd53d9863437345bf90000000000000000000000000000000000000000000000228a17fe49a0620000000000000000000000000000000000000000000000000f69c471cb506f64bd8000000000000000000000000000000000000000000000000015c808740b32a90f000000000000000000000000000000000000000000000000000000005f7bf0340000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000001ca05df7b3acaeb55dec2fb125f4261814983a0f4fb24696bb3c02ecd4bdb8664ab2a0634cd17f4bd2eebd956dcd1c60c882483dd145f36738802d1540d6ca0bd18e6ef9015482016c850ba43b740083026981947a250d5630b4cf539739df2c5dacb4c659f2488d8801028b9db6d1e430b8e4fb3bdb410000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000277d46fcbe71144c6489d182a8aa00da61b57b07000000000000000000000000000000000000000000000000000000005f7c43e90000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000014c4d8e57751e083bcd1edcca5b435720166f5e925a00985497b640cb6f70739b068292050a427b6e0c908a9d29422582a9214a6003ea0415cb1f10d3b6f2a201ef340dbc8ca8c67171e7422cd315bb9fb574d36111317f8a901850ba43b740082ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000eba638932d82ebc1161eb9a992572ecafc197f7e00000000000000000000000000000000000000000000000000000001b9df78601ca0f0f487cfbb3896e83daab39082d361b5df9c6b7af1b24c227268f23c886c6ff7a00db21e461dea4897cb626c4aa60df3979aba5ab4e89971b962ecc59729403e1cf9015258850b
2d05e00083037033947a250d5630b4cf539739df2c5dacb4c659f2488d88027f7d0bdb920000b8e47ff36ab5000000000000000000000000000000000000000000000229a587e3e68685925200000000000000000000000000000000000000000000000000000000000000800000000000000000000000007562be2022d31a75f9887b7b932256c704f0c8e7000000000000000000000000000000000000000000000000000000005f7bf45b0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000009b06d48e0529ecf05905ff52dd426ebec0ea301125a057e78766ae798e30b06e2ffea600bb64950ee8498790fae4be4232a58a7dab76a00bcf0307bd910dc4eb8a0f59c08c38941c951ca5c176e394c20e2802f0ec0569f8a928850b2d05e00082f17594dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000005f053acac0d67f0e6d1cc7a5284727ddc108a56400000000000000000000000000000000000000000000000000000000389fd98026a0af5ebed76724a1a2ec0480f7a928c758f105039ece0c9dd358fd36ff89148d96a058f4978b9b948fc259616bbc48bff2d42b201af89d3920f730c9bda89bd95ad9f8aa3c850b2d05e000830320b394de4ee8057785a7e8e800db58f9784845a5c2cbd680b844a9059cbb00000000000000000000000038e3c92a543ed6c87eaed33bcba04b9d302912200000000000000000000000000000000000000000000004d12beac3ee39f0000026a0df7786f7994c5546becc6741ef476cb6d1a4b07cdfc95ce9d4c54973b86460baa07a32b0a60ca94b0902b6a2640ca23c65daecc540d4c60662f8e1488373659117f8ab819c850b2d05e0008301494794dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000002ebd488b3b362228c503afc7cdf1698f37722fc200000000000000000000000000000000000000000000000000000000000f424026a0ac0838acef8e84034d475c2e2ab9b194ca0a29ad1550a69aabc497e857373d96a03ba2143fd0ab21a2ca741b874ccd2db3b6dcdf36756b7b7e678b90157e068f4ef86e8319ac74850b2d05e000825208941c6cc4f378266a1ea89edf1f92fa529869571a18870e90eda39440008026a0f1885607a6a384eeca3f2a221cbae0a7d1be32189a26e3f6024f1013b6458a66a02eea3bde50f2a790547d02e53f21a721131cc0d6823a7a0f104a1d2e027bcb9bf86e8319ac75850b2d05e0008252089498aae5c4097f803a0616b537a4f62bb25ad837ec87
0e90eda39440008026a08da329c004df7420e2065f1344a77dc6e2819b5fc5db5812c362af58596989e5a054e3f72cfedd01491d81ac512663d1bc6915757f32d12ac65385214dfa6b8390f86e8319ac76850b2d05e000825208945b879b10efcc5400c4b5931be5e577c8c97593d1870e90eda39440008026a02260ca75dc848b82e2e24e00de3dd0bea9c664adfde6805a791c77c16dca03f5a0225743a8e187c5b54148cb099920874a99a9e761b2870e46a3f9bf4d60fe9e4af86e8319ac77850b2d05e00082520894ec0ae94c1c03959118a66fbb43fc39bc2c1bb63b870e90eda39440008025a08b750dcdc29374b2cb70f517ca646f5725b820a0acfe10b9e3a216a652b2f54ea0765bc5c251ee433c85ab35bd130bba145df8511fccce743adafc46722d4da752f86e8319ac78850b2d05e00082520894558d7cdf026486652f3d94b2c860c9fdb9b05fae870e90eda39440008026a0bad87f9413c9d85e5fd708e81d62f68d8c34addcfb8e269594573c6f9a9427a2a0672011774c1598a0099b20bb3426768a96ef8962de389ecb94d6eb7e0d5f0f81f904b375850b09429fca83053d2b94dc6c91b569c98f9f6f74d90f9beff99fdaf4248b88745c7039d837c000b90444288b8133b1d717c9fedc8e8f40fb977a4a887e7f3b9721a403109441c45dac7dbd42d537000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000032464a3bc150000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000745c7039d837c00000000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000d0e6cbd9da5d6214818a3ca2c4be074174faea8100000000000000000000000041f8d14c9475444f30a80431c68cf24dc9a8369a000000000000000000000000b9e29984fe50602e7a619662ebed4f90d93824c7000000000000000000000000dc6c91b569c98f9f6f74d90f9beff99fdaf4248b00000000000000000000000000000000000000000000000000000000b0de47ff000000000000000000000000000000000000000000000000745c7039d837c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005f7bf6caa989bdc492903b542331fb370f3307fd1b2b2bbabf59
c552db34fd50dcf47be5000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000001e00000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000581c0d6b99cda91d02c52cfe70cdee39e3908e10733ea0c7bbea96e4bd0cf1eb3c9a179df6e905e92a575c6d947671bfe84aa761eca6ffec506e310223bd236d6854da8d98e2a1a211130e940c71f2004885e8c7810800160400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000561c4cce9f6578c835fb47c7b5d6fb1b4d4f5f815dda45bd98d920c70577d1f063fb3a80eacb0dddc9730cef6735e23abe58b2747d3173eb0f75ce1ca86d3472bb01da8d98e2a1a211130e940c71f2004885e8c78108040000000000000000000025a014fad5808e4a9ae55ab8dd2c1807fa7ec2c1d4c15bea056922b2e791b6264941a047f50bb736001dec7314f4ea345412053d147442a4204236d5f27d3d69ccae6cf8ac820244850af16b1bb383030d4094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000ba6f0cb6688748e1dd48d1bd5924f930bd164ed2000000000000000000000000000000000000000000000000000000000cc6438025a0b5374ec5198978e5e9e362dbe3dce2a7271bbb466ae3aeeb4fbc7fe845089986a02110277cc3dcaf906ef487fe923f0c36347415a31425449d0edc92c9777d5782f86c1d850ab5d04c0082520894c0631a2e7e8b96f7a22e117560b39c9e908a6da98806f05b59d3b200008025a0196275ae3606075a8f23e705b9c8438c0fe9aba2cdac79ca0fbc52b456826ae5a003071f97a20eb4e372adfe0261bc123f31dbe1fdf431749281a8e4626e6f9ddef90d8b04850ab5d04c00830357e8943b5d2b254224954547a33cbf753bcaa5eb4b27bd80b90d249ec9b36b0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000
000000000000001c5d932241d20c7a72a42bc6a023437347bb80940c45e7fc8c4c5e6f1cc02c591b1c7b04f95c1be906d5354ad4d3458e97a04a9ba7a45b1303756728eaa0d45b7f000000000000000000000000000000000000000000000000000000000000003200000000000000000000000007bae6e785f8c9f5aee6ba49cf72d19d5e412707000000000000000000000000000000000000000000000000ac7b5695e7e7f530000000000000000000000000c31bf9a4969a3ad0f2b9823317b329125bd3e923000000000000000000000000000000000000000000000000abc29874867d4bf8000000000000000000000000d1e6fc718660618d73d458a77bd0339f6c2f4212000000000000000000000000000000000000000000000000a98966b8c60716b000000000000000000000000066b064b5fac00eeb32cd6bce64bbf5fd98581b08000000000000000000000000000000000000000000000000a8fc4c2e8afd20f000000000000000000000000061f1d048323de4aa996290f63a1e2a29636a3d06000000000000000000000000000000000000000000000000a8d7cb23466da910000000000000000000000000ad1b4d6d80aea57c966d9751a5fe2c60a0469f60000000000000000000000000000000000000000000000000a7a6751221a3ac90000000000000000000000000bdc7ac031671bd67190b09a8c3cb1e7872b6c6a1000000000000000000000000000000000000000000000000a7883a2f27b3c020000000000000000000000000088b99442b893a861db16e5c8c6a01a66d0c3ada000000000000000000000000000000000000000000000000a6e441d4b6183960000000000000000000000000dcf54138acc74e9b99570af10328e89d97d5512f000000000000000000000000000000000000000000000000a6610325fe2ec410000000000000000000000000d65e87f756d0f21faceb8d32eacc509c1284f25d000000000000000000000000000000000000000000000000a62aa70b1f6f1f6000000000000000000000000037a33fcd214405e74ed93addfc7f0787326c4f40000000000000000000000000000000000000000000000000a445a47aeb3c1b1800000000000000000000000068799b81c7bfca8c661197645755ac33da8ef4ff000000000000000000000000000000000000000000000000a3fa48f31df4cd6800000000000000000000000090496a8e1925ad4a1931ca2be0e50480abc24e8e000000000000000000000000000000000000000000000000a2f5c981c11bb8e000000000000000000000000097cf2bcd41c323940022bf8906da9011b8271ab5000000000000000000000000000000000000000000000000a289933188fd0790
000000000000000000000000945fe27749f564173237fddf47749b676a14111f000000000000000000000000000000000000000000000000a1088c0a7dd222c0000000000000000000000000b21763ce87472dc76fc3b485fca9d57df5d47bfd000000000000000000000000000000000000000000000000a0b781dfa364764000000000000000000000000067513103973cf85cb20f2f8ebe61cfc1990d3d3f000000000000000000000000000000000000000000000000a0820178a791d2700000000000000000000000003b2e5507929f9b794f39acba3f0311cc636390140000000000000000000000000000000000000000000000009f93541fdff84f4000000000000000000000000080c5cae1fc0b6a548ea16fe40171d01555f4f57e0000000000000000000000000000000000000000000000009f8a6ff0dc0416b00000000000000000000000003d16451a4b73e778bfec126025ba79716a17e32d0000000000000000000000000000000000000000000000009ed512db2aea289c00000000000000000000000033debb5ee65549ffa71116957da6db17a9d8fe570000000000000000000000000000000000000000000000009e9a91a4b0b0683000000000000000000000000024d0db7e182fcd29ddea14c04bcb182c89cbb0c00000000000000000000000000000000000000000000000009dcaa8e3be581688000000000000000000000000952ab11461fc27f65175b0469eee9f0f5af3a9ef0000000000000000000000000000000000000000000000009d3748d8c22dc31000000000000000000000000061af6c7925cb106ea04fb3148affba6220bda5c20000000000000000000000000000000000000000000000009c6cf18620f1e0180000000000000000000000002db3c0f42022fdc8dfe70036fee85e48a24b88af0000000000000000000000000000000000000000000000009bd0fa53e03389700000000000000000000000002fe50c88f228dacfc24100de0c5167aa7a539dc600000000000000000000000000000000000000000000000099d85913e41be54000000000000000000000000033d66941465ac776c38096cb1bc496c673ae739000000000000000000000000000000000000000000000000098e8c47b582093f00000000000000000000000009e75d5cf5d6a1970e47bc3d37f3c21a878c321e900000000000000000000000000000000000000000000000097c535c877dce088000000000000000000000000852bab46dd2dd7a585d23a79a8600c2a65fcf2e000000000000000000000000000000000000000000000000097811829ba6749d0000000000000000000000000bb9297f03e92ecefe1a45bda0e585fc6f9fab4160000000000000000
0000000000000000000000000000000097811829ba6749d0000000000000000000000000dabc521955476811ed6d39d7d54224c37bd8acb000000000000000000000000000000000000000000000000097811829ba6749d0000000000000000000000000eb5831daff60bc27a3af7ded164ce159dcac035100000000000000000000000000000000000000000000000097811829ba6749d0000000000000000000000000f4317d52acdaf924329c8a15c37a9273e2a3797b00000000000000000000000000000000000000000000000097811829ba6749d00000000000000000000000006c31b772cc57cac99c85d2d0b0df8e54b07a7a5500000000000000000000000000000000000000000000000096b154951fcf8310000000000000000000000000ea94c9a3bcc2bf5a3c08e3de1ccc64c6b604971000000000000000000000000000000000000000000000000095585cb6c4282a10000000000000000000000000e5ae7e62ee71f675f2fe0ff5cc83c215a44e76d100000000000000000000000000000000000000000000000094ca53acb0c381500000000000000000000000000db5e7924d87d2ec27876882e7af911a58050dd1000000000000000000000000000000000000000000000000932042d59dfabc70000000000000000000000000c2d3ad9240a85cbdbf2e77223185542996f192eb00000000000000000000000000000000000000000000000091ad5c281f8ac6d00000000000000000000000001fd252771bc2dfd38ec774efeeb3c6ab7e8e535b000000000000000000000000000000000000000000000000915a77a49303dab80000000000000000000000000b946efae53975b97a0d1d02f75fabf55d0d6a960000000000000000000000000000000000000000000000009082d22b97189770000000000000000000000000e07e487d5a5e1098bbb4d259dac5ef83ae273f4e0000000000000000000000000000000000000000000000009074a6a89f402e980000000000000000000000001fc022c33332bb9e45f3ff8129a51f03347f842f0000000000000000000000000000000000000000000000008fb33fc665fa01a00000000000000000000000006726b6dc15eaef7a530cd9e27a5027fff66a46420000000000000000000000000000000000000000000000008ec37e27521c85500000000000000000000000002fec5d8835b39f314187dc1da933b273f32f18c30000000000000000000000000000000000000000000000008e2577f5cf5c0b700000000000000000000000001f4d088464a0175c8ddb90bc7a510b1d5a0da1a60000000000000000000000000000000000000000000000008c7de56bd4e5a0580000000000000000000000000c780749
e6d0be3c64c130450b20c40b843fbec40000000000000000000000000000000000000000000000008c14be8f739d98a8000000000000000000000000291502048a3396486cded59288df83fb9cc9d1cf0000000000000000000000000000000000000000000000008c0d91180c2789f00000000000000000000000009e199d8a3a39c9892b1c3ae348a382662dcbaa120000000000000000000000000000000000000000000000008bd9a02684ae1cc0000000000000000000000000c13ec844eb19d6a72ddd5f2779484ba35279a8170000000000000000000000000000000000000000000000008bd9a02684ae1cc0000000000000000000000000970c7ab1ca32547eaacdfcc89f6ed924600da8670000000000000000000000000000000000000000000000008bd9a02684ae1cc025a0d7be5b752c62db2acc078d5ec60fdf525d8d1866399827772c7b97c8b2996b7ea0390cf16c61b37128ad6e8e56a36190e11de890f326ba7014b82fe655f08cb37ef86e8305985c850a567242d6825208941d86c8a33c7b3b6c7f2fc580e7a2b076c49cac75870118a797e62f6e8026a029ce1c2f5f6e95826e3484b579a32b14088966c802120a7f64051d64188683f8a044a9a1c7f2d05971808a1699b2d21e1240dd5e99ddb716cd34be9ebc23e17ff3f86e8305985d850a567242d68252089499ab81b3e74800ac05958cf8e8ebb9d3a3f994648702c8cc350d8c048026a0f06dc5296371c0503dfd1a8c7c1372f23bce4ded60a992e348df49116241b12aa0659b3d14f8a3a9a26bbf8571e9520c8f319977a368c665ced3708daa18074703f901ab43850a3e9ab8008304fba5947a250d5630b4cf539739df2c5dacb4c659f2488d80b90144ded9382a000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000001e4bc4bcbad7000000000000000000000000000000000000000000000000000000002ddd72f10000000000000000000000000000000000000000000000001e3cf46b06880d66000000000000000000000000ce156d5d62a8f82326da8d808d0f3f76360036d0000000000000000000000000000000000000000000000000000000005f7bf4390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bfd21d58146585fa30ccb92362c65c6e09d6b9ee2bdc77e87f02dae12152a13f72e3c0ba57b03f89e25d5ba8bbf4f945ac5b8f1693b7739f3d802f640876b682326a062147cdc091cd1f38684508540739bcd6fe25c07e3bcec27f88edae81e0c4753a0569a6a8f75bbac
18ad6a97ee205aebafa30f8c31d53d0f582d6630c0eee3babbf9012c820539850a02ffee00830493e09451b4b27a7bd296fd34ca7c469f49d5bcd7fe513780b8c4942d468b0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000007c5e7fe5fbef2a35b02e1bce0d39e0076ec389a30000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000c9c653445750800025a013e64eaee87f711f381bb4a6c240ebe85489843050872b076872b512f7c809eda02864404d277f8dd792b0871b2873e086725661906c5cb606e93a1a4291261328f86b82040c850a02ffee00830493e0943388a0117f0c5c7f493afdf46ea03fe7445dab6980843d18b91225a0125032c84dd6e8d83c39daaeb652d36590cc3fbc36924b2cdedd080a70f5d819a03cb2f9762d738d5140831726d20d039e83a0b12ed93e28d0f5563737d1a043b0f8a901850a02ffee0082e74994dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006c8d7c88371e673807372d7d3eb84101905be6cf000000000000000000000000000000000000000000000000000000004510cee21ba03e58bb7a78dcfa7ed656e565fe79ab2ff5787e6790d18abcd8db3151f4bdff32a024087d8b67f5a2c7f4e82d73444b7fe59d29128fa3e69a407da7ded484f1633af8d2278509fd0a0d00830124bc94006a7fab93971acdcfd5e465c29d714ea0d530b5880de0b6b3a7640000b864ad65d76d00000000000000000000000000000000000000000000000081c4d9b0b73ea666000000000000000000000000000000000000000000000000000000005f7bf45c0000000000000000000000001764dd87455540b164f45edd8d1efabeba26d52425a03359ecd73c9462378f22b31f6d3cc9cc0f4689d2748baf03a15bce87d66f826ba00ce062c7e1e797ecdee2f97625959230607aad8f182d44c367e855e26647fdfff86c028509fd0a0d00825208949187a1b5022ad5e801c474589c3d683e067467c188058d15e1762800008025a0151e70852cd730f3a3069bc8088f0caa6dec4c16defb7cf78776b0a55477641fa0709edc84332329640ba64a75b28e36c2bd9472cf251f10ae679da3cb0a8665b9f86b038509fd0a0d008252089469ae0b74d23a741a25a6e997de6418f374a0cf4d870c8304402804008026a02d86bfefc7eea31f1899d12e71d8ad9399c4a74ccca7fe43218a
485858dc6bf3a00c93da0de2cfeda1909b651cafc831f28ab568b18a195bc5c4538ed09094f372f901548202848509fd0a0d008302a6b4947a250d5630b4cf539739df2c5dacb4c659f2488d8820b00eea5820b8beb8e4fb3bdb410000000000000000000000000000000000000000000000001bc16d674ec800000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000373d0ed6a697846661c3b0a0b9b94577c564c79b000000000000000000000000000000000000000000000000000000005f7bf4690000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c34ef872a751a10e2a80243ef826ec0942fe3f1426a0b9c2f746418b791458f6a3437cd5c73c486373a6917c2e6def8d938539aaa492a03e9771211cee9602321727096da87c4b78b086a5ea0d08fe6b5164217333eac4f8ab821b878509fd0a0d0082ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000003c707feb77ee94b42ea0544c6982865b651ac9660000000000000000000000000000000000000000000000000000000005f5e1001ca0d26fff2eaa7936817c0d64c73dd405a2671e6db3f9d5d38adf58312f035cf734a0418044c1fe119a29b5b280dc352c96a70d1ebc1a31e67dfbd4d9039f063f10c3f901527c8509fd0a0d00830243a1947a250d5630b4cf539739df2c5dacb4c659f2488d880d2f13f7789f0000b8e47ff36ab5000000000000000000000000000000000000000000000000073b44eb80b69e320000000000000000000000000000000000000000000000000000000000000080000000000000000000000000579c90f77ae0c44ea7ba329c41a980d36348d8a7000000000000000000000000000000000000000000000000000000005f7bf45c0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000005befbb272290dd5b8521d4a938f6c4757742c43026a0c9e13f207c1877b89b510db359d494bc9a46cbb54ecd7227eb28c8f101decfe2a05ada94e4ecce058759c82d417fb1135c93d878f3e2143f73e88a2741feb1a853f8aa018509fd0a0d00830132b294c05d14442a510de4d3d71a3d316585aa0ce32b5080b844a9059cbb0000000000000000000000006a43c606609fd2f10b5e04bd199c311f2381a26a00000000000000000000000000000000000000000000007062f534e5517b00
0026a0f41544dcc58b2f6c501f4d339e8019c1c05cdd8a95eea59a11a67564e06c16f0a070ab365c8e8138326e309989eba25093c4731748d7449f1903a403118f2ff62bf9016b558509c765240083029c35947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000000000660fb7013f9c7b1a0000000000000000000000000000000000000000000000000cd39e0af9d7081f00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000f68b6c658087cc45a32afdb8c38509f6eaa84b0c000000000000000000000000000000000000000000000000000000005f7bf45c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000038acefad338b870373fb8c810fe705569e1c7225000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a05479d4365dc72b124109ddf48047e17ed0bd6b111dd72343dc57f3f0b23df5fda0544dd5bde89240cee4ecadcaaf71ece54053a68646fe98203b812cb5d9e6ca87f9018c81968509c765240083030526947a250d5630b4cf539739df2c5dacb4c659f2488d80b901248803dbee000000000000000000000000000000000000000000000091fce1efdfdef40000000000000000000000000000000000000000000000000000000000003185259f00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000b15dc08b5d8a0c596c1f7970732ba7e5074c9ebb000000000000000000000000000000000000000000000000000000005f7bf0b50000000000000000000000000000000000000000000000000000000000000003000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000009e78b8274e1d6a76a0dbbf90418894df27cbceb526a02fbcf47980497d578369380634d9107bce7e5e00e3db4335a46513a054878971a04abc8b37dec7aa082b6fcaa84577838fb08fccbceaff700be48102b5d6c17153f9015482019b8509c7652400830283bf947a250d5630b4cf539739df2c5dacb4c659f2488d880de0b6b3a7640000b8e47ff36ab500000000000000000000000000000000000000000000000274287b3e75d1707c0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000f3c31bb059036a9cb0fe4f516d88aca7a8c0ae0f00000000000000000000000000000000000000
0000000000000000005f7bf0240000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000fdc5313333533cc0c00c22792bff7383d3055f226a0c263437091b1b6470cefea0b3e958b1df35b51bb14211dfb217a0ab42cd8686aa02c97da3139e224a50a66d0d0256a463e6fbe5c3e0eda7d5c4f1a156b07b02bfbf86c148509c765240082520894cecea3a4cc75bdb5681e0c1e9b9ef09a8793bba7880780d514a93f0000801ba0ddd15b761f33f327617511150ef88572812500f1b1720d124b118e39b8ffe000a051cec8a670a822bd6faabae5cdad84d770f97dce67a654bcfd5b65caf3aa167cf86b028509c7652400825208945cfe24f2912ebb0fbbde9d70e6c495e69f58a58f8725ec578c01e8a18025a0e755e96f5d27937fbe5a171c2601df09076a1cdd21f1717a1042bf8196b19af5a03d784c0743047be9f39af71cb3d0eddb9c1e89d4082cba5eb39cd6ed78a0a4daf86b0c8509c7652400825208940adca8d84b20b30a8a7d4ed207ccad825c0cd39f870aa87bee5380008026a09129654f51f4fa187ebf9b7af4b95bfa25a1135f51e1fd247ef9c0dd3046b527a0464e5c2b13f0e3108a67a3201676a00115770a915b2cd8a42e887cbce681b3e5f90131038509c7652400830308d3947a250d5630b4cf539739df2c5dacb4c659f2488d87071afd498d0000b8c4f305d719000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000000000000ac6eb00000000000000000000000000000000000000000000000000000000000ab91f000000000000000000000000000000000000000000000000000711e4fb1a60000000000000000000000000006c015175b6fa02f93ce0fe99e13554fa6a104c59000000000000000000000000000000000000000000000000000000005f7bf45c26a0654606132d677aa2b799c7ff664581d1fd7e10534c7138fbcc871772e275ae46a0250615fdc003986fcda12488d162770f98973e583ab86b8c07132cd0fed93272f9016b028509c7652400830290c1947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000000000013fbe85edc900000000000000000000000000000000000000000000000000000180b9cdf65f766d00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000e9bc563691fa5adca0dab7ed8e7c1341277ed4c20000000000000000000000000000000000
00000000000000000000005f7bf45c00000000000000000000000000000000000000000000000000000000000000020000000000000000000000009dfc4b433d359024eb3e810d77d60fbe8b0d9b82000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a05f5fc078448ccc652abf2e867870bacd9c4f78b10fb16dcc5597170d9d04f7a0a07ec8ad3754705ea33324ff8991dcaa36b8ea34722f506dabc7ed6733f3847da0f86c238509c765240082ae3494f22953567a8807848c36ee02576694649a37f645880bcbce7f1b1500008025a099c7734ad747ff41d40e3ca37f28c1e7c1cbf11bf495fcfc7db4161f76901aeda02c08079a11a875b06bd38b9db97266613f83b2c173462185e91295500b8c86d1f8aa3685098bca5a0083015f9094514910771af9ca656af840dff83e8264ecf986ca80b844a9059cbb0000000000000000000000005c985e89dde482efe97ea9f1950ad149eb73829b00000000000000000000000000000000000000000000004f78d640cc92a8000026a036462ef4f7dc79093050f26be02535f20064fdedcc456c27c45b4e4bcd3b186da03ac78299efb26baf2e54276886e2d726c6459b7d67e614f63f549c9cc6b29cc4f8aa0585098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000fdb16996831753d5331ff813c29a93c76834a0ad000000000000000000000000000000000000000000000000000000009fa9def025a02576ec6be227f2ccebe0fe03d1c89e0c6e85a4da440c13006662ef3b79f246cda01b207e62214fedca6621c32f8f9b50034bb76b73a9a196dde34c737c7591976af8aa0385098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000adb2b42f6bd96f5c65920b9ac88619dce4166f94000000000000000000000000000000000000000000000000000000007733b2c026a07c2de322142a5037fe2b0e849903883af9f7801b64b12049f2eb31fc6b4fc679a04287a9e2e1bf0238aac81b0bca1c6f2e147cd43bf9ef219f4eddd424504e3ed0f8aa0385098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000fdb16996831753d5331ff813c29a93c76834a0ad0000000000000000000000000000000000000000000000000000000077326f5025a0bc3b627348c07ceaecde8c20ae369ed4044435eadb98fd730d39327b03110ac0a078cff03080223c0fc2d61652c62d4b77fd4e2662338e1fd6df25738d482d45c8f8aa2185098bca5a00830186a094dac17f958d2ee523a220
6206994597c13d831ec780b844a9059cbb000000000000000000000000fdb16996831753d5331ff813c29a93c76834a0ad000000000000000000000000000000000000000000000000000000007731273025a009d0269469b831785c4daa4f705547f3379099988f0409427a6b7aebfefc8b07a06004458520a81a83c5603c0ec2717660dfafa2f58c8411d237f44fe1312d2628f8aa1585098bca5a0083015f909432d74896f05204d1b6ae7b0a3cebd7fc0cd8f9c780b844a9059cbb000000000000000000000000e93381fb4c4f14bda253907b18fad305d799241a00000000000000000000000000000000000000000000014fb68f5d03c265400026a01855cad304a3c245f7635998e739ae70924444b10337677c554369b25ee76e95a00ec7988b478d3e0aa4e7a434da8928e413efc073108988091fc817d624d9a2bff8aa6985098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000000a98fb70939162725ae66e626fe4b52cff62c2e5000000000000000000000000000000000000000000000000000000007f60084026a03a371eca9ea58a82b38af8032e73870382e18009cf39d8eb290ed342b0825bfca03f3fa1cce7e07ff0909a1914d6747c33123634dd6a5940f2a3093e020ce9f8d2f8aa1585098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000046705dfff24256421a05d056c29e81bdc09723b800000000000000000000000000000000000000000000000000000000773021a026a063a1a1a30aded50d8619b08e0fbf2b9defc2e521a0731f2953518b65cb413907a02ddaa13c9793edf51f570429f09f714f2a79c1a9a1ccc343899be81911bcdfd6f8aa2e85098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000adb2b42f6bd96f5c65920b9ac88619dce4166f94000000000000000000000000000000000000000000000000000000007733232c25a0eea5d54d8a2988876099e73b7a7b458a370f94f4b7343119da720bbe130624aca048ae11d7219a72fc9f20e1144624db5f1ec7d2b220b5ef59d6cc22c4feda515df8aa1185098bca5a00830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006748f50f686bfbca6fe8ad62b22228b87f31ff2b000000000000000000000000000000000000000000000000000000014b48598026a09bd76874b6033a5cab13a2215a7b3a78a571c4411a8225d4855de261a1ece36ea054edbd5d59ed79cdad561ea97df9fc17977495ded7ce4ea4
955725a10562e7f1f8a911850979e8bd4482a0f994dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006b71dcaa3fb9a4901491b748074a314dad9e980b000000000000000000000000000000000000000000000000000000001091f18026a0737ea17db2080698c2af3d7a9f9ca14bfcdd0e21fc0e592271fe050d4209d1f4a035ae008889f7d5d85b8ec48bc3929f681b09305f36eb65a6760918e9b9b04677f9018d8214c1850979e8bd4483010e2a94d141a26d786e9432d345774721fc141c13ec10f380b9012452bf1d92000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000010000000000000000000000003fdc8e278f5bc4576a7fd13d1a01dc081ae0a2b00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000da57b127b937185816683503de31cdb1fa203001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000af90e3681ba01773e74e7b8be1b0a6cdea3dd634dcb724613891e99b06e690a05addee7d8cfaa04d966d9097c1e3b0f2d2dd7a2eac5deea28d8fa328643d0b065ea86aea472fedf8aa07850979e8bd448301d4c094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000c3462da2baac16770caf09c6349241945c0714f700000000000000000000000000000000000000000000000000000000810351d426a0176f3b44e6e9c0c12c89132a64089ba2b9d66aaf7aee5df92747c5a9bd847fcda01d9d2f4f002f006817cbe5da7375dfd5949e682d24bf347c61fe3350c9098764f8ab821b88850979e8bd4482ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000531052427a3fdb4e1eebc8765cdf6e98972b31d4000000000000000000000000000000000000000000000000000000001e8480001ba019040c51840ce978756fb8d0b98a47b05d36e8e78228ecaf6d223f93fae8f3caa01ae2bdf972eae310ef838938165d47379563cdd265a7004ff42836917da8a522f8a906850979e8bd4482a0f994dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006b71dcaa3fb9a4901491b748074a314dad9e980b00000000
00000000000000000000000000000000000000000000000024d5ee8025a06fe92d92cc309c4150846ea5ed9413490306579b6c7ce664478443e6fbb74ac3a046448faf47445d8b533c6423aba3ab72322b3e24ecdea9a0051c6c7983927715f9018b51850973f2d6008303b352947a250d5630b4cf539739df2c5dacb4c659f2488d80b9012438ed17390000000000000000000000000000000000000000000000002636c37f92d4f0c0000000000000000000000000000000000000000000000000054e31d02918ea9000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000bcc414838d46bb02a5651a8714cd647a0c1fca27000000000000000000000000000000000000000000000000000000005f7bf4640000000000000000000000000000000000000000000000000000000000000003000000000000000000000000dfcb3fe1c69bf561a920f623fd152e8630674d0d000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000009464a922c053c519ac0237a9650e303d532a4d225a05d067e854341536417fcf031529d2ab65232d4d7964a8c7dac9907cf6fb738e8a055549b345c8b0a10e9aba2d721142995d9e4a74393107d46ca6f80a186b13dd9f9015267850973f2d6008302a2ad947a250d5630b4cf539739df2c5dacb4c659f2488d8805734bbb01b06a28b8e4fb3bdb410000000000000000000000000000000000000000000000000de0b6b3a764000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000001ce0c96cf5aad45a8824c22152ec5055f83ddaa1000000000000000000000000000000000000000000000000000000005f7bf45d0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000c7be37b00c1d3127c09da66fadf3d6eb600dece26a07033f3a1433bd9455f9808c6ea10268aa3f0399b351c9fa96487fba14ab00e6da028578d7fcfa99a356d83787d38cec328f981bd2f3a50a5141f22298f8704edc0f8ca028509502f95b383019f539427b4bc90fbe56f02ef50f2e2f79d7813aa8941a780b86423b872dd000000000000000000000000946f2bf0f01580f3063b94115d48221a92916cf7000000000000000000000000f629527029499db6a6154a1cb20eb210735da33d000000000000000000000000000000000000000000000000000000000000927225a04852d05e9c6c106918bc5312720148113e6a48ae4403a6fd436b08
ead64d41e8a05c1cec000313c180f5d29294c27b0d98b3969b7644800366f23ca91b73d31d78f8ca268509502f90008306b5e094bd277e47d0ecdd5db6c57eda717dd8f5a329edec80b864fc1d230e000000000000000000000000000000000000000000000000000000000000898a0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000025a0542d85a2796e173a90b813bce70913b51600f7d180a64195ef04d7e2c7b8f90aa0123ed49099ae6c7a368fba99cdb6b6fdd1622e66b9ad8e663ca7d9065e51bcf8f8aa808509502f9000830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000adb2b42f6bd96f5c65920b9ac88619dce4166f9400000000000000000000000000000000000000000000000000000000e10f4ba026a05574388857f5e60d3ec72ceb629d81b98c336f0907ef37f4359a46dc8213dc16a02e749cdc7f6b0a7b240235be95128f0a603abf4e7ad95e1c3df043eef640ef6ef8aa168509502f9000830186a094df574c24545e5ffecb9a659c229253d4111d87e180b844a9059cbb0000000000000000000000001062a747393198f70f71ec65a582423dba7e5ab3000000000000000000000000000000000000000000000000000000743058279026a063f627fcf6ac293cac6f84b5f8807aef92682368e99b244878d162998e5c250fa07a20fbb570af828a69f7e62d06cfe913fa3739f42cb43c2cc16e036efbaa0709f86c808509502f900082520894f85a34c8c414dc13ea15590cd383198ef0c5c63d880e397b561dbfa0008025a0228e8f924ee18c31d102873dada7c8564021e1bf7b3f717cfa230f59ddc42132a045bb47ed08ed4df51d1325f8670ffb91ee4c8891036886caf72b474afa6d94e1f8a90685094312a10082ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000030950d97d1854f587b73f8346c2a808ad73b73ac0000000000000000000000000000000000000000000000000000000051a88a801ba0a3bb6f585af27c6fd0053936542418d3583b5f7d2887a1ba353160f4edb008d2a01ccc663badc590398149dc6757d2351d8276cbd71c8771549cfd902ceda762adf8aa80850938580c008303d09094dac17f958d2ee523a2206206994597c13d831ec780b844095ea7b300000000000000000000000041f8d14c9475444f30a80431c68cf24dc9a8369affffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a02a8ef1097bb4bc7036343ad59ce010d590ba8c3ad659b160e502
87bc817b6d89a06d4e37d3f219cb4f9cf06328259d09a5441e0bf2aec1eacd999ae3142fca4a25f8a90c850938580c0082a0f994dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006b71dcaa3fb9a4901491b748074a314dad9e980b0000000000000000000000000000000000000000000000000000000005208fa026a04b59e7f4ce7a662e3d133a26fb7beea4c8dcd2df71f162dcfade0a0b28119d4ba06da4670c1575a071084fcebdfb6c73dffbb7d6bde8364d73c1c798aa8b0090f3f86b4b85091494cbb3825208948ac4bf2465dbeec007b7f9809979f22aedeb7806870a71ea17a3c0008026a05a5a5f216393d82283357f4bc9a9b569b467cebdd08da509cc1880301bbefd01a060d0c4d61ad1455b6847965f5f521e62affa0fc86f97eb6395344bb746549344f8890385091494c60083018808941f9840a85d5af5bf1d1762f925bdaddc4201f98480a45c19a95c000000000000000000000000050ed33cc70e3d7952967acb55901b4b44d3cee226a0def74c07de86a0fe72d478fb412e3430b28a2658fe6b10eac18311f6f5a3c4b4a06246e2c183d3dc928cf860bf2c82dc557788a4f8866c179297bfbe1d6479f06df8691985091494c6008311935f9401285d34fe29262a61788775df716acc7ddf897c8084d1058e5926a0ef28dfb04a55901b9a67b00c794502813cfb85b019c9804a004fec560b3c17c3a056254016ed435f806ba9b92be8b3a290a9617b79bac068f223b6abac5220af69f8698085091494c600830878a6949765fea9752505a685c1bce137ae5b2efe8ddf6280844e71d92d25a084331b1cd673c4634c16fa25c6d381b36fcca12b8fdfa34ceeee2f4e7fc68d29a05c0c6ffae21c5c348d0b03c82c6f0cfcb75a10c0b130809aa791977ce26bf17af8ca0185091494c6008301a3869427b4bc90fbe56f02ef50f2e2f79d7813aa8941a780b86442842e0e000000000000000000000000068323fa0171364b9fbb928dcfc22dfe965785f800000000000000000000000003bbdbc26a0aaf9a8991a28b1cc429c8e0ea2897000000000000000000000000000000000000000000000000000000000000927b25a032507f49426c3b56e42db20d0cea50235528728ab5b98097fc562461373161f8a065b9f9df23f54faf91e8a298f1b40da3f00de262492a62f0f9b85d89cbfb2085f8a90185091494c60082fde8940435316b3ab4b999856085c98c3b1ab21d85cd4d80b844a9059cbb0000000000000000000000007b00bcf548a069906f654627c3e04795357d82990000000000000000000000000000000000000000000001f29fe46ae84af800001ca0a7a4d1458365c4beb07b05a6e7dfa0a9bdf7
3807c508dbd4cd65aa254070147ba035333336910d89b785f3c33feb9c46a11230002e14e32f90533c348499ee6a35f8ac824b2b85091494c60083011170949dffe202df7f82ba57a7f8d571628805eff7fed980b844a9059cbb00000000000000000000000017849384ca9173290f540e566e369df84e924b9800000000000000000000000000000000000000000000006c6b935b8bbd40000025a069acf936392176ffb46c35851218efbe318218f8250261db49a8c9fd411aef94a049509eb3ddd2865547739a9be6c45958552f9d77f9abbbfea2c5047984e378f9f8aa8085091494c6008301259494725440512cb7b78bf56b334e50e31707418231cb80b844a9059cbb0000000000000000000000007b00bcf548a069906f654627c3e04795357d82990000000000000000000000000000000000000000000821a69b498f0c61f800001ca08b42ff2c6746c6663ac1ce97183287bfe79e1e36906ba0e3a349707bdf378442a04f8af6df29b3a0bbc4d5b2b7b1934388c096c9c5eb90172fb297d9361cdf80d4f8890285091494c6008301cdbe94a39a6e35d98e531058b52c28af4ffdeeafb1749d80a4a694fc3a00000000000000000000000000000000000000000000001d776c9997241a800026a0acc704c9c151bb6988ff16cc1f9c3605525c7e2dfebb1251ae4a21733e0a5447a00fc1abe875a992fa1368766c31d1a301467cf565387d9b3772dcd50b96dbad02f86d0185091494c60082520894d43ffc3367ea4fee1a32342c8b92ddc558e5e826890108721d3368fdf0008026a0efd4fbbadc6d2714947ce815eeb99f711459db9573d66a14b4837e1a994442cba02da5a5d74fbf3d4b5a8a17acff8d4e8d0055ed488ef13e56c738e95ef8c85efdf8cc82034585091494c6008305d22794b86021cba87337dea87bc055666146a263c9e0cd80b864fc1d230e000000000000000000000000000000000000000000000000000000000000922f0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000025a0734009bcae96f41f77d600c562a70c1606abcc0751268407b7a4829b92eb8cf7a00fb0a91b8b350cc898d3fbf4ea9249344ee677e334d353f228ebc1ec40bcffe5f8698085091494c600830878a6949765fea9752505a685c1bce137ae5b2efe8ddf6280844e71d92d25a0ee1e0d0c01d19fa2881d2db84d4c54c8719ecee4fe8ada44ab6b51bd1892480ba065d9eed77a7f76565d10552537b29753f8f7182403f0334b0a823bc70fa1da08f8897e85091494c6008302b4c59475ce0e501e2e6776fcaaa514f394a88a772a897080a451c6590a00000000
00000000000000000000000000000000000000056bc75e2d6310000025a0b56d9e64fd7baac51c8a1464af551978ce94037ba93cd23499a01a42c7950a5fa06d04bf6de15e8f2ffb9359ce517960422106a3bd9c5a4baa3e865ee3e021e2a6f8cd830226b185091494c60083014c0894bb97e381f1d1e94ffa2a5844f6875e614698100980b86423b872dd0000000000000000000000006dd0ff909dc42abd3b6b52f6e7643d215715e1e7000000000000000000000000c1ab0c708aa638ad492d99383217c033f0eeab950000000000000000000000000000000000000000000000ce661595e03f85f8d925a01831096c49030fd369e15776f7d4bac38ef0f8e5572e3447841e66c5ccd5807da02fd5e62915e24339d30a472a565330b8808dbeb06009e6bed9715893c8b36d2df9018d82017585089d5f37b383035ff3947a250d5630b4cf539739df2c5dacb4c659f2488d80b9012438ed173900000000000000000000000000000000000000000000001650c2523dbb933d49000000000000000000000000000000000000000000000000000000000674219400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000082cd3c7035104d1fbfb55acc62667907791fe423000000000000000000000000000000000000000000000000000000005f7bf4390000000000000000000000000000000000000000000000000000000000000003000000000000000000000000b1f66997a5760428d3a87d68b90bfe0ae64121cc000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec725a0f6147595b4fba2f27e1d595a05c11fab5953335862c6edea856f9e9a77814529a056473fcf8e676c26ba05a1767ae9a514901c8fe6faa1ec473d47879afe7762f3f8ab82015785089d5f37b382ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000089191c0f10eebde841ea5d12b1f1098ef73859de000000000000000000000000000000000000000000000000000000174876e80025a0be3553ba3c9a205e4d76afbc0a6dd6f0d8a84f7efc8d5076eb98fc43a76808baa055e94b68d8a1da425f4febc9e0272cb65e1da38e6d58c06489d3b54e79b52b4ff8ab8218ee85089d5f37b3828fe094de7f89c3d1261f07589fa52fea19bc1b14dfc91980b844a9059cbb0000000000000000000000008700e5430a4b57f24a7f84abe38864e51af7a4b8000000000000000000000000000000000000000000000000000000909af338a526a02cfb25a32d955a1d8c6b51f339c8cb0acf
6cf3e27d45fbb0a2ecfc12b85602f0a024d77aa56ace50dfe58c7413aa08a5006728e293bb26397e7be096277903938ff901522a85089d5f37b383028908947a250d5630b4cf539739df2c5dacb4c659f2488d880b263a94504c2e58b8e47ff36ab500000000000000000000000000000000000000000000000ae0b305bb98f9a7ad000000000000000000000000000000000000000000000000000000000000008000000000000000000000000071a85ed3f949a63dc13c127fc2f2751c13eed024000000000000000000000000000000000000000000000000000000005f7befe80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000002a7f709ee001069771ceb6d42e85035f7d18e73626a0e3ed1ca94b4285e9ae128d9ffc907da874634e6784ac29db70570e5a610aacc7a016da61376d706d3c8bbedfad33e229d2f80a0015e40e5345e7415aaf9fab79d9f9016b0a85089d5f37b38302935b947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000000006551c67c8504ff9b600000000000000000000000000000000000000000000000003bc3298899feb5700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000e42bf6c707ff70adbcb5d3c41a18af9c7b59078d000000000000000000000000000000000000000000000000000000005f7bf436000000000000000000000000000000000000000000000000000000000000000200000000000000000000000005d3606d5c81eb9b7b18530995ec9b29da05faba000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a038bae485ad7254f45da6ee9ccfaa7b61e09335ed4d63145c171a6db4b2c58b43a06aaacde283c87486dfaeed8b7c65da2a4968b0da29e5891a74633d1142b9eb7bf8ab81be85089d5f37b383033e3794577af3dce5aaa89510135d7f6e095e33c06b8b1f80b844e2bbb158000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000026a05591dc7d86736fec625daea5eb6e71cf2d41f29b77d6d9e59f030ef0352aba5aa04459cd24f3cbba8eb1f5640ad1f1f36a8402cf438bcccd48b9feb280d80c1e10f901731685089d5f37b38303bbc3947a250d5630b4cf539739df2c5dacb4c659f2488d8802b3a3cb06ced308b901047ff36ab5000000000000000000000000000000000000000000
00022dbeb06eb8dc19a65e000000000000000000000000000000000000000000000000000000000000008000000000000000000000000032769651c655db12154b17910dd477592c54576d000000000000000000000000000000000000000000000000000000005f7bf4390000000000000000000000000000000000000000000000000000000000000003000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec70000000000000000000000009b06d48e0529ecf05905ff52dd426ebec0ea301125a0a0d302734d13499be466feeda44570a2bdbf364b31e34be62ee16b4dc4b5699ba0687177566a9a2e6bf7d3f2d68cf60e93482447dd3b472b67c17b8f1e00981b78f9016b7885089d5f37b3830295f8947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe500000000000000000000000000000000000000000000000f23c3cdb2637622cb0000000000000000000000000000000000000000000000000f2ec381bef4f22100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000003904ea4b55a09e7c6e4c213cce0db7a9f599a4c3000000000000000000000000000000000000000000000000000000005f7bf43900000000000000000000000000000000000000000000000000000000000000020000000000000000000000002a7f709ee001069771ceb6d42e85035f7d18e736000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a0f34f01bed0f338683c5207c1587d1a5ba229779db4c1de9d5482c4a905a2ad3ea068e89a5b8047924cb71cbc8e123e8b05a8c8719a049afb02a3c6972febcd0085f8a90985089d5f37b382ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000d3d0278216491edb86bc06f8cfb11d51a9dd808b0000000000000000000000000000000000000000000000000000000005f5e10025a06c7279aba31527aae21db6b895612f99ba1244bc9aef8264bc9fa6a15cb3efaaa073c383316de97f4c2b5b9b51ea7d74d17be0b4dd9529c6ab4e59e93f5fb57346f8aa81dd85089d5f37b382bf2c940ff6ffcfda92c53f615a4a75d982f399c989366b80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a0e4ce469a5e40f47e838933ebf2a5af9cc0cb39b1c87dcc90db9ea90207060960a04210af1974ec3fbade5ffdf79e92a4287b68290a52
c8301aa7a96f325b29b089f8a97285089d5f37b382ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000002562dac3ecc0df840568597f5fe5db96d7fad37f0000000000000000000000000000000000000000000000000000000011e1a30025a05d6b5a2fb581a0c76a0b6ad86d9f14f9461a924bfa23fd3e6dc29469aecee960a047cb4d29954ef81da7a65933c5e398a3d012b106ad1f49dd78ede22414a59ff0f8ab82011f85089d5f37b382ea6094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000006742a5be977a95e6bd413ad7dd67e750d25acf77000000000000000000000000000000000000000000000000000000000081b32026a09d70ff51194da072becc6095e121853a1e9500ee6285a37fb3fff0b7b37d8d46a03054b0e28af6cac98ca911dcfe7e47eef7e9546d1c4743d72b5761839c133f24f8ab82010585089d5f37b382ea60945913d0f34615923552ee913dbe809f9f348e706e80b844a9059cbb0000000000000000000000009bcae07a4f32391c337081db539f18c8a41f0b17000000000000000000000000000000000000000000000000016345785d8a000026a0d8364bccfe1cd13452d32320bbbe7007d2a49eb4c44e4192adcf13c5319dba12a0556f74086c4f19473dcd0e1651ed2f1ad41a6e4955ff4489e580637b76cf559bf8cc82027b85089d5f37b3830449df94398ec7346dcd622edc5ae82352f02be94c62d11980b864d2d0e0660000000000000000000000002260fac5e5542a773aa44fbcfedf7c193bc2c5990000000000000000000000000000000000000000000000000000000002f51218000000000000000000000000000000000000000000000000000000000000000026a05f066b907360fdd977a7be7b87f8abb63c026bd6a9b79961b2df31f4b05197b6a03ee77c3bb73623eaf754e19a3de2587917d5bc9fe9fd6d18d01263994a8ae482f9016b2d85089d5f37b383040b67947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000000005fe80d4a5da71c8000000000000000000000000000000000000000000000000000f4d4114fc1b990700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000d5ef38a6bfe29e37fde0e05c3b5d053d35cde31c000000000000000000000000000000000000000000000000000000005f7bf45c0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000960b236a07cf122663c4303350609a66a7b288
c0000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a077384993315e895c506addb765b6ead9aa40643cfe0e47fa9a7cbe7b68b5c26da023e3d59adf9659ebc24ac7b1c61a8d34870c1473482062dee58001e433dacffbf8ab8202e885089d5f37b38270d094c02aaa39b223fe8d0a0e5c4f27ead9083c756cc280b844095ea7b3000000000000000000000000e2466deb9536a69bf8131ecd0c267ee41dd1cda0000000000000000000000000000000000000000000000000000000000000000026a0122be7ab3dd42a95a6f9f7afd4b53cdd96a54533355678de8e6a079c1f3a507da0184286f4fb49dc18285b01b7b881ef7219783a94e4adba214d5f846cd05bf080f8ab8202e985089d5f37b38270d094c02aaa39b223fe8d0a0e5c4f27ead9083c756cc280b844095ea7b30000000000000000000000006f400810b62df8e13fded51be75ff5393eaa841f000000000000000000000000000000000000000000000000000000000000000026a09cad61d8896acfc385722a5bbb7cc933cf424848b3c1e5ca77d65bf05c6d6fcda074d8318b68ede0035abf2b7be4ff24b8434a14e9265539d0a7a667da017671a1f8ab8202ea85089d5f37b38270d094c02aaa39b223fe8d0a0e5c4f27ead9083c756cc280b844095ea7b30000000000000000000000003e66b66fd1d0b02fda6c811da9e0547970db2f21000000000000000000000000000000000000000000000000000000000000000026a0e6503c732760d18c807a75da58051281d667f8c4198c45ed51ca087614d5785aa01e224cb49e6837da617107107dc0ea4dfad191de3797142648bc1dde0df987faf9016d82018f85089d5f37b3830295f8947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe500000000000000000000000000000000000000000000000f068603508f9456ad0000000000000000000000000000000000000000000000000e494da22dc7c0b800000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000ffcee9c64c3cdab271f5c3c7c184b153f1344b2a000000000000000000000000000000000000000000000000000000005f7bf03d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000002a7f709ee001069771ceb6d42e85035f7d18e736000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0197b3da148fcee4ccbf62eb74082ef5b79384afdaf3abd2175173b404d2c6c3ba02fa9b883c18f17f6327cc8de85764d2b59b3dc3e8b5359a225585e719067c0fdf8ab82069a850826299e0082a9
5994b4e16d0168e52d35cacd2c6185b44281ec28c9dc80b844095ea7b3000000000000000000000000fab1eab539d2cf50162245e59ddea174a301d85d000000000000000000000000000000000000000000000000000000000000000026a04919640cc96cbb20a3fb12871737fbd812a1f8c812f852db7abfbec889c04a20a04c6f5c296e8c2628537c559d76ad3b750cf068b689e6021dc17691bb846d6d5bf86b1b8507ea8ed9b38252089409f72bc14a0901c3d8127dc3b64971187900a66987470de4df8200008025a0ef0162ddd727191632140402fcd1d3d3e05cc0386eb5700c71b12d85a1732176a02958d5b3f7e4c9a555a172c6b4921cc568708ea8138c26bd1445036fbbaef2eff86b078507ea8ed9b38252089437434a338fdcff98e8ae3c07e3ce6bfae701a47d87ce0eb154f900008026a0d38057dbe925e7517a9321248cdce4a91aef0caac2a28b275909ba1e9b365312a01b1f4727a4a644c733b7fc975dd3791d4931d1e210ad8423415a8d3a4ec85b41f86c038507b4e9eb00825208941bb95bd09a9463a9fb0874cc9305744eed1965368815957b01e7cdcf5b801ba06a8e9d014dd495c9d5c98b8e7d71e3d170e2fe4b83752d219f8bf794c8c9f2eda023dd8b8cb5491984b639e0855817d71dca426e1ac25053e2156d5f95042b86f4c0"
+)
+
+/*
+0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347
+0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347
+0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e
+  ,BranchPageN,LeafPageN,OverflowN,Entries
+b,4085,326718,0,11635055
+eth_tx,212580,47829931,6132023,969755142
+*/
+
 var (
-	HeaderNumber    = stages.SyncStage("snapshot_header_number")
-	HeaderCanonical = stages.SyncStage("snapshot_canonical")
+	HeadersPostProcessingStage = stages.SyncStage("post processing")
+	Snapshot11kkTD             = []byte{138, 3, 199, 118, 5, 203, 95, 162, 81, 64, 161}
 )
 
-func PostProcessing(db ethdb.Database, mode SnapshotMode, downloadedSnapshots map[SnapshotType]*SnapshotsInfo) error {
-	if mode.Headers {
+func PostProcessing(db ethdb.Database, downloadedSnapshots map[SnapshotType]*SnapshotsInfo) error {
+	if _, ok := downloadedSnapshots[SnapshotType_headers]; ok {
 		err := GenerateHeaderIndexes(context.Background(), db)
 		if err != nil {
 			return err
 		}
 	}
-
-	if mode.Bodies {
-		err := PostProcessBodies(db)
+	if _, ok := downloadedSnapshots[SnapshotType_state]; ok {
+		err := PostProcessState(db, downloadedSnapshots[SnapshotType_state])
 		if err != nil {
 			return err
 		}
 	}
 
-	if mode.State {
-		err := PostProcessState(db, downloadedSnapshots[SnapshotType_state])
+	if _, ok := downloadedSnapshots[SnapshotType_bodies]; ok {
+		err := PostProcessBodies(db)
 		if err != nil {
 			return err
 		}
 	}
+
 	return nil
 }
 
@@ -56,8 +73,32 @@ func PostProcessBodies(db ethdb.Database) error {
 	if v > 0 {
 		return nil
 	}
+	err = db.(*ethdb.ObjectDatabase).ClearBuckets(dbutils.TxLookupPrefix)
+	if err != nil {
+		return err
+	}
+
+	tx, err := db.Begin(context.Background(), ethdb.RW)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
 
-	k, body, err := db.Last(dbutils.BlockBodyPrefix)
+	k, _, err := tx.Last(dbutils.EthTx)
+	if err != nil {
+		return err
+	}
+	if len(k) != 8 {
+		return errors.New("incorrect transaction id in body snapshot")
+	}
+	secKey := make([]byte, 8)
+	binary.BigEndian.PutUint64(secKey, binary.BigEndian.Uint64(k)+1)
+	err = tx.Put(dbutils.Sequence, []byte(dbutils.EthTx), secKey)
+	if err != nil {
+		return err
+	}
+
+	k, body, err := tx.Last(dbutils.BlockBodyPrefix)
 	if err != nil {
 		return err
 	}
@@ -67,11 +108,11 @@ func PostProcessBodies(db ethdb.Database) error {
 	}
 
 	number := binary.BigEndian.Uint64(k[:8])
-	err = stages.SaveStageProgress(db, stages.Bodies, number)
+	err = stages.SaveStageProgress(tx, stages.Bodies, number)
 	if err != nil {
 		return err
 	}
-	return nil
+	return tx.Commit()
 }
 
 func PostProcessState(db ethdb.GetterPutter, info *SnapshotsInfo) error {
@@ -83,138 +124,207 @@ func PostProcessState(db ethdb.GetterPutter, info *SnapshotsInfo) error {
 	if v > 0 {
 		return nil
 	}
-
+	// clear genesis state
+	err = db.(*ethdb.ObjectDatabase).ClearBuckets(dbutils.PlainStateBucket, dbutils.EthTx)
+	if err != nil {
+		return err
+	}
 	err = stages.SaveStageProgress(db, stages.Execution, info.SnapshotBlock)
 	if err != nil {
 		return err
 	}
+	err = stages.SaveStageProgress(db, stages.Senders, info.SnapshotBlock)
+	if err != nil {
+		return err
+	}
 	return nil
 }
 
-func generateHeaderIndexesStep1(ctx context.Context, db ethdb.Database) error {
-	v, err1 := stages.GetStageProgress(db, HeaderNumber)
-	if err1 != nil {
-		return err1
+//It'll be enabled later
+func PostProcessNoBlocksSync(db ethdb.Database, blockNum uint64, blockHash common.Hash, blockHeaderBytes, blockBodyBytes []byte) error {
+	v, err := stages.GetStageProgress(db, stages.Execution)
+	if err != nil {
+		return err
 	}
 
-	if v == 0 {
-		tx, err := db.Begin(context.Background(), ethdb.RW)
-		if err != nil {
-			return err
-		}
-		defer tx.Rollback()
+	if v > 0 {
+		return nil
+	}
+	log.Info("PostProcessNoBlocksSync", "blocknum", blockNum, "hash", blockHash.String())
 
-		log.Info("Generate headers hash to number index")
-		headHashBytes, innerErr := tx.Get(dbutils.HeadersSnapshotInfoBucket, []byte(dbutils.SnapshotHeadersHeadHash))
-		if innerErr != nil {
-			return innerErr
-		}
+	tx, err := db.(ethdb.HasRwKV).RwKV().BeginRw(context.Background())
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
 
-		headNumberBytes, innerErr := tx.Get(dbutils.HeadersSnapshotInfoBucket, []byte(dbutils.SnapshotHeadersHeadNumber))
-		if innerErr != nil {
-			return innerErr
-		}
+	//add header
+	err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(SnapshotBlock, blockHash), blockHeaderBytes)
+	if err != nil {
+		return err
+	}
+	//add canonical
+	err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(SnapshotBlock), blockHash.Bytes())
+	if err != nil {
+		return err
+	}
+	body := new(types.Body)
+	err = rlp.DecodeBytes(blockBodyBytes, body)
+	if err != nil {
+		return err
+	}
+	err = rawdb.WriteBody(tx, blockHash, SnapshotBlock, body)
+	if err != nil {
+		return err
+	}
 
-		headNumber := big.NewInt(0).SetBytes(headNumberBytes).Uint64()
-		headHash := common.BytesToHash(headHashBytes)
+	err = tx.Put(dbutils.HeaderNumberBucket, blockHash.Bytes(), dbutils.EncodeBlockNumber(SnapshotBlock))
+	if err != nil {
+		return err
+	}
+	b, err := rlp.EncodeToBytes(big.NewInt(0).SetBytes(Snapshot11kkTD))
+	if err != nil {
+		return err
+	}
+	err = tx.Put(dbutils.HeaderTDBucket, dbutils.HeaderKey(SnapshotBlock, blockHash), b)
+	if err != nil {
+		return err
+	}
 
-		innerErr = etl.Transform("Torrent post-processing 1", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderNumberBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
-			return next(k, common.CopyBytes(k[8:]), common.CopyBytes(k[:8]))
-		}, etl.IdentityLoadFunc, etl.TransformArgs{
-			Quit:          ctx.Done(),
-			ExtractEndKey: dbutils.HeaderKey(headNumber, headHash),
-		})
+	err = tx.Put(dbutils.HeadHeaderKey, []byte(dbutils.HeadHeaderKey), blockHash.Bytes())
+	if err != nil {
+		return err
+	}
+
+	err = tx.Put(dbutils.HeadBlockKey, []byte(dbutils.HeadBlockKey), blockHash.Bytes())
+	if err != nil {
+		return err
+	}
+
+	err = stages.SaveStageProgress(tx, stages.Headers, blockNum)
+	if err != nil {
+		return err
+	}
+	err = stages.SaveStageProgress(tx, stages.Bodies, blockNum)
+	if err != nil {
+		return err
+	}
+	err = stages.SaveStageProgress(tx, stages.BlockHashes, blockNum)
+	if err != nil {
+		return err
+	}
+	err = stages.SaveStageProgress(tx, stages.Senders, blockNum)
+	if err != nil {
+		return err
+	}
+	err = stages.SaveStageProgress(tx, stages.Execution, blockNum)
+	if err != nil {
+		return err
+	}
+	return tx.Commit()
+}
+
+func generateHeaderHashToNumberIndex(ctx context.Context, tx ethdb.DbWithPendingMutations) error {
+	log.Info("Generate headers hash to number index")
+	headHashBytes, innerErr := tx.Get(dbutils.HeadersSnapshotInfoBucket, []byte(dbutils.SnapshotHeadersHeadHash))
+	if innerErr != nil {
+		return innerErr
+	}
+
+	headNumberBytes, innerErr := tx.Get(dbutils.HeadersSnapshotInfoBucket, []byte(dbutils.SnapshotHeadersHeadNumber))
+	if innerErr != nil {
+		return innerErr
+	}
+
+	headNumber := big.NewInt(0).SetBytes(headNumberBytes).Uint64()
+	headHash := common.BytesToHash(headHashBytes)
+
+	return etl.Transform("Torrent post-processing 1", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderNumberBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
+		return next(k, common.CopyBytes(k[8:]), common.CopyBytes(k[:8]))
+	}, etl.IdentityLoadFunc, etl.TransformArgs{
+		Quit:          ctx.Done(),
+		ExtractEndKey: dbutils.HeaderKey(headNumber, headHash),
+	})
+}
+
+func generateHeaderTDAndCanonicalIndexes(ctx context.Context, tx ethdb.DbWithPendingMutations) error {
+	var hash common.Hash
+	var number uint64
+	var err error
+
+	h := rawdb.ReadHeaderByNumber(tx, 0)
+	td := h.Difficulty
+
+	log.Info("Generate TD index & canonical")
+	err = etl.Transform("Torrent post-processing 2", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderTDBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
+		header := &types.Header{}
+		innerErr := rlp.DecodeBytes(v, header)
 		if innerErr != nil {
 			return innerErr
 		}
-		if err = stages.SaveStageProgress(tx, HeaderNumber, 1); err != nil {
-			return err
-		}
-		if err = tx.Commit(); err != nil {
-			return err
+		number = header.Number.Uint64()
+		hash = header.Hash()
+		td = td.Add(td, header.Difficulty)
+		tdBytes, innerErr := rlp.EncodeToBytes(td)
+		if innerErr != nil {
+			return innerErr
 		}
+
+		return next(k, dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), tdBytes)
+	}, etl.IdentityLoadFunc, etl.TransformArgs{
+		Quit: ctx.Done(),
+	})
+	if err != nil {
+		return err
 	}
+	log.Info("Generate TD index & canonical")
+	err = etl.Transform("Torrent post-processing 2", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
+		return next(k, common.CopyBytes(k[:8]), common.CopyBytes(k[8:]))
+	}, etl.IdentityLoadFunc, etl.TransformArgs{
+		Quit: ctx.Done(),
+	})
+	if err != nil {
+		return err
+	}
+	rawdb.WriteHeadHeaderHash(tx, hash)
+	rawdb.WriteHeaderNumber(tx, hash, number)
+	err = stages.SaveStageProgress(tx, stages.Headers, number)
+	if err != nil {
+		return err
+	}
+	err = stages.SaveStageProgress(tx, stages.BlockHashes, number)
+	if err != nil {
+		return err
+	}
+	rawdb.WriteHeadBlockHash(tx, hash)
+	log.Info("Last processed block", "num", number, "hash", hash.String())
 	return nil
 }
 
-func generateHeaderIndexesStep2(ctx context.Context, db ethdb.Database) error {
-	var hash common.Hash
-	var number uint64
-
-	v, err1 := stages.GetStageProgress(db, HeaderCanonical)
+func GenerateHeaderIndexes(ctx context.Context, db ethdb.Database) error {
+	v, err1 := stages.GetStageProgress(db, HeadersPostProcessingStage)
 	if err1 != nil {
 		return err1
 	}
+
 	if v == 0 {
 		tx, err := db.Begin(context.Background(), ethdb.RW)
 		if err != nil {
 			return err
 		}
 		defer tx.Rollback()
-
-		h := rawdb.ReadHeaderByNumber(tx, 0)
-		td := h.Difficulty
-
-		log.Info("Generate TD index & canonical")
-		err = etl.Transform("Torrent post-processing 2", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderTDBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
-			header := &types.Header{}
-			innerErr := rlp.DecodeBytes(v, header)
-			if innerErr != nil {
-				return innerErr
-			}
-			number = header.Number.Uint64()
-			hash = header.Hash()
-			td = td.Add(td, header.Difficulty)
-			tdBytes, innerErr := rlp.EncodeToBytes(td)
-			if innerErr != nil {
-				return innerErr
-			}
-
-			return next(k, dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), tdBytes)
-		}, etl.IdentityLoadFunc, etl.TransformArgs{
-			Quit: ctx.Done(),
-		})
-		if err != nil {
-			return err
-		}
-		log.Info("Generate TD index & canonical")
-		err = etl.Transform("Torrent post-processing 2", tx.(ethdb.HasTx).Tx().(ethdb.RwTx), dbutils.HeadersBucket, dbutils.HeaderCanonicalBucket, os.TempDir(), func(k []byte, v []byte, next etl.ExtractNextFunc) error {
-			return next(k, common.CopyBytes(k[:8]), common.CopyBytes(k[8:]))
-		}, etl.IdentityLoadFunc, etl.TransformArgs{
-			Quit: ctx.Done(),
-		})
-		if err != nil {
+		if err = generateHeaderHashToNumberIndex(ctx, tx); err != nil {
 			return err
 		}
-		rawdb.WriteHeadHeaderHash(tx, hash)
-		rawdb.WriteHeaderNumber(tx, hash, number)
-		err = stages.SaveStageProgress(tx, stages.Headers, number)
-		if err != nil {
+		if err = generateHeaderTDAndCanonicalIndexes(ctx, tx); err != nil {
 			return err
 		}
-		err = stages.SaveStageProgress(tx, stages.BlockHashes, number)
+		err = stages.SaveStageProgress(tx, HeadersPostProcessingStage, 1)
 		if err != nil {
-			return err
+			return err1
 		}
-		rawdb.WriteHeadBlockHash(tx, hash)
-		if err = stages.SaveStageProgress(tx, HeaderCanonical, number); err != nil {
-			return err
-		}
-		if err = tx.Commit(); err != nil {
-			return err
-		}
-		log.Info("Last processed block", "num", number, "hash", hash.String())
-	}
-
-	return nil
-}
 
-func GenerateHeaderIndexes(ctx context.Context, db ethdb.Database) error {
-	if err := generateHeaderIndexesStep1(ctx, db); err != nil {
-		return err
-	}
-	if err := generateHeaderIndexesStep2(ctx, db); err != nil {
-		return err
+		return tx.Commit()
 	}
 	return nil
 }
diff --git a/turbo/snapshotsync/postprocessing_test.go b/turbo/snapshotsync/postprocessing_test.go
index 67c8442593a4c81933a1021858677590dc061d0f..623dc7e9d78775ac6b4a8e3156e8b9da2b3efd3b 100644
--- a/turbo/snapshotsync/postprocessing_test.go
+++ b/turbo/snapshotsync/postprocessing_test.go
@@ -58,7 +58,7 @@ func TestHeadersGenerateIndex(t *testing.T) {
 	}
 	snKV := ethdb.NewLMDB().Path(snPath).Flags(func(flags uint) uint { return flags | lmdb.Readonly }).WithBucketsConfig(ethdb.DefaultBucketConfigs).MustOpen()
 
-	snKV = ethdb.NewSnapshot2KV().SnapshotDB([]string{dbutils.HeadersSnapshotInfoBucket, dbutils.HeadersBucket}, snKV).DB(db).MustOpen()
+	snKV = ethdb.NewSnapshotKV().SnapshotDB([]string{dbutils.HeadersSnapshotInfoBucket, dbutils.HeadersBucket}, snKV).DB(db).Open()
 	snDb := ethdb.NewObjectDatabase(snKV)
 	err = GenerateHeaderIndexes(context.Background(), snDb)
 	if err != nil {
diff --git a/turbo/snapshotsync/server.go b/turbo/snapshotsync/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..b5fe682516c91c51f80dbf2253c2e154b4aec511
--- /dev/null
+++ b/turbo/snapshotsync/server.go
@@ -0,0 +1,80 @@
+package snapshotsync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/anacrolix/torrent"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+)
+
+var (
+	ErrNotSupportedNetworkID = errors.New("not supported network id")
+	ErrNotSupportedSnapshot  = errors.New("not supported snapshot for this network id")
+)
+var (
+	_ DownloaderServer = &SNDownloaderServer{}
+)
+
+func NewServer(dir string, seeding bool) (*SNDownloaderServer, error) {
+	db := ethdb.MustOpen(dir + "/db")
+	peerID, err := db.Get(dbutils.BittorrentInfoBucket, []byte(dbutils.BittorrentPeerID))
+	if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+		return nil, fmt.Errorf("get peer id: %w", err)
+	}
+	downloader, err := New(dir, seeding, string(peerID))
+	if err != nil {
+		return nil, err
+	}
+	if len(peerID) == 0 {
+		err = downloader.SavePeerID(db)
+		if err != nil {
+			return nil, fmt.Errorf("save peer id: %w", err)
+		}
+	}
+	return &SNDownloaderServer{
+		t:  downloader,
+		db: db,
+	}, nil
+}
+
+type SNDownloaderServer struct {
+	DownloaderServer
+	t  *Client
+	db ethdb.Database
+}
+
+func (S *SNDownloaderServer) Download(ctx context.Context, request *DownloadSnapshotRequest) (*empty.Empty, error) {
+	err := S.t.AddSnapshotsTorrents(ctx, S.db, request.NetworkId, FromSnapshotTypes(request.Type))
+	if err != nil {
+		return nil, err
+	}
+	return &empty.Empty{}, nil
+}
+func (S *SNDownloaderServer) Load() error {
+	return S.t.Load(S.db)
+}
+
+func (S *SNDownloaderServer) Snapshots(ctx context.Context, request *SnapshotsRequest) (*SnapshotsInfoReply, error) {
+	reply := SnapshotsInfoReply{}
+	resp, err := S.t.GetSnapshots(S.db, request.NetworkId)
+	if err != nil {
+		return nil, err
+	}
+	for i := range resp {
+		reply.Info = append(reply.Info, resp[i])
+	}
+	return &reply, nil
+}
+
+func (S *SNDownloaderServer) Stats(ctx context.Context) map[string]torrent.TorrentStats {
+	stats := map[string]torrent.TorrentStats{}
+	torrents := S.t.Cli.Torrents()
+	for _, t := range torrents {
+		stats[t.Name()] = t.Stats()
+	}
+	return stats
+}
diff --git a/turbo/snapshotsync/snapshot_builder.go b/turbo/snapshotsync/snapshot_builder.go
new file mode 100644
index 0000000000000000000000000000000000000000..3bf931fc9e5438e24c46d507f29fabb6157b1f6d
--- /dev/null
+++ b/turbo/snapshotsync/snapshot_builder.go
@@ -0,0 +1,446 @@
+package snapshotsync
+
+import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/ledgerwatch/lmdb-go/lmdb"
+	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/core/rawdb"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+	"github.com/ledgerwatch/turbo-geth/log"
+)
+
+// maxReorgDepth is the number of most recent blocks kept out of snapshots as
+// a safety margin against chain reorganisations.
+//What number should we use?
+const maxReorgDepth = 90000
+
+// CalculateEpoch returns the epoch boundary for block: block minus the
+// remainder of (block + maxReorgDepth) modulo epochSize.
+func CalculateEpoch(block, epochSize uint64) uint64 {
+	shifted := block + maxReorgDepth
+	return block - shifted%epochSize
+}
+
+// SnapshotName builds the on-disk directory name for a snapshot of the given
+// kind at blockNum, e.g. <baseDir>/headers10.
+func SnapshotName(baseDir, name string, blockNum uint64) string {
+	suffix := strconv.FormatUint(blockNum, 10)
+	return path.Join(baseDir, name) + suffix
+}
+
+// GetSnapshotInfo reads the current headers-snapshot block number and torrent
+// infohash from db. Missing keys are not an error: the zero block number and
+// a nil infohash are returned instead.
+func GetSnapshotInfo(db ethdb.Database) (uint64, []byte, error) {
+	blockBytes, err := db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+	if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+		return 0, nil, err
+	}
+	var snapshotBlock uint64
+	if len(blockBytes) == 8 {
+		snapshotBlock = binary.BigEndian.Uint64(blockBytes)
+	}
+
+	infohash, err := db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
+	if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+		return 0, nil, err
+	}
+	return snapshotBlock, infohash, nil
+}
+
+// OpenHeadersSnapshot opens the headers snapshot database at dbPath in
+// read-only mode, exposing only the Headers bucket.
+func OpenHeadersSnapshot(dbPath string) (ethdb.RwKV, error) {
+	return ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
+		return dbutils.BucketsCfg{
+			dbutils.HeadersBucket: dbutils.BucketsConfigs[dbutils.HeadersBucket],
+		}
+	}).Flags(func(u uint) uint {
+		return u | lmdb.Readonly
+	}).Path(dbPath).Open()
+
+}
+// CreateHeadersSnapshot builds a standalone headers snapshot database at
+// snapshotPath containing every canonical header up to and including toBlock.
+// A stale database at snapshotPath is removed first so the Append-only
+// generation cannot collide with old data.
+// Fix: the snapshot KV is now closed on every exit path (the original only
+// closed it on success, leaking the environment on error).
+func CreateHeadersSnapshot(ctx context.Context, chainDB ethdb.Database, toBlock uint64, snapshotPath string) error {
+	// remove created snapshot if it's not saved in main db(to avoid append error)
+	err := os.RemoveAll(snapshotPath)
+	if err != nil {
+		return err
+	}
+	snKV, err := ethdb.NewLMDB().WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
+		return dbutils.BucketsCfg{
+			dbutils.HeadersBucket: dbutils.BucketsConfigs[dbutils.HeadersBucket],
+		}
+	}).Path(snapshotPath).Open()
+	if err != nil {
+		return err
+	}
+	// Deferred close runs after the transaction rollback below (LIFO).
+	defer snKV.Close()
+
+	sntx, err := snKV.BeginRw(context.Background())
+	if err != nil {
+		return fmt.Errorf("begin err: %w", err)
+	}
+	defer sntx.Rollback()
+	err = GenerateHeadersSnapshot(ctx, chainDB, sntx, toBlock)
+	if err != nil {
+		return fmt.Errorf("generate err: %w", err)
+	}
+	err = sntx.Commit()
+	if err != nil {
+		return fmt.Errorf("commit err: %w", err)
+	}
+	return nil
+}
+
+// GenerateHeadersSnapshot copies canonical headers 0..toBlock from db into the
+// snapshot transaction sntx using cursor Append (keys are produced in strictly
+// increasing order). It fails if any canonical header is missing, honours ctx
+// cancellation, and logs progress at most every 30 seconds.
+func GenerateHeadersSnapshot(ctx context.Context, db ethdb.Database, sntx ethdb.RwTx, toBlock uint64) error {
+	headerCursor, err := sntx.RwCursor(dbutils.HeadersBucket)
+	if err != nil {
+		return err
+	}
+	var hash common.Hash
+	var header []byte
+	t := time.NewTicker(time.Second * 30)
+	defer t.Stop()
+	tt := time.Now()
+	for i := uint64(0); i <= toBlock; i++ {
+		if common.IsCanceled(ctx) {
+			return common.ErrStopped
+		}
+		// Non-blocking progress report, driven by the 30s ticker.
+		select {
+		case <-t.C:
+			log.Info("Headers snapshot generation", "t", time.Since(tt), "block", i)
+		default:
+		}
+		hash, err = rawdb.ReadCanonicalHash(db, i)
+		if err != nil {
+			return err
+		}
+		header = rawdb.ReadHeaderRLP(db, hash, i)
+		if len(header) < 2 {
+			// Anything shorter than 2 bytes cannot be a valid RLP-encoded
+			// header; treat it as missing/corrupt.
+			return fmt.Errorf("header %d is empty, %v", i, header)
+		}
+
+		err = headerCursor.Append(dbutils.HeaderKey(i, hash), header)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NewMigrator creates a SnapshotMigrator rooted at snapshotDir, primed with
+// the block number and torrent infohash of the snapshot already in use.
+func NewMigrator(snapshotDir string, currentSnapshotBlock uint64, currentSnapshotInfohash []byte) *SnapshotMigrator {
+	return &SnapshotMigrator{
+		snapshotsDir:               snapshotDir,
+		HeadersCurrentSnapshot:     currentSnapshotBlock,
+		HeadersNewSnapshotInfohash: currentSnapshotInfohash,
+	}
+}
+
+// SnapshotMigrator is the state machine (driven by Migrate) that generates a
+// new headers snapshot, swaps it in for the old one, re-seeds it over
+// bittorrent and prunes the now-redundant headers from the main database.
+type SnapshotMigrator struct {
+	snapshotsDir               string // directory holding the snapshot databases
+	HeadersCurrentSnapshot     uint64 // block of the live snapshot (accessed atomically)
+	HeadersNewSnapshot         uint64 // block of the snapshot being produced (accessed atomically)
+	HeadersNewSnapshotInfohash []byte // torrent infohash of the freshly seeded snapshot
+
+	Stage uint64 // current Stage* value (accessed atomically)
+	mtx   sync.RWMutex
+
+	cancel func() // cancels an in-flight generation; written under mtx
+}
+
+// Close aborts an in-flight snapshot generation, if any. Fix: it is now safe
+// to call when no generation is running — the original dereferenced a nil
+// cancel func and panicked. cancel is read under mtx, matching the writes in
+// Migrate's goroutine.
+func (sm *SnapshotMigrator) Close() {
+	sm.mtx.Lock()
+	defer sm.mtx.Unlock()
+	if sm.cancel != nil {
+		sm.cancel()
+	}
+}
+
+// RemoveNonCurrentSnapshots deletes every "headers<block>" snapshot directory
+// under snapshotsDir whose block differs from the current snapshot block.
+// Entries that are not directories, lack the "headers" prefix or have an
+// unparsable block suffix are skipped with a warning.
+// Fix: the original logged "removed useless snapshot" even when RemoveAll had
+// just failed; removal failures now skip the success log.
+func (sm *SnapshotMigrator) RemoveNonCurrentSnapshots() error {
+	files, err := ioutil.ReadDir(sm.snapshotsDir)
+	if err != nil {
+		return err
+	}
+
+	for i := range files {
+		snapshotName := files[i].Name()
+		if !files[i].IsDir() || !strings.HasPrefix(snapshotName, "headers") {
+			continue
+		}
+		snapshotBlock, innerErr := strconv.ParseUint(strings.TrimPrefix(snapshotName, "headers"), 10, 64)
+		if innerErr != nil {
+			log.Warn("unknown snapshot", "name", snapshotName, "err", innerErr)
+			continue
+		}
+		if snapshotBlock == sm.HeadersCurrentSnapshot {
+			continue
+		}
+		snapshotPath := path.Join(sm.snapshotsDir, snapshotName)
+		if innerErr = os.RemoveAll(snapshotPath); innerErr != nil {
+			log.Warn("useless snapshot hasn't been removed", "path", snapshotPath, "err", innerErr)
+			continue
+		}
+		log.Info("removed useless snapshot", "path", snapshotPath)
+	}
+	return nil
+}
+
+// Finished reports whether the migrator is idle with block fully switched
+// over: no new snapshot is pending, the state machine is back at StageStart,
+// and the current snapshot equals block. All fields are read atomically.
+func (sm *SnapshotMigrator) Finished(block uint64) bool {
+	return atomic.LoadUint64(&sm.HeadersNewSnapshot) == atomic.LoadUint64(&sm.HeadersCurrentSnapshot) && atomic.LoadUint64(&sm.HeadersCurrentSnapshot) > 0 && atomic.LoadUint64(&sm.Stage) == StageStart && atomic.LoadUint64(&sm.HeadersCurrentSnapshot) == block
+}
+
+// Stages of the snapshot migration state machine, in the order they are
+// traversed by SnapshotMigrator.Migrate.
+const (
+	StageStart           = 0
+	StageGenerate        = 1
+	StageReplace         = 2
+	StageStopSeeding     = 3
+	StageStartSeedingNew = 4
+	StagePruneDB         = 5
+	StageFinish          = 6
+)
+
+// GetStage returns a human-readable description of the migrator's current
+// stage (read atomically).
+func (sm *SnapshotMigrator) GetStage() string {
+	names := map[uint64]string{
+		StageStart:           "start",
+		StageGenerate:        "generate snapshot",
+		StageReplace:         "snapshot replace",
+		StageStopSeeding:     "stop seeding",
+		StageStartSeedingNew: "start seeding",
+		StagePruneDB:         "prune db data",
+		StageFinish:          "finish",
+	}
+	if name, ok := names[atomic.LoadUint64(&sm.Stage)]; ok {
+		return name
+	}
+	return "unknown stage"
+}
+// Migrate advances the snapshot lifecycle state machine by one step. db is
+// the (snapshot-wrapped) chain database, tx the caller's transaction, toBlock
+// the target snapshot block and bittorrent the seeding client. StageStart
+// kicks off generation/replacement/seeding asynchronously in a goroutine;
+// StagePruneDB and StageFinish run synchronously on the caller's transaction.
+func (sm *SnapshotMigrator) Migrate(db ethdb.Database, tx ethdb.Database, toBlock uint64, bittorrent *Client) error {
+	switch atomic.LoadUint64(&sm.Stage) {
+	case StageStart:
+		log.Info("Snapshot generation block", "skip", atomic.LoadUint64(&sm.HeadersNewSnapshot) >= toBlock)
+		sm.mtx.Lock()
+		if atomic.LoadUint64(&sm.HeadersNewSnapshot) >= toBlock {
+			sm.mtx.Unlock()
+			return nil
+		}
+
+		atomic.StoreUint64(&sm.HeadersNewSnapshot, toBlock)
+		atomic.StoreUint64(&sm.Stage, StageGenerate)
+		ctx, cancel := context.WithCancel(context.Background())
+		sm.cancel = cancel
+		sm.mtx.Unlock()
+		go func() {
+			var err error
+			defer func() {
+				sm.mtx.Lock()
+				//we need to put all errors to err var just to handle error case and return to start
+				if err != nil {
+					log.Warn("Rollback to stage start")
+					atomic.StoreUint64(&sm.Stage, StageStart)
+					atomic.StoreUint64(&sm.HeadersNewSnapshot, atomic.LoadUint64(&sm.HeadersCurrentSnapshot))
+				}
+				sm.cancel = nil
+				sm.mtx.Unlock()
+			}()
+			snapshotPath := SnapshotName(sm.snapshotsDir, "headers", toBlock)
+			tt := time.Now()
+			log.Info("Create snapshot", "type", "headers")
+			err = CreateHeadersSnapshot(ctx, db, toBlock, snapshotPath)
+			if err != nil {
+				log.Error("Create snapshot", "err", err, "block", toBlock)
+				return
+			}
+			log.Info("Snapshot created", "t", time.Since(tt))
+
+			atomic.StoreUint64(&sm.Stage, StageReplace)
+			log.Info("Replace snapshot", "type", "headers")
+			tt = time.Now()
+			err = sm.ReplaceHeadersSnapshot(db, snapshotPath)
+			if err != nil {
+				log.Error("Replace snapshot", "err", err, "block", toBlock, "path", snapshotPath)
+				return
+			}
+			log.Info("Replaced snapshot", "type", "headers", "t", time.Since(tt))
+
+			atomic.StoreUint64(&sm.Stage, StageStopSeeding)
+			//todo headers infohash
+			infohash, err := db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
+			if err != nil {
+				if !errors.Is(err, ethdb.ErrKeyNotFound) {
+					log.Error("Get infohash", "err", err, "block", toBlock)
+					return
+				}
+			}
+
+			if len(infohash) == 20 {
+				var hash metainfo.Hash
+				copy(hash[:], infohash)
+				log.Info("Stop seeding snapshot", "type", "headers", "infohash", hash.String())
+				tt = time.Now()
+				err = bittorrent.StopSeeding(hash)
+				if err != nil {
+					log.Error("Stop seeding", "err", err, "block", toBlock)
+					return
+				}
+				log.Info("Stopped seeding snapshot", "type", "headers", "infohash", hash.String(), "t", time.Since(tt))
+				atomic.StoreUint64(&sm.Stage, StageStartSeedingNew)
+			} else {
+				log.Warn("Hasn't stopped snapshot", "infohash", common.Bytes2Hex(infohash))
+			}
+
+			log.Info("Start seeding snapshot", "type", "headers")
+			tt = time.Now()
+			seedingInfoHash, err := bittorrent.SeedSnapshot("headers", snapshotPath)
+			if err != nil {
+				log.Error("Seeding", "err", err)
+				return
+			}
+			sm.HeadersNewSnapshotInfohash = seedingInfoHash[:]
+			log.Info("Started seeding snapshot", "type", "headers", "t", time.Since(tt), "infohash", seedingInfoHash.String())
+			atomic.StoreUint64(&sm.Stage, StagePruneDB)
+		}()
+
+	case StagePruneDB:
+		var wtx ethdb.RwTx
+		var err error
+		tt := time.Now()
+		log.Info("Prune db", "current", sm.HeadersCurrentSnapshot, "new", sm.HeadersNewSnapshot)
+		// Unwrap tx to a writable low-level transaction.
+		if hasTx, ok := tx.(ethdb.HasTx); ok && hasTx.Tx() != nil {
+			wtx = tx.(ethdb.HasTx).Tx().(ethdb.DBTX).DBTX()
+		} else if wtx1, ok := tx.(ethdb.RwTx); ok {
+			wtx = wtx1
+		} else {
+			log.Error("Incorrect db type", "type", tx)
+			return nil
+		}
+
+		err = sm.RemoveHeadersData(db, wtx)
+		if err != nil {
+			log.Error("Remove headers data", "err", err)
+			return err
+		}
+		c, err := wtx.RwCursor(dbutils.BittorrentInfoBucket)
+		if err != nil {
+			return err
+		}
+		if len(sm.HeadersNewSnapshotInfohash) == 20 {
+			err = c.Put(dbutils.CurrentHeadersSnapshotHash, sm.HeadersNewSnapshotInfohash)
+			if err != nil {
+				return err
+			}
+		}
+		err = c.Put(dbutils.CurrentHeadersSnapshotBlock, dbutils.EncodeBlockNumber(sm.HeadersNewSnapshot))
+		if err != nil {
+			return err
+		}
+
+		log.Info("Prune db success", "t", time.Since(tt))
+		atomic.StoreUint64(&sm.Stage, StageFinish)
+
+	case StageFinish:
+		tt := time.Now()
+		//todo check commited
+		v, err := tx.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+		if errors.Is(err, ethdb.ErrKeyNotFound) {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		// Fix: hold the read lock across both field reads. The original
+		// called RLock and RUnlock back to back, leaving the subsequent
+		// reads of the snapshot block fields unguarded.
+		sm.mtx.RLock()
+		currentSnapshot := sm.HeadersCurrentSnapshot
+		newSnapshot := sm.HeadersNewSnapshot
+		sm.mtx.RUnlock()
+		if currentSnapshot < newSnapshot && currentSnapshot != 0 {
+			oldSnapshotPath := SnapshotName(sm.snapshotsDir, "headers", currentSnapshot)
+			log.Info("Removing old snapshot", "path", oldSnapshotPath)
+			tt = time.Now()
+			err = os.RemoveAll(oldSnapshotPath)
+			if err != nil {
+				log.Error("Remove snapshot", "err", err)
+				return err
+			}
+			log.Info("Removed old snapshot", "path", oldSnapshotPath, "t", time.Since(tt))
+		}
+
+		if len(v) != 8 {
+			log.Error("Incorrect length", "ln", len(v))
+			return nil
+		}
+
+		// Only advance to StageStart once the new snapshot block has been
+		// observed as committed in the database.
+		if binary.BigEndian.Uint64(v) == newSnapshot {
+			atomic.StoreUint64(&sm.Stage, StageStart)
+			atomic.StoreUint64(&sm.HeadersCurrentSnapshot, newSnapshot)
+		}
+		log.Info("Finish success", "t", time.Since(tt))
+
+	default:
+		return nil
+	}
+	return nil
+}
+
+// ReplaceHeadersSnapshot swaps the headers snapshot backing chainDB for the
+// one at snapshotPath, waiting up to 10 minutes (then panicking) for the
+// previous snapshot database to be released.
+func (sm *SnapshotMigrator) ReplaceHeadersSnapshot(chainDB ethdb.Database, snapshotPath string) error {
+	if snapshotPath == "" {
+		log.Error("snapshot path is empty")
+		return errors.New("snapshot path is empty")
+	}
+	kvHolder, ok := chainDB.(ethdb.HasRwKV)
+	if !ok {
+		return errors.New("db don't implement hasKV interface")
+	}
+	updater, ok := kvHolder.RwKV().(ethdb.SnapshotUpdater)
+	if !ok {
+		return errors.New("db don't implement snapshotUpdater interface")
+	}
+	snapshotKV, err := OpenHeadersSnapshot(snapshotPath)
+	if err != nil {
+		return err
+	}
+
+	done := make(chan struct{})
+	updater.UpdateSnapshots([]string{dbutils.HeadersBucket}, snapshotKV, done)
+	select {
+	case <-time.After(time.Minute * 10):
+		log.Error("timeout on closing headers snapshot database")
+		panic("timeout")
+	case <-done:
+	}
+	return nil
+}
+
+// RemoveHeadersData prunes from the writable database layer the headers now
+// covered by the snapshot, between the current and the new snapshot blocks.
+// Fix: receiver renamed from sb to sm for consistency with every other
+// SnapshotMigrator method.
+func (sm *SnapshotMigrator) RemoveHeadersData(db ethdb.Database, tx ethdb.RwTx) (err error) {
+	return RemoveHeadersData(db, tx, sm.HeadersCurrentSnapshot, sm.HeadersNewSnapshot)
+}
+
+// RemoveHeadersData walks the headers snapshot starting at currentSnapshot
+// and deletes every key it finds from the writable headers bucket of tx, so
+// the main database no longer duplicates data served by the snapshot.
+func RemoveHeadersData(db ethdb.Database, tx ethdb.RwTx, currentSnapshot, newSnapshot uint64) (err error) {
+	log.Info("Remove data", "from", currentSnapshot, "to", newSnapshot)
+	kvHolder, ok := db.(ethdb.HasRwKV)
+	if !ok {
+		return errors.New("db don't implement hasKV interface")
+	}
+	updater, ok := kvHolder.RwKV().(ethdb.SnapshotUpdater)
+	if !ok {
+		return errors.New("db don't implement snapshotUpdater interface")
+	}
+	headerSnapshot := updater.SnapshotKV(dbutils.HeadersBucket)
+	if headerSnapshot == nil {
+		// No headers snapshot attached yet, so there is nothing to prune.
+		return nil
+	}
+
+	snapshotDB := ethdb.NewObjectDatabase(headerSnapshot.(ethdb.RwKV))
+	c, err := tx.RwCursor(dbutils.HeadersBucket)
+	if err != nil {
+		return fmt.Errorf("get headers cursor %w", err)
+	}
+	return snapshotDB.Walk(dbutils.HeadersBucket, dbutils.EncodeBlockNumber(currentSnapshot), 0, func(k, v []byte) (bool, error) {
+		if innerErr := c.Delete(k, nil); innerErr != nil {
+			return false, fmt.Errorf("remove %v err:%w", common.Bytes2Hex(k), innerErr)
+		}
+		return true, nil
+	})
+}
diff --git a/turbo/snapshotsync/snapshot_builder_test.go b/turbo/snapshotsync/snapshot_builder_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..85238ba76e7f0208cb727594bd9f733c4bbd19f9
--- /dev/null
+++ b/turbo/snapshotsync/snapshot_builder_test.go
@@ -0,0 +1,299 @@
+package snapshotsync
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"testing"
+	"time"
+
+	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
+	"github.com/ledgerwatch/turbo-geth/ethdb"
+)
+
+// TestSnapshotMigratorStage drives the headers SnapshotMigrator end to end
+// twice (snapshot at block 10, then at block 20) against a tracker-less
+// torrent client. After each pass it checks the writable layer, the snapshot
+// layer and the combined view, the persisted infohash/block metadata, and
+// (after the second pass) that the superseded snapshot directory was removed.
+func TestSnapshotMigratorStage(t *testing.T) {
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	dir, err := ioutil.TempDir(os.TempDir(), "tst")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err != nil {
+			t.Log(err, dir)
+		}
+		err = os.RemoveAll(dir)
+		if err != nil {
+			t.Log(err)
+		}
+
+	}()
+	snapshotsDir := path.Join(dir, "snapshots")
+	err = os.Mkdir(snapshotsDir, os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+	btCli, err := New(snapshotsDir, true, "12345123451234512345")
+	if err != nil {
+		t.Fatal(err)
+	}
+	btCli.trackers = [][]string{}
+
+	db := ethdb.MustOpen(path.Join(dir, "chaindata"))
+	db.SetRwKV(ethdb.NewSnapshotKV().DB(db.RwKV()).Open())
+
+	sb := &SnapshotMigrator{
+		snapshotsDir: snapshotsDir,
+	}
+	generateChan := make(chan int)
+	// Background worker: seeds headers 0..11, then pumps the migrator state
+	// machine once a second, generating more headers whenever a new height
+	// arrives on generateChan. NOTE(review): this goroutine loops forever and
+	// keeps using t after the test body returns.
+	go func() {
+		currentSnapshotBlock := uint64(10)
+		tx, err := db.Begin(context.Background(), ethdb.RW)
+		if err != nil {
+			t.Error(err)
+			panic(err)
+		}
+		defer tx.Rollback()
+		err = GenerateHeaderData(tx, 0, 11)
+		if err != nil {
+			t.Error(err)
+			panic(err)
+		}
+
+		err = tx.Commit()
+		if err != nil {
+			t.Error(err)
+			panic(err)
+		}
+
+		for {
+			tx, err := db.Begin(context.Background(), ethdb.RW)
+			if err != nil {
+				// NOTE(review): tx is presumably nil when Begin fails, so
+				// this Rollback would panic — confirm Begin's contract.
+				tx.Rollback()
+				t.Error(err)
+			}
+
+			select {
+			case newHeight := <-generateChan:
+				err = GenerateHeaderData(tx, int(currentSnapshotBlock), newHeight)
+				if err != nil {
+					t.Error(err)
+					tx.Rollback()
+					panic(err)
+				}
+				currentSnapshotBlock = CalculateEpoch(uint64(newHeight), 10)
+			default:
+
+			}
+
+			err = sb.Migrate(db, tx, currentSnapshotBlock, btCli)
+			if err != nil {
+				tx.Rollback()
+				t.Error(err)
+				panic(err)
+			}
+			err = tx.Commit()
+			if err != nil {
+				t.Error(err)
+				panic(err)
+			}
+			tx.Rollback()
+			time.Sleep(time.Second)
+		}
+	}()
+
+	// Wait until the first snapshot (block 10) is fully switched over.
+	for !(sb.Finished(10)) {
+		time.Sleep(time.Second)
+	}
+
+	sa := db.RwKV().(ethdb.SnapshotUpdater)
+	wodb := ethdb.NewObjectDatabase(sa.WriteDB())
+
+	// Writable layer must only contain the header above the snapshot (11).
+	var headerNumber uint64
+	headerNumber = 11
+	err = wodb.Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
+			t.Error(k)
+		}
+		headerNumber++
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if headerNumber != 12 {
+		t.Fatal(headerNumber)
+	}
+
+	// Snapshot layer must contain exactly headers 0..10.
+	snodb := ethdb.NewObjectDatabase(sa.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV))
+	headerNumber = 0
+	err = snodb.Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
+			t.Fatal(k)
+		}
+		headerNumber++
+
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if headerNumber != 11 {
+		t.Fatal(headerNumber)
+	}
+
+	// Layered view must still expose the full range 0..11.
+	headerNumber = 0
+	err = db.Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
+			t.Fatal(k)
+		}
+		headerNumber++
+
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if headerNumber != 12 {
+		t.Fatal(headerNumber)
+	}
+
+	trnts := btCli.Torrents()
+	if len(trnts) != 1 {
+		t.Fatal("incorrect len", trnts)
+	}
+	v, err := db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(v, trnts[0].Bytes()) {
+		t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
+	}
+
+	v, err = db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if binary.BigEndian.Uint64(v) != 10 {
+		t.Fatal("incorrect snapshot")
+	}
+
+	// Keep a read transaction open so the old snapshot database cannot be
+	// closed until we roll it back below.
+	roTX, err := db.Begin(context.Background(), ethdb.RO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	//just start snapshot transaction
+	// NOTE(review): the value and error of this Get are deliberately ignored.
+	roTX.Get(dbutils.HeadersBucket, []byte{})
+	defer roTX.Rollback()
+
+	generateChan <- 20
+
+	rollbacked := false
+	c := time.After(time.Second * 3)
+	for !(sb.Finished(20)) {
+		select {
+		case <-c:
+			roTX.Rollback()
+			rollbacked = true
+		default:
+		}
+		time.Sleep(time.Second)
+	}
+
+	if !rollbacked {
+		t.Fatal("it's not possible to close db without rollback. something went wrong")
+	}
+
+	err = wodb.Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		t.Fatal("main db must be empty here")
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	headerNumber = 0
+	err = ethdb.NewObjectDatabase(sa.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV)).Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
+			t.Fatal(k)
+		}
+		headerNumber++
+
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if headerNumber != 21 {
+		t.Fatal(headerNumber)
+	}
+	headerNumber = 0
+	err = db.Walk(dbutils.HeadersBucket, []byte{}, 0, func(k, v []byte) (bool, error) {
+		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
+			t.Fatal(k)
+		}
+		headerNumber++
+
+		return true, nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if headerNumber != 21 {
+		t.Fatal(headerNumber)
+	}
+
+	trnts = btCli.Torrents()
+	if len(trnts) != 1 {
+		t.Fatal("incorrect len", trnts)
+	}
+	v, err = db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(v, trnts[0].Bytes()) {
+		t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
+	}
+
+	v, err = db.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if binary.BigEndian.Uint64(v) != 20 {
+		t.Fatal("incorrect snapshot")
+	}
+
+	// The replaced block-10 snapshot directory must be gone.
+	if _, err = os.Stat(SnapshotName(snapshotsDir, "headers", 10)); os.IsExist(err) {
+		t.Fatal("snapshot exsists")
+	} else {
+		//just not to confuse defer
+		err = nil
+	}
+
+}
+
+// GenerateHeaderData writes deterministic canonical-header fixtures for block
+// numbers [from, to]: header key = (num, Hash{num}) with a three-byte payload
+// of num, plus the matching canonical-hash entry. Block numbers must fit in a
+// uint8 because they are embedded as single bytes.
+// Fix: the bound check used math.MaxInt8 (127) while the error message — and
+// the uint8 casts below — allow the full uint8 range (255).
+func GenerateHeaderData(tx ethdb.DbWithPendingMutations, from, to int) error {
+	var err error
+	if to > math.MaxUint8 {
+		return errors.New("greater than uint8")
+	}
+	for i := from; i <= to; i++ {
+		err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(uint64(i), common.Hash{uint8(i)}), []byte{uint8(i), uint8(i), uint8(i)})
+		if err != nil {
+			return err
+		}
+		err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(uint64(i)), common.Hash{uint8(i)}.Bytes())
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/turbo/snapshotsync/wrapdb.go b/turbo/snapshotsync/wrapdb.go
index 65b4a96240ed5897dc8c4ff3be8b529de6286d37..dab86a84b2843748740cc0f02100afcb3dddce0d 100644
--- a/turbo/snapshotsync/wrapdb.go
+++ b/turbo/snapshotsync/wrapdb.go
@@ -1,20 +1,24 @@
 package snapshotsync
 
 import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"time"
+
 	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
 )
 
 var (
+	// BucketConfigs lists, per snapshot type, the buckets that snapshot serves.
-	bucketConfigs = map[SnapshotType]dbutils.BucketsCfg{
+	BucketConfigs = map[SnapshotType]dbutils.BucketsCfg{
 		SnapshotType_bodies: {
-			dbutils.BlockBodyPrefix:          dbutils.BucketConfigItem{},
-			dbutils.BodiesSnapshotInfoBucket: dbutils.BucketConfigItem{},
+			dbutils.BlockBodyPrefix: dbutils.BucketConfigItem{},
+			dbutils.EthTx:           dbutils.BucketConfigItem{},
 		},
 		SnapshotType_headers: {
-			dbutils.HeadersBucket:             dbutils.BucketConfigItem{},
-			dbutils.HeadersSnapshotInfoBucket: dbutils.BucketConfigItem{},
+			dbutils.HeadersBucket: dbutils.BucketConfigItem{},
 		},
 		SnapshotType_state: {
 			dbutils.PlainStateBucket: dbutils.BucketConfigItem{
@@ -25,57 +29,20 @@ var (
 			},
 			dbutils.PlainContractCodeBucket: dbutils.BucketConfigItem{},
 			dbutils.CodeBucket:              dbutils.BucketConfigItem{},
-			dbutils.StateSnapshotInfoBucket: dbutils.BucketConfigItem{},
 		},
 	}
 )
 
+// Deprecated: wiring snapshots straight from a directory was removed; this
+// function now always returns an error. Use WrapBySnapshotsFromDownloader
+// (or WrapSnapshots) instead.
 func WrapBySnapshotsFromDir(kv ethdb.RwKV, snapshotDir string, mode SnapshotMode) (ethdb.RwKV, error) {
-	log.Info("Wrap db to snapshots", "dir", snapshotDir, "mode", mode.ToString())
-	snkv := ethdb.NewSnapshot2KV().DB(kv)
-
-	if mode.Bodies {
-		snapshotKV, err := ethdb.NewLMDB().Readonly().Path(snapshotDir + "/bodies").WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-			return bucketConfigs[SnapshotType_bodies]
-		}).Open()
-		if err != nil {
-			log.Error("Can't open body snapshot", "err", err)
-			return nil, err
-		} else { //nolint
-			snkv.SnapshotDB([]string{dbutils.BlockBodyPrefix, dbutils.BodiesSnapshotInfoBucket}, snapshotKV)
-		}
-	}
-
-	if mode.Headers {
-		snapshotKV, err := ethdb.NewLMDB().Readonly().Path(snapshotDir + "/headers").WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-			return bucketConfigs[SnapshotType_headers]
-		}).Open()
-		if err != nil {
-			log.Error("Can't open headers snapshot", "err", err)
-			return nil, err
-		} else { //nolint
-			snkv.SnapshotDB([]string{dbutils.HeadersBucket, dbutils.HeadersSnapshotInfoBucket}, snapshotKV)
-		}
-	}
-	if mode.State {
-		snapshotKV, err := ethdb.NewLMDB().Readonly().Path(snapshotDir + "/headers").WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
-			return bucketConfigs[SnapshotType_headers]
-		}).Open()
-		if err != nil {
-			log.Error("Can't open headers snapshot", "err", err)
-			return nil, err
-		} else { //nolint
-			snkv.SnapshotDB([]string{dbutils.StateSnapshotInfoBucket, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.CodeBucket}, snapshotKV)
-		}
-	}
-	return snkv.MustOpen(), nil
+	//todo remove it
+	return nil, errors.New("deprecated")
 }
 
+// WrapBySnapshotsFromDownloader layers each downloaded snapshot (a read-only
+// LMDB database at SnapshotsInfo.Dbpath) on top of kv, restricted to the
+// buckets registered for that snapshot type in BucketConfigs, and returns the
+// combined KV.
 func WrapBySnapshotsFromDownloader(kv ethdb.RwKV, snapshots map[SnapshotType]*SnapshotsInfo) (ethdb.RwKV, error) {
-	snKV := ethdb.NewSnapshot2KV().DB(kv)
+	snKV := ethdb.NewSnapshotKV().DB(kv)
 	for k, v := range snapshots {
 		log.Info("Wrap db by", "snapshot", k.String(), "dir", v.Dbpath)
-		cfg := bucketConfigs[k]
+		cfg := BucketConfigs[k]
 		snapshotKV, err := ethdb.NewLMDB().Readonly().Path(v.Dbpath).WithBucketsConfig(func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg {
 			return cfg
 		}).Open()
@@ -85,13 +52,151 @@ func WrapBySnapshotsFromDownloader(kv ethdb.RwKV, snapshots map[SnapshotType]*Sn
 			return nil, err
 		} else { //nolint
 			buckets := make([]string, 0, 1)
-			for bucket := range bucketConfigs[k] {
+			for bucket := range BucketConfigs[k] {
 				buckets = append(buckets, bucket)
 			}
 
-			snKV.SnapshotDB(buckets, snapshotKV)
+			snKV = snKV.SnapshotDB(buckets, snapshotKV)
+		}
+	}
+
+	return snKV.Open(), nil
+}
+
+// WrapSnapshots re-wraps the chain database with the headers snapshot
+// recorded in BittorrentInfoBucket (if one is recorded), so that snapshot
+// generation can read through the layered view.
+func WrapSnapshots(chainDb ethdb.Database, snapshotsDir string) error {
+	snapshotBlock, err := chainDb.Get(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
+	if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) {
+		return err
+	}
+	snKVOpts := ethdb.NewSnapshotKV().DB(chainDb.(ethdb.HasRwKV).RwKV())
+	if len(snapshotBlock) == 8 {
+		snKV, innerErr := OpenHeadersSnapshot(SnapshotName(snapshotsDir, "headers", binary.BigEndian.Uint64(snapshotBlock)))
+		if innerErr != nil {
+			return innerErr
 		}
+		snKVOpts = snKVOpts.SnapshotDB([]string{dbutils.HeadersBucket}, snKV)
 	}
+	//manually wrap current db for snapshot generation
+	chainDb.(ethdb.HasRwKV).SetRwKV(snKVOpts.Open())
+
+	return nil
+}
+
+// DownloadSnapshots fetches the snapshots for networkID according to
+// snapshotMode — either through the external downloader gRPC service at
+// ExternalSnapshotDownloaderAddr, or via the in-process torrentClient — then
+// wraps chainDb with the downloaded snapshot layers and runs PostProcessing.
+func DownloadSnapshots(torrentClient *Client, ExternalSnapshotDownloaderAddr string, networkID uint64, snapshotMode SnapshotMode, chainDb ethdb.Database) error {
+	var downloadedSnapshots map[SnapshotType]*SnapshotsInfo
+	if ExternalSnapshotDownloaderAddr != "" {
+		cli, cl, innerErr := NewClient(ExternalSnapshotDownloaderAddr)
+		if innerErr != nil {
+			return innerErr
+		}
+		defer cl() //nolint
+
+		_, innerErr = cli.Download(context.Background(), &DownloadSnapshotRequest{
+			NetworkId: networkID,
+			Type:      snapshotMode.ToSnapshotTypes(),
+		})
+		if innerErr != nil {
+			return innerErr
+		}
+
+		waitDownload := func() (map[SnapshotType]*SnapshotsInfo, error) {
+			// snapshotReadinessCheck reports whether snapshot tp is fully
+			// downloaded. Fix: tolerate a missing/nil map entry — the
+			// original dereferenced mp[tp] unconditionally and panicked
+			// before the first info for that type arrived.
+			snapshotReadinessCheck := func(mp map[SnapshotType]*SnapshotsInfo, tp SnapshotType) bool {
+				info, ok := mp[tp]
+				if !ok || info == nil {
+					log.Info("Downloading", "snapshot", tp, "%", int32(0))
+					return false
+				}
+				if info.Readiness != int32(100) {
+					log.Info("Downloading", "snapshot", tp, "%", info.Readiness)
+					return false
+				}
+				return true
+			}
+			for {
+				downloadedSnapshots = make(map[SnapshotType]*SnapshotsInfo)
+				snapshots, err1 := cli.Snapshots(context.Background(), &SnapshotsRequest{NetworkId: networkID})
+				if err1 != nil {
+					return nil, err1
+				}
+				// Keep the freshest info per snapshot type. Fix: the nil
+				// check now runs before the map value is dereferenced (the
+				// original read .SnapshotBlock off an absent — nil — entry).
+				for i := range snapshots.Info {
+					if snapshots.Info[i] == nil {
+						continue
+					}
+					if prev, ok := downloadedSnapshots[snapshots.Info[i].Type]; !ok || prev.SnapshotBlock < snapshots.Info[i].SnapshotBlock {
+						downloadedSnapshots[snapshots.Info[i].Type] = snapshots.Info[i]
+					}
+				}
+
+				downloaded := true
+				if snapshotMode.Headers {
+					if !snapshotReadinessCheck(downloadedSnapshots, SnapshotType_headers) {
+						downloaded = false
+					}
+				}
+				if snapshotMode.Bodies {
+					if !snapshotReadinessCheck(downloadedSnapshots, SnapshotType_bodies) {
+						downloaded = false
+					}
+				}
+				if snapshotMode.State {
+					if !snapshotReadinessCheck(downloadedSnapshots, SnapshotType_state) {
+						downloaded = false
+					}
+				}
+				if snapshotMode.Receipts {
+					if !snapshotReadinessCheck(downloadedSnapshots, SnapshotType_receipts) {
+						downloaded = false
+					}
+				}
+				if downloaded {
+					return downloadedSnapshots, nil
+				}
+				time.Sleep(time.Second * 10)
+			}
+		}
+		downloadedSnapshots, innerErr := waitDownload()
+		if innerErr != nil {
+			return innerErr
+		}
+		snapshotKV := chainDb.(ethdb.HasRwKV).RwKV()
 
-	return snKV.MustOpen(), nil
+		snapshotKV, innerErr = WrapBySnapshotsFromDownloader(snapshotKV, downloadedSnapshots)
+		if innerErr != nil {
+			return innerErr
+		}
+		chainDb.(ethdb.HasRwKV).SetRwKV(snapshotKV)
+
+		innerErr = PostProcessing(chainDb, downloadedSnapshots)
+		if innerErr != nil {
+			return innerErr
+		}
+	} else {
+		err := torrentClient.Load(chainDb)
+		if err != nil {
+			return err
+		}
+		err = torrentClient.AddSnapshotsTorrents(context.Background(), chainDb, networkID, snapshotMode)
+		if err == nil {
+			torrentClient.Download()
+			var innerErr error
+			snapshotKV := chainDb.(ethdb.HasRwKV).RwKV()
+			downloadedSnapshots, innerErr := torrentClient.GetSnapshots(chainDb, networkID)
+			if innerErr != nil {
+				return innerErr
+			}
+
+			snapshotKV, innerErr = WrapBySnapshotsFromDownloader(snapshotKV, downloadedSnapshots)
+			if innerErr != nil {
+				return innerErr
+			}
+			chainDb.(ethdb.HasRwKV).SetRwKV(snapshotKV)
+			tx, err := chainDb.Begin(context.Background(), ethdb.RW)
+			if err != nil {
+				return err
+			}
+			defer tx.Rollback()
+			// Fix: check the PostProcessing error before committing — the
+			// original committed the transaction even when PostProcessing
+			// had failed.
+			innerErr = PostProcessing(chainDb, downloadedSnapshots)
+			if innerErr != nil {
+				return innerErr
+			}
+			if err = tx.Commit(); err != nil {
+				return err
+			}
+		} else {
+			log.Error("There was an error in snapshot init. Switching to regular sync", "err", err)
+		}
+	}
+	return nil
+}