diff --git a/cmd/restapi/apis/storage_tombstone_api.go b/cmd/restapi/apis/storage_tombstone_api.go
deleted file mode 100644
index 7a718708420d078ea800052b9426d8b2ac4226dc..0000000000000000000000000000000000000000
--- a/cmd/restapi/apis/storage_tombstone_api.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package apis
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"net/http"
-
-	"github.com/gin-gonic/gin"
-	"github.com/ledgerwatch/turbo-geth/common"
-	"github.com/ledgerwatch/turbo-geth/common/dbutils"
-	"github.com/ledgerwatch/turbo-geth/ethdb"
-)
-
-func RegisterStorageTombstonesAPI(router *gin.RouterGroup, e *Env) error {
-	router.GET("/", e.FindStorageTombstone)
-	router.GET("/integrity/", e.GetTombstoneIntegrity)
-	return nil
-}
-func (e *Env) GetTombstoneIntegrity(c *gin.Context) {
-	results, err := storageTombstonesIntegrityDBCheck(e.DB)
-	if err != nil {
-		c.Error(err) //nolint:errcheck
-		return
-	}
-	c.JSON(http.StatusOK, results)
-}
-func (e *Env) FindStorageTombstone(c *gin.Context) {
-	results, err := findStorageTombstoneByPrefix(c.Query("prefix"), e.DB)
-	if err != nil {
-		c.Error(err) //nolint:errcheck
-		return
-	}
-	c.JSON(http.StatusOK, results)
-}
-
-type StorageTombsResponse struct {
-	Prefix      string `json:"prefix"`
-	HideStorage bool   `json:"hideStorage"`
-}
-
-func findStorageTombstoneByPrefix(prefixS string, remoteDB ethdb.KV) ([]*StorageTombsResponse, error) {
-	var results []*StorageTombsResponse
-	prefix := common.FromHex(prefixS)
-	if err := remoteDB.View(context.TODO(), func(tx ethdb.Tx) error {
-		interBucket := tx.Bucket(dbutils.IntermediateTrieHashBucket)
-		c := interBucket.Cursor().Prefix(prefix).NoValues()
-		storage := tx.Bucket(dbutils.StorageBucket).Cursor().Prefetch(1).NoValues()
-
-		for k, vSize, err := c.First(); k != nil || err != nil; k, vSize, err = c.Next() {
-			if err != nil {
-				return err
-			}
-
-			if vSize > 0 {
-				continue
-			}
-
-			// each tomb must cover storage
-			hideStorage := false
-			addrHash := common.CopyBytes(k[:common.HashLength])
-			storageK, _, err := storage.Seek(addrHash)
-			if err != nil {
-				return err
-			}
-			if !bytes.HasPrefix(storageK, addrHash) {
-				hideStorage = false
-			} else {
-				incarnation := dbutils.DecodeIncarnation(storageK[common.HashLength : common.HashLength+8])
-				for ; incarnation > 0; incarnation-- {
-					kWithInc := dbutils.GenerateStoragePrefix(common.BytesToHash(addrHash), incarnation)
-					kWithInc = append(kWithInc, k[common.HashLength:]...)
-					storageK, _, err = storage.Seek(kWithInc)
-					if err != nil {
-						return err
-					}
-					if bytes.HasPrefix(storageK, kWithInc) {
-						hideStorage = true
-					}
-				}
-				if hideStorage {
-					break
-				}
-			}
-
-			results = append(results, &StorageTombsResponse{
-				Prefix:      fmt.Sprintf("%x\n", k),
-				HideStorage: hideStorage,
-			})
-
-			if len(results) > 50 {
-				results = append(results, &StorageTombsResponse{
-					Prefix:      "too much results",
-					HideStorage: true,
-				})
-				return nil
-			}
-		}
-
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-
-	return results, nil
-}
-
-type IntegrityCheck struct {
-	Name  string `json:"name"`
-	Value string `json:"value"`
-}
-
-func storageTombstonesIntegrityDBCheck(remoteDB ethdb.KV) ([]*IntegrityCheck, error) {
-	var results []*IntegrityCheck
-	return results, remoteDB.View(context.TODO(), func(tx ethdb.Tx) error {
-		res, err := storageTombstonesIntegrityDBCheckTx(tx)
-		if err != nil {
-			return err
-		}
-		results = res
-		return nil
-	})
-}
-
-func storageTombstonesIntegrityDBCheckTx(tx ethdb.Tx) ([]*IntegrityCheck, error) {
-	var res []*IntegrityCheck
-	check1 := &IntegrityCheck{
-		Name:  "tombstone must hide at least 1 storage",
-		Value: "ok",
-	}
-	res = append(res, check1)
-
-	inter := tx.Bucket(dbutils.IntermediateTrieHashBucket).Cursor().Prefetch(1000).NoValues()
-	storage := tx.Bucket(dbutils.StorageBucket).Cursor().Prefetch(10).NoValues()
-
-	for k, vSize, err := inter.First(); k != nil || err != nil; k, vSize, err = inter.Next() {
-		if err != nil {
-			return nil, err
-		}
-		if vSize > 0 {
-			continue
-		}
-
-		// each tombstone must hide at least 1 storage
-		addrHash := common.CopyBytes(k[:common.HashLength])
-		storageK, _, err := storage.Seek(addrHash)
-		if err != nil {
-			return nil, err
-		}
-		if !bytes.HasPrefix(storageK, addrHash) {
-			return nil, fmt.Errorf("tombstone %x has no storage to hide\n", k)
-		} else {
-			incarnation := dbutils.DecodeIncarnation(storageK[common.HashLength : common.HashLength+8])
-			hideStorage := false
-			for ; incarnation > 0; incarnation-- {
-				kWithInc := dbutils.GenerateStoragePrefix(common.BytesToHash(addrHash), incarnation)
-				kWithInc = append(kWithInc, k[common.HashLength:]...)
-				storageK, _, err = storage.Seek(kWithInc)
-				if err != nil {
-					return nil, err
-				}
-				if bytes.HasPrefix(storageK, kWithInc) {
-					hideStorage = true
-				}
-			}
-
-			if !hideStorage {
-				check1.Value = fmt.Sprintf("tombstone %x has no storage to hide\n", k)
-				break
-			}
-		}
-	}
-	return res, nil
-}
diff --git a/cmd/restapi/rest/serve_rest.go b/cmd/restapi/rest/serve_rest.go
index 7ce5e263171a879f9922ae2a73f505a09e76cec3..553bcf17e921a78cc053c2cf9c9052b4165c0fae 100644
--- a/cmd/restapi/rest/serve_rest.go
+++ b/cmd/restapi/rest/serve_rest.go
@@ -49,9 +49,6 @@ func ServeREST(ctx context.Context, localAddress, remoteDBAddress string) error
 	if err = apis.RegisterStorageAPI(root.Group("storage"), e); err != nil {
 		return err
 	}
-	if err = apis.RegisterStorageTombstonesAPI(root.Group("storage-tombstones"), e); err != nil {
-		return err
-	}
 
 	log.Printf("serving on %v... press ctrl+C to abort\n", localAddress)
 
diff --git a/core/state/database.go b/core/state/database.go
index 2d27a37a055772ea25ee2a25cdc812afe4aed1f2..45e24a699c20bde628ce2a17507e70937c6638f7 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -20,7 +20,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"runtime"
@@ -28,11 +27,9 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"github.com/ledgerwatch/bolt"
 	"github.com/ledgerwatch/turbo-geth/common"
 	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/common/debug"
-	"github.com/ledgerwatch/turbo-geth/common/pool"
 	"github.com/ledgerwatch/turbo-geth/core/types/accounts"
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/log"
@@ -253,257 +250,6 @@ func (tds *TrieDbState) Copy() *TrieDbState {
 	return &cpy
 }
 
-func ClearTombstonesForReCreatedAccount(db ethdb.MinDatabase, addrHash common.Hash) error {
-	addrHashBytes := addrHash[:]
-	if ok, err := HasTombstone(db, addrHashBytes); err != nil {
-		return err
-	} else if !ok {
-		return nil
-	}
-
-	var boltDb *bolt.DB
-	if hasBolt, ok := db.(ethdb.HasKV); ok {
-		boltDb = hasBolt.KV()
-	} else {
-		return fmt.Errorf("only Bolt supported yet, given: %T", db)
-	}
-
-	var toPut [][]byte
-	if err := boltDb.Update(func(tx *bolt.Tx) error {
-		if debug.IntermediateTrieHashAssertDbIntegrity {
-			defer func() {
-				if err := StorageTombstonesIntegrityDBCheck(tx); err != nil {
-					panic(fmt.Errorf("ClearTombstonesForReCreatedAccount(%x): %w\n", addrHash, err))
-				}
-			}()
-		}
-
-		storage := tx.Bucket(dbutils.StorageBucket).Cursor()
-
-		k, _ := storage.Seek(addrHashBytes)
-		if !bytes.HasPrefix(k, addrHashBytes) {
-			return nil
-		}
-
-		buf := pool.GetBuffer(256)
-		defer pool.PutBuffer(buf)
-
-		incarnation := dbutils.DecodeIncarnation(k[common.HashLength : common.HashLength+8])
-		for ; incarnation > 0; incarnation-- {
-			accWithInc := dbutils.GenerateStoragePrefix(addrHash, incarnation)
-			for k, _ = storage.Seek(accWithInc); k != nil; k, _ = storage.Next() {
-				if !bytes.HasPrefix(k, accWithInc) {
-					k = nil
-				}
-
-				if k == nil {
-					break
-				}
-
-				buf.Reset()
-				dbutils.RemoveIncarnationFromKey(k, &buf.B)
-				toPut = append(toPut, common.CopyBytes(buf.B[:common.HashLength+1]))
-			}
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-	for _, k := range toPut {
-		if err := db.Put(dbutils.IntermediateTrieHashBucket, k, []byte{}); err != nil {
-			return err
-		}
-	}
-
-	if err := db.Delete(dbutils.IntermediateTrieHashBucket, addrHashBytes); err != nil {
-		return err
-	}
-	return nil
-}
-
-// PutTombstoneForDeletedAccount - placing tombstone only if given account has storage in database
-func PutTombstoneForDeletedAccount(db ethdb.MinDatabase, addrHash []byte) error {
-	if len(addrHash) != common.HashLength {
-		return nil
-	}
-
-	var boltDb *bolt.DB
-	if hasKV, ok := db.(ethdb.HasKV); ok {
-		boltDb = hasKV.KV()
-	} else {
-		return fmt.Errorf("only Bolt supported yet, given: %T", db)
-	}
-
-	buf := pool.GetBuffer(64)
-	defer pool.PutBuffer(buf)
-
-	hasStorage := false
-	if err := boltDb.View(func(tx *bolt.Tx) error {
-		if debug.IntermediateTrieHashAssertDbIntegrity {
-			defer func() {
-				if err := StorageTombstonesIntegrityDBCheck(tx); err != nil {
-					panic(fmt.Errorf("PutTombstoneForDeletedAccount(%x): %w\n", addrHash, err))
-				}
-			}()
-		}
-
-		// place 1 tombstone to account if it has storage
-		storage := tx.Bucket(dbutils.StorageBucket).Cursor()
-		k, _ := storage.Seek(addrHash)
-		if !bytes.HasPrefix(k, addrHash) {
-			return nil
-		}
-
-		if k != nil {
-			hasStorage = true
-		}
-
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	if !hasStorage {
-		return nil
-	}
-
-	return db.Put(dbutils.IntermediateTrieHashBucket, common.CopyBytes(addrHash), []byte{})
-}
-
-func ClearTombstonesForNewStorage(db ethdb.MinDatabase, storageKeyNoInc []byte) error {
-	var boltDb *bolt.DB
-	if hasKV, ok := db.(ethdb.HasKV); ok {
-		boltDb = hasKV.KV()
-	} else {
-		return fmt.Errorf("only Bolt supported yet, given: %T", db)
-	}
-	addrHashBytes := common.CopyBytes(storageKeyNoInc[:common.HashLength])
-
-	var toPut [][]byte
-	var toDelete [][]byte
-	if err := boltDb.View(func(tx *bolt.Tx) error {
-		if debug.IntermediateTrieHashAssertDbIntegrity {
-			defer func() {
-				if err := StorageTombstonesIntegrityDBCheck(tx); err != nil {
-					panic(fmt.Errorf("ClearTombstonesForNewStorage(%x): %w\n", storageKeyNoInc, err))
-				}
-			}()
-		}
-
-		interBucket := tx.Bucket(dbutils.IntermediateTrieHashBucket)
-		storage := tx.Bucket(dbutils.StorageBucket).Cursor()
-
-		storageK, _ := storage.Seek(addrHashBytes)
-		if !bytes.HasPrefix(storageK, addrHashBytes) {
-			storageK = nil
-		}
-		if storageK == nil {
-			return nil
-		}
-
-		kWithInc := pool.GetBuffer(256)
-		defer pool.PutBuffer(kWithInc)
-		kWithInc.B = kWithInc.B[:0]
-		kWithInc.B = append(kWithInc.B, storageK[:common.HashLength+8]...)
-		kWithInc.B = append(kWithInc.B, storageKeyNoInc[common.HashLength:]...)
-
-		buf := pool.GetBuffer(256)
-		defer pool.PutBuffer(buf)
-
-		for i := common.HashLength + 1; i < len(storageKeyNoInc)-1; i++ { // +1 because first step happened during account re-creation
-			tombStone, _ := interBucket.Get(storageKeyNoInc[:i])
-			foundTombstone := tombStone != nil && len(tombStone) == 0
-			if !foundTombstone {
-				continue
-			}
-
-			for storageK, _ = storage.Seek(kWithInc.B[:i+8]); storageK != nil; storageK, _ = storage.Next() {
-				if !bytes.HasPrefix(storageK, kWithInc.B[:i+8]) {
-					storageK = nil
-				}
-				if storageK == nil {
-					break
-				}
-
-				if storageK[i+8] == storageKeyNoInc[i] { // clear path for given storage
-					continue
-				}
-
-				buf.Reset()
-				dbutils.RemoveIncarnationFromKey(storageK[:i+1+8], &buf.B)
-				toPut = append(toPut, common.CopyBytes(buf.B))
-			}
-			toDelete = append(toDelete, common.CopyBytes(storageKeyNoInc[:i]))
-			break
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	for _, k := range toPut {
-		if err := db.Put(dbutils.IntermediateTrieHashBucket, k, []byte{}); err != nil {
-			return err
-		}
-	}
-
-	for _, k := range toDelete {
-		if err := db.Delete(dbutils.IntermediateTrieHashBucket, k); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func StorageTombstonesIntegrityDBCheck(tx *bolt.Tx) error {
-	inter := tx.Bucket(dbutils.IntermediateTrieHashBucket).Cursor()
-	cOverlap := tx.Bucket(dbutils.IntermediateTrieHashBucket).Cursor()
-	storage := tx.Bucket(dbutils.StorageBucket).Cursor()
-
-	for k, v := inter.First(); k != nil; k, v = inter.Next() {
-		if len(v) > 0 {
-			continue
-		}
-
-		// 1 prefix must be covered only by 1 tombstone
-		from := append(k, []byte{0, 0}...)
-		for overlapK, overlapV := cOverlap.Seek(from); overlapK != nil; overlapK, overlapV = cOverlap.Next() {
-			if !bytes.HasPrefix(overlapK, from) {
-				overlapK = nil
-			}
-			if len(overlapV) > 0 {
-				continue
-			}
-
-			if bytes.HasPrefix(overlapK, k) {
-				return fmt.Errorf("%x is prefix of %x\n", overlapK, k)
-			}
-		}
-
-		addrHash := common.CopyBytes(k[:common.HashLength])
-		storageK, _ := storage.Seek(addrHash)
-		if !bytes.HasPrefix(storageK, addrHash) {
-			return fmt.Errorf("tombstone %x has no storage to hide\n", k)
-		} else {
-			incarnation := dbutils.DecodeIncarnation(storageK[common.HashLength : common.HashLength+8])
-			hideStorage := false
-			for ; incarnation > 0; incarnation-- {
-				kWithInc := dbutils.GenerateStoragePrefix(common.BytesToHash(addrHash), incarnation)
-				kWithInc = append(kWithInc, k[common.HashLength:]...)
-				storageK, _ = storage.Seek(kWithInc)
-				if bytes.HasPrefix(storageK, kWithInc) {
-					hideStorage = true
-				}
-			}
-			if !hideStorage {
-				return fmt.Errorf("tombstone %x has no storage to hide\n", k)
-			}
-		}
-	}
-	return nil
-}
-
 func (tds *TrieDbState) putIntermediateHash(key []byte, nodeHash []byte) {
 	if err := tds.db.Put(dbutils.IntermediateTrieHashBucket, common.CopyBytes(key), common.CopyBytes(nodeHash)); err != nil {
 		log.Warn("could not put intermediate trie hash", "err", err)
@@ -958,7 +704,6 @@ func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
 			// The only difference between Delete and DeleteSubtree is that Delete would delete accountNode too,
 			// wherewas DeleteSubtree will keep the accountNode, but will make the storage sub-trie empty
 			tds.t.DeleteSubtree(addrHash[:])
-			_ = ClearTombstonesForReCreatedAccount(tds.db, addrHash)
 		}
 
 		for addrHash, account := range b.accountUpdates {
@@ -982,7 +727,6 @@ func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
 				if len(v) > 0 {
 					//fmt.Printf("Update storage trie addrHash %x, keyHash %x: %x\n", addrHash, keyHash, v)
 					if forward {
-						_ = ClearTombstonesForNewStorage(tds.db, cKey)
 						tds.t.Update(cKey, v)
 					} else {
 						// If rewinding, it might not be possible to execute storage item update.
@@ -1079,7 +823,6 @@ func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
 			}
 
 			tds.t.DeleteSubtree(addrHash[:])
-			_ = PutTombstoneForDeletedAccount(tds.db, addrHash[:])
 		}
 		roots[i] = tds.t.Hash()
 	}
@@ -1087,17 +830,6 @@ func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
 	return roots, nil
 }
 
-func HasTombstone(db ethdb.MinDatabase, prefix []byte) (bool, error) {
-	v, err := db.Get(dbutils.IntermediateTrieHashBucket, prefix)
-	if err != nil {
-		if errors.Is(err, ethdb.ErrKeyNotFound) {
-			return false, nil
-		}
-		return false, err
-	}
-	return v != nil && len(v) == 0, nil
-}
-
 func (tds *TrieDbState) clearUpdates() {
 	tds.buffers = nil
 	tds.currentBuffer = nil
diff --git a/core/state/database_test.go b/core/state/database_test.go
index 996e9a043284212dbb75020ac188238f2c855ae6..3d8e78ae11a70fe72a781a6828167c1b46e93734 100644
--- a/core/state/database_test.go
+++ b/core/state/database_test.go
@@ -17,14 +17,12 @@
 package state_test
 
 import (
-	"bytes"
 	"context"
 	"fmt"
 	"math/big"
 	"testing"
 
 	"github.com/davecgh/go-spew/spew"
-	"github.com/ledgerwatch/bolt"
 	"github.com/ledgerwatch/turbo-geth/accounts/abi/bind"
 	"github.com/ledgerwatch/turbo-geth/accounts/abi/bind/backends"
 	"github.com/ledgerwatch/turbo-geth/common"
@@ -41,7 +39,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/ethdb"
 	"github.com/ledgerwatch/turbo-geth/params"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 // Create revival problem
@@ -1156,206 +1153,6 @@ func TestWrongIncarnation2(t *testing.T) {
 
 }
 
-func TestClearTombstonesForReCreatedAccount(t *testing.T) {
-	require, assert, db := require.New(t), assert.New(t), ethdb.NewMemDatabase()
-
-	accKey := fmt.Sprintf("11%062x", 0)
-	k1 := fmt.Sprintf("11%062x", 0)
-	k2 := fmt.Sprintf("2211%062x", 0)
-	k3 := fmt.Sprintf("2233%062x", 0)
-	k4 := fmt.Sprintf("44%062x", 0)
-
-	storageKey := func(incarnation uint64, storageKey string) []byte {
-		return append(dbutils.GenerateStoragePrefix(common.HexToHash(accKey), incarnation), common.FromHex(storageKey)...)
-	}
-
-	putStorage := func(incarnation uint64, k string, v string) {
-		err := db.Put(dbutils.StorageBucket, storageKey(incarnation, k), common.FromHex(v))
-		require.NoError(err)
-	}
-
-	checkProps := func() {
-		if err := db.KV().View(func(tx *bolt.Tx) error {
-			inter := tx.Bucket(dbutils.IntermediateTrieHashBucket).Cursor()
-			storage := tx.Bucket(dbutils.StorageBucket).Cursor()
-
-			for k, v := inter.First(); k != nil; k, v = inter.Next() {
-				if len(v) > 0 {
-					continue
-				}
-
-				// each tomb must cover storage
-				addrHash := common.CopyBytes(k[:common.HashLength])
-				storageK, _ := storage.Seek(addrHash)
-				if !bytes.HasPrefix(storageK, addrHash) {
-					panic(fmt.Sprintf("tombstone %x has no storage to hide\n", k))
-				} else {
-					incarnation := dbutils.DecodeIncarnation(storageK[common.HashLength : common.HashLength+8])
-					hideStorage := false
-					for ; incarnation > 0; incarnation-- {
-						kWithInc := dbutils.GenerateStoragePrefix(common.BytesToHash(addrHash), incarnation)
-						kWithInc = append(kWithInc, k[common.HashLength:]...)
-						storageK, _ = storage.Seek(kWithInc)
-						if bytes.HasPrefix(storageK, kWithInc) {
-							hideStorage = true
-						}
-					}
-					if !hideStorage {
-						panic(fmt.Sprintf("tombstone %x has no storage to hide\n", k))
-					}
-				}
-			}
-			return nil
-		}); err != nil {
-			panic(err)
-		}
-	}
-
-	//printBucket := func() {
-	//	fmt.Printf("IH bucket print\n")
-	//	_ = db.HasKV().View(func(tx *bolt.Tx) error {
-	//		tx.Bucket(dbutils.IntermediateTrieHashBucket).ForEach(func(k, v []byte) error {
-	//			if len(v) == 0 {
-	//				fmt.Printf("IH: %x\n", k)
-	//			}
-	//			return nil
-	//		})
-	//		return nil
-	//	})
-	//	fmt.Printf("IH bucket print END\n")
-	//}
-
-	acc := accounts.NewAccount()
-	acc.Incarnation = 1
-	encodedAcc := make([]byte, acc.EncodingLengthForStorage())
-	acc.EncodeForStorage(encodedAcc)
-	err := db.Put(dbutils.AccountsBucket, common.FromHex(accKey), encodedAcc)
-	require.NoError(err)
-
-	putStorage(2, k1, "hi")
-	putStorage(2, k2, "hi")
-	putStorage(2, k3, "hi")
-	putStorage(2, k4, "hi")
-
-	// step 1: delete account
-	batch := db.NewBatch()
-	err = state.PutTombstoneForDeletedAccount(batch, common.FromHex(accKey))
-	require.NoError(err)
-	_, err = batch.Commit()
-	require.NoError(err)
-	//printBucket()
-	checkProps()
-
-	untouchedAcc := fmt.Sprintf("99%062x", 0)
-	checks := map[string]bool{
-		accKey:       true,
-		untouchedAcc: false,
-	}
-
-	for k, expect := range checks {
-		ok, err1 := state.HasTombstone(db, common.FromHex(k))
-		require.NoError(err1, k)
-		assert.Equal(expect, ok, k)
-	}
-
-	// step 2: re-create account
-	batch = db.NewBatch()
-	err = state.ClearTombstonesForReCreatedAccount(batch, common.HexToHash(accKey))
-	require.NoError(err)
-	_, err = batch.Commit()
-	require.NoError(err)
-	//printBucket()
-	checkProps()
-
-	checks = map[string]bool{
-		accKey:        false,
-		accKey + "11": true,
-		accKey + "22": true,
-		accKey + "aa": false,
-	}
-
-	for k, expect := range checks {
-		ok, err1 := state.HasTombstone(db, common.FromHex(k))
-		require.NoError(err1, k)
-		assert.Equal(expect, ok, k)
-	}
-
-	// step 3: re-create storage
-	batch = db.NewBatch()
-	err = state.ClearTombstonesForNewStorage(batch, common.FromHex(accKey+k2))
-	require.NoError(err)
-	_, err = batch.Commit()
-	require.NoError(err)
-	//printBucket()
-	checkProps()
-
-	checks = map[string]bool{
-		accKey + "11":     true,
-		accKey + k2:       false,
-		accKey + "22":     false,
-		accKey + "2200":   false,
-		accKey + "2211":   false,
-		accKey + "2233":   true,
-		accKey + "223300": false,
-		accKey + "22ab":   false,
-		accKey + "44":     true,
-	}
-
-	for k, expect := range checks {
-		ok, err1 := state.HasTombstone(db, common.FromHex(k))
-		require.NoError(err1, k)
-		assert.Equal(expect, ok, k)
-	}
-
-	// step 4: create one new storage
-	batch = db.NewBatch()
-	err = state.ClearTombstonesForNewStorage(batch, common.FromHex(accKey+k4))
-	require.NoError(err)
-	_, err = batch.Commit()
-	require.NoError(err)
-	//printBucket()
-	checkProps()
-
-	checks = map[string]bool{
-		accKey + k2:         false, // results of step2 preserved
-		accKey + "22":       false, // results of step2 preserved
-		accKey + "2211":     false, // results of step2 preserved
-		accKey + "22110000": false, // results of step2 preserved
-		accKey + "2233":     true,  // results of step2 preserved
-		accKey + "44":       false, // results of step2 preserved
-	}
-
-	for k, expect := range checks {
-		ok, err := state.HasTombstone(db, common.FromHex(k))
-		require.NoError(err, k)
-		assert.Equal(expect, ok, k)
-	}
-
-	// step 5: delete account again - it must remove all tombstones and keep only 1 which will cover account itself
-	batch = db.NewBatch()
-	err = state.PutTombstoneForDeletedAccount(batch, common.FromHex(accKey))
-	require.NoError(err)
-	_, err = batch.Commit()
-	require.NoError(err)
-	//printBucket()
-	checkProps()
-
-	checks = map[string]bool{
-		accKey:       true,
-		untouchedAcc: false,
-
-		// accKey + "2233" was true on previous step, don't delete this tombstone even one with shorter prefix exists.
-		// Because account creation must do predictable amount of operations.
-		accKey + "2233": true,
-	}
-
-	for k, expect := range checks {
-		ok, err1 := state.HasTombstone(db, common.FromHex(k))
-		require.NoError(err1, k)
-		assert.Equal(expect, ok, k)
-	}
-}
-
 func TestChangeAccountCodeBetweenBlocks(t *testing.T) {
 	contract := common.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624")
 
diff --git a/debug-web-ui/src/App.js b/debug-web-ui/src/App.js
index fcbb2cf3cfc13e3812fa42ce7ef5d3e2c3d04043..651bc68e2449e11eb8ca9380f8441692a0e959fb 100644
--- a/debug-web-ui/src/App.js
+++ b/debug-web-ui/src/App.js
@@ -5,7 +5,6 @@ import API from './utils/API.js';
 import ErrorCatcher from './components/ErrorCatcher.js';
 import { BrowserRouter as Router, Link, NavLink, Redirect, Route, Switch } from 'react-router-dom';
 import AccountsPage from './page/Accounts';
-import StorageTombstonesPage from './page/StorageTombstonesPage';
 import { ReactComponent as Logo } from './logo.svg';
 import './App.css';
 import StoragePage from './page/Storage';
@@ -20,10 +19,6 @@ const sidebar = [
     url: '/storage',
     label: 'Storage',
   },
-  {
-    url: '/storage-tombstones',
-    label: 'Storage Tombs',
-  },
 ];
 
 function App() {
@@ -68,9 +63,6 @@ function App() {
                 <Route path="/storage">
                   <StoragePage api={api} />
                 </Route>
-                <Route path="/storage-tombstones">
-                  <StorageTombstonesPage api={api} />
-                </Route>
               </Switch>
             </Col>
           </Row>
diff --git a/debug-web-ui/src/components/LookupStorageTombstonesForm.js b/debug-web-ui/src/components/LookupStorageTombstonesForm.js
deleted file mode 100644
index 61be186ba4c43f4de63719cddcbcb7069c39e4d1..0000000000000000000000000000000000000000
--- a/debug-web-ui/src/components/LookupStorageTombstonesForm.js
+++ /dev/null
@@ -1,73 +0,0 @@
-import React, { useState } from 'react';
-
-import Row from 'react-bootstrap/Row';
-import Col from 'react-bootstrap/Col';
-import { Spinner, Table } from 'react-bootstrap';
-
-import SearchField from './SearchField.js';
-
-const search = (prefix, api, setState) => {
-  setState({ hashes: undefined, loading: true });
-
-  const lookupSuccess = (response) => setState({ hashes: response.data, loading: false });
-  const lookupFail = (error) => {
-    setState({ hashes: undefined, loading: false });
-
-    setState(() => {
-      throw error;
-    });
-  };
-
-  return api.lookupStorageTombstones(prefix).then(lookupSuccess).catch(lookupFail);
-};
-
-const LookupStorageTombstonesForm = ({ api }) => {
-  const [state, setState] = useState({ hashes: undefined, loading: false });
-
-  return (
-    <div>
-      {state.loading && <Spinner animation="border" />}
-      {!state.loading && (
-        <SearchField placeholder="lookup by prefix" onClick={(prefix) => search(prefix, api, setState)} />
-      )}
-      {state.hashes && <Details hashes={state.hashes} />}
-    </div>
-  );
-};
-
-const Details = ({ hashes }) => (
-  <Row>
-    <Col>
-      <Table size="sm" borderless>
-        <thead>
-          <tr>
-            <th>
-              <strong>Prefix</strong>
-            </th>
-            <th>
-              <strong>Hide storage</strong>
-            </th>
-          </tr>
-        </thead>
-        <tbody>
-          {hashes.map((item, i) => (
-            <TableRow key={i} item={item} />
-          ))}
-        </tbody>
-      </Table>
-    </Col>
-  </Row>
-);
-
-const TableRow = ({ item }) => {
-  const { prefix, hideStorage } = item;
-
-  return (
-    <tr>
-      <td className="text-monospace">{prefix}</td>
-      <td className={hideStorage ? '' : 'bg-danger'}>{hideStorage ? 'yes' : 'no'}</td>
-    </tr>
-  );
-};
-
-export default LookupStorageTombstonesForm;
diff --git a/debug-web-ui/src/page/StorageTombstonesPage.js b/debug-web-ui/src/page/StorageTombstonesPage.js
deleted file mode 100644
index 464747244ebf3d03e505474e8366b07d7acb316a..0000000000000000000000000000000000000000
--- a/debug-web-ui/src/page/StorageTombstonesPage.js
+++ /dev/null
@@ -1,25 +0,0 @@
-import React from 'react';
-
-import { Col, Container, Row } from 'react-bootstrap';
-import LookupStorageTombstonesForm from '../components/LookupStorageTombstonesForm';
-import StorageTombstonesIntegrityChecks from '../components/StorageTombstonesIntegrityChecks';
-
-const StorageTombstonesPage = ({ api }) => (
-  <Container fluid className="mt-1">
-    <Row>
-      <Col>
-        <h1>Storage Tombstones</h1>
-      </Col>
-    </Row>
-    <Row>
-      <Col xs={10}>
-        <LookupStorageTombstonesForm api={api} />
-      </Col>
-      <Col xs={2}>
-        <StorageTombstonesIntegrityChecks api={api} />
-      </Col>
-    </Row>
-  </Container>
-);
-
-export default StorageTombstonesPage;
diff --git a/trie/resolver_stateful_cached.go b/trie/resolver_stateful_cached.go
index b93793afa074fe7bdd72a328542b633ed5a105c3..357f41f38900c5ec6a45656f606c542288031997 100644
--- a/trie/resolver_stateful_cached.go
+++ b/trie/resolver_stateful_cached.go
@@ -16,8 +16,6 @@ import (
 	"github.com/ledgerwatch/turbo-geth/trie/rlphacks"
 )
 
-const TraceFromBlock uint64 = 258199
-
 type ResolverStatefulCached struct {
 	*ResolverStateful
 	fromCache bool
@@ -251,9 +249,6 @@ func (tr *ResolverStatefulCached) WalkerStorage(keyIdx int, blockNr uint64, k []
 
 // Walker - k, v - shouldn't be reused in the caller's code
 func (tr *ResolverStatefulCached) Walker(isAccount bool, blockNr uint64, fromCache bool, keyIdx int, kAsNibbles []byte, v []byte) error {
-	//if isAccount && fromCache {
-	//	buf := pool.GetBuffer(256)
-	//	CompressNibbles(kAsNibbles, &buf.B)
 	if tr.trace {
 		fmt.Printf("Walker Cached: blockNr: %d, keyIdx: %d key:%x  value:%x, fromCache: %v\n", blockNr, keyIdx, kAsNibbles, v, fromCache)
 	}
@@ -385,7 +380,6 @@ func (tr *ResolverStatefulCached) MultiWalk2(db *bolt.DB, blockNr uint64, bucket
 		var minKey []byte
 		var fromCache bool
 		for k != nil || cacheK != nil {
-			//if blockNr > TraceFromBlock {
 			if tr.trace {
 				fmt.Printf("For loop: %x, %x\n", cacheK, k)
 			}
@@ -508,41 +502,29 @@ func (tr *ResolverStatefulCached) MultiWalk2(db *bolt.DB, blockNr uint64, bucket
 			}
 
 			// cache part
-			canUseCache := false
-
-			// Special case: self-destructed accounts.
-			// self-destructed accounts may be marked in cache bucket by empty value
-			// in this case: account - add to Trie, storage - skip with subtree (it will be deleted by a background pruner)
-			isSelfDestructedMarker := len(cacheV) == 0
-			if isSelfDestructedMarker {
-				if isAccountBucket && len(v) > 0 && bytes.Equal(k, cacheK) {
-					keyAsNibbles.Reset()
-					DecompressNibbles(minKey, &keyAsNibbles.B)
-					if err := walker(rangeIdx, blockNr, keyAsNibbles.B, v, false); err != nil {
-						return err
-					}
-				}
-				// skip subtree
-			} else {
-				currentReq := tr.requests[tr.reqIndices[rangeIdx]]
-				currentRs := tr.rss[rangeIdx]
-				keyAsNibbles.Reset()
-				DecompressNibbles(minKey, &keyAsNibbles.B)
-
-				if len(keyAsNibbles.B) < currentReq.extResolvePos {
-					cacheK, cacheV = cache.Next() // go to children, not to sibling
-					continue
-				}
+			if len(cacheV) == 0 { // skip empty values
+				cacheK, cacheV = cache.Next()
+				continue
+			}
 
-				canUseCache = currentRs.HashOnly(keyAsNibbles.B[currentReq.extResolvePos:])
-				if !canUseCache { // can't use cache as is, need go to children
-					cacheK, cacheV = cache.Next() // go to children, not to sibling
-					continue
-				}
+			currentReq := tr.requests[tr.reqIndices[rangeIdx]]
+			currentRs := tr.rss[rangeIdx]
+			keyAsNibbles.Reset()
+			DecompressNibbles(minKey, &keyAsNibbles.B)
 
-				if err := walker(rangeIdx, blockNr, keyAsNibbles.B, cacheV, fromCache); err != nil {
-					return fmt.Errorf("waker err: %w", err)
-				}
+			if len(keyAsNibbles.B) < currentReq.extResolvePos {
+				cacheK, cacheV = cache.Next() // go to children, not to sibling
+				continue
+			}
+
+			canUseCache := currentRs.HashOnly(keyAsNibbles.B[currentReq.extResolvePos:])
+			if !canUseCache { // can't use cache as is, need go to children
+				cacheK, cacheV = cache.Next() // go to children, not to sibling
+				continue
+			}
+
+			if err := walker(rangeIdx, blockNr, keyAsNibbles.B, cacheV, fromCache); err != nil {
+				return fmt.Errorf("walker err: %w", err)
 			}
 
 			// skip subtree
diff --git a/trie/resolver_stateful_test.go b/trie/resolver_stateful_test.go
index 7a9281feac55e0024f73c6d02cc3babba7dfc2e6..b6d571229053c57de4ddcbbfe27c6ce2c903416a 100644
--- a/trie/resolver_stateful_test.go
+++ b/trie/resolver_stateful_test.go
@@ -342,10 +342,10 @@ func TestApiDetails(t *testing.T) {
 
 	require, assert, db := require.New(t), assert.New(t), ethdb.NewMemDatabase()
 
-	storageKey := func(k string) []byte {
-		return dbutils.GenerateCompositeStorageKey(common.HexToHash(k), 1, common.HexToHash(k))
+	storageKey := func(incarnation uint64, k string) []byte {
+		return dbutils.GenerateCompositeStorageKey(common.HexToHash(k), incarnation, common.HexToHash(k))
 	}
-	putCache := func(k string, v string) {
+	putIH := func(k string, v string) {
 		require.NoError(db.Put(dbutils.IntermediateTrieHashBucket, common.Hex2Bytes(k), common.Hex2Bytes(v)))
 	}
 
@@ -353,19 +353,20 @@ func TestApiDetails(t *testing.T) {
 	// Test works with keys like: {base}{i}{j}{zeroes}
 	// base = 0 or f - it covers edge cases - first/last subtrees
 	//
-	// i=0 - has data, has cache, no resolve. Tree must have Hash.
-	// i=1 - has cache with empty value. Tree must have Nil.
-	// i=2 - has accounts and storage, no cache. Tree must have Account nodes.
-	// i>2 - no data, no cache, no resolve.
-	// i=f - has data, has cache, no resolve. Edge case - last subtree.
+	// i=0 - has data, has IntermediateHash, no resolve. Tree must have Hash.
+	// i=1 - has values with incarnation=1. Tree must have Nil.
+	// i=2 - has accounts and storage, no IntermediateHash. Tree must have Account nodes.
+	// i>2 - no data, no IntermediateHash, no resolve.
+	// i=f - has data, has IntermediateHash, no resolve. Edge case - last subtree.
 	for _, base := range []string{"0", "f"} {
 		for _, i := range []int{0, 1, 2, 15} {
 			for _, j := range []int{0, 1, 2, 15} {
 				k := fmt.Sprintf(base+"%x%x%061x", i, j, 0)
 				storageV := common.Hex2Bytes(fmt.Sprintf("%x%x", i, j))
+				incarnation := uint64(2)
 				if i == 1 {
 					storageV = []byte{}
-					putCache(k, "") // mark accounts as deleted
+					incarnation = 1
 				}
 
 				a := accounts.Account{
@@ -383,16 +384,16 @@ func TestApiDetails(t *testing.T) {
 				a.EncodeForStorage(v)
 
 				require.NoError(db.Put(dbutils.AccountsBucket, common.Hex2Bytes(k), v))
-				require.NoError(db.Put(dbutils.StorageBucket, storageKey(k), storageV))
+				require.NoError(db.Put(dbutils.StorageBucket, storageKey(incarnation, k), storageV))
 			}
 		}
 	}
 
-	putCache("00", "06e98f77330d54fa691a724018df5b2c5689596c03413ca59717ea9bd8a98893")
-	putCache("ff", "ad4f92ca84a5980e14a356667eaf0db5d9ff78063630ebaa3d00a6634cd2a3fe")
+	putIH("00", "06e98f77330d54fa691a724018df5b2c5689596c03413ca59717ea9bd8a98893")
+	putIH("ff", "ad4f92ca84a5980e14a356667eaf0db5d9ff78063630ebaa3d00a6634cd2a3fe")
 
-	// this cache key must not be used, because such key is in ResolveRequest
-	putCache("01", "0000000000000000000000000000000000000000000000000000000000000000")
+	// this IntermediateHash key must not be used, because such key is in ResolveRequest
+	putIH("01", "0000000000000000000000000000000000000000000000000000000000000000")
 
 	t.Run("account resolver from scratch", func(t *testing.T) {
 		tries := []*Trie{New(common.Hash{}), New(common.Hash{})}
@@ -413,72 +414,67 @@ func TestApiDetails(t *testing.T) {
 			}
 			assert.Equal(expectRootHash.String(), tries[i].Hash().String(), resolverName)
 		}
-
-		//tries[0].PrintDiff(tries[1], os.Stdout)
 	})
 
 	t.Run("account resolver", func(t *testing.T) {
-		tries := []*Trie{New(common.Hash{}), New(common.Hash{})}
-		for i, resolverName := range []string{Stateful, StatefulCached} {
+		for _, resolverName := range []string{Stateful, StatefulCached} {
+			tr := New(common.Hash{})
 			resolver := NewResolver(0, true, 0)
 			expectRootHash := common.HexToHash("1af5daf4281e4e5552e79069d0688492de8684c11b1e983f9c3bbac500ad694a")
 
-			resolver.AddRequest(tries[i].NewResolveRequest(nil, append(common.Hex2Bytes(fmt.Sprintf("000101%0122x", 0)), 16), 0, expectRootHash.Bytes()))
-			resolver.AddRequest(tries[i].NewResolveRequest(nil, common.Hex2Bytes("000202"), 0, expectRootHash.Bytes()))
-			resolver.AddRequest(tries[i].NewResolveRequest(nil, common.Hex2Bytes("0f"), 0, expectRootHash.Bytes()))
+			resolver.AddRequest(tr.NewResolveRequest(nil, append(common.Hex2Bytes(fmt.Sprintf("000101%0122x", 0)), 16), 0, expectRootHash.Bytes()))
+			resolver.AddRequest(tr.NewResolveRequest(nil, common.Hex2Bytes("000202"), 0, expectRootHash.Bytes()))
+			resolver.AddRequest(tr.NewResolveRequest(nil, common.Hex2Bytes("0f"), 0, expectRootHash.Bytes()))
 
 			if resolverName == Stateful {
 				err := resolver.ResolveStateful(db, 0)
-				//fmt.Printf("%x\n", tries[i].root.(*fullNode).Children[0].(*fullNode).Children[0].hash())
 				assert.NoError(err)
 			} else {
 				err := resolver.ResolveStatefulCached(db, 0, false)
-				//fmt.Printf("%x\n", tries[i].root.(*fullNode).Children[0].(*fullNode).Children[0].hash())
 				assert.NoError(err)
 			}
 
-			assert.Equal(expectRootHash.String(), tries[i].Hash().String(), resolverName)
+			assert.Equal(expectRootHash.String(), tr.Hash().String(), resolverName)
 
-			_, found := tries[i].GetAccount(common.Hex2Bytes(fmt.Sprintf("000%061x", 0)))
+			_, found := tr.GetAccount(common.Hex2Bytes(fmt.Sprintf("000%061x", 0)))
 			assert.False(found) // exists in DB but resolved, there is hashNode
 
-			acc, found := tries[i].GetAccount(common.Hex2Bytes(fmt.Sprintf("011%061x", 0)))
+			acc, found := tr.GetAccount(common.Hex2Bytes(fmt.Sprintf("011%061x", 0)))
 			assert.True(found)
 			require.NotNil(acc)              // cache bucket has empty value, but self-destructed Account still available
 			assert.Equal(int(acc.Nonce), 11) // i * 10 + j
 
-			acc, found = tries[i].GetAccount(common.Hex2Bytes(fmt.Sprintf("021%061x", 0)))
+			acc, found = tr.GetAccount(common.Hex2Bytes(fmt.Sprintf("021%061x", 0)))
 			assert.True(found)
 			require.NotNil(acc)              // exists in db and resolved
 			assert.Equal(int(acc.Nonce), 21) // i * 10 + j
 
-			//acc, found = tr.GetAccount(common.Hex2Bytes(fmt.Sprintf("051%061x", 0)))
-			//assert.True(found)
-			//assert.Nil(acc) // not exists in DB
-
-			//assert.Panics(func() {
-			//	tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("001%061x", 0)), &accounts.Account{})
-			//})
-			//assert.NotPanics(func() {
-			//	tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("011%061x", 0)), &accounts.Account{})
-			//	tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("021%061x", 0)), &accounts.Account{})
-			//	tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("051%061x", 0)), &accounts.Account{})
-			//})
-		}
+			acc, found = tr.GetAccount(common.Hex2Bytes(fmt.Sprintf("051%061x", 0)))
+			assert.True(found)
+			assert.Nil(acc) // not exists in DB
 
-		//tries[0].PrintDiff(tries[1], os.Stdout)
+			assert.Panics(func() {
+				tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("001%061x", 0)), &accounts.Account{})
+			})
+			assert.NotPanics(func() {
+				tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("011%061x", 0)), &accounts.Account{})
+				tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("021%061x", 0)), &accounts.Account{})
+				tr.UpdateAccount(common.Hex2Bytes(fmt.Sprintf("051%061x", 0)), &accounts.Account{})
+			})
+		}
 	})
 
 	t.Run("storage resolver", func(t *testing.T) {
-		putCache("00", "9e3571a3a3a75d023799452cfacea4d268b109bc685b9e8b63a50b55be81c7a3")
-		putCache("ff", "8d2b73f47eb0e6c79ca4f48ba551bfd62f058c9d1cff7e1ab72ba3b2d63aefed")
-		putCache("01", "")
+		putIH("00", "c733727d362a5c5b05dc90d95aa48a9f8a564907ef5360849bf4b338bf85f7a4")
+		putIH("ff", "087b59df7d243bf3c0d3e2886b774120c73ee90980a0b10e60908ca6e401c7f4")
 
 		for _, resolverName := range []string{Stateful, StatefulCached} {
 			tr, resolver := New(common.Hash{}), NewResolver(1, false, 0)
-			expectRootHash := common.HexToHash("b7861b26269e04ae4a865ed3900f56472ad248ffd2976cddef8018cc9700f846")
+			expectRootHash := common.HexToHash("c3fd3ccd887c47c57d3f9c8eb84689e82a65c26ec13a3bf64a0d635f0df9cdf6")
 
+			resolver.AddRequest(tr.NewResolveRequest(nil, append(common.Hex2Bytes(fmt.Sprintf("000101%0122x", 0)), 16), 0, expectRootHash.Bytes()))
 			resolver.AddRequest(tr.NewResolveRequest(nil, common.Hex2Bytes("00020100"), 0, expectRootHash.Bytes()))
+			resolver.AddRequest(tr.NewResolveRequest(nil, common.Hex2Bytes("0f"), 0, expectRootHash.Bytes()))
 
 			if resolverName == Stateful {
 				err := resolver.ResolveStateful(db, 0)
@@ -489,28 +485,28 @@ func TestApiDetails(t *testing.T) {
 			}
 			assert.Equal(expectRootHash.String(), tr.Hash().String())
 
-			_, found := tr.Get(storageKey(fmt.Sprintf("000%061x", 0)))
+			_, found := tr.Get(storageKey(2, fmt.Sprintf("000%061x", 0)))
 			assert.False(found) // exists in DB but not resolved, there is hashNode
 
-			storage, found := tr.Get(storageKey(fmt.Sprintf("011%061x", 0)))
+			storage, found := tr.Get(storageKey(2, fmt.Sprintf("011%061x", 0)))
 			assert.True(found)
 			require.Nil(storage) // deleted by empty value in cache bucket
 
-			storage, found = tr.Get(storageKey(fmt.Sprintf("021%061x", 0)))
+			storage, found = tr.Get(storageKey(2, fmt.Sprintf("021%061x", 0)))
 			assert.True(found)
 			require.Equal(storage, common.Hex2Bytes("21"))
 
-			storage, found = tr.Get(storageKey(fmt.Sprintf("051%061x", 0)))
+			storage, found = tr.Get(storageKey(2, fmt.Sprintf("051%061x", 0)))
 			assert.True(found)
 			assert.Nil(storage) // not exists in DB
 
 			assert.Panics(func() {
-				tr.Update(storageKey(fmt.Sprintf("001%061x", 0)), nil)
+				tr.Update(storageKey(2, fmt.Sprintf("001%061x", 0)), nil)
 			})
 			assert.NotPanics(func() {
-				tr.Update(storageKey(fmt.Sprintf("011%061x", 0)), nil)
-				tr.Update(storageKey(fmt.Sprintf("021%061x", 0)), nil)
-				tr.Update(storageKey(fmt.Sprintf("051%061x", 0)), nil)
+				tr.Update(storageKey(2, fmt.Sprintf("011%061x", 0)), nil)
+				tr.Update(storageKey(2, fmt.Sprintf("021%061x", 0)), nil)
+				tr.Update(storageKey(2, fmt.Sprintf("051%061x", 0)), nil)
 			})
 		}
 	})
diff --git a/trie/trie.go b/trie/trie.go
index 95275e9b4443bae18174b0a0edaa2fd15fc77c1e..e020cc1327015785b620c424bbbdc80d3f18e295 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -607,8 +607,7 @@ func (t *Trie) insert(origNode node, key []byte, pos int, value node) (updated b
 		newNode = n
 		return
 	default:
-		fmt.Printf("Key: %x, Pos: %d\n", key, pos)
-		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
+		panic(fmt.Sprintf("%T: invalid node: %v. Searched by: key=%x, pos=%d", n, n, key, pos))
 	}
 }