diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 618a379f1118dd711dca2340f830c6f0ca6e6f5b..c7a41d1bfeea0fd2eae2da70623e091fde3e983c 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -56,7 +56,6 @@ import ( "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/internal/debug" - "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -3612,142 +3611,6 @@ func scanReceipts2(chaindata string) error { return nil } -func scanReceipts(chaindata string, block uint64) error { - f, err := os.Create("fixed.txt") - if err != nil { - return err - } - defer f.Close() - w := bufio.NewWriter(f) - defer w.Flush() - db := mdbx.MustOpen(chaindata) - defer db.Close() - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - blockNum, err := changeset.AvailableFrom(tx) - if err != nil { - return err - } - if block > blockNum { - blockNum = block - } - - chainConfig := tool.ChainConfig(tx) - vmConfig := vm.Config{} - noOpWriter := state.NewNoopWriter() - var buf bytes.Buffer - fixedCount := 0 - logInterval := 30 * time.Second - logEvery := time.NewTicker(logInterval) - var key [8]byte - var v []byte - for ; true; blockNum++ { - select { - default: - case <-logEvery.C: - log.Info("Commit", "block", blockNum, "fixed", fixedCount) - tx.Commit() - if tx, err = db.BeginRw(context.Background()); err != nil { - return err - } - } - var hash common.Hash - if hash, err = rawdb.ReadCanonicalHash(tx, blockNum); err != nil { - return err - } - if hash == (common.Hash{}) { - break - } - binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { - return err - } - var receipts types.Receipts - if err = cbor.Unmarshal(&receipts, bytes.NewReader(v)); err == nil { - broken := false - for _, receipt := range receipts { - if 
receipt.CumulativeGasUsed < 10000 { - broken = true - break - } - } - if !broken { - continue - } - } else { - // Receipt is using old CBOR encoding - var oldReceipts migrations.OldReceipts - if err = cbor.Unmarshal(&oldReceipts, bytes.NewReader(v)); err != nil { - return err - } - var body *types.Body - if chainConfig.IsBerlin(blockNum) { - body = rawdb.ReadBodyWithTransactions(tx, hash, blockNum) - } - receipts = make(types.Receipts, len(oldReceipts)) - for i, oldReceipt := range oldReceipts { - receipts[i] = new(types.Receipt) - receipts[i].PostState = oldReceipt.PostState - receipts[i].Status = oldReceipt.Status - receipts[i].CumulativeGasUsed = oldReceipt.CumulativeGasUsed - if body != nil { - receipts[i].Type = body.Transactions[i].Type() - } - } - buf.Reset() - if err = cbor.Marshal(&buf, receipts); err != nil { - return err - } - if err = tx.Put(kv.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { - return err - } - fixedCount++ - continue - } - var block *types.Block - if block, _, err = rawdb.ReadBlockWithSenders(tx, hash, blockNum); err != nil { - return err - } - - dbstate := state.NewPlainState(tx, block.NumberU64()-1) - intraBlockState := state.New(dbstate) - - getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) } - contractHasTEVM := ethdb.GetHasTEVM(tx) - receipts1, err1 := runBlock(intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, contractHasTEVM, block, vmConfig) - if err1 != nil { - return err1 - } - fix := true - if chainConfig.IsByzantium(blockNum) { - receiptSha := types.DeriveSha(receipts1) - if receiptSha != block.ReceiptHash() { - fmt.Printf("(retrace) mismatched receipt headers for block %d: %x, %x\n", block.NumberU64(), receiptSha, block.ReceiptHash()) - fix = false - } - } - if fix { - // All good, we can fix receipt record - buf.Reset() - err := cbor.Marshal(&buf, receipts1) - if err != nil { - return fmt.Errorf("encode block receipts 
for block %d: %w", blockNum, err) - } - if err = tx.Put(kv.Receipts, key[:], buf.Bytes()); err != nil { - return fmt.Errorf("writing receipts for block %d: %w", blockNum, err) - } - if _, err = w.Write([]byte(fmt.Sprintf("%d\n", blockNum))); err != nil { - return err - } - fixedCount++ - } - } - return tx.Commit() -} - func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config) (types.Receipts, error) { header := block.Header() @@ -3956,9 +3819,6 @@ func main() { case "scanTxs": err = scanTxs(*chaindata) - case "scanReceipts": - err = scanReceipts(*chaindata, uint64(*block)) - case "scanReceipts2": err = scanReceipts2(*chaindata) diff --git a/migrations/db_schema_version.go b/migrations/db_schema_version.go index b7a219f494d35ac1e57454fb6773d231170dc76e..cf2ce83f3216ee41b66881bc5b5b100f88f5a1ef 100644 --- a/migrations/db_schema_version.go +++ b/migrations/db_schema_version.go @@ -6,23 +6,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" ) -var dbSchemaVersion = Migration{ - Name: "db_schema_version", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - // This migration is no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} - var dbSchemaVersion5 = Migration{ Name: "db_schema_version5", Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { diff --git a/migrations/fix_sequences.go b/migrations/fix_sequences.go deleted file mode 100644 index 
a890c045b6048955db53d486b6233aa9f6baeb49..0000000000000000000000000000000000000000 --- a/migrations/fix_sequences.go +++ /dev/null @@ -1,41 +0,0 @@ -package migrations - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" -) - -var oldSequences = map[string]string{ - kv.EthTx: "eth_tx", -} - -var fixSequences = Migration{ - Name: "fix_sequences", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - for bkt, oldbkt := range oldSequences { - seq, getErr := tx.GetOne(kv.Sequence, []byte(oldbkt)) - if getErr != nil { - return getErr - } - - if seq != nil { - putErr := tx.Put(kv.Sequence, []byte(bkt), seq) - if putErr != nil { - return putErr - } - } - } - - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} diff --git a/migrations/header_prefix.go b/migrations/header_prefix.go deleted file mode 100644 index fc9a809ae8900666a1939e1ad37bd08d6c515508..0000000000000000000000000000000000000000 --- a/migrations/header_prefix.go +++ /dev/null @@ -1,177 +0,0 @@ -package migrations - -import ( - "bytes" - "context" - "fmt" - - "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" -) - -var headerPrefixToSeparateBuckets = Migration{ - Name: "header_prefix_to_separate_buckets", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - exists, err := tx.ExistsBucket(kv.HeaderPrefixOld) - if err != nil { - return err - } - if !exists { - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - } - - if err = tx.ClearBucket(kv.HeaderCanonical); err != nil { - return err - } - if err = 
tx.ClearBucket(kv.HeaderTD); err != nil { - return err - } - logPrefix := "split_header_prefix_bucket" - const loadStep = "load" - - canonicalCollector, err := etl.NewCollectorFromFiles(logPrefix, tmpdir+"canonical") - if err != nil { - return err - } - tdCollector, err := etl.NewCollectorFromFiles(logPrefix, tmpdir+"td") - if err != nil { - return err - } - headersCollector, err := etl.NewCollectorFromFiles(logPrefix, tmpdir+"headers") - if err != nil { - return err - } - - switch string(progress) { - case "": - // can't use files if progress field not set, clear them - if canonicalCollector != nil { - canonicalCollector.Close() - canonicalCollector = nil - } - - if tdCollector != nil { - tdCollector.Close() - tdCollector = nil - } - if headersCollector != nil { - headersCollector.Close() - headersCollector = nil - } - case loadStep: - if headersCollector == nil || canonicalCollector == nil || tdCollector == nil { - return ErrMigrationETLFilesDeleted - } - defer func() { - // don't clean if error or panic happened - if err != nil { - return - } - if rec := recover(); rec != nil { - panic(rec) - } - canonicalCollector.Close() - tdCollector.Close() - headersCollector.Close() - }() - goto LoadStep - } - - canonicalCollector = etl.NewCriticalCollector(logPrefix, tmpdir+"canonical", etl.NewSortableBuffer(etl.BufferOptimalSize*4)) - tdCollector = etl.NewCriticalCollector(logPrefix, tmpdir+"td", etl.NewSortableBuffer(etl.BufferOptimalSize*4)) - headersCollector = etl.NewCriticalCollector(logPrefix, tmpdir+"headers", etl.NewSortableBuffer(etl.BufferOptimalSize*4)) - defer func() { - // don't clean if error or panic happened - if err != nil { - return - } - if rec := recover(); rec != nil { - panic(rec) - } - canonicalCollector.Close() - tdCollector.Close() - headersCollector.Close() - }() - - err = tx.ForEach(kv.HeaderPrefixOld, []byte{}, func(k, v []byte) error { - var innerErr error - switch { - case IsHeaderKey(k): - innerErr = headersCollector.Collect(k, v) - case 
IsHeaderTDKey(k): - innerErr = tdCollector.Collect(bytes.TrimSuffix(k, HeaderTDSuffix), v) - case IsHeaderHashKey(k): - innerErr = canonicalCollector.Collect(bytes.TrimSuffix(k, HeaderHashSuffix), v) - default: - return fmt.Errorf("incorrect header prefix key: %v", common.Bytes2Hex(k)) - } - if innerErr != nil { - return innerErr - } - return nil - }) - if err = tx.DropBucket(kv.HeaderPrefixOld); err != nil { - return err - } - - LoadStep: - // Now transaction would have been re-opened, and we should be re-using the space - if err = canonicalCollector.Load(tx, kv.HeaderCanonical, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return fmt.Errorf("loading the transformed data back into the storage table: %w", err) - } - if err = tdCollector.Load(tx, kv.HeaderTD, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return fmt.Errorf("loading the transformed data back into the acc table: %w", err) - } - if err = headersCollector.Load(tx, kv.Headers, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return fmt.Errorf("loading the transformed data back into the acc table: %w", err) - } - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} - -func IsHeaderKey(k []byte) bool { - l := common.BlockNumberLength + common.HashLength - if len(k) != l { - return false - } - - return !IsHeaderHashKey(k) && !IsHeaderTDKey(k) -} - -func IsHeaderTDKey(k []byte) bool { - l := common.BlockNumberLength + common.HashLength + 1 - return len(k) == l && bytes.Equal(k[l-1:], HeaderTDSuffix) -} - -// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix -func HeaderHashKey(number uint64) []byte { - return append(dbutils.EncodeBlockNumber(number), HeaderHashSuffix...) 
-} - -func CheckCanonicalKey(k []byte) bool { - return len(k) == 8+len(HeaderHashSuffix) && bytes.Equal(k[8:], HeaderHashSuffix) -} - -func IsHeaderHashKey(k []byte) bool { - l := common.BlockNumberLength + 1 - return len(k) == l && bytes.Equal(k[l-1:], HeaderHashSuffix) -} - -var ( - HeaderTDSuffix = []byte("t") // block_num_u64 + hash + headerTDSuffix -> td - HeaderHashSuffix = []byte("n") // block_num_u64 + headerHashSuffix -> hash -) diff --git a/migrations/header_prefix_test.go b/migrations/header_prefix_test.go deleted file mode 100644 index c9c06b5af9db4a9519f3c9ddf5f3579ee2705655..0000000000000000000000000000000000000000 --- a/migrations/header_prefix_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package migrations - -import ( - "bytes" - "context" - "encoding/binary" - "strconv" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestHeaderPrefix(t *testing.T) { - require := require.New(t) - db := memdb.NewTestDB(t) - - err := db.Update(context.Background(), func(tx kv.RwTx) error { - err := tx.CreateBucket(kv.HeaderPrefixOld) - if err != nil { - return err - } - for i := uint64(0); i < 10; i++ { - //header - err = tx.Put(kv.HeaderPrefixOld, dbutils.HeaderKey(i, common.Hash{uint8(i)}), []byte("header "+strconv.Itoa(int(i)))) - require.NoError(err) - //canonical - err = tx.Put(kv.HeaderPrefixOld, HeaderHashKey(i), common.Hash{uint8(i)}.Bytes()) - require.NoError(err) - err = tx.Put(kv.HeaderPrefixOld, append(dbutils.HeaderKey(i, common.Hash{uint8(i)}), HeaderTDSuffix...), []byte{uint8(i)}) - require.NoError(err) - } - return nil - }) - require.NoError(err) - - migrator := NewMigrator(kv.ChainDB) - migrator.Migrations = []Migration{headerPrefixToSeparateBuckets} - err = migrator.Apply(db, t.TempDir()) - require.NoError(err) - - num := 0 
- err = db.View(context.Background(), func(tx kv.Tx) error { - return tx.ForEach(kv.HeaderCanonical, []byte{}, func(k, v []byte) error { - require.Len(k, 8) - bytes.Equal(v, common.Hash{uint8(binary.BigEndian.Uint64(k))}.Bytes()) - num++ - return nil - }) - }) - require.NoError(err) - require.Equal(num, 10) - - num = 0 - err = db.View(context.Background(), func(tx kv.Tx) error { - return tx.ForEach(kv.HeaderTD, []byte{}, func(k, v []byte) error { - require.Len(k, 40) - bytes.Equal(v, []byte{uint8(binary.BigEndian.Uint64(k))}) - num++ - return nil - }) - }) - require.NoError(err) - require.Equal(num, 10) - - num = 0 - err = db.View(context.Background(), func(tx kv.Tx) error { - return tx.ForEach(kv.Headers, []byte{}, func(k, v []byte) error { - require.Len(k, 40) - bytes.Equal(v, []byte("header "+strconv.Itoa(int(binary.BigEndian.Uint64(k))))) - num++ - return nil - }) - }) - require.NoError(err) - require.Equal(num, 10) - -} - -func TestHeaderTypeDetection(t *testing.T) { - // good input - headerHashKey := common.Hex2Bytes("00000000000000006e") - assert.False(t, IsHeaderKey(headerHashKey)) - assert.False(t, IsHeaderTDKey(headerHashKey)) - assert.True(t, IsHeaderHashKey(headerHashKey)) - - headerKey := common.Hex2Bytes("0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd") - assert.True(t, IsHeaderKey(headerKey)) - assert.False(t, IsHeaderTDKey(headerKey)) - assert.False(t, IsHeaderHashKey(headerKey)) - - headerTdKey := common.Hex2Bytes("0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd74") - assert.False(t, IsHeaderKey(headerTdKey)) - assert.True(t, IsHeaderTDKey(headerTdKey)) - assert.False(t, IsHeaderHashKey(headerTdKey)) - - // bad input - emptyKey := common.Hex2Bytes("") - assert.False(t, IsHeaderKey(emptyKey)) - assert.False(t, IsHeaderTDKey(emptyKey)) - assert.False(t, IsHeaderHashKey(emptyKey)) - - tooLongKey := 
common.Hex2Bytes("0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd0000000000004321ed7240d411782ae438adfd85f7edad373cea722318c6e7f5f5b30f9abc9b36fd") - assert.False(t, IsHeaderKey(tooLongKey)) - assert.False(t, IsHeaderTDKey(tooLongKey)) - assert.False(t, IsHeaderHashKey(tooLongKey)) - - notRelatedInput := common.Hex2Bytes("alex") - assert.False(t, IsHeaderKey(notRelatedInput)) - assert.False(t, IsHeaderTDKey(notRelatedInput)) - assert.False(t, IsHeaderHashKey(notRelatedInput)) -} diff --git a/migrations/migrations.go b/migrations/migrations.go index 84816259340000f25b1e152bf11e29c60afa79e3..b9f38c42c259d5796abe732e91a6f25cbb86ba66 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -31,12 +31,6 @@ import ( // - write test - and check that it's safe to apply same migration twice var migrations = map[kv.Label][]Migration{ kv.ChainDB: { - headerPrefixToSeparateBuckets, - removeCliqueBucket, - dbSchemaVersion, - fixSequences, - storageMode, - setPruneType, dbSchemaVersion5, }, kv.TxPoolDB: {}, @@ -128,10 +122,37 @@ func (m *Migrator) Apply(db kv.RwDB, datadir string) error { } var applied map[string][]byte + var existingVersion []byte if err := db.View(context.Background(), func(tx kv.Tx) error { var err error applied, err = AppliedMigrations(tx, false) - return err + if err != nil { + return fmt.Errorf("reading applied migrations: %w", err) + } + existingVersion, err = tx.GetOne(kv.DatabaseInfo, kv.DBSchemaVersionKey) + if err != nil { + return fmt.Errorf("reading DB schema version: %w", err) + } + if len(existingVersion) != 0 && len(existingVersion) != 12 { + return fmt.Errorf("incorrect length of DB schema version: %d", len(existingVersion)) + } + if len(existingVersion) == 12 { + major := binary.BigEndian.Uint32(existingVersion) + minor := 
binary.BigEndian.Uint32(existingVersion[4:]) + if major > kv.DBSchemaVersion.Major { + return fmt.Errorf("cannot downgrade major DB version from %d to %d", major, kv.DBSchemaVersion.Major) + } else if major == kv.DBSchemaVersion.Major { + if minor > kv.DBSchemaVersion.Minor { + return fmt.Errorf("cannot downgrade minor DB version from %d.%d to %d.%d", major, minor, kv.DBSchemaVersion.Major, kv.DBSchemaVersion.Minor) + } + } else { + // major < kv.DBSchemaVersion.Major + if kv.DBSchemaVersion.Major-major > 1 { + return fmt.Errorf("cannot upgrade major DB version for more than 1 version from %d to %d, use integration tool if you know what you are doing", major, kv.DBSchemaVersion.Major) + } + } + } + return nil }); err != nil { return err } diff --git a/migrations/prune.go b/migrations/prune.go deleted file mode 100644 index 72e31889be5a68c7dac6915090980c6bc2fa17e9..0000000000000000000000000000000000000000 --- a/migrations/prune.go +++ /dev/null @@ -1,89 +0,0 @@ -package migrations - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/ledgerwatch/erigon/params" -) - -var storageMode = Migration{ - Name: "storage_mode", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - var ( // old db keys - //StorageModeHistory - does node save history. - StorageModeHistory = []byte("smHistory") - //StorageModeReceipts - does node save receipts. - StorageModeReceipts = []byte("smReceipts") - //StorageModeTxIndex - does node save transactions index. 
- StorageModeTxIndex = []byte("smTxIndex") - //StorageModeCallTraces - does not build index of call traces - StorageModeCallTraces = []byte("smCallTraces") - ) - pm := prune.Mode{Initialised: true} - castToPruneDistance := func(v []byte) prune.Distance { - if len(v) == 1 && v[0] == 2 { - return params.FullImmutabilityThreshold // means, prune enabled - } - return math.MaxUint64 // means, prune disabled - } - { - v, err := tx.GetOne(kv.DatabaseInfo, StorageModeHistory) - if err != nil { - return err - } - if v == nil { // if no records in db - means Erigon just started first time and nothing to migrate. Noop. - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - } - pm.History = castToPruneDistance(v) - } - { - v, err := tx.GetOne(kv.DatabaseInfo, StorageModeReceipts) - if err != nil { - return err - } - pm.Receipts = castToPruneDistance(v) - } - { - v, err := tx.GetOne(kv.DatabaseInfo, StorageModeTxIndex) - if err != nil { - return err - } - pm.TxIndex = castToPruneDistance(v) - } - { - v, err := tx.GetOne(kv.DatabaseInfo, StorageModeCallTraces) - if err != nil { - return err - } - pm.CallTraces = castToPruneDistance(v) - } - { - v, err := tx.GetOne(kv.DatabaseInfo, kv.StorageModeTEVM) - if err != nil { - return err - } - pm.Experiments.TEVM = len(v) == 1 && v[0] == 1 - } - - err = prune.SetIfNotExist(tx, pm) - if err != nil { - return err - } - - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} diff --git a/migrations/receipt_cbor.go b/migrations/receipt_cbor.go deleted file mode 100644 index e57bba6dfb5f6f11e879b9222e7b36ec2293140a..0000000000000000000000000000000000000000 --- a/migrations/receipt_cbor.go +++ /dev/null @@ -1,409 +0,0 @@ -package migrations - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - pkg2_big "math/big" - "runtime" - "strconv" - "time" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - 
"github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/cbor" - "github.com/ledgerwatch/log/v3" - - pkg1_common "github.com/ledgerwatch/erigon/common" - codec1978 "github.com/ugorji/go/codec" -) - -// OldReceipt is receipt structure before introduction of Type field -// to be able to read old records -type OldReceipt struct { - // Consensus fields: These fields are defined by the Yellow Paper - PostState []byte `json:"root" codec:"1"` - Status uint64 `json:"status" codec:"2"` - CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required" codec:"3"` -} - -type OldReceipts []*OldReceipt - -var ReceiptCbor = Migration{ - Name: "receipt_cbor", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) - if err != nil { - return err - } - if genesisBlock == nil { - // Empty database check - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - } - chainConfig, cerr := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) - if cerr != nil { - return cerr - } - logInterval := 30 * time.Second - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - var buf bytes.Buffer - var key [8]byte - var v []byte - var to uint64 - if to, err = stages.GetStageProgress(tx, stages.Execution); err != nil { - return err - } - for blockNum := uint64(1); blockNum <= to; blockNum++ { - binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { - return err - } - if v == nil { - continue - } - select { - default: - case <-logEvery.C: - log.Info("Scanned receipts up to", "block", blockNum) - } - var receipts types.Receipts - var oldReceipts OldReceipts - if err = 
cbor.Unmarshal(&oldReceipts, bytes.NewReader(v)); err != nil { - continue - } - - var blockHash common.Hash - if blockHash, err = rawdb.ReadCanonicalHash(tx, blockNum); err != nil { - return err - } - var body *types.Body - if chainConfig.IsBerlin(blockNum) { - body = rawdb.ReadBodyWithTransactions(tx, blockHash, blockNum) - } - receipts = make(types.Receipts, len(oldReceipts)) - for i, oldReceipt := range oldReceipts { - receipts[i] = new(types.Receipt) - receipts[i].PostState = oldReceipt.PostState - receipts[i].Status = oldReceipt.Status - receipts[i].CumulativeGasUsed = oldReceipt.CumulativeGasUsed - if body != nil { - receipts[i].Type = body.Transactions[i].Type() - } - } - buf.Reset() - if err = cbor.Marshal(&buf, receipts); err != nil { - return err - } - if err = tx.Put(kv.Receipts, common.CopyBytes(key[:]), common.CopyBytes(buf.Bytes())); err != nil { - return err - } - } - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} - -const ( - // ----- value types used ---- - codecSelferValueTypeArray2 = 10 - codecSelferValueTypeMap2 = 9 - codecSelferValueTypeNil2 = 1 -) - -var ( - errCodecSelferOnlyMapOrArrayEncodeToStruct2 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer2 struct{} - -func init() { - if codec1978.GenVersion != 19 { - _, file, _, _ := runtime.Caller(0) - ver := strconv.FormatInt(int64(codec1978.GenVersion), 10) - panic(errors.New("codecgen version mismatch: current: 19, need " + ver + ". 
Re-generate file: " + file)) - } - if false { // reference the types, but skip this branch at build/run time - var _ pkg1_common.Address - var _ pkg2_big.Int - } -} - -func (x *OldReceipt) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yy2arr2 := z.EncBasicHandle().StructToArray - _ = yy2arr2 - z.EncWriteArrayStart(3) - z.EncWriteArrayElem() - if x.PostState == nil { - r.EncodeNil() - } else { - r.EncodeStringBytesRaw([]byte(x.PostState)) - } // end block: if x.PostState slice == nil - z.EncWriteArrayElem() - r.EncodeUint(uint64(x.Status)) - z.EncWriteArrayElem() - r.EncodeUint(uint64(x.CumulativeGasUsed)) - z.EncWriteArrayEnd() - } -} - -func (x *OldReceipt) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeNil2 { - *(x) = OldReceipt{} - } else if yyct2 == codecSelferValueTypeMap2 { - yyl2 := z.DecReadMapStart() - if yyl2 == 0 { - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - z.DecReadMapEnd() - } else if yyct2 == codecSelferValueTypeArray2 { - yyl2 := z.DecReadArrayStart() - if yyl2 != 0 { - x.codecDecodeSelfFromArray(yyl2, d) - } - z.DecReadArrayEnd() - } else { - panic(errCodecSelferOnlyMapOrArrayEncodeToStruct2) - } -} - -func (x *OldReceipt) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if z.DecCheckBreak() { - break - } - } - z.DecReadMapElemKey() - yys3 := z.StringView(r.DecodeStringAsBytes()) - z.DecReadMapElemValue() - switch yys3 { - case "1": - x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) - case "2": - x.Status = (uint64)(r.DecodeUint64()) - case "3": - x.CumulativeGasUsed = 
(uint64)(r.DecodeUint64()) - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 -} - -func (x *OldReceipt) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = z.DecCheckBreak() - } - if yyb8 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = z.DecCheckBreak() - } - if yyb8 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.Status = (uint64)(r.DecodeUint64()) - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = z.DecCheckBreak() - } - if yyb8 { - z.DecReadArrayEnd() - return - } - z.DecReadArrayElem() - x.CumulativeGasUsed = (uint64)(r.DecodeUint64()) - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = z.DecCheckBreak() - } - if yyb8 { - break - } - z.DecReadArrayElem() - z.DecStructFieldNotFound(yyj8-1, "") - } -} - -func (x *OldReceipt) IsCodecEmpty() bool { - return !(len(x.PostState) != 0 && x.Status != 0 && x.CumulativeGasUsed != 0 && true) -} - -func (x OldReceipts) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - h.encReceipts((OldReceipts)(x), e) - } // end block: if x slice == nil -} - -func (x *OldReceipts) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - h.decReceipts((*OldReceipts)(x), d) -} - -func (x codecSelfer2) encReceipts(v OldReceipts, e *codec1978.Encoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if v == nil { - r.EncodeNil() - return - } - z.EncWriteArrayStart(len(v)) - for _, yyv1 := range v { - z.EncWriteArrayElem() 
- if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncWriteArrayEnd() -} - -func (x codecSelfer2) decReceipts(v *OldReceipts, d *codec1978.Decoder) { - var h codecSelfer2 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyh1.IsNil { - if yyv1 != nil { - yyv1 = nil - yyc1 = true - } - } else if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*OldReceipt{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else { - yyhl1 := yyl1 > 0 - var yyrl1 int - _ = yyrl1 - if yyhl1 { - if yyl1 > cap(yyv1) { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*OldReceipt, yyrl1) - } - yyc1 = true - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - } - var yyj1 int - for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination - if yyj1 == 0 && yyv1 == nil { - if yyhl1 { - yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - } else { - yyrl1 = 8 - } - yyv1 = make([]*OldReceipt, yyrl1) - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - var yydb1 bool - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) - yyc1 = true - } - if yydb1 { - z.DecSwallow() - } else { - if r.TryNil() { - yyv1[yyj1] = nil - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(OldReceipt) - } - yyv1[yyj1].CodecDecodeSelf(d) - } - } - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = make([]*OldReceipt, 0) - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/migrations/receipt_repair.go b/migrations/receipt_repair.go deleted file mode 100644 index 240edddc7c37b43a2751276488fc22547e238781..0000000000000000000000000000000000000000 --- a/migrations/receipt_repair.go +++ /dev/null @@ -1,183 +0,0 @@ -package 
migrations - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "time" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/changeset" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/consensus/misc" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/cbor" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" -) - -func availableReceiptFrom(tx kv.Tx) (uint64, error) { - c, err := tx.Cursor(kv.Receipts) - if err != nil { - return 0, err - } - defer c.Close() - k, _, err := c.First() - if err != nil { - return 0, err - } - if len(k) == 0 { - return 0, nil - } - return binary.BigEndian.Uint64(k), nil -} - -var ReceiptRepair = Migration{ - Name: "receipt_repair", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - blockNum, err := changeset.AvailableFrom(tx) - if err != nil { - return err - } - receiptsFrom, err := availableReceiptFrom(tx) - if err != nil { - return err - } - if receiptsFrom > blockNum { - blockNum = receiptsFrom - } - - genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) - if err != nil { - return err - } - chainConfig, cerr := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) - if cerr != nil { - return cerr - } - vmConfig := vm.Config{} - noOpWriter := state.NewNoopWriter() - var buf bytes.Buffer - fixedCount := 0 - logInterval := 30 * time.Second - logEvery := time.NewTicker(logInterval) - var key [8]byte - var v []byte - for ; true; blockNum++ { - select { - default: - case <-logEvery.C: - log.Info("Progress", "block", blockNum, 
"fixed", fixedCount) - } - var hash common.Hash - if hash, err = rawdb.ReadCanonicalHash(tx, blockNum); err != nil { - return err - } - if hash == (common.Hash{}) { - break - } - binary.BigEndian.PutUint64(key[:], blockNum) - if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil { - return err - } - var receipts types.Receipts - if err = cbor.Unmarshal(&receipts, bytes.NewReader(v)); err == nil { - broken := false - for _, receipt := range receipts { - if receipt.CumulativeGasUsed < 10000 { - broken = true - break - } - } - if !broken { - continue - } - } - var block *types.Block - if block, _, err = rawdb.ReadBlockWithSenders(tx, hash, blockNum); err != nil { - return err - } - - dbstate := state.NewPlainState(tx, block.NumberU64()-1) - intraBlockState := state.New(dbstate) - - getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) } - contractHasTEVM := ethdb.GetHasTEVM(tx) - receipts1, err1 := runBlock(intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, contractHasTEVM, block, vmConfig) - if err1 != nil { - return err1 - } - fix := true - if chainConfig.IsByzantium(block.NumberU64()) { - receiptSha := types.DeriveSha(receipts1) - if receiptSha != block.ReceiptHash() { - fmt.Printf("(retrace) mismatched receipt headers for block %d: %x, %x\n", block.NumberU64(), receiptSha, block.ReceiptHash()) - fix = false - } - } - if fix { - // All good, we can fix receipt record - buf.Reset() - err := cbor.Marshal(&buf, receipts1) - if err != nil { - return fmt.Errorf("encode block receipts for block %d: %w", blockNum, err) - } - if err = tx.Put(kv.Receipts, key[:], buf.Bytes()); err != nil { - return fmt.Errorf("writing receipts for block %d: %w", blockNum, err) - } - fixedCount++ - } - } - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} - -func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter, - chainConfig 
*params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config) (types.Receipts, error) { - header := block.Header() - vmConfig.TraceJumpDest = true - engine := ethash.NewFullFaker() - gp := new(core.GasPool).AddGas(block.GasLimit()) - usedGas := new(uint64) - var receipts types.Receipts - if chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(ibs) - } - for i, tx := range block.Transactions() { - ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) - if err != nil { - return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) - } - receipts = append(receipts, receipt) - } - - if !vmConfig.ReadOnly { - // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) - if _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil, nil); err != nil { - return nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err) - } - - if err := ibs.CommitBlock(chainConfig.Rules(header.Number.Uint64()), blockWriter); err != nil { - return nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err) - } - } - - return receipts, nil -} diff --git a/migrations/remove_clique.go b/migrations/remove_clique.go deleted file mode 100644 index 2675c112cf9c8417d2f929490eba82e93cd9d74d..0000000000000000000000000000000000000000 --- a/migrations/remove_clique.go +++ /dev/null @@ -1,36 +0,0 @@ -package migrations - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" -) - -var removeCliqueBucket = Migration{ - Name: "remove_clique_bucket", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - if exists, err := tx.ExistsBucket(kv.Clique); err != nil { - return err - } else if !exists { - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - } - - if err := tx.DropBucket(kv.Clique); err != nil { - return err - } - - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -} diff --git a/migrations/set_prune_type.go b/migrations/set_prune_type.go deleted file mode 100644 index fac208775c4a4bae07f4c39a9de208147851f956..0000000000000000000000000000000000000000 --- a/migrations/set_prune_type.go +++ /dev/null @@ -1,41 +0,0 @@ -package migrations - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" -) - -var setPruneType = Migration{ - Name: "set_prune_type", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { - tx, err := db.BeginRw(context.Background()) - if err != 
nil { - return err - } - defer tx.Rollback() - - var pruneTypeKeys = [4][]byte{kv.PruneHistoryType, kv.PruneReceiptsType, kv.PruneTxIndexType, kv.PruneCallTracesType} - - for _, key := range pruneTypeKeys { - pruneType, getErr := tx.GetOne(kv.DatabaseInfo, key) - if getErr != nil { - return getErr - } - - if pruneType != nil { - continue - } - - putErr := tx.Put(kv.DatabaseInfo, key, kv.PruneTypeOlder) - if putErr != nil { - return putErr - } - } - - if err := BeforeCommit(tx, nil, true); err != nil { - return err - } - return tx.Commit() - }, -}