From 6d24a30a01a318a1cbef5c9dad9748c9c6d493d1 Mon Sep 17 00:00:00 2001 From: Alex Sharov <AskAlexSharov@gmail.com> Date: Tue, 23 Nov 2021 01:38:51 +0700 Subject: [PATCH] Drop txpool v1 (#3017) * drop_txpool_v1 * drop_txpool_v1 Co-authored-by: Alex Sharp <alexsharp@alexs-macbook-pro.home> --- cmd/integration/commands/reset_state.go | 13 - cmd/integration/commands/stages.go | 43 +- cmd/integration/commands/state_stages.go | 17 +- cmd/rpcdaemon/cli/config.go | 5 +- cmd/rpcdaemon/rpcdaemontest/test_util.go | 2 +- cmd/utils/flags.go | 1 - core/blockchain.go | 3 +- core/tx_journal.go | 181 -- core/tx_list.go | 576 ------ core/tx_list_test.go | 71 - core/tx_noncer.go | 79 - core/tx_pool.go | 1677 --------------- core/tx_pool_test.go | 2042 ------------------- eth/backend.go | 91 +- eth/protocols/eth/handler.go | 33 - eth/protocols/eth/handler_test.go | 7 +- eth/protocols/eth/protocol.go | 6 +- eth/stagedsync/default_stages.go | 43 +- eth/stagedsync/stage_mining_create_block.go | 85 +- eth/stagedsync/stage_txpool.go | 298 --- eth/stagedsync/stages/stages.go | 2 - turbo/stages/mock_sentry.go | 49 +- turbo/stages/stageloop.go | 25 +- turbo/stages/txpropagate/deprecated.go | 92 - turbo/txpool/README.md | 6 - turbo/txpool/p2p.go | 413 ---- 26 files changed, 99 insertions(+), 5761 deletions(-) delete mode 100644 core/tx_journal.go delete mode 100644 core/tx_list.go delete mode 100644 core/tx_list_test.go delete mode 100644 core/tx_noncer.go delete mode 100644 core/tx_pool_test.go delete mode 100644 eth/stagedsync/stage_txpool.go delete mode 100644 turbo/stages/txpropagate/deprecated.go delete mode 100644 turbo/txpool/README.md delete mode 100644 turbo/txpool/p2p.go diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 4308e4860d..acdc436866 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -65,9 +65,6 @@ func resetState(db kv.RwDB, logger log.Logger, ctx context.Context) error { if err := db.Update(ctx, resetTxLookup); err != nil { return err } - if err := db.Update(ctx, resetTxPool); err != nil { - return err - } if err := db.Update(ctx, resetFinish); err != nil { return err } @@ -223,16 +220,6 @@ func resetTxLookup(tx kv.RwTx) error { return nil } -func resetTxPool(tx kv.RwTx) error { - if err := stages.SaveStageProgress(tx, stages.TxPool, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.TxPool, 0); err != nil { - return err - } - return nil -} - func resetFinish(tx kv.RwTx) error { if err := stages.SaveStageProgress(tx, stages.Finish, 0); err != nil { return err diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index aebc42c0a2..945e527d26 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -14,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/sentry/download" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -23,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/fetcher" "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -31,10 +29,8 @@ import ( 
"github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" stages2 "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/ledgerwatch/erigon/turbo/txpool" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/secp256k1" "github.com/spf13/cobra" @@ -476,7 +472,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error { func stageSenders(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - _, _, chainConfig, _, _, sync, _, _ := newSync(ctx, db, nil) + _, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.Senders)) @@ -555,7 +551,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { } func stageExec(db kv.RwDB, ctx context.Context) error { - pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil) + pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.Execution)) if reset { @@ -614,7 +610,7 @@ func stageExec(db kv.RwDB, ctx context.Context) error { } func stageTrie(db kv.RwDB, ctx context.Context) error { - pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.IntermediateHashes)) tmpdir := path.Join(datadir, etl.TmpDirName) @@ -670,7 +666,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { func stageHashState(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.HashState)) tx, err := db.BeginRw(ctx) @@ -724,7 +720,7 @@ func stageHashState(db kv.RwDB, ctx context.Context) error { func stageLogIndex(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.LogIndex)) tx, err := db.BeginRw(ctx) if err != nil { @@ -779,7 +775,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { func stageCallTraces(kv kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - pm, _, _, _, _, sync, _, _ := newSync(ctx, kv, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, kv, nil) must(sync.SetCurrentStage(stages.CallTraces)) tx, err := kv.BeginRw(ctx) if err != nil { @@ -839,7 +835,7 @@ func stageCallTraces(kv kv.RwDB, ctx context.Context) error { func stageHistory(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.AccountHistoryIndex)) tx, err := db.BeginRw(ctx) @@ -910,7 +906,7 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { func stageTxLookup(db kv.RwDB, ctx context.Context) error { tmpdir := path.Join(datadir, etl.TmpDirName) - pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.TxLookup)) tx, err := db.BeginRw(ctx) @@ -943,7 +939,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context) error { return err } } else if pruneTo > 0 { - p, err := sync.PruneStageState(stages.TxPool, s.BlockNumber, tx, nil) + p, err := sync.PruneStageState(stages.TxLookup, s.BlockNumber, tx, nil) if err != nil 
{ return err } @@ -1018,7 +1014,7 @@ func byChain() (*core.Genesis, *params.ChainConfig) { return genesis, chainConfig } -func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { +func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { tmpdir := path.Join(datadir, etl.TmpDirName) logger := log.New() @@ -1048,8 +1044,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) events := privateapi.NewEvents() - txPool := core.NewTxPool(ethconfig.Defaults.TxPool, chainConfig, db) - chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { panic(genesisErr) @@ -1065,17 +1059,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) panic(err) } - txPoolP2PServer, err := txpool.NewP2PServer(context.Background(), nil, txPool) - if err != nil { - panic(err) - } - fetchTx := func(peerID enode.ID, hashes []common.Hash) error { - txPoolP2PServer.SendTxsRequest(context.TODO(), peerID, hashes) - return nil - } - - txPoolP2PServer.TxFetcher = fetcher.NewTxFetcher(txPool.Has, txPool.AddRemotes, fetchTx) - cfg := ethconfig.Defaults cfg.Prune = pm cfg.BatchSize = batchSize @@ -1084,7 +1067,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) cfg.Miner = *miningConfig } - sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, chainConfig.TerminalTotalDifficulty, downloadServer, tmpdir, txPool, txPoolP2PServer, nil) + sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, chainConfig.TerminalTotalDifficulty, downloadServer, tmpdir, nil) if err != nil { panic(err) } @@ -1092,7 +1075,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) miningSync := stagedsync.New( stagedsync.MiningStages(ctx, - stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, txPool, nil, nil, tmpdir), + stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, tmpdir), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir), stagedsync.StageHashStateCfg(db, tmpdir), stagedsync.StageTrieCfg(db, false, true, tmpdir), @@ -1102,7 +1085,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) stagedsync.MiningPruneOrder, ) - return pm, engine, chainConfig, vmConfig, txPool, sync, miningSync, miner + return pm, engine, chainConfig, vmConfig, sync, miningSync, miner } func progress(tx kv.Getter, stage stages.SyncStage) uint64 { diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index ce455f452e..7d11edd36e 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -142,7 +142,7 @@ func init() { } func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.Context) error { - pm, engine, chainConfig, vmConfig, txPool, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig) + pm, engine, chainConfig, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig) tx, err := db.BeginRw(ctx) if err != nil { @@ -178,7 
+178,6 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, - stages.TxPool, // TODO: enable TxPoolDB stage stages.Finish) execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, tmpDir) @@ -309,13 +308,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. miner.MiningConfig.ExtraData = nextBlock.Header().Extra miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx) error { err = stagedsync.SpawnMiningCreateBlockStage(s, tx, - stagedsync.StageMiningCreateBlockCfg(db, - miner, - *chainConfig, - engine, - txPool, - nil, nil, - tmpDir), + stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, tmpDir), quit) if err != nil { return err @@ -412,7 +405,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *params.ChainConfig) { } func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { - _, _, _, _, _, sync, _, _ := newSync(ctx, db, nil) + _, _, _, _, sync, _, _ := newSync(ctx, db, nil) tmpdir := path.Join(datadir, etl.TmpDirName) tx, err := db.BeginRw(ctx) if err != nil { @@ -420,7 +413,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { } defer tx.Rollback() - sync.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.Translation, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxPool, stages.TxLookup, stages.Finish) + sync.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.Translation, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxLookup, stages.Finish) if err = sync.Run(db, tx, false); err != nil { return err } @@ -477,7 +470,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { } func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { - pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil) + pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil) tx, err := db.BeginRw(ctx) if err != nil { diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 1a525b3d9d..8281f6a7dd 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -56,7 +56,6 @@ type Flags struct { RpcAllowListFilePath string RpcBatchConcurrency uint TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum - TxPoolV2 bool TxPoolApiAddr string TevmEnabled bool StateCache kvcache.CoherentConfig @@ -120,7 +119,6 @@ func RootCommand() (*cobra.Command, *Flags) { } cfg.Snapshot.Dir = path.Join(cfg.Datadir, "snapshots") } - cfg.TxPoolV2 = true return nil } rootCmd.PersistentPostRunE = func(cmd *cobra.Command, args []string) error { @@ -285,12 +283,13 @@ func RemoteServices(ctx context.Context, cfg Flags, logger log.Logger, rootCance blockReader = remoteEth txpoolConn := conn - if cfg.TxPoolV2 { + if cfg.TxPoolApiAddr != cfg.PrivateApiAddr { txpoolConn, err = grpcutil.Connect(creds, cfg.TxPoolApiAddr) if err != nil { return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not connect to txpool api: %w", err) } } + mining = services.NewMiningService(txpoolConn) txPool = services.NewTxPoolService(txpoolConn) if db == nil { diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go 
b/cmd/rpcdaemon/rpcdaemontest/test_util.go index d7aa6ab327..d52fc77bc9 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -222,7 +222,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g server := grpc.NewServer() remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader())) - txpool.RegisterTxpoolServer(server, m.TxPoolV2GrpcServer) + txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) listener := bufconn.Listen(1024 * 1024) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 6bc492b956..5a9ab1bb08 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -975,7 +975,6 @@ func setGPOCobra(f *pflag.FlagSet, cfg *gasprice.Config) { } func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { - cfg.V2 = true if ctx.GlobalIsSet(TxPoolDisableFlag.Name) { cfg.Disable = true } diff --git a/core/blockchain.go b/core/blockchain.go index fdc2e0a411..9ad4609344 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -38,8 +38,7 @@ import ( ) var ( - blockExecutionTimer = metrics2.GetOrCreateSummary("chain_execution_seconds") - blockReorgInvalidatedTx = metrics2.GetOrCreateCounter("chain_reorg_invalidTx") + blockExecutionTimer = metrics2.GetOrCreateSummary("chain_execution_seconds") ) const ( diff --git a/core/tx_journal.go b/core/tx_journal.go deleted file mode 100644 index c1b753df09..0000000000 --- a/core/tx_journal.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "bufio" - "errors" - "io" - "os" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" -) - -// errNoActiveJournal is returned if a transaction is attempted to be inserted -// into the journal, but no such file is currently open. -var errNoActiveJournal = errors.New("no active journal") - -// devNull is a WriteCloser that just discards anything written into it. Its -// goal is to allow the transaction journal to write into a fake journal when -// loading transactions on startup without printing warnings due to no file -// being read for write. -type devNull struct{} - -func (*devNull) Write(p []byte) (n int, err error) { return len(p), nil } -func (*devNull) Close() error { return nil } - -// txJournal is a rotating log of transactions with the aim of storing locally -// created transactions to allow non-executed ones to survive node restarts. 
-type txJournal struct { - path string // Filesystem path to store the transactions at - writer io.WriteCloser // Output stream to write new transactions into -} - -// newTxJournal creates a new transaction journal to -func newTxJournal(path string) *txJournal { - return &txJournal{ - path: path, - } -} - -// load parses a transaction journal dump from disk, loading its contents into -// the specified pool. -func (journal *txJournal) load(add func([]types.Transaction) []error) error { - // Skip the parsing if the journal file doesn't exist at all - if _, err := os.Stat(journal.path); os.IsNotExist(err) { - return nil - } - // Open the journal for loading any past transactions - input, err := os.Open(journal.path) - if err != nil { - return err - } - defer input.Close() - - // Temporarily discard any journal additions (don't double add on load) - journal.writer = new(devNull) - defer func() { journal.writer = nil }() - - // Inject all transactions from the journal into the pool - stream := rlp.NewStream(bufio.NewReader(input), 0) - total, dropped := 0, 0 - - // Create a method to load a limited batch of transactions and bump the - // appropriate progress counters. Then use this method to load all the - // journaled transactions in small-ish batches. - loadBatch := func(txs types.Transactions) { - for _, err := range add(txs) { - if err != nil { - log.Trace("Failed to add journaled transaction", "err", err) - dropped++ - } - } - } - var ( - failure error - batch types.Transactions - ) - for { - // Parse the next transaction and terminate on error - tx, decodeErr := types.DecodeTransaction(stream) - if decodeErr != nil { - if !errors.Is(decodeErr, io.EOF) { - failure = decodeErr - } - if batch.Len() > 0 { - loadBatch(batch) - } - break - } - // New transaction parsed, queue up for later, import if threshold is reached - total++ - - if batch = append(batch, tx); batch.Len() > 1024 { - loadBatch(batch) - batch = batch[:0] - } - } - log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) - - return failure -} - -// insert adds the specified transaction to the local disk journal. -func (journal *txJournal) insert(tx types.Transaction) error { - if journal.writer == nil { - return errNoActiveJournal - } - if err := rlp.Encode(journal.writer, tx); err != nil { - return err - } - return nil -} - -// rotate regenerates the transaction journal based on the current contents of -// the transaction pool. 
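As an aside on the journal code being deleted here: during load it temporarily swaps in the devNull writer so that re-adding journaled transactions does not immediately write them back to disk. A small standalone sketch of that discard-writer pattern, with illustrative names rather than code taken from this patch:

package main

import (
	"fmt"
	"io"
)

// discard is an io.WriteCloser that throws away everything written to it,
// mirroring the devNull type deleted above.
type discard struct{}

func (discard) Write(p []byte) (int, error) { return len(p), nil }
func (discard) Close() error                { return nil }

type journal struct {
	writer io.WriteCloser
}

// insert writes a record only when a real writer is attached.
func (j *journal) insert(rec string) error {
	if j.writer == nil {
		return fmt.Errorf("no active journal")
	}
	_, err := io.WriteString(j.writer, rec+"\n")
	return err
}

func main() {
	j := &journal{}

	// While "loading", attach the discarding writer so inserts are no-ops.
	j.writer = discard{}
	_ = j.insert("tx-from-disk") // silently dropped instead of re-journaled
	j.writer = nil

	fmt.Println(j.insert("tx-after-load")) // errors: no active journal
}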
-func (journal *txJournal) rotate(all map[common.Address]types.Transactions) error { - // Close the current journal (if any is open) - if journal.writer != nil { - if err := journal.writer.Close(); err != nil { - return err - } - journal.writer = nil - } - // Generate a new journal with the contents of the current pool - replacement, err := os.OpenFile(journal.path+".new", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755) - if err != nil { - return err - } - journaled := 0 - for _, txs := range all { - for _, tx := range txs { - if err = rlp.Encode(replacement, tx); err != nil { - replacement.Close() - return err - } - } - journaled += len(txs) - } - replacement.Close() - - // Replace the live journal with the newly generated one - if err = os.Rename(journal.path+".new", journal.path); err != nil { - return err - } - sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_APPEND, 0755) - if err != nil { - return err - } - journal.writer = sink - log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all)) - - return nil -} - -// close flushes the transaction journal contents to disk and closes the file. -func (journal *txJournal) close() error { - var err error - - if journal.writer != nil { - err = journal.writer.Close() - journal.writer = nil - } - return err -} diff --git a/core/tx_list.go b/core/tx_list.go deleted file mode 100644 index ee3591a2b6..0000000000 --- a/core/tx_list.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "container/heap" - "math" - "sort" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" -) - -// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for -// retrieving sorted transactions from the possibly gapped future queue. -type nonceHeap []uint64 - -func (h nonceHeap) Len() int { return len(h) } -func (h nonceHeap) Less(i, j int) bool { return h[i] < h[j] } -func (h nonceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *nonceHeap) Push(x interface{}) { - *h = append(*h, x.(uint64)) -} - -func (h *nonceHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -// txSortedMap is a nonce->transaction hash map with a heap based index to allow -// iterating over the contents in a nonce-incrementing way. -type txSortedMap struct { - items map[uint64]types.Transaction // Hash map storing the transaction data - index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) - cache types.Transactions // Cache of the transactions already sorted -} - -// newTxSortedMap creates a new nonce-sorted transaction map. 
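The tx_list.go removal above starts with nonceHeap, a plain container/heap min-heap over nonces that txSortedMap pairs with a hash map so the lowest nonce is always cheap to reach. A runnable sketch reusing that heap to show the Forward-style pruning it enables (the demo in main is illustrative):

package main

import (
	"container/heap"
	"fmt"
)

// nonceHeap is a min-heap of transaction nonces, as in the deleted pool code.
type nonceHeap []uint64

func (h nonceHeap) Len() int            { return len(h) }
func (h nonceHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h nonceHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *nonceHeap) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *nonceHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := new(nonceHeap)
	for _, n := range []uint64{7, 3, 5, 9} {
		heap.Push(h, n)
	}
	// Forward(6): pop everything below the threshold, as txSortedMap.Forward does.
	for h.Len() > 0 && (*h)[0] < 6 {
		fmt.Println("dropped nonce", heap.Pop(h).(uint64)) // 3, then 5
	}
	fmt.Println("lowest remaining nonce:", (*h)[0]) // 7
}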
-func newTxSortedMap() *txSortedMap { - return &txSortedMap{ - items: make(map[uint64]types.Transaction), - index: new(nonceHeap), - } -} - -// Get retrieves the current transactions associated with the given nonce. -func (m *txSortedMap) Get(nonce uint64) types.Transaction { - return m.items[nonce] -} - -// Put inserts a new transaction into the map, also updating the map's nonce -// index. If a transaction already exists with the same nonce, it's overwritten. -func (m *txSortedMap) Put(tx types.Transaction) { - nonce := tx.GetNonce() - if m.items[nonce] == nil { - heap.Push(m.index, nonce) - } - m.items[nonce], m.cache = tx, nil -} - -// Forward removes all transactions from the map with a nonce lower than the -// provided threshold. Every removed transaction is returned for any post-removal -// maintenance. -func (m *txSortedMap) Forward(threshold uint64) types.Transactions { - var removed types.Transactions - - // Pop off heap items until the threshold is reached - for m.index.Len() > 0 && (*m.index)[0] < threshold { - nonce := heap.Pop(m.index).(uint64) - removed = append(removed, m.items[nonce]) - delete(m.items, nonce) - } - // If we had a cached order, shift the front - if m.cache != nil { - m.cache = m.cache[len(removed):] - } - return removed -} - -// Filter iterates over the list of transactions and removes all of them for which -// the specified function evaluates to true. -// Filter, as opposed to 'filter', re-initialises the heap after the operation is done. -// If you want to do several consecutive filterings, it's therefore better to first -// do a .filter(func1) followed by .Filter(func2) or reheap() -func (m *txSortedMap) Filter(filter func(types.Transaction) bool) types.Transactions { - removed := m.filter(filter) - // If transactions were removed, the heap and cache are ruined - if len(removed) > 0 { - m.reheap() - } - return removed -} - -func (m *txSortedMap) reheap() { - *m.index = make([]uint64, 0, len(m.items)) - for nonce := range m.items { - *m.index = append(*m.index, nonce) - } - heap.Init(m.index) - m.cache = nil -} - -// filter is identical to Filter, but **does not** regenerate the heap. This method -// should only be used if followed immediately by a call to Filter or reheap() -func (m *txSortedMap) filter(filter func(types.Transaction) bool) types.Transactions { - var removed types.Transactions - - // Collect all the transactions to filter out - for nonce, tx := range m.items { - if filter(tx) { - removed = append(removed, tx) - delete(m.items, nonce) - } - } - if len(removed) > 0 { - m.cache = nil - } - return removed -} - -// Cap places a hard limit on the number of items, returning all transactions -// exceeding that limit. -func (m *txSortedMap) Cap(threshold int) types.Transactions { - // Short circuit if the number of items is under the limit - if len(m.items) <= threshold { - return nil - } - // Otherwise gather and drop the highest nonce'd transactions - var drops types.Transactions - - sort.Sort(*m.index) - for size := len(m.items); size > threshold; size-- { - drops = append(drops, m.items[(*m.index)[size-1]]) - delete(m.items, (*m.index)[size-1]) - } - *m.index = (*m.index)[:threshold] - heap.Init(m.index) - - // If we had a cache, shift the back - if m.cache != nil { - m.cache = m.cache[:len(m.cache)-len(drops)] - } - return drops -} - -// Remove deletes a transaction from the maintained map, returning whether the -// transaction was found. 
-func (m *txSortedMap) Remove(nonce uint64) bool { - // Short circuit if no transaction is present - _, ok := m.items[nonce] - if !ok { - return false - } - // Otherwise delete the transaction and fix the heap index - for i := 0; i < m.index.Len(); i++ { - if (*m.index)[i] == nonce { - heap.Remove(m.index, i) - break - } - } - delete(m.items, nonce) - m.cache = nil - - return true -} - -// Ready retrieves a sequentially increasing list of transactions starting at the -// provided nonce that is ready for processing. The returned transactions will be -// removed from the list. -// -// Note, all transactions with nonces lower than start will also be returned to -// prevent getting into and invalid state. This is not something that should ever -// happen but better to be self correcting than failing! -func (m *txSortedMap) Ready(start uint64) types.Transactions { - // Short circuit if no transactions are available - if m.index.Len() == 0 || (*m.index)[0] > start { - return nil - } - // Otherwise start accumulating incremental transactions - var ready types.Transactions - for next := (*m.index)[0]; m.index.Len() > 0 && (*m.index)[0] == next; next++ { - ready = append(ready, m.items[next]) - delete(m.items, next) - heap.Pop(m.index) - } - m.cache = nil - - return ready -} - -// Len returns the length of the transaction map. -func (m *txSortedMap) Len() int { - return len(m.items) -} - -func (m *txSortedMap) flatten() types.Transactions { - // If the sorting was not cached yet, create and cache it - if m.cache == nil { - m.cache = make(types.Transactions, 0, len(m.items)) - for _, tx := range m.items { - m.cache = append(m.cache, tx) - } - sort.Sort(types.TxByNonce(m.cache)) - } - return m.cache -} - -// Flatten creates a nonce-sorted slice of transactions based on the loosely -// sorted internal representation. The result of the sorting is cached in case -// it's requested again before any modifications are made to the contents. -func (m *txSortedMap) Flatten() types.Transactions { - // Copy the cache to prevent accidental modifications - cache := m.flatten() - txs := make(types.Transactions, len(cache)) - copy(txs, cache) - return txs -} - -// AppendHashes to given buffer and return it -func (m *txSortedMap) AppendHashes(buf []common.Hash) []common.Hash { - for _, tx := range m.items { - buf = append(buf, tx.Hash()) - } - return buf -} - -// LastElement returns the last element of a flattened list, thus, the -// transaction with the highest nonce -func (m *txSortedMap) LastElement() types.Transaction { - cache := m.flatten() - return cache[len(cache)-1] -} - -// txList is a "list" of transactions belonging to an account, sorted by account -// nonce. The same type can be used both for storing contiguous transactions for -// the executable/pending queue; and for storing gapped transactions for the non- -// executable/future queue, with minor behavioral changes. -type txList struct { - strict bool // Whether nonces are strictly continuous or not - txs *txSortedMap // Heap indexed sorted hash map of the transactions - - costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) - gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) -} - -// newTxList create a new transaction list for maintaining nonce-indexable fast, -// gapped, sortable transaction lists. 
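Ready above promotes only a gap-free run of nonces, which is what keeps non-executable transactions sitting in the queue. A simplified standalone illustration of that rule with a plain map (the deleted code additionally drains stale nonces below the start and keeps its heap in sync):

package main

import "fmt"

// ready collects a contiguous run of nonces from txs starting at start,
// removing them from the map - the same rule txSortedMap.Ready applies.
func ready(txs map[uint64]string, start uint64) []string {
	var out []string
	for next := start; ; next++ {
		tx, ok := txs[next]
		if !ok {
			break // gap: nothing after it is executable yet
		}
		out = append(out, tx)
		delete(txs, next)
	}
	return out
}

func main() {
	txs := map[uint64]string{4: "txA", 5: "txB", 7: "txC"} // nonce 6 is missing
	fmt.Println(ready(txs, 4)) // [txA txB]; txC stays queued behind the gap
}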
-func newTxList(strict bool) *txList { - return &txList{ - strict: strict, - txs: newTxSortedMap(), - costcap: new(uint256.Int), - } -} - -// Overlaps returns whether the transaction specified has the same nonce as one -// already contained within the list. -func (l *txList) Overlaps(tx types.Transaction) bool { - return l.txs.Get(tx.GetNonce()) != nil -} - -// Add tries to insert a new transaction into the list, returning whether the -// transaction was accepted, and if yes, any previous transaction it replaced. -// -// If the new transaction is accepted into the list, the lists' cost and gas -// thresholds are also potentially updated. -func (l *txList) Add(tx types.Transaction, priceBump uint64) (bool, types.Transaction) { - // If there's an older better transaction, abort - old := l.txs.Get(tx.GetNonce()) - if old != nil { - // threshold = oldGP * (100 + priceBump) / 100 - a := uint256.NewInt(100 + priceBump) - a = a.Mul(a, old.GetPrice()) - b := uint256.NewInt(100) - threshold := a.Div(a, b) - // Have to ensure that the new gas price is higher than the old gas - // price as well as checking the percentage threshold to ensure that - // this is accurate for low (Wei-level) gas price replacements - if !tx.GetPrice().Gt(old.GetPrice()) || tx.GetPrice().Lt(threshold) { - return false, nil - } - } - // Otherwise overwrite the old transaction with the current one - l.txs.Put(tx) - if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 { - l.costcap = cost - } - if gas := tx.GetGas(); l.gascap < gas { - l.gascap = gas - } - return true, old -} - -// Forward removes all transactions from the list with a nonce lower than the -// provided threshold. Every removed transaction is returned for any post-removal -// maintenance. -func (l *txList) Forward(threshold uint64) types.Transactions { - return l.txs.Forward(threshold) -} - -// Filter removes all transactions from the list with a cost or gas limit higher -// than the provided thresholds. Every removed transaction is returned for any -// post-removal maintenance. Strict-mode invalidated transactions are also -// returned. -// -// This method uses the cached costcap and gascap to quickly decide if there's even -// a point in calculating all the costs or if the balance covers all. If the threshold -// is lower than the costgas cap, the caps will be reset to a new high after removing -// the newly invalidated transactions. -func (l *txList) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) { - // If all transactions are below the threshold, short circuit - if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit { - return nil, nil - } - l.costcap = new(uint256.Int).Set(costLimit) // Lower the caps to the thresholds - l.gascap = gasLimit - - // Filter out all the transactions above the account's funds - removed := l.txs.Filter(func(tx types.Transaction) bool { - return tx.GetGas() > gasLimit || tx.Cost().Cmp(costLimit) > 0 - }) - - if len(removed) == 0 { - return nil, nil - } - var invalids types.Transactions - // If the list was strict, filter anything above the lowest nonce - if l.strict { - lowest := uint64(math.MaxUint64) - for _, tx := range removed { - if nonce := tx.GetNonce(); lowest > nonce { - lowest = nonce - } - } - invalids = l.txs.filter(func(tx types.Transaction) bool { return tx.GetNonce() > lowest }) - } - l.txs.reheap() - return removed, invalids -} - -// Cap places a hard limit on the number of items, returning all transactions -// exceeding that limit. 
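Add above enforces the price-bump rule for same-nonce replacement: the new gas price must exceed the old one and reach at least oldPrice * (100 + priceBump) / 100, with integer division. A worked standalone example of that comparison using the same uint256 arithmetic (the prices are made up):

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// replaces reports whether newPrice may replace oldPrice at the same nonce,
// given a percentage priceBump - the rule used in txList.Add above.
func replaces(oldPrice, newPrice *uint256.Int, priceBump uint64) bool {
	// threshold = oldPrice * (100 + priceBump) / 100
	threshold := new(uint256.Int).Mul(oldPrice, uint256.NewInt(100+priceBump))
	threshold.Div(threshold, uint256.NewInt(100))
	return newPrice.Gt(oldPrice) && !newPrice.Lt(threshold)
}

func main() {
	old := uint256.NewInt(10_000_000_000) // 10 gwei
	fmt.Println(replaces(old, uint256.NewInt(10_500_000_000), 10)) // false: below the 11 gwei threshold
	fmt.Println(replaces(old, uint256.NewInt(11_000_000_000), 10)) // true: meets the 10% bump
}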
-func (l *txList) Cap(threshold int) types.Transactions { - return l.txs.Cap(threshold) -} - -// Remove deletes a transaction from the maintained list, returning whether the -// transaction was found, and also returning any transaction invalidated due to -// the deletion (strict mode only). -func (l *txList) Remove(tx types.Transaction) (bool, types.Transactions) { - // Remove the transaction from the set - nonce := tx.GetNonce() - if removed := l.txs.Remove(nonce); !removed { - return false, nil - } - // In strict mode, filter out non-executable transactions - if l.strict { - return true, l.txs.Filter(func(tx types.Transaction) bool { return tx.GetNonce() > nonce }) - } - return true, nil -} - -// Ready retrieves a sequentially increasing list of transactions starting at the -// provided nonce that is ready for processing. The returned transactions will be -// removed from the list. -// -// Note, all transactions with nonces lower than start will also be returned to -// prevent getting into and invalid state. This is not something that should ever -// happen but better to be self correcting than failing! -func (l *txList) Ready(start uint64) types.Transactions { - return l.txs.Ready(start) -} - -// Len returns the length of the transaction list. -func (l *txList) Len() int { - return l.txs.Len() -} - -// Empty returns whether the list of transactions is empty or not. -func (l *txList) Empty() bool { - return l.Len() == 0 -} - -// Flatten creates a nonce-sorted slice of transactions based on the loosely -// sorted internal representation. The result of the sorting is cached in case -// it's requested again before any modifications are made to the contents. -func (l *txList) Flatten() types.Transactions { - return l.txs.Flatten() -} - -// AppendHashes to given buffer and return it -func (l *txList) AppendHashes(buf []common.Hash) []common.Hash { - return l.txs.AppendHashes(buf) -} - -// LastElement returns the last element of a flattened list, thus, the -// transaction with the highest nonce -func (l *txList) LastElement() types.Transaction { - return l.txs.LastElement() -} - -// priceHeap is a heap.Interface implementation over transactions for retrieving -// price-sorted transactions to discard when the pool fills up. -type priceHeap []types.Transaction - -func (h priceHeap) Len() int { return len(h) } -func (h priceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h priceHeap) Less(i, j int) bool { - // Sort primarily by price, returning the cheaper one - switch h[i].GetPrice().Cmp(h[j].GetPrice()) { - case -1: - return true - case 1: - return false - } - // If the prices match, stabilize via nonces (high nonce is worse) - return h[i].GetNonce() > h[j].GetNonce() -} - -func (h *priceHeap) Push(x interface{}) { - *h = append(*h, x.(types.Transaction)) -} - -func (h *priceHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - old[n-1] = nil - *h = old[0 : n-1] - return x -} - -// txPricedList is a price-sorted heap to allow operating on transactions pool -// contents in a price-incrementing way. It's built opon the all transactions -// in txpool but only interested in the remote part. It means only remote transactions -// will be considered for tracking, sorting, eviction, etc. 
-type txPricedList struct { - all *txLookup // Pointer to the map of all transactions - remotes *priceHeap // Heap of prices of all the stored **remote** transactions - stales int // Number of stale price points to (re-heap trigger) -} - -// newTxPricedList creates a new price-sorted transaction heap. -func newTxPricedList(all *txLookup) *txPricedList { - return &txPricedList{ - all: all, - remotes: new(priceHeap), - } -} - -// Put inserts a new transaction into the heap. -func (l *txPricedList) Put(tx types.Transaction, local bool) { - if local { - return - } - heap.Push(l.remotes, tx) -} - -// Removed notifies the prices transaction list that an old transaction dropped -// from the pool. The list will just keep a counter of stale objects and update -// the heap if a large enough ratio of transactions go stale. -func (l *txPricedList) Removed(count int) { - // Bump the stale counter, but exit if still too low (< 25%) - l.stales += count - if l.stales <= len(*l.remotes)/4 { - return - } - // Seems we've reached a critical number of stale transactions, reheap - l.Reheap() -} - -// Cap finds all the transactions below the given price threshold, drops them -// from the priced list and returns them for further removal from the entire pool. -// -// Note: only remote transactions will be considered for eviction. -func (l *txPricedList) Cap(threshold *uint256.Int) types.Transactions { - drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop - for len(*l.remotes) > 0 { - // Discard stale transactions if found during cleanup - cheapest := (*l.remotes)[0] - if l.all.GetRemote(cheapest.Hash()) == nil { // Removed or migrated - heap.Pop(l.remotes) - l.stales-- - continue - } - // Stop the discards if we've reached the threshold - if !cheapest.GetPrice().Lt(threshold) { - break - } - heap.Pop(l.remotes) - drop = append(drop, cheapest) - } - return drop -} - -// Underpriced checks whether a transaction is cheaper than (or as cheap as) the -// lowest priced (remote) transaction currently being tracked. -func (l *txPricedList) Underpriced(tx types.Transaction) bool { - // Discard stale price points if found at the heap start - for len(*l.remotes) > 0 { - head := []types.Transaction(*l.remotes)[0] - if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated - l.stales-- - heap.Pop(l.remotes) - continue - } - break - } - // Check if the transaction is underpriced or not - if len(*l.remotes) == 0 { - return false // There is no remote transaction at all. - } - // If the remote transaction is even cheaper than the - // cheapest one tracked locally, reject it. - cheapest := []types.Transaction(*l.remotes)[0] - return !cheapest.GetPrice().Lt(tx.GetPrice()) -} - -// Discard finds a number of most underpriced transactions, removes them from the -// priced list and returns them for further removal from the entire pool. -// -// Note local transaction won't be considered for eviction. 
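txPricedList relies on lazy deletion: Removed only bumps a stale counter, and the heap is rebuilt once stale entries exceed a quarter of the remote heap. A standalone sketch of that bookkeeping, with the heap reduced to a plain slice for brevity:

package main

import "fmt"

// pricedList tracks how many heap entries have gone stale and rebuilds the
// heap only when they exceed 25% of it, as the deleted txPricedList does.
type pricedList struct {
	remotes []uint64 // stand-in for the price heap
	stales  int
}

func (l *pricedList) removed(count int) {
	l.stales += count
	if l.stales <= len(l.remotes)/4 {
		return // still cheap enough to leave stale entries in place
	}
	l.reheap()
}

func (l *pricedList) reheap() {
	fmt.Printf("reheap after %d stale entries out of %d\n", l.stales, len(l.remotes))
	l.stales = 0 // the real code rebuilds the heap from the live transaction set
}

func main() {
	l := &pricedList{remotes: make([]uint64, 100)}
	l.removed(20) // 20 <= 25: no rebuild yet
	l.removed(10) // 30 > 25: triggers the rebuild
}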
-func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) { - drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop - for len(*l.remotes) > 0 && slots > 0 { - // Discard stale transactions if found during cleanup - tx := heap.Pop(l.remotes).(types.Transaction) - if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - l.stales-- - continue - } - // Non stale transaction found, discard it - drop = append(drop, tx) - slots -= numSlots(tx) - } - // If we still can't make enough room for the new transaction - if slots > 0 && !force { - for _, tx := range drop { - heap.Push(l.remotes, tx) - } - return nil, false - } - return drop, true -} - -// Reheap forcibly rebuilds the heap based on the current remote transaction set. -func (l *txPricedList) Reheap() { - reheap := make(priceHeap, 0, l.all.RemoteCount()) - - l.stales, l.remotes = 0, &reheap - l.all.Range(func(hash common.Hash, tx types.Transaction, local bool) bool { - *l.remotes = append(*l.remotes, tx) - return true - }, false, true) // Only iterate remotes - heap.Init(l.remotes) -} diff --git a/core/tx_list_test.go b/core/tx_list_test.go deleted file mode 100644 index 7c77bfcb87..0000000000 --- a/core/tx_list_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "math/big" - "math/rand" - "testing" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" -) - -// Tests that transactions can be added to strict lists and list contents and -// nonce boundaries are correctly maintained. 
-func TestStrictTxListAdd(t *testing.T) { - // Generate a list of transactions to insert - key, _ := crypto.GenerateKey() - - txs := make(types.Transactions, 1024) - for i := 0; i < len(txs); i++ { - txs[i] = transaction(uint64(i), 0, key) - } - // Insert the transactions in a random order - list := newTxList(true) - for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultTxPoolConfig.PriceBump) - } - // Verify internal state - if len(list.txs.items) != len(txs) { - t.Errorf("transaction count mismatch: have %d, want %d", len(list.txs.items), len(txs)) - } - for i, tx := range txs { - if list.txs.items[tx.GetNonce()] != tx { - t.Errorf("item %d: transaction mismatch: have %v, want %v", i, list.txs.items[tx.GetNonce()], tx) - } - } -} - -func BenchmarkTxListAdd(t *testing.B) { - // Generate a list of transactions to insert - key, _ := crypto.GenerateKey() - - txs := make(types.Transactions, 100000) - for i := 0; i < len(txs); i++ { - txs[i] = transaction(uint64(i), 0, key) - } - // Insert the transactions in a random order - list := newTxList(true) - priceLimit, _ := uint256.FromBig(big.NewInt(int64(DefaultTxPoolConfig.PriceLimit))) - t.ResetTimer() - for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultTxPoolConfig.PriceBump) - list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) - } -} diff --git a/core/tx_noncer.go b/core/tx_noncer.go deleted file mode 100644 index cdc25ae8bf..0000000000 --- a/core/tx_noncer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "sync" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/state" -) - -// txNoncer is a tiny virtual state database to manage the executable nonces of -// accounts in the pool, falling back to reading from a real state database if -// an account is unknown. -type txNoncer struct { - fallback *state.IntraBlockState - nonces map[common.Address]uint64 - lock sync.Mutex -} - -// newTxNoncer creates a new virtual state database to track the pool nonces. -func newTxNoncer(fallback *state.IntraBlockState) *txNoncer { - return &txNoncer{ - fallback: fallback, - nonces: make(map[common.Address]uint64), - } -} - -// get returns the current nonce of an account, falling back to a real state -// database if the account is unknown. -func (txn *txNoncer) get(addr common.Address) uint64 { - // We use mutex for get operation is the underlying - // state will mutate db even for read access. 
- txn.lock.Lock() - defer txn.lock.Unlock() - - if _, ok := txn.nonces[addr]; !ok { - txn.nonces[addr] = txn.fallback.GetNonce(addr) - } - return txn.nonces[addr] -} - -// set inserts a new virtual nonce into the virtual state database to be returned -// whenever the pool requests it instead of reaching into the real state database. -func (txn *txNoncer) set(addr common.Address, nonce uint64) { - txn.lock.Lock() - defer txn.lock.Unlock() - - txn.nonces[addr] = nonce -} - -// setIfLower updates a new virtual nonce into the virtual state database if the -// the new one is lower. -func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { - txn.lock.Lock() - defer txn.lock.Unlock() - - if _, ok := txn.nonces[addr]; !ok { - txn.nonces[addr] = txn.fallback.GetNonce(addr) - } - if txn.nonces[addr] <= nonce { - return - } - txn.nonces[addr] = nonce -} diff --git a/core/tx_pool.go b/core/tx_pool.go index 809c9fe147..d435ad7fb4 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -18,43 +18,9 @@ package core import ( "errors" - "fmt" - "sort" - "sync" "time" - "github.com/VictoriaMetrics/metrics" - "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/common/prque" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/olddb" - "github.com/ledgerwatch/erigon/event" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" - "go.uber.org/atomic" -) - -const ( - // chainHeadChanSize is the size of channel listening to ChainHeadEvent. - chainHeadChanSize = 10 - - // txSlotSize is used to calculate how many data slots a single transaction - // takes up based on its size. The slots are used as DoS protection, ensuring - // that validating a new transaction remains a constant operation (in reality - // O(maxslots), where max slots are 4 currently). - txSlotSize = 32 * 1024 - - // txMaxSize is the maximum size a single transaction can have. This field has - // non-trivial consequences: larger transactions are significantly harder and - // more expensive to propagate; larger transactions also take more resources - // to validate whether they fit into the pool or not. 
- txMaxSize = 4 * txSlotSize // 128KB ) var ( @@ -91,52 +57,9 @@ var ( ErrOversizedData = errors.New("oversized data") ) -var ( - evictionInterval = time.Minute // Time interval to check for evictable transactions - statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats -) - -var ( - // Metrics for the pending pool - pendingDiscardMeter = metrics.GetOrCreateCounter("txpool_pending_discard") - pendingReplaceMeter = metrics.GetOrCreateCounter("txpool_pending_replace") - pendingRateLimitMeter = metrics.GetOrCreateCounter("txpool_pending_ratelimit") // Dropped due to rate limiting - pendingNofundsMeter = metrics.GetOrCreateCounter("txpool_pending_nofunds") // Dropped due to out-of-funds - - // Metrics for the queued pool - queuedDiscardMeter = metrics.GetOrCreateCounter("txpool_queued_discard") - queuedReplaceMeter = metrics.GetOrCreateCounter("txpool_queued_replace") - queuedRateLimitMeter = metrics.GetOrCreateCounter("txpool_queued_ratelimit") // Dropped due to rate limiting - queuedNofundsMeter = metrics.GetOrCreateCounter("txpool_queued_nofunds") // Dropped due to out-of-funds - queuedEvictionMeter = metrics.GetOrCreateCounter("txpool_queued_eviction") // Dropped due to lifetime - - // General tx metrics - knownTxMeter = metrics.GetOrCreateCounter("txpool_known") - validTxMeter = metrics.GetOrCreateCounter("txpool_valid") - invalidTxMeter = metrics.GetOrCreateCounter("txpool_invalid") - underpricedTxMeter = metrics.GetOrCreateCounter("txpool_underpriced") - overflowedTxMeter = metrics.GetOrCreateCounter("txpool_overflowed") - - pendingGauge = metrics.GetOrCreateCounter("txpool_pending") - queuedGauge = metrics.GetOrCreateCounter("txpool_queued") - localGauge = metrics.GetOrCreateCounter("txpool_local") - slotsGauge = metrics.GetOrCreateCounter("txpool_slots") -) - -// TxStatus is the current status of a transaction as seen by the pool. -type TxStatus uint - -const ( - TxStatusUnknown TxStatus = iota - TxStatusQueued - TxStatusPending - TxStatusIncluded -) - // TxPoolConfig are the configuration parameters of the transaction pool. type TxPoolConfig struct { Disable bool - V2 bool Locals []common.Address // Addresses that should be treated by default as local NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts @@ -173,1603 +96,3 @@ var DefaultTxPoolConfig = TxPoolConfig{ Lifetime: 3 * time.Hour, } - -// sanitize checks the provided user configurations and changes anything that's -// unreasonable or unworkable. 
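The slot constants above bound transaction size as DoS protection: every transaction occupies at least one 32 KiB slot, and txMaxSize caps it at four slots (128 KiB). The pool charges eviction quota per slot through a numSlots helper that is not shown in this excerpt, so the round-up formula below is an assumption inferred from those constants, not a quote from the patch:

package main

import "fmt"

const (
	txSlotSize = 32 * 1024      // one slot, as in the deleted pool code
	txMaxSize  = 4 * txSlotSize // 128KB hard cap on a single transaction
)

// slots returns how many 32KiB slots a transaction of the given encoded size
// occupies (assumed round-up behaviour of the pool's numSlots helper).
func slots(size uint64) uint64 {
	return (size + txSlotSize - 1) / txSlotSize
}

func main() {
	for _, size := range []uint64{500, 32 * 1024, 100_000, txMaxSize} {
		fmt.Printf("size %6d bytes -> %d slot(s)\n", size, slots(size))
	}
	// Anything above txMaxSize is rejected outright with ErrOversizedData.
}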
-func (config *TxPoolConfig) sanitize() TxPoolConfig { - conf := *config - if conf.Rejournal < time.Second { - log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) - conf.Rejournal = time.Second - } - if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) - conf.PriceLimit = DefaultTxPoolConfig.PriceLimit - } - if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) - conf.PriceBump = DefaultTxPoolConfig.PriceBump - } - if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) - conf.AccountSlots = DefaultTxPoolConfig.AccountSlots - } - if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) - conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots - } - if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) - conf.AccountQueue = DefaultTxPoolConfig.AccountQueue - } - if conf.GlobalQueue < 1 { - log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) - conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue - } - if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) - conf.Lifetime = DefaultTxPoolConfig.Lifetime - } - return conf -} - -// Backend contains all currently known transactions. Transactions -// enter the pool when they are received from the network or submitted -// locally. They exit the pool when they are included in the blockchain. -// -// The pool separates processable transactions (which can be applied to the -// current state) and future transactions. Transactions move between those -// two states over time as they are received and processed. -type TxPool struct { - config TxPoolConfig - chainconfig *params.ChainConfig - chaindb ethdb.Database - gasPrice *uint256.Int - txFeed event.Feed - scope event.SubscriptionScope - chainHeadCh chan ChainHeadEvent - signer *types.Signer - mu sync.RWMutex - - istanbul bool // Fork indicator whether we are in the istanbul stage. - eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 
- - pendingNonces *txNoncer // Pending state tracking virtual nonces - currentState *state.IntraBlockState // Current state in the blockchain head - currentMaxGas uint64 // Current gas limit for transaction caps - - locals *accountSet // Set of local transaction to exempt from eviction rules - journal *txJournal // Journal of local transaction to back up to disk - - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price - - reqResetCh chan *txpoolResetRequest - reqPromoteCh chan *accountSet - queueTxEventCh chan types.Transaction - reorgDoneCh chan chan struct{} - reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop - wg sync.WaitGroup // tracks loop, scheduleReorgLoop - isStarted atomic.Bool - initFns []func() error - stopFns []func() error - stopCh chan struct{} -} - -type txpoolResetRequest struct { - oldHead, newHead *types.Header -} - -// NewTxPool creates a new transaction pool to gather, sort and filter inbound -// transactions from the network. -func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chaindb kv.RwDB) *TxPool { - // Sanitize the input to ensure no vulnerable gas prices are set - config = (&config).sanitize() - - // Create the transaction pool with its initial settings - pool := &TxPool{ - config: config, - chainconfig: chainconfig, - signer: types.LatestSigner(chainconfig), - pending: make(map[common.Address]*txList), - queue: make(map[common.Address]*txList), - beats: make(map[common.Address]time.Time), - all: newTxLookup(), - chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}, 1), - gasPrice: new(uint256.Int).SetUint64(config.PriceLimit), - stopCh: make(chan struct{}), - chaindb: olddb.NewObjectDatabase(chaindb), - } - pool.locals = newAccountSet(pool.signer) - for _, addr := range pool.config.Locals { - pool.locals.add(addr) - } - pool.priced = newTxPricedList(pool.all) - - return pool -} - -func (pool *TxPool) Start(gasLimit uint64, headNumber uint64) error { - pool.reorgShutdownCh = make(chan struct{}, 1) - - pool.locals = newAccountSet(pool.signer) - for _, addr := range pool.config.Locals { - pool.locals.add(addr) - } - - pool.priced = newTxPricedList(pool.all) - pool.resetHead(gasLimit, headNumber) - - // Start the reorg loop early so it can handle requests generated during journal loading. 
- pool.wg.Add(1) - go pool.scheduleReorgLoop() - - // If local transactions and journaling is enabled, load from disk - if !pool.config.NoLocals && pool.config.Journal != "" { - pool.journal = newTxJournal(pool.config.Journal) - - if err := pool.journal.load(pool.AddLocals); err != nil { - log.Warn("Failed to load transaction journal", "err", err) - } - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) - } - } - - pool.wg.Add(1) - go pool.loop() - - pool.isStarted.Store(true) - - log.Info("transaction pool started") - return nil -} - -// loop is the transaction pool's main event loop, waiting for and reacting to -// outside blockchain events as well as for various reporting and transaction -// eviction events. -func (pool *TxPool) loop() { - defer debug.LogPanic() - defer pool.wg.Done() - - var ( - prevPending, prevQueued, prevStales int - // Start the stats reporting and transaction eviction tickers - report = time.NewTicker(statsReportInterval) - evict = time.NewTicker(evictionInterval) - journal = time.NewTicker(pool.config.Rejournal) - ) - defer report.Stop() - defer evict.Stop() - defer journal.Stop() - - for { - select { - - // System shutdown. - case <-pool.stopCh: - libcommon.SafeClose(pool.reorgShutdownCh) - return - - // Handle stats reporting ticks - case <-report.C: - pool.mu.RLock() - pending, queued := pool.stats() - stales := pool.priced.stales - pool.mu.RUnlock() - - if pending != prevPending || queued != prevQueued || stales != prevStales { - log.Trace("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) - prevPending, prevQueued, prevStales = pending, queued, stales - } - - // Handle inactive account transaction eviction - case <-evict.C: - pool.mu.Lock() - for addr := range pool.queue { - // Skip local transactions from the eviction mechanism - if pool.locals.contains(addr) { - continue - } - // Any non-locals old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx := range list { - pool.removeTxLocked(tx.Hash(), true) - } - queuedEvictionMeter.Set(uint64(len(list))) - } - } - pool.mu.Unlock() - - // Handle local transaction journal rotation - case <-journal.C: - if pool.journal != nil { - pool.mu.Lock() - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate local tx journal", "err", err) - } - pool.mu.Unlock() - } - } - } -} - -func (pool *TxPool) resetHead(blockGasLimit uint64, blockNumber uint64) { - pool.mu.Lock() - defer pool.mu.Unlock() - pool.currentState = state.New(state.NewPlainStateReader(pool.chaindb)) - pool.pendingNonces = newTxNoncer(pool.currentState) - pool.currentMaxGas = blockGasLimit - - // Update all fork indicator by next pending block number. - next := blockNumber + 1 - pool.istanbul = pool.chainconfig.IsIstanbul(next) - pool.eip2718 = pool.chainconfig.IsBerlin(next) -} - -func (pool *TxPool) ResetHead(blockGasLimit uint64, blockNumber uint64) { - pool.resetHead(blockGasLimit, blockNumber) - <-pool.requestReset(nil, nil) -} - -// Stop terminates the transaction pool. 
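The loop/Stop pair above follows the usual stop-channel plus WaitGroup idiom: the background loop selects on its tickers and on stopCh, while Stop closes the channel and waits for the loop to exit before closing the journal. A minimal standalone sketch of the same lifecycle with illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type pool struct {
	wg     sync.WaitGroup
	stopCh chan struct{}
}

func (p *pool) start() {
	p.stopCh = make(chan struct{})
	p.wg.Add(1)
	go p.loop()
}

// loop reacts to periodic work until the stop channel is closed,
// mirroring the report/evict/journal tickers in the deleted TxPool.loop.
func (p *pool) loop() {
	defer p.wg.Done()
	tick := time.NewTicker(10 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-p.stopCh:
			return
		case <-tick.C:
			fmt.Println("periodic housekeeping")
		}
	}
}

// stop signals the loop and blocks until it has exited.
func (p *pool) stop() {
	close(p.stopCh)
	p.wg.Wait()
	fmt.Println("pool stopped")
}

func main() {
	p := &pool{}
	p.start()
	time.Sleep(35 * time.Millisecond)
	p.stop()
}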
-func (pool *TxPool) Stop() { - // Unsubscribe all subscriptions registered from txpool - if !pool.IsStarted() { - return - } - close(pool.stopCh) - - // Unsubscribe subscriptions registered from blockchain - pool.wg.Wait() - - if pool.journal != nil { - pool.journal.close() - } - - pool.isStarted.Store(false) - - log.Info("Transaction pool stopped") -} - -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { - return pool.scope.Track(pool.txFeed.Subscribe(ch)) -} - -// GasPrice returns the current gas price enforced by the transaction pool. -func (pool *TxPool) GasPrice() *uint256.Int { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return new(uint256.Int).Set(pool.gasPrice) -} - -// SetGasPrice updates the minimum price required by the transaction pool for a -// new transaction, and drops all transactions below this threshold. -func (pool *TxPool) SetGasPrice(price *uint256.Int) { - pool.mu.Lock() - defer pool.mu.Unlock() - - pool.gasPrice = price - for _, tx := range pool.priced.Cap(price) { - pool.RemoveTx(tx.Hash(), false) - } - log.Info("Transaction pool price threshold updated", "price", price) -} - -// Nonce returns the next nonce of an account, with all transactions executable -// by the pool already applied on top. -func (pool *TxPool) Nonce(addr common.Address) uint64 { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return pool.pendingNonces.get(addr) -} - -// Stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (pool *TxPool) Stats() (int, int) { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return pool.stats() -} - -// stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (pool *TxPool) stats() (int, int) { - pending := 0 - for _, list := range pool.pending { - pending += list.Len() - } - queued := 0 - for _, list := range pool.queue { - queued += list.Len() - } - return pending, queued -} - -// Content retrieves the data content of the transaction pool, returning all the -// pending as well as queued transactions, grouped by account and sorted by nonce. -func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { - pool.mu.Lock() - defer pool.mu.Unlock() - - pending := make(map[common.Address]types.Transactions) - for addr, list := range pool.pending { - pending[addr] = list.Flatten() - } - queued := make(map[common.Address]types.Transactions) - for addr, list := range pool.queue { - queued[addr] = list.Flatten() - } - return pending, queued -} - -// CountContent returns the number of pending and queued transactions -// in the transaction pool. -func (pool *TxPool) CountContent() (pending uint, queued uint) { - pool.mu.Lock() - defer pool.mu.Unlock() - pending = uint(len(pool.pending)) - queued = uint(len(pool.queue)) - return -} - -// Pending retrieves all currently processable transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. 
-func (pool *TxPool) Pending() (types.TransactionsGroupedBySender, error) { - var pending types.TransactionsGroupedBySender - if !pool.IsStarted() { - return pending, nil - } - pool.mu.Lock() - defer pool.mu.Unlock() - pending = make(types.TransactionsGroupedBySender, len(pool.pending)) - for _, list := range pool.pending { - pending = append(pending, list.Flatten()) - } - return pending, nil -} - -// AppendHashes to given buffer and return it -func (pool *TxPool) AppendHashes(buf []common.Hash) []common.Hash { - if !pool.IsStarted() { - return buf - } - pool.mu.Lock() - defer pool.mu.Unlock() - for _, list := range pool.pending { - buf = list.AppendHashes(buf) - } - return buf -} - -// AppendLocalHashes to given buffer and return it -func (pool *TxPool) AppendLocalHashes(buf []common.Hash) []common.Hash { - if !pool.IsStarted() { - return buf - } - pool.mu.Lock() - defer pool.mu.Unlock() - for txHash := range pool.all.locals { - buf = append(buf, txHash) - } - return buf -} - -// Locals retrieves the accounts currently considered local by the pool. -func (pool *TxPool) Locals() []common.Address { - pool.mu.Lock() - defer pool.mu.Unlock() - - return pool.locals.flatten() -} - -// local retrieves all currently known local transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. -func (pool *TxPool) local() map[common.Address]types.Transactions { - txs := make(map[common.Address]types.Transactions) - for addr := range pool.locals.accounts { - if pending := pool.pending[addr]; pending != nil { - txs[addr] = append(txs[addr], pending.Flatten()...) - } - if queued := pool.queue[addr]; queued != nil { - txs[addr] = append(txs[addr], queued.Flatten()...) - } - } - return txs -} - -// validateTx checks whether a transaction is valid according to the consensus -// rules and adheres to some heuristic limits of the local node (price and size). -func (pool *TxPool) validateTx(tx types.Transaction, local bool) error { - // Accept only legacy transactions until EIP-2718/2930 activates. - if !pool.eip2718 && tx.Type() != types.LegacyTxType { - return ErrTxTypeNotSupported - } - // Reject transactions over defined size to prevent DOS attacks - if uint64(tx.Size()) > txMaxSize { - return ErrOversizedData - } - // Transactions can't be negative. This may never happen using RLP decoded - // transactions but may occur if you create a transaction using the RPC. - if tx.GetValue().Sign() < 0 { - return ErrNegativeValue - } - // Ensure the transaction doesn't exceed the current block limit gas. - if pool.currentMaxGas < tx.GetGas() { - return ErrGasLimit - } - // Sanity check for extremely large numbers - if tx.GetFeeCap().BitLen() > 256 { - return ErrFeeCapVeryHigh - } - if tx.GetTip().BitLen() > 256 { - return ErrTipVeryHigh - } - // Make sure the transaction is signed properly. - from, err := tx.Sender(*pool.signer) - if err != nil { - return ErrInvalidSender - } - // Drop non-local transactions under our own minimal accepted gas price - if !local && pool.gasPrice.Gt(tx.GetPrice()) { - return ErrUnderpriced - } - // Ensure the transaction adheres to nonce ordering - if pool.currentState.GetNonce(from) > tx.GetNonce() { - return ErrNonceTooLow - } - // Transactor should have enough funds to cover the costs - // cost == V + GP * GL - if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { - return ErrInsufficientFunds - } - // Ensure the transaction has more gas than the basic tx fee. 
- intrGas, err := IntrinsicGas(tx.GetData(), tx.GetAccessList(), tx.GetTo() == nil, true, pool.istanbul) - if err != nil { - return err - } - if tx.GetGas() < intrGas { - return ErrIntrinsicGas - } - return nil -} - -// add validates a transaction and inserts it into the non-executable queue for later -// pending promotion and execution. If the transaction is a replacement for an already -// pending or queued one, it overwrites the previous transaction if its price is higher. -// -// If a newly added transaction is marked as local, its sending account will be -// whitelisted, preventing any associated transaction from being dropped out of the pool -// due to pricing constraints. -func (pool *TxPool) add(tx types.Transaction, local bool) (replaced bool, err error) { - // If the transaction is already known, discard it - hash := tx.Hash() - if pool.all.Get(hash) != nil { - log.Trace("Discarding already known transaction", "hash", hash) - knownTxMeter.Set(1) - return false, ErrAlreadyKnown - } - // Make the local flag. If it's from local source or it's from the network but - // the sender is marked as local previously, treat it as the local transaction. - isLocal := local || pool.locals.containsTx(tx) - - // If the transaction fails basic validation, discard it - if pool.currentState != nil { - if err = pool.validateTx(tx, isLocal); err != nil { - log.Trace("Discarding invalid transaction", "hash", hash, "err", err) - invalidTxMeter.Set(1) - return false, err - } - } - // If the transaction pool is full, discard underpriced transactions - if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { - // If the new transaction is underpriced, don't accept it - if !isLocal && pool.priced.Underpriced(tx) { - log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GetPrice()) - underpricedTxMeter.Set(1) - return false, ErrUnderpriced - } - // New transaction is better than our worse ones, make room for it. - // If it's a local transaction, forcibly discard all available transactions. - // Otherwise if we can't make enough room for new one, abort the operation. - drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) - - // Special case, we still can't make the room for the new remote one. - if !isLocal && !success { - log.Trace("Discarding overflown transaction", "hash", hash) - overflowedTxMeter.Set(1) - return false, ErrTxPoolOverflow - } - // Kick out the underpriced remote transactions. 
- for _, tx := range drop { - log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GetPrice()) - underpricedTxMeter.Set(1) - pool.removeTxLocked(tx.Hash(), false) - } - } - // Try to replace an existing transaction in the pending pool - from, _ := tx.Sender(*pool.signer) // already validated - if list := pool.pending[from]; list != nil && list.Overlaps(tx) { - // Nonce already pending, check if required price bump is met - inserted, old := list.Add(tx, pool.config.PriceBump) - if !inserted { - pendingDiscardMeter.Set(1) - return false, ErrReplaceUnderpriced - } - // New transaction is better, replace old one - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - pendingReplaceMeter.Set(1) - } - pool.all.Add(tx, isLocal) - pool.priced.Put(tx, isLocal) - pool.journalTx(from, tx) - pool.queueTxEvent(tx) - log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.GetTo()) - - // Successful promotion, bump the heartbeat - pool.beats[from] = time.Now() - return old != nil, nil - } - // New transaction isn't replacing a pending one, push into queue - replaced, err = pool.enqueueTx(hash, tx, isLocal, true) - if err != nil { - return false, err - } - // Mark local addresses and journal local transactions - if local && !pool.locals.contains(from) { - log.Info("Setting new local account", "address", from) - pool.locals.add(from) - pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. - } - if isLocal { - localGauge.Inc() - } - pool.journalTx(from, tx) - - log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.GetTo()) - return replaced, nil -} - -// enqueueTx inserts a new transaction into the non-executable transaction queue. -// -// Note, this method assumes the pool lock is held! -func (pool *TxPool) enqueueTx(hash common.Hash, tx types.Transaction, local bool, addAll bool) (bool, error) { - // Try to insert the transaction into the future queue - from, _ := tx.Sender(*pool.signer) // already validated - if pool.queue[from] == nil { - pool.queue[from] = newTxList(false) - } - inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) - if !inserted { - // An older transaction was better, discard this - queuedDiscardMeter.Set(1) - return false, ErrReplaceUnderpriced - } - // Discard any previous transaction and mark this - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - queuedReplaceMeter.Set(1) - } else { - // Nothing was replaced, bump the queued counter - queuedGauge.Inc() - } - // If the transaction isn't in lookup set but it's expected to be there, - // show the error log. - if pool.all.Get(hash) == nil && !addAll { - log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) - } - if addAll { - pool.all.Add(tx, local) - pool.priced.Put(tx, local) - } - // If we never record the heartbeat, do it right now. - if _, exist := pool.beats[from]; !exist { - pool.beats[from] = time.Now() - } - return old != nil, nil -} - -// journalTx adds the specified transaction to the local disk journal if it is -// deemed to have been sent from a local account. 
-func (pool *TxPool) journalTx(from common.Address, tx types.Transaction) {
-	// Only journal if it's enabled and the transaction is local
-	if pool.journal == nil || !pool.locals.contains(from) {
-		return
-	}
-	if err := pool.journal.insert(tx); err != nil {
-		log.Warn("Failed to journal local transaction", "err", err)
-	}
-}
-
-// promoteTx adds a transaction to the pending (processable) list of transactions
-// and returns whether it was inserted or an older was better.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx types.Transaction) bool {
-	// Try to insert the transaction into the pending queue
-	if pool.pending[addr] == nil {
-		pool.pending[addr] = newTxList(true)
-	}
-	list := pool.pending[addr]
-
-	inserted, old := list.Add(tx, pool.config.PriceBump)
-	if !inserted {
-		// An older transaction was better, discard this
-		pool.all.Remove(hash)
-		pool.priced.Removed(1)
-		pendingDiscardMeter.Set(1)
-		return false
-	}
-	// Otherwise discard any previous transaction and mark this
-	if old != nil {
-		pool.all.Remove(old.Hash())
-		pool.priced.Removed(1)
-		pendingReplaceMeter.Set(1)
-	} else {
-		// Nothing was replaced, bump the pending counter
-		pendingGauge.Inc()
-	}
-	// Set the potentially new pending nonce and notify any subsystems of the new tx
-	pool.pendingNonces.set(addr, tx.GetNonce()+1)
-
-	// Successful promotion, bump the heartbeat
-	pool.beats[addr] = time.Now()
-	return true
-}
-
-// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as local ones, ensuring they go around the local pricing constraints.
-//
-// This method is used to add transactions from the RPC API and performs synchronous pool
-// reorganization and event propagation.
-func (pool *TxPool) AddLocals(txs []types.Transaction) []error {
-	return pool.addTxs(txs, !pool.config.NoLocals, true)
-}
-
-func (pool *TxPool) IsLocalTx(txHash common.Hash) bool {
-	return pool.all.GetLocal(txHash) != nil
-}
-
-// AddLocal enqueues a single local transaction into the pool if it is valid. This is
-// a convenience wrapper around AddLocals.
-func (pool *TxPool) AddLocal(tx types.Transaction) error {
-	errs := pool.AddLocals([]types.Transaction{tx})
-	return errs[0]
-}
-
-// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
-// senders are not among the locally tracked ones, full pricing constraints will apply.
-//
-// This method is used to add transactions from the p2p network and does not wait for pool
-// reorganization and internal event propagation.
-func (pool *TxPool) AddRemotes(txs []types.Transaction) []error {
-	return pool.addTxs(txs, false, false)
-}
-
-// This is like AddRemotes, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) AddRemotesSync(txs []types.Transaction) []error {
-	return pool.addTxs(txs, false, true)
-}
-
-// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
-func (pool *TxPool) addRemoteSync(tx types.Transaction) error {
-	errs := pool.AddRemotesSync([]types.Transaction{tx})
-	return errs[0]
-}
-
-// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
-// wrapper around AddRemotes.
-// -// Deprecated: use AddRemotes -func (pool *TxPool) AddRemote(tx types.Transaction) error { - errs := pool.AddRemotes([]types.Transaction{tx}) - return errs[0] -} - -// addTxs attempts to queue a batch of transactions if they are valid. -func (pool *TxPool) addTxs(txs []types.Transaction, local, sync bool) []error { - // Filter out known ones without obtaining the pool lock or recovering signatures - var ( - errs = make([]error, len(txs)) - news = make([]types.Transaction, 0, len(txs)) - ) - for i, tx := range txs { - // If the transaction is known, pre-set the error slot - if pool.all.Get(tx.Hash()) != nil { - errs[i] = ErrAlreadyKnown - knownTxMeter.Set(1) - continue - } - // Exclude transactions with invalid signatures as soon as - // possible and cache senders in transactions before - // obtaining lock - _, err := tx.Sender(*pool.signer) - if err != nil { - errs[i] = ErrInvalidSender - invalidTxMeter.Set(1) - continue - } - // Accumulate all unknown transactions for deeper processing - news = append(news, tx) - } - if len(news) == 0 { - return errs - } - - // Process all the new transaction and merge any errors into the original slice - pool.mu.Lock() - newErrs, dirtyAddrs := pool.addTxsLocked(news, local) - pool.mu.Unlock() - var nilSlot = 0 - for _, err := range newErrs { - for errs[nilSlot] != nil { - nilSlot++ - } - errs[nilSlot] = err - nilSlot++ - } - // Reorg the pool internals if needed and return - done := pool.requestPromoteExecutables(dirtyAddrs) - if sync { - <-done - } - return errs -} - -// addTxsLocked attempts to queue a batch of transactions if they are valid. -// The transaction pool lock must be held. -func (pool *TxPool) addTxsLocked(txs []types.Transaction, local bool) ([]error, *accountSet) { - dirty := newAccountSet(pool.signer) - errs := make([]error, len(txs)) - for i, tx := range txs { - replaced, err := pool.add(tx, local) - errs[i] = err - if err == nil && !replaced { - dirty.addTx(tx) - } - } - validTxMeter.Set(uint64(len(dirty.accounts))) - return errs, dirty -} - -// Status returns the status (unknown/pending/queued) of a batch of transactions -// identified by their hashes. -func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { - status := make([]TxStatus, len(hashes)) - for i, hash := range hashes { - tx := pool.Get(hash) - if tx == nil { - continue - } - from, _ := tx.Sender(*pool.signer) // already validated - pool.mu.RLock() - if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.GetNonce()] != nil { - status[i] = TxStatusPending - } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.GetNonce()] != nil { - status[i] = TxStatusQueued - } - // implicit else: the tx may have been included into a block between - // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct - pool.mu.RUnlock() - } - return status -} - -// Get returns a transaction if it is contained in the pool and nil otherwise. -func (pool *TxPool) Get(hash common.Hash) types.Transaction { - return pool.all.Get(hash) -} - -// Has returns an indicator whether txpool has a transaction cached with the -// given hash. -func (pool *TxPool) Has(hash common.Hash) bool { - return pool.all.Get(hash) != nil -} - -// removeTx removes a single transaction from the queue, moving all subsequent -// transactions back to the future queue. 
-func (pool *TxPool) RemoveTx(hash common.Hash, outofbound bool) {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-	pool.removeTxLocked(hash, outofbound)
-}
-
-func (pool *TxPool) removeTxLocked(hash common.Hash, outofbound bool) {
-	// Fetch the transaction we wish to delete
-	tx := pool.all.Get(hash)
-	if tx == nil {
-		return
-	}
-	addr, _ := tx.Sender(*pool.signer) // already validated during insertion
-
-	// Remove it from the list of known transactions
-	pool.all.Remove(hash)
-	if outofbound {
-		pool.priced.Removed(1)
-	}
-	if pool.locals.contains(addr) {
-		localGauge.Dec()
-	}
-	// Remove the transaction from the pending lists and reset the account nonce
-	if pending := pool.pending[addr]; pending != nil {
-		if removed, invalids := pending.Remove(tx); removed {
-			// If no more pending transactions are left, remove the list
-			if pending.Empty() {
-				delete(pool.pending, addr)
-			}
-			// Postpone any invalidated transactions
-			for _, tx := range invalids {
-				// Internal shuffle shouldn't touch the lookup set.
-				if _, err := pool.enqueueTx(tx.Hash(), tx, false, false); err != nil {
-					log.Error("enqueueTx", "error", err)
-				}
-			}
-			// Update the account nonce if needed
-			pool.pendingNonces.setIfLower(addr, tx.GetNonce())
-			// Reduce the pending counter
-			pendingGauge.Add(-(1 + len(invalids)))
-			return
-		}
-	}
-	// Transaction is in the future queue
-	if future := pool.queue[addr]; future != nil {
-		if removed, _ := future.Remove(tx); removed {
-			// Reduce the queued counter
-			queuedGauge.Dec()
-		}
-		if future.Empty() {
-			delete(pool.queue, addr)
-			delete(pool.beats, addr)
-		}
-	}
-}
-
-// requestReset requests a pool reset to the new head block.
-// The returned channel is closed when the reset has occurred.
-func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
-	select {
-	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
-		return <-pool.reorgDoneCh
-	case <-pool.reorgShutdownCh:
-		return pool.reorgShutdownCh
-	}
-}
-
-// requestPromoteExecutables requests transaction promotion checks for the given addresses.
-// The returned channel is closed when the promotion checks have occurred.
-func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
-	select {
-	case pool.reqPromoteCh <- set:
-		return <-pool.reorgDoneCh
-	case <-pool.reorgShutdownCh:
-		return pool.reorgShutdownCh
-	}
-}
-
-// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
-func (pool *TxPool) queueTxEvent(tx types.Transaction) {
-	select {
-	case pool.queueTxEventCh <- tx:
-	case <-pool.reorgShutdownCh:
-	}
-}
-
-// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
-// call those methods directly, but request them being run using requestReset and
-// requestPromoteExecutables instead.
-func (pool *TxPool) scheduleReorgLoop() { - defer debug.LogPanic() - defer pool.wg.Done() - - var ( - curDone chan struct{} // non-nil while runReorg is active - nextDone = make(chan struct{}) - launchNextRun bool - dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*txSortedMap) - reset bool - ) - for { - // Launch next background reorg if needed - if curDone == nil && launchNextRun { - // Run the background reorg and announcements - go pool.runReorg(nextDone, dirtyAccounts, queuedEvents, reset) - - // Prepare everything for the next round of reorg - curDone, nextDone = nextDone, make(chan struct{}) - launchNextRun = false - - dirtyAccounts = nil - reset = false - queuedEvents = make(map[common.Address]*txSortedMap) - } - - select { - - case <-pool.reqResetCh: - // Reset request: update head if request is already pending. - reset = true - launchNextRun = true - pool.reorgDoneCh <- nextDone - case req := <-pool.reqPromoteCh: - // Promote request: update address set if request is already pending. - if dirtyAccounts == nil { - dirtyAccounts = req - } else { - dirtyAccounts.merge(req) - } - launchNextRun = true - pool.reorgDoneCh <- nextDone - - case tx := <-pool.queueTxEventCh: - // Queue up the event, but don't schedule a reorg. It's up to the caller to - // request one later if they want the events sent. - addr, _ := tx.Sender(*pool.signer) - if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newTxSortedMap() - } - queuedEvents[addr].Put(tx) - - case <-curDone: - curDone = nil - - case <-pool.reorgShutdownCh: - // Wait for current run to finish. - if curDone != nil { - <-curDone - } - close(nextDone) - return - } - } -} - -// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *TxPool) runReorg(done chan struct{}, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap, reset bool) { - defer debug.LogPanic() - defer close(done) - - var promoteAddrs []common.Address - if dirtyAccounts != nil && !reset { - // Only dirty accounts need to be promoted, unless we're resetting. - // For resets, all addresses in the tx queue will be promoted and - // the flatten operation can be avoided. - promoteAddrs = dirtyAccounts.flatten() - } - pool.mu.Lock() - if reset { - // Nonces were reset, discard any events that became stale - for addr := range events { - events[addr].Forward(pool.pendingNonces.get(addr)) - if events[addr].Len() == 0 { - delete(events, addr) - } - } - // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) - } - } - // Check for pending transactions for every account that sent new ones - promoted := pool.promoteExecutables(promoteAddrs) - // If a new block appeared, validate the pool of pending transactions. This will - // remove any transaction that has been included in the block or was invalidated - // because of another transaction (e.g. higher gas price). - if reset { - pool.demoteUnexecutables() - } - - // Ensure pool.queue and pool.pending sizes stay within the configured limits. 
-	pool.truncatePending()
-	pool.truncateQueue()
-
-	// Update all accounts to the latest known pending nonce
-	for addr, list := range pool.pending {
-		highestPending := list.LastElement()
-		pool.pendingNonces.set(addr, highestPending.GetNonce()+1)
-	}
-	pool.mu.Unlock()
-
-	// Notify subsystems for newly added transactions
-	for _, tx := range promoted {
-		addr, _ := tx.Sender(*pool.signer)
-		if _, ok := events[addr]; !ok {
-			events[addr] = newTxSortedMap()
-		}
-		events[addr].Put(tx)
-	}
-	if len(events) > 0 {
-		var txs []types.Transaction
-		for _, set := range events {
-			txs = append(txs, set.Flatten()...)
-		}
-		pool.txFeed.Send(NewTxsEvent{txs})
-	}
-}
-
-// promoteExecutables moves transactions that have become processable from the
-// future queue to the set of pending transactions. During this process, all
-// invalidated transactions (low nonce, low balance) are deleted.
-func (pool *TxPool) promoteExecutables(accounts []common.Address) []types.Transaction {
-	// Track the promoted transactions to broadcast them at once
-	var promoted []types.Transaction
-
-	// Iterate over all accounts and promote any executable transactions
-	for _, addr := range accounts {
-		list := pool.queue[addr]
-		if list == nil {
-			continue // Just in case someone calls with a non existing account
-		}
-		// Drop all transactions that are deemed too old (low nonce)
-		forwards := list.Forward(pool.currentState.GetNonce(addr))
-		for _, tx := range forwards {
-			hash := tx.Hash()
-			pool.all.Remove(hash)
-		}
-		log.Trace("Removed old queued transactions", "count", len(forwards))
-		// Drop all transactions that are too costly (low balance or out of gas)
-		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
-		for _, tx := range drops {
-			hash := tx.Hash()
-			pool.all.Remove(hash)
-		}
-		log.Trace("Removed unpayable queued transactions", "count", len(drops))
-		queuedNofundsMeter.Set(uint64(len(drops)))
-
-		// Gather all executable transactions and promote them
-		readies := list.Ready(pool.pendingNonces.get(addr))
-		for _, tx := range readies {
-			hash := tx.Hash()
-			if pool.promoteTx(addr, hash, tx) {
-				promoted = append(promoted, tx)
-			}
-		}
-		log.Trace("Promoted queued transactions", "count", len(promoted))
-		queuedGauge.Add(-len(readies))
-
-		// Drop all transactions over the allowed limit
-		var caps types.Transactions
-		if !pool.locals.contains(addr) {
-			caps = list.Cap(int(pool.config.AccountQueue))
-			for _, tx := range caps {
-				hash := tx.Hash()
-				pool.all.Remove(hash)
-				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
-			}
-			queuedRateLimitMeter.Set(uint64(len(caps)))
-		}
-		// Mark all the items dropped as removed
-		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
-		queuedGauge.Add(-int(len(forwards) + len(drops) + len(caps)))
-		if pool.locals.contains(addr) {
-			localGauge.Add(-int(len(forwards) + len(drops) + len(caps)))
-		}
-		// Delete the entire queue entry if it became empty.
-		if list.Empty() {
-			delete(pool.queue, addr)
-			delete(pool.beats, addr)
-		}
-	}
-	return promoted
-}
-
-// truncatePending removes transactions from the pending queue if the pool is above the
-// pending limit. The algorithm tries to reduce transaction counts by an approximately
-// equal number for all accounts with many pending transactions.
-func (pool *TxPool) truncatePending() {
-	pending := uint64(0)
-	for _, list := range pool.pending {
-		pending += uint64(list.Len())
-	}
-	if pending <= pool.config.GlobalSlots {
-		return
-	}
-
-	pendingBeforeCap := pending
-	// Assemble a spam order to penalize large transactors first
-	spammers := prque.New(nil)
-	for addr, list := range pool.pending {
-		// Only evict transactions from high rollers
-		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
-			spammers.Push(addr, int64(list.Len()))
-		}
-	}
-	// Gradually drop transactions from offenders
-	offenders := []common.Address{}
-	for pending > pool.config.GlobalSlots && !spammers.Empty() {
-		// Retrieve the next offender if not local address
-		offender, _ := spammers.Pop()
-		offenders = append(offenders, offender.(common.Address))
-
-		// Equalize balances until all the same or below threshold
-		if len(offenders) > 1 {
-			// Calculate the equalization threshold for all current offenders
-			threshold := pool.pending[offender.(common.Address)].Len()
-
-			// Iteratively reduce all offenders until below limit or threshold reached
-			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
-				for i := 0; i < len(offenders)-1; i++ {
-					list := pool.pending[offenders[i]]
-
-					caps := list.Cap(list.Len() - 1)
-					for _, tx := range caps {
-						// Drop the transaction from the global pools too
-						hash := tx.Hash()
-						pool.all.Remove(hash)
-
-						// Update the account nonce to the dropped transaction
-						pool.pendingNonces.setIfLower(offenders[i], tx.GetNonce())
-						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
-					}
-					pool.priced.Removed(len(caps))
-					pendingGauge.Add(-len(caps))
-					if pool.locals.contains(offenders[i]) {
-						localGauge.Add(-len(caps))
-					}
-					pending--
-				}
-			}
-		}
-	}
-
-	// If still above threshold, reduce to limit or min allowance
-	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
-		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
-			for _, addr := range offenders {
-				list := pool.pending[addr]
-
-				caps := list.Cap(list.Len() - 1)
-				for _, tx := range caps {
-					// Drop the transaction from the global pools too
-					hash := tx.Hash()
-					pool.all.Remove(hash)
-
-					// Update the account nonce to the dropped transaction
-					pool.pendingNonces.setIfLower(addr, tx.GetNonce())
-					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
-				}
-				pool.priced.Removed(len(caps))
-				pendingGauge.Add(-len(caps))
-				if pool.locals.contains(addr) {
-					localGauge.Add(-len(caps))
-				}
-				pending--
-			}
-		}
-	}
-	pendingRateLimitMeter.Set(pendingBeforeCap - pending)
-}
-
-// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
-func (pool *TxPool) truncateQueue() { - queued := uint64(0) - for _, list := range pool.queue { - queued += uint64(list.Len()) - } - if queued <= pool.config.GlobalQueue { - return - } - - // Sort all accounts with queued transactions by heartbeat - addresses := make(addressesByHeartbeat, 0, len(pool.queue)) - for addr := range pool.queue { - if !pool.locals.contains(addr) { // don't drop locals - addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) - } - } - sort.Sort(addresses) - - // Drop transactions until the total is below the limit or only locals remain - for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { - addr := addresses[len(addresses)-1] - list := pool.queue[addr.address] - - addresses = addresses[:len(addresses)-1] - - // Drop all transactions if they are less than the overflow - if size := uint64(list.Len()); size <= drop { - for _, tx := range list.Flatten() { - pool.removeTxLocked(tx.Hash(), true) - } - drop -= size - queuedRateLimitMeter.Set(uint64(size)) - continue - } - // Otherwise drop only last few transactions - txs := list.Flatten() - for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTxLocked(txs[i].Hash(), true) - drop-- - queuedRateLimitMeter.Set(1) - } - } -} - -// demoteUnexecutables removes invalid and processed transactions from the pools -// executable/pending queue and any subsequent transactions that become unexecutable -// are moved back into the future queue. -func (pool *TxPool) demoteUnexecutables() { - // Iterate over all accounts and demote any non-executable transactions - for addr, list := range pool.pending { - nonce := pool.currentState.GetNonce(addr) - - // Drop all transactions that are deemed too old (low nonce) - olds := list.Forward(nonce) - for _, tx := range olds { - hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed old pending transaction", "hash", hash) - } - // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) - for _, tx := range drops { - hash := tx.Hash() - log.Trace("Removed unpayable pending transaction", "hash", hash) - pool.all.Remove(hash) - } - pool.priced.Removed(len(olds) + len(drops)) - pendingNofundsMeter.Set(uint64(len(drops))) - - for _, tx := range invalids { - hash := tx.Hash() - log.Trace("Demoting pending transaction", "hash", hash) - - // Internal shuffle shouldn't touch the lookup set. - if _, err := pool.enqueueTx(hash, tx, false, false); err != nil { - log.Error("enqueueTx", "error", err) - } - } - pendingGauge.Add(-(len(olds) + len(drops) + len(invalids))) - if pool.locals.contains(addr) { - localGauge.Add(-(len(olds) + len(drops) + len(invalids))) - } - // If there's a gap in front, alert (should never happen) and postpone all transactions - if list.Len() > 0 && list.txs.Get(nonce) == nil { - gapped := list.Cap(0) - for _, tx := range gapped { - hash := tx.Hash() - log.Error("Demoting invalidated transaction", "hash", hash) - - // Internal shuffle shouldn't touch the lookup set. - if _, err := pool.enqueueTx(hash, tx, false, false); err != nil { - log.Error("enqueueTx", "error", err) - } - } - pendingGauge.Add(-len(gapped)) - // This might happen in a reorg, so log it to the metering - blockReorgInvalidatedTx.Set(uint64(len(gapped))) - } - // Delete the entire pending entry if it became empty. 
- if list.Empty() { - delete(pool.pending, addr) - } - } -} - -func (pool *TxPool) IsStarted() bool { - if pool == nil { - return false - } - - return pool.isStarted.Load() -} - -func (pool *TxPool) AddInit(fns ...func() error) { - if pool == nil { - return - } - - pool.initFns = append(pool.initFns, fns...) -} - -func (pool *TxPool) RunInit() error { - if pool == nil { - return errors.New("can't init a nil transaction pool") - } - - if pool.IsStarted() { - return errors.New("transaction pool is already started") - } - - var err error - for _, fn := range pool.initFns { - if err = fn(); err != nil { - return fmt.Errorf("can't init a transaction pool: %w", err) - } - } - return nil -} - -func (pool *TxPool) AddStop(fns ...func() error) { - if pool == nil { - return - } - - pool.stopFns = append(pool.stopFns, fns...) -} - -func (pool *TxPool) RunStop() error { - if pool == nil { - return errors.New("can't stop a nil transaction pool") - } - - if !pool.IsStarted() { - return errors.New("transaction pool is already stopped") - } - - var err error - for _, fn := range pool.stopFns { - if err = fn(); err != nil { - return fmt.Errorf("can't stop a transaction pool: %w", err) - } - } - return nil -} - -// addressByHeartbeat is an account address tagged with its last activity timestamp. -type addressByHeartbeat struct { - address common.Address - heartbeat time.Time -} - -type addressesByHeartbeat []addressByHeartbeat - -func (a addressesByHeartbeat) Len() int { return len(a) } -func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } -func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// accountSet is simply a set of addresses to check for existence, and a signer -// capable of deriving addresses from transactions. -type accountSet struct { - accounts map[common.Address]struct{} - signer *types.Signer - cache *[]common.Address -} - -// newAccountSet creates a new address set with an associated signer for sender -// derivations. -func newAccountSet(signer *types.Signer, addrs ...common.Address) *accountSet { - as := &accountSet{ - accounts: make(map[common.Address]struct{}), - signer: signer, - } - for _, addr := range addrs { - as.add(addr) - } - return as -} - -// contains checks if a given address is contained within the set. -func (as *accountSet) contains(addr common.Address) bool { - _, exist := as.accounts[addr] - return exist -} - -// containsTx checks if the sender of a given tx is within the set. If the sender -// cannot be derived, this method returns false. -func (as *accountSet) containsTx(tx types.Transaction) bool { - if addr, err := tx.Sender(*as.signer); err == nil { - return as.contains(addr) - } - return false -} - -// add inserts a new address into the set to track. -func (as *accountSet) add(addr common.Address) { - as.accounts[addr] = struct{}{} - as.cache = nil -} - -// addTx adds the sender of tx into the set. -func (as *accountSet) addTx(tx types.Transaction) { - if addr, err := tx.Sender(*as.signer); err == nil { - as.add(addr) - } -} - -// flatten returns the list of addresses within this set, also caching it for later -// reuse. The returned slice should not be changed! -func (as *accountSet) flatten() []common.Address { - if as.cache == nil { - accounts := make([]common.Address, 0, len(as.accounts)) - for account := range as.accounts { - accounts = append(accounts, account) - } - as.cache = &accounts - } - return *as.cache -} - -// merge adds all addresses from the 'other' set into 'as'. 
-func (as *accountSet) merge(other *accountSet) { - for addr := range other.accounts { - as.accounts[addr] = struct{}{} - } - as.cache = nil -} - -// txLookup is used internally by Backend to track transactions while allowing lookup without -// mutex contention. -// -// Note, although this type is properly protected against concurrent access, it -// is **not** a type that should ever be mutated or even exposed outside of the -// transaction pool, since its internal state is tightly coupled with the pools -// internal mechanisms. The sole purpose of the type is to permit out-of-bound -// peeking into the pool in Backend.Get without having to acquire the widely scoped -// Backend.mu mutex. -// -// This lookup set combines the notion of "local transactions", which is useful -// to build upper-level structure. -type txLookup struct { - slots int - lock sync.RWMutex - locals map[common.Hash]types.Transaction - remotes map[common.Hash]types.Transaction -} - -// newTxLookup returns a new txLookup structure. -func newTxLookup() *txLookup { - return &txLookup{ - locals: make(map[common.Hash]types.Transaction), - remotes: make(map[common.Hash]types.Transaction), - } -} - -// Range calls f on each key and value present in the map. The callback passed -// should return the indicator whether the iteration needs to be continued. -// Callers need to specify which set (or both) to be iterated. -func (t *txLookup) Range(f func(hash common.Hash, tx types.Transaction, local bool) bool, local bool, remote bool) { - t.lock.RLock() - defer t.lock.RUnlock() - - if local { - for key, value := range t.locals { - if !f(key, value, true) { - return - } - } - } - if remote { - for key, value := range t.remotes { - if !f(key, value, false) { - return - } - } - } -} - -// Get returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) Get(hash common.Hash) types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - if tx := t.locals[hash]; tx != nil { - return tx - } - return t.remotes[hash] -} - -// GetLocal returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) GetLocal(hash common.Hash) types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.locals[hash] -} - -// GetRemote returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) GetRemote(hash common.Hash) types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.remotes[hash] -} - -// Count returns the current number of transactions in the lookup. -func (t *txLookup) Count() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.locals) + len(t.remotes) -} - -// LocalCount returns the current number of local transactions in the lookup. -func (t *txLookup) LocalCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.locals) -} - -// RemoteCount returns the current number of remote transactions in the lookup. -func (t *txLookup) RemoteCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.remotes) -} - -// Slots returns the current number of slots used in the lookup. -func (t *txLookup) Slots() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.slots -} - -// Add adds a transaction to the lookup. 
-func (t *txLookup) Add(tx types.Transaction, local bool) { - t.lock.Lock() - defer t.lock.Unlock() - - t.slots += numSlots(tx) - slotsGauge.Set(uint64(t.slots)) - - if local { - t.locals[tx.Hash()] = tx - } else { - t.remotes[tx.Hash()] = tx - } -} - -// Remove removes a transaction from the lookup. -func (t *txLookup) Remove(hash common.Hash) { - t.lock.Lock() - defer t.lock.Unlock() - - tx, ok := t.locals[hash] - if !ok { - tx, ok = t.remotes[hash] - } - if !ok { - log.Error("No transaction found to be deleted", "hash", hash) - return - } - t.slots -= numSlots(tx) - slotsGauge.Set(uint64(t.slots)) - - delete(t.locals, hash) - delete(t.remotes, hash) -} - -// RemoteToLocals migrates the transactions belongs to the given locals to locals -// set. The assumption is held the locals set is thread-safe to be used. -func (t *txLookup) RemoteToLocals(locals *accountSet) int { - t.lock.Lock() - defer t.lock.Unlock() - - var migrated int - for hash, tx := range t.remotes { - if locals.containsTx(tx) { - t.locals[hash] = tx - delete(t.remotes, hash) - migrated++ - } - } - return migrated -} - -// numSlots calculates the number of slots needed for a single transaction. -func numSlots(tx types.Transaction) int { - return int((tx.Size() + txSlotSize - 1) / txSlotSize) -} diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go deleted file mode 100644 index 84664b211b..0000000000 --- a/core/tx_pool_test.go +++ /dev/null @@ -1,2042 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package core - -import ( - "context" - "crypto/ecdsa" - "fmt" - "io/ioutil" - "math/big" - "math/rand" - "os" - "testing" - "time" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/u256" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" - "github.com/stretchr/testify/require" -) - -// TestTxPoolConfig is a transaction pool configuration without stateful disk -// sideeffects used during testing. -var TestTxPoolConfig TxPoolConfig - -// eip1559Config is a chain config with EIP-1559 enabled at block 0. 
-var eip1559Config *params.ChainConfig - -func init() { - TestTxPoolConfig = DefaultTxPoolConfig - TestTxPoolConfig.Journal = "" - TestTxPoolConfig.StartOnInit = true - - cpy := *params.TestChainConfig - eip1559Config = &cpy - eip1559Config.BerlinBlock = common.Big0 - eip1559Config.LondonBlock = common.Big0 -} - -func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) types.Transaction { - return pricedTransaction(nonce, gaslimit, u256.Num1, key) -} - -func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *uint256.Int, key *ecdsa.PrivateKey) types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, uint256.NewInt(100), gaslimit, gasprice, nil), *types.LatestSignerForChainID(nil), key) - return tx -} - -func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *uint256.Int, key *ecdsa.PrivateKey, bytes uint64) types.Transaction { - data := make([]byte, bytes) - // it is only a test, so insecure random is fine here - rand.Read(data) //nolint:gosec - - tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, uint256.NewInt(0), gaslimit, gasprice, data), *types.LatestSignerForChainID(nil), key) - return tx -} - -func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *uint256.Int, tip *uint256.Int, key *ecdsa.PrivateKey) types.Transaction { - chainID, _ := uint256.FromBig(params.TestChainConfig.ChainID) - tx, err := types.SignNewTx(key, *types.LatestSigner(params.TestChainConfig), &types.DynamicFeeTransaction{ - CommonTx: types.CommonTx{ - Nonce: nonce, - Gas: gaslimit, - To: &common.Address{}, - Value: uint256.NewInt(100), - Data: nil, - }, - ChainID: chainID, - Tip: tip, - FeeCap: gasFee, - AccessList: nil, - }) - if err != nil { - panic(err) - } - return tx -} - -func setupTxPool(t testing.TB) (*TxPool, *ecdsa.PrivateKey) { - return setupTxPoolWithConfig(t, params.TestChainConfig) -} - -func setupTxPoolWithConfig(t testing.TB, config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { - db := memdb.NewTestDB(t) - - key, _ := crypto.GenerateKey() - pool := NewTxPool(TestTxPoolConfig, config, db) - //nolint:errcheck - pool.Start(1000000000, 0) - - t.Cleanup(func() { - pool.Stop() - }) - return pool, key -} - -// validateTxPoolInternals checks various consistency invariants within the pool. -func validateTxPoolInternals(pool *TxPool) error { - pool.mu.RLock() - defer pool.mu.RUnlock() - - // Ensure the total transaction set is consistent with pending + queued - pending, queued := pool.stats() - if total := pool.all.Count(); total != pending+queued { - return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) - } - pool.priced.Reheap() - priced, remote := pool.priced.remotes.Len(), pool.all.RemoteCount() - if priced != remote { - return fmt.Errorf("total priced transaction count %d != %d", priced, remote) - } - // Ensure the next nonce to assign is the correct one - for addr, txs := range pool.pending { - // Find the last transaction - var last uint64 - for nonce := range txs.txs.items { - if last < nonce { - last = nonce - } - } - if nonce := pool.pendingNonces.get(addr); nonce != last+1 { - return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) - } - } - return nil -} - -// validateEvents checks that the correct number of transaction addition events -// were fired on the pool's event feed. 
-func validateEvents(events chan NewTxsEvent, count int) error { - var received []types.Transaction - - for len(received) < count { - select { - case ev := <-events: - received = append(received, ev.Txs...) - case <-time.After(time.Second): - return fmt.Errorf("event #%d not fired", len(received)) - } - } - if len(received) > count { - return fmt.Errorf("more than %d events fired: %v", count, received[count:]) - } - select { - case ev := <-events: - return fmt.Errorf("more than %d events fired: %v", count, ev.Txs) - - case <-time.After(50 * time.Millisecond): - // This branch should be "default", but it's a data race between goroutines, - // reading the event channel and pushing into it, so better wait a bit ensuring - // really nothing gets injected. - } - return nil -} - -func deriveSender(tx types.Transaction) (common.Address, error) { - return tx.Sender(*types.LatestSignerForChainID(nil)) -} - -// This test simulates a scenario where a new block is imported during a -// state reset and tests whether the pending state is in sync with the -// block head event that initiated the resetState(). -func TestStateChangeDuringTransactionPoolReset(t *testing.T) { - db := memdb.NewTestDB(t) - var ( - key, _ = crypto.GenerateKey() - address = crypto.PubkeyToAddress(key.PublicKey) - ) - err := db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - - // setup pool with 2 transaction in it - // Using AddBalance instead of SetBalance to make it dirty - ibs.AddBalance(address, new(uint256.Int).SetUint64(params.Ether)) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - tx0 := transaction(0, 100000, key) - tx1 := transaction(1, 100000, key) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("start tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - nonce := pool.Nonce(address) - if nonce != 0 { - t.Fatalf("Invalid nonce, want 0, got %d", nonce) - } - - pool.AddRemotesSync([]types.Transaction{tx0, tx1}) - - nonce = pool.Nonce(address) - if nonce != 2 { - t.Fatalf("Invalid nonce, want 2, got %d", nonce) - } - - // trigger state change in the background - //<-pool.requestReset(nil, nil) - - if _, err := pool.Pending(); err != nil { - t.Fatalf("Could not fetch pending transactions: %v", err) - } - nonce = pool.Nonce(address) - if nonce != 2 { - t.Fatalf("Invalid nonce, want 2, got %d", nonce) - } -} - -func TestInvalidTransactions(t *testing.T) { - pool, key := setupTxPool(t) - - tx := transaction(0, 100, key) - from, _ := deriveSender(tx) - - pool.currentState.AddBalance(from, u256.Num1) - if err := pool.AddRemote(tx); err != ErrInsufficientFunds { - t.Error("expected", ErrInsufficientFunds) - } - - balance := new(big.Int).Add(tx.GetValue().ToBig(), new(big.Int).Mul(new(big.Int).SetUint64(tx.GetGas()), tx.GetPrice().ToBig())) - x, _ := uint256.FromBig(balance) - pool.currentState.AddBalance(from, x) - if err := pool.AddRemote(tx); err != ErrIntrinsicGas { - t.Error("expected", ErrIntrinsicGas, "got", err) - } - - pool.currentState.SetNonce(from, 1) - pool.currentState.AddBalance(from, uint256.NewInt(0xffffffffffffff)) - tx = transaction(0, 100000, key) - if err := pool.AddRemote(tx); err != ErrNonceTooLow { - t.Error("expected", ErrNonceTooLow) - } - - tx = transaction(1, 100000, key) - pool.gasPrice = newInt(1000) - if err := pool.AddRemote(tx); err != ErrUnderpriced { - 
t.Error("expected", ErrUnderpriced, "got", err) - } - if err := pool.AddLocal(tx); err != nil { - t.Error("expected", nil, "got", err) - } -} - -func newInt(value int64) *uint256.Int { - v, _ := uint256.FromBig(big.NewInt(value)) - return v -} - -func TestTransactionQueue(t *testing.T) { - pool, key := setupTxPool(t) - - tx := transaction(0, 100, key) - from, _ := deriveSender(tx) - pool.currentState.AddBalance(from, uint256.NewInt(1000)) - - if _, err := pool.enqueueTx(tx.Hash(), tx, false, true); err != nil { - t.Fatal(err) - } - <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) - if len(pool.pending) != 1 { - t.Error("expected valid txs to be 1 is", len(pool.pending)) - } - - tx = transaction(1, 100, key) - from, _ = deriveSender(tx) - pool.currentState.SetNonce(from, 2) - if _, err := pool.enqueueTx(tx.Hash(), tx, false, true); err != nil { - t.Fatal(err) - } - - <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) - if _, ok := pool.pending[from].txs.items[tx.GetNonce()]; ok { - t.Error("expected transaction to be in tx pool") - } - if len(pool.queue) > 0 { - t.Error("expected transaction queue to be empty. is", len(pool.queue)) - } -} - -func TestTransactionQueue2(t *testing.T) { - pool, key := setupTxPool(t) - - tx1 := transaction(0, 100, key) - tx2 := transaction(10, 100, key) - tx3 := transaction(11, 100, key) - from, _ := deriveSender(tx1) - pool.currentState.AddBalance(from, uint256.NewInt(1000)) - - if _, err := pool.enqueueTx(tx1.Hash(), tx1, false, true); err != nil { - t.Fatal(err) - } - if _, err := pool.enqueueTx(tx2.Hash(), tx2, false, true); err != nil { - t.Fatal(err) - } - if _, err := pool.enqueueTx(tx3.Hash(), tx3, false, true); err != nil { - t.Fatal(err) - } - - pool.promoteExecutables([]common.Address{from}) - if len(pool.pending) != 1 { - t.Error("expected pending length to be 1, got", len(pool.pending)) - } - if pool.queue[from].Len() != 2 { - t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) - } -} - -func TestTransactionVeryHighValues(t *testing.T) { - t.Skip("uint 256 can't encode 1 << 300") - pool, key := setupTxPoolWithConfig(t, eip1559Config) - defer pool.Stop() - - veryBigNumber := uint256.NewInt(1) - veryBigNumber.Lsh(veryBigNumber, 260) - - tx := dynamicFeeTx(0, 100, uint256.NewInt(1), veryBigNumber, key) - if err := pool.AddRemote(tx); err != ErrTipVeryHigh { - t.Error("expected", ErrTipVeryHigh, "got", err) - } - - tx2 := dynamicFeeTx(0, 100, veryBigNumber, uint256.NewInt(1), key) - if err := pool.AddRemote(tx2); err != ErrFeeCapVeryHigh { - t.Error("expected", ErrFeeCapVeryHigh, "got", err) - } -} - -func TestTransactionChainFork(t *testing.T) { - pool, key := setupTxPool(t) - db := pool.chaindb.RwKV() - - addr := crypto.PubkeyToAddress(key.PublicKey) - resetState := func() { - err := db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - ibs.AddBalance(addr, uint256.NewInt(100000000000000)) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - pool.ResetHead(1000000000, 1) - } - resetState() - - tx := transaction(0, 100000, key) - if _, err := pool.add(tx, false); err != nil { - t.Error("didn't expect error", err) - } - pool.RemoveTx(tx.Hash(), true) - - // reset the pool's internal state - resetState() - if _, err := pool.add(tx, false); err != nil { - t.Error("didn't expect error", err) - } -} - -func TestTransactionDoubleNonce(t *testing.T) { - pool, key := 
setupTxPool(t) - - addr := crypto.PubkeyToAddress(key.PublicKey) - resetState := func() { - stateWriter := state.NewPlainStateWriter(pool.chaindb, nil, 1) - ibs := state.New(state.NewPlainStateReader(pool.chaindb)) - ibs.AddBalance(addr, uint256.NewInt(100000000000000)) - if err := ibs.CommitBlock(params.Rules{}, stateWriter); err != nil { - t.Fatal(err) - } - pool.ResetHead(1000000000, 1) - } - resetState() - - signer := types.LatestSignerForChainID(nil) - tx1, _ := types.SignTx(types.NewTransaction(0, common.Address{}, uint256.NewInt(100), 100000, uint256.NewInt(1), nil), *signer, key) - tx2, _ := types.SignTx(types.NewTransaction(0, common.Address{}, uint256.NewInt(100), 1000000, uint256.NewInt(2), nil), *signer, key) - tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, uint256.NewInt(100), 1000000, uint256.NewInt(1), nil), *signer, key) - - // Add the first two transaction, ensure higher priced stays only - if replace, err := pool.add(tx1, false); err != nil || replace { - t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace) - } - if replace, err := pool.add(tx2, false); err != nil || !replace { - t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) - } - <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) - if pool.pending[addr].Len() != 1 { - t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) - } - if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { - t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) - } - - // Add the third transaction and ensure it's not saved (smaller price) - pool.add(tx3, false) - <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) - if pool.pending[addr].Len() != 1 { - t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) - } - if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { - t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) - } - // Ensure the total transaction count is correct - if pool.all.Count() != 1 { - t.Error("expected 1 total transactions, got", pool.all.Count()) - } -} - -func TestTransactionMissingNonce(t *testing.T) { - pool, key := setupTxPool(t) - - addr := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(addr, uint256.NewInt(100000000000000)) - tx := transaction(1, 100000, key) - if _, err := pool.add(tx, false); err != nil { - t.Error("didn't expect error", err) - } - if len(pool.pending) != 0 { - t.Error("expected 0 pending transactions, got", len(pool.pending)) - } - if pool.queue[addr].Len() != 1 { - t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) - } - if pool.all.Count() != 1 { - t.Error("expected 1 total transactions, got", pool.all.Count()) - } -} - -func TestTransactionNonceRecovery(t *testing.T) { - t.Skip("fix when refactoring tx pool") - const n = 10 - pool, key := setupTxPool(t) - - addr := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.SetNonce(addr, n) - pool.currentState.AddBalance(addr, uint256.NewInt(100000000000000)) - <-pool.requestReset(nil, nil) - - tx := transaction(n, 100000, key) - if err := pool.AddRemote(tx); err != nil { - t.Error(err) - } - // simulate some weird re-order of transactions and missing nonce(s) - pool.currentState.SetNonce(addr, n-1) - pool.currentState.AddBalance(addr, u256.Num1) - <-pool.requestReset(nil, nil) - if fn := pool.Nonce(addr); fn != n-1 { - t.Errorf("expected nonce to be %d, got %d", n-1, fn) - } 
-} - -// Tests that if an account runs out of funds, any pending and queued transactions -// are dropped. -func TestTransactionDropping(t *testing.T) { - // Create a test account and fund it - pool, key := setupTxPool(t) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000)) - - // Add some pending and some queued transactions - var ( - tx0 = transaction(0, 100, key) - tx1 = transaction(1, 200, key) - tx2 = transaction(2, 300, key) - tx10 = transaction(10, 100, key) - tx11 = transaction(11, 200, key) - tx12 = transaction(12, 300, key) - ) - pool.all.Add(tx0, false) - pool.priced.Put(tx0, false) - pool.promoteTx(account, tx0.Hash(), tx0) - - pool.all.Add(tx1, false) - pool.priced.Put(tx1, false) - pool.promoteTx(account, tx1.Hash(), tx1) - - pool.all.Add(tx2, false) - pool.priced.Put(tx2, false) - pool.promoteTx(account, tx2.Hash(), tx2) - - if _, err := pool.enqueueTx(tx10.Hash(), tx10, false, true); err != nil { - t.Fatal(err) - } - if _, err := pool.enqueueTx(tx11.Hash(), tx11, false, true); err != nil { - t.Fatal(err) - } - if _, err := pool.enqueueTx(tx12.Hash(), tx12, false, true); err != nil { - t.Fatal(err) - } - - // Check that pre and post validations leave the pool as is - if pool.pending[account].Len() != 3 { - t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) - } - if pool.queue[account].Len() != 3 { - t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) - } - if pool.all.Count() != 6 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) - } - <-pool.requestReset(nil, nil) - if pool.pending[account].Len() != 3 { - t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) - } - if pool.queue[account].Len() != 3 { - t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) - } - if pool.all.Count() != 6 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) - } - // Reduce the balance of the account, and check that invalidated transactions are dropped - pool.currentState.AddBalance(account, uint256.NewInt(0).Neg(uint256.NewInt(650))) - <-pool.requestReset(nil, nil) - - if _, ok := pool.pending[account].txs.items[tx0.GetNonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx0) - } - if _, ok := pool.pending[account].txs.items[tx1.GetNonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx1) - } - if _, ok := pool.pending[account].txs.items[tx2.GetNonce()]; ok { - t.Errorf("out-of-fund pending transaction present: %v", tx2) - } - if _, ok := pool.queue[account].txs.items[tx10.GetNonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx10) - } - if _, ok := pool.queue[account].txs.items[tx11.GetNonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx11) - } - if _, ok := pool.queue[account].txs.items[tx12.GetNonce()]; ok { - t.Errorf("out-of-fund queued transaction present: %v", tx12) - } - if pool.all.Count() != 4 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) - } - // Reduce the block gas limit, check that invalidated transactions are dropped - pool.currentMaxGas = 100 - <-pool.requestReset(nil, nil) - - if _, ok := pool.pending[account].txs.items[tx0.GetNonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx0) - } - if _, ok := pool.pending[account].txs.items[tx1.GetNonce()]; ok { - t.Errorf("over-gased pending transaction 
present: %v", tx1) - } - if _, ok := pool.queue[account].txs.items[tx10.GetNonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx10) - } - if _, ok := pool.queue[account].txs.items[tx11.GetNonce()]; ok { - t.Errorf("over-gased queued transaction present: %v", tx11) - } - if pool.all.Count() != 2 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2) - } -} - -// Tests that if a transaction is dropped from the current pending pool (e.g. out -// of fund), all consecutive (still valid, but not executable) transactions are -// postponed back into the future queue to prevent broadcasting them. -func TestTransactionPostponing(t *testing.T) { - // Create the pool to test the postponing with - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create two test accounts to produce different gap profiles with - keys := make([]*ecdsa.PrivateKey, 2) - accs := make([]common.Address, len(keys)) - - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - accs[i] = crypto.PubkeyToAddress(keys[i].PublicKey) - - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(50100)) - } - // Add a batch consecutive pending transactions for validation - txs := []types.Transaction{} - for i, key := range keys { - - for j := 0; j < 100; j++ { - var tx types.Transaction - if (i+j)%2 == 0 { - tx = transaction(uint64(j), 25000, key) - } else { - tx = transaction(uint64(j), 50000, key) - } - txs = append(txs, tx) - } - } - for i, err := range pool.AddRemotesSync(txs) { - if err != nil { - t.Fatalf("tx %d: failed to add transactions: %v", i, err) - } - } - // Check that pre and post validations leave the pool as is - if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { - t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) - } - if len(pool.queue) != 0 { - t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) - } - if pool.all.Count() != len(txs) { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) - } - <-pool.requestReset(nil, nil) - if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { - t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) - } - if len(pool.queue) != 0 { - t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) - } - if pool.all.Count() != len(txs) { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) - } - // Reduce the balance of the account, and check that transactions are reorganised - for _, addr := range accs { - pool.currentState.AddBalance(addr, uint256.NewInt(0).Neg(u256.Num1)) - } - <-pool.requestReset(nil, nil) - - // The first account's first transaction remains valid, check that subsequent - // ones are either filtered out, or queued up for later. 
- if _, ok := pool.pending[accs[0]].txs.items[txs[0].GetNonce()]; !ok { - t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) - } - if _, ok := pool.queue[accs[0]].txs.items[txs[0].GetNonce()]; ok { - t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) - } - for i, tx := range txs[1:100] { - if i%2 == 1 { - if _, ok := pool.pending[accs[0]].txs.items[tx.GetNonce()]; ok { - t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx) - } - if _, ok := pool.queue[accs[0]].txs.items[tx.GetNonce()]; !ok { - t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx) - } - } else { - if _, ok := pool.pending[accs[0]].txs.items[tx.GetNonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx) - } - if _, ok := pool.queue[accs[0]].txs.items[tx.GetNonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx) - } - } - } - // The second account's first transaction got invalid, check that all transactions - // are either filtered out, or queued up for later. - if pool.pending[accs[1]] != nil { - t.Errorf("invalidated account still has pending transactions") - } - for i, tx := range txs[100:] { - if i%2 == 1 { - if _, ok := pool.queue[accs[1]].txs.items[tx.GetNonce()]; !ok { - t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx) - } - } else { - if _, ok := pool.queue[accs[1]].txs.items[tx.GetNonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx) - } - } - } - if pool.all.Count() != len(txs)/2 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2) - } -} - -// Tests that if the transaction pool has both executable and non-executable -// transactions from an origin account, filling the nonce gap moves all queued -// ones into the pending pool. 
-func TestTransactionGapFilling(t *testing.T) { - // Create a test account and fund it - pool, key := setupTxPool(t) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, TestTxPoolConfig.AccountQueue+5) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Create a pending and a queued transaction with a nonce-gap in between - pool.AddRemotesSync([]types.Transaction{ - transaction(0, 100000, key), - transaction(2, 100000, key), - }) - pending, queued := pool.Stats() - if pending != 1 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) - } - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("original event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Fill the nonce gap and ensure all transactions become pending - if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil { - t.Fatalf("failed to add gapped transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("gap-filling event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that if the transaction count belonging to a single account goes above -// some threshold, the higher transactions are dropped to prevent DOS attacks. -func TestTransactionQueueAccountLimiting(t *testing.T) { - // Create a test account and fund it - pool, key := setupTxPool(t) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - // Keep queuing up transactions and make sure all above a limit are dropped - for i := uint64(1); i <= TestTxPoolConfig.AccountQueue+5; i++ { - if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { - t.Fatalf("tx %d: failed to add transaction: %v", i, err) - } - if len(pool.pending) != 0 { - t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) - } - if i <= TestTxPoolConfig.AccountQueue { - if pool.queue[account].Len() != int(i) { - t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) - } - } else { - if pool.queue[account].Len() != int(TestTxPoolConfig.AccountQueue) { - t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), TestTxPoolConfig.AccountQueue) - } - } - } - if pool.all.Count() != int(TestTxPoolConfig.AccountQueue) { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), TestTxPoolConfig.AccountQueue) - } -} - -// Tests that if the transaction count belonging to multiple accounts go above -// some threshold, the higher transactions are dropped to prevent DOS attacks. -// -// This logic should not hold for local transactions, unless the local tracking -// mechanism is disabled. 
-func TestTransactionQueueGlobalLimiting(t *testing.T) { - testTransactionQueueGlobalLimiting(t, false) -} -func TestTransactionQueueGlobalLimitingNoLocals(t *testing.T) { - testTransactionQueueGlobalLimiting(t, true) -} - -func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { - // Create the pool to test the limit enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.NoLocals = nolocals - config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a number of test accounts and fund them (last one will be the local) - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - local := keys[len(keys)-1] - - // Generate and queue a batch of transactions - nonces := make(map[common.Address]uint64) - - txs := make(types.Transactions, 0, 3*config.GlobalQueue) - for len(txs) < cap(txs) { - key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account - addr := crypto.PubkeyToAddress(key.PublicKey) - - txs = append(txs, transaction(nonces[addr]+1, 100000, key)) - nonces[addr]++ - } - // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) - - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - // Generate a batch of transactions from the local account and import them - txs = txs[:0] - for i := uint64(0); i < 3*config.GlobalQueue; i++ { - txs = append(txs, transaction(i+1, 100000, local)) - } - pool.AddLocals(txs) - - // If locals are disabled, the previous eviction algorithm should apply here too - if nolocals { - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - } else { - // Local exemptions are enabled, make sure the local account owned the queue - if len(pool.queue) != 1 { - t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1) - } - // Also ensure no local transactions are ever dropped, even if above global limits - if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue { - t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue) - } - } -} - -// Tests that if an account remains idle for a prolonged amount of time, any -// non-executable transactions queued up are dropped to prevent wasting resources -// on shuffling them around. -// -// This logic should not hold for local transactions, unless the local tracking -// mechanism is disabled. 
-func TestTransactionQueueTimeLimiting(t *testing.T) { testTransactionQueueTimeLimiting(t, false) } -func TestTransactionQueueTimeLimitingNoLocals(t *testing.T) { - testTransactionQueueTimeLimiting(t, true) -} - -func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { - // Reduce the eviction interval to a testable amount - defer func(old time.Duration) { evictionInterval = old }(evictionInterval) - evictionInterval = time.Millisecond * 100 - - // Create the pool to test the non-expiration enforcement - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.Lifetime = time.Second - config.NoLocals = nolocals - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create two test accounts to ensure remotes expire but locals do not - local, _ := crypto.GenerateKey() - remote, _ := crypto.GenerateKey() - - pool.currentState.AddBalance(crypto.PubkeyToAddress(local.PublicKey), uint256.NewInt(1000000000)) - pool.currentState.AddBalance(crypto.PubkeyToAddress(remote.PublicKey), uint256.NewInt(1000000000)) - - // Add the two transactions and ensure they both are queued up - if err := pool.AddLocal(pricedTransaction(1, 100000, u256.Num1, local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(1, 100000, u256.Num1, remote)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - pending, queued := pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - - // Allow the eviction interval to run - time.Sleep(2 * evictionInterval) - - // Transactions should not be evicted from the queue yet since lifetime duration has not passed - pending, queued = pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - - // Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains - time.Sleep(2 * config.Lifetime) - - pending, queued = pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that even if the transaction count belonging to a single account goes -// above some threshold, as long as the transactions are executable, they are -// accepted. 
-func TestTransactionPendingLimiting(t *testing.T) { - // Create a test account and fund it - pool, key := setupTxPool(t) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, TestTxPoolConfig.AccountQueue+5) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Keep queuing up transactions and make sure all above a limit are dropped - for i := uint64(0); i < TestTxPoolConfig.AccountQueue+5; i++ { - if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { - t.Fatalf("tx %d: failed to add transaction: %v", i, err) - } - if pool.pending[account].Len() != int(i)+1 { - t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) - } - if len(pool.queue) != 0 { - t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) - } - } - if pool.all.Count() != int(TestTxPoolConfig.AccountQueue+5) { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), TestTxPoolConfig.AccountQueue+5) - } - if err := validateEvents(events, int(TestTxPoolConfig.AccountQueue+5)); err != nil { - t.Fatalf("event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that if the transaction count belonging to multiple accounts go above -// some hard threshold, the higher transactions are dropped to prevent DOS -// attacks. -func TestTransactionPendingGlobalLimiting(t *testing.T) { - // Create the pool to test the limit enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.GlobalSlots = config.AccountSlots * 10 - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Generate and queue a batch of transactions - nonces := make(map[common.Address]uint64) - - txs := types.Transactions{} - for _, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - for j := 0; j < int(config.GlobalSlots)/len(keys)*2; j++ { - txs = append(txs, transaction(nonces[addr], 100000, key)) - nonces[addr]++ - } - } - // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) - - pending := 0 - for _, list := range pool.pending { - pending += list.Len() - } - if pending > int(config.GlobalSlots) { - t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Test the limit on transaction size is enforced correctly. -// This test verifies every transaction having allowed size -// is added to the pool, and longer transactions are rejected. 
-func TestTransactionAllowedTxSize(t *testing.T) { - // Create a test account and fund it - pool, key := setupTxPool(t) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000000)) - - // Compute maximal data size for transactions (lower bound). - // - // It is assumed the fields in the transaction (except of the data) are: - // - nonce <= 32 bytes - // - gasPrice <= 32 bytes - // - gasLimit <= 32 bytes - // - recipient == 20 bytes - // - value <= 32 bytes - // - signature == 65 bytes - // All those fields are summed up to at most 213 bytes. - baseSize := uint64(213) - dataSize := txMaxSize - baseSize - - // Try adding a transaction with maximal allowed size - tx := pricedDataTransaction(0, pool.currentMaxGas, u256.Num1, key, dataSize) - if err := pool.addRemoteSync(tx); err != nil { - t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) - } - // Try adding a transaction with random allowed size - if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, u256.Num1, key, uint64(rand.Intn(int(dataSize))))); err != nil { - t.Fatalf("failed to add transaction of random allowed size: %v", err) - } - // Try adding a transaction of minimal not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, u256.Num1, key, txMaxSize)); err == nil { - t.Fatalf("expected rejection on slightly oversize transaction") - } - // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, u256.Num1, key, dataSize+1+uint64(rand.Intn(int(10*txMaxSize))))); err == nil { - t.Fatalf("expected rejection on oversize transaction") - } - // Run some sanity checks on the pool internals - pending, queued := pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that if transactions start being capped, transactions are also removed from 'all' -func TestTransactionCapClearsFromAll(t *testing.T) { - // Create the pool to test the limit enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.AccountSlots = 2 - config.AccountQueue = 2 - config.GlobalSlots = 8 - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a number of test accounts and fund them - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(addr, uint256.NewInt(1000000)) - - txs := types.Transactions{} - for j := 0; j < int(config.GlobalSlots)*2; j++ { - txs = append(txs, transaction(uint64(j), 100000, key)) - } - // Import the batch and verify that limits have been enforced - pool.AddRemotes(txs) - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that if the transaction count belonging to multiple accounts go above -// some hard threshold, if they are under the minimum guaranteed slot count then -// the transactions are still kept. 
-func TestTransactionPendingMinimumAllowance(t *testing.T) { - // Create the pool to test the limit enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.GlobalSlots = 1 - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Generate and queue a batch of transactions - nonces := make(map[common.Address]uint64) - - txs := types.Transactions{} - for _, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - for j := 0; j < int(config.AccountSlots)*2; j++ { - txs = append(txs, transaction(nonces[addr], 100000, key)) - nonces[addr]++ - } - } - // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) - - for addr, list := range pool.pending { - if list.Len() != int(config.AccountSlots) { - t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots) - } - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that setting the transaction pool gas price to a higher value correctly -// discards everything cheaper than that and moves any gapped transactions back -// from the pending pool to the queue. -// -// Note, local transactions are never allowed to be dropped. -func TestTransactionPoolRepricing(t *testing.T) { - t.Skip("deadlock") - // Create the pool to test the pricing enforcement with - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Generate and queue a batch of transactions, both pending and queued - txs := types.Transactions{} - - txs = append(txs, pricedTransaction(0, 100000, u256.Num2, keys[0])) - txs = append(txs, pricedTransaction(1, 100000, u256.Num1, keys[0])) - txs = append(txs, pricedTransaction(2, 100000, u256.Num2, keys[0])) - - txs = append(txs, pricedTransaction(0, 100000, u256.Num1, keys[1])) - txs = append(txs, pricedTransaction(1, 100000, u256.Num2, keys[1])) - txs = append(txs, pricedTransaction(2, 100000, u256.Num2, keys[1])) - - txs = append(txs, pricedTransaction(1, 100000, u256.Num2, keys[2])) - txs = append(txs, pricedTransaction(2, 100000, u256.Num1, keys[2])) - txs = append(txs, pricedTransaction(3, 100000, u256.Num2, keys[2])) - - ltx := pricedTransaction(0, 100000, u256.Num1, keys[3]) - - // Import the batch and that both pending and queued transactions match up - pool.AddRemotesSync(txs) - pool.AddLocal(ltx) - - pending, queued := pool.Stats() - if pending != 7 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) 
- } - if queued != 3 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) - } - if err := validateEvents(events, 7); err != nil { - t.Fatalf("original event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasPrice(newInt(2)) - - pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if queued != 5 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) - } - if err := validateEvents(events, 0); err != nil { - t.Fatalf("reprice event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Check that we can't add the old transactions back - if err := pool.AddRemote(pricedTransaction(1, 100000, u256.Num1, keys[0])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(0, 100000, u256.Num1, keys[1])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(2, 100000, u256.Num1, keys[2])); err != ErrUnderpriced { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) - } - if err := validateEvents(events, 0); err != nil { - t.Fatalf("post-reprice event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // However we can add local underpriced transactions - tx := pricedTransaction(1, 100000, u256.Num1, keys[3]) - if err := pool.AddLocal(tx); err != nil { - t.Fatalf("failed to add underpriced local transaction: %v", err) - } - if pending, _ = pool.Stats(); pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("post-reprice local event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // And we can fill gaps with properly priced transactions - if err := pool.AddRemote(pricedTransaction(1, 100000, u256.Num2, keys[0])); err != nil { - t.Fatalf("failed to add pending transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(0, 100000, u256.Num2, keys[1])); err != nil { - t.Fatalf("failed to add pending transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(2, 100000, u256.Num2, keys[2])); err != nil { - t.Fatalf("failed to add queued transaction: %v", err) - } - if err := validateEvents(events, 5); err != nil { - t.Fatalf("post-reprice event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that setting the transaction pool gas price to a higher value does not -// remove local transactions. 
-func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { - // Create the pool to test the pricing enforcement with - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 3) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000*1000000)) - } - // Create transaction (both pending and queued) with a linearly growing gasprice - for i := uint64(0); i < 500; i++ { - // Add pending transaction. - pendingTx := pricedTransaction(i, 100000, uint256.NewInt(i), keys[2]) - if err := pool.AddLocal(pendingTx); err != nil { - t.Fatal(err) - } - // Add queued transaction. - queuedTx := pricedTransaction(i+501, 100000, uint256.NewInt(i), keys[2]) - if err := pool.AddLocal(queuedTx); err != nil { - t.Fatal(err) - } - } - pending, queued := pool.Stats() - expPending, expQueued := 500, 500 - validate := func() { - pending, queued = pool.Stats() - if pending != expPending { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending) - } - if queued != expQueued { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) - } - - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - } - validate() - - // Reprice the pool and check that nothing is dropped - pool.SetGasPrice(newInt(2)) - validate() - - pool.SetGasPrice(newInt(2)) - pool.SetGasPrice(newInt(4)) - pool.SetGasPrice(newInt(8)) - pool.SetGasPrice(newInt(100)) - validate() -} - -// Tests that when the pool reaches its global transaction limit, underpriced -// transactions are gradually shifted out for more expensive ones and any gapped -// pending transactions are moved into the queue. -// -// Note, local transactions are never allowed to be dropped. 
-func TestTransactionPoolUnderpricing(t *testing.T) { - // Create the pool to test the pricing enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.GlobalSlots = 2 - config.GlobalQueue = 2 - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Generate and queue a batch of transactions, both pending and queued - txs := types.Transactions{} - - txs = append(txs, pricedTransaction(0, 100000, u256.Num1, keys[0])) - txs = append(txs, pricedTransaction(1, 100000, u256.Num2, keys[0])) - - txs = append(txs, pricedTransaction(1, 100000, u256.Num1, keys[1])) - - ltx := pricedTransaction(0, 100000, u256.Num1, keys[2]) - - // Import the batch and that both pending and queued transactions match up - pool.AddRemotes(txs) - pool.AddLocal(ltx) - - pending, queued := pool.Stats() - if pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - if err := validateEvents(events, 3); err != nil { - t.Fatalf("original event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Ensure that adding an underpriced transaction on block limit fails - if err := pool.AddRemote(pricedTransaction(0, 100000, uint256.NewInt(1), keys[1])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) - } - // Ensure that adding high priced transactions drops cheap ones, but not own - if err := pool.AddRemote(pricedTransaction(0, 100000, uint256.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - - t.Fatalf("failed to add well priced transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(2, 100000, uint256.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 - t.Fatalf("failed to add well priced transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(3, 100000, uint256.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 - t.Fatalf("failed to add well priced transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("additional event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Ensure that adding local transactions can push out even higher priced ones - ltx = pricedTransaction(1, 100000, u256.Num0, keys[2]) - if err := pool.AddLocal(ltx); err != nil { - t.Fatalf("failed to append underpriced local transaction: %v", err) - } - ltx 
= pricedTransaction(0, 100000, u256.Num0, keys[3]) - if err := pool.AddLocal(ltx); err != nil { - t.Fatalf("failed to add new underpriced local transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("local event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that more expensive transactions push out cheap ones from the pool, but -// without producing instability by creating gaps that start jumping transactions -// back and forth between queued/pending. -func TestTransactionPoolStableUnderpricing(t *testing.T) { - // Create the pool to test the pricing enforcement with - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.GlobalSlots = 128 - config.GlobalQueue = 0 - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 2) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Fill up the entire queue with the same transaction price points - txs := types.Transactions{} - for i := uint64(0); i < config.GlobalSlots; i++ { - txs = append(txs, pricedTransaction(i, 100000, u256.Num1, keys[0])) - } - pool.AddRemotesSync(txs) - - pending, queued := pool.Stats() - if pending != int(config.GlobalSlots) { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateEvents(events, int(config.GlobalSlots)); err != nil { - t.Fatalf("original event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap - if err := pool.addRemoteSync(pricedTransaction(0, 100000, uint256.NewInt(3), keys[1])); err != nil { - t.Fatalf("failed to add well priced transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != int(config.GlobalSlots) { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("additional event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that the pool rejects duplicate transactions. 
-func TestTransactionDeduplication(t *testing.T) { - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create a test account to add transactions with - key, _ := crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(1000000000)) - - // Create a batch of transactions and add a few of them - txs := make([]types.Transaction, 16) - for i := 0; i < len(txs); i++ { - txs[i] = pricedTransaction(uint64(i), 100000, u256.Num1, key) - } - var firsts []types.Transaction - for i := 0; i < len(txs); i += 2 { - firsts = append(firsts, txs[i]) - } - errs := pool.AddRemotesSync(firsts) - if len(errs) != len(firsts) { - t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) - } - for i, err := range errs { - if err != nil { - t.Errorf("add %d failed: %v", i, err) - } - } - pending, queued := pool.Stats() - if pending != 1 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) - } - if queued != len(txs)/2-1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) - } - // Try to add all of them now and ensure previous ones error out as knowns - errs = pool.AddRemotesSync(txs) - if len(errs) != len(txs) { - t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) - } - for i, err := range errs { - if i%2 == 0 && err == nil { - t.Errorf("add %d succeeded, should have failed as known", i) - } - if i%2 == 1 && err != nil { - t.Errorf("add %d failed: %v", i, err) - } - } - pending, queued = pool.Stats() - if pending != len(txs) { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs)) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that the pool rejects replacement transactions that don't meet the minimum -// price bump required. 
-func TestTransactionReplacement(t *testing.T) { - // Create the pool to test the pricing enforcement with - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) - sub := pool.txFeed.Subscribe(events) - defer sub.Unsubscribe() - - // Create a test account to add transactions with - key, _ := crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(1000000000)) - - // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) - price := uint64(100) - threshold := (price * (100 + TestTxPoolConfig.PriceBump)) / 100 - - if err := pool.addRemoteSync(pricedTransaction(0, 100000, u256.Num1, key)); err != nil { - t.Fatalf("failed to add original cheap pending transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(0, 100001, u256.Num1, key)); err != ErrReplaceUnderpriced { - t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(0, 100000, u256.Num2, key)); err != nil { - t.Fatalf("failed to replace original cheap pending transaction: %v", err) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("cheap replacement event firing failed: %v", err) - } - - if err := pool.addRemoteSync(pricedTransaction(0, 100000, uint256.NewInt(price), key)); err != nil { - t.Fatalf("failed to add original proper pending transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(0, 100001, uint256.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(0, 100000, uint256.NewInt(threshold), key)); err != nil { - t.Fatalf("failed to replace original proper pending transaction: %v", err) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("proper replacement event firing failed: %v", err) - } - - // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) - if err := pool.AddRemote(pricedTransaction(2, 100000, uint256.NewInt(1), key)); err != nil { - t.Fatalf("failed to add original cheap queued transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(2, 100001, uint256.NewInt(1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(2, 100000, uint256.NewInt(2), key)); err != nil { - t.Fatalf("failed to replace original cheap queued transaction: %v", err) - } - - if err := pool.AddRemote(pricedTransaction(2, 100000, uint256.NewInt(price), key)); err != nil { - t.Fatalf("failed to add original proper queued transaction: %v", err) - } - if err := pool.AddRemote(pricedTransaction(2, 100001, uint256.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) - } - if err := pool.AddRemote(pricedTransaction(2, 100000, uint256.NewInt(threshold), key)); err != 
nil { - t.Fatalf("failed to replace original proper queued transaction: %v", err) - } - - if err := validateEvents(events, 0); err != nil { - t.Fatalf("queued replacement event firing failed: %v", err) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// Tests that local transactions are journaled to disk, but remote transactions -// get discarded between restarts. -func TestTransactionJournaling(t *testing.T) { testTransactionJournaling(t, false) } -func TestTransactionJournalingNoLocals(t *testing.T) { testTransactionJournaling(t, true) } - -func testTransactionJournaling(t *testing.T, nolocals bool) { - // Create a temporary file for the journal - file, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("failed to create temporary journal: %v", err) - } - journal := file.Name() - defer os.Remove(journal) - - // Clean up the temporary file, we only need the path for now - file.Close() - os.Remove(journal) - - // Create the original pool to inject transaction into the journal - db := memdb.NewTestDB(t) - - config := TestTxPoolConfig - config.NoLocals = nolocals - config.Journal = journal - config.Rejournal = time.Second - - pool := NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create two test accounts to ensure remotes expire but locals do not - local, _ := crypto.GenerateKey() - remote, _ := crypto.GenerateKey() - - err = db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - ibs.AddBalance(crypto.PubkeyToAddress(local.PublicKey), uint256.NewInt(1000000000)) - ibs.AddBalance(crypto.PubkeyToAddress(remote.PublicKey), uint256.NewInt(1000000000)) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - - // Add three local and a remote transactions and ensure they are queued up - if err := pool.AddLocal(pricedTransaction(0, 100000, u256.Num1, local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.AddLocal(pricedTransaction(1, 100000, u256.Num1, local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.AddLocal(pricedTransaction(2, 100000, u256.Num1, local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.addRemoteSync(pricedTransaction(0, 100000, u256.Num1, remote)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - pending, queued := pool.Stats() - if pending != 4 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - - // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive - pool.Stop() - - err = db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - pool = NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { 
- t.Fatalf("starting tx pool: %v", err) - } - - pending, queued = pool.Stats() - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if nolocals { - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - } else { - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Bump the nonce temporarily and ensure the newly invalidated transaction is removed - err = db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - pool.ResetHead(1000000000, 1) - //<-pool.requestReset(nil, nil) - time.Sleep(2 * config.Rejournal) - - pool.Stop() - - err = db.Update(context.Background(), func(tx kv.RwTx) error { - stateWriter := state.NewPlainStateWriter(tx, nil, 1) - ibs := state.New(state.NewPlainStateReader(tx)) - ibs.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - return ibs.CommitBlock(params.Rules{}, stateWriter) - }) - require.NoError(t, err) - - pool = NewTxPool(config, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - - pending, queued = pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } -} - -// TestTransactionStatusCheck tests that the pool can correctly retrieve the -// pending status of individual transactions. 
-func TestTransactionStatusCheck(t *testing.T) { - // Create the pool to test the status retrievals with - db := memdb.NewTestDB(t) - - pool := NewTxPool(TestTxPoolConfig, params.TestChainConfig, db) - if err := pool.Start(1000000000, 0); err != nil { - t.Fatalf("starting tx pool: %v", err) - } - defer func() { - pool.Stop() - }() - - // Create the test accounts to check various transaction statuses with - keys := make([]*ecdsa.PrivateKey, 3) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), uint256.NewInt(1000000)) - } - // Generate and queue a batch of transactions, both pending and queued - txs := types.Transactions{} - - txs = append(txs, pricedTransaction(0, 100000, u256.Num1, keys[0])) // Pending only - txs = append(txs, pricedTransaction(0, 100000, u256.Num1, keys[1])) // Pending and queued - txs = append(txs, pricedTransaction(2, 100000, u256.Num1, keys[1])) - txs = append(txs, pricedTransaction(2, 100000, u256.Num1, keys[2])) // Queued only - - // Import the transaction and ensure they are correctly added - pool.AddRemotesSync(txs) - - pending, queued := pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) - } - if err := validateTxPoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Retrieve the status of each transaction and validate them - hashes := make([]common.Hash, len(txs)) - for i, tx := range txs { - hashes[i] = tx.Hash() - } - hashes = append(hashes, common.Hash{}) - - statuses := pool.Status(hashes) - expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown} - - for i := 0; i < len(statuses); i++ { - if statuses[i] != expect[i] { - t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i]) - } - } -} - -// Test the transaction slots consumption is computed correctly -func TestTransactionSlotCount(t *testing.T) { - key, _ := crypto.GenerateKey() - - // Check that an empty transaction consumes a single slot - smallTx := pricedDataTransaction(0, 0, u256.Num0, key, 0) - if slots := numSlots(smallTx); slots != 1 { - t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1) - } - // Check that a large transaction consumes the correct number of slots - bigTx := pricedDataTransaction(0, 0, u256.Num0, key, uint64(10*txSlotSize)) - if slots := numSlots(bigTx); slots != 11 { - t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11) - } -} - -// Benchmarks the speed of validating the contents of the pending queue of the -// transaction pool. 
-func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) } -func BenchmarkPendingDemotion1000(b *testing.B) { benchmarkPendingDemotion(b, 1000) } -func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 10000) } - -func benchmarkPendingDemotion(b *testing.B, size int) { - // Add a batch of transactions to a pool one by one - pool, key := setupTxPool(b) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - for i := 0; i < size; i++ { - tx := transaction(uint64(i), 100000, key) - pool.promoteTx(account, tx.Hash(), tx) - } - // Benchmark the speed of pool validation - b.ResetTimer() - for i := 0; i < b.N; i++ { - pool.demoteUnexecutables() - } -} - -// Benchmarks the speed of scheduling the contents of the future queue of the -// transaction pool. -func BenchmarkFuturePromotion100(b *testing.B) { benchmarkFuturePromotion(b, 100) } -func BenchmarkFuturePromotion1000(b *testing.B) { benchmarkFuturePromotion(b, 1000) } -func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 10000) } - -func benchmarkFuturePromotion(b *testing.B, size int) { - // Add a batch of transactions to a pool one by one - pool, key := setupTxPool(b) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - for i := 0; i < size; i++ { - tx := transaction(uint64(1+i), 100000, key) - if _, err := pool.enqueueTx(tx.Hash(), tx, false, true); err != nil { - b.Fatal(err) - } - } - // Benchmark the speed of pool validation - b.ResetTimer() - for i := 0; i < b.N; i++ { - pool.promoteExecutables(nil) - } -} - -// Benchmarks the speed of batched transaction insertion. -func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) } -func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) } -func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) } - -func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) } -func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) } -func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) } - -func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { - // Generate a batch of transactions to enqueue into the pool - pool, key := setupTxPool(b) - - account := crypto.PubkeyToAddress(key.PublicKey) - pool.currentState.AddBalance(account, uint256.NewInt(1000000)) - - batches := make([]types.Transactions, b.N) - for i := 0; i < b.N; i++ { - batches[i] = make(types.Transactions, size) - for j := 0; j < size; j++ { - batches[i][j] = transaction(uint64(size*i+j), 100000, key) - } - } - // Benchmark importing the transactions into the queue - b.ResetTimer() - for _, batch := range batches { - if local { - pool.AddLocals(batch) - } else { - pool.AddRemotes(batch) - } - } -} - -func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { - // Allocate keys for testing - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - - remoteKey, _ := crypto.GenerateKey() - remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) - - locals := make([]types.Transaction, 4096+1024) // Occupy all slots - for i := 0; i < len(locals); i++ { - locals[i] = transaction(uint64(i), 100000, key) - } - remotes := make([]types.Transaction, 1000) - for i := 0; i < len(remotes); i++ { - 
remotes[i] = pricedTransaction(uint64(i), 100000, newInt(2), remoteKey) // Higher gasprice - } - // Benchmark importing the transactions into the queue - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - pool, _ := setupTxPool(b) - pool.currentState.AddBalance(account, newInt(100000000)) - for _, local := range locals { - if err := pool.AddLocal(local); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - // Assign a high enough balance for testing - pool.currentState.AddBalance(remoteAddr, newInt(100000000)) - for i := 0; i < len(remotes); i++ { - pool.AddRemotes([]types.Transaction{remotes[i]}) - } - pool.Stop() - } -} diff --git a/eth/backend.go b/eth/backend.go index cb81bf192d..9e4144d3ee 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -55,21 +55,17 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethutils" - "github.com/ledgerwatch/erigon/eth/fetcher" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" stages2 "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/ledgerwatch/erigon/turbo/stages/txpropagate" - "github.com/ledgerwatch/erigon/turbo/txpool" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -84,9 +80,6 @@ type Ethereum struct { config *ethconfig.Config logger log.Logger - // Handlers - txPool *core.TxPool - // DB interfaces chainDB kv.RwDB privateAPI *grpc.Server @@ -107,13 +100,12 @@ type Ethereum struct { minedBlocks chan *types.Block // downloader fields - downloadCtx context.Context - downloadCancel context.CancelFunc - downloadServer *download.ControlServerImpl - sentryServers []*download.SentryServerImpl - txPoolP2PServer *txpool.P2PServer - sentries []direct.SentryClient - stagedSync *stagedsync.Sync + downloadCtx context.Context + downloadCancel context.CancelFunc + downloadServer *download.ControlServerImpl + sentryServers []*download.SentryServerImpl + sentries []direct.SentryClient + stagedSync *stagedsync.Sync notifications *stagedsync.Notifications @@ -239,8 +231,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } - backend.txPool = core.NewTxPool(config.TxPool, chainConfig, chainKv) - // setting notifier to support streaming events to rpc daemon var mg *snapshotsync.SnapshotMigrator if config.Snapshot.Enabled { @@ -313,7 +303,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere var txPoolRPC txpool_proto.TxpoolServer var miningRPC txpool_proto.MiningServer - if config.TxPool.V2 { + if !config.TxPool.Disable { cfg := txpool2.DefaultConfig cfg.DBDir = path.Join(stack.Config().DataDir, "txpool") cfg.PendingSubPoolLimit = int(config.TxPool.GlobalSlots) @@ -337,19 +327,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } txPoolRPC = backend.txPool2GrpcServer - } else { - backend.txPoolP2PServer, err = txpool.NewP2PServer(backend.downloadCtx, backend.sentries, backend.txPool) - if err != nil { - return nil, err - } 
- - fetchTx := func(peerID enode.ID, hashes []common.Hash) error { - backend.txPoolP2PServer.SendTxsRequest(context.TODO(), peerID, hashes) - return nil - } - - backend.txPoolP2PServer.TxFetcher = fetcher.NewTxFetcher(backend.txPool.Has, backend.txPool.AddRemotes, fetchTx) - txPoolRPC = privateapi.NewTxPoolServer(ctx, backend.txPool) } backend.notifyMiningAboutNewTxs = make(chan struct{}, 1) @@ -364,7 +341,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere mining := stagedsync.New( stagedsync.MiningStages(backend.downloadCtx, - stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool, backend.txPool2, backend.txPool2DB, tmpdir), + stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, tmpdir), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), stagedsync.StageTrieCfg(backend.chainDB, false, true, tmpdir), @@ -410,38 +387,17 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } if !config.TxPool.Disable { - if config.TxPool.V2 { - backend.txPool2Fetch.ConnectCore() - backend.txPool2Fetch.ConnectSentries() - go txpool2.MainLoop(backend.downloadCtx, - backend.txPool2DB, backend.chainDB, - backend.txPool2, backend.newTxs2, backend.txPool2Send, backend.txPool2GrpcServer.NewSlotsStreams, - func() { - select { - case backend.notifyMiningAboutNewTxs <- struct{}{}: - default: - } - }) - } else { - go txpropagate.BroadcastPendingTxsToNetwork(backend.downloadCtx, backend.txPool, backend.txPoolP2PServer.RecentPeers, backend.downloadServer) - go func() { - newTransactions := make(chan core.NewTxsEvent, 128) - sub := backend.txPool.SubscribeNewTxsEvent(newTransactions) - defer sub.Unsubscribe() - defer close(newTransactions) - for { - select { - case <-ctx.Done(): - return - case <-newTransactions: - select { - case backend.notifyMiningAboutNewTxs <- struct{}{}: - default: - } - } + backend.txPool2Fetch.ConnectCore() + backend.txPool2Fetch.ConnectSentries() + go txpool2.MainLoop(backend.downloadCtx, + backend.txPool2DB, backend.chainDB, + backend.txPool2, backend.newTxs2, backend.txPool2Send, backend.txPool2GrpcServer.NewSlotsStreams, + func() { + select { + case backend.notifyMiningAboutNewTxs <- struct{}{}: + default: } - }() - } + }) } go func() { defer debug.LogPanic() @@ -475,7 +431,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } - backend.stagedSync, err = stages2.NewStagedSync(backend.downloadCtx, backend.logger, backend.chainDB, stack.Config().P2P, *config, chainConfig.TerminalTotalDifficulty, backend.downloadServer, tmpdir, backend.txPool, backend.txPoolP2PServer, backend.notifications.Accumulator) + backend.stagedSync, err = stages2.NewStagedSync(backend.downloadCtx, backend.logger, backend.chainDB, stack.Config().P2P, *config, chainConfig.TerminalTotalDifficulty, backend.downloadServer, tmpdir, backend.notifications.Accumulator) if err != nil { return nil, err } @@ -570,7 +526,6 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy if !cfg.Enabled { return nil } - s.txPool.SetGasPrice(gasPrice) // Configure the local mining address eb, err := s.Etherbase() @@ -699,9 +654,6 @@ func (s *Ethereum) Start() error { func (s *Ethereum) Stop() error { // Stop all the 
peer-related stuff first. s.downloadCancel() - if s.txPoolP2PServer != nil { - s.txPoolP2PServer.TxFetcher.Stop() - } if s.privateAPI != nil { shutdownDone := make(chan bool) go func() { @@ -714,9 +666,6 @@ func (s *Ethereum) Stop() error { case <-shutdownDone: } } - if s.txPool != nil { - s.txPool.Stop() - } if s.quitMining != nil { close(s.quitMining) } @@ -731,7 +680,7 @@ func (s *Ethereum) Stop() error { sentryServer.Close() } s.chainDB.Close() - if s.config.TxPool.V2 { + if s.txPool2DB != nil { s.txPool2DB.Close() } return nil diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index f52067abba..ddc7cb4c60 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -25,9 +25,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/enr" "github.com/ledgerwatch/erigon/params" ) @@ -91,37 +89,6 @@ type TxPool interface { Get(hash common.Hash) types.Transaction } -// MakeProtocols constructs the P2P protocol definitions for `eth`. -func MakeProtocols(backend Backend, readNodeInfo func() *NodeInfo, dnsdisc enode.Iterator, chainConfig *params.ChainConfig, genesisHash common.Hash, headHeight uint64) []p2p.Protocol { - protocols := make([]p2p.Protocol, len(ProtocolVersions)) - for i, version := range ProtocolVersions { - version := version // Closure - - protocols[i] = p2p.Protocol{ - Name: ProtocolName, - Version: version, - Length: protocolLengths[version], - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := NewPeer(version, p, rw, backend.TxPool()) - defer peer.Close() - - return backend.RunPeer(peer, func(peer *Peer) error { - return Handle(backend, peer) - }) - }, - NodeInfo: func() interface{} { - return readNodeInfo() - }, - PeerInfo: func(peerID enode.ID) interface{} { - return backend.PeerInfo(peerID) - }, - Attributes: []enr.Entry{CurrentENREntry(chainConfig, genesisHash, headHeight)}, - DialCandidates: dnsdisc, - } - } - return protocols -} - // NodeInfo represents a short summary of the `eth` sub-protocol metadata // known about the host peer. type NodeInfo struct { diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 888bb0c18b..9b5e7dbd9f 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -49,7 +49,6 @@ var ( // in the `eth` protocol without actually doing any data processing. type testBackend struct { db kv.RwDB - txpool *core.TxPool headBlock *types.Block genesis *types.Block chainConfig *params.ChainConfig @@ -83,19 +82,15 @@ func newTestBackendWithGenerator(t *testing.T, blocks int, generator func(int, * b := &testBackend{ db: m.DB, - txpool: core.NewTxPool(txconfig, m.ChainConfig, m.DB), headBlock: headBlock, genesis: m.Genesis, chainConfig: params.TestChainConfig, } - t.Cleanup(func() { - b.txpool.Stop() - }) return b } func (b *testBackend) DB() kv.RwDB { return b.db } -func (b *testBackend) TxPool() eth.TxPool { return b.txpool } +func (b *testBackend) TxPool() eth.TxPool { return nil } func (b *testBackend) RunPeer(peer *eth.Peer, handler eth.Handler) error { // Normally the backend would do peer mainentance and handshakes. All that // is omitted and we will just give control back to the handler. 
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index d2305763c9..4aa5524eb3 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -46,11 +46,11 @@ const ProtocolName = "eth" // ProtocolVersions are the supported versions of the `eth` protocol (first // is primary). -var ProtocolVersions = []uint{ETH66} +var ProtocolVersions = []uint{ETH66} //nolint // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{ETH66: 17} +var protocolLengths = map[uint]uint64{ETH66: 17} //nolint // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -582,7 +582,7 @@ func (rb BlockRawBody) EncodeRLP(w io.Writer) error { var txsLen int for _, tx := range rb.Transactions { txsLen++ - var txLen int = len(tx) + var txLen = len(tx) if txLen >= 56 { txsLen += (bits.Len(uint(txLen)) + 7) / 8 } diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 435a61eb91..ca50f5ba6e 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -8,25 +8,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/prune" ) -func DefaultStages(ctx context.Context, - sm prune.Mode, - headers HeadersCfg, - blockHashCfg BlockHashesCfg, - bodies BodiesCfg, - difficulty DifficultyCfg, - senders SendersCfg, - exec ExecuteBlockCfg, - trans TranspileCfg, - hashState HashStateCfg, - trieCfg TrieCfg, - history HistoryCfg, - logIndex LogIndexCfg, - callTraces CallTracesCfg, - txLookup TxLookupCfg, - txPool TxPoolCfg, - finish FinishCfg, - test bool, -) []*Stage { +func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, blockHashCfg BlockHashesCfg, bodies BodiesCfg, difficulty DifficultyCfg, senders SendersCfg, exec ExecuteBlockCfg, trans TranspileCfg, hashState HashStateCfg, trieCfg TrieCfg, history HistoryCfg, logIndex LogIndexCfg, callTraces CallTracesCfg, txLookup TxLookupCfg, finish FinishCfg, test bool) []*Stage { return []*Stage{ { ID: stages.Headers, @@ -217,20 +199,6 @@ func DefaultStages(ctx context.Context, return PruneTxLookup(p, tx, txLookup, ctx) }, }, - { - ID: stages.TxPool, - Description: "Update transaction pool", - Disabled: txPool.config.Disable || txPool.config.V2, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx) error { - return SpawnTxPool(s, tx, txPool, ctx) - }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindTxPool(u, s, tx, txPool, ctx) - }, - Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { - return PruneTxPool(p, tx, txPool, ctx) - }, - }, { ID: stages.Finish, Description: "Final: update current block for the RPC API", @@ -263,7 +231,6 @@ var DefaultForwardOrder = UnwindOrder{ stages.StorageHistoryIndex, stages.LogIndex, stages.TxLookup, - stages.TxPool, stages.Finish, } @@ -290,10 +257,6 @@ var DefaultUnwindOrder = UnwindOrder{ stages.Execution, stages.Senders, - // Unwinding of tx pool (re-injecting transactions into the pool needs to happen after unwinding execution) - // also tx pool is before senders because senders unwind is inside cycle transaction - stages.TxPool, - stages.Bodies, stages.BlockHashes, stages.Headers, @@ -315,10 +278,6 @@ var DefaultPruneOrder = PruneOrder{ stages.Execution, stages.Senders, - // Unwinding of tx pool (reinjecting transactions into the pool needs to happen after unwinding execution) - // also tx pool is 
before senders because senders unwind is inside cycle transaction - stages.TxPool, - stages.Bodies, stages.BlockHashes, stages.Headers, diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index 95d9017523..1c1145464d 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -55,28 +55,17 @@ type MiningCreateBlockCfg struct { miner MiningState chainConfig params.ChainConfig engine consensus.Engine - txPool *core.TxPool txPool2 *txpool.TxPool txPool2DB kv.RoDB tmpdir string } -func StageMiningCreateBlockCfg( - db kv.RwDB, - miner MiningState, - chainConfig params.ChainConfig, - engine consensus.Engine, - txPool *core.TxPool, - txPool2 *txpool.TxPool, - txPool2DB kv.RoDB, - tmpdir string, -) MiningCreateBlockCfg { +func StageMiningCreateBlockCfg(db kv.RwDB, miner MiningState, chainConfig params.ChainConfig, engine consensus.Engine, txPool2 *txpool.TxPool, txPool2DB kv.RoDB, tmpdir string) MiningCreateBlockCfg { return MiningCreateBlockCfg{ db: db, miner: miner, chainConfig: chainConfig, engine: engine, - txPool: txPool, txPool2: txPool2, txPool2DB: txPool2DB, tmpdir: tmpdir, @@ -88,7 +77,7 @@ func StageMiningCreateBlockCfg( // - resubmitAdjustCh - variable is not implemented func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBlockCfg, quit <-chan struct{}) (err error) { current := cfg.miner.MiningBlock - txPoolLocals := cfg.txPool.Locals() + txPoolLocals := []common.Address{} //txPoolV2 has no concept of local addresses (yet?) coinbase := cfg.miner.MiningConfig.Etherbase const ( @@ -111,64 +100,31 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc } blockNum := executionAt + 1 - if cfg.txPool2 != nil { - var txs []types.Transaction - if err = cfg.txPool2DB.View(context.Background(), func(tx kv.Tx) error { - txSlots := txpool.TxsRlp{} - if err := cfg.txPool2.Best(200, &txSlots, tx); err != nil { - return err - } - - txs, err = types.DecodeTransactions(txSlots.Txs) - if err != nil { - return fmt.Errorf("decode rlp of pending txs: %w", err) - } - var sender common.Address - for i := range txs { - copy(sender[:], txSlots.Senders.At(i)) - txs[i].SetSender(sender) - } - - return nil - }); err != nil { + var txs []types.Transaction + if err = cfg.txPool2DB.View(context.Background(), func(tx kv.Tx) error { + txSlots := txpool.TxsRlp{} + if err := cfg.txPool2.Best(200, &txSlots, tx); err != nil { return err } - current.RemoteTxs = types.NewTransactionsFixedOrder(txs) - // txpool v2 - doesn't prioritise local txs over remote - current.LocalTxs = types.NewTransactionsFixedOrder(nil) - log.Debug(fmt.Sprintf("[%s] Candidate txs", logPrefix), "amount", len(txs)) - } else { - pendingTxs, err := cfg.txPool.Pending() + + txs, err = types.DecodeTransactions(txSlots.Txs) if err != nil { - return err + return fmt.Errorf("decode rlp of pending txs: %w", err) } - // Split the pending transactions into locals and remotes - localTxs, remoteTxs := types.TransactionsGroupedBySender{}, types.TransactionsGroupedBySender{} - signer := types.MakeSigner(&cfg.chainConfig, blockNum) - for _, txs := range pendingTxs { - if len(txs) == 0 { - continue - } - from, _ := txs[0].Sender(*signer) - isLocal := false - for _, local := range txPoolLocals { - if local == from { - isLocal = true - break - } - } - - if isLocal { - localTxs = append(localTxs, txs) - } else { - remoteTxs = append(remoteTxs, txs) - } + var sender common.Address + for i := range txs { + copy(sender[:], 
txSlots.Senders.At(i)) + txs[i].SetSender(sender) } - current.LocalTxs = types.NewTransactionsByPriceAndNonce(*signer, localTxs) - current.RemoteTxs = types.NewTransactionsByPriceAndNonce(*signer, remoteTxs) - log.Debug(fmt.Sprintf("[%s] Candidate txs", logPrefix), "local", len(localTxs), "remote", len(remoteTxs)) + return nil + }); err != nil { + return err } + current.RemoteTxs = types.NewTransactionsFixedOrder(txs) + // txpool v2 - doesn't prioritise local txs over remote + current.LocalTxs = types.NewTransactionsFixedOrder(nil) + log.Debug(fmt.Sprintf("[%s] Candidate txs", logPrefix), "amount", len(txs)) localUncles, remoteUncles, err := readNonCanonicalHeaders(tx, blockNum, cfg.engine, coinbase, txPoolLocals) if err != nil { return err @@ -281,6 +237,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc }) return uncles } + // when 08 is processed ancestors contain 07 (quick block) for _, ancestor := range GetBlocksFromHash(parent.Hash(), 7) { for _, uncle := range ancestor.Uncles() { diff --git a/eth/stagedsync/stage_txpool.go b/eth/stagedsync/stage_txpool.go deleted file mode 100644 index a986e68300..0000000000 --- a/eth/stagedsync/stage_txpool.go +++ /dev/null @@ -1,298 +0,0 @@ -package stagedsync - -import ( - "context" - "encoding/binary" - "fmt" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/length" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/log/v3" -) - -type TxPoolCfg struct { - db kv.RwDB - pool *core.TxPool - config core.TxPoolConfig - startFunc func() -} - -func StageTxPoolCfg(db kv.RwDB, pool *core.TxPool, config core.TxPoolConfig, startFunc func()) TxPoolCfg { - return TxPoolCfg{ - db: db, - pool: pool, - config: config, - startFunc: startFunc, - } -} - -func SpawnTxPool(s *StageState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) error { - quitCh := ctx.Done() - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - to, err := s.ExecutionAt(tx) - if err != nil { - return err - } - - logPrefix := s.LogPrefix() - if to < s.BlockNumber { - return fmt.Errorf("to (%d) < from (%d)", to, s.BlockNumber) - } - if cfg.pool != nil && !cfg.pool.IsStarted() { - log.Info(fmt.Sprintf("[%s] Starting tx pool after sync", logPrefix), "from", s.BlockNumber, "to", to) - headHash, err := rawdb.ReadCanonicalHash(tx, to) - if err != nil { - return err - } - headHeader := rawdb.ReadHeader(tx, headHash, to) - if err := cfg.pool.Start(headHeader.GasLimit, to); err != nil { - return fmt.Errorf(" start pool phase 1: %w", err) - } - if cfg.startFunc != nil { - cfg.startFunc() - } - } - if cfg.pool != nil && cfg.pool.IsStarted() && s.BlockNumber > 0 { - if err := incrementalTxPoolUpdate(logPrefix, s.BlockNumber, to, cfg.pool, tx, quitCh); err != nil { - return err - } - pending, queued := cfg.pool.Stats() - log.Info(fmt.Sprintf("[%s] Transaction stats", logPrefix), "pending", pending, "queued", queued) - } - if err := s.Update(tx, to); err != nil { - return err - } - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx kv.RwTx, quitCh <-chan 
struct{}) error { - headHash, err := rawdb.ReadCanonicalHash(tx, to) - if err != nil { - return err - } - - headHeader := rawdb.ReadHeader(tx, headHash, to) - pool.ResetHead(headHeader.GasLimit, to) - canonical := make([]common.Hash, to-from) - currentHeaderIdx := uint64(0) - - canonicals, err := tx.Cursor(kv.HeaderCanonical) - if err != nil { - return err - } - defer canonicals.Close() - for k, v, err := canonicals.Seek(dbutils.EncodeBlockNumber(from + 1)); k != nil; k, v, err = canonicals.Next() { - if err != nil { - return err - } - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - - if currentHeaderIdx >= to-from { // if header stage is ahead of body stage - break - } - - copy(canonical[currentHeaderIdx][:], v) - currentHeaderIdx++ - } - - log.Trace(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical)) - bodies, err := tx.Cursor(kv.BlockBody) - if err != nil { - return err - } - defer bodies.Close() - for k, _, err := bodies.Seek(dbutils.EncodeBlockNumber(from + 1)); k != nil; k, _, err = bodies.Next() { - if err != nil { - return err - } - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - - blockNumber := binary.BigEndian.Uint64(k[:8]) - blockHash := common.BytesToHash(k[8:]) - if blockNumber > to { - break - } - - if canonical[blockNumber-from-1] != blockHash { - // non-canonical case - continue - } - - body := rawdb.ReadBodyWithTransactions(tx, blockHash, blockNumber) - for _, tx := range body.Transactions { - pool.RemoveTx(tx.Hash(), true /* outofbound */) - } - } - return nil -} - -func UnwindTxPool(u *UnwindState, s *StageState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) (err error) { - if u.UnwindPoint >= s.BlockNumber { - return nil - } - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - quitCh := ctx.Done() - - logPrefix := s.LogPrefix() - if cfg.pool != nil && cfg.pool.IsStarted() { - if err := unwindTxPoolUpdate(logPrefix, u.UnwindPoint, s.BlockNumber, cfg.pool, tx, quitCh); err != nil { - return err - } - pending, queued := cfg.pool.Stats() - log.Info(fmt.Sprintf("[%s] Transaction stats", logPrefix), "pending", pending, "queued", queued) - } - if err := u.Done(tx); err != nil { - return err - } - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx kv.RwTx, quitCh <-chan struct{}) error { - headHash, err := rawdb.ReadCanonicalHash(tx, from) - if err != nil { - return err - } - headHeader := rawdb.ReadHeader(tx, headHash, from) - pool.ResetHead(headHeader.GasLimit, from) - canonical := make([]common.Hash, to-from) - - canonicals, err := tx.Cursor(kv.HeaderCanonical) - if err != nil { - return err - } - defer canonicals.Close() - for k, v, err := canonicals.Seek(dbutils.EncodeBlockNumber(from + 1)); k != nil; k, v, err = canonicals.Next() { - if err != nil { - return err - } - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - blockNumber := binary.BigEndian.Uint64(k[:8]) - - if blockNumber > to { - break - } - - copy(canonical[blockNumber-from-1][:], v) - } - log.Trace(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical)) - senders := make([][]common.Address, to-from+1) - sendersC, err := tx.Cursor(kv.Senders) - if err != nil { - return err - } - defer sendersC.Close() - for k, v, err := sendersC.Seek(dbutils.EncodeBlockNumber(from + 1)); k != nil; 
k, v, err = sendersC.Next() { - if err != nil { - return err - } - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - - blockNumber := binary.BigEndian.Uint64(k[:8]) - blockHash := common.BytesToHash(k[8:]) - if blockNumber > to { - break - } - - if canonical[blockNumber-from-1] != blockHash { - // non-canonical case - continue - } - sendersArray := make([]common.Address, len(v)/length.Addr) - for i := 0; i < len(sendersArray); i++ { - copy(sendersArray[i][:], v[i*length.Addr:]) - } - senders[blockNumber-from-1] = sendersArray - } - - var txsToInject []types.Transaction - bodies, err := tx.Cursor(kv.BlockBody) - if err != nil { - return err - } - defer bodies.Close() - for k, _, err := bodies.Seek(dbutils.EncodeBlockNumber(from + 1)); k != nil; k, _, err = bodies.Next() { - if err != nil { - return err - } - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - - blockNumber := binary.BigEndian.Uint64(k[:8]) - blockHash := common.BytesToHash(k[8:]) - if blockNumber > to { - break - } - - if canonical[blockNumber-from-1] != blockHash { - // non-canonical case - continue - } - - body := rawdb.ReadBodyWithTransactions(tx, blockHash, blockNumber) - body.SendersToTxs(senders[blockNumber-from-1]) - txsToInject = append(txsToInject, body.Transactions...) - } - //nolint:errcheck - log.Info(fmt.Sprintf("[%s] Injecting txs into the pool", logPrefix), "number", len(txsToInject)) - pool.AddRemotesSync(txsToInject) - log.Info(fmt.Sprintf("[%s] Injection complete", logPrefix)) - return nil -} - -func PruneTxPool(s *PruneState, tx kv.RwTx, cfg TxPoolCfg, ctx context.Context) (err error) { - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go index 3f4a91c895..c67dde783e 100644 --- a/eth/stagedsync/stages/stages.go +++ b/eth/stagedsync/stages/stages.go @@ -43,7 +43,6 @@ var ( LogIndex SyncStage = "LogIndex" // Generating logs index (from receipts) CallTraces SyncStage = "CallTraces" // Generating call traces index TxLookup SyncStage = "TxLookup" // Generating transactions lookup index - TxPool SyncStage = "TxPool" // Starts Backend Finish SyncStage = "Finish" // Nominal stage after all other stages MiningCreateBlock SyncStage = "MiningCreateBlock" @@ -65,7 +64,6 @@ var AllStages = []SyncStage{ LogIndex, CallTraces, TxLookup, - TxPool, Finish, } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 04432db946..8400246ec8 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -19,7 +19,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - txpool2 "github.com/ledgerwatch/erigon-lib/txpool" + "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/erigon/cmd/sentry/download" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" @@ -73,18 +73,18 @@ type MockSentry struct { Notifications *stagedsync.Notifications - // Pool v2 - TxPoolV2Fetch *txpool2.Fetch - TxPoolV2Send *txpool2.Send - TxPoolV2GrpcServer *txpool2.GrpcServer - TxPoolV2 *txpool2.TxPool - txPoolV2DB kv.RwDB + // TxPool + TxPoolFetch *txpool.Fetch + TxPoolSend *txpool.Send + TxPoolGrpcServer *txpool.GrpcServer + TxPool *txpool.TxPool + txPoolDB kv.RwDB } func (ms 
*MockSentry) Close() { ms.cancel() - if ms.txPoolV2DB != nil { - ms.txPoolV2DB.Close() + if ms.txPoolDB != nil { + ms.txPoolDB.Close() } ms.DB.Close() } @@ -218,34 +218,33 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey blockPropagator := func(Ctx context.Context, block *types.Block, td *big.Int) {} if !cfg.TxPool.Disable { - cfg.TxPool.V2 = true - poolCfg := txpool2.DefaultConfig - newTxs := make(chan txpool2.Hashes, 1024) + poolCfg := txpool.DefaultConfig + newTxs := make(chan txpool.Hashes, 1024) if t != nil { t.Cleanup(func() { close(newTxs) }) } chainID, _ := uint256.FromBig(mock.ChainConfig.ChainID) - mock.TxPoolV2, err = txpool2.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID) + mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID) if err != nil { t.Fatal(err) } - mock.txPoolV2DB = memdb.NewPoolDB() + mock.txPoolDB = memdb.NewPoolDB() stateChangesClient := direct.NewStateDiffClientDirect(erigonGrpcServeer) - mock.TxPoolV2Fetch = txpool2.NewFetch(mock.Ctx, sentries, mock.TxPoolV2, stateChangesClient, mock.DB, mock.txPoolV2DB, *chainID) - mock.TxPoolV2Fetch.SetWaitGroup(&mock.ReceiveWg) - mock.TxPoolV2Send = txpool2.NewSend(mock.Ctx, sentries, mock.TxPoolV2) - mock.TxPoolV2GrpcServer = txpool2.NewGrpcServer(mock.Ctx, mock.TxPoolV2, mock.txPoolV2DB, *chainID) + mock.TxPoolFetch = txpool.NewFetch(mock.Ctx, sentries, mock.TxPool, stateChangesClient, mock.DB, mock.txPoolDB, *chainID) + mock.TxPoolFetch.SetWaitGroup(&mock.ReceiveWg) + mock.TxPoolSend = txpool.NewSend(mock.Ctx, sentries, mock.TxPool) + mock.TxPoolGrpcServer = txpool.NewGrpcServer(mock.Ctx, mock.TxPool, mock.txPoolDB, *chainID) - mock.TxPoolV2Fetch.ConnectCore() + mock.TxPoolFetch.ConnectCore() mock.StreamWg.Add(1) - mock.TxPoolV2Fetch.ConnectSentries() + mock.TxPoolFetch.ConnectSentries() mock.StreamWg.Wait() - go txpool2.MainLoop(mock.Ctx, mock.txPoolV2DB, mock.DB, mock.TxPoolV2, newTxs, mock.TxPoolV2Send, mock.TxPoolV2GrpcServer.NewSlotsStreams, func() {}) + go txpool.MainLoop(mock.Ctx, mock.txPoolDB, mock.DB, mock.TxPool, newTxs, mock.TxPoolSend, mock.TxPoolGrpcServer.NewSlotsStreams, func() {}) } // Committed genesis will be shared between download and mock sentry @@ -302,7 +301,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.DB, cfg.BatchSize, mock.ChainConfig, - ), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, true, true, mock.tmpdir), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageTxPoolCfg(mock.DB, nil, cfg.TxPool, func() {}), stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log), true), + ), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, true, true, mock.tmpdir), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log), true), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, ) @@ -319,7 +318,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.MiningSync = stagedsync.New( stagedsync.MiningStages(mock.Ctx, - 
stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, nil, mock.TxPoolV2, nil, mock.tmpdir), + stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, mock.tmpdir), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, false, true, mock.tmpdir), @@ -429,13 +428,13 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { ms.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := false highestSeenHeader := chain.TopBlock.NumberU64() - if ms.TxPoolV2 != nil { + if ms.TxPool != nil { ms.ReceiveWg.Add(1) } if err := StageLoopStep(ms.Ctx, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil); err != nil { return err } - if ms.TxPoolV2 != nil { + if ms.TxPool != nil { ms.ReceiveWg.Wait() // Wait for TxPool notification } // Check if the latest header was imported or rolled back diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 7448ff2486..399efdec90 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -14,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/sentry/download" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/misc" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -23,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/erigon/turbo/txpool" "github.com/ledgerwatch/log/v3" ) @@ -223,8 +221,6 @@ func NewStagedSync( terminalTotalDifficulty *big.Int, controlServer *download.ControlServerImpl, tmpdir string, - txPool *core.TxPool, - txPoolServer *txpool.P2PServer, accumulator *shards.Accumulator, ) (*stagedsync.Sync, error) { return stagedsync.New( @@ -261,20 +257,13 @@ func NewStagedSync( db, cfg.BatchSize, controlServer.ChainConfig, - ), stagedsync.StageHashStateCfg(db, tmpdir), stagedsync.StageTrieCfg(db, true, true, tmpdir), stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir), stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir), stagedsync.StageTxPoolCfg(db, txPool, cfg.TxPool, func() { - if cfg.TxPool.V2 { - } else { - for i := range txPoolServer.Sentries { - go func(i int) { - txpool.RecvTxMessageLoop(ctx, txPoolServer.Sentries[i], txPoolServer.HandleInboundMessage, nil) - }(i) - go func(i int) { - txpool.RecvPeersLoop(ctx, txPoolServer.Sentries[i], txPoolServer.RecentPeers, nil) - }(i) - } - txPoolServer.TxFetcher.Start() - } - }), stagedsync.StageFinishCfg(db, tmpdir, logger), false), + ), stagedsync.StageHashStateCfg(db, tmpdir), + stagedsync.StageTrieCfg(db, true, true, tmpdir), + stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir), + stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), + stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), + stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir), + stagedsync.StageFinishCfg(db, tmpdir, logger), false), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, ), nil diff --git a/turbo/stages/txpropagate/deprecated.go b/turbo/stages/txpropagate/deprecated.go deleted file mode 100644 index 
99ed73ce15..0000000000 --- a/turbo/stages/txpropagate/deprecated.go +++ /dev/null @@ -1,92 +0,0 @@ -package txpropagate - -import ( - "context" - "sync" - "time" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/erigon/cmd/sentry/download" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/core" -) - -const txChanSize int = 4096 - -// BroadcastPendingTxsToNetwork - does send to p2p: -// - new txs -// - all pooled txs to recently connected peers -// - all local pooled txs to random peers periodically -func BroadcastPendingTxsToNetwork(ctx context.Context, txPool *core.TxPool, recentPeers *RecentlyConnectedPeers, s *download.ControlServerImpl) { - defer debug.LogPanic() - - txsCh := make(chan core.NewTxsEvent, txChanSize) - txsSub := txPool.SubscribeNewTxsEvent(txsCh) - defer txsSub.Unsubscribe() - - syncToNewPeersEvery := time.NewTicker(2 * time.Minute) - defer syncToNewPeersEvery.Stop() - - broadcastLocalTransactionsEvery := time.NewTicker(2 * time.Minute) - defer broadcastLocalTransactionsEvery.Stop() - - localTxHashes := make([]common.Hash, 128) - remoteTxHashes := make([]common.Hash, 128) - - for { - select { - case <-txsSub.Err(): - return - case <-ctx.Done(): - return - case e := <-txsCh: // new txs - // first broadcast all local txs to all peers, then non-local to random sqrt(peersAmount) peers - localTxHashes = localTxHashes[:0] - remoteTxHashes = remoteTxHashes[:0] - for i := range e.Txs { - h := e.Txs[i].Hash() - if txPool.IsLocalTx(h) { - localTxHashes = append(localTxHashes, h) - } else { - remoteTxHashes = append(remoteTxHashes, h) - } - } - s.BroadcastLocalPooledTxs(ctx, localTxHashes) - s.BroadcastRemotePooledTxs(ctx, remoteTxHashes) - case <-syncToNewPeersEvery.C: // new peer - newPeers := recentPeers.GetAndClean() - if len(newPeers) == 0 { - continue - } - remoteTxHashes = txPool.AppendHashes(remoteTxHashes[:0]) - s.PropagatePooledTxsToPeersList(ctx, newPeers, remoteTxHashes) - case <-broadcastLocalTransactionsEvery.C: // periodically broadcast local txs to random peers - localTxHashes = txPool.AppendLocalHashes(localTxHashes[:0]) - s.BroadcastLocalPooledTxs(ctx, localTxHashes) - } - } -} - -type RecentlyConnectedPeers struct { - lock sync.RWMutex - peers []*types.H256 -} - -func (l *RecentlyConnectedPeers) Len() int { - l.lock.RLock() - defer l.lock.RUnlock() - return len(l.peers) -} -func (l *RecentlyConnectedPeers) AddPeer(p *types.H256) { - l.lock.Lock() - defer l.lock.Unlock() - l.peers = append(l.peers, p) -} -func (l *RecentlyConnectedPeers) GetAndClean() []*types.H256 { - l.lock.Lock() - defer l.lock.Unlock() - peers := l.peers - l.peers = nil - return peers -} diff --git a/turbo/txpool/README.md b/turbo/txpool/README.md deleted file mode 100644 index 87783b07f4..0000000000 --- a/turbo/txpool/README.md +++ /dev/null @@ -1,6 +0,0 @@ -External TxPool ---- - -Not ready yet: - -- need kv.StateDiff stream support from core side diff --git a/turbo/txpool/p2p.go b/turbo/txpool/p2p.go deleted file mode 100644 index 179adde755..0000000000 --- a/turbo/txpool/p2p.go +++ /dev/null @@ -1,413 +0,0 @@ -package txpool - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math/rand" - "strings" - "sync" - "time" - - "github.com/ledgerwatch/erigon-lib/common/dbg" - "github.com/ledgerwatch/erigon-lib/direct" - "github.com/ledgerwatch/erigon-lib/gointerfaces" - proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - 
"github.com/ledgerwatch/erigon/cmd/sentry/download" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/eth/fetcher" - "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/stages/txpropagate" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" -) - -// P2PServer - receiving and sending messages to Sentries -type P2PServer struct { - ctx context.Context - Sentries []direct.SentryClient - TxPool *core.TxPool - TxFetcher *fetcher.TxFetcher - RecentPeers *txpropagate.RecentlyConnectedPeers -} - -func NewP2PServer(ctx context.Context, sentries []direct.SentryClient, txPool *core.TxPool) (*P2PServer, error) { - cs := &P2PServer{ - ctx: ctx, - Sentries: sentries, - TxPool: txPool, - RecentPeers: &txpropagate.RecentlyConnectedPeers{}, - } - - return cs, nil -} - -func (tp *P2PServer) newPooledTransactionHashes66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - var query eth.NewPooledTransactionHashesPacket - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding newPooledTransactionHashes66: %w, data: %x", err, inreq.Data) - } - return tp.TxFetcher.Notify(download.ConvertH256ToPeerID(inreq.PeerId), query) -} - -func (tp *P2PServer) newPooledTransactionHashes65(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - var query eth.NewPooledTransactionHashesPacket - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding newPooledTransactionHashes65: %w, data: %x", err, inreq.Data) - } - return tp.TxFetcher.Notify(download.ConvertH256ToPeerID(inreq.PeerId), query) -} - -func (tp *P2PServer) pooledTransactions66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - txs := ð.PooledTransactionsPacket66{} - if err := txs.DecodeRLP(rlp.NewStream(bytes.NewReader(inreq.Data), 0)); err != nil { - return fmt.Errorf("decoding pooledTransactions66: %w, data: %x", err, inreq.Data) - } - - return tp.TxFetcher.Enqueue(download.ConvertH256ToPeerID(inreq.PeerId), txs.PooledTransactionsPacket, true) -} - -func (tp *P2PServer) pooledTransactions65(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - txs := ð.PooledTransactionsPacket{} - if err := txs.DecodeRLP(rlp.NewStream(bytes.NewReader(inreq.Data), 0)); err != nil { - return fmt.Errorf("decoding pooledTransactions65: %w, data: %x", err, inreq.Data) - } - - return tp.TxFetcher.Enqueue(download.ConvertH256ToPeerID(inreq.PeerId), *txs, true) -} - -func (tp *P2PServer) transactions66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - return tp.transactions65(ctx, inreq, sentry) -} - -func (tp *P2PServer) transactions65(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - if tp.TxPool == nil { - return nil - } - var query eth.TransactionsPacket - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding TransactionsPacket: %w, data: %x", err, inreq.Data) - } - return tp.TxFetcher.Enqueue(download.ConvertH256ToPeerID(inreq.PeerId), query, false) -} - -func (tp *P2PServer) getPooledTransactions66(ctx context.Context, inreq 
*proto_sentry.InboundMessage, sentry direct.SentryClient) error { - if tp.TxPool == nil { - return nil - } - var query eth.GetPooledTransactionsPacket66 - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding GetPooledTransactionsPacket66: %w, data: %x", err, inreq.Data) - } - _, txs := eth.AnswerGetPooledTransactions(tp.TxPool, query.GetPooledTransactionsPacket) - b, err := rlp.EncodeToBytes(ð.PooledTransactionsRLPPacket66{ - RequestId: query.RequestId, - PooledTransactionsRLPPacket: txs, - }) - if err != nil { - return fmt.Errorf("encode GetPooledTransactionsPacket66 response: %w", err) - } - // TODO: implement logic from perr.ReplyPooledTransactionsRLP - to remember tx ids - outreq := proto_sentry.SendMessageByIdRequest{ - PeerId: inreq.PeerId, - Data: &proto_sentry.OutboundMessageData{Id: proto_sentry.MessageId_POOLED_TRANSACTIONS_66, Data: b}, - } - _, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) - if err != nil { - return fmt.Errorf("send pooled transactions response: %w", err) - } - return nil -} - -func (tp *P2PServer) getPooledTransactions65(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - if tp.TxPool == nil { - return nil - } - var query eth.GetPooledTransactionsPacket - if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - return fmt.Errorf("decoding getPooledTransactions65: %w, data: %x", err, inreq.Data) - } - _, txs := eth.AnswerGetPooledTransactions(tp.TxPool, query) - b, err := rlp.EncodeToBytes(eth.PooledTransactionsRLPPacket(txs)) - if err != nil { - return fmt.Errorf("encode getPooledTransactions65 response: %w", err) - } - // TODO: implement logic from perr.ReplyPooledTransactionsRLP - to remember tx ids - outreq := proto_sentry.SendMessageByIdRequest{ - PeerId: inreq.PeerId, - Data: &proto_sentry.OutboundMessageData{Id: proto_sentry.MessageId_POOLED_TRANSACTIONS_65, Data: b}, - } - _, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) - if err != nil { - return fmt.Errorf("send pooled transactions response: %w", err) - } - return nil -} - -func (tp *P2PServer) SendTxsRequest(ctx context.Context, peerID enode.ID, hashes []common.Hash) (_ enode.ID, ok bool) { - var outreq66 *proto_sentry.SendMessageByIdRequest - - // if sentry not found peers to send such message, try next one. stop if found. 
- for i, ok, next := tp.randSentryIndex(); ok; i, ok = next() { - if !tp.Sentries[i].Ready() { - continue - } - - switch tp.Sentries[i].Protocol() { - case eth.ETH66: - if outreq66 == nil { - data66, err := rlp.EncodeToBytes(ð.GetPooledTransactionsPacket66{ - RequestId: rand.Uint64(), //nolint:gosec - GetPooledTransactionsPacket: hashes, - }) - if err != nil { - log.Error("Could not encode transactions request", "err", err) - return enode.ID{}, false - } - - outreq66 = &proto_sentry.SendMessageByIdRequest{ - PeerId: gointerfaces.ConvertHashToH256(peerID), - Data: &proto_sentry.OutboundMessageData{Id: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66, Data: data66}, - } - } - - if sentPeers, err1 := tp.Sentries[i].SendMessageById(ctx, outreq66, &grpc.EmptyCallOption{}); err1 != nil { - if isPeerNotFoundErr(err1) { - continue - } - log.Error("[SendTxsRequest]", "err", err1) - - } else if sentPeers != nil && len(sentPeers.Peers) != 0 { - return download.ConvertH256ToPeerID(sentPeers.Peers[0]), true - } - } - } - return enode.ID{}, false -} - -func (tp *P2PServer) randSentryIndex() (int, bool, func() (int, bool)) { - var i int - if len(tp.Sentries) > 1 { - i = rand.Intn(len(tp.Sentries)) - } - to := i - return i, true, func() (int, bool) { - i = (i + 1) % len(tp.Sentries) - return i, i != to - } -} - -func (tp *P2PServer) HandleInboundMessage(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - switch inreq.Id { - - // ==== eth 65 ==== - case proto_sentry.MessageId_TRANSACTIONS_65: - return tp.transactions65(ctx, inreq, sentry) - case proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_65: - return tp.newPooledTransactionHashes65(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_65: - return tp.getPooledTransactions65(ctx, inreq, sentry) - case proto_sentry.MessageId_POOLED_TRANSACTIONS_65: - return tp.pooledTransactions65(ctx, inreq, sentry) - - // ==== eth 66 ==== - case proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: - return tp.newPooledTransactionHashes66(ctx, inreq, sentry) - case proto_sentry.MessageId_POOLED_TRANSACTIONS_66: - return tp.pooledTransactions66(ctx, inreq, sentry) - case proto_sentry.MessageId_TRANSACTIONS_66: - return tp.transactions66(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66: - return tp.getPooledTransactions66(ctx, inreq, sentry) - default: - return fmt.Errorf("not implemented for message Id: %s", inreq.Id) - } -} - -func RecvTxMessageLoop(ctx context.Context, sentry direct.SentryClient, handleInboundMessage func(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error, wg *sync.WaitGroup) { - for { - select { - case <-ctx.Done(): - return - default: - } - - if _, err := sentry.HandShake(ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil { - s, ok := status.FromError(err) - doLog := !((ok && s.Code() == codes.Canceled) || errors.Is(err, io.EOF) || errors.Is(err, context.Canceled)) - if doLog { - log.Error("[RecvTxMessage] sentry not ready yet", "err", err) - } - time.Sleep(time.Second) - continue - } - if err := RecvTxMessage(ctx, sentry, handleInboundMessage, wg); err != nil { - s, ok := status.FromError(err) - doLog := !((ok && s.Code() == codes.Canceled) || errors.Is(err, io.EOF) || errors.Is(err, context.Canceled)) - if doLog { - log.Error("[RecvTxMessage]", "err", err) - } - continue - } - } -} - -// RecvTxMessage -// wg is used only in tests to avoid time.Sleep. 
For non-test code wg == nil -func RecvTxMessage(ctx context.Context, - sentry direct.SentryClient, - handleInboundMessage func(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error, - wg *sync.WaitGroup, -) (err error) { - defer func() { - if rec := recover(); rec != nil { - err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) - } - }() // avoid crash because Erigon's core does many things - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: []proto_sentry.MessageId{ - eth.ToProto[eth.ETH66][eth.NewPooledTransactionHashesMsg], - eth.ToProto[eth.ETH66][eth.GetPooledTransactionsMsg], - eth.ToProto[eth.ETH66][eth.TransactionsMsg], - eth.ToProto[eth.ETH66][eth.PooledTransactionsMsg], - }}, grpc.WaitForReady(true)) - if err != nil { - select { - case <-ctx.Done(): - return - default: - } - return err - } - - var req *proto_sentry.InboundMessage - for req, err = stream.Recv(); ; req, err = stream.Recv() { - if err != nil { - select { - case <-ctx.Done(): - return - default: - } - return err - } - if req == nil { - return - } - if err = handleInboundMessage(ctx, req, sentry); err != nil { - s, ok := status.FromError(err) - doLog := !((ok && s.Code() == codes.Canceled) || errors.Is(err, io.EOF) || errors.Is(err, context.Canceled)) - if doLog { - if rlp.IsDecodeError(err) { - log.Debug("[RecvTxMessage] Handling incoming message", "error", err) - } else { - log.Warn("[RecvTxMessage] Handling incoming message", "error", err) - } - } - } - if wg != nil { - wg.Done() - } - } -} - -func RecvPeersLoop(ctx context.Context, sentry direct.SentryClient, recentPeers *txpropagate.RecentlyConnectedPeers, wg *sync.WaitGroup) { - for { - select { - case <-ctx.Done(): - return - default: - } - - if _, err := sentry.HandShake(ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil { - s, ok := status.FromError(err) - doLog := !((ok && s.Code() == codes.Canceled) || errors.Is(err, io.EOF) || errors.Is(err, context.Canceled)) - if doLog { - log.Warn("[RecvPeers] sentry not ready yet", "err", err) - } - time.Sleep(time.Second) - continue - } - if err := RecvPeers(ctx, sentry, recentPeers, wg); err != nil { - s, ok := status.FromError(err) - doLog := !((ok && s.Code() == codes.Canceled) || errors.Is(err, io.EOF) || errors.Is(err, context.Canceled)) - if doLog { - log.Warn("[RecvPeers]", "err", err) - } - continue - } - } -} - -// RecvPeers -// wg is used only in tests to avoid time.Sleep. 
For non-test code wg == nil -func RecvPeers(ctx context.Context, - sentry direct.SentryClient, - recentPeers *txpropagate.RecentlyConnectedPeers, - wg *sync.WaitGroup, -) (err error) { - defer func() { - if rec := recover(); rec != nil { - err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) - } - }() // avoid crash because Erigon's core does many things - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := sentry.Peers(streamCtx, &proto_sentry.PeersRequest{}) - if err != nil { - select { - case <-ctx.Done(): - return - default: - } - if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled { - return - } - if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { - return - } - return err - } - - var req *proto_sentry.PeersReply - for req, err = stream.Recv(); ; req, err = stream.Recv() { - if err != nil { - select { - case <-ctx.Done(): - return - default: - } - if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled { - return - } - if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { - return - } - return err - } - if req == nil { - return - } - switch req.Event { - case proto_sentry.PeersReply_Connect: - recentPeers.AddPeer(req.PeerId) - } - if wg != nil { - wg.Done() - } - } -} - -func isPeerNotFoundErr(err error) bool { - return strings.Contains(err.Error(), "peer not found") -} -- GitLab