diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 73816e833c9654710d697328d148156cd29f9b81..e5a49429c979637937983b3dfb26981cf2116f7b 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -109,6 +109,7 @@ var ( utils.CacheTrieFlag, utils.CacheGCFlag, utils.TrieCacheGenFlag, + utils.DownloadOnlyFlag, utils.NoHistory, utils.ArchiveSyncInterval, utils.ListenPortFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 58d3fcdcf607cba50527755d461bd322a1d0040f..b79007678dc6e0eea52d35d963aa9f583d61d4cd 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -87,6 +87,7 @@ var AppHelpFlagGroups = []flagGroup{ utils.IdentityFlag, utils.LightKDFFlag, utils.WhitelistFlag, + utils.DownloadOnlyFlag, utils.NoHistory, utils.ArchiveSyncInterval, }, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 12b71742a5d8f5717a5cfacef1e43d10a80ad4b6..e86601e90c552662459d6535c5fa8ffefa6fbb8a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -297,6 +297,10 @@ var ( Name: "ulc.onlyannounce", Usage: "Ultra light server sends announcements only", } + DownloadOnlyFlag = cli.BoolFlag{ + Name: "download-only", + Usage: "Run in download only mode - only fetch blocks but not process them", + } // Dashboard settings DashboardEnabledFlag = cli.BoolFlag{ Name: "dashboard", @@ -1468,6 +1472,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { cfg.BlocksToPrune = ctx.GlobalUint64(GCModeBlockToPruneFlag.Name) cfg.PruningTimeout = ctx.GlobalDuration(GCModeTickTimeout.Name) + cfg.DownloadOnly = ctx.GlobalBoolT(DownloadOnlyFlag.Name) cfg.NoHistory = ctx.GlobalBoolT(NoHistory.Name) cfg.ArchiveSyncInterval = ctx.GlobalInt(ArchiveSyncInterval.Name) diff --git a/core/blockchain.go b/core/blockchain.go index 1b3499994c42ffedf1f0562e97234577150f434f..e0ffca15871934c74b3be187c56e3f7bc5d7e035 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -124,6 +124,7 @@ type CacheConfig struct { BlocksToPrune uint64 PruneTimeout time.Duration ArchiveSyncInterval 
uint64 + DownloadOnly bool NoHistory bool } @@ -205,6 +206,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, + DownloadOnly: false, NoHistory: false, } } @@ -1243,7 +1245,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. // writeBlockWithState writes the block and all associated state to the database, // but is expects the chain mutex to be held. -func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.IntraBlockState, tds *state.TrieDbState) (status WriteStatus, err error) { +func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, stateDb *state.IntraBlockState, tds *state.TrieDbState) (status WriteStatus, err error) { bc.wg.Add(1) defer bc.wg.Done() @@ -1264,13 +1266,17 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } rawdb.WriteBlock(bc.db, block) - tds.SetBlockNr(block.NumberU64()) + if tds != nil { + tds.SetBlockNr(block.NumberU64()) + } ctx := bc.WithContext(context.Background(), block.Number()) - if err := state.CommitBlock(ctx, tds.DbStateWriter()); err != nil { - return NonStatTy, err + if stateDb != nil { + if err := stateDb.CommitBlock(ctx, tds.DbStateWriter()); err != nil { + return NonStatTy, err + } } - if bc.enableReceipts { + if bc.enableReceipts && !bc.cacheConfig.DownloadOnly { rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), receipts) } @@ -1300,8 +1306,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
} } // Write the positional metadata for transaction/receipt lookups and preimages - rawdb.WriteTxLookupEntries(bc.db, block) - rawdb.WritePreimages(bc.db, state.Preimages()) + if !bc.cacheConfig.DownloadOnly { + rawdb.WriteTxLookupEntries(bc.db, block) + } + if stateDb != nil && !bc.cacheConfig.DownloadOnly { + rawdb.WritePreimages(bc.db, stateDb.Preimages()) + } status = CanonStatTy //} else { @@ -1547,17 +1557,19 @@ func (bc *BlockChain) insertChain(ctx context.Context, chain types.Blocks, verif } readBlockNr := parentNumber var root common.Hash - if bc.trieDbState == nil { + if bc.trieDbState == nil && !bc.cacheConfig.DownloadOnly { if _, err = bc.GetTrieDbState(); err != nil { return k, events, coalescedLogs, err } } - root = bc.trieDbState.LastRoot() + if !bc.cacheConfig.DownloadOnly { + root = bc.trieDbState.LastRoot() + } var parentRoot common.Hash if parent != nil { parentRoot = parent.Root() } - if parent != nil && root != parentRoot { + if parent != nil && root != parentRoot && !bc.cacheConfig.DownloadOnly { log.Info("Rewinding from", "block", bc.CurrentBlock().NumberU64(), "to block", readBlockNr) if _, err = bc.db.Commit(); err != nil { log.Error("Could not commit chainDb before rewinding", "error", err) @@ -1591,38 +1603,44 @@ func (bc *BlockChain) insertChain(ctx context.Context, chain types.Blocks, verif return 0, events, coalescedLogs, err } } - stateDB := state.New(bc.trieDbState) - // Process block using the parent state as reference point. 
- //t0 := time.Now() - receipts, logs, usedGas, err := bc.processor.Process(block, stateDB, bc.trieDbState, bc.vmConfig) - //t1 := time.Now() - if err != nil { - bc.db.Rollback() - bc.trieDbState = nil - bc.reportBlock(block, receipts, err) - return k, events, coalescedLogs, err - } - // Update the metrics touched during block processing - /* - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them - - triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation - trieproc := statedb.AccountReads + statedb.AccountUpdates - trieproc += statedb.StorageReads + statedb.StorageUpdates - - blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) - */ - - // Validate the state using the default validator - err = bc.Validator().ValidateState(block, parent, stateDB, bc.trieDbState, receipts, usedGas) - if err != nil { - bc.db.Rollback() - bc.trieDbState = nil - bc.reportBlock(block, receipts, err) - return k, events, coalescedLogs, err + var stateDB *state.IntraBlockState + var receipts types.Receipts + var logs []*types.Log + var usedGas uint64 + if !bc.cacheConfig.DownloadOnly { + stateDB = state.New(bc.trieDbState) + // Process block using the parent state as reference point. 
+ //t0 := time.Now() + receipts, logs, usedGas, err = bc.processor.Process(block, stateDB, bc.trieDbState, bc.vmConfig) + //t1 := time.Now() + if err != nil { + bc.db.Rollback() + bc.trieDbState = nil + bc.reportBlock(block, receipts, err) + return k, events, coalescedLogs, err + } + // Update the metrics touched during block processing + /* + accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them + storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them + accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them + storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them + + triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation + trieproc := statedb.AccountReads + statedb.AccountUpdates + trieproc += statedb.StorageReads + statedb.StorageUpdates + + blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) + */ + + // Validate the state using the default validator + err = bc.Validator().ValidateState(block, parent, stateDB, bc.trieDbState, receipts, usedGas) + if err != nil { + bc.db.Rollback() + bc.trieDbState = nil + bc.reportBlock(block, receipts, err) + return k, events, coalescedLogs, err + } } proctime := time.Since(start) @@ -1693,7 +1711,9 @@ func (bc *BlockChain) insertChain(ctx context.Context, chain types.Blocks, verif bc.trieDbState = nil return 0, events, coalescedLogs, err } - bc.trieDbState.PruneTries(false) + if bc.trieDbState != nil { + bc.trieDbState.PruneTries(false) + } log.Info("Database", "size", bc.db.Size(), "written", written) } } diff --git a/core/types/derive_sha.go b/core/types/derive_sha.go index 6dc4bac5a5b544709965a9efa052ac7315417432..74c8d8d78c06b40dae3b9440c147beebc73d09b1 100644 --- a/core/types/derive_sha.go +++ b/core/types/derive_sha.go @@ -17,8 +17,6 @@ package types import ( - "bytes" - 
"github.com/ledgerwatch/turbo-geth/common" "github.com/ledgerwatch/turbo-geth/rlp" "github.com/ledgerwatch/turbo-geth/trie" @@ -30,12 +28,128 @@ type DerivableList interface { } func DeriveSha(list DerivableList) common.Hash { - keybuf := new(bytes.Buffer) - trie := trie.New(common.Hash{}) - for i := 0; i < list.Len(); i++ { - keybuf.Reset() - rlp.Encode(keybuf, uint(i)) - trie.Update(keybuf.Bytes(), list.GetRlp(i), 0) + if list.Len() < 1 { + return trie.EmptyRoot + } + + prev := &trie.OneBytesTape{} + curr := &trie.OneBytesTape{} + succ := &trie.OneBytesTape{} + value := &trie.OneBytesTape{} + + hb := trie.NewHashBuilder() + + hb.SetKeyTape(curr) + hb.SetValueTape(value) + + hb.Reset() + prev.Reset() + curr.Reset() + succ.Reset() + + hexWriter := &hexTapeWriter{succ} + + var groups []uint16 + + traverseInLexOrder(list, func(i int, next int) { + prev.Reset() + prev.Write(curr.Bytes()) + curr.Reset() + curr.Write(succ.Bytes()) + succ.Reset() + + if next >= 0 { + encodeUint(uint(next), hexWriter) + hexWriter.Commit() + } + + value.Reset() + + if curr.Len() > 0 { + value.Write(list.GetRlp(i)) + groups, _ = trie.GenStructStep(0, hashOnly, false, prev.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups) + } + }) + + hash, _ := hb.RootHash() + return hash +} + +type bytesWriter interface { + WriteByte(byte) error +} + +// hexTapeWriter hex-encodes data and writes it directly to a tape. +type hexTapeWriter struct { + tape *trie.OneBytesTape +} + +func (w *hexTapeWriter) WriteByte(b byte) error { + w.tape.WriteByte(b / 16) + w.tape.WriteByte(b % 16) + return nil +} + +func (w *hexTapeWriter) Commit() { + w.tape.WriteByte(16) +} + +func adjustIndex(i int, l int) int { + if i >= 0 && i < 127 && i < l-1 { + return i + 1 + } else if i == 127 || (i < 127 && i >= l-1) { + return 0 + } + return i +} + +// traverseInLexOrder traverses the list indices in the order suitable for HashBuilder. +// HashBuilder requires keys to be in the lexicographical order. 
Our keys are uint indices in RLP encoding in hex. +// In RLP encoding 0 is 0080 where 1 is 000110, 2 is 000210, etc up until 128 which is 0801080010. +// So, knowing that we can order indices in the right order even w/o really sorting them. Only 0 is misplaced, and should take the position after 127. +// So, in the end we transform [0,...,127,128,...n] to [1,...,127,0,128,...,n] which will be [000110....070f10, 080010, 0801080010....] in hex encoding. +func traverseInLexOrder(list DerivableList, traverser func(int, int)) { + for i := -1; i < list.Len(); i++ { + adjustedIndex := adjustIndex(i, list.Len()) + nextIndex := i + 1 + if nextIndex >= list.Len() { + nextIndex = -1 + } + nextIndex = adjustIndex(nextIndex, list.Len()) + + traverser(adjustedIndex, nextIndex) + } +} + +func hashOnly(_ []byte) bool { + return true +} + +func encodeUint(i uint, buffer bytesWriter) { + if i == 0 { + _ = buffer.WriteByte(byte(rlp.EmptyStringCode)) + return + } + + if i < 128 { + _ = buffer.WriteByte(byte(i)) + return + } + + size := intsize(i) + _ = buffer.WriteByte(rlp.EmptyStringCode + byte(size)) + for j := 1; j <= size; j++ { + shift := uint((size - j) * 8) + w := byte(i >> shift) + _ = buffer.WriteByte(w) + } +} + +// intsize computes the minimum number of bytes required to store i. 
+func intsize(i uint) (size int) { + for size = 1; ; size++ { + if i >>= 8; i == 0 { + return size + } } - return trie.Hash() } diff --git a/core/types/derive_sha_test.go b/core/types/derive_sha_test.go new file mode 100644 index 0000000000000000000000000000000000000000..47e1122a8b17a4570da34113a83f6e864d1db8b7 --- /dev/null +++ b/core/types/derive_sha_test.go @@ -0,0 +1,114 @@ +package types + +import ( + "bytes" + "fmt" + "math/big" + "testing" + + "github.com/ledgerwatch/turbo-geth/common" + "github.com/ledgerwatch/turbo-geth/rlp" + "github.com/ledgerwatch/turbo-geth/trie" +) + +func genTransactions(n uint64) Transactions { + txs := Transactions{} + + for i := uint64(0); i < n; i++ { + tx := NewTransaction(i, common.Address{}, big.NewInt(1000+int64(i)), 10+i, big.NewInt(1000+int64(i)), []byte(fmt.Sprintf("hello%d", i))) + txs = append(txs, tx) + } + + return txs +} + +func TestEncodeUint(t *testing.T) { + for i := 0; i < 64000; i++ { + bbOld := bytes.NewBuffer(make([]byte, 10)) + bbNew := bytes.NewBuffer(make([]byte, 10)) + bbOld.Reset() + bbNew.Reset() + _ = rlp.Encode(bbOld, uint(i)) + + bbNew.Reset() + encodeUint(uint(i), bbNew) + + if !bytes.Equal(bbOld.Bytes(), bbNew.Bytes()) { + t.Errorf("unexpected byte sequence. 
got: %x (expected %x)", bbNew.Bytes(), bbOld.Bytes()) + } + } +} + +func TestDeriveSha(t *testing.T) { + tests := []DerivableList{ + Transactions{}, + genTransactions(1), + genTransactions(2), + genTransactions(4), + genTransactions(10), + genTransactions(100), + genTransactions(1000), + genTransactions(10000), + genTransactions(100000), + } + + for _, test := range tests { + checkDeriveSha(t, test) + } +} + +func checkDeriveSha(t *testing.T, list DerivableList) { + legacySha := legacyDeriveSha(list) + deriveSha := DeriveSha(list) + if !hashesEqual(legacySha, deriveSha) { + t.Errorf("unexpected hash: %v (expected: %v)\n", deriveSha.Hex(), legacySha.Hex()) + + } +} + +func hashesEqual(h1, h2 common.Hash) bool { + if len(h1) != len(h2) { + return false + } + return h1.Hex() == h2.Hex() +} + +func legacyDeriveSha(list DerivableList) common.Hash { + keybuf := new(bytes.Buffer) + trie := trie.New(common.Hash{}) + for i := 0; i < list.Len(); i++ { + keybuf.Reset() + _ = rlp.Encode(keybuf, uint(i)) + trie.Update(keybuf.Bytes(), list.GetRlp(i), 0) + } + return trie.Hash() +} + +var ( + smallTxList = genTransactions(100) + largeTxList = genTransactions(100000) +) + +func BenchmarkLegacySmallList(b *testing.B) { + for i := 0; i < b.N; i++ { + legacyDeriveSha(smallTxList) + } +} + +func BenchmarkCurrentSmallList(b *testing.B) { + for i := 0; i < b.N; i++ { + DeriveSha(smallTxList) + } +} + +func BenchmarkLegacyLargeList(b *testing.B) { + for i := 0; i < b.N; i++ { + legacyDeriveSha(largeTxList) + } +} + +func BenchmarkCurrentLargeList(b *testing.B) { + for i := 0; i < b.N; i++ { + DeriveSha(largeTxList) + } +} diff --git a/docs/programmers_guide/guide.md b/docs/programmers_guide/guide.md index e5bebe7cf04db7d2ddacdf1c13f4b2834bc4c2da..b689b580b6300a2deb11848c5faaabe6b000ec4f 100644 --- a/docs/programmers_guide/guide.md +++ b/docs/programmers_guide/guide.md @@ -386,7 +386,7 @@ In the deeper recursive step, max common prefix is empty. 
Since the common prefi the common prefix with the succeeding key (they are both empty). The optional part of the step happens, opcode `BRANCH 0123` is emitted, and `groups` is trimmed to become empty. No recursive invocation follows. -The step of this algorithm is implemented by the function `genStructStep` in [trie/structural_2.go](../../trie/structural_2.go). +The step of this algorithm is implemented by the function `GenStructStep` in [trie/structural_2.go](../../trie/structural_2.go). ### Converting sequence of keys and value into a multiproof diff --git a/eth/backend.go b/eth/backend.go index 1d464b63731433f1a7158ec5387f53ef26525ffa..06d445d56c67288cfb5849d2730ed15ba82b91f9 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -186,6 +186,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { TrieDirtyLimit: config.TrieDirtyCache, TrieCleanNoPrefetch: config.NoPrefetch, TrieTimeLimit: config.TrieTimeout, + DownloadOnly: config.DownloadOnly, NoHistory: config.NoHistory, ArchiveSyncInterval: uint64(config.ArchiveSyncInterval), } diff --git a/eth/config.go b/eth/config.go index 71f829ac5959943ed41c5144fc993bb11a9284ae..024367e513bed3d2031bb5bb075190f140fc404d 100644 --- a/eth/config.go +++ b/eth/config.go @@ -98,7 +98,10 @@ type Config struct { NoPruning bool // Whether to disable pruning and flush everything to disk NoPrefetch bool // Whether to disable prefetching and only load state on demand - NoHistory bool + NoHistory bool + // DownloadOnly is set when the node does not need to process the blocks, but simply + // download them + DownloadOnly bool ArchiveSyncInterval int BlocksBeforePruning uint64 BlocksToPrune uint64 diff --git a/trie/resolver.go b/trie/resolver.go index e0d1f064bca013d612580fb5069f1052853c0669..884493d720b7e8ec0369f0a7af8b50e4345a86e2 100644 --- a/trie/resolver.go +++ b/trie/resolver.go @@ -220,7 +220,7 @@ func (tr *Resolver) finaliseRoot() error { tr.succ.Reset() if tr.curr.Len() > 0 { var err error - tr.groups, err = 
genStructStep(tr.fieldSet, tr.currentRs.HashOnly, false, tr.prec.Bytes(), tr.curr.Bytes(), tr.succ.Bytes(), tr.hb, tr.groups) + tr.groups, err = GenStructStep(tr.fieldSet, tr.currentRs.HashOnly, false, tr.prec.Bytes(), tr.curr.Bytes(), tr.succ.Bytes(), tr.hb, tr.groups) if err != nil { return err } @@ -296,7 +296,7 @@ func (tr *Resolver) Walker(keyIdx int, k []byte, v []byte) (bool, error) { tr.succ.WriteByte(16) if tr.curr.Len() > 0 { var err error - tr.groups, err = genStructStep(tr.fieldSet, tr.currentRs.HashOnly, false, tr.prec.Bytes(), tr.curr.Bytes(), tr.succ.Bytes(), tr.hb, tr.groups) + tr.groups, err = GenStructStep(tr.fieldSet, tr.currentRs.HashOnly, false, tr.prec.Bytes(), tr.curr.Bytes(), tr.succ.Bytes(), tr.hb, tr.groups) if err != nil { return false, err } diff --git a/trie/structural_2.go b/trie/structural_2.go index d5a93c67633c1d52e6f39e2c6325a7086646f38c..51e6204ad77b0f1efd803d105d73c333144d7566 100644 --- a/trie/structural_2.go +++ b/trie/structural_2.go @@ -44,7 +44,7 @@ type structInfoReceiver interface { hash(number int) error } -// genStructStep is one step of the algorithm that generates the structural information based on the sequence of keys. +// GenStructStep is one step of the algorithm that generates the structural information based on the sequence of keys. // `fieldSet` parameter specifies whether the generated leaf should be a binary string (fieldSet==0), or // an account (in that case the opcodes `ACCOUNTLEAF`/`ACCOUNTLEAFHASH` are emitted instead of `LEAF`/`LEAFHASH`). // `hashOnly` parameter is the function that, called for a certain prefix, determines whether the trie node for that prefix needs to be @@ -59,8 +59,7 @@ type structInfoReceiver interface { // Whenever a `BRANCH` or `BRANCHHASH` opcode is emitted, the set of digits is taken from the corresponding `groups` item, which is // then removed from the slice. This signifies the usage of the number of the stack items by the `BRANCH` or `BRANCHHASH` opcode. 
// DESCRIBED: docs/programmers_guide/guide.md#separation-of-keys-and-the-structure - -func genStructStep( +func GenStructStep( fieldSet uint32, hashOnly func(prefix []byte) bool, recursive bool, @@ -153,7 +152,7 @@ func genStructStep( } // Recursion - return genStructStep(fieldSet, hashOnly, true, newPrec, newCurr, succ, e, groups) + return GenStructStep(fieldSet, hashOnly, true, newPrec, newCurr, succ, e, groups) } // BytesTape is an abstraction for an input tape that allows reading binary strings ([]byte) sequentially @@ -801,6 +800,13 @@ func (hb *HashBuilder) emptyRoot() { hb.hashStack = append(hb.hashStack, hash[:]...) } +func (hb *HashBuilder) RootHash() (common.Hash, error) { + if !hb.hasRoot() { + return common.Hash{}, fmt.Errorf("no root in the tree") + } + return hb.rootHash(), nil +} + func (hb *HashBuilder) rootHash() common.Hash { var hash common.Hash copy(hash[:], hb.hashStack[1:hashStackStride]) diff --git a/trie/structural_test.go b/trie/structural_test.go index eab10174dfc3ff4916df69ee8159a297d10714e5..05b9c959f49795896690eade663d3cd77c113687 100644 --- a/trie/structural_test.go +++ b/trie/structural_test.go @@ -76,7 +76,7 @@ func TestV2HashBuilding(t *testing.T) { succ.WriteByte(16) if curr.Len() > 0 { var err error - groups, err = genStructStep(0, func(_ []byte) bool { return true }, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups) + groups, err = GenStructStep(0, func(_ []byte) bool { return true }, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups) if err != nil { t.Errorf("Could not execute step of structGen algorithm: %v", err) } @@ -93,7 +93,7 @@ func TestV2HashBuilding(t *testing.T) { curr.Reset() curr.Write(succ.Bytes()) succ.Reset() - if _, err := genStructStep(0, func(_ []byte) bool { return true }, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups); err != nil { + if _, err := GenStructStep(0, func(_ []byte) bool { return true }, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups); err != nil { 
t.Errorf("Could not execute step of structGen algorithm: %v", err) } builtHash := hb.rootHash() @@ -150,7 +150,7 @@ func TestV2Resolution(t *testing.T) { succ.WriteByte(16) if curr.Len() > 0 { var err error - groups, err = genStructStep(0, rs.HashOnly, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups) + groups, err = GenStructStep(0, rs.HashOnly, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups) if err != nil { t.Errorf("Could not execute step of structGen algorithm: %v", err) } @@ -163,7 +163,7 @@ func TestV2Resolution(t *testing.T) { curr.Reset() curr.Write(succ.Bytes()) succ.Reset() - if _, err := genStructStep(0, rs.HashOnly, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups); err != nil { + if _, err := GenStructStep(0, rs.HashOnly, false, prec.Bytes(), curr.Bytes(), succ.Bytes(), hb, groups); err != nil { t.Errorf("Could not execute step of structGen algorithm: %v", err) } tr1 := New(common.Hash{})