diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index 1331ccadbdadca3dcfa1a8a7372f1f963f2756f2..d1932cdc8b7cdbffe31c88c635ee40bc6c5bb4a0 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -882,6 +882,15 @@ func testBlockHashes(chaindata string, block int, stateRoot common.Hash) {
}
}
+func printCurrentBlockNumber(chaindata string) {
+ ethDb, err := ethdb.NewBoltDatabase(chaindata)
+ check(err)
+ defer ethDb.Close()
+ hash := rawdb.ReadHeadBlockHash(ethDb)
+ number := rawdb.ReadHeaderNumber(ethDb, hash)
+ fmt.Printf("Block number: %d\n", *number)
+}
+
func printTxHashes() {
ethDb, err := ethdb.NewBoltDatabase("/Users/alexeyakhunov/Library/Ethereum/geth/chaindata")
check(err)
@@ -1170,6 +1179,26 @@ func testMemBolt() {
check(err)
}
+func printBucket(chaindata string) {
+ db, err := bolt.Open(chaindata, 0600, &bolt.Options{ReadOnly: true})
+ check(err)
+ defer db.Close()
+ f, err := os.Create("bucket.txt")
+ check(err)
+ defer f.Close()
+ fb := bufio.NewWriter(f)
+ err = db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket(dbutils.StorageHistoryBucket)
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Fprintf(fb, "%x %x\n", k, v)
+ }
+ return nil
+ })
+ check(err)
+ check(fb.Flush())
+}
+
func main() {
flag.Parse()
if *cpuprofile != "" {
@@ -1239,4 +1268,10 @@ func main() {
if *action == "dumpStorage" {
dumpStorage()
}
+ if *action == "current" {
+ printCurrentBlockNumber(*chaindata)
+ }
+ if *action == "bucket" {
+ printBucket(*chaindata)
+ }
}
diff --git a/cmd/migrate/migrate.go b/cmd/migrate/migrate.go
index 03f0cbcd99de971b9b3f2b10bb7ab61ed0fefe17..a35c0ede98073c6ebc47fe35417394b2e8cdfe3c 100644
--- a/cmd/migrate/migrate.go
+++ b/cmd/migrate/migrate.go
@@ -60,10 +60,10 @@ func convertDatabaseToCBOR(db *bolt.DB, maxTxOperations uint) error {
k = common.CopyBytes(k)
return nil
})
-
if err != nil {
return err
}
+ fmt.Printf("Transaction completed with %d records\n", i)
i = 0
}
return nil
diff --git a/cmd/state/commands/stateless.go b/cmd/state/commands/stateless.go
index 7b1d12ca9d0469c5231956bb215cf68891e133de..4566b477583a067daad5695a676dcfe3f4615a79 100644
--- a/cmd/state/commands/stateless.go
+++ b/cmd/state/commands/stateless.go
@@ -15,6 +15,8 @@ var (
witnessInterval uint64
noverify bool
bintries bool
+ starkBlocksFile string
+ starkStatsBase string
)
func init() {
@@ -30,6 +32,8 @@ func init() {
statelessCmd.Flags().Uint64Var(&witnessInterval, "witnessInterval", 1, "after which block to extract witness (put a large number like 10000000 to disable)")
statelessCmd.Flags().BoolVar(&noverify, "noVerify", false, "skip snapshot verification on loading")
statelessCmd.Flags().BoolVar(&bintries, "bintries", false, "use binary tries instead of hexary to generate/load block witnesses")
+ statelessCmd.Flags().StringVar(&starkBlocksFile, "starkBlocksFile", "", "file with the list of blocks for which to produce stark data")
+ statelessCmd.Flags().StringVar(&starkStatsBase, "starkStatsBase", "stark_stats", "template for names of the files to write stark stats in")
rootCmd.AddCommand(statelessCmd)
@@ -57,6 +61,8 @@ var statelessCmd = &cobra.Command{
!noverify,
bintries,
createDb,
+ starkBlocksFile,
+ starkStatsBase,
)
return nil
diff --git a/cmd/state/stateless/state_snapshot.go b/cmd/state/stateless/state_snapshot.go
index 5829b0d6385f7e38abe9d04d78519025e5179ecb..64bd11e7ace4e8703cff47bcfe5ff5b73f579f4b 100644
--- a/cmd/state/stateless/state_snapshot.go
+++ b/cmd/state/stateless/state_snapshot.go
@@ -12,6 +12,7 @@ import (
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/consensus/ethash"
"github.com/ledgerwatch/turbo-geth/core"
+ "github.com/ledgerwatch/turbo-geth/core/rawdb"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
"github.com/ledgerwatch/turbo-geth/core/vm"
"github.com/ledgerwatch/turbo-geth/crypto"
@@ -309,19 +310,24 @@ func compare_snapshot(stateDb ethdb.Database, db *bolt.DB, filename string) {
func checkRoots(stateDb ethdb.Database, rootHash common.Hash, blockNum uint64) {
startTime := time.Now()
- t := trie.New(rootHash)
- r := trie.NewResolver(0, true, blockNum)
- key := []byte{}
- req := t.NewResolveRequest(nil, key, 0, rootHash[:])
- fmt.Printf("new resolve request for root block with hash %x\n", rootHash)
- r.AddRequest(req)
var err error
- if err = r.ResolveWithDb(stateDb, blockNum); err != nil {
- fmt.Printf("%v\n", err)
+ if blockNum > 0 {
+ t := trie.New(rootHash)
+ r := trie.NewResolver(0, true, blockNum)
+ key := []byte{}
+ req := t.NewResolveRequest(nil, key, 0, rootHash[:])
+ fmt.Printf("new resolve request for root block with hash %x\n", rootHash)
+ r.AddRequest(req)
+ if err = r.ResolveWithDb(stateDb, blockNum); err != nil {
+ fmt.Printf("%v\n", err)
+ }
+ fmt.Printf("Trie computation took %v\n", time.Since(startTime))
+ } else {
+ fmt.Printf("block number is unknown, account trie verification skipped\n")
}
- fmt.Printf("Trie computation took %v\n", time.Since(startTime))
startTime = time.Now()
roots := make(map[common.Hash]*accounts.Account)
+ incarnationMap := make(map[uint64]int)
err = stateDb.Walk(dbutils.StorageBucket, nil, 0, func(k, v []byte) (bool, error) {
var addrHash common.Hash
copy(addrHash[:], k[:32])
@@ -334,6 +340,7 @@ func checkRoots(stateDb ethdb.Database, rootHash common.Hash, blockNum uint64) {
return false, err
}
roots[addrHash] = &account
+ incarnationMap[account.Incarnation]++
}
}
@@ -342,6 +349,7 @@ func checkRoots(stateDb ethdb.Database, rootHash common.Hash, blockNum uint64) {
if err != nil {
panic(err)
}
+ fmt.Printf("Incarnation map: %v\n", incarnationMap)
for addrHash, account := range roots {
if account != nil {
st := trie.New(account.Root)
@@ -355,12 +363,7 @@ func checkRoots(stateDb ethdb.Database, rootHash common.Hash, blockNum uint64) {
err = sr.ResolveWithDb(stateDb, blockNum)
if err != nil {
fmt.Printf("%x: %v\n", addrHash, err)
- filename := fmt.Sprintf("tries/root_%x.txt", addrHash)
- f, err := os.Create(filename)
- if err == nil {
- defer f.Close()
- st.Print(f)
- }
+ fmt.Printf("incarnation: %d, account.Root: %x\n", account.Incarnation, account.Root)
}
}
}
@@ -401,14 +404,16 @@ func VerifySnapshot(blockNum uint64, chaindata string) {
ethDb, err := ethdb.NewBoltDatabase(chaindata)
check(err)
defer ethDb.Close()
- engine := ethash.NewFullFaker()
- chainConfig := params.MainnetChainConfig
- bc, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil)
- check(err)
- currentBlock := bc.CurrentBlock()
- currentBlockNr := currentBlock.NumberU64()
+ hash := rawdb.ReadHeadBlockHash(ethDb)
+ number := rawdb.ReadHeaderNumber(ethDb, hash)
+ var currentBlockNr uint64
+ var preRoot common.Hash
+ if number != nil {
+ header := rawdb.ReadHeader(ethDb, hash, *number)
+ currentBlockNr = *number
+ preRoot = header.Root
+ }
fmt.Printf("Block number: %d\n", currentBlockNr)
- fmt.Printf("Block root hash: %x\n", currentBlock.Root())
- preRoot := currentBlock.Root()
- checkRoots(ethDb, preRoot, blockNum)
+ fmt.Printf("Block root hash: %x\n", preRoot)
+ checkRoots(ethDb, preRoot, currentBlockNr)
}
diff --git a/cmd/state/stateless/stateless.go b/cmd/state/stateless/stateless.go
index e4eac0ec5aadf2a85210200b6da32506742fc575..c14b5288e795eac9177ed17c0ef97b14c2ff385a 100644
--- a/cmd/state/stateless/stateless.go
+++ b/cmd/state/stateless/stateless.go
@@ -4,8 +4,11 @@ import (
"bytes"
"context"
"fmt"
+ "io/ioutil"
"os"
"os/signal"
+ "strconv"
+ "strings"
"syscall"
"time"
@@ -22,6 +25,7 @@ import (
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/params"
"github.com/ledgerwatch/turbo-geth/trie"
+ "github.com/ledgerwatch/turbo-geth/visual"
)
var chartColors = []drawing.Color{
@@ -72,6 +76,66 @@ func runBlock(dbstate *state.Stateless, chainConfig *params.ChainConfig,
return nil
}
+func statePicture(t *trie.Trie, codeMap map[common.Hash][]byte, number uint64) error {
+ filename := fmt.Sprintf("state_%d.dot", number)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ indexColors := visual.HexIndexColors
+ fontColors := visual.HexFontColors
+ visual.StartGraph(f, false)
+ trie.Visual(t, f, &trie.VisualOpts{
+ Highlights: nil,
+ IndexColors: indexColors,
+ FontColors: fontColors,
+ Values: true,
+ CutTerminals: 0,
+ CodeMap: codeMap,
+ CodeCompressed: false,
+ ValCompressed: false,
+ ValHex: true,
+ })
+ visual.EndGraph(f)
+ if err := f.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseStarkBlockFile(starkBlocksFile string) (map[uint64]struct{}, error) {
+ dat, err := ioutil.ReadFile(starkBlocksFile)
+ if err != nil {
+ return nil, err
+ }
+ blockStrs := strings.Split(string(dat), "\n")
+ m := make(map[uint64]struct{})
+ for _, blockStr := range blockStrs {
+ if len(blockStr) == 0 {
+ continue
+ }
+ if b, err1 := strconv.ParseUint(blockStr, 10, 64); err1 == nil {
+ m[b] = struct{}{}
+ } else {
+ return nil, err1
+ }
+
+ }
+ return m, nil
+}
+
+func starkData(witness *trie.Witness, starkStatsBase string, blockNum uint64) error {
+ f, err := os.Create(fmt.Sprintf("%s_%d.txt", starkStatsBase, blockNum))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err = trie.StarkStats(witness, f, false); err != nil {
+ return err
+ }
+ return nil
+}
+
type CreateDbFunc func(string) (ethdb.Database, error)
func Stateless(
@@ -86,7 +150,9 @@ func Stateless(
statsfile string,
verifySnapshot bool,
binary bool,
- createDb CreateDbFunc) {
+ createDb CreateDbFunc,
+ starkBlocksFile string,
+ starkStatsBase string) {
state.MaxTrieCacheGen = triesize
startTime := time.Now()
@@ -117,6 +183,11 @@ func Stateless(
stateDb, err := createDb(statefile)
check(err)
defer stateDb.Close()
+ var starkBlocks map[uint64]struct{}
+ if starkBlocksFile != "" {
+ starkBlocks, err = parseStarkBlockFile(starkBlocksFile)
+ check(err)
+ }
var preRoot common.Hash
if blockNum == 1 {
_, _, _, err = core.SetupGenesisBlock(stateDb, core.DefaultGenesisBlock())
@@ -223,7 +294,7 @@ func Stateless(
check(err)
}
finalRootFail := false
- if blockNum >= witnessThreshold && blockWitness != nil { // witness == nil means the extraction fails
+ if blockNum >= witnessThreshold && blockWitness != nil { // blockWitness == nil means the extraction fails
var s *state.Stateless
var w *trie.Witness
w, err = trie.NewWitnessFromReader(bytes.NewReader(blockWitness), false)
@@ -232,6 +303,10 @@ func Stateless(
fmt.Printf("error deserializing witness for block %d: %v\n", blockNum, err)
return
}
+ if _, ok := starkBlocks[blockNum-1]; ok {
+ err = starkData(w, starkStatsBase, blockNum-1)
+ check(err)
+ }
s, err = state.NewStateless(preRoot, w, blockNum-1, trace, binary /* is binary */)
if err != nil {
fmt.Printf("Error making stateless2 for block %d: %v\n", blockNum, err)
@@ -243,6 +318,10 @@ func Stateless(
}
return
}
+ if _, ok := starkBlocks[blockNum-1]; ok {
+ err = statePicture(s.GetTrie(), s.GetCodeMap(), blockNum-1)
+ check(err)
+ }
if err = runBlock(s, chainConfig, bcb, header, block, trace, !binary); err != nil {
fmt.Printf("Error running block %d through stateless2: %v\n", blockNum, err)
finalRootFail = true
diff --git a/core/state/stateless.go b/core/state/stateless.go
index c848a505cacfc6641752b2b75ef302e43357dcdc..d67d61e546dfcf9e0c572ed5deb62295f62d31df 100644
--- a/core/state/stateless.go
+++ b/core/state/stateless.go
@@ -301,3 +301,11 @@ func (s *Stateless) CheckRoot(expected common.Hash) error {
s.created = make(map[common.Hash]struct{})
return nil
}
+
+func (s *Stateless) GetTrie() *trie.Trie {
+ return s.t
+}
+
+func (s *Stateless) GetCodeMap() map[common.Hash][]byte {
+ return s.codeMap
+}
diff --git a/trie/stark_stats.go b/trie/stark_stats.go
new file mode 100644
index 0000000000000000000000000000000000000000..9264979db33ce348082cfca46957e41a2b017206
--- /dev/null
+++ b/trie/stark_stats.go
@@ -0,0 +1,262 @@
+package trie
+
+import (
+ "fmt"
+ "io"
+ "math/big"
+ "math/bits"
+ "sort"
+
+ "github.com/ledgerwatch/turbo-geth/common"
+ "github.com/ledgerwatch/turbo-geth/core/types/accounts"
+ "github.com/ledgerwatch/turbo-geth/trie/rlphacks"
+)
+
+type StarkStatsBuilder struct {
+ keccakCounter int // Number of Keccak invocations
+ perInputSize map[int]int // Number of invocation for certain size of input
+ sizeStack []int // Stack of input sizes
+}
+
+func NewStarkStatsBuilder() *StarkStatsBuilder {
+ return &StarkStatsBuilder{
+ keccakCounter: 0,
+ perInputSize: make(map[int]int),
+ }
+}
+
+func (hb *StarkStatsBuilder) leafHash(length int, keyHex []byte, val rlphacks.RlpSerializable) error {
+ key := keyHex[len(keyHex)-length:]
+ var compactLen int
+ var kp, kl int
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ } else {
+ compactLen = len(key)/2 + 1
+ }
+ if compactLen > 1 {
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlphacks.GenerateStructLen(lenPrefix[:], totalLen)
+ inputSize := totalLen + pt
+ if inputSize > common.HashLength {
+ inputSize = 32
+ }
+ hb.sizeStack = append(hb.sizeStack, inputSize)
+ return nil
+}
+
+func (hb *StarkStatsBuilder) leaf(length int, keyHex []byte, val rlphacks.RlpSerializable) error {
+ return hb.leafHash(length, keyHex, val)
+}
+
+func (hb *StarkStatsBuilder) extensionHash(key []byte) error {
+ var kp, kl int
+ var compactLen int
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ } else {
+ compactLen = len(key)/2 + 1
+ }
+ if compactLen > 1 {
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ totalLen := kp + kl + 33
+ var lenPrefix [4]byte
+ pt := rlphacks.GenerateStructLen(lenPrefix[:], totalLen)
+ inputSize := pt + totalLen
+ hb.keccakCounter++
+ hb.perInputSize[inputSize]++
+ hb.sizeStack[len(hb.sizeStack)-1] = 32
+ return nil
+}
+
+func (hb *StarkStatsBuilder) extension(key []byte) error {
+ return hb.extensionHash(key)
+}
+
+func (hb *StarkStatsBuilder) branchHash(set uint16) error {
+ digits := bits.OnesCount16(set)
+ inputSizes := hb.sizeStack[len(hb.sizeStack)-digits:]
+ totalLen := 17 // These are 17 length prefixes
+ var i int
+ for digit := uint(0); digit < 16; digit++ {
+ if ((uint16(1) << digit) & set) != 0 {
+ totalLen += inputSizes[i]
+ i++
+ }
+ }
+ var lenPrefix [4]byte
+ pt := rlphacks.GenerateStructLen(lenPrefix[:], totalLen)
+ inputSize := pt + totalLen
+ hb.keccakCounter++
+ hb.perInputSize[inputSize]++
+ hb.sizeStack = hb.sizeStack[:len(hb.sizeStack)-digits+1]
+ hb.sizeStack[len(hb.sizeStack)-1] = 32
+ return nil
+}
+
+func (hb *StarkStatsBuilder) branch(set uint16) error {
+ return hb.branchHash(set)
+}
+
+func (hb *StarkStatsBuilder) hash(_ common.Hash) {
+ hb.sizeStack = append(hb.sizeStack, 32)
+}
+
+func (hb *StarkStatsBuilder) code(_ []byte) common.Hash {
+ hb.sizeStack = append(hb.sizeStack, 32)
+ return common.Hash{}
+}
+
+func (hb *StarkStatsBuilder) accountLeafHash(length int, keyHex []byte, _ uint64, balance *big.Int, nonce uint64, fieldSet uint32) (err error) {
+ key := keyHex[len(keyHex)-length:]
+ var acc accounts.Account
+ acc.Root = EmptyRoot
+ acc.CodeHash = EmptyCodeHash
+ acc.Nonce = nonce
+ acc.Balance.Set(balance)
+ acc.Initialised = true
+ if fieldSet&uint32(4) == 0 && fieldSet&uint32(8) == 0 {
+ // In this case we can precompute the hash of the entire account leaf
+ hb.sizeStack = append(hb.sizeStack, 32)
+ } else {
+ if fieldSet&uint32(4) != 0 {
+ hb.sizeStack = hb.sizeStack[:len(hb.sizeStack)-1]
+ }
+ if fieldSet&uint32(8) != 0 {
+ hb.sizeStack = hb.sizeStack[:len(hb.sizeStack)-1]
+ }
+ }
+ var kp, kl int
+ var compactLen int
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ } else {
+ compactLen = len(key)/2 + 1
+ }
+ if compactLen > 1 {
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ valLen := acc.EncodingLengthForHashing()
+ valBuf := make([]byte, valLen)
+ acc.EncodeForHashing(valBuf)
+ val := rlphacks.RlpEncodedBytes(valBuf)
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlphacks.GenerateStructLen(lenPrefix[:], totalLen)
+ inputSize := pt + totalLen
+ hb.keccakCounter++
+ hb.perInputSize[inputSize]++
+ hb.sizeStack = append(hb.sizeStack, 32)
+ return nil
+}
+
+func (hb *StarkStatsBuilder) accountLeaf(length int, keyHex []byte, storageSize uint64, balance *big.Int, nonce uint64, _ uint64, fieldSet uint32) (err error) {
+ return hb.accountLeafHash(length, keyHex, storageSize, balance, nonce, fieldSet)
+}
+
+func (hb *StarkStatsBuilder) emptyRoot() {
+ hb.sizeStack = append(hb.sizeStack, 32)
+}
+
+// StarkStats collects Keccak256 stats from the witness and writes them into the file
+func StarkStats(witness *Witness, w io.Writer, trace bool) error {
+ hb := NewStarkStatsBuilder()
+
+ for _, operator := range witness.Operators {
+ switch op := operator.(type) {
+ case *OperatorLeafValue:
+ if trace {
+ fmt.Printf("LEAF ")
+ }
+ keyHex := op.Key
+ val := op.Value
+ if err := hb.leaf(len(op.Key), keyHex, rlphacks.RlpSerializableBytes(val)); err != nil {
+ return err
+ }
+ case *OperatorExtension:
+ if trace {
+ fmt.Printf("EXTENSION ")
+ }
+ if err := hb.extension(op.Key); err != nil {
+ return err
+ }
+ case *OperatorBranch:
+ if trace {
+ fmt.Printf("BRANCH ")
+ }
+ if err := hb.branch(uint16(op.Mask)); err != nil {
+ return err
+ }
+ case *OperatorHash:
+ if trace {
+ fmt.Printf("HASH ")
+ }
+ hb.hash(op.Hash)
+ case *OperatorCode:
+ if trace {
+ fmt.Printf("CODE ")
+ }
+
+ hb.code(op.Code)
+
+ case *OperatorLeafAccount:
+ if trace {
+ fmt.Printf("ACCOUNTLEAF(code=%v storage=%v) ", op.HasCode, op.HasStorage)
+ }
+ balance := big.NewInt(0)
+ balance.SetBytes(op.Balance.Bytes())
+ nonce := op.Nonce
+
+ // FIXME: probably not needed, fix hb.accountLeaf
+ fieldSet := uint32(3)
+ if op.HasCode && op.HasStorage {
+ fieldSet = 15
+ }
+
+ // Incarnation is always needed for a hashbuilder.
+ // but it is just our implementation detail needed for contract self-destruction support with our
+ // db structure. Stateless clients don't access the DB so we can just pass 0 here.
+ incarnaton := uint64(0)
+
+ if err := hb.accountLeaf(len(op.Key), op.Key, 0, balance, nonce, incarnaton, fieldSet); err != nil {
+ return err
+ }
+ case *OperatorEmptyRoot:
+ if trace {
+ fmt.Printf("EMPTYROOT ")
+ }
+ hb.emptyRoot()
+ default:
+ return fmt.Errorf("unknown operand type: %T", operator)
+ }
+ }
+ if trace {
+ fmt.Printf("\n")
+ }
+
+ inputSizes := make([]int, len(hb.perInputSize))
+ i := 0
+ for inputSize := range hb.perInputSize {
+ inputSizes[i] = inputSize
+ i++
+ }
+ sort.Ints(inputSizes)
+ fmt.Fprintf(w, "%d\n", hb.keccakCounter)
+ for _, inputSize := range inputSizes {
+ fmt.Fprintf(w, "%d %d\n", inputSize, hb.perInputSize[inputSize])
+ }
+ return nil
+}
diff --git a/trie/visual.go b/trie/visual.go
index 4baee23e0b4b244ac6ef6e7681d345118f9cd6f3..10e5a53406595c980b739fbcca4f7b64ede24645 100644
--- a/trie/visual.go
+++ b/trie/visual.go
@@ -82,6 +82,7 @@ func visualNode(nd node, hex []byte, w io.Writer, highlights [][]byte, opts *Vis
if v, ok := n.Val.(valueNode); ok {
if leaves != nil {
leaves[string(hex)] = struct{}{}
+ /*
var valStr string
if opts.ValHex {
valStr = fmt.Sprintf("%x", []byte(v))
@@ -91,7 +92,10 @@ func visualNode(nd node, hex []byte, w io.Writer, highlights [][]byte, opts *Vis
if opts.ValCompressed && len(valStr) > 10 {
valStr = fmt.Sprintf("%x..%x", []byte(v)[:2], []byte(v)[len(v)-2:])
}
- visual.Circle(w, fmt.Sprintf("e_%x", concat(hex, n.Key...)), valStr, false)
+ */
+ valHex := keybytesToHex(v)
+ valHex = valHex[:len(valHex)-1]
+ visual.HexBox(w, fmt.Sprintf("e_%x", concat(hex, n.Key...)), valHex, 32, opts.ValCompressed, false)
fmt.Fprintf(w,
`n_%x -> e_%x;
`, hex, concat(hex, n.Key...))
@@ -104,25 +108,33 @@ func visualNode(nd node, hex []byte, w io.Writer, highlights [][]byte, opts *Vis
`n_%x -> e_%x;
`, hex, accountHex)
if !a.IsEmptyCodeHash() {
- codeHex := keybytesToHex(opts.CodeMap[a.CodeHash])
- codeHex = codeHex[:len(codeHex)-1]
- visual.HexBox(w, fmt.Sprintf("c_%x", accountHex), codeHex, 32, opts.CodeCompressed, false)
+ if code, ok := opts.CodeMap[a.CodeHash]; ok {
+ codeHex := keybytesToHex(code)
+ codeHex = codeHex[:len(codeHex)-1]
+ visual.HexBox(w, fmt.Sprintf("c_%x", accountHex), codeHex, 32, opts.CodeCompressed, false)
+ } else {
+ visual.Box(w, fmt.Sprintf("c_%x", accountHex), "codeHash")
+ }
fmt.Fprintf(w,
`e_%x -> c_%x;
`, accountHex, accountHex)
}
if !a.IsEmptyRoot() {
- nKey := n.Key
- if nKey[len(nKey)-1] == 16 {
- nKey = nKey[:len(nKey)-1]
- }
- var newHighlights [][]byte
- for _, h := range highlights {
- if h != nil && bytes.HasPrefix(h, nKey) {
- newHighlights = append(newHighlights, h[len(nKey):])
+ if a.storage != nil {
+ nKey := n.Key
+ if nKey[len(nKey)-1] == 16 {
+ nKey = nKey[:len(nKey)-1]
}
+ var newHighlights [][]byte
+ for _, h := range highlights {
+ if h != nil && bytes.HasPrefix(h, nKey) {
+ newHighlights = append(newHighlights, h[len(nKey):])
+ }
+ }
+ visualNode(a.storage, accountHex[:len(accountHex)-1], w, newHighlights, opts, leaves, hashes)
+ } else {
+ visual.Box(w, fmt.Sprintf("n_%x", accountHex[:len(accountHex)-1]), "storHash")
}
- visualNode(a.storage, accountHex[:len(accountHex)-1], w, newHighlights, opts, leaves, hashes)
fmt.Fprintf(w,
`e_%x -> n_%x;
`, accountHex, accountHex[:len(accountHex)-1])