Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 389 additions and 8 deletions
@@ -99,3 +99,39 @@ func TestGenesisContractChange(t *testing.T) {
// make sure balance change DOES NOT take effect
assert.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))
}
func TestEncodeSigHeaderJaipur(t *testing.T) {
// As part of the EIP-1559 fork on Mumbai, an incorrect seal hash
// was used for Bor that did not include the BaseFee. The Jaipur
// block is a hard fork to fix that.
h := &types.Header{
Difficulty: new(big.Int),
Number: big.NewInt(1),
Extra: make([]byte, 32+65),
}
var (
// hash for the block without the BaseFee
hashWithoutBaseFee = common.HexToHash("0x1be13e83939b3c4701ee57a34e10c9290ce07b0e53af0fe90b812c6881826e36")
// hash for the block with the baseFee
hashWithBaseFee = common.HexToHash("0xc55b0cac99161f71bde1423a091426b1b5b4d7598e5981ad802cce712771965b")
)
// Jaipur NOT enabled and BaseFee not set
hash := SealHash(h, &params.BorConfig{JaipurBlock: 10})
assert.Equal(t, hash, hashWithoutBaseFee)
// Jaipur enabled (Jaipur=0) and BaseFee not set
hash = SealHash(h, &params.BorConfig{JaipurBlock: 0})
assert.Equal(t, hash, hashWithoutBaseFee)
h.BaseFee = big.NewInt(2)
// Jaipur enabled (Jaipur=Header block) and BaseFee set
hash = SealHash(h, &params.BorConfig{JaipurBlock: 1})
assert.Equal(t, hash, hashWithBaseFee)
// Jaipur NOT enabled and BaseFee set
hash = SealHash(h, &params.BorConfig{JaipurBlock: 10})
assert.Equal(t, hash, hashWithoutBaseFee)
}
@@ -27,11 +27,13 @@ type IHeimdallClient interface {
Fetch(path string, query string) (*ResponseWithHeight, error)
FetchWithRetry(path string, query string) (*ResponseWithHeight, error)
FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error)
Close()
}

type HeimdallClient struct {
urlString string
client http.Client
closeCh chan struct{}
}

func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
@@ -40,6 +42,7 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
client: http.Client{
Timeout: time.Duration(5 * time.Second),
},
closeCh: make(chan struct{}),
}
return h, nil
}
@@ -96,13 +99,22 @@ func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*ResponseWithHeight, error) {
u.Path = rawPath
u.RawQuery = rawQuery
// create a new ticker for retrying the request
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
-    res, err := h.internalFetch(u)
-    if err == nil && res != nil {
-        return res, nil
-    }
-    log.Info("Retrying again in 5 seconds for next Heimdall span", "path", u.Path)
-    time.Sleep(5 * time.Second)
    select {
    case <-h.closeCh:
        log.Debug("Shutdown detected, terminating request")
        return nil, errShutdownDetected
    case <-ticker.C:
        res, err := h.internalFetch(u)
        if err == nil && res != nil {
            return res, nil
        }
        log.Info("Retrying again in 5 seconds for next Heimdall data", "path", u.Path)
    }
}
}
@@ -137,3 +149,9 @@ func (h *HeimdallClient) internalFetch(u *url.URL) (*ResponseWithHeight, error) {
return &response, nil
}
// Close sends a signal to stop the running process
func (h *HeimdallClient) Close() {
close(h.closeCh)
h.client.CloseIdleConnections()
}
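
For context, the intended lifecycle is that whoever constructs the client also calls Close() on shutdown: this unblocks any in-flight FetchWithRetry loop (which then returns errShutdownDetected) and releases idle HTTP connections. A minimal usage sketch, assuming it lives in the same package as HeimdallClient; the URL, path, query and function name are placeholders, not taken from this change:

```go
// fetchUntilShutdown is a hypothetical helper illustrating the new Close semantics.
func fetchUntilShutdown() (*ResponseWithHeight, error) {
	h, err := NewHeimdallClient("http://localhost:1317") // assumed local Heimdall REST endpoint
	if err != nil {
		return nil, err
	}
	// Closing the client stops the 5-second retry loop below and
	// closes idle connections held by the underlying http.Client.
	defer h.Close()

	// Retries every 5 seconds until the request succeeds or Close() is called,
	// in which case it returns errShutdownDetected.
	return h.FetchWithRetry("some/heimdall/path", "") // illustrative path and query
}
```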
@@ -131,7 +131,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
}
// Resolve the authorization key and check against signers
-signer, err := ecrecover(header, s.sigcache)
signer, err := ecrecover(header, s.sigcache, s.config)
if err != nil {
return nil, err
}
...
@@ -217,6 +217,7 @@ type BlockChain struct {
borReceiptsCache *lru.Cache // Cache for the most recent bor receipts per block
stateSyncData []*types.StateSyncData // State sync data
stateSyncFeed event.Feed // State sync feed
chain2HeadFeed event.Feed // Reorg/NewHead/Fork data feed
}

// NewBlockChain returns a fully initialised block chain using information
@@ -1640,10 +1641,21 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
for _, data := range bc.stateSyncData {
bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
}
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadCanonicalEvent,
NewChain: []*types.Block{block},
})
// BOR
}
} else {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadForkEvent,
NewChain: []*types.Block{block},
})
}
return status, nil
}
@@ -1737,6 +1749,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadCanonicalEvent,
NewChain: []*types.Block{lastCanon},
})
}
}()
// Start the parallel header verifier
@@ -2262,6 +2279,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
// Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 {
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadReorgEvent,
NewChain: newChain,
OldChain: oldChain,
})
logFn := log.Info
msg := "Chain reorg detected"
if len(oldChain) > 63 {
@@ -2570,6 +2594,11 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChain2HeadEvent registers a subscription of Chain2HeadEvent.
func (bc *BlockChain) SubscribeChain2HeadEvent(ch chan<- Chain2HeadEvent) event.Subscription {
return bc.scope.Track(bc.chain2HeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
...
package core
import (
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
func TestChain2HeadEvent(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
gspec = &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
}
genesis = gspec.MustCommit(db)
signer = types.LatestSigner(gspec.Config)
)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain2HeadCh := make(chan Chain2HeadEvent, 64)
blockchain.SubscribeChain2HeadEvent(chain2HeadCh)
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
replacementBlocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, gen *BlockGen) {
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
if i == 2 {
gen.OffsetTime(-9)
}
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
})
if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
type eventTest struct {
Type string
Added []common.Hash
Removed []common.Hash
}
readEvent := func(expect *eventTest) {
select {
case ev := <-chain2HeadCh:
if ev.Type != expect.Type {
t.Fatal("Type mismatch")
}
if len(ev.NewChain) != len(expect.Added) {
t.Fatal("Newchain and Added Array Size don't match")
}
if len(ev.OldChain) != len(expect.Removed) {
t.Fatal("Oldchain and Removed Array Size don't match")
}
for j := 0; j < len(ev.OldChain); j++ {
if ev.OldChain[j].Hash() != expect.Removed[j] {
t.Fatal("Oldchain hashes Do Not Match")
}
}
for j := 0; j < len(ev.NewChain); j++ {
if ev.NewChain[j].Hash() != expect.Added[j] {
t.Fatal("Newchain hashes Do Not Match")
}
}
case <-time.After(2 * time.Second):
t.Fatal("timeout")
}
}
// head event
readEvent(&eventTest{
Type: Chain2HeadCanonicalEvent,
Added: []common.Hash{
chain[2].Hash(),
}})
// fork event
readEvent(&eventTest{
Type: Chain2HeadForkEvent,
Added: []common.Hash{
replacementBlocks[0].Hash(),
}})
// fork event
readEvent(&eventTest{
Type: Chain2HeadForkEvent,
Added: []common.Hash{
replacementBlocks[1].Hash(),
}})
// reorg event
// In this event the channel receives an array of blocks in both NewChain and OldChain
readEvent(&eventTest{
Type: Chain2HeadReorgEvent,
Added: []common.Hash{
replacementBlocks[2].Hash(),
replacementBlocks[1].Hash(),
replacementBlocks[0].Hash(),
},
Removed: []common.Hash{
chain[2].Hash(),
chain[1].Hash(),
chain[0].Hash(),
},
})
// head event
readEvent(&eventTest{
Type: Chain2HeadCanonicalEvent,
Added: []common.Hash{
replacementBlocks[3].Hash(),
}})
}
@@ -8,3 +8,16 @@ import (
type StateSyncEvent struct {
Data *types.StateSyncData
}
var (
Chain2HeadReorgEvent = "reorg"
Chain2HeadCanonicalEvent = "head"
Chain2HeadForkEvent = "fork"
)
// Chain2HeadEvent is used for tracking reorg-related information
type Chain2HeadEvent struct {
NewChain []*types.Block
OldChain []*types.Block
Type string
}
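
To illustrate how the new feed is meant to be consumed, here is a minimal subscriber sketch. Only SubscribeChain2HeadEvent, the Chain2HeadEvent fields and the three event-type values come from this change; the package, function name, channel size and log messages are assumptions:

```go
package chainwatch // hypothetical package; not part of this change

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// WatchChain2Head drains the chain2head feed and logs every event until the
// subscription is closed. bc is an already-running *core.BlockChain.
func WatchChain2Head(bc *core.BlockChain) {
	ch := make(chan core.Chain2HeadEvent, 64) // buffer size is arbitrary
	sub := bc.SubscribeChain2HeadEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			switch ev.Type {
			case core.Chain2HeadCanonicalEvent: // "head"
				log.Info("New canonical head", "number", ev.NewChain[0].Number())
			case core.Chain2HeadForkEvent: // "fork"
				log.Info("Fork block observed", "hash", ev.NewChain[0].Hash())
			case core.Chain2HeadReorgEvent: // "reorg"
				log.Info("Reorg detected", "added", len(ev.NewChain), "removed", len(ev.OldChain))
			}
		case err := <-sub.Err():
			log.Error("chain2head subscription ended", "err", err)
			return
		}
	}
}
```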
@@ -151,8 +151,10 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
// This way they don't have to sync again from block 0 and still be compatible
// for block logs for future blocks. Note that already synced nodes
// won't have past block logs. Newly synced node will have all the data.
-if err := freezer.tables[freezerBorReceiptTable].Fill(freezer.tables[freezerHeaderTable].items); err != nil {
-    return nil, err
if _, ok := freezer.tables[freezerBorReceiptTable]; ok {
    if err := freezer.tables[freezerBorReceiptTable].Fill(freezer.tables[freezerHeaderTable].items); err != nil {
        return nil, err
    }
}
// Truncate all tables to common length.
...
@@ -53,6 +53,7 @@ func TestStateProcessorErrors(t *testing.T) {
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
Ethash: new(params.EthashConfig),
Bor: &params.BorConfig{BurntContract: map[string]string{"0": "0x000000000000000000000000000000000000dead"}},
}
signer = types.LatestSigner(config)
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
...
@@ -337,6 +337,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
effectiveTip = cmath.BigMin(st.gasTipCap, new(big.Int).Sub(st.gasFeeCap, st.evm.Context.BaseFee))
}
amount := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), effectiveTip)
if london {
burntContractAddress := common.HexToAddress(st.evm.ChainConfig().Bor.CalculateBurntContract(st.evm.Context.BlockNumber.Uint64()))
burnAmount := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee)
st.state.AddBalance(burntContractAddress, burnAmount)
}
st.state.AddBalance(st.evm.Context.Coinbase, amount)
output1 := new(big.Int).SetBytes(input1.Bytes())
output2 := new(big.Int).SetBytes(input2.Bytes())
...
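
For a sense of the fee split introduced here (illustrative numbers, not taken from this change): a plain transfer consuming 21,000 gas at a BaseFee of 100 gwei and an effective tip of 2 gwei would credit 21,000 × 100 gwei = 2,100,000 gwei to the address returned by CalculateBurntContract for the current block number (configured through the BurntContract map, as in the test config above), while the coinbase receives 21,000 × 2 gwei = 42,000 gwei. In other words, on Bor the base fee is redirected to a designated burnt-contract address rather than being destroyed outright.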
# Documentation
- [Command-line-interface](./cli)
- [Configuration file](./config.md)
## Deprecation notes
- The new entrypoint to run the Bor client is ```server```.
```
$ bor server
```
- TOML files to configure nodes are being deprecated. Currently, only static and trusted nodes can be configured using TOML files.
```
$ bor server --config ./legacy.toml
```
- ```Admin```, ```Personal``` and account-related endpoints in ```Eth``` are being removed from the JSON-RPC interface. Some of this functionality will be moved to the new gRPC server for operational tasks.
# Command line interface
## Commands
- [```server```](./server.md)
- [```debug```](./debug.md)
- [```account```](./account.md)
- [```account new```](./account_new.md)
- [```account list```](./account_list.md)
- [```account import```](./account_import.md)
- [```chain```](./chain.md)
- [```chain sethead```](./chain_sethead.md)
- [```peers```](./peers.md)
- [```peers add```](./peers_add.md)
- [```peers list```](./peers_list.md)
- [```peers remove```](./peers_remove.md)
- [```peers status```](./peers_status.md)
- [```status```](./status.md)
- [```chain watch```](./chain_watch.md)
- [```version```](./version.md)
# Account
The ```account``` command groups actions to interact with accounts:
- [```account new```](./account_new.md): Create a new account in the Bor client.
- [```account list```](./account_list.md): List the wallets in the Bor client.
- [```account import```](./account_import.md): Import an account to the Bor client.
# Account import
The ```account import``` command imports an account in JSON format into the Bor data directory.
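## Examples
Importing a previously exported JSON key file (the single positional file argument is an assumption based on the description above; the file name is a placeholder):
```
$ bor account import ./keyfile.json
```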
# Account list
The ```account list``` command lists all the accounts in the Bor data directory.
# Account new
The ```account new``` command creates a new local account file in the Bor data directory. Bor must not be running when executing this command.
# Chain
The ```chain``` command groups actions to interact with the blockchain in the client:
- [```chain sethead```](./chain_sethead.md): Set the current chain to a certain block.
# Chain sethead
The ```chain sethead <number>``` command sets the current chain to a certain block.
## Arguments
- ```number```: The block number to roll back to.
## Options
- ```yes```: Force set head.
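## Examples
Rolling the chain back to a specific block (the block number below is a placeholder, and passing the confirmation option as a ```--yes``` flag is an assumption based on the option listed above):
```
$ bor chain sethead 100 --yes
```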
# Chain watch
The ```chain watch``` command is used to view the chainHead, reorg and fork events in real-time.
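For example, running it against a local node (the invocation below is the whole command as documented above; the printed events presumably correspond to the head, fork and reorg event types introduced elsewhere in this changeset):
```
$ bor chain watch
```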
# Debug
The ```bor debug``` command captures a debug dump of the running client.
## Options
- ```seconds```: Number of seconds to collect the CPU profile and traces.
- ```output```: Output directory for the data dump.
## Examples
By default it creates a tar.gz file with the output:
```
$ bor debug
Starting debugger...
Created debug archive: bor-debug-2021-10-26-073819Z.tar.gz
```
Send the output to a specific directory:
```
$ bor debug --output data
Starting debugger...
Created debug directory: data/bor-debug-2021-10-26-075437Z
```
# Peers
The ```peers``` command groups actions to interact with peers:
- [```peers add```](./peers_add.md): Joins the local client to another remote peer (see the example after this list).
- [```peers list```](./peers_list.md): Lists the peers connected to the Bor client.
- [```peers remove```](./peers_remove.md): Disconnects the local client from a connected peer, if it exists.
- [```peers status```](./peers_status.md): Displays the status of a peer by its ID.
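As an example of ```peers add```, joining a remote peer by its enode URL (the enode value is a placeholder, and passing it as a positional argument is an assumption based on the description above):
```
$ bor peers add "enode://<node-id>@<host>:30303"
```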