good morning!!!!

Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • github/maticnetwork/bor
  • open/bor
2 results
Show changes
Commits on Source (78)
Showing
with 568 additions and 165 deletions
...@@ -13,3 +13,8 @@ jobs: ...@@ -13,3 +13,8 @@ jobs:
run: make all run: make all
- name: "Run tests" - name: "Run tests"
run: make test run: make test
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
file: ./cover.out
# CI workflow that builds the classic Bor Docker image and pushes it to
# Docker Hub whenever a version tag is pushed (e.g. v1.2.3, and
# fork patch-release tags like v1.2.3-fix1).
name: Bor Docker Image CI
on:
  push:
    branches-ignore:
      - '**'
    tags:
      - 'v*.*.*'
      # to be used by fork patch-releases ^^
      - 'v*.*.*-*'
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build the Bor Docker image
        env:
          DOCKERHUB: ${{ secrets.DOCKERHUB }}
          DOCKERHUB_KEY: ${{ secrets.DOCKERHUB_KEY }}
        run: |
          ls -l
          echo "Docker login"
          # Feed the key via --password-stdin so it never appears in the
          # process table (docker warns when -p is used on the CLI).
          echo "$DOCKERHUB_KEY" | docker login -u "$DOCKERHUB" --password-stdin
          echo "running build"
          # Tag the image with the pushed git tag (strip the refs/tags/ prefix).
          docker build -f Dockerfile.classic -t maticnetwork/bor:${GITHUB_REF/refs\/tags\//} .
          echo "pushing image"
          docker push maticnetwork/bor:${GITHUB_REF/refs\/tags\//}
          echo "DONE!"
...@@ -16,9 +16,11 @@ builds: ...@@ -16,9 +16,11 @@ builds:
env: env:
- CC=o64-clang - CC=o64-clang
- CXX=o64-clang++ - CXX=o64-clang++
tags:
- netgo
ldflags: ldflags:
-s -w -s -w
- id: darwin-arm64 - id: darwin-arm64
main: ./cmd/geth main: ./cmd/geth
binary: bor binary: bor
...@@ -29,9 +31,11 @@ builds: ...@@ -29,9 +31,11 @@ builds:
env: env:
- CC=oa64-clang - CC=oa64-clang
- CXX=oa64-clang++ - CXX=oa64-clang++
tags:
- netgo
ldflags: ldflags:
-s -w -s -w
- id: linux-amd64 - id: linux-amd64
main: ./cmd/geth main: ./cmd/geth
binary: bor binary: bor
...@@ -42,9 +46,11 @@ builds: ...@@ -42,9 +46,11 @@ builds:
env: env:
- CC=gcc - CC=gcc
- CXX=g++ - CXX=g++
tags:
- netgo
ldflags: ldflags:
# We need to build a static binary because we are building in a glibc based system and running in a musl container # We need to build a static binary because we are building in a glibc based system and running in a musl container
-s -w -linkmode external -extldflags "-static" -s -w -extldflags "-static"
- id: linux-arm64 - id: linux-arm64
main: ./cmd/geth main: ./cmd/geth
...@@ -56,9 +62,11 @@ builds: ...@@ -56,9 +62,11 @@ builds:
env: env:
- CC=aarch64-linux-gnu-gcc - CC=aarch64-linux-gnu-gcc
- CXX=aarch64-linux-gnu-g++ - CXX=aarch64-linux-gnu-g++
tags:
- netgo
ldflags: ldflags:
# We need to build a static binary because we are building in a glibc based system and running in a musl container # We need to build a static binary because we are building in a glibc based system and running in a musl container
-s -w -linkmode external -extldflags "-static" -s -w -extldflags "-static"
nfpms: nfpms:
- vendor: 0xPolygon - vendor: 0xPolygon
...@@ -76,6 +84,12 @@ nfpms: ...@@ -76,6 +84,12 @@ nfpms:
- src: builder/files/bor.service - src: builder/files/bor.service
dst: /lib/systemd/system/bor.service dst: /lib/systemd/system/bor.service
type: config type: config
- src: builder/files/genesis-mainnet-v1.json
dst: /etc/bor/genesis-mainnet-v1.json
type: config
- src: builder/files/genesis-testnet-v4.json
dst: /etc/bor/genesis-testnet-v4.json
type: config
overrides: overrides:
rpm: rpm:
...@@ -95,8 +109,10 @@ dockers: ...@@ -95,8 +109,10 @@ dockers:
- linux-amd64 - linux-amd64
build_flag_templates: build_flag_templates:
- --platform=linux/amd64 - --platform=linux/amd64
skip_push: true extra_files:
- builder/files/genesis-mainnet-v1.json
- builder/files/genesis-testnet-v4.json
- image_templates: - image_templates:
- 0xpolygon/{{ .ProjectName }}:{{ .Version }}-arm64 - 0xpolygon/{{ .ProjectName }}:{{ .Version }}-arm64
dockerfile: Dockerfile.release dockerfile: Dockerfile.release
...@@ -105,8 +121,10 @@ dockers: ...@@ -105,8 +121,10 @@ dockers:
ids: ids:
- linux-arm64 - linux-arm64
build_flag_templates: build_flag_templates:
- --platform=linux/arm64 - --platform=linux/arm64/v8
skip_push: true extra_files:
- builder/files/genesis-mainnet-v1.json
- builder/files/genesis-testnet-v4.json
docker_manifests: docker_manifests:
- name_template: 0xpolygon/{{ .ProjectName }}:{{ .Version }} - name_template: 0xpolygon/{{ .ProjectName }}:{{ .Version }}
......
# Build Geth in a stock Go builder container
# Stage 1: compile all Bor binaries from the repository source.
FROM golang:1.17-alpine as builder
# Toolchain and headers needed by cgo and the Makefile targets.
RUN apk add --no-cache make gcc musl-dev linux-headers git bash
ADD . /bor
# bor-all builds every binary (bor, bootnode, ...) into /bor/build/bin.
RUN cd /bor && make bor-all
CMD ["/bin/bash"]
# Pull Bor into a second stage deploy alpine container
# Stage 2: minimal runtime image carrying only the compiled binaries.
FROM alpine:latest
# CA certificates for outbound TLS — presumably needed when the node talks
# to external HTTPS services; confirm against runtime requirements.
RUN apk add --no-cache ca-certificates
COPY --from=builder /bor/build/bin/bor /usr/local/bin/
COPY --from=builder /bor/build/bin/bootnode /usr/local/bin/
# 8545/8546/8547: RPC-style HTTP/WS ports; 30303 TCP+UDP: p2p networking.
EXPOSE 8545 8546 8547 30303 30303/udp
FROM alpine:3.14 FROM alpine:3.14
RUN apk add --no-cache ca-certificates RUN apk add --no-cache ca-certificates && \
mkdir -p /etc/bor
COPY bor /usr/local/bin/ COPY bor /usr/local/bin/
COPY builder/files/genesis-mainnet-v1.json /etc/bor/
COPY builder/files/genesis-testnet-v4.json /etc/bor/
EXPOSE 8545 8546 8547 30303 30303/udp EXPOSE 8545 8546 8547 30303 30303/udp
ENTRYPOINT ["bor"] ENTRYPOINT ["bor"]
...@@ -13,9 +13,6 @@ GO ?= latest ...@@ -13,9 +13,6 @@ GO ?= latest
GORUN = env GO111MODULE=on go run GORUN = env GO111MODULE=on go run
GOPATH = $(shell go env GOPATH) GOPATH = $(shell go env GOPATH)
protoc:
protoc --go_out=. --go-grpc_out=. ./command/server/proto/*.proto
bor: bor:
$(GORUN) build/ci.go install ./cmd/geth $(GORUN) build/ci.go install ./cmd/geth
mkdir -p $(GOPATH)/bin/ mkdir -p $(GOPATH)/bin/
...@@ -28,6 +25,9 @@ bor-all: ...@@ -28,6 +25,9 @@ bor-all:
cp $(GOBIN)/geth $(GOBIN)/bor cp $(GOBIN)/geth $(GOBIN)/bor
cp $(GOBIN)/* $(GOPATH)/bin/ cp $(GOBIN)/* $(GOPATH)/bin/
protoc:
protoc --go_out=. --go-grpc_out=. ./command/server/proto/*.proto
geth: geth:
$(GORUN) build/ci.go install ./cmd/geth $(GORUN) build/ci.go install ./cmd/geth
@echo "Done building." @echo "Done building."
...@@ -48,10 +48,9 @@ ios: ...@@ -48,10 +48,9 @@ ios:
@echo "Done building." @echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library." @echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test: all test:
# $(GORUN) build/ci.go test # Skip mobile and cmd tests since they are being deprecated
go test github.com/ethereum/go-ethereum/consensus/bor go test -v $$(go list ./... | grep -v go-ethereum/cmd/) -cover -coverprofile=cover.out
go test github.com/ethereum/go-ethereum/tests/bor
lint: ## Run linters. lint: ## Run linters.
$(GORUN) build/ci.go lint $(GORUN) build/ci.go lint
......
...@@ -6,11 +6,21 @@ ...@@ -6,11 +6,21 @@
[Service] [Service]
Restart=on-failure Restart=on-failure
RestartSec=5s RestartSec=5s
WorkingDirectory=$NODE_DIR ExecStart=/usr/local/bin/bor \
EnvironmentFile=/etc/matic/metadata --bor-mumbai \
ExecStart=/usr/local/bin/bor $VALIDATOR_ADDRESS # --bor-mainnet \
--datadir /var/lib/bor/data \
--bootnodes "enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303,enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303"
# Validator params
# Uncomment and configure the following lines in case you run a validator
# --keystore /var/lib/bor/keystore \
# --unlock [VALIDATOR ADDRESS] \
# --password /var/lib/bor/password.txt \
# --allow-insecure-unlock \
# --nodiscover --maxpeers 1 \
# --mine
Type=simple Type=simple
User=$USER User=root
KillSignal=SIGINT KillSignal=SIGINT
TimeoutStopSec=120 TimeoutStopSec=120
......
This diff is collapsed.
This diff is collapsed.
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/internal/cli"
)

// main is the entry point of the bor binary. It forwards all command-line
// arguments (excluding the program name) to the CLI runner and exits the
// process with whatever status code the runner returns.
func main() {
	os.Exit(cli.Run(os.Args[1:]))
}
...@@ -347,7 +347,7 @@ func setDefaultMumbaiGethConfig(ctx *cli.Context, config *gethConfig) { ...@@ -347,7 +347,7 @@ func setDefaultMumbaiGethConfig(ctx *cli.Context, config *gethConfig) {
config.Node.HTTPPort = 8545 config.Node.HTTPPort = 8545
config.Node.IPCPath = utils.MakeDataDir(ctx) + "/bor.ipc" config.Node.IPCPath = utils.MakeDataDir(ctx) + "/bor.ipc"
config.Node.HTTPModules = []string{"eth", "net", "web3", "txpool", "bor"} config.Node.HTTPModules = []string{"eth", "net", "web3", "txpool", "bor"}
config.Eth.SyncMode = downloader.SnapSync config.Eth.SyncMode = downloader.FullSync
config.Eth.NetworkId = 80001 config.Eth.NetworkId = 80001
config.Eth.Miner.GasCeil = 20000000 config.Eth.Miner.GasCeil = 20000000
//--miner.gastarget is deprecated, no longer used //--miner.gastarget is deprecated, no longer used
...@@ -370,7 +370,7 @@ func setDefaultBorMainnetGethConfig(ctx *cli.Context, config *gethConfig) { ...@@ -370,7 +370,7 @@ func setDefaultBorMainnetGethConfig(ctx *cli.Context, config *gethConfig) {
config.Node.HTTPPort = 8545 config.Node.HTTPPort = 8545
config.Node.IPCPath = utils.MakeDataDir(ctx) + "/bor.ipc" config.Node.IPCPath = utils.MakeDataDir(ctx) + "/bor.ipc"
config.Node.HTTPModules = []string{"eth", "net", "web3", "txpool", "bor"} config.Node.HTTPModules = []string{"eth", "net", "web3", "txpool", "bor"}
config.Eth.SyncMode = downloader.SnapSync config.Eth.SyncMode = downloader.FullSync
config.Eth.NetworkId = 137 config.Eth.NetworkId = 137
config.Eth.Miner.GasCeil = 20000000 config.Eth.Miner.GasCeil = 20000000
//--miner.gastarget is deprecated, no longer used //--miner.gastarget is deprecated, no longer used
......
...@@ -1645,13 +1645,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { ...@@ -1645,13 +1645,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.NetworkId = 80001 cfg.NetworkId = 80001
} }
cfg.Genesis = core.DefaultMumbaiGenesisBlock() cfg.Genesis = core.DefaultMumbaiGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.MumbaiGenesisHash)
case ctx.GlobalBool(BorMainnetFlag.Name): case ctx.GlobalBool(BorMainnetFlag.Name):
if !ctx.GlobalIsSet(BorMainnetFlag.Name) { if !ctx.GlobalIsSet(BorMainnetFlag.Name) {
cfg.NetworkId = 137 cfg.NetworkId = 137
} }
cfg.Genesis = core.DefaultBorMainnetGenesisBlock() cfg.Genesis = core.DefaultBorMainnetGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.BorMainnetGenesisHash)
case ctx.GlobalBool(DeveloperFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) { if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337 cfg.NetworkId = 1337
......
package chains
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
// mainnetBor describes the Bor mainnet chain (network id 137): its genesis
// block, consensus (Bor) parameters, and the default bootnodes used for
// peer discovery.
var mainnetBor = &Chain{
	// Hash of the mainnet genesis block.
	Hash:      common.HexToHash("0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b"),
	NetworkId: 137,
	Genesis: &core.Genesis{
		Config: &params.ChainConfig{
			ChainID:        big.NewInt(137),
			HomesteadBlock: big.NewInt(0),
			DAOForkBlock:   nil,
			DAOForkSupport: true,
			EIP150Hash:     common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
			EIP150Block:    big.NewInt(0),
			EIP155Block:    big.NewInt(0),
			EIP158Block:    big.NewInt(0),
			// All pre-Istanbul forks are active from genesis; later forks
			// activate at the block heights below.
			ByzantiumBlock:      big.NewInt(0),
			ConstantinopleBlock: big.NewInt(0),
			PetersburgBlock:     big.NewInt(0),
			IstanbulBlock:       big.NewInt(3395000),
			MuirGlacierBlock:    big.NewInt(3395000),
			BerlinBlock:         big.NewInt(14750000),
			Bor: &params.BorConfig{
				Period:                2,
				ProducerDelay:         6,
				Sprint:                64,
				BackupMultiplier:      2,
				// System contracts consulted by the Bor consensus engine.
				ValidatorContract:     "0x0000000000000000000000000000000000001000",
				StateReceiverContract: "0x0000000000000000000000000000000000001001",
				// Per-block overrides of the number of state-sync records —
				// presumably a fix for a historical incident on these blocks;
				// confirm against chain history before changing.
				OverrideStateSyncRecords: map[string]int{
					"14949120": 8,
					"14949184": 0,
					"14953472": 0,
					"14953536": 5,
					"14953600": 0,
					"14953664": 0,
					"14953728": 0,
					"14953792": 0,
					"14953856": 0,
				},
			},
		},
		Nonce:      0,
		Timestamp:  1590824836,
		GasLimit:   10000000,
		Difficulty: big.NewInt(1),
		Mixhash:    common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
		Coinbase:   common.HexToAddress("0x0000000000000000000000000000000000000000"),
		// Genesis account balances/code loaded from the embedded alloc file.
		Alloc: readPrealloc("allocs/mainnet.json"),
	},
	// Default peers used to bootstrap p2p discovery.
	Bootnodes: []string{
		"enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303",
		"enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303",
	},
}
package chains
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
// mumbaiTestnet describes the Mumbai test network (network id 80001): its
// genesis block, consensus (Bor) parameters, and the default bootnodes
// used for peer discovery.
var mumbaiTestnet = &Chain{
	// Hash of the mumbai genesis block.
	Hash:      common.HexToHash("0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7"),
	NetworkId: 80001,
	Genesis: &core.Genesis{
		Config: &params.ChainConfig{
			ChainID:        big.NewInt(80001),
			HomesteadBlock: big.NewInt(0),
			DAOForkBlock:   nil,
			DAOForkSupport: true,
			EIP150Hash:     common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
			EIP150Block:    big.NewInt(0),
			EIP155Block:    big.NewInt(0),
			EIP158Block:    big.NewInt(0),
			// All pre-Istanbul forks are active from genesis; later forks
			// activate at the block heights below.
			ByzantiumBlock:      big.NewInt(0),
			ConstantinopleBlock: big.NewInt(0),
			PetersburgBlock:     big.NewInt(0),
			IstanbulBlock:       big.NewInt(2722000),
			MuirGlacierBlock:    big.NewInt(2722000),
			BerlinBlock:         big.NewInt(13996000),
			Bor: &params.BorConfig{
				Period:           2,
				ProducerDelay:    6,
				Sprint:           64,
				BackupMultiplier: 2,
				// System contracts consulted by the Bor consensus engine.
				ValidatorContract:     "0x0000000000000000000000000000000000001000",
				StateReceiverContract: "0x0000000000000000000000000000000000001001",
			},
		},
		Nonce:      0,
		Timestamp:  1558348305,
		GasLimit:   10000000,
		Difficulty: big.NewInt(1),
		Mixhash:    common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
		Coinbase:   common.HexToAddress("0x0000000000000000000000000000000000000000"),
		// Genesis account balances/code loaded from the embedded alloc file.
		Alloc: readPrealloc("allocs/mumbai.json"),
	},
	// Default peers used to bootstrap p2p discovery.
	Bootnodes: []string{
		"enode://320553cda00dfc003f499a3ce9598029f364fbb3ed1222fdc20a94d97dcc4d8ba0cd0bfa996579dcc6d17a534741fb0a5da303a90579431259150de66b597251@54.147.31.250:30303",
		"enode://f0f48a8781629f95ff02606081e6e43e4aebd503f3d07fc931fad7dd5ca1ba52bd849a6f6c3be0e375cf13c9ae04d859c4a9ae3546dc8ed4f10aa5dbb47d4998@34.226.134.117:30303",
	},
}
...@@ -116,6 +116,9 @@ var ( ...@@ -116,6 +116,9 @@ var (
// errOutOfRangeChain is returned if an authorization list is attempted to // errOutOfRangeChain is returned if an authorization list is attempted to
// be modified via out-of-range or non-contiguous headers. // be modified via out-of-range or non-contiguous headers.
errOutOfRangeChain = errors.New("out of range or non-contiguous chain") errOutOfRangeChain = errors.New("out of range or non-contiguous chain")
// errShutdownDetected is returned if a shutdown was detected
errShutdownDetected = errors.New("shutdown detected")
) )
// SignerFn is a signer callback function to request a header to be signed by a // SignerFn is a signer callback function to request a header to be signed by a
...@@ -123,7 +126,7 @@ var ( ...@@ -123,7 +126,7 @@ var (
type SignerFn func(accounts.Account, string, []byte) ([]byte, error) type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
// ecrecover extracts the Ethereum account address from a signed header. // ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) { func ecrecover(header *types.Header, sigcache *lru.ARCCache, c *params.BorConfig) (common.Address, error) {
// If the signature's already cached, return that // If the signature's already cached, return that
hash := header.Hash() hash := header.Hash()
if address, known := sigcache.Get(hash); known { if address, known := sigcache.Get(hash); known {
...@@ -136,7 +139,7 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, er ...@@ -136,7 +139,7 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, er
signature := header.Extra[len(header.Extra)-extraSeal:] signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address // Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(SealHash(header).Bytes(), signature) pubkey, err := crypto.Ecrecover(SealHash(header, c).Bytes(), signature)
if err != nil { if err != nil {
return common.Address{}, err return common.Address{}, err
} }
...@@ -148,15 +151,15 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, er ...@@ -148,15 +151,15 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, er
} }
// SealHash returns the hash of a block prior to it being sealed. // SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *types.Header) (hash common.Hash) { func SealHash(header *types.Header, c *params.BorConfig) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
encodeSigHeader(hasher, header) encodeSigHeader(hasher, header, c)
hasher.Sum(hash[:0]) hasher.Sum(hash[:0])
return hash return hash
} }
func encodeSigHeader(w io.Writer, header *types.Header) { func encodeSigHeader(w io.Writer, header *types.Header, c *params.BorConfig) {
err := rlp.Encode(w, []interface{}{ enc := []interface{}{
header.ParentHash, header.ParentHash,
header.UncleHash, header.UncleHash,
header.Coinbase, header.Coinbase,
...@@ -172,8 +175,13 @@ func encodeSigHeader(w io.Writer, header *types.Header) { ...@@ -172,8 +175,13 @@ func encodeSigHeader(w io.Writer, header *types.Header) {
header.Extra[:len(header.Extra)-65], // Yes, this will panic if extra is too short header.Extra[:len(header.Extra)-65], // Yes, this will panic if extra is too short
header.MixDigest, header.MixDigest,
header.Nonce, header.Nonce,
}) }
if err != nil { if c.IsJaipur(header.Number.Uint64()) {
if header.BaseFee != nil {
enc = append(enc, header.BaseFee)
}
}
if err := rlp.Encode(w, enc); err != nil {
panic("can't encode: " + err.Error()) panic("can't encode: " + err.Error())
} }
} }
...@@ -182,12 +190,12 @@ func encodeSigHeader(w io.Writer, header *types.Header) { ...@@ -182,12 +190,12 @@ func encodeSigHeader(w io.Writer, header *types.Header) {
func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint64 { func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint64 {
// When the block is the first block of the sprint, it is expected to be delayed by `producerDelay`. // When the block is the first block of the sprint, it is expected to be delayed by `producerDelay`.
// That is to allow time for block propagation in the last sprint // That is to allow time for block propagation in the last sprint
delay := c.Period delay := c.CalculatePeriod(number)
if number%c.Sprint == 0 { if number%c.Sprint == 0 {
delay = c.ProducerDelay delay = c.ProducerDelay
} }
if succession > 0 { if succession > 0 {
delay += uint64(succession) * c.BackupMultiplier delay += uint64(succession) * c.CalculateBackupMultiplier(number)
} }
return delay return delay
} }
...@@ -199,9 +207,9 @@ func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint6 ...@@ -199,9 +207,9 @@ func CalcProducerDelay(number uint64, succession int, c *params.BorConfig) uint6
// Note, the method requires the extra data to be at least 65 bytes, otherwise it // Note, the method requires the extra data to be at least 65 bytes, otherwise it
// panics. This is done to avoid accidentally using both forms (signature present // panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header. // or not), which could be abused to produce different hashes for the same header.
func BorRLP(header *types.Header) []byte { func BorRLP(header *types.Header, c *params.BorConfig) []byte {
b := new(bytes.Buffer) b := new(bytes.Buffer)
encodeSigHeader(b, header) encodeSigHeader(b, header, c)
return b.Bytes() return b.Bytes()
} }
...@@ -280,7 +288,7 @@ func New( ...@@ -280,7 +288,7 @@ func New(
// Author implements consensus.Engine, returning the Ethereum address recovered // Author implements consensus.Engine, returning the Ethereum address recovered
// from the signature in the header's extra-data section. // from the signature in the header's extra-data section.
func (c *Bor) Author(header *types.Header) (common.Address, error) { func (c *Bor) Author(header *types.Header) (common.Address, error) {
return ecrecover(header, c.signatures) return ecrecover(header, c.signatures, c.config)
} }
// VerifyHeader checks whether a header conforms to the consensus rules. // VerifyHeader checks whether a header conforms to the consensus rules.
...@@ -353,6 +361,11 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head ...@@ -353,6 +361,11 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
return errInvalidDifficulty return errInvalidDifficulty
} }
} }
// Verify that the gas limit is <= 2^63-1
cap := uint64(0x7fffffffffffffff)
if header.GasLimit > cap {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
}
// If all checks passed, validate any special fields for hard forks // If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
return err return err
...@@ -396,7 +409,24 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t ...@@ -396,7 +409,24 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
return consensus.ErrUnknownAncestor return consensus.ErrUnknownAncestor
} }
if parent.Time+c.config.Period > header.Time { // Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
if !chain.Config().IsLondon(header.Number) {
// Verify BaseFee not present before EIP-1559 fork.
if header.BaseFee != nil {
return fmt.Errorf("invalid baseFee before fork: have %d, want <nil>", header.BaseFee)
}
if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil {
return err
}
} else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil {
// Verify the header's EIP-1559 attributes.
return err
}
if parent.Time+c.config.CalculatePeriod(number) > header.Time {
return ErrInvalidTimestamp return ErrInvalidTimestamp
} }
...@@ -556,7 +586,7 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header ...@@ -556,7 +586,7 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header
} }
// Resolve the authorization key and check against signers // Resolve the authorization key and check against signers
signer, err := ecrecover(header, c.signatures) signer, err := ecrecover(header, c.signatures, c.config)
if err != nil { if err != nil {
return err return err
} }
...@@ -791,7 +821,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result ...@@ -791,7 +821,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
return errUnknownBlock return errUnknownBlock
} }
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing) // For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 { if c.config.CalculatePeriod(number) == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions") log.Info("Sealing paused, waiting for transactions")
return nil return nil
} }
...@@ -819,10 +849,10 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result ...@@ -819,10 +849,10 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
// Sweet, the protocol permits us to sign the block, wait for our time // Sweet, the protocol permits us to sign the block, wait for our time
delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple
// wiggle was already accounted for in header.Time, this is just for logging // wiggle was already accounted for in header.Time, this is just for logging
wiggle := time.Duration(successionNumber) * time.Duration(c.config.BackupMultiplier) * time.Second wiggle := time.Duration(successionNumber) * time.Duration(c.config.CalculateBackupMultiplier(number)) * time.Second
// Sign all the things! // Sign all the things!
sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeBor, BorRLP(header)) sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeBor, BorRLP(header, c.config))
if err != nil { if err != nil {
return err return err
} }
...@@ -854,7 +884,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result ...@@ -854,7 +884,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
select { select {
case results <- block.WithSeal(header): case results <- block.WithSeal(header):
default: default:
log.Warn("Sealing result was not read by miner", "number", number, "sealhash", SealHash(header)) log.Warn("Sealing result was not read by miner", "number", number, "sealhash", SealHash(header, c.config))
} }
}() }()
return nil return nil
...@@ -873,7 +903,7 @@ func (c *Bor) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, par ...@@ -873,7 +903,7 @@ func (c *Bor) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, par
// SealHash returns the hash of a block prior to it being sealed. // SealHash returns the hash of a block prior to it being sealed.
func (c *Bor) SealHash(header *types.Header) common.Hash { func (c *Bor) SealHash(header *types.Header) common.Hash {
return SealHash(header) return SealHash(header, c.config)
} }
// APIs implements consensus.Engine, returning the user facing RPC API to allow // APIs implements consensus.Engine, returning the user facing RPC API to allow
...@@ -889,6 +919,7 @@ func (c *Bor) APIs(chain consensus.ChainHeaderReader) []rpc.API { ...@@ -889,6 +919,7 @@ func (c *Bor) APIs(chain consensus.ChainHeaderReader) []rpc.API {
// Close implements consensus.Engine. It's a noop for bor as there are no background threads. // Close implements consensus.Engine. It's a noop for bor as there are no background threads.
func (c *Bor) Close() error { func (c *Bor) Close() error {
c.HeimdallClient.Close()
return nil return nil
} }
......
...@@ -99,3 +99,39 @@ func TestGenesisContractChange(t *testing.T) { ...@@ -99,3 +99,39 @@ func TestGenesisContractChange(t *testing.T) {
// make sure balance change DOES NOT take effect // make sure balance change DOES NOT take effect
assert.Equal(t, statedb.GetBalance(addr0), big.NewInt(0)) assert.Equal(t, statedb.GetBalance(addr0), big.NewInt(0))
} }
// TestEncodeSigHeaderJaipur verifies the seal-hash fix shipped with the
// Jaipur hard fork: during the EIP-1559 rollout on mumbai an incorrect
// seal hash that did not include the BaseFee was used for Bor, and the
// Jaipur fork activates the corrected encoding.
func TestEncodeSigHeaderJaipur(t *testing.T) {
	// newHeader builds a minimal header with the given (possibly nil) BaseFee.
	newHeader := func(baseFee *big.Int) *types.Header {
		return &types.Header{
			Difficulty: new(big.Int),
			Number:     big.NewInt(1),
			Extra:      make([]byte, 32+65),
			BaseFee:    baseFee,
		}
	}

	var (
		// seal hash when the BaseFee is left out of the encoding
		hashWithoutBaseFee = common.HexToHash("0x1be13e83939b3c4701ee57a34e10c9290ce07b0e53af0fe90b812c6881826e36")
		// seal hash when the BaseFee is part of the encoding
		hashWithBaseFee = common.HexToHash("0xc55b0cac99161f71bde1423a091426b1b5b4d7598e5981ad802cce712771965b")
	)

	cases := []struct {
		desc    string
		config  *params.BorConfig
		baseFee *big.Int
		want    common.Hash
	}{
		{"Jaipur NOT enabled, BaseFee unset", &params.BorConfig{JaipurBlock: 10}, nil, hashWithoutBaseFee},
		{"Jaipur enabled (Jaipur=0), BaseFee unset", &params.BorConfig{JaipurBlock: 0}, nil, hashWithoutBaseFee},
		{"Jaipur enabled (Jaipur=header block), BaseFee set", &params.BorConfig{JaipurBlock: 1}, big.NewInt(2), hashWithBaseFee},
		{"Jaipur NOT enabled, BaseFee set", &params.BorConfig{JaipurBlock: 10}, big.NewInt(2), hashWithoutBaseFee},
	}

	for _, tc := range cases {
		got := SealHash(newHeader(tc.baseFee), tc.config)
		assert.Equal(t, got, tc.want, tc.desc)
	}
}
...@@ -27,11 +27,13 @@ type IHeimdallClient interface { ...@@ -27,11 +27,13 @@ type IHeimdallClient interface {
Fetch(path string, query string) (*ResponseWithHeight, error) Fetch(path string, query string) (*ResponseWithHeight, error)
FetchWithRetry(path string, query string) (*ResponseWithHeight, error) FetchWithRetry(path string, query string) (*ResponseWithHeight, error)
FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error) FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error)
Close()
} }
type HeimdallClient struct { type HeimdallClient struct {
urlString string urlString string
client http.Client client http.Client
closeCh chan struct{}
} }
func NewHeimdallClient(urlString string) (*HeimdallClient, error) { func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
...@@ -40,6 +42,7 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) { ...@@ -40,6 +42,7 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
client: http.Client{ client: http.Client{
Timeout: time.Duration(5 * time.Second), Timeout: time.Duration(5 * time.Second),
}, },
closeCh: make(chan struct{}),
} }
return h, nil return h, nil
} }
...@@ -96,13 +99,22 @@ func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*Respo ...@@ -96,13 +99,22 @@ func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*Respo
u.Path = rawPath u.Path = rawPath
u.RawQuery = rawQuery u.RawQuery = rawQuery
// create a new ticker for retrying the request
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for { for {
res, err := h.internalFetch(u) select {
if err == nil && res != nil { case <-h.closeCh:
return res, nil log.Debug("Shutdown detected, terminating request")
return nil, errShutdownDetected
case <-ticker.C:
res, err := h.internalFetch(u)
if err == nil && res != nil {
return res, nil
}
log.Info("Retrying again in 5 seconds for next Heimdall data", "path", u.Path)
} }
log.Info("Retrying again in 5 seconds for next Heimdall span", "path", u.Path)
time.Sleep(5 * time.Second)
} }
} }
...@@ -137,3 +149,9 @@ func (h *HeimdallClient) internalFetch(u *url.URL) (*ResponseWithHeight, error) ...@@ -137,3 +149,9 @@ func (h *HeimdallClient) internalFetch(u *url.URL) (*ResponseWithHeight, error)
return &response, nil return &response, nil
} }
// Close sends a signal to stop the running process
func (h *HeimdallClient) Close() {
close(h.closeCh)
h.client.CloseIdleConnections()
}
...@@ -131,7 +131,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { ...@@ -131,7 +131,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
} }
// Resolve the authorization key and check against signers // Resolve the authorization key and check against signers
signer, err := ecrecover(header, s.sigcache) signer, err := ecrecover(header, s.sigcache, s.config)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -217,6 +217,7 @@ type BlockChain struct { ...@@ -217,6 +217,7 @@ type BlockChain struct {
borReceiptsCache *lru.Cache // Cache for the most recent bor receipt receipts per block borReceiptsCache *lru.Cache // Cache for the most recent bor receipt receipts per block
stateSyncData []*types.StateSyncData // State sync data stateSyncData []*types.StateSyncData // State sync data
stateSyncFeed event.Feed // State sync feed stateSyncFeed event.Feed // State sync feed
chain2HeadFeed event.Feed // Reorg/NewHead/Fork data feed
} }
// NewBlockChain returns a fully initialised block chain using information // NewBlockChain returns a fully initialised block chain using information
...@@ -1640,10 +1641,21 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. ...@@ -1640,10 +1641,21 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
for _, data := range bc.stateSyncData { for _, data := range bc.stateSyncData {
bc.stateSyncFeed.Send(StateSyncEvent{Data: data}) bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
} }
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadCanonicalEvent,
NewChain: []*types.Block{block},
})
// BOR // BOR
} }
} else { } else {
bc.chainSideFeed.Send(ChainSideEvent{Block: block}) bc.chainSideFeed.Send(ChainSideEvent{Block: block})
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadForkEvent,
NewChain: []*types.Block{block},
})
} }
return status, nil return status, nil
} }
...@@ -1737,6 +1749,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er ...@@ -1737,6 +1749,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
defer func() { defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadCanonicalEvent,
NewChain: []*types.Block{lastCanon},
})
} }
}() }()
// Start the parallel header verifier // Start the parallel header verifier
...@@ -2262,6 +2279,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { ...@@ -2262,6 +2279,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
} }
// Ensure the user sees large reorgs // Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 { if len(oldChain) > 0 && len(newChain) > 0 {
bc.chain2HeadFeed.Send(Chain2HeadEvent{
Type: Chain2HeadReorgEvent,
NewChain: newChain,
OldChain: oldChain,
})
logFn := log.Info logFn := log.Info
msg := "Chain reorg detected" msg := "Chain reorg detected"
if len(oldChain) > 63 { if len(oldChain) > 63 {
...@@ -2570,6 +2594,11 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su ...@@ -2570,6 +2594,11 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
} }
// SubscribeChain2HeadEvent registers a subscription of Chain2HeadEvent.
func (bc *BlockChain) SubscribeChain2HeadEvent(ch chan<- Chain2HeadEvent) event.Subscription {
return bc.scope.Track(bc.chain2HeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent. // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
......
package core
import (
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
// TestChain2HeadEvent verifies the Chain2Head event feed: inserting a
// canonical chain emits a head event, inserting a heavier side chain emits
// fork events for the non-canonical blocks, then a reorg event carrying the
// full old/new segments, and finally a head event for the new tip.
func TestChain2HeadEvent(t *testing.T) {
	var (
		db      = rawdb.NewMemoryDatabase()
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		gspec   = &Genesis{
			Config: params.TestChainConfig,
			Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
		}
		genesis = gspec.MustCommit(db)
		signer  = types.LatestSigner(gspec.Config)
	)

	blockchain, err := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create blockchain: %v", err)
	}
	defer blockchain.Stop()

	// Buffered channel so event delivery never blocks the inserter.
	chain2HeadCh := make(chan Chain2HeadEvent, 64)
	blockchain.SubscribeChain2HeadEvent(chain2HeadCh)

	// Build and insert an initial 3-block canonical chain on top of genesis.
	chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	// Generate a longer (4-block) side chain from genesis. Its first blocks
	// arrive as forks; once it outgrows the canonical chain it forces a reorg.
	replacementBlocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, gen *BlockGen) {
		tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
		// Check the signing error immediately, before any unrelated work.
		if err != nil {
			t.Fatalf("failed to create tx: %v", err)
		}
		// Shift the third block's timestamp so the side chain diverges from
		// the original chain's difficulty/time schedule.
		if i == 2 {
			gen.OffsetTime(-9)
		}
		gen.AddTx(tx)
	})
	if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	// eventTest describes one expected event: its type plus the hashes
	// expected in NewChain (Added) and OldChain (Removed), in order.
	type eventTest struct {
		Type    string
		Added   []common.Hash
		Removed []common.Hash
	}

	// readEvent pops the next event from the channel (with a timeout) and
	// asserts it matches the expectation exactly.
	readEvent := func(expect *eventTest) {
		select {
		case ev := <-chain2HeadCh:
			if ev.Type != expect.Type {
				t.Fatal("Type mismatch")
			}

			if len(ev.NewChain) != len(expect.Added) {
				t.Fatal("Newchain and Added Array Size don't match")
			}
			if len(ev.OldChain) != len(expect.Removed) {
				t.Fatal("Oldchain and Removed Array Size don't match")
			}

			for j := 0; j < len(ev.OldChain); j++ {
				if ev.OldChain[j].Hash() != expect.Removed[j] {
					t.Fatal("Oldchain hashes Do Not Match")
				}
			}
			for j := 0; j < len(ev.NewChain); j++ {
				if ev.NewChain[j].Hash() != expect.Added[j] {
					t.Fatal("Newchain hashes Do Not Match")
				}
			}
		case <-time.After(2 * time.Second):
			t.Fatal("timeout")
		}
	}

	// head event
	readEvent(&eventTest{
		Type: Chain2HeadCanonicalEvent,
		Added: []common.Hash{
			chain[2].Hash(),
		}})

	// fork event
	readEvent(&eventTest{
		Type: Chain2HeadForkEvent,
		Added: []common.Hash{
			replacementBlocks[0].Hash(),
		}})

	// fork event
	readEvent(&eventTest{
		Type: Chain2HeadForkEvent,
		Added: []common.Hash{
			replacementBlocks[1].Hash(),
		}})

	// reorg event
	// In this event the channel receives an array of blocks in NewChain and OldChain.
	readEvent(&eventTest{
		Type: Chain2HeadReorgEvent,
		Added: []common.Hash{
			replacementBlocks[2].Hash(),
			replacementBlocks[1].Hash(),
			replacementBlocks[0].Hash(),
		},
		Removed: []common.Hash{
			chain[2].Hash(),
			chain[1].Hash(),
			chain[0].Hash(),
		},
	})

	// head event
	readEvent(&eventTest{
		Type: Chain2HeadCanonicalEvent,
		Added: []common.Hash{
			replacementBlocks[3].Hash(),
		}})
}