diff --git a/core/headerchain.go b/core/headerchain.go
index 1dbf958786d06353bab84d3343fd429790a064bd..d9f3b575ad6166d9a550c3666cbd0147391a0a5f 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -165,6 +165,7 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
)
batch := hc.chainDb.NewBatch()
+ parentKnown := true // Set to true to force the hc.HasHeader check on the first iteration
for i, header := range headers {
var hash common.Hash
// The headers have already been validated at this point, so we already
@@ -178,8 +179,10 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
number := header.Number.Uint64()
newTD.Add(newTD, header.Difficulty)
+ // If the parent was not present, the header cannot be known either, so store it
// If the header is already known, skip it, otherwise store
- if !hc.HasHeader(hash, number) {
+ alreadyKnown := parentKnown && hc.HasHeader(hash, number)
+ if !alreadyKnown {
// Irrelevant of the canonical status, write the TD and header to the database.
rawdb.WriteTd(batch, hash, number, newTD)
hc.tdCache.Add(hash, new(big.Int).Set(newTD))
@@ -192,6 +195,7 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
firstInserted = i
}
}
+ parentKnown = alreadyKnown
lastHeader, lastHash, lastNumber = header, hash, number
}
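
The headerchain change above avoids redundant database lookups while importing a batch of headers: once one header in the batch turns out to be unknown, every descendant of it must be unknown as well, so hc.HasHeader only needs to run for the first header and for any header whose parent was found to be present. Below is a minimal, self-contained sketch of that skip-check pattern; knownHeaders and insertHashes are hypothetical stand-ins for the database-backed hc.HasHeader lookup and the batch write in writeHeaders, not part of go-ethereum.

package main

import "fmt"

// knownHeaders stands in for the database-backed hc.HasHeader lookup.
var knownHeaders = map[string]bool{"h1": true, "h2": true}

// insertHashes mirrors the loop structure of writeHeaders: the "already known"
// lookup is only performed while the previous header's parent was known.
func insertHashes(hashes []string) (stored []string) {
	parentKnown := true // force the lookup on the first iteration
	for _, h := range hashes {
		alreadyKnown := parentKnown && knownHeaders[h]
		if !alreadyKnown {
			stored = append(stored, h) // in writeHeaders this is the TD/header batch write
		}
		parentKnown = alreadyKnown
	}
	return stored
}

func main() {
	fmt.Println(insertHashes([]string{"h1", "h2", "h3", "h4", "h5"}))
}

Running it stores h3, h4 and h5 while consulting knownHeaders only for h1, h2 and h3; h4 and h5 are written without any lookup because their ancestor h3 was already unknown.
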
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index ac7edc2c68a7b484f6832fd7a91d1038440084b2..04ec12cfa9e7cef851c1dacaa223846bc7e8379e 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -40,10 +40,10 @@ const (
)
var (
- blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
- blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
- blockCacheMemory = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
- blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones
+ blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
+ blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
+ blockCacheMemory = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
+ blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones
)
var (
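
The cache tuning above quadruples blockCacheMemory from 64 MiB to 256 MiB, letting the downloader keep more block data queued before it throttles, while blockCacheMaxItems still caps the raw item count and blockCacheSizeWeight remains the smoothing factor for the running block-size estimate. The bookkeeping that consumes these values lives outside this diff, so the following is only a hedged sketch of how such an item cap and a memory budget might combine into an effective limit; throttleLimit and updateAvgSize are illustrative helpers, not go-ethereum APIs.

package main

import "fmt"

const (
	blockCacheMaxItems   = 8192
	blockCacheMemory     = 256 * 1024 * 1024 // raised from 64 MiB in this change
	blockCacheSizeWeight = 0.1
)

// throttleLimit returns the smaller of the hard item cap and the number of
// average-sized blocks that fit into the memory budget. This is only an
// approximation of the real downloader bookkeeping, which is not in this diff.
func throttleLimit(avgBlockSize float64) int {
	byMemory := int(blockCacheMemory / avgBlockSize)
	if byMemory < blockCacheMaxItems {
		return byMemory
	}
	return blockCacheMaxItems
}

// updateAvgSize folds a newly observed block size into the running estimate,
// using blockCacheSizeWeight as the smoothing factor described in the comments.
func updateAvgSize(avg, observed float64) float64 {
	return (1-blockCacheSizeWeight)*avg + blockCacheSizeWeight*observed
}

func main() {
	avg := 50_000.0 // assume roughly 50 KB blocks
	avg = updateAvgSize(avg, 80_000)
	fmt.Println(throttleLimit(avg)) // the memory budget, not the item cap, is binding here
}

With blocks around 50 KB the 256 MiB budget works out to roughly 5,000 in-flight blocks, so the memory cap rather than the 8192-item cap is the binding limit.
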
@@ -783,8 +783,9 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
+ trieHasher := trie.NewStackTrie(nil)
validate := func(index int, header *types.Header) error {
- if types.DeriveSha(types.Transactions(txLists[index]), trie.NewStackTrie(nil)) != header.TxHash {
+ if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
return errInvalidBody
}
if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
@@ -808,8 +809,9 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
+ trieHasher := trie.NewStackTrie(nil)
validate := func(index int, header *types.Header) error {
- if types.DeriveSha(types.Receipts(receiptList[index]), trie.NewStackTrie(nil)) != header.ReceiptHash {
+ if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
return errInvalidReceipt
}
return nil
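
Both DeliverBodies and DeliverReceipts previously built a fresh trie.NewStackTrie(nil) inside the validate closure, i.e. one allocation per delivered list; the change hoists a single trieHasher out of the closure and reuses it for the whole delivery. That is only sound if types.DeriveSha resets the hasher before deriving each root, which is the assumption this change relies on. A small sketch of the reuse pattern follows; verifyTxLists and its error message are hypothetical, only the types.DeriveSha and trie.NewStackTrie calls mirror the diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// verifyTxLists checks each transaction list against its header's TxHash,
// reusing one StackTrie for all lists instead of allocating one per check,
// under the assumption that DeriveSha resets the hasher before each use.
func verifyTxLists(headers []*types.Header, txLists [][]*types.Transaction) error {
	trieHasher := trie.NewStackTrie(nil)
	for i, header := range headers {
		if types.DeriveSha(types.Transactions(txLists[i]), trieHasher) != header.TxHash {
			return fmt.Errorf("invalid body at index %d", i)
		}
	}
	return nil
}

func main() {
	// Smoke test: a header whose TxHash matches an empty transaction list.
	header := &types.Header{TxHash: types.DeriveSha(types.Transactions{}, trie.NewStackTrie(nil))}
	fmt.Println(verifyTxLists([]*types.Header{header}, [][]*types.Transaction{{}}))
}

The same single-hasher pattern applies unchanged to the receipt validation in DeliverReceipts.
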