Post-merge cleanup

Shane Bammel 2022-01-14 19:24:28 -06:00
parent 9b8a328d0b
commit 64bdf24bd0
44 changed files with 54 additions and 1072 deletions

View File

@ -1,100 +0,0 @@
# Changelog
## v1.1.0
* [\#282](https://github.com/binance-chain/bsc/pull/282) update discord link
* [\#280](https://github.com/binance-chain/bsc/pull/280) update discord link
* [\#227](https://github.com/binance-chain/bsc/pull/227) use more aggressive write cache policy
## v1.1.0-beta
* [\#152](https://github.com/binance-chain/bsc/pull/152) upgrade to go-ethereum 1.10.3
## v1.0.7-hf.2
BUGFIX
* [\#194](https://github.com/binance-chain/bsc/pull/194) bump btcd to v0.20.1-beta
## v1.0.7-hf.1
BUGFIX
* [\#190](https://github.com/binance-chain/bsc/pull/190) fix disk usage increasing dramatically
* [\#191](https://github.com/binance-chain/bsc/pull/191) fix the reorg routine of tx pool stuck issue
## v1.0.7
* [\#120](https://github.com/binance-chain/bsc/pull/120) add health check endpoint
* [\#116](https://github.com/binance-chain/bsc/pull/116) validators only write database state when there is enough distance
* [\#115](https://github.com/binance-chain/bsc/pull/115) add batch query methods
* [\#112](https://github.com/binance-chain/bsc/pull/112) apply max commit tx time for miner worker to avoid empty blocks
* [\#101](https://github.com/binance-chain/bsc/pull/101) apply block number limit for the `eth_getLogs` api
* [\#99](https://github.com/binance-chain/bsc/pull/99) enable directbroadcast flag to decrease the block propagation time
* [\#90](https://github.com/binance-chain/bsc/pull/90) add tini in docker image
* [\#84](https://github.com/binance-chain/bsc/pull/84) add jq in docker image
## v1.0.6
* [\#68](https://github.com/binance-chain/bsc/pull/68) apply mirror sync upgrade on mainnet
## v1.0.5
SECURITY
* [\#63](https://github.com/binance-chain/bsc/pull/63) security patches from go-ethereum
* [\#54](https://github.com/binance-chain/bsc/pull/54) les: fix GetProofsV2 that could potentially cause a panic.
FEATURES
* [\#56](https://github.com/binance-chain/bsc/pull/56) apply mirror sync upgrade
* [\#53](https://github.com/binance-chain/bsc/pull/53) support fork id in header; elegant upgrade
IMPROVEMENT
* [\#61](https://github.com/binance-chain/bsc/pull/61) add `x-forward-for` log message when message handling fails
* [\#60](https://github.com/binance-chain/bsc/pull/60) add rpc method request gauge
BUGFIX
* [\#59](https://github.com/binance-chain/bsc/pull/59) fix potential deadlock of pub/sub module
## v1.0.4
IMPROVEMENT
* [\#35](https://github.com/binance-chain/bsc/pull/35) use fixed gas price when network is idle
* [\#38](https://github.com/binance-chain/bsc/pull/38) disable noisy log from consensus engine
* [\#47](https://github.com/binance-chain/bsc/pull/47) upgrade to golang1.15.5
* [\#49](https://github.com/binance-chain/bsc/pull/49) create pull request template for all developers to follow
## v1.0.3
IMPROVEMENT
* [\#36](https://github.com/binance-chain/bsc/pull/36) add max gas allowance calculation
## v1.0.2
IMPROVEMENT
* [\#29](https://github.com/binance-chain/bsc/pull/29) eth/tracers: revert reason in call_tracer + error for failed internal…
## v1.0.1-beta
IMPROVEMENT
* [\#22](https://github.com/binance-chain/bsc/pull/22) resolve best practice advice
FEATURES
* [\#23](https://github.com/binance-chain/bsc/pull/23) enforce backoff time for out-turn validator
BUGFIX
* [\#25](https://github.com/binance-chain/bsc/pull/25) minor fix for ramanujan upgrade
UPGRADE
* [\#26](https://github.com/binance-chain/bsc/pull/26) update chapel network config for ramanujan fork
## v1.0.0-beta.0
FEATURES
* [\#5](https://github.com/binance-chain/bsc/pull/5) enable bep2e tokens for faucet
* [\#14](https://github.com/binance-chain/bsc/pull/14) add cross chain contract to system contract
* [\#15](https://github.com/binance-chain/bsc/pull/15) allow liveness slashing to fail
IMPROVEMENT
* [\#11](https://github.com/binance-chain/bsc/pull/11) remove redundant gaslimit check
BUGFIX
* [\#4](https://github.com/binance-chain/bsc/pull/4) fix validator failing to sync a block produced by itself
* [\#6](https://github.com/binance-chain/bsc/pull/6) modify params for Parlia consensus with 21 validators
* [\#10](https://github.com/binance-chain/bsc/pull/10) add gas limit check in parlia implementation
* [\#13](https://github.com/binance-chain/bsc/pull/13) fix debug_traceTransaction crashed issue

View File

@ -6,10 +6,10 @@ ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.17-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
RUN apk add --no-cache make gcc musl-dev linux-headers git bash
ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth
RUN cd /go-ethereum && make geth
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest

View File

@ -477,12 +477,6 @@ func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, erro
return big.NewInt(1), nil
}
// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated
// chain doesn't have miners, we just return a gas tip of 1 for any call.
func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
// EstimateGas executes the requested code against the currently pending block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
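The kept SuggestGasTipCap above always returns 1 wei, since the simulated chain has no miners to price against. A minimal usage sketch, assuming the go-ethereum v1.10.x `backends.SimulatedBackend` API (the genesis allocation and gas limit here are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// An empty genesis allocation is enough to query gas hints; the gas
	// limit is arbitrary for this example.
	backend := backends.NewSimulatedBackend(core.GenesisAlloc{}, 8_000_000)
	defer backend.Close()

	tip, err := backend.SuggestGasTipCap(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tip.Cmp(big.NewInt(1)) == 0) // true: fixed 1 wei tip, no miners
}
```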

View File

@ -33,10 +33,6 @@ import (
"text/template"
"time"
pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem"
"gopkg.in/urfave/cli.v1"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
@ -1257,7 +1253,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NetRestrict = list
}
if ctx.GlobalBool(DeveloperFlag.Name) || ctx.GlobalBool(CatalystFlag.Name) {
if ctx.GlobalBool(DeveloperFlag.Name) {
// --dev mode can't use p2p networking.
cfg.MaxPeers = 0
cfg.ListenAddr = ""

View File

@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@ -268,29 +269,34 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
// Finalize implements consensus.Engine, setting the final state on the header
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs *[]*types.Transaction, uncles []*types.Header, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction, usedGas *uint64) (err error) {
// Finalize is different from Prepare: it can be used in both block generation
// and verification, so determine the consensus rules by header type.
if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, txs, uncles)
beacon.ethone.Finalize(chain, header, state, txs, uncles, receipts, systemTxs, usedGas)
return
}
// The block reward is no longer handled here. It's done by the
// external consensus engine.
header.Root = state.IntermediateRoot(true)
return
}
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, finalUpdate bool) (*types.Block, []*types.Receipt, error) {
// FinalizeAndAssemble is different from Prepare: it can be used in both block
// generation and verification, so determine the consensus rules by header type.
if !beacon.IsPoSHeader(header) {
return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts)
return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts, finalUpdate)
}
// Finalize and assemble the block
beacon.Finalize(chain, header, state, txs, uncles)
return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil
beacon.Finalize(chain, header, state, &txs, uncles, nil, nil, nil)
return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), receipts, nil
}
func (ethash *Beacon) Delay(_ consensus.ChainReader, _ *types.Header) *time.Duration {
return nil
}
// Seal generates a new sealing request for the given input block and pushes
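The widened Finalize signature above passes the transaction and receipt sets by pointer so the engine can append consensus-level system transactions and account for their gas. A toy sketch of that pattern in plain Go; the `Tx` type, gas figure, and function shape are illustrative stand-ins, not the fork's actual types:

```go
package main

import "fmt"

// Tx stands in for *types.Transaction; only the shape of the call matters.
type Tx struct{ Nonce uint64 }

// finalize mirrors the widened signature: because txs and systemTxs are
// *[]*Tx, the engine can move system transactions into the block's set and
// charge their gas, and the caller observes the mutation.
func finalize(txs, systemTxs *[]*Tx, usedGas *uint64) error {
	for _, tx := range *systemTxs {
		*txs = append(*txs, tx)
		*usedGas += 21000 // illustrative gas per system transaction
	}
	*systemTxs = (*systemTxs)[:0]
	return nil
}

func main() {
	txs := []*Tx{{Nonce: 1}}
	system := []*Tx{{Nonce: 2}}
	var gas uint64
	if err := finalize(&txs, &system, &gas); err != nil {
		panic(err)
	}
	fmt.Println(len(txs), len(system), gas) // 2 0 21000
}
```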

View File

@ -459,7 +459,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {

View File

@ -49,12 +49,6 @@ type ChainHeaderReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
}
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
ChainHeaderReader
// GetTd retrieves the total difficulty from the database by hash and number.
GetTd(hash common.Hash, number uint64) *big.Int

View File

@ -650,10 +650,6 @@ var (
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Skip block reward in catalyst mode
if config.IsCatalyst(header.Number) {
return
}
// Select the correct block reward based on chain progression
blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) {

View File

@ -280,6 +280,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if err := bc.loadLastState(); err != nil {
return nil, err
}
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
@ -308,6 +309,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
}
}
// Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var (
@ -760,24 +762,6 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
headBlockGauge.Update(int64(block.NumberU64()))
}
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
@ -1113,6 +1097,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
updateHead(blockChain[len(blockChain)-1])
return 0, nil
}
// Write downloaded chain data and corresponding receipt chain data
if len(ancientBlocks) > 0 {
if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
@ -1158,18 +1143,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, nil
}
// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in db if the new mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
bc.txLookupLimit = limit
}
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
return bc.txLookupLimit
}
var lastWrite uint64
// writeBlockWithoutState writes only the block and its metadata to the database,
@ -1277,10 +1250,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
lastWrite = chosen
bc.gcproc = 0
}
// Flush an entire trie and restart the counters
triedb.Commit(header.Root, true, nil)
lastWrite = chosen
bc.gcproc = 0
}
}
// Garbage collect anything below our required write retention
@ -1409,22 +1378,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return bc.insertChain(chain, true, true)
}
// InsertChainWithoutSealVerification works exactly the same
// except for seal verification, seal verification is omitted
func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) {
bc.blockProcFeed.Send(true)
defer bc.blockProcFeed.Send(false)
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
n, err := bc.insertChain(types.Blocks([]*types.Block{block}), false)
bc.chainmu.Unlock()
bc.wg.Done()
return n, err
}
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
@ -1626,6 +1579,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
if err != nil {
return it.index, err
}
// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain")
activeState = statedb
@ -1656,6 +1610,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, err
}
bc.CacheReceipts(block.Hash(), receipts)
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them

View File

@ -2723,223 +2723,6 @@ func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
check(&tail, chain)
}
func TestTransactionIndices(t *testing.T) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(100000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
genesis = gspec.MustCommit(gendb)
signer = types.LatestSigner(gspec.Config)
)
height := uint64(128)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
if err != nil {
panic(err)
}
block.AddTx(tx)
})
blocks2, _ := GenerateChain(gspec.Config, blocks[len(blocks)-1], ethash.NewFaker(), gendb, 10, nil)
check := func(tail *uint64, chain *BlockChain) {
stored := rawdb.ReadTxIndexTail(chain.db)
if tail == nil && stored != nil {
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
}
if tail != nil && *stored != *tail {
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
}
if tail != nil {
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 {
continue
}
for _, tx := range block.Transactions() {
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
}
}
}
for i := uint64(0); i < *tail; i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 {
continue
}
for _, tx := range block.Transactions() {
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
}
}
}
}
}
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
// Import all blocks into ancient db
l := uint64(0)
chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
if n, err := chain.InsertReceiptChain(blocks, receipts, 128); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
chain.Stop()
ancientDb.Close()
// Init block chain with external ancients, check all needed indices have been indexed.
limit := []uint64{0, 32, 64, 128}
for _, l := range limit {
ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
var tail uint64
if l != 0 {
tail = uint64(128) - l + 1
}
check(&tail, chain)
chain.Stop()
ancientDb.Close()
}
// Reconstruct a block chain which only reserves HEAD-64 tx indices
ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
for i, l := range limit {
chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
chain.InsertChain(blocks2[i : i+1]) // Feed chain a higher block to trigger indices updater.
time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
check(&tails[i], chain)
chain.Stop()
}
}
func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(100000000000000000)
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
genesis = gspec.MustCommit(gendb)
signer = types.LatestSigner(gspec.Config)
)
height := uint64(128)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
if err != nil {
panic(err)
}
block.AddTx(tx)
})
check := func(tail *uint64, chain *BlockChain) {
stored := rawdb.ReadTxIndexTail(chain.db)
if tail == nil && stored != nil {
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
}
if tail != nil && *stored != *tail {
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
}
if tail != nil {
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 {
continue
}
for _, tx := range block.Transactions() {
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
}
}
}
for i := uint64(0); i < *tail; i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 {
continue
}
for _, tx := range block.Transactions() {
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
}
}
}
}
}
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
// Import all blocks into ancient db, only HEAD-32 indices are kept.
l := uint64(32)
chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
tail := uint64(32)
check(&tail, chain)
}
// Benchmarks large blocks with value transfers to non-existing accounts
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
var (
@ -3066,6 +2849,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
// Generate and import the canonical chain
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {

View File

@ -115,15 +115,6 @@ func NewIDWithChain(chain Blockchain) ID {
)
}
// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
return NewID(
chain.Config(),
chain.Genesis().Hash(),
chain.CurrentHeader().Number.Uint64(),
)
}
// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
func NewFilter(chain Blockchain) Filter {

View File

@ -156,7 +156,7 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, common.Hash, error) {

View File

@ -457,25 +457,6 @@ func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
return data
}
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
// If it's an ancient one, we don't need the canonical hash
data, _ := db.Ancient(freezerBodiesTable, number)
if len(data) == 0 {
// Need to get the hash
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
// In the background, the freezer is moving data from leveldb into flat files.
// So during the first check of the ancient db the data may not be there yet,
// but by the time we reach into leveldb it has already been moved, which
// would result in a not-found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerBodiesTable, number)
}
}
return data
}
// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
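The comment in the removed duplicate documents the race that the surviving ReadCanonicalBodyRLP guards against: the background freezer migrates bodies from leveldb into flat files, so a miss in both stores is only trusted after re-checking the ancients. A self-contained sketch of that double-check, with toy tables standing in for the freezer and leveldb:

```go
package main

import "fmt"

// kv and ancients stand in for leveldb and the freezer; in the real code the
// background freezer moves entries from kv to ancients concurrently.
type table map[uint64][]byte

// readBody mirrors the retry in ReadCanonicalBodyRLP: a miss in both stores
// is only trusted after re-checking the freezer, since the entry may have
// migrated between the two lookups.
func readBody(ancients, kv table, number uint64) []byte {
	if data := ancients[number]; len(data) > 0 {
		return data
	}
	if data := kv[number]; len(data) > 0 {
		return data
	}
	// The freezer may have claimed the entry after the first check.
	return ancients[number]
}

func main() {
	ancients := table{1: []byte("body-1")}
	kv := table{}
	fmt.Printf("%s\n", readBody(ancients, kv, 1)) // body-1
}
```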

View File

@ -478,6 +478,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
log.Crit("Failed to delete dangling side blocks", "err", err)
}
}
// Log something friendly for the user
context := []interface{}{
"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,

View File

@ -85,19 +85,6 @@ func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId
return start.offset, end.offset, end.filenum
}
// bounds returns the start- and end-offsets, and the file number of where to
// read the data item marked by the two index entries. The two entries are
// assumed to be sequential.
func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
if start.filenum != end.filenum {
// If a piece of data 'crosses' a data-file,
// it's actually in one piece on the second data-file.
// We return a zero-indexEntry for the second file as start
return 0, end.offset, end.filenum
}
return start.offset, end.offset, end.filenum
}
// freezerTable represents a single chained data table within the freezer (e.g. blocks).
// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
// file (uncompressed 64 bit indices into the data file).
@ -573,6 +560,7 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e
func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) {
t.lock.RLock()
defer t.lock.RUnlock()
// Ensure the table and the item is accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
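For the index-entry arithmetic above: two sequential entries delimit one item, and an item that "crosses" a file boundary is stored wholly in the second file, so the read there starts at offset zero. A runnable restatement of that rule (the struct fields mirror the diff; everything else is illustrative):

```go
package main

import "fmt"

type indexEntry struct {
	filenum uint32 // data-file number
	offset  uint32 // end offset of the item within that file
}

// bounds mirrors the freezer-table logic: if the item crosses into a new
// data file, it lives wholly in the second file, so reading starts at zero.
func bounds(start, end indexEntry) (startOffset, endOffset, fileId uint32) {
	if start.filenum != end.filenum {
		return 0, end.offset, end.filenum
	}
	return start.offset, end.offset, end.filenum
}

func main() {
	fmt.Println(bounds(indexEntry{1, 900}, indexEntry{2, 120})) // 0 120 2
	fmt.Println(bounds(indexEntry{2, 120}, indexEntry{2, 260})) // 120 260 2
}
```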

View File

@ -685,34 +685,6 @@ func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
t.Fatalf("wrong error for item %d: %v", item, err)
}
}
checkPresent(4)
// Now, let's pretend we have deleted 1M items
{
// Read the index file
p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
if err != nil {
t.Fatal(err)
}
indexBuf := make([]byte, 3*indexEntrySize)
indexFile.Read(indexBuf)
// Update the index file, so that we store
// [ file = 2, offset = 1M ] at index zero
tailId := uint32(2) // First file is 2
itemOffset := uint32(1000000) // We have removed 1M items
zeroIndex := indexEntry{
offset: itemOffset,
filenum: tailId,
}
buf := zeroIndex.marshallBinary()
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
indexFile.Close()
}
checkPresent(1000000)
}
// Gets a chunk of data, filled with 'b'

View File

@ -229,20 +229,6 @@ func IsCodeKey(key []byte) (bool, []byte) {
return false, nil
}
// codeKey = CodePrefix + hash
func codeKey(hash common.Hash) []byte {
return append(CodePrefix, hash.Bytes()...)
}
// IsCodeKey reports whether the given byte slice is the key of contract code,
// if so return the raw code hash as well.
func IsCodeKey(key []byte) (bool, []byte) {
if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) {
return true, key[len(CodePrefix):]
}
return false, nil
}
// configKey = configPrefix + hash
func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)

View File

@ -237,54 +237,6 @@ func (result *proofResult) forEach(callback func(key []byte, val []byte) error)
if err := callback(key, val); err != nil {
return err
}
keys = append(keys, common.CopyBytes(key[len(prefix):]))
if valueConvertFn == nil {
vals = append(vals, common.CopyBytes(iter.Value()))
} else {
val, err := valueConvertFn(iter.Value())
if err != nil {
// Special case, the state data is corrupted (invalid slim-format account),
// don't abort the entire procedure directly. Instead, let the fallback
// generation heal the invalid data.
//
// Here append the original value to ensure that the number of key and
// value are the same.
vals = append(vals, common.CopyBytes(iter.Value()))
log.Error("Failed to convert account state data", "err", err)
} else {
vals = append(vals, val)
}
}
}
// Update metrics for database iteration and merkle proving
if kind == "storage" {
snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
} else {
snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
}
defer func(start time.Time) {
if kind == "storage" {
snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
} else {
snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
}
}(time.Now())
// The snap state is exhausted, pass the entire key/val set for verification
if origin == nil && !diskMore {
stackTr := trie.NewStackTrie(nil)
for i, key := range keys {
stackTr.TryUpdate(key, vals[i])
}
if gotRoot := stackTr.Hash(); gotRoot != root {
return &proofResult{
keys: keys,
vals: vals,
proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
}, nil
}
return &proofResult{keys: keys, vals: vals}, nil
}
return nil
}

View File

@ -309,7 +309,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
if msg.Value().Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From(), msg.Value()) {
return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From().Hex())
}
st.gas -= gas
// Set up the initial access list.
if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin {

View File

@ -341,5 +341,4 @@ func TestRlpDecodeParentHash(t *testing.T) {
}
}
}
return NewBlock(header, txs, uncles, receipts, newHasher())
}

View File

@ -1,177 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm
import (
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// accessList is an accumulator for the set of accounts and storage slots an EVM
// contract execution touches.
type accessList map[common.Address]accessListSlots
// accessListSlots is an accumulator for the set of storage slots within a single
// contract that an EVM contract execution touches.
type accessListSlots map[common.Hash]struct{}
// newAccessList creates a new accessList.
func newAccessList() accessList {
return make(map[common.Address]accessListSlots)
}
// addAddress adds an address to the accesslist.
func (al accessList) addAddress(address common.Address) {
// Set address if not previously present
if _, present := al[address]; !present {
al[address] = make(map[common.Hash]struct{})
}
}
// addSlot adds a storage slot to the accesslist.
func (al accessList) addSlot(address common.Address, slot common.Hash) {
// Set address if not previously present
al.addAddress(address)
// Set the slot on the surely existent storage set
al[address][slot] = struct{}{}
}
// equal checks if the content of the current access list is the same as the
// content of the other one.
func (al accessList) equal(other accessList) bool {
// Cross reference the accounts first
if len(al) != len(other) {
return false
}
for addr := range al {
if _, ok := other[addr]; !ok {
return false
}
}
for addr := range other {
if _, ok := al[addr]; !ok {
return false
}
}
// Accounts match, cross reference the storage slots too
for addr, slots := range al {
otherslots := other[addr]
if len(slots) != len(otherslots) {
return false
}
for hash := range slots {
if _, ok := otherslots[hash]; !ok {
return false
}
}
for hash := range otherslots {
if _, ok := slots[hash]; !ok {
return false
}
}
}
return true
}
// accesslist converts the accesslist to a types.AccessList.
func (al accessList) accessList() types.AccessList {
acl := make(types.AccessList, 0, len(al))
for addr, slots := range al {
tuple := types.AccessTuple{Address: addr, StorageKeys: []common.Hash{}}
for slot := range slots {
tuple.StorageKeys = append(tuple.StorageKeys, slot)
}
acl = append(acl, tuple)
}
return acl
}
// AccessListTracer is a tracer that accumulates touched accounts and storage
// slots into an internal set.
type AccessListTracer struct {
excl map[common.Address]struct{} // Set of account to exclude from the list
list accessList // Set of accounts and storage slots touched
}
// NewAccessListTracer creates a new tracer that can generate AccessLists.
// An optional AccessList can be specified to occupy slots and addresses in
// the resulting accesslist.
func NewAccessListTracer(acl types.AccessList, from, to common.Address, precompiles []common.Address) *AccessListTracer {
excl := map[common.Address]struct{}{
from: {}, to: {},
}
for _, addr := range precompiles {
excl[addr] = struct{}{}
}
list := newAccessList()
for _, al := range acl {
if _, ok := excl[al.Address]; !ok {
list.addAddress(al.Address)
}
for _, slot := range al.StorageKeys {
list.addSlot(al.Address, slot)
}
}
return &AccessListTracer{
excl: excl,
list: list,
}
}
func (a *AccessListTracer) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
// CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist.
func (a *AccessListTracer) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
stack := scope.Stack
if (op == SLOAD || op == SSTORE) && stack.len() >= 1 {
slot := common.Hash(stack.data[stack.len()-1].Bytes32())
a.list.addSlot(scope.Contract.Address(), slot)
}
if (op == EXTCODECOPY || op == EXTCODEHASH || op == EXTCODESIZE || op == BALANCE || op == SELFDESTRUCT) && stack.len() >= 1 {
addr := common.Address(stack.data[stack.len()-1].Bytes20())
if _, ok := a.excl[addr]; !ok {
a.list.addAddress(addr)
}
}
if (op == DELEGATECALL || op == CALL || op == STATICCALL || op == CALLCODE) && stack.len() >= 5 {
addr := common.Address(stack.data[stack.len()-2].Bytes20())
if _, ok := a.excl[addr]; !ok {
a.list.addAddress(addr)
}
}
}
func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
}
func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
// AccessList returns the current accesslist maintained by the tracer.
func (a *AccessListTracer) AccessList() types.AccessList {
return a.list.accessList()
}
// Equal returns if the content of two access list traces are equal.
func (a *AccessListTracer) Equal(other *AccessListTracer) bool {
return a.list.equal(other.list)
}

View File

@ -17,10 +17,7 @@
package vm
import (
"fmt"
"io"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
@ -39,65 +36,3 @@ type EVMLogger interface {
CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error)
CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error)
}
type mdLogger struct {
out io.Writer
cfg *LogConfig
}
// NewMarkdownLogger creates a logger which outputs information in a format adapted
// for human readability, and is also a valid markdown table
func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger {
l := &mdLogger{writer, cfg}
if l.cfg == nil {
l.cfg = &LogConfig{}
}
return l
}
func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
if !create {
fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
from.String(), to.String(),
input, gas, value)
} else {
fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
from.String(), to.String(),
input, gas, value)
}
fmt.Fprintf(t.out, `
| Pc | Op | Cost | Stack | RStack | Refund |
|-------|-------------|------|-----------|-----------|---------|
`)
}
// CaptureState also tracks SLOAD/SSTORE ops to track storage change.
func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
stack := scope.Stack
fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost)
if !t.cfg.DisableStack {
// format stack
var a []string
for _, elem := range stack.data {
a = append(a, elem.Hex())
}
b := fmt.Sprintf("[%v]", strings.Join(a, ","))
fmt.Fprintf(t.out, "%10v |", b)
}
fmt.Fprintf(t.out, "%10v |", env.StateDB.GetRefund())
fmt.Fprintln(t.out, "")
if err != nil {
fmt.Fprintf(t.out, "Error: %v\n", err)
}
}
func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err)
}
func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) {
fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n",
output, gasUsed, err)
}

View File

@ -272,26 +272,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Successful startup; push a marker and check previous unclean shutdowns.
eth.shutdownTracker.MarkStartup()
// Start the RPC service
eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId)
// Register the backend on the node
stack.RegisterAPIs(eth.APIs())
stack.RegisterProtocols(eth.Protocols())
stack.RegisterLifecycle(eth)
// Check for unclean shutdown
if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {
log.Error("Could not update unclean-shutdown-marker list", "error", err)
} else {
if discards > 0 {
log.Warn("Old unclean shutdowns found", "count", discards)
}
for _, tstamp := range uncleanShutdowns {
t := time.Unix(int64(tstamp), 0)
log.Warn("Unclean shutdown detected", "booted", t,
"age", common.PrettyAge(t))
}
}
return eth, nil
}

View File

@ -373,22 +373,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
snapshots.Disable()
}
}
// If snap sync was requested, create the snap scheduler and switch to fast
// sync mode. Long term we could drop fast sync or merge the two together,
// but until snap becomes prevalent, we should support both. TODO(karalabe).
if mode == SnapSync {
if !d.snapSync {
// Snap sync uses the snapshot namespace to store potentially flakey data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean
// time to prevent access.
if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
snapshots.Disable()
}
log.Warn("Enabling snapshot sync prototype")
d.snapSync = true
}
mode = FastSync
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
d.peers.Reset()
@ -619,9 +603,6 @@ func (d *Downloader) Terminate() {
default:
close(d.quitCh)
}
if d.stateBloom != nil {
d.stateBloom.Close()
}
d.quitLock.Unlock()
// Cancel any pending download requests

View File

@ -232,7 +232,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
break
}
}
hashes = append(hashes, hash)
}
hashes := make([]common.Hash, len(headers))
for i, header := range headers {
@ -457,6 +456,7 @@ func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync)
func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
// Create a long block chain to download and the tester
targetBlocks := len(testChainBase.blocks) - 1
@ -514,7 +514,6 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
}
// Permit the blocked blocks to import
if atomic.LoadUint32(&blocked) > 0 {
atomic.StoreUint32(&blocked, uint32(0))
@ -526,8 +525,6 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
if err := <-errc; err != nil {
t.Fatalf("block synchronization failed: %v", err)
}
tester.terminate()
}
// Tests that simple synchronization against a forked chain works correctly. In
@ -629,6 +626,7 @@ func TestBoundedHeavyForkedSync66Light(t *testing.T) {
func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
// Create a long enough forked chain
chainA := testChainForkLightA
@ -838,6 +836,7 @@ func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t
func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
@ -923,13 +922,13 @@ func TestHighTDStarvationAttack66Light(t *testing.T) {
func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
chain := testChainBase.shorten(1)
tester.newPeer("attack", protocol, chain.blocks[1:])
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
}
tester.terminate()
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
@ -988,6 +987,7 @@ func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, Ligh
func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Set a sync init hook to catch progress changes
@ -1135,6 +1135,7 @@ func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth
func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Set a sync init hook to catch progress changes
@ -1200,6 +1201,7 @@ func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.E
func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester()
defer tester.terminate()
chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Set a sync init hook to catch progress changes

View File

@ -38,11 +38,6 @@ const (
receiptType = uint(1)
)
const (
bodyType = uint(0)
receiptType = uint(1)
)
var (
blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
@ -390,20 +385,6 @@ func (q *queue) Results(block bool) []*fetchResult {
case ch <- true:
default:
}
q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
}
// Using the newly calibrated resultsize, figure out the new throttle limit
// on the result cache
throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
// Log some info at certain times
if time.Since(q.lastStatLog) > 60*time.Second {
q.lastStatLog = time.Now()
info := q.Stats()
info = append(info, "throttle", throttleThreshold)
log.Info("Downloader queue stats", info...)
}
// Log some info at certain times
if time.Since(q.lastStatLog) > 60*time.Second {

View File

@ -18,7 +18,6 @@ package downloader
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
@ -67,55 +66,14 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
for {
select {
case next := <-d.stateSyncStart:
d.spindownStateSync(active, finished, timeout, peerDrop)
return next
case <-s.done:
d.spindownStateSync(active, finished, timeout, peerDrop)
return nil
}
}
}
// spindownStateSync 'drains' the outstanding requests; some will be delivered and others
// will time out. This is to ensure that when the next stateSync starts working, all peers
// are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
for len(active) > 0 {
var (
req *stateReq
reason string
)
select {
// Handle (drop) incoming state packs:
case pack := <-d.stateCh:
req = active[pack.PeerId()]
reason = "delivered"
// Handle dropped peer connections:
case p := <-peerDrop:
req = active[p.id]
reason = "peerdrop"
// Handle timed-out requests:
case req = <-timeout:
reason = "timeout"
}
if req == nil {
continue
}
req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
req.timer.Stop()
delete(active, req.peer.id)
req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
}
// The 'finished' set contains deliveries that we were going to pass to processing.
// Those are now moot, but we still need to set those peers as idle, which would
// otherwise have been done after processing
for _, req := range finished {
req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
}
}
// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
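The removed spindownStateSync drained every outstanding request, by delivery, peer drop, or timeout, so the next sync could assume all peers were idle. A stripped-down sketch of the same select-and-drain loop over plain channels (all names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// drain resolves every outstanding request by delivery, peer drop, or
// timeout before returning, mirroring the spindown idea above.
func drain(active map[string]bool, delivered, dropped, timeout <-chan string) {
	for len(active) > 0 {
		var id, reason string
		select {
		case id = <-delivered:
			reason = "delivered"
		case id = <-dropped:
			reason = "peerdrop"
		case id = <-timeout:
			reason = "timeout"
		}
		if active[id] {
			delete(active, id)
			fmt.Println("peer idle:", id, reason)
		}
	}
}

func main() {
	active := map[string]bool{"a": true, "b": true}
	delivered := make(chan string, 1)
	timeout := make(chan string, 1)
	delivered <- "a"
	go func() { time.Sleep(10 * time.Millisecond); timeout <- "b" }()
	drain(active, delivered, nil, timeout) // nil peer-drop channel never fires
}
```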

View File

@ -531,14 +531,6 @@ func testPendingDeduplication(t *testing.T, light bool) {
return tester.getHeader(hashes[0]) == nil
}
}
checkNonExist := func() bool {
return tester.getBlock(hashes[0]) == nil
}
if light {
checkNonExist = func() bool {
return tester.getHeader(hashes[0]) == nil
}
}
// Announce the same block many times until it's fetched (wait for any pending ops)
for checkNonExist() {
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)

View File

@ -39,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
const (
@ -108,7 +107,6 @@ type handler struct {
maxPeers int
downloader *downloader.Downloader
stateBloom *trie.SyncBloom
blockFetcher *fetcher.BlockFetcher
txFetcher *fetcher.TxFetcher
peers *peerSet

View File

@ -165,6 +165,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
return nil
}
mode, ourTD := cs.modeAndLocalHead()
op := peerToSyncOp(mode, peer)
if op.td.Cmp(ourTD) <= 0 {
return nil // We're in sync.

View File

@ -52,8 +52,6 @@ func (l *JSONLogger) CaptureFault(pc uint64, op vm.OpCode, gas uint64, cost uint
l.CaptureState(pc, op, gas, cost, scope, nil, depth, err)
}
func (l *JSONLogger) CaptureFault(*EVM, uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {}
// CaptureState outputs state information on the logger.
func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
memory := scope.Memory
@ -73,7 +71,7 @@ func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco
log.Memory = memory.Data()
}
if !l.cfg.DisableStack {
log.Stack = stack.data
log.Stack = stack.Data()
}
if l.cfg.EnableReturnData {
log.ReturnData = rData

View File

@ -17,9 +17,7 @@
package tracers
import (
"encoding/json"
"math/big"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -126,99 +124,3 @@ func BenchmarkTransactionTrace(b *testing.B) {
tracer.Reset()
}
}
// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison
func jsonEqual(x, y interface{}) bool {
xTrace := new(callTrace)
yTrace := new(callTrace)
if xj, err := json.Marshal(x); err == nil {
json.Unmarshal(xj, xTrace)
} else {
return false
}
if yj, err := json.Marshal(y); err == nil {
json.Unmarshal(yj, yTrace)
} else {
return false
}
return reflect.DeepEqual(xTrace, yTrace)
}
func BenchmarkTransactionTrace(b *testing.B) {
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
from := crypto.PubkeyToAddress(key.PublicKey)
gas := uint64(1000000) // 1M gas
to := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
signer := types.LatestSignerForChainID(big.NewInt(1337))
tx, err := types.SignNewTx(key, signer,
&types.LegacyTx{
Nonce: 1,
GasPrice: big.NewInt(500),
Gas: gas,
To: &to,
})
if err != nil {
b.Fatal(err)
}
txContext := vm.TxContext{
Origin: from,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: common.Address{},
BlockNumber: new(big.Int).SetUint64(uint64(5)),
Time: new(big.Int).SetUint64(uint64(5)),
Difficulty: big.NewInt(0xffffffff),
GasLimit: gas,
}
alloc := core.GenesisAlloc{}
// The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns
// the address
loop := []byte{
byte(vm.JUMPDEST), // [ count ]
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{
Nonce: 1,
Code: loop,
Balance: big.NewInt(1),
}
alloc[from] = core.GenesisAccount{
Nonce: 1,
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
// Create the tracer, the EVM environment and run it
tracer := vm.NewStructLogger(&vm.LogConfig{
Debug: false,
//DisableStorage: true,
//DisableMemory: true,
//DisableReturnData: true,
})
evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
_, err = st.TransitionDb()
if err != nil {
b.Fatal(err)
}
statedb.RevertToSnapshot(snap)
if have, want := len(tracer.StructLogs()), 244752; have != want {
b.Fatalf("trace wrong, want %d steps, have %d", want, have)
}
tracer.Reset()
}
}

View File

@ -424,6 +424,7 @@ func testChainID(t *testing.T, client *rpc.Client) {
func testGetBlock(t *testing.T, client *rpc.Client) {
ec := NewClient(client)
// Get current block number
blockNumber, err := ec.BlockNumber(context.Background())
if err != nil {
@ -477,6 +478,7 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) {
if progress != nil {
t.Fatalf("unexpected progress: %v", progress)
}
// NetworkID
networkID, err := ec.NetworkID(context.Background())
if err != nil {

View File

@ -90,39 +90,6 @@ var (
Name: "trace",
Usage: "Write execution trace to the given file",
}
// (Deprecated April 2020)
legacyPprofPortFlag = cli.IntFlag{
Name: "pprofport",
Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)",
Value: 6060,
}
legacyPprofAddrFlag = cli.StringFlag{
Name: "pprofaddr",
Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)",
Value: "127.0.0.1",
}
legacyMemprofilerateFlag = cli.IntFlag{
Name: "memprofilerate",
Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)",
Value: runtime.MemProfileRate,
}
legacyBlockprofilerateFlag = cli.IntFlag{
Name: "blockprofilerate",
Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)",
}
legacyCpuprofileFlag = cli.StringFlag{
Name: "cpuprofile",
Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)",
}
legacyBacktraceAtFlag = cli.StringFlag{
Name: "backtrace",
Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\") (deprecated, use --log.backtrace)",
Value: "",
}
legacyDebugFlag = cli.BoolFlag{
Name: "debug",
Usage: "Prepends log messages with call-site location (file and line number) (deprecated, use --log.debug)",
}
)
// Flags holds all command-line flags required for debugging.

View File

@ -1196,7 +1196,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
"sha3Uncles": head.UncleHash,
"logsBloom": head.Bloom,
"stateRoot": head.Root,
"miner": miner,
"miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
"size": hexutil.Uint64(head.Size()),

View File

@ -202,8 +202,7 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"bignumber.js": bignumberJs,
"web3.js": web3Js,
"web3.js": web3Js,
}
// AssetDebug is true if the assets were built with the debug flag enabled.

View File

@ -477,17 +477,6 @@ func (lc *LightChain) GetTdOdr(ctx context.Context, hash common.Hash, number uin
return td
}
// GetTdOdr retrieves the total difficulty from the database or
// network by hash and number, caching it (associated with its hash) if found.
func (lc *LightChain) GetTdOdr(ctx context.Context, hash common.Hash, number uint64) *big.Int {
td := lc.GetTd(hash, number)
if td != nil {
return td
}
td, _ = GetTd(ctx, lc.odr, hash, number)
return td
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (lc *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header {

View File

@ -69,7 +69,6 @@ func main() {
nodes []*eth.Ethereum
enodes []*enode.Node
)
for _, sealer := range sealers {
// Start the node and wait until it's up
stack, ethBackend, err := makeSealer(genesis)

View File

@ -352,28 +352,6 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t
return time.Duration(int64(next))
}
// recalcRecommit recalculates the resubmitting interval upon feedback.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
var (
prevF = float64(prev.Nanoseconds())
next float64
)
if inc {
next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
max := float64(maxRecommitInterval.Nanoseconds())
if next > max {
next = max
}
} else {
next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
min := float64(minRecommit.Nanoseconds())
if next < min {
next = min
}
}
return time.Duration(int64(next))
}
// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
defer w.wg.Done()
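The deduplicated recalcRecommit above is an exponential moving average: the next interval blends the previous one with the target, nudged by a bias, then clamps to [minRecommit, maxRecommitInterval]. A worked example under assumed constants (the real values live in miner/worker.go):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed constants for illustration only.
const (
	intervalAdjustRatio = 0.1
	intervalAdjustBias  = 200 * 1000.0 * 1000.0 // 200ms, in nanoseconds
	maxRecommitInterval = 15 * time.Second
)

// recalc blends the previous interval with the target via an EMA, nudged by
// the bias, then clamps the result to the allowed range.
func recalc(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
	prevF := float64(prev.Nanoseconds())
	var next float64
	if inc {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
		if max := float64(maxRecommitInterval.Nanoseconds()); next > max {
			next = max
		}
	} else {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
		if min := float64(minRecommit.Nanoseconds()); next < min {
			next = min
		}
	}
	return time.Duration(int64(next))
}

func main() {
	// prev 3s, target 2s, increasing: 0.9*3s + 0.1*(2s+200ms) = 2.92s
	fmt.Println(recalc(time.Second, 3*time.Second, float64((2 * time.Second).Nanoseconds()), true))
}
```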

View File

@ -78,10 +78,10 @@ var (
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 413,
SectionHead: common.HexToHash("0x8aa8e64ceadcdc5f23bc41d2acb7295a261a5cf680bb00a34f0e01af08200083"),
CHTRoot: common.HexToHash("0x008af584d385a2610706c5a439d39f15ddd4b691c5d42603f65ae576f703f477"),
BloomRoot: common.HexToHash("0x5a081af71a588f4d90bced242545b08904ad4fb92f7effff2ceb6e50e6dec157"),
SectionIndex: 395,
SectionHead: common.HexToHash("0xbfca95b8c1de014e252288e9c32029825fadbff58285f5b54556525e480dbb5b"),
CHTRoot: common.HexToHash("0x2ccf3dbb58eb6375e037fdd981ca5778359e4b8fa0270c2878b14361e64161e7"),
BloomRoot: common.HexToHash("0x2d46ec65a6941a2dc1e682f8f81f3d24192021f492fdf6ef0fdd51acb0f4ba0f"),
}
// MainnetCheckpointOracle contains a set of configs for the main network oracle.
@ -790,6 +790,12 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.MergeForkBlock, newcfg.MergeForkBlock, head) {
return newCompatError("Merge Start fork block", c.MergeForkBlock, newcfg.MergeForkBlock)
}
if isForkIncompatible(c.PrimordialPulseBlock, newcfg.PrimordialPulseBlock, head) {
return newCompatError("PrimordialPulse fork block", c.PrimordialPulseBlock, newcfg.PrimordialPulseBlock)
}
if isForkIncompatible(c.SystemZeroBlock, newcfg.SystemZeroBlock, head) {
return newCompatError("SystemZero fork block", c.SystemZeroBlock, newcfg.SystemZeroBlock)
}
return nil
}
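The two new checks follow the usual checkCompatible rule: a fork block may only be rescheduled while the chain head has passed neither the stored nor the new activation height. A standalone paraphrase of the helpers involved (semantics as in params/config.go; this copy is for illustration only):

```go
package main

import (
	"fmt"
	"math/big"
)

// isForkIncompatible reports whether a fork scheduled at stored cannot be
// rescheduled to new, because the head already passed one of them.
func isForkIncompatible(stored, new, head *big.Int) bool {
	return (isForked(stored, head) || isForked(new, head)) && !configNumEqual(stored, new)
}

// isForked reports whether a fork at block s is active at the given head.
func isForked(s, head *big.Int) bool {
	if s == nil || head == nil {
		return false
	}
	return s.Cmp(head) <= 0
}

func configNumEqual(x, y *big.Int) bool {
	if x == nil {
		return y == nil
	}
	if y == nil {
		return x == nil
	}
	return x.Cmp(y) == 0
}

func main() {
	head := big.NewInt(1_000_000)
	// Moving a fork the head already passed is incompatible.
	fmt.Println(isForkIncompatible(big.NewInt(900_000), big.NewInt(1_100_000), head)) // true
	// Moving a fork that is still in the future is fine.
	fmt.Println(isForkIncompatible(big.NewInt(1_200_000), big.NewInt(1_300_000), head)) // false
}
```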

View File

@ -129,10 +129,6 @@ const (
// Precompiled contract gas prices
//TODO need further discussion
TendermintHeaderValidateGas uint64 = 3000 // Gas for validating tendermint consensus state
IAVLMerkleProofValidateGas uint64 = 3000 // Gas for validate merkle proof
EcrecoverGas uint64 = 3000 // Elliptic curve sender recovery gas price
Sha256BaseGas uint64 = 60 // Base price for a SHA256 operation
Sha256PerWordGas uint64 = 12 // Per-word price for a SHA256 operation

View File

@ -186,6 +186,10 @@ var (
accounts.MimetypeClique,
0x02,
}
ApplicationParlia = SigFormat{
accounts.MimetypeParlia,
0x03,
}
TextPlain = SigFormat{
accounts.MimetypeTextPlain,
0x45,

View File

@ -174,7 +174,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType
case apitypes.ApplicationParlia.Mime:
stringData, ok := data.(string)
if !ok {
return nil, useEthereumV, fmt.Errorf("input for %v must be an hex-encoded string", ApplicationParlia.Mime)
return nil, useEthereumV, fmt.Errorf("input for %v must be an hex-encoded string", apitypes.ApplicationParlia.Mime)
}
parliaData, err := hexutil.Decode(stringData)
if err != nil {
@ -196,7 +196,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType
if err != nil {
return nil, useEthereumV, err
}
messages := []*NameValueType{
messages := []*apitypes.NameValueType{
{
Name: "Parlia header",
Typ: "parlia",

View File

@ -48,9 +48,6 @@ func TestState(t *testing.T) {
// Uses 1GB RAM per tested fork
st.skipLoad(`^stStaticCall/static_Call1MB`)
// Uses 1GB RAM per tested fork
st.skipLoad(`^stStaticCall/static_Call1MB`)
// Broken tests:
// Expected failures:
//st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Byzantium/0`, "bug in test")