Add support for syncing pre-fork blocks

Shane Bammel 2023-01-19 19:24:08 -06:00
parent c9e7be708b
commit 40db66c21c
11 changed files with 119 additions and 42 deletions

View File

@@ -80,18 +80,20 @@ func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error {
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
if err != nil {
return err
}
if !reached {
return beacon.ethone.VerifyHeader(chain, header)
return beacon.ethone.VerifyHeader(chain, header, parent)
}
// Short circuit if the parent is not known
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
parent = chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
}
// Sanity checks passed, do a proper verification
return beacon.verifyHeader(chain, header, parent)
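
Note: the widened VerifyHeader signature threads an optional parent header through every engine. Below is a minimal sketch of that contract using stand-in types rather than the real go-ethereum ones (header, chainReader and the error value are illustrative): when the caller already holds the parent, as the batch verifier does for pre-fork headers streamed from a peer, it is used directly; otherwise the engine falls back to a local chain lookup.

package main

import (
    "errors"
    "fmt"
)

// errUnknownAncestor stands in for consensus.ErrUnknownAncestor.
var errUnknownAncestor = errors.New("unknown ancestor")

// header is a stripped-down stand-in for *types.Header.
type header struct {
    number     uint64
    parentHash string
}

// chainReader is a map-backed stand-in for consensus.ChainHeaderReader, keyed by header hash.
type chainReader map[string]*header

func (c chainReader) getHeader(hash string, number uint64) *header {
    if h, ok := c[hash]; ok && h.number == number {
        return h
    }
    return nil
}

// verifyHeader mirrors the optional-parent contract: use the caller-supplied
// parent when present (it may not be in the local database yet), otherwise
// fall back to a chain lookup and report an unknown ancestor if that misses.
func verifyHeader(chain chainReader, h *header, parent *header) error {
    if parent == nil {
        parent = chain.getHeader(h.parentHash, h.number-1)
        if parent == nil {
            return errUnknownAncestor
        }
    }
    // a real engine would continue with full verification against parent
    _ = parent
    return nil
}

func main() {
    chain := chainReader{}
    head := &header{number: 2, parentHash: "0xaa"}
    fmt.Println(verifyHeader(chain, head, nil))                // unknown ancestor
    fmt.Println(verifyHeader(chain, head, &header{number: 1})) // <nil>
}
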
@@ -128,21 +130,16 @@ func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []
var (
preHeaders = headers
postHeaders []*types.Header
td = new(big.Int).Set(ptd)
tdPassed bool
)
for i, header := range headers {
if tdPassed {
// PulseChain Special Case:
// Partition based on header difficulty (IsPoSHeader) instead of TTD being reached
// so we can properly handle ETH Beacon blocks below the increased PulseChain TTD.
if beacon.IsPoSHeader(header) {
preHeaders = headers[:i]
postHeaders = headers[i:]
break
}
td = td.Add(td, header.Difficulty)
if td.Cmp(ttd) >= 0 {
// This is the last PoW header, it still belongs to
// the preHeaders, so we cannot split+break yet.
tdPassed = true
}
}
return preHeaders, postHeaders, nil
}
@@ -151,31 +148,73 @@ func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
// VerifyHeaders expects the headers to be ordered and continuous.
//
// Normal Cases:
// 1. x * POW blocks
// 2. x * POS blocks
// 3. x * POW blocks => y * POS blocks
//
// Special Cases for PulseChain:
// 4. x * POS blocks[eth] => POW fork block[pls]
// 5. x * POS blocks[eth] => POW fork block[pls] => y * POS blocks[pls]
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
preHeaders, postHeaders, err := beacon.splitHeaders(chain, headers)
if err != nil {
return make(chan struct{}), errOut(len(headers), err)
}
// Case 1
if len(postHeaders) == 0 {
return beacon.ethone.VerifyHeaders(chain, headers)
}
if len(preHeaders) == 0 {
chainCfg := chain.Config()
primordialPulseIndex := 0
if chainCfg.PrimordialPulseAhead(postHeaders[0].Number) && !chainCfg.PrimordialPulseAhead(postHeaders[len(postHeaders)-1].Number) {
primordialPulseIndex = int(new(big.Int).Sub(chainCfg.PrimordialPulseBlock, postHeaders[0].Number).Uint64())
}
// Case 2
if len(preHeaders) == 0 && primordialPulseIndex == 0 {
return beacon.verifyHeaders(chain, headers, nil)
}
// The transition point exists in the middle, separate the headers
// into two batches and apply different verification rules for them.
// into batches and apply different verification rules for them.
var (
abort = make(chan struct{})
results = make(chan error, len(headers))
)
go func() {
var (
old, new, out = 0, len(preHeaders), 0
errors = make([]error, len(headers))
done = make([]bool, len(headers))
oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders)
newDone, newResult = beacon.verifyHeaders(chain, postHeaders, preHeaders[len(preHeaders)-1])
oldIdx, out = 0, 0
errors = make([]error, len(headers))
done = make([]bool, len(headers))
oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders)
lastPowHeader *types.Header
pulseChainForkHeader *types.Header
preforkPosIdx = len(preHeaders)
preforkPosHeaders = postHeaders
postforkPosIdx = len(headers)
postforkPosHeaders = []*types.Header{}
)
// Case 3
if len(preHeaders) > 0 {
lastPowHeader = preHeaders[len(preHeaders)-1]
}
// Handle fork partitioning and verification for cases 4 and 5
if primordialPulseIndex > 0 {
preforkPosHeaders = postHeaders[:primordialPulseIndex]
pulseChainForkHeader = postHeaders[primordialPulseIndex]
// Verify the fork block
forkBlockResult := beacon.ethone.VerifyHeader(chain, pulseChainForkHeader, postHeaders[primordialPulseIndex-1])
forkBlockIdx := preforkPosIdx + len(preforkPosHeaders)
errors[forkBlockIdx], done[forkBlockIdx] = forkBlockResult, true
// Can be empty in case 4
postforkPosHeaders = postHeaders[primordialPulseIndex+1:]
postforkPosIdx = forkBlockIdx + 1
}
preforkPosDone, preforkPosResult := beacon.verifyHeaders(chain, preforkPosHeaders, lastPowHeader)
postforkPosDone, postforkPosResult := beacon.verifyHeaders(chain, postforkPosHeaders, pulseChainForkHeader)
// Collect the results
for {
for ; done[out]; out++ {
@@ -186,16 +225,20 @@ func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
}
select {
case err := <-oldResult:
if !done[old] { // skip TTD-verified failures
errors[old], done[old] = err, true
if !done[oldIdx] { // skip TTD-verified failures
errors[oldIdx], done[oldIdx] = err, true
}
old++
case err := <-newResult:
errors[new], done[new] = err, true
new++
oldIdx++
case err := <-preforkPosResult:
errors[preforkPosIdx], done[preforkPosIdx] = err, true
preforkPosIdx++
case err := <-postforkPosResult:
errors[postforkPosIdx], done[postforkPosIdx] = err, true
postforkPosIdx++
case <-abort:
close(oldDone)
close(newDone)
close(preforkPosDone)
close(postforkPosDone)
return
}
}
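
For orientation, here is a minimal sketch of how a batch ends up split into the four groups handled above (PoW, pre-fork PoS, the PulseChain fork block, post-fork PoS). The header type, block numbers and fork height are made up for illustration; the real code works on *types.Header slices via splitHeaders and primordialPulseIndex.

package main

import "fmt"

// header is a stripped-down stand-in for *types.Header, carrying only what
// the partitioning needs: the block number and whether it is a PoS header
// (zero difficulty).
type header struct {
    number uint64
    isPoS  bool
}

// partition sketches the batching performed above: a run of PoW headers,
// then pre-fork PoS headers (Ethereum mainnet), optionally the PulseChain
// fork block itself (a PoW block verified on its own), then post-fork PoS
// headers.
func partition(headers []header, forkBlock uint64) (pow, preforkPoS []header, fork *header, postforkPoS []header) {
    i := 0
    for i < len(headers) && !headers[i].isPoS {
        i++
    }
    pow, rest := headers[:i], headers[i:]
    for j := range rest {
        if rest[j].number == forkBlock {
            return pow, rest[:j], &rest[j], rest[j+1:]
        }
    }
    return pow, rest, nil, nil
}

func main() {
    // Case 5: eth PoS blocks => PulseChain PoW fork block => PulseChain PoS blocks.
    hs := []header{{100, true}, {101, true}, {102, false}, {103, true}}
    pow, pre, fork, post := partition(hs, 102)
    fmt.Println(len(pow), len(pre), fork.number, len(post)) // 0 2 102 1
}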

View File

@@ -216,8 +216,12 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) {
}
// VerifyHeader checks whether a header conforms to the consensus rules.
func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
return c.verifyHeader(chain, header, nil)
func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error {
var parentHeaders []*types.Header
if parent != nil {
parentHeaders = []*types.Header{parent}
}
return c.verifyHeader(chain, header, parentHeaders)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The

View File

@@ -67,7 +67,7 @@ type Engine interface {
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine.
VerifyHeader(chain ChainHeaderReader, header *types.Header) error
VerifyHeader(chain ChainHeaderReader, header *types.Header, parent *types.Header) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and

View File

@@ -100,15 +100,17 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error {
// Short circuit if the header is known, or its parent not
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
return nil
}
parent := chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
parent = chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
}
// Sanity checks passed, do a proper verification
return ethash.verifyHeader(chain, header, parent, false, time.Now().Unix())

View File

@@ -410,7 +410,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// The first thing the node will do is reconstruct the verification data for
// the head block (ethash cache or clique voting snapshot). Might as well do
// it in advance.
bc.engine.VerifyHeader(bc, bc.CurrentHeader())
bc.engine.VerifyHeader(bc, bc.CurrentHeader(), nil)
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range BadHashes {

View File

@@ -149,7 +149,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
for _, block := range chain {
// Try and process the block
err := blockchain.engine.VerifyHeader(blockchain, block.Header())
err := blockchain.engine.VerifyHeader(blockchain, block.Header(), nil)
if err == nil {
err = blockchain.validator.ValidateBody(block)
}
@@ -188,7 +188,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
for _, header := range chain {
// Try and validate the header
if err := blockchain.engine.VerifyHeader(blockchain, header); err != nil {
if err := blockchain.engine.VerifyHeader(blockchain, header, nil); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)

View File

@@ -108,6 +108,10 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b
currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
}
reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
} else if f.chain.Config().PrimordialPulseAhead(extern.Number) {
// Pre-fork Ethereum mainnet PoS blocks have a difficulty of 0.
// These blocks should be accepted onto the chain head.
reorg = true
}
return reorg, nil
}
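
A simplified sketch of the fork-choice rule above, with plain integers standing in for headers and total difficulties (the preserve/tie-break logic of the real ReorgNeeded is collapsed to a comment, and the fork height is hypothetical):

package main

import "fmt"

// reorgNeeded condenses the decision above. Pre-fork Ethereum PoS headers
// carry zero difficulty, so importing one leaves the total difficulty
// unchanged and the usual "higher TD wins" rule would never extend the head
// with it; a header that is still ahead of the PrimordialPulse fork is
// accepted anyway.
func reorgNeeded(localTD, externTD, localNum, externNum, primordialPulseBlock uint64) bool {
    if externTD != localTD {
        return externTD > localTD
    }
    switch {
    case externNum < localNum:
        return true
    case externNum == localNum:
        return false // the real code tie-breaks here (preserve flag / coin flip)
    case externNum < primordialPulseBlock:
        return true // pre-fork PoS child: accept despite zero difficulty
    default:
        return false
    }
}

func main() {
    const fork = 17_233_000 // hypothetical PrimordialPulse height
    // Zero-difficulty pre-fork child of the current head: same TD, higher number.
    fmt.Println(reorgNeeded(1000, 1000, fork-10, fork-9, fork)) // true
    // Same situation after the fork: not accepted on TD alone.
    fmt.Println(reorgNeeded(1000, 1000, fork+1, fork+2, fork)) // false
}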

View File

@@ -39,15 +39,22 @@ type sigCache struct {
// MakeSigner returns a Signer based on the given chain config and block number.
func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer {
var signer Signer
chainID := config.ChainID
if config.PrimordialPulseAhead(blockNumber) {
// Use the Ethereum mainnet chain ID for pre-fork transactions
chainID = big.NewInt(1)
}
switch {
case config.IsCancun(blockNumber, blockTime):
signer = NewCancunSigner(config.ChainID)
case config.IsLondon(blockNumber):
signer = NewLondonSigner(config.ChainID)
signer = NewLondonSigner(chainID)
case config.IsBerlin(blockNumber):
signer = NewEIP2930Signer(config.ChainID)
signer = NewEIP2930Signer(chainID)
case config.IsEIP155(blockNumber):
signer = NewEIP155Signer(config.ChainID)
signer = NewEIP155Signer(chainID)
case config.IsHomestead(blockNumber):
signer = HomesteadSigner{}
default:
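
A standalone sketch of the signer selection above (signerChainID is a hypothetical helper; 369 is used here for the PulseChain chain ID and the fork height is made up): transactions mined before the PrimordialPulse fork are recovered with the Ethereum mainnet chain ID, everything from the fork block onward with the configured one.

package main

import (
    "fmt"
    "math/big"
)

// signerChainID picks the chain ID the signer should use for a given block:
// Ethereum mainnet (1) strictly before the PrimordialPulse fork, the
// configured chain ID from the fork block onward.
func signerChainID(configChainID, primordialPulseBlock, blockNumber *big.Int) *big.Int {
    if primordialPulseBlock != nil && primordialPulseBlock.Cmp(blockNumber) > 0 {
        return big.NewInt(1) // pre-fork: Ethereum mainnet
    }
    return configChainID
}

func main() {
    pulseID := big.NewInt(369)     // PulseChain chain ID (illustrative)
    fork := big.NewInt(17_233_000) // hypothetical fork height

    fmt.Println(signerChainID(pulseID, fork, big.NewInt(15_000_000))) // 1
    fmt.Println(signerChainID(pulseID, fork, fork))                   // 369
}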

View File

@@ -137,13 +137,25 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
blockCtx.BlobBaseFee = new(big.Int)
}
}
// flexChainConfig allows processing pre-fork transactions on PulseChain
flexChainConfig := chainConfig
if chainConfig.PrimordialPulseAhead(blockCtx.BlockNumber) {
// Create a shallow copy of the chainConfig struct and set its chain ID to Ethereum mainnet
chainCfgCpy := *chainConfig
chainCfgCpy.ChainID = big.NewInt(1)
// Use the copy for this EVM instance
flexChainConfig = &chainCfgCpy
}
evm := &EVM{
Context: blockCtx,
TxContext: txCtx,
StateDB: statedb,
Config: config,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
chainConfig: flexChainConfig,
chainRules: flexChainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
}
evm.interpreter = NewEVMInterpreter(evm)
return evm
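
The copy-and-override pattern used above, reduced to a standalone sketch with a stand-in config struct (cfg is not the real params.ChainConfig): the struct is copied by value, only ChainID is replaced, and the shared config stays untouched for post-fork callers. A shallow copy still shares pointer-typed fork fields, which is fine here since only the chain ID needs to differ.

package main

import (
    "fmt"
    "math/big"
)

// cfg is a minimal stand-in for params.ChainConfig.
type cfg struct {
    ChainID     *big.Int
    LondonBlock *big.Int
}

func main() {
    shared := &cfg{ChainID: big.NewInt(369), LondonBlock: big.NewInt(0)} // 369: PulseChain (illustrative)

    cpy := *shared              // shallow copy of the struct
    cpy.ChainID = big.NewInt(1) // pre-fork blocks run with the Ethereum mainnet chain ID
    flex := &cpy

    fmt.Println(shared.ChainID, flex.ChainID) // 369 1: the shared config is unchanged
}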

View File

@@ -215,7 +215,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
return errors.New("unexpected post-merge header")
}
}
return h.chain.Engine().VerifyHeader(h.chain, header)
return h.chain.Engine().VerifyHeader(h.chain, header, nil)
}
heighter := func() uint64 {
return h.chain.CurrentBlock().Number.Uint64()

View File

@@ -591,6 +591,11 @@ func (c *ChainConfig) IsPrimordialPulseBlock(num *big.Int) bool {
return c.PrimordialPulseBlock != nil && c.PrimordialPulseBlock.Cmp(num) == 0
}
// PrimordialPulseAhead returns true if the PrimordialPulse block is configured and still in the future.
func (c *ChainConfig) PrimordialPulseAhead(num *big.Int) bool {
return c.PrimordialPulseBlock != nil && c.PrimordialPulseBlock.Cmp(num) > 0
}
// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError {
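
Boundary semantics of the new predicate next to the existing IsPrimordialPulseBlock, as a standalone sketch (fork height made up, plain functions instead of ChainConfig methods): PrimordialPulseAhead is true strictly before the fork block, IsPrimordialPulseBlock only at the fork block itself.

package main

import (
    "fmt"
    "math/big"
)

// primordialPulseAhead mirrors ChainConfig.PrimordialPulseAhead.
func primordialPulseAhead(forkBlock, num *big.Int) bool {
    return forkBlock != nil && forkBlock.Cmp(num) > 0
}

// isPrimordialPulseBlock mirrors ChainConfig.IsPrimordialPulseBlock.
func isPrimordialPulseBlock(forkBlock, num *big.Int) bool {
    return forkBlock != nil && forkBlock.Cmp(num) == 0
}

func main() {
    fork := big.NewInt(100) // hypothetical fork height
    for _, n := range []int64{99, 100, 101} {
        num := big.NewInt(n)
        fmt.Println(n, primordialPulseAhead(fork, num), isPrimordialPulseBlock(fork, num))
    }
    // 99 true false
    // 100 false true
    // 101 false false
}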