UX improvement for pre-merge sync with Teku (#4955)

Co-authored-by: giuliorebuffo <giuliorebuffo@system76-pc.localdomain>
This commit is contained in:
Giulio rebuffo 2022-08-08 11:26:34 +02:00 committed by GitHub
parent 1b20322b60
commit 7a64fe44eb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 36 additions and 41 deletions

View File

@ -313,23 +313,14 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E
}
block := types.NewBlockFromStorage(blockHash, &header, transactions, nil)
possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true)
possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, nil, true)
if err != nil {
return nil, err
}
if possibleStatus != nil {
return convertPayloadStatus(possibleStatus), nil
}
// If another payload is already commissioned then we just reply with syncing
if s.stageLoopIsBusy() {
// We are still syncing a commissioned payload
// TODO(yperbasis): not entirely correct since per the spec:
// The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree.
// For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process.
// https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation
log.Debug("[NewPayload] stage loop is busy")
return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil
}
s.lock.Lock()
defer s.lock.Unlock()
@ -347,7 +338,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E
}
// Check if we can make out a status from the payload hash/head hash.
func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) {
func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, forkchoiceMessage *engineapi.ForkChoiceMessage, newPayload bool) (*engineapi.PayloadStatus, error) {
// Determine which prefix to use for logs
var prefix string
if newPayload {
@ -361,7 +352,7 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H
}
if s.hd == nil {
return nil, nil
return nil, fmt.Errorf("headerdownload is nil")
}
tx, err := s.db.BeginRo(s.ctx)
@ -369,6 +360,14 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H
return nil, err
}
defer tx.Rollback()
// Some consensus layer clients sometimes send us repeated FCUs, which makes Erigon print a gazillion logs.
// E.g. Teku sometimes ends up spamming FCUs on the terminal block if it has not synced to that point.
if forkchoiceMessage != nil &&
forkchoiceMessage.FinalizedBlockHash == rawdb.ReadForkchoiceFinalized(tx) &&
forkchoiceMessage.HeadBlockHash == rawdb.ReadForkchoiceHead(tx) &&
forkchoiceMessage.SafeBlockHash == rawdb.ReadForkchoiceSafe(tx) {
return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil
}
header, err := rawdb.ReadHeaderByHash(tx, blockHash)
if err != nil {
@ -437,14 +436,10 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H
if parent == nil && s.hd.PosStatus() == headerdownload.Syncing {
return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil
}
return nil, nil
}
} else {
if header == nil {
if s.hd.PosStatus() == headerdownload.Syncing {
return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil
}
return nil, nil
}
@ -460,6 +455,13 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H
if blockHash != headHash && canonicalHash == blockHash {
return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil
}
}
// If another payload is already commissioned then we just reply with syncing
if s.stageLoopIsBusy() {
log.Debug(fmt.Sprintf("[%s] stage loop is busy", prefix))
return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil
}
return nil, nil
}
@ -526,18 +528,11 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r
FinalizedBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.FinalizedBlockHash),
}
status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false)
status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, &forkChoice, false)
if err != nil {
return nil, err
}
if status == nil && s.stageLoopIsBusy() {
log.Debug("[ForkChoiceUpdated] stage loop is busy")
return &remote.EngineForkChoiceUpdatedReply{
PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING},
}, nil
}
s.lock.Lock()
defer s.lock.Unlock()

View File

@ -186,7 +186,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body
return
}
// MakeBodiesCanonical does not support PoS.
if has && len(sb.body.Transactions) > 0 {
if has {
status = remote.EngineStatus_ACCEPTED
return
}