Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2025-01-04 08:44:28 +00:00
eb900a8193
* refactor initial sync to prevent reorg infinite loops * lint * fixed build * passing tests * tests passing * terence suggestion * new attempt * clean up and refactor sync service * complete the new initial sync logic * revert head * init sync working * config for blockchain receive block * all works * builds * fix a few more tests * init sync tests pass * revert scripts * revert accounts changes * lint * lint2 * travis lint * fix build * fix single use argument * any peer * imports spacing * imports * ready for a rolling restart * add todo * fork choice in blocks when exiting sync * readd finalized state root to requests * successful build * revert blockchain config * old config reversion * initial sync tests pass * initial sync full test works * lint * use the new block processing api * new proto defs * init sync functions again * remove sync polling * tests fixed * fixed catching up with chain * tests pass * spacing * lint * goimports * add changes * add lock and conditional to prevent multiple goroutines * make reg sync synchronous * add * fixed the parent block issue * fix errors in chain service * tests pass * check nil block * typo * fix nil state * merge & conflicts * revert synchronus reg sync * add more spans to state db * fix lint * lint
70 lines
1.9 KiB
Go
package initialsync

import (
	"context"
	"errors"
	"fmt"
	"runtime/debug"

	"github.com/gogo/protobuf/proto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/p2p"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)
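// checkBlockValidity rejects a received block whose slot is lower than the
// last finalized slot recorded in the current beacon state.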
func (s *InitialSync) checkBlockValidity(ctx context.Context, block *pb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.sync.initial-sync.checkBlockValidity")
	defer span.End()
	beaconState, err := s.db.State(ctx)
	if err != nil {
		return fmt.Errorf("failed to get beacon state: %v", err)
	}

	if block.Slot < beaconState.FinalizedEpoch*params.BeaconConfig().SlotsPerEpoch {
		return errors.New(debugError + "discarding received block with a slot number smaller than the last finalized slot")
	}
	// The proposer's attestation is not verified here, as other nodes only store
	// blocks, not proposer attestations.
	return nil
}
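// doesParentExist returns true if the parent block referenced by the given
// block is already stored in the beacon database.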
func (s *InitialSync) doesParentExist(block *pb.BeaconBlock) bool {
	parentHash := bytesutil.ToBytes32(block.ParentRootHash32)
	return s.db.HasBlock(parentHash)
}
// safelyHandleMessage will recover and log any panic that occurs from the
// function argument.
func safelyHandleMessage(fn func(p2p.Message), msg p2p.Message) {
	defer func() {
		if r := recover(); r != nil {
			printedMsg := "message contains no data"
			if msg.Data != nil {
				printedMsg = proto.MarshalTextString(msg.Data)
			}
			log.WithFields(logrus.Fields{
				"r":   r,
				"msg": printedMsg,
			}).Error("Panicked when handling p2p message! Recovering...")

			debug.PrintStack()

			if msg.Ctx == nil {
				return
			}
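			// If the message carries a tracing context, record the panic on its span.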
			if span := trace.FromContext(msg.Ctx); span != nil {
				span.SetStatus(trace.Status{
					Code:    trace.StatusCodeInternal,
					Message: fmt.Sprintf("Panic: %v", r),
				})
			}
		}
	}()

	// Fingers crossed that it doesn't panic...
	fn(msg)
}
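// exampleListenLoop is an illustrative sketch and is not part of the original
// file: it shows one way safelyHandleMessage could wrap a handler inside a
// subscription loop so that a panic in the handler does not kill the loop.
// The channel and handler names here are hypothetical.
func (s *InitialSync) exampleListenLoop(messageChan chan p2p.Message, processBlock func(p2p.Message)) {
	for msg := range messageChan {
		// Each message is handled behind the panic-recovery wrapper above.
		safelyHandleMessage(processBlock, msg)
	}
}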