Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2024-12-27 21:57:16 +00:00, commit cc741ed8af
197 lines · 6.9 KiB · Go
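// Package archiver implements a service that, epoch by epoch, persists
// checkpointed beacon chain data (committee seeds, validator set changes,
// participation metrics, and validator balances) to the beacon chain database.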
package archiver

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "archiver")

// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
	ctx                  context.Context
	cancel               context.CancelFunc
	beaconDB             db.NoHeadAccessDatabase
	headFetcher          blockchain.HeadFetcher
	participationFetcher blockchain.ParticipationFetcher
	stateNotifier        statefeed.Notifier
	lastArchivedEpoch    uint64
}

// Config options for the archiver service.
type Config struct {
	BeaconDB             db.NoHeadAccessDatabase
	HeadFetcher          blockchain.HeadFetcher
	ParticipationFetcher blockchain.ParticipationFetcher
	StateNotifier        statefeed.Notifier
}

// NewArchiverService initializes the service from configuration options.
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
	ctx, cancel := context.WithCancel(ctx)
	return &Service{
		ctx:                  ctx,
		cancel:               cancel,
		beaconDB:             cfg.BeaconDB,
		headFetcher:          cfg.HeadFetcher,
		participationFetcher: cfg.ParticipationFetcher,
		stateNotifier:        cfg.StateNotifier,
	}
}

// Start the archiver service event loop.
func (s *Service) Start() {
	go s.run(s.ctx)
}

// Stop the archiver service event loop.
func (s *Service) Stop() error {
	defer s.cancel()
	return nil
}

// Status reports the health status of the archiver. Returning nil means the
// service is running correctly without error.
func (s *Service) Status() error {
	return nil
}

// We archive committee information (proposer and attester seeds) for the given
// epoch, derived from the head state.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
	proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		return errors.Wrap(err, "could not generate seed")
	}
	attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		return errors.Wrap(err, "could not generate seed")
	}
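	// The per-epoch proposer and attester seeds are persisted so that proposer
	// and committee assignments for this epoch can later be recomputed without
	// replaying historical beacon states.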
	info := &pb.ArchivedCommitteeInfo{
		ProposerSeed: proposerSeed[:],
		AttesterSeed: attesterSeed[:],
	}
	if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
		return errors.Wrap(err, "could not archive committee info")
	}
	return nil
}

// We archive active validator set changes that happened during the previous epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
	prevEpoch := epoch - 1
	vals := headState.Validators()
	activations := validators.ActivatedValidatorIndices(prevEpoch, vals)
	slashings := validators.SlashedValidatorIndices(prevEpoch, vals)
	activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
	if err != nil {
		return errors.Wrap(err, "could not get active validator count")
	}
	exited, err := validators.ExitedValidatorIndices(prevEpoch, vals, activeValidatorCount)
	if err != nil {
		return errors.Wrap(err, "could not determine exited validator indices")
	}
	activeSetChanges := &pb.ArchivedActiveSetChanges{
		Activated: activations,
		Exited:    exited,
		Slashed:   slashings,
	}
	if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
		return errors.Wrap(err, "could not archive active validator set changes")
	}
	return nil
}

// We archive validator participation metrics for the epoch, as reported by the
// participation fetcher.
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
	p := s.participationFetcher.Participation(epoch)
	participation := &ethpb.ValidatorParticipation{}
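	// If the participation fetcher has no metrics for this epoch (nil), an empty
	// record is archived. Otherwise the global participation rate is the
	// previous-epoch target-attesting balance divided by the eligible balance.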
	if p != nil {
		participation = &ethpb.ValidatorParticipation{
			EligibleEther:           p.PrevEpoch,
			VotedEther:              p.PrevEpochTargetAttesters,
			GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
		}
	}
	return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
}

// We archive validator balances for the epoch.
func (s *Service) archiveBalances(ctx context.Context, balances []uint64, epoch uint64) error {
	if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
		return errors.Wrap(err, "could not archive balances")
	}
	return nil
}
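// run subscribes to the state feed and, on each block processed event, archives
// committee seeds, validator set changes, participation metrics, and balances
// for the appropriate epoch. It returns when the service context is canceled or
// the feed subscription fails.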
func (s *Service) run(ctx context.Context) {
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()
	for {
		select {
		case event := <-stateChannel:
			if event.Type == statefeed.BlockProcessed {
				data := event.Data.(*statefeed.BlockProcessedData)
				log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
				headState, err := s.headFetcher.HeadState(ctx)
				if err != nil {
					log.WithError(err).Error("Head state is not available")
					continue
				}
				slot := headState.Slot()
				currentEpoch := helpers.SlotToEpoch(slot)
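				// Archive when the block sits at an epoch-end slot (archiving the
				// current epoch) or, for a non-boundary block, when the head epoch is
				// beyond the last archived epoch (archiving the previous epoch);
				// otherwise skip.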
				if !helpers.IsEpochEnd(slot) && currentEpoch <= s.lastArchivedEpoch {
					continue
				}
				epochToArchive := currentEpoch
				if !helpers.IsEpochEnd(slot) {
					epochToArchive--
				}
				if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive committee info")
					continue
				}
				if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive active validator set changes")
					continue
				}
				if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive validator participation")
					continue
				}
				if err := s.archiveBalances(ctx, headState.Balances(), epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive validator balances and active indices")
					continue
				}
				log.WithField(
					"epoch",
					epochToArchive,
				).Debug("Successfully archived beacon chain data during epoch")
				s.lastArchivedEpoch = epochToArchive
			}
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			return
		case err := <-stateSub.Err():
			log.WithError(err).Error("Subscription to state feed notifier failed")
			return
		}
	}
}
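// runArchiverExample is a minimal, hypothetical sketch of how the archiver
// above might be wired up and started, assuming the caller already has a
// beacon database handle and implementations of the HeadFetcher,
// ParticipationFetcher, and Notifier interfaces; in the running node these are
// supplied by other services when the archiver is registered.
func runArchiverExample(
	ctx context.Context,
	beaconDB db.NoHeadAccessDatabase,
	headFetcher blockchain.HeadFetcher,
	participationFetcher blockchain.ParticipationFetcher,
	notifier statefeed.Notifier,
) *Service {
	svc := NewArchiverService(ctx, &Config{
		BeaconDB:             beaconDB,
		HeadFetcher:          headFetcher,
		ParticipationFetcher: participationFetcher,
		StateNotifier:        notifier,
	})
	// Start is non-blocking: it launches the run loop in its own goroutine, and
	// archiving then proceeds as block processed events arrive on the state feed.
	svc.Start()
	return svc
}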