Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-28 14:17:17 +00:00)
053038446c
* plug forkchoice to blockchain service's block processing
* fixed tests
* more fixes...
* clean ups
* fixed test
* Update beacon-chain/blockchain/block_processing.go
* merged with 2006 and started fixing tests
* remove prints
* fixed tests
* lint
* include ops service
* if there's a skip slot, slot--
* fixed typo
* started working on test
* no fork choice in propose
* bleh, need to fix state generator first
* state gen takes input slot
* feedback
* fixed tests
* preston's feedback
* fmt
* removed extra logging
* add more logs
* fixed validator attest
* builds
* fixed save block
* children fix
* removed verbose logs
* fix fork choice
* right logs
* Add Prometheus Counter for Reorg (#2051)
* fetch every slot (#2052)
* test Fixes
* lint
* only regenerate state if there was a reorg
* better logging
* fixed seed
* better logging
* process skip slots in assignment requests
* fix lint
* disable state root computation
* filter attestations in regular sync
* log important items
* better info logs
* added spans to stategen
* span in stategen
* set validator deadline
* randao stuff
* disable sig verify
* lint
* lint
* save only using historical states
* use new goroutine for handling sync messages
* change default buffer sizes
* better p2p
* rem some useless logs
* lint
* sync tests complete
* complete tests
* tests fixed
* lint
* fix flakey att service
* PR feedback
* undo k8s changes
* Update beacon-chain/blockchain/block_processing.go
* Update beacon-chain/sync/regular_sync.go
* Add feature flag to enable compute state root
* add comment
* gazelle lint fix
149 lines
6.0 KiB
Go
package client

import (
	"context"
	"fmt"
	"time"

	pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	"github.com/prysmaticlabs/prysm/shared/bitutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

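// delay is the number of seconds the validator waits into a slot before attesting,
// giving the slot's block time to propagate to the beacon node.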
var delay = params.BeaconConfig().SecondsPerSlot / 2

// AttestToBlockHead completes the validator client's attester responsibility at a given slot.
// It fetches the latest beacon block head along with the latest canonical beacon state
// information in order to sign the block and include information about the validator's
// participation in voting on the block.
func (v *validator) AttestToBlockHead(ctx context.Context, slot uint64) {
	ctx, span := trace.StartSpan(ctx, "validator.AttestToBlockHead")
	defer span.End()

	v.waitToSlotMidpoint(ctx, slot)

	// First the validator should construct attestation_data, an AttestationData
	// object based upon the state at the assigned slot.
	attData := &pbp2p.AttestationData{
		Slot:                    slot,
		CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:], // Stub for Phase 0.
	}
	// We fetch the validator index as it is necessary to generate the aggregation
	// bitfield of the attestation itself.
	pubKey := v.key.PublicKey.Marshal()
	idxReq := &pb.ValidatorIndexRequest{
		PublicKey: pubKey,
	}
	validatorIndexRes, err := v.validatorClient.ValidatorIndex(ctx, idxReq)
	if err != nil {
		log.Errorf("Could not fetch validator index: %v", err)
		return
	}
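	// Request the validator's committee assignment so we know which shard and
	// committee the validator should attest for at this slot.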
	req := &pb.CommitteeAssignmentsRequest{
		EpochStart: slot,
		PublicKey:  [][]byte{pubKey},
	}
	resp, err := v.validatorClient.CommitteeAssignment(ctx, req)
	if err != nil {
		log.Errorf("Could not fetch crosslink committees at slot %d: %v",
			slot-params.BeaconConfig().GenesisSlot, err)
		return
	}
	// Set the attestation data's shard as the shard associated with the validator's
	// committee as retrieved by CrosslinkCommitteesAtSlot.
	attData.Shard = resp.Assignment[0].Shard

	// Fetch other necessary information from the beacon node in order to attest
	// including the justified epoch, epoch boundary information, and more.
	infoReq := &pb.AttestationDataRequest{
		Slot:  slot,
		Shard: resp.Assignment[0].Shard,
	}
	infoRes, err := v.attesterClient.AttestationDataAtSlot(ctx, infoReq)
	if err != nil {
		log.Errorf("Could not fetch necessary info to produce attestation at slot %d: %v",
			slot-params.BeaconConfig().GenesisSlot, err)
		return
	}
	// Set the attestation data's beacon block root = hash_tree_root(head) where head
	// is the validator's view of the head block of the beacon chain during the slot.
	attData.BeaconBlockRootHash32 = infoRes.BeaconBlockRootHash32
	// Set the attestation data's epoch boundary root = hash_tree_root(epoch_boundary)
	// where epoch_boundary is the block at the most recent epoch boundary in the
	// chain defined by head -- i.e. the BeaconBlock where block.slot == get_epoch_start_slot(slot_to_epoch(head.slot)).
	attData.EpochBoundaryRootHash32 = infoRes.EpochBoundaryRootHash32
	// Set the attestation data's latest crosslink root = state.latest_crosslinks[shard].shard_block_root
	// where state is the beacon state at head and shard is the validator's assigned shard.
	attData.LatestCrosslink = infoRes.LatestCrosslink
	// Set the attestation data's justified epoch = state.justified_epoch where state
	// is the beacon state at the head.
	attData.JustifiedEpoch = infoRes.JustifiedEpoch
	// Set the attestation data's justified block root = hash_tree_root(justified_block) where
	// justified_block is the block at state.justified_epoch in the chain defined by head.
	// On the server side, this is fetched by calling get_block_root(state, justified_epoch).
	attData.JustifiedBlockRootHash32 = infoRes.JustifiedBlockRootHash32

	// The validator now creates an Attestation object using the AttestationData as
	// set in the code above after all properties have been set.
	attestation := &pbp2p.Attestation{
		Data: attData,
	}

	// We set the custody bitfield to a slice of zero values as a stub for phase 0
	// of length (len(committee) + 7) // 8.
	attestation.CustodyBitfield = make([]byte, (len(resp.Assignment[0].Committee)+7)/8)

	// Find the index in committee to be used for
	// the aggregation bitfield
	var indexInCommittee int
	for i, vIndex := range resp.Assignment[0].Committee {
		if vIndex == validatorIndexRes.Index {
			indexInCommittee = i
			break
		}
	}

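	// Mark this validator's participation by setting the bit at its position
	// in the committee within the aggregation bitfield.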
	aggregationBitfield := bitutil.SetBitfield(indexInCommittee)
	attestation.AggregationBitfield = aggregationBitfield

	// TODO(#1366): Use BLS to generate an aggregate signature.
	attestation.AggregateSignature = []byte("signed")

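	// Log the head block root and attestation details before submitting to the beacon node.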
	log.WithField(
		"blockRoot", fmt.Sprintf("%#x", attData.BeaconBlockRootHash32),
	).Info("Current beacon chain head block")
	log.WithFields(logrus.Fields{
		"justifiedEpoch": attData.JustifiedEpoch - params.BeaconConfig().GenesisEpoch,
		"shard":          attData.Shard,
		"slot":           slot - params.BeaconConfig().GenesisSlot,
	}).Info("Attesting to beacon chain head...")

	log.Infof("Produced attestation with block root: %#x", attestation.Data.BeaconBlockRootHash32)
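	// Submit the completed attestation to the beacon node for processing.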
	attResp, err := v.attesterClient.AttestHead(ctx, attestation)
	if err != nil {
		log.Errorf("Could not submit attestation to beacon node: %v", err)
		return
	}
	log.WithFields(logrus.Fields{
		"attestationHash": fmt.Sprintf("%#x", attResp.AttestationHash),
		"shard":           attData.Shard,
		"slot":            slot - params.BeaconConfig().GenesisSlot,
	}).Info("Beacon node processed attestation successfully")
}

// waitToSlotMidpoint waits until halfway through the current slot period
// such that any blocks from this slot have time to reach the beacon node
// before creating the attestation.
func (v *validator) waitToSlotMidpoint(ctx context.Context, slot uint64) {
	_, span := trace.StartSpan(ctx, "validator.waitToSlotMidpoint")
	defer span.End()

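	// The target broadcast time is genesis time + slot*SecondsPerSlot + delay seconds,
	// i.e. the midpoint of the assigned slot.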
	duration := time.Duration(slot*params.BeaconConfig().SecondsPerSlot+delay) * time.Second
	timeToBroadcast := time.Unix(int64(v.genesisTime), 0).Add(duration)

	time.Sleep(time.Until(timeToBroadcast))
}