fixing sa4006 (#13350)

Preston Van Loon 2023-12-15 10:49:27 -06:00 committed by GitHub
parent 344e68b81b
commit db096488b0
41 changed files with 125 additions and 104 deletions
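Staticcheck's SA4006 check flags a value that is assigned to a variable but never read before being overwritten or going out of scope. Nearly every hunk below fixes the same instance of it: trace.StartSpan returns a derived context alongside the span, and when the function never reads that derived context, the assignment is dead, so it is replaced with the blank identifier. A minimal, self-contained sketch of the before/after shape (assuming the OpenCensus-style trace.StartSpan these files use; lookup and its cache are illustrative, not from the repo):

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

// lookup shows the pattern fixed throughout this commit.
//
// Before (triggers SA4006, since the derived ctx is never read again):
//	ctx, span := trace.StartSpan(ctx, "example.lookup")
// After (the unused derived context is discarded):
//	_, span := trace.StartSpan(ctx, "example.lookup")
func lookup(ctx context.Context, cache map[string]string, key string) string {
	_, span := trace.StartSpan(ctx, "example.lookup")
	defer span.End()
	return cache[key]
}

func main() {
	fmt.Println(lookup(context.Background(), map[string]string{"a": "1"}, "a"))
}
```

Functions that still need the incoming context, for example to honor cancellation, take a different route; a sketch of that variant follows the deposit cache hunks further down.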

View File

@ -126,7 +126,7 @@ STATICCHECK_ANALYZERS = [
"sa4003",
"sa4004",
"sa4005",
# "sa4006", # TODO: Fix violations of unused arguments.
"sa4006",
"sa4008",
"sa4009",
"sa4010",
@ -172,7 +172,7 @@ STATICCHECK_ANALYZERS = [
"sa6006",
"sa9001",
"sa9002",
#"sa9003", # Doesn't build
#"sa9003", # Doesn't build. See https://github.com/dominikh/go-tools/pull/1483
"sa9004",
"sa9005",
"sa9006",

View File

@ -276,7 +276,7 @@ func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) state.BeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
_, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()
return s.head.state.Copy()
@ -286,7 +286,7 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
// It does not perform a copy of the head state.
// This is a lock free version.
func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
_, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
defer span.End()
return s.head.state

View File

@ -235,14 +235,14 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
_, err = service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
s2, err := service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
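The test hunks apply the same rule: a call exercised only for its expected error discards the state result instead of binding it to a variable that is never read, and the later successful call becomes the one that declares s2. A small self-contained sketch of that idiom (divide is a hypothetical stand-in, not from the repo):

```go
package main

import (
	"errors"
	"fmt"
)

func divide(a, b int) (int, error) {
	if b == 0 {
		return 0, errors.New("division by zero")
	}
	return a / b, nil
}

func main() {
	// Exercised only for the expected error: discard the result so the
	// assignment is not dead (what SA4006 would otherwise flag).
	_, err := divide(1, 0)
	fmt.Println("expected error:", err)

	// The successful call is the one that declares the result variable.
	quotient, err := divide(6, 3)
	fmt.Println(quotient, err)
}
```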

View File

@ -70,7 +70,7 @@ func New() (*DepositCache, error) {
// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
@ -104,7 +104,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
@ -124,7 +124,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
@ -176,7 +176,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -220,7 +220,7 @@ func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -236,7 +236,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -257,7 +257,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -271,7 +271,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedD
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -292,7 +292,7 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedI
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()

View File

@ -29,7 +29,7 @@ type PendingDepositsFetcher interface {
// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
@ -66,7 +66,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@ -90,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
defer span.End()
if d == nil {
@ -128,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()
if merkleTreeIndex == 0 {

View File

@ -76,7 +76,7 @@ func (c *Cache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
// AllDepositContainers returns all historical deposit containers.
func (c *Cache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "Cache.AllDepositContainers")
_, span := trace.StartSpan(ctx, "Cache.AllDepositContainers")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()
@ -101,7 +101,7 @@ func (c *Cache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContai
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (c *Cache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
ctx, span := trace.StartSpan(ctx, "Cache.DepositByPubkey")
_, span := trace.StartSpan(ctx, "Cache.DepositByPubkey")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()
@ -123,7 +123,7 @@ func (c *Cache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Depo
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (c *Cache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
ctx, span := trace.StartSpan(ctx, "Cache.DepositsNumberAndRootAtHeight")
_, span := trace.StartSpan(ctx, "Cache.DepositsNumberAndRootAtHeight")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()
@ -141,7 +141,7 @@ func (c *Cache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *
// FinalizedDeposits returns the finalized deposits trie.
func (c *Cache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
ctx, span := trace.StartSpan(ctx, "Cache.FinalizedDeposits")
_, span := trace.StartSpan(ctx, "Cache.FinalizedDeposits")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()
@ -159,7 +159,7 @@ func (c *Cache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits,
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "Cache.NonFinalizedDeposits")
_, span := trace.StartSpan(ctx, "Cache.NonFinalizedDeposits")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()
@ -180,7 +180,7 @@ func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
ctx, span := trace.StartSpan(ctx, "Cache.PruneProofs")
_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
defer span.End()
c.depositsLock.Lock()
defer c.depositsLock.Unlock()
@ -202,7 +202,7 @@ func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
ctx, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
defer span.End()
if merkleTreeIndex == 0 {
@ -227,7 +227,7 @@ func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "Cache.InsertPendingDeposit")
_, span := trace.StartSpan(ctx, "Cache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
@ -282,7 +282,7 @@ func (c *Cache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (c *Cache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
ctx, span := trace.StartSpan(ctx, "Cache.PendingContainers")
_, span := trace.StartSpan(ctx, "Cache.PendingContainers")
defer span.End()
c.depositsLock.RLock()
defer c.depositsLock.RUnlock()

View File

@ -28,6 +28,9 @@ var (
func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "Cache.InsertDeposit")
defer span.End()
if ctx.Err() != nil {
return ctx.Err()
}
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
@ -60,7 +63,7 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (c *Cache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
ctx, span := trace.StartSpan(ctx, "Cache.InsertDepositContainers")
_, span := trace.StartSpan(ctx, "Cache.InsertDepositContainers")
defer span.End()
c.depositsLock.Lock()
defer c.depositsLock.Unlock()
@ -89,6 +92,10 @@ func (c *Cache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex in
c.depositsLock.Lock()
defer c.depositsLock.Unlock()
if ctx.Err() != nil {
return ctx.Err()
}
depositTrie := c.finalizedDeposits.depositTree
insertIndex := int(c.finalizedDeposits.MerkleTrieIndex() + 1)
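Not every violation is fixed by discarding the context. Where the function should honor caller cancellation, the commit keeps the derived ctx and adds an early ctx.Err() check (as in InsertDeposit and InsertFinalizedDeposits above), so the assignment is actually read. A minimal sketch of that shape, again assuming OpenCensus-style tracing; the store type and insertRecord are illustrative:

```go
package main

import (
	"context"
	"errors"

	"go.opencensus.io/trace"
)

type store struct {
	records []string
}

// insertRecord keeps the derived ctx because it is read right away:
// the early ctx.Err() check returns if the caller already cancelled,
// which also means the assignment is no longer dead under SA4006.
func (s *store) insertRecord(ctx context.Context, rec string) error {
	ctx, span := trace.StartSpan(ctx, "store.insertRecord")
	defer span.End()
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if rec == "" {
		return errors.New("empty record")
	}
	s.records = append(s.records, rec)
	return nil
}

func main() {
	s := &store{}
	if err := s.insertRecord(context.Background(), "deposit"); err != nil {
		panic(err)
	}
}
```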

View File

@ -24,7 +24,7 @@ type AttDelta struct {
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
defer span.End()
vals := make([]*precompute.Validator, beaconState.NumValidators())
bal := &precompute.Balance{}
@ -86,7 +86,7 @@ func ProcessInactivityScores(
beaconState state.BeaconState,
vals []*precompute.Validator,
) (state.BeaconState, []*precompute.Validator, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
defer span.End()
cfg := params.BeaconConfig()
@ -155,7 +155,7 @@ func ProcessEpochParticipation(
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
defer span.End()
cp, err := beaconState.CurrentEpochParticipation()

View File

@ -18,7 +18,7 @@ import (
// pre computed instances of validators attesting records and total
// balances attested in an epoch.
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
defer span.End()
pValidators := make([]*Validator, s.NumValidators())

View File

@ -11,7 +11,7 @@ import (
// LastArchivedSlot from the db.
func (s *Store) LastArchivedSlot(ctx context.Context) (primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
defer span.End()
var index primitives.Slot
err := s.db.View(func(tx *bolt.Tx) error {
@ -26,7 +26,7 @@ func (s *Store) LastArchivedSlot(ctx context.Context) (primitives.Slot, error) {
// LastArchivedRoot from the db.
func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
defer span.End()
var blockRoot []byte
@ -44,7 +44,7 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
// ArchivedPointRoot returns the block root of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
defer span.End()
var blockRoot []byte
@ -61,7 +61,7 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
// HasArchivedPoint returns true if an archived point exists in DB.
func (s *Store) HasArchivedPoint(ctx context.Context, slot primitives.Slot) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
_, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
defer span.End()
var exists bool
if err := s.db.View(func(tx *bolt.Tx) error {

View File

@ -53,7 +53,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadO
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
defer span.End()
var root [32]byte
@ -72,7 +72,7 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
@ -168,7 +168,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
// HasBlock checks if a block by root exists in the db.
func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
_, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
defer span.End()
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
return true
@ -379,7 +379,7 @@ func (s *Store) GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeac
}
func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
@ -396,7 +396,7 @@ func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
// SaveGenesisBlockRoot to the db.
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@ -409,7 +409,7 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@ -420,7 +420,7 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@ -519,7 +519,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, feeRecipients []common.Address) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()
if len(ids) != len(feeRecipients) {
@ -644,7 +644,7 @@ func blockRootsBySlotRange(
bkt *bolt.Bucket,
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
defer span.End()
// Return nothing when all slot parameters are missing
@ -709,7 +709,7 @@ func blockRootsBySlotRange(
// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([][32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
defer span.End()
bkt := tx.Bucket(blockSlotIndicesBucket)
@ -730,7 +730,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search
@ -758,7 +758,7 @@ func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyB
// objects. If a certain filter criterion does not apply to
// blocks, an appropriate error is returned.
func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter) (map[string][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
defer span.End()
indicesByBucket := make(map[string][]byte)
for k, v := range f.Filters() {

View File

@ -12,7 +12,7 @@ import (
// DepositContractAddress returns contract address is the address of
// the deposit contract on the proof of work chain.
func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
_, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
defer span.End()
var addr []byte
if err := s.db.View(func(tx *bolt.Tx) error {
@ -27,7 +27,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
// SaveDepositContractAddress to the db. It returns an error if an address has been previously saved.
func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
_, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {

View File

@ -16,6 +16,10 @@ func decode(ctx context.Context, data []byte, dst proto.Message) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.decode")
defer span.End()
if ctx.Err() != nil {
return ctx.Err()
}
data, err := snappy.Decode(nil, data)
if err != nil {
return err
@ -30,6 +34,10 @@ func encode(ctx context.Context, msg proto.Message) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.encode")
defer span.End()
if ctx.Err() != nil {
return nil, ctx.Err()
}
if msg == nil || reflect.ValueOf(msg).IsNil() {
return nil, errors.New("cannot encode nil message")
}

View File

@ -13,7 +13,7 @@ import (
// SaveExecutionChainData saves the execution chain data.
func (s *Store) SaveExecutionChainData(ctx context.Context, data *v2.ETH1ChainData) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveExecutionChainData")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveExecutionChainData")
defer span.End()
if data == nil {
@ -36,7 +36,7 @@ func (s *Store) SaveExecutionChainData(ctx context.Context, data *v2.ETH1ChainDa
// ExecutionChainData retrieves the execution chain data.
func (s *Store) ExecutionChainData(ctx context.Context) (*v2.ETH1ChainData, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.ExecutionChainData")
_, span := trace.StartSpan(ctx, "BeaconDB.ExecutionChainData")
defer span.End()
var data *v2.ETH1ChainData

View File

@ -168,7 +168,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()
var exists bool

View File

@ -412,7 +412,7 @@ func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx
// HasState checks if a state by root exists in the db.
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasState")
_, span := trace.StartSpan(ctx, "BeaconDB.HasState")
defer span.End()
hasState := false
err := s.db.View(func(tx *bolt.Tx) error {
@ -742,7 +742,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et
// retrieves and assembles the state information from multiple buckets.
func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
_, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
defer span.End()
var dst []byte
err := s.db.View(func(tx *bolt.Tx) error {
@ -865,7 +865,7 @@ func (s *Store) HighestSlotStatesBelow(ctx context.Context, slot primitives.Slot
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createStateIndicesFromStateSlot(ctx context.Context, slot primitives.Slot) map[string][]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
_, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search

View File

@ -64,7 +64,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
// HasStateSummary returns true if a state summary exists in DB.
func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
_, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()
if s.stateSummaryCache.has(blockRoot) {

View File

@ -18,7 +18,7 @@ import (
// we might find roots `0x23` and `0x45` stored under that index. We can then
// do a batch read for attestations corresponding to those roots.
func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]byte, tx *bolt.Tx) [][][]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
_, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
defer span.End()
values := make([][][]byte, 0, len(indicesByBucket))
for k, v := range indicesByBucket {
@ -37,7 +37,7 @@ func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]by
// values stored at said index. Typically, indices are roots of data that can then
// be used for reads or batch reads from the DB.
func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
_, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
defer span.End()
for k, idx := range indicesByBucket {
bkt := tx.Bucket([]byte(k))
@ -63,7 +63,7 @@ func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byt
// deleteValueForIndices clears a root stored at each index.
func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
_, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
defer span.End()
for k, idx := range indicesByBucket {
bkt := tx.Bucket([]byte(k))

View File

@ -31,7 +31,7 @@ const (
func (s *Store) LastEpochWrittenForValidators(
ctx context.Context, validatorIndices []primitives.ValidatorIndex,
) ([]*slashertypes.AttestedEpochForValidator, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
_, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
defer span.End()
attestedEpochs := make([]*slashertypes.AttestedEpochForValidator, 0)
encodedIndices := make([][]byte, len(validatorIndices))
@ -183,7 +183,7 @@ func (s *Store) CheckAttesterDoubleVotes(
func (s *Store) AttestationRecordForValidator(
ctx context.Context, validatorIdx primitives.ValidatorIndex, targetEpoch primitives.Epoch,
) (*slashertypes.IndexedAttestationWrapper, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
_, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
defer span.End()
var record *slashertypes.IndexedAttestationWrapper
encIdx := encodeValidatorIndex(validatorIdx)
@ -215,7 +215,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attestations []*slashertypes.IndexedAttestationWrapper,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))
@ -259,7 +259,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
func (s *Store) LoadSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
) ([][]uint16, []bool, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
defer span.End()
chunks := make([][]uint16, 0)
var exists []bool
@ -290,7 +290,7 @@ func (s *Store) LoadSlasherChunks(
func (s *Store) SaveSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte, chunks [][]uint16,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
defer span.End()
encodedKeys := make([][]byte, len(chunkKeys))
encodedChunks := make([][]byte, len(chunkKeys))
@ -320,7 +320,7 @@ func (s *Store) SaveSlasherChunks(
func (s *Store) CheckDoubleBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) ([]*ethpb.ProposerSlashing, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
_, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
defer span.End()
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposals))
err := s.db.View(func(tx *bolt.Tx) error {
@ -359,7 +359,7 @@ func (s *Store) CheckDoubleBlockProposals(
func (s *Store) BlockProposalForValidator(
ctx context.Context, validatorIdx primitives.ValidatorIndex, slot primitives.Slot,
) (*slashertypes.SignedBlockHeaderWrapper, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
_, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
defer span.End()
var record *slashertypes.SignedBlockHeaderWrapper
key, err := keyForValidatorProposal(slot, validatorIdx)
@ -387,7 +387,7 @@ func (s *Store) BlockProposalForValidator(
func (s *Store) SaveBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
defer span.End()
encodedKeys := make([][]byte, len(proposals))
encodedProposals := make([][]byte, len(proposals))

View File

@ -71,7 +71,7 @@ func (c *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []*ethpb.Attestation {
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
defer span.End()
atts := make([]*ethpb.Attestation, 0)

View File

@ -33,7 +33,7 @@ func NewPool() *Pool {
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.AttesterSlashing {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
defer span.End()
// Update prom metric.
@ -80,7 +80,7 @@ func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnl
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
defer span.End()
// Update prom metric.
@ -187,7 +187,7 @@ func (p *Pool) InsertProposerSlashing(
) error {
p.lock.Lock()
defer p.lock.Unlock()
ctx, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
defer span.End()
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {

View File

@ -95,7 +95,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
}
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@ -148,7 +148,7 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
}
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
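broadcastAttestation and broadcastSyncCommittee above are a third variant: the context returned by StartSpan was dead because it is immediately replaced with a fresh background context that still carries the span, deliberately dropping the caller's deadline. A sketch of that shape (broadcast and its message are illustrative; trace.NewContext is the OpenCensus helper that attaches a span to a context):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.opencensus.io/trace"
)

// broadcast detaches from the caller's deadline: the ctx returned by
// StartSpan would never be read (SA4006), because the very next line
// replaces ctx with a background context that still carries the span.
func broadcast(ctx context.Context, msg string) {
	_, span := trace.StartSpan(ctx, "example.broadcast")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

	select {
	case <-ctx.Done():
		fmt.Println("cancelled:", ctx.Err())
	case <-time.After(10 * time.Millisecond):
		fmt.Println("sent:", msg)
	}
}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	broadcast(parent, "attestation") // completes even though the parent deadline has expired
}
```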

View File

@ -43,7 +43,7 @@ func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBea
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
// The contributions are filtered based on matching of the input root and slot then profitability.
func (vs *Server) getSyncAggregate(ctx context.Context, slot primitives.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
_, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
defer span.End()
if vs.SyncCommitteePool == nil {

View File

@ -19,6 +19,9 @@ import (
func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "ComputeFieldRootsWithHasher")
defer span.End()
if ctx.Err() != nil {
return nil, ctx.Err()
}
if state == nil {
return nil, errors.New("nil state")

View File

@ -939,7 +939,7 @@ func (b *BeaconState) IsNil() bool {
}
func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "beaconState.rootSelector")
_, span := trace.StartSpan(ctx, "beaconState.rootSelector")
defer span.End()
span.AddAttributes(trace.StringAttribute("field", field.String()))

View File

@ -187,7 +187,7 @@ func TestMigrateToCold_ParallelCalls(t *testing.T) {
require.NoError(t, err)
wB7, err := consensusblocks.NewSignedBeaconBlock(b7)
require.NoError(t, err)
beaconState, err = executeStateTransitionStateGen(ctx, beaconState, wB7)
_, err = executeStateTransitionStateGen(ctx, beaconState, wB7)
assert.NoError(t, err)
r7, err := b7.Block.HashTreeRoot()
require.NoError(t, err)

View File

@ -50,7 +50,7 @@ func (s *Service) verifierRoutine() {
}
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
resChan := make(chan error)

View File

@ -93,7 +93,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
// filterPeers returns transformed list of peers, weight sorted by scores and capacity remaining.
// List can be further constrained using peersPercentage, where only percentage of peers are returned.
func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersPercentage float64) []peer.ID {
ctx, span := trace.StartSpan(ctx, "initialsync.filterPeers")
_, span := trace.StartSpan(ctx, "initialsync.filterPeers")
defer span.End()
if len(peers) == 0 {

View File

@ -201,7 +201,7 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
// check specifies the pending attestation could not fall one epoch behind
// of the current slot.
func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "validatePendingAtts")
_, span := trace.StartSpan(ctx, "validatePendingAtts")
defer span.End()
s.pendingAttsLock.Lock()

View File

@ -24,7 +24,7 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
if wQuota == 0 {
return 0, nil
}
ctx, span := trace.StartSpan(ctx, "sync.streamBlobBatch")
_, span := trace.StartSpan(ctx, "sync.streamBlobBatch")
defer span.End()
for _, b := range batch.canonical() {
root := b.Root()

View File

@ -27,7 +27,6 @@ import (
)
func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
ctx := context.Background()
p := p2ptest.NewTestP2P(t)
db := dbtest.SetupDB(t)
chain := &mockChain.ChainService{

View File

@ -176,7 +176,7 @@ func (s *Service) rejectIncorrectSyncCommittee(
committeeIndices []primitives.CommitteeIndex, topic string,
) validationFn {
return func(ctx context.Context) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.rejectIncorrectSyncCommittee")
_, span := trace.StartSpan(ctx, "sync.rejectIncorrectSyncCommittee")
defer span.End()
isValid := false
digest, err := s.currentForkDigest()

View File

@ -118,7 +118,7 @@ func rejectIncorrectSubcommitteeIndex(
m *ethpb.SignedContributionAndProof,
) validationFn {
return func(ctx context.Context) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.rejectIncorrectSubcommitteeIndex")
_, span := trace.StartSpan(ctx, "sync.rejectIncorrectSubcommitteeIndex")
defer span.End()
// The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
if m.Message.Contribution.SubcommitteeIndex >= params.BeaconConfig().SyncCommitteeSubnetCount {

View File

@ -102,7 +102,7 @@ func AttestingIndices(bf bitfield.Bitfield, committee []primitives.ValidatorInde
// signing_root = compute_signing_root(indexed_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
func VerifyIndexedAttestationSig(ctx context.Context, indexedAtt *ethpb.IndexedAttestation, pubKeys []bls.PublicKey, domain []byte) error {
ctx, span := trace.StartSpan(ctx, "attestationutil.VerifyIndexedAttestationSig")
_, span := trace.StartSpan(ctx, "attestationutil.VerifyIndexedAttestationSig")
defer span.End()
indices := indexedAtt.AttestingIndices
@ -143,7 +143,7 @@ func VerifyIndexedAttestationSig(ctx context.Context, indexedAtt *ethpb.IndexedA
// signing_root = compute_signing_root(indexed_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
func IsValidAttestationIndices(ctx context.Context, indexedAttestation *ethpb.IndexedAttestation) error {
ctx, span := trace.StartSpan(ctx, "attestationutil.IsValidAttestationIndices")
_, span := trace.StartSpan(ctx, "attestationutil.IsValidAttestationIndices")
defer span.End()
if indexedAttestation == nil || indexedAttestation.Data == nil || indexedAttestation.Data.Target == nil || indexedAttestation.AttestingIndices == nil {

View File

@ -53,6 +53,8 @@ func (c beaconApiValidatorClient) getBeaconBlock(ctx context.Context, slot primi
ver = produceBlindedBlockResponseJson.Version
blinded = true
decoder = json.NewDecoder(bytes.NewReader(produceBlindedBlockResponseJson.Data))
} else if errJson == nil {
return nil, errors.Wrap(err, "failed to query GET REST endpoint: nil result")
} else {
log.Debug("Endpoint /eth/v1/validator/blinded_blocks failed to produce a blinded block, trying /eth/v2/validator/blocks.")
produceBlockResponseJson := abstractProduceBlockResponseJson{}
@ -60,6 +62,8 @@ func (c beaconApiValidatorClient) getBeaconBlock(ctx context.Context, slot primi
errJson, err = c.jsonRestHandler.Get(ctx, queryUrl, &produceBlockResponseJson)
if err != nil {
return nil, errors.Wrap(err, "failed to query GET REST endpoint")
} else if errJson != nil {
return nil, errors.Wrap(errJson, "failed to query GET REST endpoint")
}
ver = produceBlockResponseJson.Version
blinded = false

View File

@ -102,7 +102,7 @@ var (
// we have stored in the database for the given validator public key.
func (s *Store) AttestationHistoryForPubKey(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte) ([]*AttestationRecord, error) {
records := make([]*AttestationRecord, 0)
ctx, span := trace.StartSpan(ctx, "Validator.AttestationHistoryForPubKey")
_, span := trace.StartSpan(ctx, "Validator.AttestationHistoryForPubKey")
defer span.End()
err := s.view(func(tx *bolt.Tx) error {
bucket := tx.Bucket(pubKeysBucket)
@ -423,7 +423,7 @@ func (s *Store) flushAttestationRecords(ctx context.Context, records []*Attestat
// transaction to minimize write lock contention compared to doing them
// all in individual, isolated boltDB transactions.
func (s *Store) saveAttestationRecords(ctx context.Context, atts []*AttestationRecord) error {
ctx, span := trace.StartSpan(ctx, "Validator.saveAttestationRecords")
_, span := trace.StartSpan(ctx, "Validator.saveAttestationRecords")
defer span.End()
return s.update(func(tx *bolt.Tx) error {
// Initialize buckets for the lowest target and source epochs.
@ -519,7 +519,7 @@ func (s *Store) saveAttestationRecords(ctx context.Context, atts []*AttestationR
// AttestedPublicKeys retrieves all public keys that have attested.
func (s *Store) AttestedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
ctx, span := trace.StartSpan(ctx, "Validator.AttestedPublicKeys")
_, span := trace.StartSpan(ctx, "Validator.AttestedPublicKeys")
defer span.End()
var err error
attestedPublicKeys := make([][fieldparams.BLSPubkeyLength]byte, 0)
@ -538,7 +538,7 @@ func (s *Store) AttestedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubke
// SigningRootAtTargetEpoch checks for an existing signing root at a specified
// target epoch for a given validator public key.
func (s *Store) SigningRootAtTargetEpoch(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, target primitives.Epoch) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "Validator.SigningRootAtTargetEpoch")
_, span := trace.StartSpan(ctx, "Validator.SigningRootAtTargetEpoch")
defer span.End()
var (
@ -585,7 +585,7 @@ func (s *Store) SigningRootAtTargetEpoch(ctx context.Context, pubKey [fieldparam
// LowestSignedSourceEpoch returns the lowest signed source epoch for a validator public key.
// If no data exists, returning 0 is a sensible default.
func (s *Store) LowestSignedSourceEpoch(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte) (primitives.Epoch, bool, error) {
ctx, span := trace.StartSpan(ctx, "Validator.LowestSignedSourceEpoch")
_, span := trace.StartSpan(ctx, "Validator.LowestSignedSourceEpoch")
defer span.End()
var err error
@ -608,7 +608,7 @@ func (s *Store) LowestSignedSourceEpoch(ctx context.Context, publicKey [fieldpar
// LowestSignedTargetEpoch returns the lowest signed target epoch for a validator public key.
// If no data exists, returning 0 is a sensible default.
func (s *Store) LowestSignedTargetEpoch(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte) (primitives.Epoch, bool, error) {
ctx, span := trace.StartSpan(ctx, "Validator.LowestSignedTargetEpoch")
_, span := trace.StartSpan(ctx, "Validator.LowestSignedTargetEpoch")
defer span.End()
var err error

View File

@ -17,7 +17,7 @@ const backupsDirectoryName = "backups"
// Backup the database to the datadir backup directory.
// Example for backup: $DATADIR/backups/prysm_validatordb_1029019.backup
func (s *Store) Backup(ctx context.Context, outputDir string, permissionOverride bool) error {
ctx, span := trace.StartSpan(ctx, "ValidatorDB.Backup")
_, span := trace.StartSpan(ctx, "ValidatorDB.Backup")
defer span.End()
var backupsDir string

View File

@ -11,7 +11,7 @@ import (
// EIPImportBlacklistedPublicKeys returns keys that were marked as blacklisted during EIP-3076 slashing
// protection imports, ensuring that we can prevent these keys from having duties at runtime.
func (s *Store) EIPImportBlacklistedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
ctx, span := trace.StartSpan(ctx, "Validator.EIPImportBlacklistedPublicKeys")
_, span := trace.StartSpan(ctx, "Validator.EIPImportBlacklistedPublicKeys")
defer span.End()
var err error
publicKeys := make([][fieldparams.BLSPubkeyLength]byte, 0)
@ -32,7 +32,7 @@ func (s *Store) EIPImportBlacklistedPublicKeys(ctx context.Context) ([][fieldpar
// SaveEIPImportBlacklistedPublicKeys stores a list of blacklisted public keys that
// were determined during EIP-3076 slashing protection imports.
func (s *Store) SaveEIPImportBlacklistedPublicKeys(ctx context.Context, publicKeys [][fieldparams.BLSPubkeyLength]byte) error {
ctx, span := trace.StartSpan(ctx, "Validator.SaveEIPImportBlacklistedPublicKeys")
_, span := trace.StartSpan(ctx, "Validator.SaveEIPImportBlacklistedPublicKeys")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(slashablePublicKeysBucket)

View File

@ -27,7 +27,7 @@ type Proposal struct {
// ProposedPublicKeys retrieves all public keys in our proposals history bucket.
func (s *Store) ProposedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
ctx, span := trace.StartSpan(ctx, "Validator.ProposedPublicKeys")
_, span := trace.StartSpan(ctx, "Validator.ProposedPublicKeys")
defer span.End()
var err error
proposedPublicKeys := make([][fieldparams.BLSPubkeyLength]byte, 0)
@ -47,7 +47,7 @@ func (s *Store) ProposedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubke
// as a boolean that tells us if we have a proposal history stored at the slot and a boolean that tells us if we have
// a signed root at the slot.
func (s *Store) ProposalHistoryForSlot(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([32]byte, bool, bool, error) {
ctx, span := trace.StartSpan(ctx, "Validator.ProposalHistoryForSlot")
_, span := trace.StartSpan(ctx, "Validator.ProposalHistoryForSlot")
defer span.End()
var (
@ -84,7 +84,7 @@ func (s *Store) ProposalHistoryForSlot(ctx context.Context, publicKey [fieldpara
// ProposalHistoryForPubKey returns the entire proposal history for a given public key.
func (s *Store) ProposalHistoryForPubKey(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte) ([]*Proposal, error) {
ctx, span := trace.StartSpan(ctx, "Validator.ProposalHistoryForPubKey")
_, span := trace.StartSpan(ctx, "Validator.ProposalHistoryForPubKey")
defer span.End()
proposals := make([]*Proposal, 0)
@ -112,7 +112,7 @@ func (s *Store) ProposalHistoryForPubKey(ctx context.Context, publicKey [fieldpa
// We also check if the incoming proposal slot is lower than the lowest signed proposal slot
// for the validator and override its value on disk.
func (s *Store) SaveProposalHistoryForSlot(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, slot primitives.Slot, signingRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "Validator.SaveProposalHistoryForEpoch")
_, span := trace.StartSpan(ctx, "Validator.SaveProposalHistoryForEpoch")
defer span.End()
err := s.update(func(tx *bolt.Tx) error {
@ -159,7 +159,7 @@ func (s *Store) SaveProposalHistoryForSlot(ctx context.Context, pubKey [fieldpar
// LowestSignedProposal returns the lowest signed proposal slot for a validator public key.
// If no data exists, a boolean of value false is returned.
func (s *Store) LowestSignedProposal(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte) (primitives.Slot, bool, error) {
ctx, span := trace.StartSpan(ctx, "Validator.LowestSignedProposal")
_, span := trace.StartSpan(ctx, "Validator.LowestSignedProposal")
defer span.End()
var err error
@ -182,7 +182,7 @@ func (s *Store) LowestSignedProposal(ctx context.Context, publicKey [fieldparams
// HighestSignedProposal returns the highest signed proposal slot for a validator public key.
// If no data exists, a boolean of value false is returned.
func (s *Store) HighestSignedProposal(ctx context.Context, publicKey [fieldparams.BLSPubkeyLength]byte) (primitives.Slot, bool, error) {
ctx, span := trace.StartSpan(ctx, "Validator.HighestSignedProposal")
_, span := trace.StartSpan(ctx, "Validator.HighestSignedProposal")
defer span.End()
var err error

View File

@ -15,7 +15,7 @@ import (
// target epoch minus some constant of how many epochs we keep track of for slashing
// protection. This routine is meant to run on startup.
func (s *Store) PruneAttestations(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "Validator.PruneAttestations")
_, span := trace.StartSpan(ctx, "Validator.PruneAttestations")
defer span.End()
var pubkeys [][]byte
err := s.view(func(tx *bolt.Tx) error {

View File

@ -176,7 +176,7 @@ func (km *Keymanager) initializeKeysCachesFromKeystore() error {
// FetchValidatingPublicKeys fetches the list of active public keys from the local account keystores.
func (_ *Keymanager) FetchValidatingPublicKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
ctx, span := trace.StartSpan(ctx, "keymanager.FetchValidatingPublicKeys")
_, span := trace.StartSpan(ctx, "keymanager.FetchValidatingPublicKeys")
defer span.End()
lock.RLock()