mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-21 19:20:38 +00:00)
parent 7a9608ea20
commit 0b261cba5e
@@ -108,10 +108,10 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
     }

     log.
-        WithField("block_slot", b.Block().Slot()).
-        WithField("state_slot", s.Slot()).
-        WithField("state_root", hexutil.Encode(sr[:])).
-        WithField("block_root", hexutil.Encode(br[:])).
+        WithField("blockSlot", b.Block().Slot()).
+        WithField("stateSlot", s.Slot()).
+        WithField("stateRoot", hexutil.Encode(sr[:])).
+        WithField("blockRoot", hexutil.Encode(br[:])).
         Info("Downloaded checkpoint sync state and block.")
     return &OriginData{
         st: s,
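For reference, a minimal standalone sketch of the camelCase field style this diff adopts (not part of the commit; the logger construction and the example values are assumptions), using logrus chained WithField calls:

package main

import "github.com/sirupsen/logrus"

func main() {
    log := logrus.New()
    // Chained WithField calls attach structured, camelCase keys to a single entry.
    log.
        WithField("blockSlot", 128).
        WithField("stateSlot", 128).
        Info("Downloaded checkpoint sync state and block.")
    // With the default text formatter this renders roughly as:
    // level=info msg="Downloaded checkpoint sync state and block." blockSlot=128 stateSlot=128
}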
@@ -310,8 +310,8 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
     for _, failure := range errorJson.Failures {
         w := request[failure.Index].Message
         log.WithFields(log.Fields{
-            "validator_index": w.ValidatorIndex,
-            "withdrawal_address": w.ToExecutionAddress,
+            "validatorIndex": w.ValidatorIndex,
+            "withdrawalAddress": w.ToExecutionAddress,
         }).Error(failure.Message)
     }
     return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
@@ -57,8 +57,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
     b := bytes.NewBuffer(nil)
     if r.Body == nil {
         log.WithFields(log.Fields{
-            "body-base64": "(nil value)",
-            "url": r.URL.String(),
+            "bodyBase64": "(nil value)",
+            "url": r.URL.String(),
         }).Info("builder http request")
         return nil
     }
@@ -74,8 +74,8 @@ func (*requestLogger) observe(r *http.Request) (e error) {
     }
     r.Body = io.NopCloser(b)
     log.WithFields(log.Fields{
-        "body-base64": string(body),
-        "url": r.URL.String(),
+        "bodyBase64": string(body),
+        "url": r.URL.String(),
     }).Info("builder http request")

     return nil
@@ -419,7 +419,7 @@ func (s *Service) startFromExecutionChain() error {
         log.Error("event data is not type *statefeed.ChainStartedData")
         return
     }
-    log.WithField("starttime", data.StartTime).Debug("Received chain start event")
+    log.WithField("startTime", data.StartTime).Debug("Received chain start event")
     s.onExecutionChainStart(s.ctx, data.StartTime)
     return
 }
@@ -74,10 +74,10 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
     defer span.End()
     if d == nil {
         log.WithFields(logrus.Fields{
-            "block": blockNum,
-            "deposit": d,
-            "index": index,
-            "deposit root": hex.EncodeToString(depositRoot[:]),
+            "block": blockNum,
+            "deposit": d,
+            "index": index,
+            "depositRoot": hex.EncodeToString(depositRoot[:]),
         }).Warn("Ignoring nil deposit insertion")
         return errors.New("nil deposit inserted into the cache")
     }
@@ -33,10 +33,10 @@ func (c *Cache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum ui
     }
     if d == nil {
         log.WithFields(logrus.Fields{
-            "block": blockNum,
-            "deposit": d,
-            "index": index,
-            "deposit root": hex.EncodeToString(depositRoot[:]),
+            "block": blockNum,
+            "deposit": d,
+            "index": index,
+            "depositRoot": hex.EncodeToString(depositRoot[:]),
         }).Warn("Ignoring nil deposit insertion")
         return errors.New("nil deposit inserted into the cache")
     }
@@ -269,7 +269,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
     }

     log.WithFields(logrus.Fields{
-        "ChainStartTime": chainStartTime,
+        "chainStartTime": chainStartTime,
     }).Info("Minimum number of validators reached for beacon-chain to start")
     s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
         Type: statefeed.ChainStarted,
@@ -639,7 +639,7 @@ func (s *Service) logTillChainStart(ctx context.Context) {
     }

     fields := logrus.Fields{
-        "Additional validators needed": valNeeded,
+        "additionalValidatorsNeeded": valNeeded,
     }
     if secondsLeft > 0 {
         fields["Generating genesis state in"] = time.Duration(secondsLeft) * time.Second
@@ -44,11 +44,11 @@ func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.A
 // logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
 func logMessageTimelyFlagsForIndex(idx primitives.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
     return logrus.Fields{
-        "ValidatorIndex": idx,
-        "Slot": data.Slot,
-        "Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
-        "Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
-        "Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
+        "validatorIndex": idx,
+        "slot": data.Slot,
+        "source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
+        "target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
+        "head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
     }
 }

@@ -146,12 +146,12 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
             aggregatedPerf.totalCorrectTarget++
         }
     }
-    logFields["CorrectHead"] = latestPerf.timelyHead
-    logFields["CorrectSource"] = latestPerf.timelySource
-    logFields["CorrectTarget"] = latestPerf.timelyTarget
-    logFields["InclusionSlot"] = latestPerf.inclusionSlot
-    logFields["NewBalance"] = balance
-    logFields["BalanceChange"] = balanceChg
+    logFields["correctHead"] = latestPerf.timelyHead
+    logFields["correctSource"] = latestPerf.timelySource
+    logFields["correctTarget"] = latestPerf.timelyTarget
+    logFields["inclusionSlot"] = latestPerf.inclusionSlot
+    logFields["newBalance"] = balance
+    logFields["balanceChange"] = balanceChg

     s.latestPerformance[primitives.ValidatorIndex(idx)] = latestPerf
     s.aggregatedPerformance[primitives.ValidatorIndex(idx)] = aggregatedPerf
@@ -167,7 +167,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
     root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
     st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
     if st == nil {
-        log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+        log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
             "Skipping unaggregated attestation due to state not found in cache")
         return
     }
@@ -190,13 +190,13 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
     defer s.Unlock()
     if s.trackedIndex(att.AggregatorIndex) {
         log.WithFields(logrus.Fields{
-            "AggregatorIndex": att.AggregatorIndex,
-            "Slot": att.Aggregate.Data.Slot,
-            "BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+            "aggregatorIndex": att.AggregatorIndex,
+            "slot": att.Aggregate.Data.Slot,
+            "beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
                 att.Aggregate.Data.BeaconBlockRoot)),
-            "SourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+            "sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
                 att.Aggregate.Data.Source.Root)),
-            "TargetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
+            "targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
                 att.Aggregate.Data.Target.Root)),
         }).Info("Processed attestation aggregation")
         aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
@@ -209,7 +209,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
     copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
     st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
     if st == nil {
-        log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+        log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
             "Skipping aggregated attestation due to state not found in cache")
         return
     }
@@ -55,8 +55,8 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
     AggregationBits: bitfield.Bitlist{0b11, 0b1},
 }
 s.processIncludedAttestation(context.Background(), state, att)
-wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
-wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
+wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
 require.LogsContain(t, hook, wanted1)
 require.LogsContain(t, hook, wanted2)
 }
@@ -124,8 +124,8 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
 }
 require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
 s.processUnaggregatedAttestation(context.Background(), att)
-wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
-wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
+wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
 require.LogsContain(t, hook, wanted1)
 require.LogsContain(t, hook, wanted2)
 }
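The expected strings above list the renamed camelCase keys in alphabetical order, with the package-level prefix=monitor field interleaved; logrus's default text formatter sorts field keys, so the rendered entry follows that order. A minimal standalone sketch (the values below are made up) of how such an entry is produced and rendered:

entry := logrus.WithField("prefix", "monitor")
entry.WithFields(logrus.Fields{
    "validatorIndex": 2,
    "slot":           1,
    "head":           "0x68656c6c6f2d",
}).Info("Processed unaggregated attestation")
// Rendered roughly as:
// level=info msg="Processed unaggregated attestation" head=0x68656c6c6f2d prefix=monitor slot=1 validatorIndex=2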
@@ -162,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
     },
 }
 s.processAggregatedAttestation(ctx, att)
-require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
+require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x000000000000 prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
 require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
 logrus.SetLevel(logrus.InfoLevel)
 }
@@ -200,9 +200,9 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {

 require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
 s.processAggregatedAttestation(ctx, att)
-require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
-require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
-require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
+require.LogsContain(t, hook, "\"Processed attestation aggregation\" aggregatorIndex=2 beaconBlockRoot=0x68656c6c6f2d prefix=monitor slot=1 sourceRoot=0x68656c6c6f2d targetRoot=0x68656c6c6f2d")
+require.LogsContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2")
+require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12")
 }

 func TestProcessAttestations(t *testing.T) {
@@ -240,8 +240,8 @@ func TestProcessAttestations(t *testing.T) {
 wrappedBlock, err := blocks.NewBeaconBlock(block)
 require.NoError(t, err)
 s.processAttestations(ctx, state, wrappedBlock)
-wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
-wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
+wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2"
+wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12"
 require.LogsContain(t, hook, wanted1)
 require.LogsContain(t, hook, wanted2)

@@ -39,7 +39,7 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
     }
     st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
     if st == nil {
-        log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
+        log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
             "Skipping block collection due to state not found in cache")
         return
     }
@@ -90,13 +90,13 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b

     parentRoot := blk.ParentRoot()
     log.WithFields(logrus.Fields{
-        "ProposerIndex": blk.ProposerIndex(),
-        "Slot": blk.Slot(),
-        "Version": blk.Version(),
-        "ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
-        "BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
-        "NewBalance": balance,
-        "BalanceChange": balanceChg,
+        "proposerIndex": blk.ProposerIndex(),
+        "slot": blk.Slot(),
+        "version": blk.Version(),
+        "parentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(parentRoot[:])),
+        "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
+        "newBalance": balance,
+        "balanceChange": balanceChg,
     }).Info("Proposed beacon block was included")
     }
 }
@@ -109,11 +109,11 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
     idx := slashing.Header_1.Header.ProposerIndex
     if s.trackedIndex(idx) {
         log.WithFields(logrus.Fields{
-            "ProposerIndex": idx,
-            "Slot": blk.Slot(),
-            "SlashingSlot": slashing.Header_1.Header.Slot,
-            "BodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
-            "BodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
+            "proposerIndex": idx,
+            "slot": blk.Slot(),
+            "slashingSlot": slashing.Header_1.Header.Slot,
+            "bodyRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
+            "bodyRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
         }).Info("Proposer slashing was included")
     }
 }
@@ -122,16 +122,16 @@ func (s *Service) processSlashings(blk interfaces.ReadOnlyBeaconBlock) {
     for _, idx := range blocks.SlashableAttesterIndices(slashing) {
         if s.trackedIndex(primitives.ValidatorIndex(idx)) {
             log.WithFields(logrus.Fields{
-                "AttesterIndex": idx,
-                "BlockInclusionSlot": blk.Slot(),
-                "AttestationSlot1": slashing.Attestation_1.Data.Slot,
-                "BeaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
-                "SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
-                "TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
-                "AttestationSlot2": slashing.Attestation_2.Data.Slot,
-                "BeaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
-                "SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
-                "TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
+                "attesterIndex": idx,
+                "blockInclusionSlot": blk.Slot(),
+                "attestationSlot1": slashing.Attestation_1.Data.Slot,
+                "beaconBlockRoot1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
+                "sourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
+                "targetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
+                "attestationSlot2": slashing.Attestation_2.Data.Slot,
+                "beaconBlockRoot2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
+                "sourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
+                "targetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
             }).Info("Attester slashing was included")
         }
     }
@@ -159,19 +159,19 @@ func (s *Service) logAggregatedPerformance() {
     percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)

     log.WithFields(logrus.Fields{
-        "ValidatorIndex": idx,
-        "StartEpoch": p.startEpoch,
-        "StartBalance": p.startBalance,
-        "TotalRequested": p.totalRequestedCount,
-        "AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
-        "BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
-        "CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
-        "CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
-        "CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
-        "AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
-        "TotalProposedBlocks": p.totalProposedCount,
-        "TotalAggregations": p.totalAggregations,
-        "TotalSyncContributions": p.totalSyncCommitteeContributions,
+        "validatorIndex": idx,
+        "startEpoch": p.startEpoch,
+        "startBalance": p.startBalance,
+        "totalRequested": p.totalRequestedCount,
+        "attestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
+        "balanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
+        "correctlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
+        "correctlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
+        "correctlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
+        "averageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
+        "totalProposedBlocks": p.totalProposedCount,
+        "totalAggregations": p.totalAggregations,
+        "totalSyncContributions": p.totalSyncCommitteeContributions,
     }).Info("Aggregated performance since launch")
     }
 }
@@ -44,7 +44,7 @@ func TestProcessSlashings(t *testing.T) {
             },
         },
     },
-    wantedErr: "\"Proposer slashing was included\" BodyRoot1= BodyRoot2= ProposerIndex=2",
+    wantedErr: "\"Proposer slashing was included\" bodyRoot1= bodyRoot2= prefix=monitor proposerIndex=2",
 },
 {
     name: "Proposer slashing an untracked index",
@@ -89,8 +89,8 @@ func TestProcessSlashings(t *testing.T) {
             },
         },
     },
-    wantedErr: "\"Attester slashing was included\" AttestationSlot1=0 AttestationSlot2=0 AttesterIndex=1 " +
-        "BeaconBlockRoot1=0x000000000000 BeaconBlockRoot2=0x000000000000 BlockInclusionSlot=0 SourceEpoch1=1 SourceEpoch2=0 TargetEpoch1=0 TargetEpoch2=0",
+    wantedErr: "\"Attester slashing was included\" attestationSlot1=0 attestationSlot2=0 attesterIndex=1 " +
+        "beaconBlockRoot1=0x000000000000 beaconBlockRoot2=0x000000000000 blockInclusionSlot=0 prefix=monitor sourceEpoch1=1 sourceEpoch2=0 targetEpoch1=0 targetEpoch2=0",
 },
 {
     name: "Attester slashing untracked index",
@@ -150,7 +150,7 @@ func TestProcessProposedBlock(t *testing.T) {
     StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
     Body: &ethpb.BeaconBlockBody{},
     },
-    wantedErr: "\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
+    wantedErr: "\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=0x68656c6c6f2d newBalance=32000000000 parentRoot=0x68656c6c6f2d prefix=monitor proposerIndex=12 slot=6 version=0",
 },
 {
     name: "Block proposed by untracked validator",
@@ -225,10 +225,10 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
 root, err := b.GetBlock().HashTreeRoot()
 require.NoError(t, err)
 require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
-wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
-wanted2 := fmt.Sprintf("\"Proposer slashing was included\" BodyRoot1=0x000100000000 BodyRoot2=0x000200000000 ProposerIndex=%d SlashingSlot=0 Slot=1 prefix=monitor", idx)
-wanted3 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=3 ExpectedContribCount=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
-wanted4 := "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
+wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
+wanted2 := fmt.Sprintf("\"Proposer slashing was included\" bodyRoot1=0x000100000000 bodyRoot2=0x000200000000 prefix=monitor proposerIndex=%d slashingSlot=0 slot=1", idx)
+wanted3 := "\"Sync committee contribution included\" balanceChange=0 contribCount=3 expectedContribCount=3 newBalance=32000000000 prefix=monitor validatorIndex=1"
+wanted4 := "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=1 newBalance=32000000000 prefix=monitor validatorIndex=2"
 wrapped, err := blocks.NewSignedBeaconBlock(b)
 require.NoError(t, err)
 s.processBlock(ctx, wrapped)
@@ -278,10 +278,10 @@ func TestLogAggregatedPerformance(t *testing.T) {
 }

 s.logAggregatedPerformance()
-wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
-    " AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
-    "CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
-    "StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
-    "ValidatorIndex=1 prefix=monitor"
+wanted := "\"Aggregated performance since launch\" attestationInclusion=\"80.00%\"" +
+    " averageInclusionDistance=1.2 balanceChangePct=\"0.95%\" correctlyVotedHeadPct=\"66.67%\" " +
+    "correctlyVotedSourcePct=\"91.67%\" correctlyVotedTargetPct=\"100.00%\" prefix=monitor startBalance=31700000000 " +
+    "startEpoch=0 totalAggregations=0 totalProposedBlocks=1 totalRequested=15 totalSyncContributions=0 " +
+    "validatorIndex=1"
 require.LogsContain(t, hook, wanted)
 }
@@ -14,8 +14,8 @@ func (s *Service) processExitsFromBlock(blk interfaces.ReadOnlyBeaconBlock) {
     idx := exit.Exit.ValidatorIndex
     if s.trackedIndex(idx) {
         log.WithFields(logrus.Fields{
-            "ValidatorIndex": idx,
-            "Slot": blk.Slot(),
+            "validatorIndex": idx,
+            "slot": blk.Slot(),
         }).Info("Voluntary exit was included")
     }
 }
@@ -28,7 +28,7 @@ func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
     defer s.RUnlock()
     if s.trackedIndex(idx) {
         log.WithFields(logrus.Fields{
-            "ValidatorIndex": idx,
+            "validatorIndex": idx,
         }).Info("Voluntary exit was processed")
     }
 }
@@ -43,7 +43,7 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
 wb, err := blocks.NewBeaconBlock(block)
 require.NoError(t, err)
 s.processExitsFromBlock(wb)
-require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
+require.LogsContain(t, hook, "\"Voluntary exit was included\" prefix=monitor slot=0 validatorIndex=2")
 }

 func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
@@ -99,7 +99,7 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
     Signature: make([]byte, 96),
 }
 s.processExit(exit)
-require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
+require.LogsContain(t, hook, "\"Voluntary exit was processed\" prefix=monitor validatorIndex=1")
 }

 func TestProcessExitP2PUntrackedIndices(t *testing.T) {
@@ -21,7 +21,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
     aggPerf.totalSyncCommitteeAggregations++
     s.aggregatedPerformance[idx] = aggPerf

-    log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
+    log.WithField("validatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
     }
 }

@@ -69,11 +69,11 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
         fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))

     log.WithFields(logrus.Fields{
-        "ValidatorIndex": validatorIdx,
-        "ExpectedContribCount": len(committeeIndices),
-        "ContribCount": contrib,
-        "NewBalance": balance,
-        "BalanceChange": balanceChg,
+        "validatorIndex": validatorIdx,
+        "expectedContribCount": len(committeeIndices),
+        "contribCount": contrib,
+        "newBalance": balance,
+        "balanceChange": balanceChg,
     }).Info("Sync committee contribution included")
     }
 }
@@ -22,8 +22,8 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {
 }

 s.processSyncCommitteeContribution(contrib)
-require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
-require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
+require.LogsContain(t, hook, "\"Sync committee aggregation processed\" prefix=monitor validatorIndex=1")
+require.LogsDoNotContain(t, hook, "validatorIndex=2")
 }

 func TestProcessSyncAggregate(t *testing.T) {
@@ -53,7 +53,7 @@ func TestProcessSyncAggregate(t *testing.T) {
 require.NoError(t, err)

 s.processSyncAggregate(beaconState, wrappedBlock)
-require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 ContribCount=1 ExpectedContribCount=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
-require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 ContribCount=2 ExpectedContribCount=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
-require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
+require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=0 contribCount=1 expectedContribCount=4 newBalance=32000000000 prefix=monitor validatorIndex=1")
+require.LogsContain(t, hook, "\"Sync committee contribution included\" balanceChange=100000000 contribCount=2 expectedContribCount=2 newBalance=32000000000 prefix=monitor validatorIndex=12")
+require.LogsDoNotContain(t, hook, "validatorIndex=2")
 }
@@ -111,7 +111,7 @@ func (s *Service) Start() {
     sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })

     log.WithFields(logrus.Fields{
-        "ValidatorIndices": tracked,
+        "validatorIndices": tracked,
     }).Info("Starting service")

     go s.run()
@@ -134,7 +134,7 @@ func (s *Service) run() {
     }

     epoch := slots.ToEpoch(st.Slot())
-    log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
+    log.WithField("epoch", epoch).Info("Synced to head epoch, starting reporting performance")

     s.Lock()
     s.initializePerformanceStructures(st, epoch)
@@ -157,7 +157,7 @@ func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch
     for idx := range s.TrackedValidators {
         balance, err := state.BalanceAtIndex(idx)
         if err != nil {
-            log.WithError(err).WithField("ValidatorIndex", idx).Error(
+            log.WithError(err).WithField("validatorIndex", idx).Error(
                 "Could not fetch starting balance, skipping aggregated logs.")
             balance = 0
         }
@@ -276,7 +276,7 @@ func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
     for idx := range s.TrackedValidators {
         syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
         if err != nil {
-            log.WithError(err).WithField("ValidatorIndex", idx).Error(
+            log.WithError(err).WithField("validatorIndex", idx).Error(
                 "Sync committee assignments will not be reported")
             delete(s.trackedSyncCommitteeIndices, idx)
         } else if len(syncIdx) == 0 {
@@ -148,7 +148,7 @@ func TestStart(t *testing.T) {
 // wait for Logrus
 time.Sleep(1000 * time.Millisecond)
 require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
-require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
+require.LogsContain(t, hook, "\"Starting service\" prefix=monitor validatorIndices=\"[1 2 12 15]\"")
 s.Lock()
 require.Equal(t, s.isLogging, true, "monitor is not running")
 s.Unlock()
@@ -237,7 +237,7 @@ func TestMonitorRoutine(t *testing.T) {

 // Wait for Logrus
 time.Sleep(1000 * time.Millisecond)
-wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
+wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" balanceChange=100000000 blockRoot=%#x newBalance=32000000000 parentRoot=0xf732eaeb7fae prefix=monitor proposerIndex=15 slot=1 version=1", bytesutil.Trunc(root[:]))
 require.LogsContain(t, hook, wanted1)

 }
@@ -151,19 +151,19 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
     if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
         c := params.BeaconConfig()
         c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
-        log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
+        log.WithField("terminalBlockDifficulty", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
         params.OverrideBeaconConfig(c)
     }
     if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
         c := params.BeaconConfig()
         c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
-        log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
+        log.WithField("terminalBlockHash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
         params.OverrideBeaconConfig(c)
     }
     if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
         c := params.BeaconConfig()
         c.TerminalBlockHashActivationEpoch = primitives.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
-        log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
+        log.WithField("terminalBlockHashActivationEpoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
         params.OverrideBeaconConfig(c)
     }

@@ -426,7 +426,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
     clearDB := cliCtx.Bool(cmd.ClearDB.Name)
     forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

-    log.WithField("database-path", dbPath).Info("Checking DB")
+    log.WithField("databasePath", dbPath).Info("Checking DB")

     d, err := kv.NewKVStore(b.ctx, dbPath)
     if err != nil {
@@ -529,7 +529,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
    clearDB := cliCtx.Bool(cmd.ClearDB.Name)
    forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

-    log.WithField("database-path", dbPath).Info("Checking DB")
+    log.WithField("databasePath", dbPath).Info("Checking DB")

     d, err := slasherkv.NewKVStore(b.ctx, dbPath)
     if err != nil {
@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
     switch slotInterval.Interval {
     case 0:
         duration := time.Since(t)
-        log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
+        log.WithField("duration", duration).Debug("Aggregated unaggregated attestations")
         batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
     case 1:
         batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
@@ -238,7 +238,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
     m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
     if err != nil {
         log.WithFields(log.Fields{
-            "block root": hexutil.Encode(root),
+            "blockRoot": hexutil.Encode(root),
         }).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
         return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
     }
@@ -254,8 +254,8 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
     vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
     if err != nil {
         log.WithFields(log.Fields{
-            "block root": hexutil.Encode(root),
-            "blob index": index,
+            "blockRoot": hexutil.Encode(root),
+            "blobIndex": index,
         }).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
         return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
     }
@@ -146,8 +146,8 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1

     if shouldRebuildTrie(canonicalEth1Data.DepositCount, uint64(len(upToEth1DataDeposits))) {
         log.WithFields(logrus.Fields{
-            "unfinalized deposits": len(upToEth1DataDeposits),
-            "total deposit count": canonicalEth1Data.DepositCount,
+            "unfinalizedDeposits": len(upToEth1DataDeposits),
+            "totalDepositCount": canonicalEth1Data.DepositCount,
         }).Warn("Too many unfinalized deposits, building a deposit trie from scratch.")
         return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
     }
@@ -193,7 +193,7 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
     if err != nil {
         return status.Error(codes.Canceled, "Context canceled")
     }
-    log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event")
+    log.WithField("startTime", clock.GenesisTime()).Debug("Received chain started event")
     log.Debug("Sending genesis time notification to connected validator clients")
     gvr := clock.GenesisValidatorsRoot()
     res := &ethpb.ChainStartResponse{
@@ -196,7 +196,7 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger

     if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
         (prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
-        log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
+        log.WithField("validatorIndex", valIndex).Infof("Participation flag found")
         resp.Responses = append(resp.Responses,
             &ethpb.DoppelGangerResponse_ValidatorResponse{
                 PublicKey: v.PublicKey,
@@ -436,8 +436,8 @@ func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlo
     if err != nil {
         if errors.Is(err, consensus_types.ErrUnsupportedField) {
             log.
-                WithField("block_slot", block.Slot()).
-                WithField("retention_start", blobWindowStart).
+                WithField("blockSlot", block.Slot()).
+                WithField("retentionStart", blobWindowStart).
                 Warn("block with slot within blob retention period has version which does not support commitments")
             continue
         }
@@ -99,7 +99,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {

     // Skip blocks that are already being processed.
     if s.cfg.chain.BlockBeingSynced(blkRoot) {
-        log.WithField("BlockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
+        log.WithField("blockRoot", fmt.Sprintf("%#x", blkRoot)).Info("Skipping pending block already being processed")
         continue
     }

@@ -210,5 +210,5 @@ func (l *limiter) retrieveCollector(topic string) (*leakybucket.Collector, error
 }

 func (_ *limiter) topicLogger(topic string) *logrus.Entry {
-    return log.WithField("rate limiter", topic)
+    return log.WithField("rateLimiter", topic)
 }
@@ -128,7 +128,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
     defer func() {
         if r := recover(); r != nil {
             log.WithField("error", r).
-                WithField("recovered_at", "registerRPC").
+                WithField("recoveredAt", "registerRPC").
                 WithField("stack", string(debug.Stack())).
                 Error("Panic occurred")
         }
@@ -106,7 +106,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb
     }
     defer closeStream(stream, log)

-    log := log.WithField("Reason", goodbyeMessage(code))
+    log := log.WithField("reason", goodbyeMessage(code))
     log.WithField("peer", stream.Conn().RemotePeer()).Trace("Sending Goodbye message to peer")

     // Wait up to the response timeout for the peer to receive the goodbye
@@ -295,11 +295,11 @@ func (s *Service) waitForChainStart() {
     }
     s.cfg.clock = clock
     startTime := clock.GenesisTime()
-    log.WithField("starttime", startTime).Debug("Received state initialized event")
+    log.WithField("startTime", startTime).Debug("Received state initialized event")

     ctxMap, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
     if err != nil {
-        log.WithError(err).WithField("genesis_validator_root", clock.GenesisValidatorsRoot()).
+        log.WithError(err).WithField("genesisValidatorRoot", clock.GenesisValidatorsRoot()).
             Error("sync service failed to initialize context version map")
         return
     }
@@ -311,7 +311,7 @@ func (s *Service) waitForChainStart() {
     if startTime.After(prysmTime.Now()) {
         time.Sleep(prysmTime.Until(startTime))
     }
-    log.WithField("starttime", startTime).Debug("Chain started in sync service")
+    log.WithField("startTime", startTime).Debug("Chain started in sync service")
     s.markForChainStart()
 }

@@ -202,7 +202,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
     if r := recover(); r != nil {
         tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
         log.WithField("error", r).
-            WithField("recovered_at", "subscribeWithBase").
+            WithField("recoveredAt", "subscribeWithBase").
             WithField("stack", string(debug.Stack())).
             Error("Panic occurred")
     }
@@ -290,9 +290,9 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
     fields := logrus.Fields{
         "topic": topic,
         "multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
-        "peer id": pid.String(),
+        "peerID": pid.String(),
         "agent": agentString(pid, s.cfg.p2p.Host()),
-        "gossip score": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
+        "gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
     }
     if features.Get().EnableFullSSZDataLogging {
         fields["message"] = hexutil.Encode(msg.Data)
@@ -305,9 +305,9 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
     log.WithError(err).WithFields(logrus.Fields{
         "topic": topic,
         "multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
-        "peer id": pid.String(),
+        "peerID": pid.String(),
         "agent": agentString(pid, s.cfg.p2p.Host()),
-        "gossip score": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
+        "gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
     }).Debugf("Gossip message was ignored")
     }
     messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
@@ -310,7 +310,7 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
     }
     if idx != bv.blob.ProposerIndex() {
         log.WithError(ErrSidecarUnexpectedProposer).
-            WithFields(logging.BlobFields(bv.blob)).WithField("expected_proposer", idx).
+            WithFields(logging.BlobFields(bv.blob)).WithField("expectedProposer", idx).
             Debug("unexpected blob proposer")
         return ErrSidecarUnexpectedProposer
     }
@@ -53,11 +53,11 @@ type SignatureData struct {

 func (d SignatureData) logFields() log.Fields {
     return log.Fields{
-        "root": fmt.Sprintf("%#x", d.Root),
-        "parent_root": fmt.Sprintf("%#x", d.Parent),
-        "signature": fmt.Sprintf("%#x", d.Signature),
-        "proposer": d.Proposer,
-        "slot": d.Slot,
+        "root": fmt.Sprintf("%#x", d.Root),
+        "parentRoot": fmt.Sprintf("%#x", d.Parent),
+        "signature": fmt.Sprintf("%#x", d.Signature),
+        "proposer": d.Proposer,
+        "slot": d.Slot,
     }
 }

@@ -144,8 +144,8 @@ func checkIfWithdrawsAreInPool(ctx context.Context, client *beacon.Client, reque
     if len(requestMap) != 0 {
         for key, address := range requestMap {
             log.WithFields(log.Fields{
-                "validator_index": key,
-                "execution_address:": address,
+                "validatorIndex": key,
+                "executionAddress:": address,
             }).Warn("Set withdrawal address message not found in the node's operations pool.")
         }
         log.Warn("Please check before resubmitting. Set withdrawal address messages that were not found in the pool may have been already included into a block.")
@@ -11,12 +11,12 @@ import (
 // which can be passed to log.WithFields.
 func BlobFields(blob blocks.ROBlob) logrus.Fields {
     return logrus.Fields{
-        "slot": blob.Slot(),
-        "proposer_index": blob.ProposerIndex(),
-        "block_root": fmt.Sprintf("%#x", blob.BlockRoot()),
-        "parent_root": fmt.Sprintf("%#x", blob.ParentRoot()),
-        "kzg_commitment": fmt.Sprintf("%#x", blob.KzgCommitment),
-        "index": blob.Index,
+        "slot": blob.Slot(),
+        "proposerIndex": blob.ProposerIndex(),
+        "blockRoot": fmt.Sprintf("%#x", blob.BlockRoot()),
+        "parentRoot": fmt.Sprintf("%#x", blob.ParentRoot()),
+        "kzgCommitment": fmt.Sprintf("%#x", blob.KzgCommitment),
+        "index": blob.Index,
     }
 }

@@ -24,9 +24,9 @@ func BlobFields(blob blocks.ROBlob) logrus.Fields {
 // all other sidecars for the block.
 func BlockFieldsFromBlob(blob blocks.ROBlob) logrus.Fields {
     return logrus.Fields{
-        "slot": blob.Slot(),
-        "proposer_index": blob.ProposerIndex(),
-        "block_root": fmt.Sprintf("%#x", blob.BlockRoot()),
-        "parent_root": fmt.Sprintf("%#x", blob.ParentRoot()),
+        "slot": blob.Slot(),
+        "proposerIndex": blob.ProposerIndex(),
+        "blockRoot": fmt.Sprintf("%#x", blob.BlockRoot()),
+        "parentRoot": fmt.Sprintf("%#x", blob.ParentRoot()),
     }
 }
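A short usage sketch for the helpers above (the log message is assumed, not taken from the commit): the returned logrus.Fields map plugs straight into WithFields, so every blob-related log line shares the same camelCase keys, as in the verifier hunk earlier:

// Attach the shared blob fields to a single entry.
log.WithFields(logging.BlobFields(blob)).Debug("blob sidecar received")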
@@ -185,9 +185,9 @@ func (node *BeaconNode) saveGenesis(ctx context.Context) (string, error) {
     if err != nil {
         return "", err
     }
-    log.WithField("fork_version", g.Fork().CurrentVersion).
-        WithField("latest_block_header.root", fmt.Sprintf("%#x", lbhr)).
-        WithField("state_root", fmt.Sprintf("%#x", root)).
+    log.WithField("forkVersion", g.Fork().CurrentVersion).
+        WithField("latestBlockHeaderRoot", fmt.Sprintf("%#x", lbhr)).
+        WithField("stateRoot", fmt.Sprintf("%#x", root)).
         Infof("BeaconState info")

     genesisBytes, err := g.MarshalSSZ()
@@ -290,9 +290,9 @@ func (node *LighthouseBeaconNode) saveGenesis(ctx context.Context, testNetDir st
     if err != nil {
         return err
     }
-    log.WithField("fork_version", g.Fork().CurrentVersion).
-        WithField("latest_block_header.root", fmt.Sprintf("%#x", lbhr)).
-        WithField("state_root", fmt.Sprintf("%#x", root)).
+    log.WithField("forkVersion", g.Fork().CurrentVersion).
+        WithField("latestBlockHeaderRoot", fmt.Sprintf("%#x", lbhr)).
+        WithField("stateRoot", fmt.Sprintf("%#x", root)).
         Infof("BeaconState info")

     genesisBytes, err := g.MarshalSSZ()
@@ -103,7 +103,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
     continue
     }
     if len(payload.FeeRecipient) == 0 || hexutil.Encode(payload.FeeRecipient) == params.BeaconConfig().EthBurnAddressHex {
-        log.WithField("proposer_index", bb.ProposerIndex).WithField("slot", bb.Slot).Error("fee recipient eval bug")
+        log.WithField("proposerIndex", bb.ProposerIndex).WithField("slot", bb.Slot).Error("fee recipient eval bug")
         return errors.New("fee recipient is not set")
     }

@@ -132,8 +132,8 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
     if !knownKey {
         log.WithField("pubkey", pk).
             WithField("slot", bb.Slot).
-            WithField("proposer_index", bb.ProposerIndex).
-            WithField("fee_recipient", fr.Hex()).
+            WithField("proposerIndex", bb.ProposerIndex).
+            WithField("feeRecipient", fr.Hex()).
             Warn("unknown key observed, not a deterministically generated key")
         return errors.New("unknown key observed, not a deterministically generated key")
     }
@@ -123,21 +123,21 @@ func compareHeads(clients map[string]pb.BeaconChainClient) {
 func logHead(endpt string, head *pb.ChainHead) {
     log.WithFields(
         logrus.Fields{
-            "HeadSlot": head.HeadSlot,
-            "HeadRoot": hex.EncodeToString(head.HeadBlockRoot),
-            "JustifiedEpoch": head.JustifiedEpoch,
-            "JustifiedRoot": hex.EncodeToString(head.JustifiedBlockRoot),
-            "FinalizedEpoch": head.FinalizedEpoch,
-            "FinalizedRoot": hex.EncodeToString(head.FinalizedBlockRoot),
+            "headSlot": head.HeadSlot,
+            "headRoot": hex.EncodeToString(head.HeadBlockRoot),
+            "justifiedEpoch": head.JustifiedEpoch,
+            "justifiedRoot": hex.EncodeToString(head.JustifiedBlockRoot),
+            "finalizedEpoch": head.FinalizedEpoch,
+            "finalizedRoot": hex.EncodeToString(head.FinalizedBlockRoot),
         }).Info("Head from beacon node ", endpt)
 }

 func logParticipation(endpt string, p *pb.ValidatorParticipation) {
     log.WithFields(
         logrus.Fields{
-            "VotedEther": p.VotedEther,
-            "TotalEther": p.EligibleEther,
-            "ParticipationRate": p.GlobalParticipationRate,
+            "votedEther": p.VotedEther,
+            "totalEther": p.EligibleEther,
+            "participationRate": p.GlobalParticipationRate,
         }).Info("Participation rate from beacon node ", endpt)
 }

@@ -83,7 +83,7 @@ func zipKeystoresToOutputDir(keystoresToBackup []*keymanager.Keystore, outputDir
     }
     }
     log.WithField(
-        "backup-path", archivePath,
+        "backupPath", archivePath,
     ).Infof("Successfully backed up %d accounts", len(keystoresToBackup))
     return nil
 }
@@ -247,7 +247,7 @@ func OpenOrCreateNewWallet(cliCtx *cli.Context) (*Wallet, error) {
     if err := w.SaveWallet(); err != nil {
         return nil, errors.Wrap(err, "could not save wallet to disk")
     }
-    log.WithField("wallet-path", walletDir).Info(
+    log.WithField("walletPath", walletDir).Info(
         "Successfully created new wallet",
     )
     return w, nil
@@ -36,7 +36,7 @@ func (acm *CLIManager) WalletCreate(ctx context.Context) (*wallet.Wallet, error)
     if err != nil {
         return nil, err
     }
-    log.WithField("--wallet-dir", acm.walletDir).Info(
+    log.WithField("walletDir", acm.walletDir).Info(
         "Successfully created wallet with ability to import keystores",
     )
 case keymanager.Derived:
|
||||
); err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize wallet")
|
||||
}
|
||||
log.WithField("--wallet-dir", acm.walletDir).Info(
|
||||
log.WithField("walletDir", acm.walletDir).Info(
|
||||
"Successfully created HD wallet from mnemonic and regenerated accounts",
|
||||
)
|
||||
case keymanager.Web3Signer:
|
||||
|
@ -48,7 +48,7 @@ func (acm *CLIManager) WalletRecover(ctx context.Context) (*wallet.Wallet, error
|
||||
if err := km.RecoverAccountsFromMnemonic(ctx, acm.mnemonic, acm.mnemonicLanguage, acm.mnemonic25thWord, acm.numAccounts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("wallet-path", w.AccountsDir()).Infof(
|
||||
log.WithField("walletPath", w.AccountsDir()).Infof(
|
||||
"Successfully recovered HD wallet with %d accounts. Please use `accounts list` to view details for your accounts",
|
||||
acm.numAccounts,
|
||||
)
|
||||
|
@ -271,8 +271,8 @@ func (c *ValidatorClient) initializeFromCLI(cliCtx *cli.Context, router *mux.Rou
|
||||
c.wallet = w
|
||||
// TODO(#9883) - Remove this when we have a better way to handle this.
|
||||
log.WithFields(logrus.Fields{
|
||||
"wallet": w.AccountsDir(),
|
||||
"keymanager-kind": w.KeymanagerKind().String(),
|
||||
"wallet": w.AccountsDir(),
|
||||
"keymanagerKind": w.KeymanagerKind().String(),
|
||||
}).Info("Opened validator wallet")
|
||||
}
|
||||
}
|
||||
|