Combines func params of the same type (#7500)

* combines func params

* update leftovers

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Victor Farazdagi 2020-10-12 18:43:19 +03:00 committed by GitHub
parent db48e12270
commit a019a0db4c
79 changed files with 151 additions and 189 deletions
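The whole diff below is a mechanical application of Go's shorthand for parameter lists: consecutive parameters of the same type may share a single type declaration, so "a uint64, b uint64" becomes "a, b uint64". The sketch that follows is illustrative only (the function names and bodies are hypothetical, not taken from the Prysm codebase); it shows that the two spellings are equivalent and that callers are unaffected, which is why every hunk in this commit touches only a signature line.

package main

import "fmt"

// Hypothetical helper, written two ways to mirror the before/after pairs in the hunks below.

// Before: the type is repeated for every parameter.
func slotStartSecondsVerbose(genesisTimeSec uint64, slot uint64, secondsPerSlot uint64) uint64 {
	return genesisTimeSec + slot*secondsPerSlot
}

// After: consecutive parameters of the same type are grouped under one type declaration.
// A parameter of a different type (e.g. a [32]byte seed in several hunks below) keeps its own declaration.
func slotStartSeconds(genesisTimeSec, slot, secondsPerSlot uint64) uint64 {
	return genesisTimeSec + slot*secondsPerSlot
}

func main() {
	// Both forms compile to identical code and behave identically.
	fmt.Println(slotStartSecondsVerbose(1606824023, 10, 12)) // 1606824143
	fmt.Println(slotStartSeconds(1606824023, 10, 12))        // 1606824143
}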


@ -115,7 +115,7 @@ var (
)
// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
clockTimeSlot.Set(float64(clockSlot))
beaconSlot.Set(float64(stateSlot))
beaconHeadSlot.Set(float64(headSlot))


@ -162,7 +162,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
}
// verifyLMDFFGConsistent verifies LMD GHOST and FFG votes are consistent with each other.
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot []byte, lmdRoot []byte) error {
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot, lmdRoot []byte) error {
ffgSlot, err := helpers.StartSlot(ffgEpoch)
if err != nil {
return err


@ -284,7 +284,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block
s.saveInitSyncBlock(blockRoot, signed)
@ -358,7 +358,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
root [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}


@ -355,7 +355,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*ethpb.BeaconBlock, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)


@ -474,7 +474,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// This is called when a client starts from non-genesis slot. This passes last justified and finalized
// information to fork choice service to initializes fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
}


@ -41,7 +41,7 @@ func newSubnetIDs() *subnetIDs {
}
// AddAttesterSubnetID adds the subnet index for subscribing subnet for the attester of a given slot.
func (c *subnetIDs) AddAttesterSubnetID(slot uint64, subnetID uint64) {
func (c *subnetIDs) AddAttesterSubnetID(slot, subnetID uint64) {
c.attesterLock.Lock()
defer c.attesterLock.Unlock()
@ -69,7 +69,7 @@ func (c *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
}
// AddAggregatorSubnetID adds the subnet ID for subscribing subnet for the aggregator of a given slot.
func (c *subnetIDs) AddAggregatorSubnetID(slot uint64, subnetID uint64) {
func (c *subnetIDs) AddAggregatorSubnetID(slot, subnetID uint64) {
c.aggregatorLock.Lock()
defer c.aggregatorLock.Unlock()


@ -115,7 +115,7 @@ func VerifyAttesterSlashing(ctx context.Context, beaconState *stateTrie.BeaconSt
// # Surround vote
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
// )
func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.AttestationData) bool {
func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
return false
}


@ -15,7 +15,7 @@ import (
)
// retrieves the signature set from the raw data, public key,signature and domain provided.
func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func retrieveSignatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@ -36,7 +36,7 @@ func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domai
}
// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error {
func verifySignature(signedData, pub, signature, domain []byte) error {
set, err := retrieveSignatureSet(signedData, pub, signature, domain)
if err != nil {
return err


@ -419,7 +419,7 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
}
}
func buildState(t testing.TB, slot uint64, validatorCount uint64) *state.BeaconState {
func buildState(t testing.TB, slot, validatorCount uint64) *state.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{


@ -68,7 +68,7 @@ func AttestationsDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Valida
return rewards, penalties, nil
}
func attestationDelta(pBal *Balance, v *Validator, prevEpoch uint64, finalizedEpoch uint64) (uint64, uint64) {
func attestationDelta(pBal *Balance, v *Validator, prevEpoch, finalizedEpoch uint64) (uint64, uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible || pBal.ActiveCurrentEpoch == 0 {
return 0, 0
@ -182,7 +182,7 @@ func ProposersDelta(state *stateTrie.BeaconState, pBal *Balance, vp []*Validator
// Spec code:
// def is_in_inactivity_leak(state: BeaconState) -> bool:
// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
func isInInactivityLeak(prevEpoch, finalizedEpoch uint64) bool {
return finalityDelay(prevEpoch, finalizedEpoch) > params.BeaconConfig().MinEpochsToInactivityPenalty
}
@ -191,6 +191,6 @@ func isInInactivityLeak(prevEpoch uint64, finalizedEpoch uint64) bool {
// Spec code:
// def get_finality_delay(state: BeaconState) -> uint64:
// return get_previous_epoch(state) - state.finalized_checkpoint.epoch
func finalityDelay(prevEpoch uint64, finalizedEpoch uint64) uint64 {
func finalityDelay(prevEpoch, finalizedEpoch uint64) uint64 {
return prevEpoch - finalizedEpoch
}


@ -254,7 +254,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
}
}
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
func buildState(slot, validatorCount uint64) *pb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{


@ -67,7 +67,7 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
// count=committees_per_slot * SLOTS_PER_EPOCH,
// )
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committeeIndex uint64) ([]uint64, error) {
func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot, committeeIndex uint64) ([]uint64, error) {
epoch := SlotToEpoch(slot)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
@ -93,7 +93,7 @@ func BeaconCommitteeFromState(state *stateTrie.BeaconState, slot uint64, committ
// BeaconCommittee returns the crosslink committee of a given slot and committee index. The
// validator indices and seed are provided as an argument rather than a direct implementation
// from the spec definition. Having them as an argument allows for cheaper computation run time.
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, committeeIndex uint64) ([]uint64, error) {
func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot, committeeIndex uint64) ([]uint64, error) {
indices, err := committeeCache.Committee(slot, seed, committeeIndex)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
@ -127,8 +127,7 @@ func BeaconCommittee(validatorIndices []uint64, seed [32]byte, slot uint64, comm
func ComputeCommittee(
indices []uint64,
seed [32]byte,
index uint64,
count uint64,
index, count uint64,
) ([]uint64, error) {
validatorCount := uint64(len(indices))
start := sliceutil.SplitOffset(validatorCount, count, index)


@ -64,7 +64,7 @@ func TotalActiveBalance(state *stateTrie.BeaconState) (uint64, error) {
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func IncreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@ -82,7 +82,7 @@ func IncreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Increase the validator balance at index ``index`` by ``delta``.
// """
// state.balances[index] += delta
func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func IncreaseBalanceWithVal(currBalance, delta uint64) uint64 {
return currBalance + delta
}
@ -94,7 +94,7 @@ func IncreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) error {
func DecreaseBalance(state *stateTrie.BeaconState, idx, delta uint64) error {
balAtIdx, err := state.BalanceAtIndex(idx)
if err != nil {
return err
@ -112,7 +112,7 @@ func DecreaseBalance(state *stateTrie.BeaconState, idx uint64, delta uint64) err
// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
// """
// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
func DecreaseBalanceWithVal(currBalance uint64, delta uint64) uint64 {
func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
}


@ -34,13 +34,13 @@ func SplitIndices(l []uint64, n uint64) [][]uint64 {
// We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays
// constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func ShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func ShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, true /* shuffle */)
}
// UnShuffledIndex returns the inverse of ShuffledIndex. This implementation is based
// on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle
func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, error) {
func UnShuffledIndex(index, indexCount uint64, seed [32]byte) (uint64, error) {
return ComputeShuffledIndex(index, indexCount, seed, false /* un-shuffle */)
}
@ -64,7 +64,7 @@ func UnShuffledIndex(index uint64, indexCount uint64, seed [32]byte) (uint64, er
// index = flip if bit else index
//
// return ValidatorIndex(index)
func ComputeShuffledIndex(index uint64, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
func ComputeShuffledIndex(index, indexCount uint64, seed [32]byte, shuffle bool) (uint64, error) {
if params.BeaconConfig().ShuffleRoundCount == 0 {
return index, nil
}


@ -73,7 +73,7 @@ func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, er
}
// ComputeDomainVerifySigningRoot computes domain and verifies signing root of an object given the beacon state, validator index and signature.
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
func ComputeDomainVerifySigningRoot(state *state.BeaconState, index, epoch uint64, obj interface{}, domain [4]byte, sig []byte) error {
v, err := state.ValidatorAtIndex(index)
if err != nil {
return err
@ -86,7 +86,7 @@ func ComputeDomainVerifySigningRoot(state *state.BeaconState, index uint64, epoc
}
// VerifySigningRoot verifies the signing root of an object given it's public key, signature and domain.
func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []byte) error {
func VerifySigningRoot(obj interface{}, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@ -106,7 +106,7 @@ func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []b
}
// VerifyBlockSigningRoot verifies the signing root of a block given it's public key, signature and domain.
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub, signature, domain []byte) error {
set, err := RetrieveBlockSignatureSet(blk, pub, signature, domain)
if err != nil {
return err
@ -128,7 +128,7 @@ func VerifyBlockSigningRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte
// RetrieveBlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collating it
// into a signature set object.
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@ -146,7 +146,7 @@ func RetrieveBlockSignatureSet(blk *ethpb.BeaconBlock, pub []byte, signature []b
}
// VerifyBlockHeaderSigningRoot verifies the signing root of a block header given it's public key, signature and domain.
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, signature []byte, domain []byte) error {
func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signature, domain []byte) error {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return errors.Wrap(err, "could not convert bytes to public key")
@ -178,7 +178,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub []byte, s
// genesis_validators_root = Root() # all bytes zero by default
// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root)
// return Domain(domain_type + fork_data_root[:28])
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) {
func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValidatorsRoot []byte) ([]byte, error) {
if forkVersion == nil {
forkVersion = params.BeaconConfig().GenesisForkVersion
}
@ -217,7 +217,7 @@ func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
// current_version=current_version,
// genesis_validators_root=genesis_validators_root,
// ))
func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
func computeForkDataRoot(version, root []byte) ([32]byte, error) {
r, err := (&pb.ForkData{
CurrentVersion: version,
GenesisValidatorsRoot: root,
@ -238,7 +238,7 @@ func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) {
// 4-bytes suffices for practical separation of forks/chains.
// """
// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
func ComputeForkDigest(version []byte, genesisValidatorsRoot []byte) ([4]byte, error) {
func ComputeForkDigest(version, genesisValidatorsRoot []byte) ([4]byte, error) {
dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot)
if err != nil {
return [4]byte{}, err


@ -114,7 +114,7 @@ func SlotsSinceEpochStarts(slot uint64) uint64 {
}
// VerifySlotTime validates the input slot is not from the future.
func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration) error {
func VerifySlotTime(genesisTime, slot uint64, timeTolerance time.Duration) error {
slotTime, err := SlotToTime(genesisTime, slot)
if err != nil {
return err
@ -136,7 +136,7 @@ func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration
}
// SlotToTime takes the given slot and genesis time to determine the start time of the slot.
func SlotToTime(genesisTimeSec uint64, slot uint64) (time.Time, error) {
func SlotToTime(genesisTimeSec, slot uint64) (time.Time, error) {
timeSinceGenesis, err := mathutil.Mul64(slot, params.BeaconConfig().SecondsPerSlot)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %v", slot, err)
@ -167,7 +167,7 @@ func CurrentSlot(genesisTimeSec uint64) uint64 {
// ValidateSlotClock validates a provided slot against the local
// clock to ensure slots that are unreasonable are returned with
// an error.
func ValidateSlotClock(slot uint64, genesisTimeSec uint64) error {
func ValidateSlotClock(slot, genesisTimeSec uint64) error {
maxPossibleSlot := CurrentSlot(genesisTimeSec) + MaxSlotBuffer
// Defensive check to ensure that we only process slots up to a hard limit
// from our local clock.


@ -29,7 +29,7 @@ func IsActiveValidatorUsingTrie(validator *stateTrie.ReadOnlyValidator, epoch ui
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
}
func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch uint64) bool {
func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch uint64) bool {
return activationEpoch <= epoch && epoch < exitEpoch
}
@ -42,7 +42,7 @@ func checkValidatorActiveStatus(activationEpoch uint64, exitEpoch uint64, epoch
// Check if ``validator`` is slashable.
// """
// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
func IsSlashableValidator(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func IsSlashableValidator(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
return checkValidatorSlashable(activationEpoch, withdrawableEpoch, slashed, epoch)
}
@ -51,7 +51,7 @@ func IsSlashableValidatorUsingTrie(val *stateTrie.ReadOnlyValidator, epoch uint6
return checkValidatorSlashable(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), epoch)
}
func checkValidatorSlashable(activationEpoch uint64, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
func checkValidatorSlashable(activationEpoch, withdrawableEpoch uint64, slashed bool, epoch uint64) bool {
active := activationEpoch <= epoch
beforeWithdrawable := epoch < withdrawableEpoch
return beforeWithdrawable && active && !slashed
@ -313,7 +313,7 @@ func IsEligibleForActivationQueueUsingTrie(validator *stateTrie.ReadOnlyValidato
}
// isEligibleForActivationQueue carries out the logic for IsEligibleForActivationQueue*
func isEligibileForActivationQueue(activationEligibilityEpoch uint64, effectiveBalance uint64) bool {
func isEligibileForActivationQueue(activationEligibilityEpoch, effectiveBalance uint64) bool {
return activationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch &&
effectiveBalance == params.BeaconConfig().MaxEffectiveBalance
}
@ -346,7 +346,7 @@ func IsEligibleForActivationUsingTrie(state *stateTrie.BeaconState, validator *s
}
// isEligibleForActivation carries out the logic for IsEligibleForActivation*
func isEligibleForActivation(activationEligibilityEpoch uint64, activationEpoch uint64, finalizedEpoch uint64) bool {
func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finalizedEpoch uint64) bool {
return activationEligibilityEpoch <= finalizedEpoch &&
activationEpoch == params.BeaconConfig().FarFutureEpoch
}


@ -249,7 +249,7 @@ func EmptyGenesisState() (*stateTrie.BeaconState, error) {
// return True
// This method has been modified from the spec to allow whole states not to be saved
// but instead only cache the relevant information.
func IsValidGenesisState(chainStartDepositCount uint64, currentTime uint64) bool {
func IsValidGenesisState(chainStartDepositCount, currentTime uint64) bool {
if currentTime < params.BeaconConfig().MinGenesisTime {
return false
}


@ -376,11 +376,7 @@ func getBlockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFil
func fetchBlockRootsBySlotRange(
ctx context.Context,
bkt *bolt.Bucket,
startSlotEncoded interface{},
endSlotEncoded interface{},
startEpochEncoded interface{},
endEpochEncoded interface{},
slotStepEncoded interface{},
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.fetchBlockRootsBySlotRange")
defer span.End()


@ -181,7 +181,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
}
func setup(justifiedEpoch uint64, finalizedEpoch uint64) *ForkChoice {
func setup(justifiedEpoch, finalizedEpoch uint64) *ForkChoice {
f := New(0, 0, params.BeaconConfig().ZeroHash)
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
f.store.nodes = append(f.store.nodes, &Node{


@ -13,8 +13,7 @@ func computeDeltas(
ctx context.Context,
blockIndices map[[32]byte]uint64,
votes []Vote,
oldBalances []uint64,
newBalances []uint64,
oldBalances, newBalances []uint64,
) ([]int, []Vote, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.computeDeltas")
defer span.End()


@ -58,10 +58,8 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
// It then updates the new node's parent with best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot uint64,
root [32]byte,
parent [32]byte,
graffiti [32]byte,
justifiedEpoch uint64, finalizedEpoch uint64) error {
root, parent, graffiti [32]byte,
justifiedEpoch, finalizedEpoch uint64) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
defer span.End()
@ -113,7 +111,7 @@ func (s *Store) insert(ctx context.Context,
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes delta to its parents delta. After scoring changes,
// the best child is then updated along with best descendant.
func (s *Store) applyWeightChanges(ctx context.Context, justifiedEpoch uint64, finalizedEpoch uint64, delta []int) error {
func (s *Store) applyWeightChanges(ctx context.Context, justifiedEpoch, finalizedEpoch uint64, delta []int) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
defer span.End()
@ -180,7 +178,7 @@ func (s *Store) applyWeightChanges(ctx context.Context, justifiedEpoch uint64, f
// 2.) The child is already the best child and the parent is updated with the new best descendant.
// 3.) The child is not the best child but becomes the best child.
// 4.) The child is not the best child and does not become best child.
func (s *Store) updateBestChildAndDescendant(parentIndex uint64, childIndex uint64) error {
func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) error {
// Protection against parent index out of bound, this should not happen.
if parentIndex >= uint64(len(s.nodes)) {


@ -16,7 +16,7 @@ const defaultPruneThreshold = 256
var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New(justifiedEpoch uint64, finalizedEpoch uint64, finalizedRoot [32]byte) *ForkChoice {
func New(justifiedEpoch, finalizedEpoch uint64, finalizedRoot [32]byte) *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
@ -91,7 +91,7 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
}
// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(ctx context.Context, slot uint64, blockRoot [32]byte, parentRoot [32]byte, graffiti [32]byte, justifiedEpoch uint64, finalizedEpoch uint64) error {
func (f *ForkChoice) ProcessBlock(ctx context.Context, slot uint64, blockRoot, parentRoot, graffiti [32]byte, justifiedEpoch, finalizedEpoch uint64) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
defer span.End()


@ -23,7 +23,7 @@ func (p *AttCaches) AggregateUnaggregatedAttestations() error {
// AggregateUnaggregatedAttestationsBySlotIndex aggregates the unaggregated attestations and saves
// newly aggregated attestations in the pool. Unaggregated attestations are filtered by slot and
// committee index.
func (p *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(slot uint64, committeeIndex uint64) error {
func (p *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(slot, committeeIndex uint64) error {
unaggregatedAtts := p.UnaggregatedAttestationsBySlotIndex(slot, committeeIndex)
return p.aggregateUnaggregatedAttestations(unaggregatedAtts)
}
@ -153,7 +153,7 @@ func (p *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (p *AttCaches) AggregatedAttestationsBySlotIndex(slot uint64, committeeIndex uint64) []*ethpb.Attestation {
func (p *AttCaches) AggregatedAttestationsBySlotIndex(slot, committeeIndex uint64) []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0)
p.aggregatedAttLock.RLock()


@ -66,7 +66,7 @@ func (p *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
// filtered by committee index and slot.
func (p *AttCaches) UnaggregatedAttestationsBySlotIndex(slot uint64, committeeIndex uint64) []*ethpb.Attestation {
func (p *AttCaches) UnaggregatedAttestationsBySlotIndex(slot, committeeIndex uint64) []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0)
p.unAggregateAttLock.RLock()


@ -182,8 +182,7 @@ func (s *Service) createListener(
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort int,
tcpPort int,
udpPort, tcpPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
@ -289,7 +288,7 @@ func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
return discv5Nodes
}
func parseGenericAddrs(addrs []string) (enodeString []string, multiAddrString []string) {
func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
for _, addr := range addrs {
if addr == "" {
// Ignore empty entries


@ -413,7 +413,7 @@ func (s *Service) requestMissingLogs(ctx context.Context, blkNumber uint64, want
return nil
}
func (s *Service) processBlksInRange(ctx context.Context, startBlk uint64, endBlk uint64) error {
func (s *Service) processBlksInRange(ctx context.Context, startBlk, endBlk uint64) error {
for i := startBlk; i <= endBlk; i++ {
err := s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
if err != nil {
@ -452,8 +452,7 @@ func (s *Service) checkHeaderForChainstart(header *gethTypes.Header) {
s.checkForChainstart(header.Hash(), header.Number, header.Time)
}
func (s *Service) checkHeaderRange(start uint64, end uint64,
headersMap map[uint64]*gethTypes.Header,
func (s *Service) checkHeaderRange(start, end uint64, headersMap map[uint64]*gethTypes.Header,
requestHeaders func(uint64, uint64) error) error {
for i := start; i <= end; i++ {
if !s.chainStartData.Chainstarted {


@ -543,7 +543,7 @@ func (s *Service) processBlockHeader(header *gethTypes.Header) {
// batchRequestHeaders requests the block range specified in the arguments. Instead of requesting
// each block in one call, it batches all requests into a single rpc call.
func (s *Service) batchRequestHeaders(startBlock uint64, endBlock uint64) ([]*gethTypes.Header, error) {
func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes.Header, error) {
requestRange := (endBlock - startBlock) + 1
elems := make([]gethRPC.BatchElem, 0, requestRange)
headers := make([]*gethTypes.Header, 0, requestRange)


@ -305,8 +305,7 @@ func (vs *Server) slotStartTime(slot uint64) uint64 {
func (vs *Server) inRangeVotes(ctx context.Context,
beaconState *stateTrie.BeaconState,
firstValidBlockNumber *big.Int,
lastValidBlockNumber *big.Int) ([]eth1DataSingleVote, error) {
firstValidBlockNumber, lastValidBlockNumber *big.Int) ([]eth1DataSingleVote, error) {
currentETH1Data := vs.HeadFetcher.HeadETH1Data()


@ -401,7 +401,7 @@ func (b *BeaconState) SetBalances(val []uint64) error {
// UpdateBalancesAtIndex for the beacon state. This method updates the balance
// at a specific index to a new value.
func (b *BeaconState) UpdateBalancesAtIndex(idx uint64, val uint64) error {
func (b *BeaconState) UpdateBalancesAtIndex(idx, val uint64) error {
if !b.HasInnerState() {
return ErrNilInnerState
}
@ -490,7 +490,7 @@ func (b *BeaconState) SetSlashings(val []uint64) error {
// UpdateSlashingsAtIndex for the beacon state. Updates the slashings
// at a specific index to a new value.
func (b *BeaconState) UpdateSlashingsAtIndex(idx uint64, val uint64) error {
func (b *BeaconState) UpdateSlashingsAtIndex(idx, val uint64) error {
if !b.HasInnerState() {
return ErrNilInnerState
}


@ -64,7 +64,7 @@ func (s *State) ReplayBlocks(ctx context.Context, state *stateTrie.BeaconState,
// LoadBlocks loads the blocks between start slot and end slot by recursively fetching from end block root.
// The Blocks are returned in slot-descending order.
func (s *State) LoadBlocks(ctx context.Context, startSlot uint64, endSlot uint64, endBlockRoot [32]byte) ([]*ethpb.SignedBeaconBlock, error) {
func (s *State) LoadBlocks(ctx context.Context, startSlot, endSlot uint64, endBlockRoot [32]byte) ([]*ethpb.SignedBeaconBlock, error) {
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
blocks, err := s.beaconDB.Blocks(ctx, filter)
if err != nil {
@ -265,7 +265,7 @@ func (s *State) genesisRoot(ctx context.Context) ([32]byte, error) {
// Given the start slot and the end slot, this returns the finalized beacon blocks in between.
// Since hot states don't have finalized blocks, this should ONLY be used for replaying cold state.
func (s *State) loadFinalizedBlocks(ctx context.Context, startSlot uint64, endSlot uint64) ([]*ethpb.SignedBeaconBlock, error) {
func (s *State) loadFinalizedBlocks(ctx context.Context, startSlot, endSlot uint64) ([]*ethpb.SignedBeaconBlock, error) {
f := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
bs, err := s.beaconDB.Blocks(ctx, f)
if err != nil {


@ -214,7 +214,7 @@ func (s *Service) validateBlockInAttestation(ctx context.Context, satt *ethpb.Si
}
// Returns true if the node has received aggregate for the aggregator with index and target epoch.
func (s *Service) hasSeenAggregatorIndexEpoch(epoch uint64, aggregatorIndex uint64) bool {
func (s *Service) hasSeenAggregatorIndexEpoch(epoch, aggregatorIndex uint64) bool {
s.seenAttestationLock.RLock()
defer s.seenAttestationLock.RUnlock()
b := append(bytesutil.Bytes32(epoch), bytesutil.Bytes32(aggregatorIndex)...)
@ -223,7 +223,7 @@ func (s *Service) hasSeenAggregatorIndexEpoch(epoch uint64, aggregatorIndex uint
}
// Set aggregate's aggregator index target epoch as seen.
func (s *Service) setAggregatorIndexEpochSeen(epoch uint64, aggregatorIndex uint64) {
func (s *Service) setAggregatorIndexEpochSeen(epoch, aggregatorIndex uint64) {
s.seenAttestationLock.Lock()
defer s.seenAttestationLock.Unlock()
b := append(bytesutil.Bytes32(epoch), bytesutil.Bytes32(aggregatorIndex)...)


@ -62,7 +62,7 @@ func (s *Service) validateAttesterSlashing(ctx context.Context, pid peer.ID, msg
}
// Returns true if the node has already received a valid attester slashing with the attesting indices.
func (s *Service) hasSeenAttesterSlashingIndices(indices1 []uint64, indices2 []uint64) bool {
func (s *Service) hasSeenAttesterSlashingIndices(indices1, indices2 []uint64) bool {
s.seenAttesterSlashingLock.RLock()
defer s.seenAttesterSlashingLock.RUnlock()
@ -81,7 +81,7 @@ func (s *Service) hasSeenAttesterSlashingIndices(indices1 []uint64, indices2 []u
}
// Set attester slashing indices in attester slashing cache.
func (s *Service) setAttesterSlashingIndicesSeen(indices1 []uint64, indices2 []uint64) {
func (s *Service) setAttesterSlashingIndicesSeen(indices1, indices2 []uint64) {
s.seenAttesterSlashingLock.Lock()
defer s.seenAttesterSlashingLock.Unlock()


@ -206,7 +206,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}
// Returns true if the attestation was already seen for the participating validator for the slot.
func (s *Service) hasSeenCommitteeIndicesSlot(slot uint64, committeeID uint64, aggregateBits []byte) bool {
func (s *Service) hasSeenCommitteeIndicesSlot(slot, committeeID uint64, aggregateBits []byte) bool {
s.seenAttestationLock.RLock()
defer s.seenAttestationLock.RUnlock()
b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...)
@ -216,7 +216,7 @@ func (s *Service) hasSeenCommitteeIndicesSlot(slot uint64, committeeID uint64, a
}
// Set committee's indices and slot as seen for incoming attestations.
func (s *Service) setSeenCommitteeIndicesSlot(slot uint64, committeeID uint64, aggregateBits []byte) {
func (s *Service) setSeenCommitteeIndicesSlot(slot, committeeID uint64, aggregateBits []byte) {
s.seenAttestationLock.Lock()
defer s.seenAttestationLock.Unlock()
b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...)


@ -163,7 +163,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
}
// Returns true if the block is not the first block proposed for the proposer for the slot.
func (s *Service) hasSeenBlockIndexSlot(slot uint64, proposerIdx uint64) bool {
func (s *Service) hasSeenBlockIndexSlot(slot, proposerIdx uint64) bool {
s.seenBlockLock.RLock()
defer s.seenBlockLock.RUnlock()
b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(proposerIdx)...)
@ -172,7 +172,7 @@ func (s *Service) hasSeenBlockIndexSlot(slot uint64, proposerIdx uint64) bool {
}
// Set block proposer index and slot as seen for incoming blocks.
func (s *Service) setSeenBlockIndexSlot(slot uint64, proposerIdx uint64) {
func (s *Service) setSeenBlockIndexSlot(slot, proposerIdx uint64) {
s.seenBlockLock.Lock()
defer s.seenBlockLock.Unlock()
b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(proposerIdx)...)
@ -198,7 +198,7 @@ func (s *Service) setBadBlock(ctx context.Context, root [32]byte) {
}
// This captures metrics for block arrival time by subtracts slot start time.
func captureArrivalTimeMetric(genesisTime uint64, currentSlot uint64) error {
func captureArrivalTimeMetric(genesisTime, currentSlot uint64) error {
startTime, err := helpers.SlotToTime(genesisTime, currentSlot)
if err != nil {
return err


@ -8,8 +8,7 @@ import (
)
// UnpackDepositLogData unpacks the data from a deposit log using the ABI decoder.
func UnpackDepositLogData(data []byte) (pubkey []byte, withdrawalCredentials []byte, amount []byte,
signature []byte, index []byte, err error) {
func UnpackDepositLogData(data []byte) (pubkey, withdrawalCredentials, amount, signature, index []byte, err error) {
reader := bytes.NewReader([]byte(DepositContractABI))
contractAbi, err := abi.JSON(reader)
if err != nil {


@ -43,7 +43,7 @@ func StartValidatorClients(t *testing.T, config *types.E2EConfig) {
}
// StartNewValidatorClient starts a validator client with the passed in configuration.
func StartNewValidatorClient(t *testing.T, config *types.E2EConfig, validatorNum int, index int, offset int) {
func StartNewValidatorClient(t *testing.T, config *types.E2EConfig, validatorNum, index, offset int) {
binaryPath, found := bazel.FindBinary("validator", "validator")
if !found {
t.Fatal("validator binary not found")
@ -82,7 +82,7 @@ func StartNewValidatorClient(t *testing.T, config *types.E2EConfig, validatorNum
}
// SendAndMineDeposits sends the requested amount of deposits and mines the chain after to ensure the deposits are seen.
func SendAndMineDeposits(t *testing.T, keystorePath string, validatorNum int, offset int) {
func SendAndMineDeposits(t *testing.T, keystorePath string, validatorNum, offset int) {
client, err := rpc.DialHTTP(fmt.Sprintf("http://127.0.0.1:%d", e2e.TestParams.Eth1RPCPort))
if err != nil {
t.Fatal(err)
@ -107,7 +107,7 @@ func SendAndMineDeposits(t *testing.T, keystorePath string, validatorNum int, of
}
// SendDeposits uses the passed in web3 and keystore bytes to send the requested deposits.
func SendDeposits(web3 *ethclient.Client, keystoreBytes []byte, num int, offset int) error {
func SendDeposits(web3 *ethclient.Client, keystoreBytes []byte, num, offset int) error {
txOps, err := bind.NewTransactor(bytes.NewReader(keystoreBytes), "" /*password*/)
if err != nil {
return err


@ -146,7 +146,7 @@ func metricsTest(conns ...*grpc.ClientConn) error {
return nil
}
func metricCheckLessThan(pageContent string, topic string, value int) error {
func metricCheckLessThan(pageContent, topic string, value int) error {
topicValue, err := getValueOfTopic(pageContent, topic)
if err != nil {
return err
@ -162,7 +162,7 @@ func metricCheckLessThan(pageContent string, topic string, value int) error {
return nil
}
func metricCheckComparison(pageContent string, topic1 string, topic2 string, comparison float64) error {
func metricCheckComparison(pageContent, topic1, topic2 string, comparison float64) error {
topic2Value, err := getValueOfTopic(pageContent, topic2)
// If we can't find the first topic (error metrics), then assume the test passes.
if topic2Value != -1 {
@ -191,7 +191,7 @@ func metricCheckComparison(pageContent string, topic1 string, topic2 string, com
return nil
}
func getValueOfTopic(pageContent string, topic string) (int, error) {
func getValueOfTopic(pageContent, topic string) (int, error) {
regexExp, err := regexp.Compile(topic + " ")
if err != nil {
return -1, errors.Wrap(err, "could not create regex expression")


@ -71,7 +71,7 @@ var ValidatorHasExited = types.Evaluator{
}
// Not including first epoch because of issues with genesis.
func isBetweenEpochs(fromEpoch uint64, toEpoch uint64) func(uint64) bool {
func isBetweenEpochs(fromEpoch, toEpoch uint64) func(uint64) bool {
return func(currentEpoch uint64) bool {
return fromEpoch < currentEpoch && currentEpoch < toEpoch
}


@ -43,8 +43,7 @@ func GetEpochTicker(genesisTime time.Time, secondsPerEpoch uint64) *EpochTicker
func (s *EpochTicker) start(
genesisTime time.Time,
secondsPerEpoch uint64,
since func(time.Time) time.Duration,
until func(time.Time) time.Duration,
since, until func(time.Time) time.Duration,
after func(time.Duration) <-chan time.Time) {
d := time.Duration(secondsPerEpoch) * time.Second


@ -48,7 +48,7 @@ func KillProcesses(t *testing.T, pIDs []int) {
// DeleteAndCreateFile checks if the file path given exists, if it does, it deletes it and creates a new file.
// If not, it just creates the requested file.
func DeleteAndCreateFile(tmpPath string, fileName string) (*os.File, error) {
func DeleteAndCreateFile(tmpPath, fileName string) (*os.File, error) {
filePath := path.Join(tmpPath, fileName)
if _, err := os.Stat(filePath); os.IsExist(err) {
if err := os.Remove(filePath); err != nil {
@ -163,7 +163,7 @@ func WritePprofFiles(testDir string, index int) error {
return writeURLRespAtPath(url, filePath)
}
func writeURLRespAtPath(url string, filePath string) error {
func writeURLRespAtPath(url, filePath string) error {
resp, err := http.Get(url)
if err != nil {
return err


@ -50,7 +50,7 @@ func Aggregate(atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
}
// AggregatePair aggregates pair of attestations a1 and a2 together.
func AggregatePair(a1 *ethpb.Attestation, a2 *ethpb.Attestation) (*ethpb.Attestation, error) {
func AggregatePair(a1, a2 *ethpb.Attestation) (*ethpb.Attestation, error) {
if a1.AggregationBits.Len() != a2.AggregationBits.Len() {
return nil, aggregation.ErrBitsDifferentLen
}


@ -51,7 +51,7 @@ func NewMaxCoverCandidate(key int, bits *bitfield.Bitlist) *MaxCoverCandidate {
// Cover calculates solution to Maximum k-Cover problem in O(knm), where
// n is number of candidates and m is a length of bitlist in each candidate.
func (mc *MaxCoverProblem) Cover(k int, allowOverlaps bool, allowDuplicates bool) (*Aggregation, error) {
func (mc *MaxCoverProblem) Cover(k int, allowOverlaps, allowDuplicates bool) (*Aggregation, error) {
if len(mc.Candidates) == 0 {
return nil, errors.Wrap(ErrInvalidMaxCoverProblem, "cannot calculate set coverage")
}


@ -162,7 +162,7 @@ func IsValidAttestationIndices(ctx context.Context, indexedAttestation *ethpb.In
}
// AttDataIsEqual this function performs an equality check between 2 attestation data, if they're unequal, it will return false.
func AttDataIsEqual(attData1 *ethpb.AttestationData, attData2 *ethpb.AttestationData) bool {
func AttDataIsEqual(attData1, attData2 *ethpb.AttestationData) bool {
if attData1.Slot != attData2.Slot {
return false
}
@ -188,7 +188,7 @@ func AttDataIsEqual(attData1 *ethpb.AttestationData, attData2 *ethpb.Attestation
}
// CheckPointIsEqual performs an equality check between 2 check points, returns false if unequal.
func CheckPointIsEqual(checkPt1 *ethpb.Checkpoint, checkPt2 *ethpb.Checkpoint) bool {
func CheckPointIsEqual(checkPt1, checkPt2 *ethpb.Checkpoint) bool {
if checkPt1.Epoch != checkPt2.Epoch {
return false
}


@ -85,7 +85,7 @@ func RandKey() iface.SecretKey {
}
// VerifyCompressed signature.
func VerifyCompressed(signature []byte, pub []byte, msg []byte) bool {
func VerifyCompressed(signature, pub, msg []byte) bool {
if featureconfig.Get().EnableBlst {
return blst.VerifyCompressed(signature, pub, msg)
}


@ -112,6 +112,6 @@ func RandKey() iface.SecretKey {
}
// VerifyCompressed -- stub
func VerifyCompressed(_ []byte, _ []byte, _ []byte) bool {
func VerifyCompressed(_, _, _ []byte) bool {
panic(err)
}


@ -15,7 +15,7 @@ var log = logrus.WithField("prefix", "node")
// ConfirmAction uses the passed in actionText as the confirmation text displayed in the terminal.
// The user must enter Y or N to indicate whether they confirm the action detailed in the warning text.
// Returns a boolean representing the user's answer.
func ConfirmAction(actionText string, deniedText string) (bool, error) {
func ConfirmAction(actionText, deniedText string) (bool, error) {
var confirmed bool
reader := bufio.NewReader(os.Stdin)
log.Warn(actionText)


@ -31,11 +31,7 @@ import (
// - Send a transaction on the Ethereum 1.0 chain to DEPOSIT_CONTRACT_ADDRESS executing def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]) along with a deposit of amount Gwei.
//
// See: https://github.com/ethereum/eth2.0-specs/blob/master/specs/validator/0_beacon-chain-validator.md#submit-deposit
func DepositInput(
depositKey bls.SecretKey,
withdrawalKey bls.SecretKey,
amountInGwei uint64,
) (*ethpb.Deposit_Data, [32]byte, error) {
func DepositInput(depositKey, withdrawalKey bls.SecretKey, amountInGwei uint64) (*ethpb.Deposit_Data, [32]byte, error) {
di := &ethpb.Deposit_Data{
PublicKey: depositKey.PublicKey().Marshal(),
WithdrawalCredentials: WithdrawalCredentialsHash(withdrawalKey),
@ -116,10 +112,7 @@ func VerifyDepositSignature(dd *ethpb.Deposit_Data, domain []byte) error {
// GenerateDepositTransaction uses the provided validating key and withdrawal key to
// create a transaction object for the deposit contract.
func GenerateDepositTransaction(
validatingKey bls.SecretKey,
withdrawalKey bls.SecretKey,
) (*types.Transaction, *ethpb.Deposit_Data, error) {
func GenerateDepositTransaction(validatingKey, withdrawalKey bls.SecretKey) (*types.Transaction, *ethpb.Deposit_Data, error) {
depositData, depositRoot, err := DepositInput(
validatingKey, withdrawalKey, params.BeaconConfig().MaxEffectiveBalance,
)


@ -37,7 +37,7 @@ func (h *HasherFunc) Hash(a []byte) [32]byte {
}
// Combi appends the two inputs and hashes them.
func (h *HasherFunc) Combi(a [32]byte, b [32]byte) [32]byte {
func (h *HasherFunc) Combi(a, b [32]byte) [32]byte {
copy(h.b[:32], a[:])
copy(h.b[32:], b[:])
return h.Hash(h.b[:])


@ -45,7 +45,7 @@ func BitlistRoot(hasher HashFn, bfield bitfield.Bitfield, maxCapacity uint64) ([
// and return the root.
// Note that merkleize on a single chunk is simply that chunk, i.e. the identity
// when the number of chunks is one.
func BitwiseMerkleize(hasher HashFn, chunks [][]byte, count uint64, limit uint64) ([32]byte, error) {
func BitwiseMerkleize(hasher HashFn, chunks [][]byte, count, limit uint64) ([32]byte, error) {
if count > limit {
return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
}
@ -57,7 +57,7 @@ func BitwiseMerkleize(hasher HashFn, chunks [][]byte, count uint64, limit uint64
}
// BitwiseMerkleizeArrays is used when a set of 32-byte root chunks are provided.
func BitwiseMerkleizeArrays(hasher HashFn, chunks [][32]byte, count uint64, limit uint64) ([32]byte, error) {
func BitwiseMerkleizeArrays(hasher HashFn, chunks [][32]byte, count, limit uint64) ([32]byte, error) {
if count > limit {
return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
}


@ -68,7 +68,7 @@ func GetDepth(v uint64) (out uint8) {
}
// Merkleize with log(N) space allocation
func Merkleize(hasher Hasher, count uint64, limit uint64, leaf func(i uint64) []byte) (out [32]byte) {
func Merkleize(hasher Hasher, count, limit uint64, leaf func(i uint64) []byte) (out [32]byte) {
if count > limit {
panic("merkleizing list that is too large, over limit")
}
@ -134,7 +134,7 @@ func Merkleize(hasher Hasher, count uint64, limit uint64, leaf func(i uint64) []
// ConstructProof builds a merkle-branch of the given depth, at the given index (at that depth),
// for a list of leafs of a balanced binary tree.
func ConstructProof(hasher Hasher, count uint64, limit uint64, leaf func(i uint64) []byte, index uint64) (branch [][32]byte) {
func ConstructProof(hasher Hasher, count, limit uint64, leaf func(i uint64) []byte, index uint64) (branch [][32]byte) {
if count > limit {
panic("merkleizing list that is too large, over limit")
}


@ -216,7 +216,7 @@ func DecryptKey(keyJSON []byte, password string) (*Key, error) {
}, nil
}
func decryptKeyJSON(keyProtected *encryptedKeyJSON, auth string) (keyBytes []byte, keyID []byte, err error) {
func decryptKeyJSON(keyProtected *encryptedKeyJSON, auth string) (keyBytes, keyID []byte, err error) {
keyID = uuid.Parse(keyProtected.ID)
if keyProtected.Crypto.Cipher != "aes-128-ctr" {
return nil, nil, fmt.Errorf("cipher not supported: %v", keyProtected.Crypto.Cipher)


@ -74,7 +74,7 @@ func ClosestPowerOf2(n uint64) uint64 {
// in the standard math library because that max function
// has to check for some special floating point cases
// making it slower by a magnitude of 10.
func Max(a uint64, b uint64) uint64 {
func Max(a, b uint64) uint64 {
if a > b {
return a
}
@ -86,7 +86,7 @@ func Max(a uint64, b uint64) uint64 {
// in the standard math library because that min function
// has to check for some special floating point cases
// making it slower by a magnitude of 10.
func Min(a uint64, b uint64) uint64 {
func Min(a, b uint64) uint64 {
if a < b {
return a
}


@ -11,7 +11,7 @@ import (
// StartAndEndPage takes in the requested page token, wanted page size, total page size.
// It returns start, end page and the next page token.
func StartAndEndPage(pageToken string, pageSize int, totalSize int) (int, int, string, error) {
func StartAndEndPage(pageToken string, pageSize, totalSize int) (int, int, string, error) {
if pageToken == "" {
pageToken = "0"
}


@ -50,7 +50,7 @@ func ValidatePrompt(r io.Reader, promptText string, validateFunc func(string) er
}
// DefaultPrompt prompts the user for any text and performs no validation. If nothing is entered it returns the default.
func DefaultPrompt(promptText string, defaultValue string) (string, error) {
func DefaultPrompt(promptText, defaultValue string) (string, error) {
var response string
if defaultValue != "" {
fmt.Printf("%s %s:\n", promptText, fmt.Sprintf("(%s: %s)", au.BrightGreen("default"), defaultValue))
@ -71,7 +71,7 @@ func DefaultPrompt(promptText string, defaultValue string) (string, error) {
// DefaultAndValidatePrompt prompts the user for any text and expects it to fulfill a validation function. If nothing is entered
// the default value is returned.
func DefaultAndValidatePrompt(promptText string, defaultValue string, validateFunc func(string) error) (string, error) {
func DefaultAndValidatePrompt(promptText, defaultValue string, validateFunc func(string) error) (string, error) {
var responseValid bool
var response string
for !responseValid {
@ -122,8 +122,7 @@ func PasswordPrompt(promptText string, validateFunc func(string) error) (string,
func InputPassword(
cliCtx *cli.Context,
passwordFileFlag *cli.StringFlag,
promptText string,
confirmText string,
promptText, confirmText string,
shouldConfirmPassword bool,
passwordValidator func(input string) error,
) (string, error) {


@ -115,7 +115,7 @@ func ValidatePasswordInput(input string) error {
}
// ValidatePhrase checks whether the user input is equal to the wanted phrase. The verification is case sensitive.
func ValidatePhrase(input string, wantedPhrase string) error {
func ValidatePhrase(input, wantedPhrase string) error {
if strings.TrimSpace(input) != wantedPhrase {
return errIncorrectPhrase
}


@ -7,7 +7,7 @@ import (
// SubsetUint64 returns true if the first array is
// completely contained in the second array with time
// complexity of approximately o(n).
func SubsetUint64(a []uint64, b []uint64) bool {
func SubsetUint64(a, b []uint64) bool {
if len(a) > len(b) {
return false
}
@ -120,7 +120,7 @@ func IsUint64Sorted(a []uint64) bool {
// not in slice a with time complexity of approximately
// O(n) leveraging a map to check for element existence
// off by a constant factor of underlying map efficiency.
func NotUint64(a []uint64, b []uint64) []uint64 {
func NotUint64(a, b []uint64) []uint64 {
set := make([]uint64, 0)
m := make(map[uint64]bool)
@ -206,7 +206,7 @@ func UnionInt64(s ...[]int64) []int64 {
// not in slice b with time complexity of approximately
// O(n) leveraging a map to check for element existence
// off by a constant factor of underlying map efficiency.
func NotInt64(a []int64, b []int64) []int64 {
func NotInt64(a, b []int64) []int64 {
set := make([]int64, 0)
m := make(map[int64]bool)
@ -299,6 +299,6 @@ func SplitCommaSeparated(arr []string) []string {
// split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)]
// """
// return (list_size * index) // chunks
func SplitOffset(listSize uint64, chunks uint64, index uint64) uint64 {
func SplitOffset(listSize, chunks, index uint64) uint64 {
return (listSize * index) / chunks
}


@ -71,8 +71,7 @@ func GetSlotTickerWithOffset(genesisTime time.Time, offset time.Duration, second
func (s *SlotTicker) start(
genesisTime time.Time,
secondsPerSlot uint64,
since func(time.Time) time.Duration,
until func(time.Time) time.Duration,
since, until func(time.Time) time.Duration,
after func(time.Duration) <-chan time.Time) {
d := time.Duration(secondsPerSlot) * time.Second


@ -9,7 +9,7 @@ import (
// SlotStartTime returns the start time in terms of its unix epoch
// value.
func SlotStartTime(genesis uint64, slot uint64) time.Time {
func SlotStartTime(genesis, slot uint64) time.Time {
duration := time.Second * time.Duration(slot*params.BeaconConfig().SecondsPerSlot)
startTime := time.Unix(int64(genesis), 0).Add(duration)
return startTime


@ -354,7 +354,7 @@ func generateAttesterSlashings(
// for the same data with their aggregation bits split uniformly.
//
// If you request 4 attestations, but there are 8 committees, you will get 4 fully aggregated attestations.
func GenerateAttestations(bState *stateTrie.BeaconState, privs []bls.SecretKey, numToGen uint64, slot uint64, randomRoot bool) ([]*ethpb.Attestation, error) {
func GenerateAttestations(bState *stateTrie.BeaconState, privs []bls.SecretKey, numToGen, slot uint64, randomRoot bool) ([]*ethpb.Attestation, error) {
currentEpoch := helpers.SlotToEpoch(slot)
var attestations []*ethpb.Attestation
generateHeadState := false


@ -42,7 +42,7 @@ func UnmarshalYaml(y []byte, dest interface{}) error {
// TestFolders sets the proper config and returns the result of ReadDir
// on the passed in eth2-spec-tests directory along with its path.
func TestFolders(t testing.TB, config string, folderPath string) ([]os.FileInfo, string) {
func TestFolders(t testing.TB, config, folderPath string) ([]os.FileInfo, string) {
testsFolderPath := path.Join("tests", config, "phase0", folderPath)
filepath, err := bazel.Runfile(testsFolderPath)
require.NoError(t, err)


@ -120,7 +120,7 @@ func GeneralizedIndexLength(index int) int {
// Return the given bit of a generalized index.
// """
// return (index & (1 << position)) > 0
func GeneralizedIndexBit(index uint64, pos uint64) bool {
func GeneralizedIndexBit(index, pos uint64) bool {
return (index & (1 << pos)) > 0
}


@ -182,7 +182,7 @@ func (m *SparseMerkleTrie) ToProto() *protodb.SparseMerkleTrie {
}
// VerifyMerkleBranch verifies a Merkle branch against a root of a trie.
func VerifyMerkleBranch(root []byte, item []byte, merkleIndex int, proof [][]byte, depth uint64) bool {
func VerifyMerkleBranch(root, item []byte, merkleIndex int, proof [][]byte, depth uint64) bool {
if len(proof) != int(depth)+1 {
return false
}


@ -28,7 +28,7 @@ func unmarshalBlockHeader(ctx context.Context, enc []byte) (*ethpb.SignedBeaconB
// BlockHeaders accepts an slot and validator id and returns the corresponding block header array.
// Returns nil if the block header for those values does not exist.
func (db *Store) BlockHeaders(ctx context.Context, slot uint64, validatorID uint64) ([]*ethpb.SignedBeaconBlockHeader, error) {
func (db *Store) BlockHeaders(ctx context.Context, slot, validatorID uint64) ([]*ethpb.SignedBeaconBlockHeader, error) {
ctx, span := trace.StartSpan(ctx, "slasherDB.BlockHeaders")
defer span.End()
var blockHeaders []*ethpb.SignedBeaconBlockHeader
@ -48,7 +48,7 @@ func (db *Store) BlockHeaders(ctx context.Context, slot uint64, validatorID uint
}
// HasBlockHeader accepts a slot and validator id and returns true if the block header exists.
func (db *Store) HasBlockHeader(ctx context.Context, slot uint64, validatorID uint64) bool {
func (db *Store) HasBlockHeader(ctx context.Context, slot, validatorID uint64) bool {
ctx, span := trace.StartSpan(ctx, "slasherDB.HasBlockHeader")
defer span.End()
prefix := encodeSlotValidatorID(slot, validatorID)
@ -113,7 +113,7 @@ func (db *Store) DeleteBlockHeader(ctx context.Context, blockHeader *ethpb.Signe
}
// PruneBlockHistory leaves only records younger then history size.
func (db *Store) PruneBlockHistory(ctx context.Context, currentEpoch uint64, pruningEpochAge uint64) error {
func (db *Store) PruneBlockHistory(ctx context.Context, currentEpoch, pruningEpochAge uint64) error {
ctx, span := trace.StartSpan(ctx, "slasherDB.pruneBlockHistory")
defer span.End()
pruneTill := int64(currentEpoch) - int64(pruningEpochAge)

View File

@@ -162,7 +162,7 @@ func (db *Store) DeleteIndexedAttestation(ctx context.Context, idxAttestation *e
}
// PruneAttHistory removes all attestations from the DB older than the pruning epoch age.
func (db *Store) PruneAttHistory(ctx context.Context, currentEpoch uint64, pruningEpochAge uint64) error {
func (db *Store) PruneAttHistory(ctx context.Context, currentEpoch, pruningEpochAge uint64) error {
ctx, span := trace.StartSpan(ctx, "slasherDB.pruneAttHistory")
defer span.End()
pruneFromEpoch := int64(currentEpoch) - int64(pruningEpochAge)
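Both prune helpers shown here derive their cutoff the same way: the signed difference between the current epoch and the pruning window. A tiny sketch of that arithmetic (the real methods go on to delete bucket entries, which is outside these hunks):

package main

import "fmt"

// pruneCutoff returns the newest epoch that falls outside the retention
// window; a negative result means history is still shorter than the window.
func pruneCutoff(currentEpoch, pruningEpochAge uint64) int64 {
	return int64(currentEpoch) - int64(pruningEpochAge)
}

func main() {
	fmt.Println(pruneCutoff(100, 40)) // 60: epochs older than this are prunable
	fmt.Println(pruneCutoff(10, 40))  // -30: nothing to prune yet
}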

View File

@@ -27,11 +27,11 @@ var (
validatorsMinMaxSpanBucketNew = []byte("validators-min-max-span-bucket-new")
)
func encodeSlotValidatorID(slot uint64, validatorID uint64) []byte {
func encodeSlotValidatorID(slot, validatorID uint64) []byte {
return append(bytesutil.Bytes8(slot), bytesutil.Bytes8(validatorID)...)
}
func encodeSlotValidatorIDSig(slot uint64, validatorID uint64, sig []byte) []byte {
func encodeSlotValidatorIDSig(slot, validatorID uint64, sig []byte) []byte {
return append(append(bytesutil.Bytes8(slot), bytesutil.Bytes8(validatorID)...), sig...)
}
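The two encoders above just concatenate fixed-width integer encodings (plus, in the second case, a signature) into a single bucket key. A self-contained sketch of the same idea using encoding/binary; the byte order here is an assumption for illustration, since the real conversion lives in Prysm's bytesutil package:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeKey concatenates two 8-byte encodings into a 16-byte key, mirroring
// the shape of the slasher DB keys above.
func encodeKey(slot, validatorID uint64) []byte {
	key := make([]byte, 16)
	binary.LittleEndian.PutUint64(key[:8], slot)
	binary.LittleEndian.PutUint64(key[8:], validatorID)
	return key
}

func main() {
	fmt.Printf("%x\n", encodeKey(1, 2)) // 16 hex-encoded bytes: slot || validatorID
}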

View File

@@ -118,7 +118,7 @@ func (db *Store) EpochSpansMap(ctx context.Context, epoch uint64) (map[uint64]ty
// it reads the epoch spans from cache and gets the requested value from there if it exists
// when caching is enabled.
// Returns an error if the spans for this validator index and epoch do not exist.
func (db *Store) EpochSpanByValidatorIndex(ctx context.Context, validatorIdx uint64, epoch uint64) (types.Span, error) {
func (db *Store) EpochSpanByValidatorIndex(ctx context.Context, validatorIdx, epoch uint64) (types.Span, error) {
ctx, span := trace.StartSpan(ctx, "slasherDB.EpochSpanByValidatorIndex")
defer span.End()
if db.spanCacheEnabled {
@@ -224,12 +224,7 @@ func (db *Store) SaveEpochsSpanByValidatorsIndices(ctx context.Context, epochsSp
// it reads the epoch spans from cache, updates them, and saves them back to the cache
// if caching is enabled.
// Returns an error if the spans for this validator index and epoch do not exist.
func (db *Store) SaveValidatorEpochSpan(
ctx context.Context,
validatorIdx uint64,
epoch uint64,
span types.Span,
) error {
func (db *Store) SaveValidatorEpochSpan(ctx context.Context, validatorIdx, epoch uint64, span types.Span) error {
ctx, traceSpan := trace.StartSpan(ctx, "slasherDB.SaveValidatorEpochSpan")
defer traceSpan.End()
if db.spanCacheEnabled {
@@ -329,7 +324,7 @@ func (db *Store) DeleteEpochSpans(ctx context.Context, epoch uint64) error {
// DeleteValidatorSpanByEpoch deletes a validator span for a certain epoch,
// using a validator index as the bucket key, and removes the span from the
// cache if caching is enabled.
func (db *Store) DeleteValidatorSpanByEpoch(ctx context.Context, validatorIdx uint64, epoch uint64) error {
func (db *Store) DeleteValidatorSpanByEpoch(ctx context.Context, validatorIdx, epoch uint64) error {
ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteValidatorSpanByEpoch")
defer span.End()
if db.spanCacheEnabled {

View File

@@ -15,7 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/slasher/detection/attestations/types"
)
func indexedAttestation(source uint64, target uint64, indices []uint64) *ethpb.IndexedAttestation {
func indexedAttestation(source, target uint64, indices []uint64) *ethpb.IndexedAttestation {
return &ethpb.IndexedAttestation{
AttestingIndices: indices,
Data: &ethpb.AttestationData{

View File

@@ -199,20 +199,17 @@ func resultHash(result *types.DetectionResult) [32]byte {
return hashutil.Hash(resultBytes)
}
func isDoublePropose(
incomingBlockHeader *ethpb.SignedBeaconBlockHeader,
prevBlockHeader *ethpb.SignedBeaconBlockHeader,
) bool {
func isDoublePropose(incomingBlockHeader, prevBlockHeader *ethpb.SignedBeaconBlockHeader) bool {
return incomingBlockHeader.Header.ProposerIndex == prevBlockHeader.Header.ProposerIndex &&
!bytes.Equal(incomingBlockHeader.Signature, prevBlockHeader.Signature) &&
incomingBlockHeader.Header.Slot == prevBlockHeader.Header.Slot
}
func isDoubleVote(incomingAtt *ethpb.IndexedAttestation, prevAtt *ethpb.IndexedAttestation) bool {
func isDoubleVote(incomingAtt, prevAtt *ethpb.IndexedAttestation) bool {
return !attestationutil.AttDataIsEqual(incomingAtt.Data, prevAtt.Data) && incomingAtt.Data.Target.Epoch == prevAtt.Data.Target.Epoch
}
func isSurrounding(incomingAtt *ethpb.IndexedAttestation, prevAtt *ethpb.IndexedAttestation) bool {
func isSurrounding(incomingAtt, prevAtt *ethpb.IndexedAttestation) bool {
return incomingAtt.Data.Source.Epoch < prevAtt.Data.Source.Epoch &&
incomingAtt.Data.Target.Epoch > prevAtt.Data.Target.Epoch
}
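The three predicates above compare only proposer/slot/signature fields or source/target epochs. A small sketch with bare numbers (no protobuf types) showing how the surround relation behaves:

package main

import "fmt"

type vote struct{ source, target uint64 }

// surrounds reports whether a's source/target span strictly contains b's,
// the same relation isSurrounding checks on attestation data.
func surrounds(a, b vote) bool {
	return a.source < b.source && a.target > b.target
}

func main() {
	prev := vote{source: 3, target: 4}
	fmt.Println(surrounds(vote{2, 5}, prev)) // true: 2..5 surrounds 3..4
	fmt.Println(surrounds(vote{4, 5}, prev)) // false: source is not earlier
	fmt.Println(surrounds(prev, vote{2, 5})) // false: 3..4 is surrounded, not surrounding
}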

View File

@@ -9,7 +9,7 @@ import (
// SignedBlockHeader generates a signed block header for the given slot and proposer index,
// with random bytes as its signature.
func SignedBlockHeader(slot uint64, proposerIdx uint64) (*ethpb.SignedBeaconBlockHeader, error) {
func SignedBlockHeader(slot, proposerIdx uint64) (*ethpb.SignedBeaconBlockHeader, error) {
sig, err := genRandomByteArray(96)
if err != nil {
return nil, err
@@ -28,7 +28,7 @@ func SignedBlockHeader(slot uint64, proposerIdx uint64) (*ethpb.SignedBeaconBloc
}
// BlockHeader generates a block header for the given slot and proposer index.
func BlockHeader(slot uint64, proposerIdx uint64) (*ethpb.BeaconBlockHeader, error) {
func BlockHeader(slot, proposerIdx uint64) (*ethpb.BeaconBlockHeader, error) {
root := [32]byte{1, 2, 3}
return &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
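The test helper fills the 96-byte signature field with random bytes rather than a real BLS signature. A minimal sketch of that pattern with crypto/rand (genRandomByteArray itself is not shown in this hunk):

package main

import (
	"crypto/rand"
	"fmt"
)

// randomBytes returns n cryptographically random bytes, the same trick used
// to fake a fixed-length signature in tests.
func randomBytes(n int) ([]byte, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	sig, err := randomBytes(96)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sig)) // 96
}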

View File

@@ -39,14 +39,7 @@ type server struct {
clientLock sync.Mutex
}
func newServer(
db *db,
rpcAddr string,
depositContractAddr string,
funderPK string,
validatorDepositAmount string,
beaconRPCAddr string,
) *server {
func newServer(db *db, rpcAddr, depositContractAddr, funderPK, validatorDepositAmount, beaconRPCAddr string) *server {
rpcClient, err := rpc.Dial(rpcAddr)
if err != nil {
panic(err)
@@ -81,7 +74,7 @@ func newServer(
}
}
func (s *server) makeDeposit(pubkey []byte, withdrawalCredentials []byte, signature []byte, depositRoot [32]byte) (*types.Transaction, error) {
func (s *server) makeDeposit(pubkey, withdrawalCredentials, signature []byte, depositRoot [32]byte) (*types.Transaction, error) {
txOps := bind.NewKeyedTransactor(s.txPk)
txOps.Value = s.depositAmount
txOps.GasLimit = gasLimit

View File

@@ -51,7 +51,7 @@ func init() {
func newFaucetServer(
r recaptcha.Recaptcha,
rpcPath string,
rpcPath,
funderPrivateKey string,
minScore float64,
) *faucetServer {

View File

@@ -224,7 +224,7 @@ func encrypt(cliCtx *cli.Context) error {
// Reads the keystore file at the provided path and attempts
// to decrypt it with the specified password.
func readAndDecryptKeystore(fullPath string, password string) error {
func readAndDecryptKeystore(fullPath, password string) error {
file, err := ioutil.ReadFile(fullPath)
if err != nil {
return errors.Wrapf(err, "could not read file at path: %s", fullPath)

View File

@@ -32,7 +32,7 @@ var errFailedToCloseManyDb = errors.New("failed to close one or more databases")
// DecryptKeysFromKeystore extracts a set of validator private keys from
// an encrypted keystore directory and a password string.
func DecryptKeysFromKeystore(directory string, filePrefix string, password string) (map[string]*keystore.Key, error) {
func DecryptKeysFromKeystore(directory, filePrefix, password string) (map[string]*keystore.Key, error) {
ks := keystore.New(directory)
validatorKeys, err := ks.GetKeys(directory, filePrefix, password, true)
if err != nil {
@@ -43,7 +43,7 @@ func DecryptKeysFromKeystore(directory string, filePrefix string, password strin
// VerifyAccountNotExists checks if a validator has not yet created an account
// and keystore in the provided directory string.
func VerifyAccountNotExists(directory string, password string) error {
func VerifyAccountNotExists(directory, password string) error {
if directory == "" || password == "" {
return errors.New("expected a path to the validator keystore and password to be provided, received nil")
}
@@ -65,7 +65,7 @@ func VerifyAccountNotExists(directory string, password string) error {
// parameters needed to deposit into the deposit contract on the ETH1.0 chain. Specifically, this
// generates a BLS private and public key, and then logs the serialized deposit input hex string
// to be used in an ETH1.0 transaction by the validator.
func NewValidatorAccount(directory string, password string) error {
func NewValidatorAccount(directory, password string) error {
if password == "" {
return errors.New("empty passphrase is not allowed")
}
@@ -158,7 +158,7 @@ func Exists(keystorePath string, assertNonEmpty bool) (bool, error) {
}
// CreateValidatorAccount creates a validator account from the given cli context.
func CreateValidatorAccount(path string, passphrase string) (string, string, error) {
func CreateValidatorAccount(path, passphrase string) (string, string, error) {
// Forces user to create directory if using non-default path.
if path != DefaultValidatorDir() {
exists, err := Exists(path, false /* assertNonEmpty */)
@@ -176,7 +176,7 @@ func CreateValidatorAccount(path string, passphrase string) (string, string, err
}
// PrintPublicAndPrivateKeys uses the passed-in path and prints out the public and private keys in that directory.
func PrintPublicAndPrivateKeys(path string, passphrase string) error {
func PrintPublicAndPrivateKeys(path, passphrase string) error {
keystores, err := DecryptKeysFromKeystore(path, params.BeaconConfig().ValidatorPrivkeyFileName, passphrase)
if err != nil {
return errors.Wrapf(err, "failed to decrypt keystore keys at path %s", path)
@@ -276,7 +276,7 @@ func Merge(ctx context.Context, sourceDirectories []string, targetDirectory stri
// Split splits data from one validator database in sourceDirectory into several validator databases.
// Each validator database is created in its own subdirectory inside targetDirectory.
func Split(ctx context.Context, sourceDirectory string, targetDirectory string) (err error) {
func Split(ctx context.Context, sourceDirectory, targetDirectory string) (err error) {
var sourceStore *kv.Store
sourceStore, err = kv.GetKVStore(sourceDirectory)
if err != nil {
@@ -302,7 +302,7 @@ func Split(ctx context.Context, sourceDirectory string, targetDirectory string)
// ChangePassword changes the password for all keys located in a keystore.
// Password is changed only for keys that can be decrypted using the old password.
func ChangePassword(keystorePath string, oldPassword string, newPassword string) error {
func ChangePassword(keystorePath, oldPassword, newPassword string) error {
err := changePasswordForKeyType(
keystorePath,
params.BeaconConfig().ValidatorPrivkeyFileName,
@@ -319,7 +319,7 @@ func ChangePassword(keystorePath string, oldPassword string, newPassword string)
newPassword)
}
func changePasswordForKeyType(keystorePath string, filePrefix string, oldPassword string, newPassword string) error {
func changePasswordForKeyType(keystorePath, filePrefix, oldPassword, newPassword string) error {
keys, err := DecryptKeysFromKeystore(keystorePath, filePrefix, oldPassword)
if err != nil {
return errors.Wrap(err, "failed to decrypt keys")
@@ -337,7 +337,7 @@ func changePasswordForKeyType(keystorePath string, filePrefix string, oldPasswor
}
// ExtractPublicKeysFromKeyStore extracts only the public keys from the decrypted keystore keys.
func ExtractPublicKeysFromKeyStore(keystorePath string, passphrase string) ([][]byte, error) {
func ExtractPublicKeysFromKeyStore(keystorePath, passphrase string) ([][]byte, error) {
decryptedKeys, err := DecryptKeysFromKeystore(keystorePath, params.BeaconConfig().ValidatorPrivkeyFileName, passphrase)
if err != nil {
return nil, errors.Wrapf(err, "could not decrypt keys from keystore in path %s", keystorePath)

View File

@@ -280,8 +280,7 @@ func prepareSourcesForMerging(firstStorePubKey [48]byte, firstStore *kv.Store, s
func assertMergedStore(
t *testing.T,
mergedStore *kv.Store,
firstStorePubKey [48]byte,
secondStorePubKey [48]byte,
firstStorePubKey, secondStorePubKey [48]byte,
history *sourceStoresHistory) {
mergedProposalHistory1, err := mergedStore.ProposalHistoryForEpoch(

View File

@@ -69,7 +69,7 @@ func (v *validator) postAttSignUpdate(ctx context.Context, indexedAtt *ethpb.Ind
// isNewAttSlashable uses the attestation history to determine if an attestation of sourceEpoch
// and targetEpoch would be slashable. It can detect double, surrounding, and surrounded votes.
func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) bool {
func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch, targetEpoch uint64) bool {
if history == nil {
return false
}
@@ -109,7 +109,7 @@ func isNewAttSlashable(history *slashpb.AttestationHistory, sourceEpoch uint64,
// markAttestationForTargetEpoch returns the modified attestation history with the passed-in epochs marked
// as attested for. This is done to prevent the validator client from signing any slashable attestations.
func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEpoch uint64, targetEpoch uint64) *slashpb.AttestationHistory {
func markAttestationForTargetEpoch(history *slashpb.AttestationHistory, sourceEpoch, targetEpoch uint64) *slashpb.AttestationHistory {
if history == nil {
return nil
}
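isNewAttSlashable checks the incoming source/target pair against everything already recorded in the history. The sketch below illustrates the double-vote and surround checks with a plain map as a stand-in history; the real slashpb.AttestationHistory proto is structured differently, so treat this purely as an illustration:

package main

import "fmt"

// attHistory is a hypothetical history: target epoch -> source epoch of an
// attestation that was already signed for that target.
type attHistory map[uint64]uint64

// isSlashable flags a double vote (same target signed twice) or a surround
// vote in either direction against any recorded attestation.
func isSlashable(h attHistory, source, target uint64) bool {
	if _, ok := h[target]; ok {
		return true // double vote for the same target
	}
	for prevTarget, prevSource := range h {
		if source < prevSource && target > prevTarget {
			return true // new attestation surrounds an old one
		}
		if source > prevSource && target < prevTarget {
			return true // new attestation is surrounded by an old one
		}
	}
	return false
}

func main() {
	h := attHistory{10: 8} // previously signed source 8 -> target 10
	fmt.Println(isSlashable(h, 9, 10))  // true: double vote at target 10
	fmt.Println(isSlashable(h, 7, 11))  // true: 7..11 surrounds 8..10
	fmt.Println(isSlashable(h, 10, 11)) // false: strictly newer, safe to sign
}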

View File

@@ -679,7 +679,7 @@ func (v *validator) logDuties(slot uint64, duties []*ethpb.DutiesResponse_Duty)
// This constructs a validator subscription key; it's used to track
// which subnets have already had a pending subscription request.
func validatorSubscribeKey(slot uint64, committeeID uint64) [64]byte {
func validatorSubscribeKey(slot, committeeID uint64) [64]byte {
return bytesutil.ToBytes64(append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...))
}

View File

@@ -160,7 +160,7 @@ func (km *Remote) Sign(_ context.Context, _ [48]byte, _ [32]byte) (bls.Signature
}
// SignGeneric signs a generic message for the validator to broadcast.
func (km *Remote) SignGeneric(ctx context.Context, pubKey [48]byte, root [32]byte, domain [32]byte) (bls.Signature, error) {
func (km *Remote) SignGeneric(ctx context.Context, pubKey [48]byte, root, domain [32]byte) (bls.Signature, error) {
accountInfo, exists := km.accounts[pubKey]
if !exists {
return nil, ErrNoSuchKey

View File

@@ -516,7 +516,7 @@ func initializeWalletSeedFile(password string, skipMnemonicConfirm bool) (*SeedC
// Uses the provided mnemonic seed phrase to generate the
// appropriate seed file for recovering derived wallets.
func seedFileFromMnemonic(mnemonic string, password string) (*SeedConfig, error) {
func seedFileFromMnemonic(mnemonic, password string) (*SeedConfig, error) {
if ok := bip39.IsMnemonicValid(mnemonic); !ok {
return nil, bip39.ErrInvalidMnemonic
}
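The nil check above gates on bip39's mnemonic validation. A brief sketch of generating and validating a mnemonic with a go-bip39 style library; the import path below is an assumption (github.com/tyler-smith/go-bip39 is the commonly used implementation), so adjust it to whatever the project actually vendors:

package main

import (
	"fmt"

	bip39 "github.com/tyler-smith/go-bip39"
)

func main() {
	// Derive a 12-word mnemonic from 128 bits of fresh entropy.
	entropy, err := bip39.NewEntropy(128)
	if err != nil {
		panic(err)
	}
	mnemonic, err := bip39.NewMnemonic(entropy)
	if err != nil {
		panic(err)
	}

	// IsMnemonicValid performs the same word-list and checksum check that
	// guards seedFileFromMnemonic above.
	fmt.Println(bip39.IsMnemonicValid(mnemonic))          // true
	fmt.Println(bip39.IsMnemonicValid("not a real seed")) // false
}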