Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-22 03:30:35 +00:00)
Various code inspection resolutions (#7438)
* remove unused code
* remove defer use in loop
* Remove unused methods and constants
* gofmt and gaz
* nilness check
* remove unused args
* Add TODO for refactoring subscribeWithBase to remove unused arg. It seems too involved to include in this sweeping PR. https://github.com/prysmaticlabs/prysm/issues/7437
* replace empty slice declaration
* Remove unnecessary type conversions
* remove redundant type declaration
* rename receivers to be consistent
* Remove bootnode query tool. It is now obsolete by discv5
* Remove relay node. It is no longer used or supported
* Revert "Remove relay node. It is no longer used or supported" (reverts commit 4bd7717334dad85ef4766ed9bc4da711fb5fa810)
* Delete unused test directory
* Delete unsupported gcp startup script
* Delete old k8s script
* build fixes
* fix build
* go mod tidy
* revert slasher/db/kv/block_header.go
* fix build
* remove redundant nil check
* combine func args

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
This commit is contained in:
parent 0214553415
commit 7cc32c4dda
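Editor's note: the single most repeated change below swaps empty slice literals for var declarations (for example, `blks := []*ethpb.SignedBeaconBlock{}` becomes `var blks []*ethpb.SignedBeaconBlock`). A minimal sketch of why the two forms are interchangeable under append; this is illustrative only, not part of the commit:

	package main

	import "fmt"

	func main() {
		a := []uint64{} // non-nil, zero-length slice
		var b []uint64  // nil slice until the first append

		fmt.Println(a == nil, b == nil) // false true

		// len, cap, range, and append treat both identically, so the var
		// form is preferred: it reads cleaner and may skip an allocation.
		a = append(a, 1)
		b = append(b, 1)
		fmt.Println(len(a), len(b)) // 1 1
	}

The one visible difference is serialization (encoding/json renders a nil slice as null and an empty one as []), which is worth checking before applying the rewrite blindly.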
@@ -1,17 +1,13 @@
 package blockchain
 
 import (
-	"time"
-
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/shared/bytesutil"
 	"github.com/prysmaticlabs/prysm/shared/params"
-	"github.com/prysmaticlabs/prysm/shared/timeutils"
 )
 
 var (
@@ -100,13 +96,6 @@ var (
 		Name: "beacon_reorg_total",
 		Help: "Count the number of times beacon chain has a reorg",
 	})
-	sentBlockPropagationHistogram = promauto.NewHistogram(
-		prometheus.HistogramOpts{
-			Name:    "block_sent_latency_milliseconds",
-			Help:    "Captures blocks broadcast time. Blocks sent in milliseconds distribution",
-			Buckets: []float64{1000, 2000, 3000, 4000, 5000, 6000},
-		},
-	)
 	attestationInclusionDelay = promauto.NewHistogram(
 		prometheus.HistogramOpts{
 			Name: "attestation_inclusion_delay_slots",
@@ -233,18 +222,6 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
 		}
 	}
-
-// This captures metrics for block sent time by subtracts slot start time.
-func captureSentTimeMetric(genesisTime uint64, currentSlot uint64) error {
-	startTime, err := helpers.SlotToTime(genesisTime, currentSlot)
-	if err != nil {
-		return err
-	}
-	diffMs := timeutils.Now().Sub(startTime) / time.Millisecond
-	sentBlockPropagationHistogram.Observe(float64(diffMs))
-
-	return nil
-}
 
 func reportAttestationInclusion(blk *ethpb.BeaconBlock) {
 	for _, att := range blk.Body.Attestations {
 		attestationInclusionDelay.Observe(float64(blk.Slot - att.Data.Slot))
@@ -99,7 +99,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui
 		return nil, err
 	}
 	indices := indexedAtt.AttestingIndices
-	pubkeys := []bls.PublicKey{}
+	var pubkeys []bls.PublicKey
 	for i := 0; i < len(indices); i++ {
 		pubkeyAtIdx := c.PubKeys[indices[i]]
 		pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
@@ -126,7 +126,7 @@ func (s *Service) getAttCheckPtInfo(ctx context.Context, c *ethpb.Checkpoint, e
 }
 
 // verifyAttTargetEpoch validates attestation is from the current or previous epoch.
-func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
+func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
 	currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
 	currentEpoch := helpers.SlotToEpoch(currentSlot)
 	var prevEpoch uint64
@@ -146,8 +146,8 @@ func TestStore_OnBlockBatch(t *testing.T) {
 
 	bState := st.Copy()
 
-	blks := []*ethpb.SignedBeaconBlock{}
-	blkRoots := [][32]byte{}
+	var blks []*ethpb.SignedBeaconBlock
+	var blkRoots [][32]byte
 	var firstState *stateTrie.BeaconState
 	for i := 1; i < 10; i++ {
 		b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
@@ -141,7 +141,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
 }
 
 // ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
-func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
+func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
 	if ms.State == nil {
 		ms.State = &stateTrie.BeaconState{}
 	}
@@ -168,7 +168,7 @@ func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethp
 }
 
 // ReceiveBlockBatch processes blocks in batches from initial-sync.
-func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, roots [][32]byte) error {
+func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
 	if ms.State == nil {
 		ms.State = &stateTrie.BeaconState{}
 	}
@@ -197,7 +197,7 @@ func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.Sig
 }
 
 // ReceiveBlock mocks ReceiveBlock method in chain service.
-func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
+func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
 	if ms.State == nil {
 		ms.State = &stateTrie.BeaconState{}
 	}
@@ -232,7 +232,7 @@ func (ms *ChainService) HeadSlot() uint64 {
 }
 
 // HeadRoot mocks HeadRoot method in chain service.
-func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
+func (ms *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
 	if len(ms.Root) > 0 {
 		return ms.Root, nil
 	}
@@ -270,7 +270,7 @@ func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
 }
 
 // ReceiveAttestation mocks ReceiveAttestation method in chain service.
-func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
+func (ms *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
 	return nil
 }
 
@@ -280,12 +280,12 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
 }
 
 // AttestationPreState mocks AttestationPreState method in chain service.
-func (ms *ChainService) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*stateTrie.BeaconState, error) {
+func (ms *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
 	return ms.State, nil
 }
 
 // HeadValidatorsIndices mocks the same method in the chain service.
-func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
+func (ms *ChainService) HeadValidatorsIndices(_ context.Context, epoch uint64) ([]uint64, error) {
 	if ms.State == nil {
 		return []uint64{}, nil
 	}
@@ -293,7 +293,7 @@ func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64)
 }
 
 // HeadSeed mocks the same method in the chain service.
-func (ms *ChainService) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
+func (ms *ChainService) HeadSeed(_ context.Context, epoch uint64) ([32]byte, error) {
 	return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
 }
 
@@ -323,18 +323,18 @@ func (ms *ChainService) CurrentSlot() uint64 {
 }
 
 // Participation mocks the same method in the chain service.
-func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
+func (ms *ChainService) Participation(_ uint64) *precompute.Balance {
 	return ms.Balance
 }
 
 // IsValidAttestation always returns true.
-func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
+func (ms *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
 	return ms.ValidAttestation
 }
 
 // IsCanonical returns and determines whether a block with the provided root is part of
 // the canonical chain.
-func (ms *ChainService) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
+func (ms *ChainService) IsCanonical(_ context.Context, _ [32]byte) (bool, error) {
 	return true, nil
 }
 
@@ -342,7 +342,7 @@ func (ms *ChainService) IsCanonical(ctx context.Context, blockRoot [32]byte) (bo
 func (ms *ChainService) ClearCachedStates() {}
 
 // HasInitSyncBlock mocks the same method in the chain service.
-func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
+func (ms *ChainService) HasInitSyncBlock(_ [32]byte) bool {
 	return false
 }
 
@@ -352,17 +352,17 @@ func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
 }
 
 // VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
-func (ms *ChainService) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
+func (ms *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
 	return ms.VerifyBlkDescendantErr
 }
 
 // VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
-func (ms *ChainService) VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error {
+func (ms *ChainService) VerifyLmdFfgConsistency(_ context.Context, _ *ethpb.Attestation) error {
 	return nil
 }
 
 // AttestationCheckPtInfo mocks AttestationCheckPtInfo and always returns nil.
-func (ms *ChainService) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
+func (ms *ChainService) AttestationCheckPtInfo(_ context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
 	f := ms.State.Fork()
 	g := bytesutil.ToBytes32(ms.State.GenesisValidatorRoot())
 	seed, err := helpers.Seed(ms.State, helpers.SlotToEpoch(att.Data.Slot), params.BeaconConfig().DomainBeaconAttester)
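All of the ChainService edits above live in a test mock: the parameter types must stay so the mock keeps satisfying the chain-service interfaces, and only the unused names become `_`. A sketch of the pattern with a hypothetical one-method interface:

	package main

	import (
		"context"
		"fmt"
	)

	// headFetcher is a hypothetical slice of the real interface.
	type headFetcher interface {
		HeadRoot(ctx context.Context) ([]byte, error)
	}

	// mockChain returns canned data; the context parameter stays in the
	// signature (the interface demands it) but is blanked as unused.
	type mockChain struct{ root []byte }

	func (m *mockChain) HeadRoot(_ context.Context) ([]byte, error) {
		return m.root, nil
	}

	func main() {
		var f headFetcher = &mockChain{root: []byte{0xde, 0xad}}
		r, _ := f.HeadRoot(context.Background())
		fmt.Printf("%x\n", r) // dead
	}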
beacon-chain/cache/attestation_data.go (vendored, 2 changes)
@@ -138,7 +138,7 @@ func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest)
 }
 
 // Put the response in the cache.
-func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
+func (c *AttestationCache) Put(_ context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
 	data := &attestationReqResWrapper{
 		req,
 		res,
beacon-chain/cache/checkpoint_state.go (vendored, 5 changes)
@@ -1,7 +1,6 @@
 package cache
 
 import (
-	"errors"
 	"sync"
 
 	lru "github.com/hashicorp/golang-lru"
@@ -13,10 +12,6 @@ import (
 )
 
 var (
-	// ErrNotCheckpointState will be returned when a cache object is not a pointer to
-	// a CheckpointState struct.
-	ErrNotCheckpointState = errors.New("object is not a state by check point struct")
-
 	// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
 	// Choosing 10 to account for multiple forks, this allows 5 forks per epoch boundary with 2 epochs
 	// window to accept attestation based on latest spec.
beacon-chain/cache/common.go (vendored, 2 changes)
@@ -24,6 +24,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
 }
 
 // popProcessNoopFunc is a no-op function that never returns an error.
-func popProcessNoopFunc(obj interface{}) error {
+func popProcessNoopFunc(_ interface{}) error {
 	return nil
 }
beacon-chain/cache/skip_slot_cache.go (vendored, 2 changes)
@@ -136,7 +136,7 @@ func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
 }
 
 // Put the response in the cache.
-func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
+func (c *SkipSlotCache) Put(_ context.Context, slot uint64, state *stateTrie.BeaconState) error {
 	if c.disabled {
 		return nil
 	}
beacon-chain/cache/subnet_ids.go (vendored, 2 changes)
@@ -113,7 +113,7 @@ func (c *subnetIDs) GetAllSubnets() []uint64 {
 	defer c.subnetsLock.RUnlock()
 
 	itemsMap := c.persistentSubnets.Items()
-	committees := []uint64{}
+	var committees []uint64
 
 	for _, v := range itemsMap {
 		if v.Expired() {
@@ -314,7 +314,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon
 		return err
 	}
 	indices := indexedAtt.AttestingIndices
-	pubkeys := []bls.PublicKey{}
+	var pubkeys []bls.PublicKey
 	for i := 0; i < len(indices); i++ {
 		pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
 		pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
@@ -365,7 +365,7 @@ func VerifyAttSigUseCheckPt(ctx context.Context, c *pb.CheckPtInfo, att *ethpb.A
 		return err
 	}
 	indices := indexedAtt.AttestingIndices
-	pubkeys := []bls.PublicKey{}
+	var pubkeys []bls.PublicKey
 	for i := 0; i < len(indices); i++ {
 		pubkeyAtIdx := c.PubKeys[indices[i]]
 		pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
@@ -639,7 +639,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
 
 	sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
 	list := bitfield.Bitlist{0b11111111}
-	atts := []*ethpb.Attestation{}
+	var atts []*ethpb.Attestation
 	for i := uint64(0); i < 1000; i++ {
 		atts = append(atts, &ethpb.Attestation{
 			Data: &ethpb.AttestationData{
@@ -57,7 +57,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
 			},
 		},
 	}
-	registry := []*ethpb.Validator{}
+	var registry []*ethpb.Validator
 	currentSlot := uint64(0)
 
 	beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
@@ -77,7 +77,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
 }
 
 func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
-	registry := []*ethpb.Validator{}
+	var registry []*ethpb.Validator
 	currentSlot := uint64(0)
 
 	beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
@@ -46,13 +46,13 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
 
 func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
-	ba := []byte{}
+	var ba []byte
 	pubkey := [48]byte{}
 	sig := [96]byte{}
 	domain := [4]byte{}
-	p := []byte{}
-	s := []byte{}
-	d := []byte{}
+	var p []byte
+	var s []byte
+	var d []byte
 	for i := 0; i < 10000; i++ {
 		fuzzer.Fuzz(&ba)
 		fuzzer.Fuzz(&pubkey)
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(t *testing.T) {
 func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	eth1data := &eth.Eth1Data{}
-	stateVotes := []*eth.Eth1Data{}
+	var stateVotes []*eth.Eth1Data
 	for i := 0; i < 100000; i++ {
 		fuzzer.Fuzz(eth1data)
 		fuzzer.Fuzz(&stateVotes)
@@ -50,7 +50,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
 	require.NoError(t, err)
 	signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
 	require.NoError(t, err, "Could not get signing root of beacon block header")
-	aggSigs := []bls.Signature{}
+	var aggSigs []bls.Signature
 	for _, index := range setA {
 		sig := privKeys[index].Sign(signingRoot[:])
 		aggSigs = append(aggSigs, sig)
@@ -158,8 +158,7 @@ func ProcessDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit,
 	if err != nil {
 		return nil, err
 	}
-	depositSig := deposit.Data.Signature
-	if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
+	if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
 		// Ignore this error as in the spec pseudo code.
 		log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
 		return beaconState, nil
@@ -223,7 +222,7 @@ func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) e
 }
 
 // Deprecated: This method uses deprecated ssz.SigningRoot.
-func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error {
+func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
 	return depositutil.VerifyDepositSignature(obj, domain)
 }
 
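The two deposit hunks above belong together: verifyDepositDataSigningRoot stops taking the public key and signature separately because deposit.Data already carries both, and the lone call site in ProcessDeposit shrinks accordingly. A sketch of that kind of signature narrowing, with stand-in types rather than the real protobufs:

	package main

	import (
		"errors"
		"fmt"
	)

	// depositData stands in for ethpb.Deposit_Data, which already bundles
	// the public key and signature the old parameters duplicated.
	type depositData struct {
		PubKey    []byte
		Signature []byte
	}

	// verify's narrowed signature: only the struct and the domain remain.
	func verify(d *depositData, domain []byte) error {
		if len(d.Signature) == 0 || len(domain) == 0 {
			return errors.New("missing signature or domain")
		}
		return nil // a real implementation would check the BLS signature here
	}

	func main() {
		d := &depositData{PubKey: []byte{1}, Signature: []byte{2}}
		fmt.Println(verify(d, []byte{0, 0, 0, 1})) // <nil>
	}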
@@ -19,7 +19,7 @@ import (
 //	state.eth1_data_votes.append(body.eth1_data)
 //	if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
 //	    state.latest_eth1_data = body.eth1_data
-func ProcessEth1DataInBlock(ctx context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
+func ProcessEth1DataInBlock(_ context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
 	block := b.Block
 	if beaconState == nil {
 		return nil, errors.New("nil state")
@@ -37,7 +37,7 @@ import (
 //	# Initiate exit
 //	initiate_validator_exit(state, exit.validator_index)
 func ProcessVoluntaryExits(
-	ctx context.Context,
+	_ context.Context,
 	beaconState *stateTrie.BeaconState,
 	b *ethpb.SignedBeaconBlock,
 ) (*stateTrie.BeaconState, error) {
@@ -37,7 +37,7 @@ import (
 //	# Verify proposer signature
 //	assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
 func ProcessBlockHeader(
-	ctx context.Context,
+	_ context.Context,
 	beaconState *stateTrie.BeaconState,
 	block *ethpb.SignedBeaconBlock,
 ) (*stateTrie.BeaconState, error) {
@@ -36,7 +36,7 @@ import (
 //
 //	slash_validator(state, proposer_slashing.proposer_index)
 func ProcessProposerSlashings(
-	ctx context.Context,
+	_ context.Context,
 	beaconState *stateTrie.BeaconState,
 	b *ethpb.SignedBeaconBlock,
 ) (*stateTrie.BeaconState, error) {
@@ -26,7 +26,7 @@ import (
 //	mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
 //	state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
 func ProcessRandao(
-	ctx context.Context,
+	_ context.Context,
 	beaconState *stateTrie.BeaconState,
 	b *ethpb.SignedBeaconBlock,
 ) (*stateTrie.BeaconState, error) {
@@ -122,7 +122,7 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
 		base, err := epoch.BaseReward(state, i)
 		assert.NoError(t, err, "Could not get base reward")
 		assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
-		assert.Equal(t, uint64(3*base), penalties[i], "Unexpected slashed indices penalty balance")
+		assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
 	}
 
 	nonAttestedIndices := []uint64{434, 677, 872, 791}
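The assertion change above drops a conversion of a value to its own type: BaseReward already yields a uint64, and an untyped constant multiplied by a uint64 is itself a uint64, so `uint64(3*base)` and `3*base` are identical. A tiny demonstration with a stand-in function:

	package main

	import "fmt"

	func baseReward() uint64 { return 7 } // stand-in for epoch.BaseReward

	func main() {
		base := baseReward()
		fmt.Println(uint64(3*base) == 3*base) // true: the conversion was a no-op
	}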
@@ -60,7 +60,7 @@ func TestSplitIndices_OK(t *testing.T) {
 }
 
 func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
-	list := []uint64{}
+	var list []uint64
 	listSize := uint64(1000)
 	seed := [32]byte{123, 42}
 	for i := uint64(0); i < listSize; i++ {
@@ -124,7 +124,7 @@ func BenchmarkShuffleList(b *testing.B) {
 }
 
 func TestShuffledIndex(t *testing.T) {
-	list := []uint64{}
+	var list []uint64
 	listSize := uint64(399)
 	for i := uint64(0); i < listSize; i++ {
 		list = append(list, i)
@@ -198,7 +198,7 @@ func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesi
 
 // This returns the bls domain given by the domain type and fork data root.
 func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte {
-	b := []byte{}
+	var b []byte
 	b = append(b, domainType[:4]...)
 	b = append(b, forkDataRoot[:28]...)
 	return b
@@ -63,9 +63,9 @@ func TestFuzzverifySigningRoot_10000(t *testing.T) {
 	pubkey := [48]byte{}
 	sig := [96]byte{}
 	domain := [4]byte{}
-	p := []byte{}
-	s := []byte{}
-	d := []byte{}
+	var p []byte
+	var s []byte
+	var d []byte
 	for i := 0; i < 10000; i++ {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(&pubkey)
@@ -64,7 +64,7 @@ func GenesisBeaconState(deposits []*ethpb.Deposit, genesisTime uint64, eth1Data
 		return nil, err
 	}
 	// Process initial deposits.
-	leaves := [][]byte{}
+	var leaves [][]byte
 	for _, deposit := range deposits {
 		if deposit == nil || deposit.Data == nil {
 			return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)
@@ -599,7 +599,7 @@ func ProcessOperationsNoVerifyAttsSigs(
 }
 
 // VerifyOperationLengths verifies that block operation lengths are valid.
-func VerifyOperationLengths(ctx context.Context, state *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
+func VerifyOperationLengths(_ context.Context, state *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
 	if b.Block == nil || b.Block.Body == nil {
 		return nil, errors.New("block and block body can't be nil")
 	}
@@ -52,7 +52,7 @@ func InitiateValidatorExit(state *stateTrie.BeaconState, idx uint64) (*stateTrie
 	if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
 		return state, nil
 	}
-	exitEpochs := []uint64{}
+	var exitEpochs []uint64
 	for _, val := range readOnlyVals {
 		if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
 			exitEpochs = append(exitEpochs, val.ExitEpoch())
@@ -9,11 +9,11 @@ import (
 )
 
 // LastArchivedSlot from the db.
-func (kv *Store) LastArchivedSlot(ctx context.Context) (uint64, error) {
+func (s *Store) LastArchivedSlot(ctx context.Context) (uint64, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
 	defer span.End()
 	var index uint64
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(stateSlotIndicesBucket)
 		b, _ := bkt.Cursor().Last()
 		index = bytesutil.BytesToUint64BigEndian(b)
@@ -24,12 +24,12 @@ func (kv *Store) LastArchivedSlot(ctx context.Context) (uint64, error) {
 }
 
 // LastArchivedRoot from the db.
-func (kv *Store) LastArchivedRoot(ctx context.Context) [32]byte {
+func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
 	defer span.End()
 
 	var blockRoot []byte
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(stateSlotIndicesBucket)
 		_, blockRoot = bkt.Cursor().Last()
 		return nil
@@ -42,12 +42,12 @@ func (kv *Store) LastArchivedRoot(ctx context.Context) [32]byte {
 
 // ArchivedPointRoot returns the block root of an archived point from the DB.
 // This is essential for cold state management and to restore a cold state.
-func (kv *Store) ArchivedPointRoot(ctx context.Context, slot uint64) [32]byte {
+func (s *Store) ArchivedPointRoot(ctx context.Context, slot uint64) [32]byte {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
 	defer span.End()
 
 	var blockRoot []byte
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(stateSlotIndicesBucket)
 		blockRoot = bucket.Get(bytesutil.Uint64ToBytesBigEndian(slot))
 		return nil
@@ -59,11 +59,11 @@ func (kv *Store) ArchivedPointRoot(ctx context.Context, slot uint64) [32]byte {
 }
 
 // HasArchivedPoint returns true if an archived point exists in DB.
-func (kv *Store) HasArchivedPoint(ctx context.Context, slot uint64) bool {
+func (s *Store) HasArchivedPoint(ctx context.Context, slot uint64) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
 	defer span.End()
 	var exists bool
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		iBucket := tx.Bucket(stateSlotIndicesBucket)
 		exists = iBucket.Get(bytesutil.Uint64ToBytesBigEndian(slot)) != nil
 		return nil
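From here through the rest of the db/kv files the hunks are one mechanical rename: the *Store receiver changes from `kv` to `s` so that every method on the type spells its receiver the same way, which Go code-review guidelines recommend for consistency and grep-ability. The shape of the change, reduced to a toy type:

	package main

	import "fmt"

	type Store struct{ databasePath string }

	// Before the sweep some Store methods named the receiver "kv" and
	// others "s"; after it, all of them use "s".
	func (s *Store) DatabasePath() string { return s.databasePath }
	func (s *Store) String() string       { return fmt.Sprintf("store at %s", s.databasePath) }

	func main() {
		s := &Store{databasePath: "/tmp/db"}
		fmt.Println(s)
	}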
@@ -17,12 +17,12 @@ const backupsDirectoryName = "backups"
 
 // Backup the database to the datadir backup directory.
 // Example for backup at slot 345: $DATADIR/backups/prysm_beacondb_at_slot_0000345.backup
-func (kv *Store) Backup(ctx context.Context) error {
+func (s *Store) Backup(ctx context.Context) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.Backup")
 	defer span.End()
 
-	backupsDir := path.Join(kv.databasePath, backupsDirectoryName)
-	head, err := kv.HeadBlock(ctx)
+	backupsDir := path.Join(s.databasePath, backupsDirectoryName)
+	head, err := s.HeadBlock(ctx)
 	if err != nil {
 		return err
 	}
@@ -46,7 +46,7 @@ func (kv *Store) Backup(ctx context.Context) error {
 		}
 	}()
 
-	return kv.db.View(func(tx *bolt.Tx) error {
+	return s.db.View(func(tx *bolt.Tx) error {
 		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
 			logrus.Debugf("Copying bucket %s\n", name)
 			return copyDB.Update(func(tx2 *bolt.Tx) error {
@@ -17,15 +17,15 @@ import (
 )
 
 // Block retrieval by root.
-func (kv *Store) Block(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBeaconBlock, error) {
+func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
 	defer span.End()
 	// Return block from cache if it exists.
-	if v, ok := kv.blockCache.Get(string(blockRoot[:])); v != nil && ok {
+	if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
 		return v.(*ethpb.SignedBeaconBlock), nil
 	}
 	var block *ethpb.SignedBeaconBlock
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		enc := bkt.Get(blockRoot[:])
 		if enc == nil {
@@ -38,11 +38,11 @@ func (kv *Store) Block(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBe
 }
 
 // HeadBlock returns the latest canonical block in eth2.
-func (kv *Store) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
+func (s *Store) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
 	defer span.End()
 	var headBlock *ethpb.SignedBeaconBlock
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		headRoot := bkt.Get(headBlockRootKey)
 		if headRoot == nil {
@@ -59,11 +59,11 @@ func (kv *Store) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error
 }
 
 // Blocks retrieves a list of beacon blocks by filter criteria.
-func (kv *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]*ethpb.SignedBeaconBlock, error) {
+func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.Blocks")
 	defer span.End()
 	blocks := make([]*ethpb.SignedBeaconBlock, 0)
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 
 		keys, err := getBlockRootsByFilter(ctx, tx, f)
@@ -85,11 +85,11 @@ func (kv *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]*ethpb.S
 }
 
 // BlockRoots retrieves a list of beacon block roots by filter criteria.
-func (kv *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error) {
+func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlockRoots")
 	defer span.End()
 	blockRoots := make([][32]byte, 0)
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		keys, err := getBlockRootsByFilter(ctx, tx, f)
 		if err != nil {
 			return err
@@ -107,14 +107,14 @@ func (kv *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]
 }
 
 // HasBlock checks if a block by root exists in the db.
-func (kv *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
+func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
 	defer span.End()
-	if v, ok := kv.blockCache.Get(string(blockRoot[:])); v != nil && ok {
+	if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
 		return true
 	}
 	exists := false
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		exists = bkt.Get(blockRoot[:]) != nil
 		return nil
@@ -125,10 +125,10 @@ func (kv *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
 }
 
 // deleteBlock by block root.
-func (kv *Store) deleteBlock(ctx context.Context, blockRoot [32]byte) error {
+func (s *Store) deleteBlock(ctx context.Context, blockRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteBlock")
 	defer span.End()
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		enc := bkt.Get(blockRoot[:])
 		if enc == nil {
@@ -142,17 +142,17 @@ func (kv *Store) deleteBlock(ctx context.Context, blockRoot [32]byte) error {
 		if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
 			return errors.Wrap(err, "could not delete root for DB indices")
 		}
-		kv.blockCache.Del(string(blockRoot[:]))
+		s.blockCache.Del(string(blockRoot[:]))
 		return bkt.Delete(blockRoot[:])
 	})
 }
 
 // deleteBlocks by block roots.
-func (kv *Store) deleteBlocks(ctx context.Context, blockRoots [][32]byte) error {
+func (s *Store) deleteBlocks(ctx context.Context, blockRoots [][32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteBlocks")
 	defer span.End()
 
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		for _, blockRoot := range blockRoots {
 			enc := bkt.Get(blockRoot[:])
@@ -167,7 +167,7 @@ func (kv *Store) deleteBlocks(ctx context.Context, blockRoots [][32]byte) error
 			if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
 				return errors.Wrap(err, "could not delete root for DB indices")
 			}
-			kv.blockCache.Del(string(blockRoot[:]))
+			s.blockCache.Del(string(blockRoot[:]))
 			if err := bkt.Delete(blockRoot[:]); err != nil {
 				return err
 			}
@@ -177,26 +177,26 @@ func (kv *Store) deleteBlocks(ctx context.Context, blockRoots [][32]byte) error
 }
 
 // SaveBlock to the db.
-func (kv *Store) SaveBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
+func (s *Store) SaveBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
 	defer span.End()
 	blockRoot, err := signed.Block.HashTreeRoot()
 	if err != nil {
 		return err
 	}
-	if v, ok := kv.blockCache.Get(string(blockRoot[:])); v != nil && ok {
+	if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
 		return nil
 	}
 
-	return kv.SaveBlocks(ctx, []*ethpb.SignedBeaconBlock{signed})
+	return s.SaveBlocks(ctx, []*ethpb.SignedBeaconBlock{signed})
 }
 
 // SaveBlocks via bulk updates to the db.
-func (kv *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBlock) error {
+func (s *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBlock) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
 	defer span.End()
 
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		for _, block := range blocks {
 			blockRoot, err := block.Block.HashTreeRoot()
@@ -215,7 +215,7 @@ func (kv *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBlo
 			if err := updateValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
 				return errors.Wrap(err, "could not update DB indices")
 			}
-			kv.blockCache.Set(string(blockRoot[:]), block, int64(len(enc)))
+			s.blockCache.Set(string(blockRoot[:]), block, int64(len(enc)))
 
 			if err := bkt.Put(blockRoot[:], enc); err != nil {
 				return err
@@ -226,11 +226,11 @@ func (kv *Store) SaveBlocks(ctx context.Context, blocks []*ethpb.SignedBeaconBlo
 }
 
 // SaveHeadBlockRoot to the db.
-func (kv *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
+func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
 	defer span.End()
-	return kv.db.Update(func(tx *bolt.Tx) error {
-		hasStateSummaryInCache := kv.stateSummaryCache.Has(blockRoot)
+	return s.db.Update(func(tx *bolt.Tx) error {
+		hasStateSummaryInCache := s.stateSummaryCache.Has(blockRoot)
 		hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) != nil
 		hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
 		if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
@@ -243,11 +243,11 @@ func (kv *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) erro
 }
 
 // GenesisBlock retrieves the genesis block of the beacon chain.
-func (kv *Store) GenesisBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
+func (s *Store) GenesisBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlock")
 	defer span.End()
 	var block *ethpb.SignedBeaconBlock
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
 		root := bkt.Get(genesisBlockRootKey)
 		enc := bkt.Get(root)
@@ -261,22 +261,22 @@ func (kv *Store) GenesisBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, er
 }
 
 // SaveGenesisBlockRoot to the db.
-func (kv *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
+func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
 	defer span.End()
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(blocksBucket)
 		return bucket.Put(genesisBlockRootKey, blockRoot[:])
 	})
 }
 
 // HighestSlotBlocksBelow returns the block with the highest slot below the input slot from the db.
-func (kv *Store) HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*ethpb.SignedBeaconBlock, error) {
+func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
 	defer span.End()
 
 	var best []byte
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blockSlotIndicesBucket)
 		// Iterate through the index, which is in byte sorted order.
 		c := bkt.Cursor()
@@ -298,13 +298,13 @@ func (kv *Store) HighestSlotBlocksBelow(ctx context.Context, slot uint64) ([]*et
 	var blk *ethpb.SignedBeaconBlock
 	var err error
 	if best != nil {
-		blk, err = kv.Block(ctx, bytesutil.ToBytes32(best))
+		blk, err = s.Block(ctx, bytesutil.ToBytes32(best))
 		if err != nil {
 			return nil, err
 		}
 	}
 	if blk == nil {
-		blk, err = kv.GenesisBlock(ctx)
+		blk, err = s.GenesisBlock(ctx)
 		if err != nil {
 			return nil, err
 		}
@@ -14,11 +14,11 @@ import (
 var errMissingStateForCheckpoint = errors.New("missing state summary for finalized root")
 
 // JustifiedCheckpoint returns the latest justified checkpoint in beacon chain.
-func (kv *Store) JustifiedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
+func (s *Store) JustifiedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.JustifiedCheckpoint")
 	defer span.End()
 	var checkpoint *ethpb.Checkpoint
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(checkpointBucket)
 		enc := bkt.Get(justifiedCheckpointKey)
 		if enc == nil {
@@ -32,11 +32,11 @@ func (kv *Store) JustifiedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, er
 }
 
 // FinalizedCheckpoint returns the latest finalized checkpoint in beacon chain.
-func (kv *Store) FinalizedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
+func (s *Store) FinalizedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.FinalizedCheckpoint")
 	defer span.End()
 	var checkpoint *ethpb.Checkpoint
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(checkpointBucket)
 		enc := bkt.Get(finalizedCheckpointKey)
 		if enc == nil {
@@ -50,7 +50,7 @@ func (kv *Store) FinalizedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, er
 }
 
 // SaveJustifiedCheckpoint saves justified checkpoint in beacon chain.
-func (kv *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
+func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveJustifiedCheckpoint")
 	defer span.End()
 
@@ -58,10 +58,10 @@ func (kv *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.
 	if err != nil {
 		return err
 	}
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(checkpointBucket)
 		hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil
-		hasStateSummaryInCache := kv.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
+		hasStateSummaryInCache := s.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
 		hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
 		if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
 			return errMissingStateForCheckpoint
@@ -71,7 +71,7 @@ func (kv *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.
 }
 
 // SaveFinalizedCheckpoint saves finalized checkpoint in beacon chain.
-func (kv *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
+func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFinalizedCheckpoint")
 	defer span.End()
 
@@ -79,10 +79,10 @@ func (kv *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.
 	if err != nil {
 		return err
 	}
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(checkpointBucket)
 		hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil
-		hasStateSummaryInCache := kv.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
+		hasStateSummaryInCache := s.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root))
 		hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
 		if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) {
 			return errMissingStateForCheckpoint
@@ -91,6 +91,6 @@ func (kv *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.
 			return err
 		}
 
-		return kv.updateFinalizedBlockRoots(ctx, tx, checkpoint)
+		return s.updateFinalizedBlockRoots(ctx, tx, checkpoint)
 	})
 }
@@ -11,11 +11,11 @@ import (
 
 // DepositContractAddress returns contract address is the address of
 // the deposit contract on the proof of work chain.
-func (kv *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
+func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
 	defer span.End()
 	var addr []byte
-	if err := kv.db.View(func(tx *bolt.Tx) error {
+	if err := s.db.View(func(tx *bolt.Tx) error {
 		chainInfo := tx.Bucket(chainMetadataBucket)
 		addr = chainInfo.Get(depositContractAddressKey)
 		return nil
@@ -26,11 +26,11 @@ func (kv *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
 }
 
 // SaveDepositContractAddress to the db. It returns an error if an address has been previously saved.
-func (kv *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
+func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
 	defer span.End()
 
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		chainInfo := tx.Bucket(chainMetadataBucket)
 		expectedAddress := chainInfo.Get(depositContractAddressKey)
 		if expectedAddress != nil {
@@ -37,7 +37,7 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
 //
 // This method ensures that all blocks from the current finalized epoch are considered "final" while
 // maintaining only canonical and finalized blocks older than the current finalized epoch.
-func (kv *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
+func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
 	defer span.End()
 
@@ -56,7 +56,7 @@ func (kv *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, che
 		}
 	}
 
-	blockRoots, err := kv.BlockRoots(ctx, filters.NewFilter().
+	blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
 		SetStartEpoch(previousFinalizedCheckpoint.Epoch).
 		SetEndEpoch(checkpoint.Epoch+1),
 	)
@@ -78,7 +78,7 @@ func (kv *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, che
 			break
 		}
 
-		signedBlock, err := kv.Block(ctx, bytesutil.ToBytes32(root))
+		signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
 		if err != nil {
 			traceutil.AnnotateError(span, err)
 			return err
@@ -129,7 +129,7 @@ func (kv *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, che
 	}
 
 	// Upsert blocks from the current finalized epoch.
-	roots, err := kv.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
+	roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
 	if err != nil {
 		traceutil.AnnotateError(span, err)
 		return err
@@ -159,12 +159,12 @@ func (kv *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, che
 // A beacon block root contained exists in this index if it is considered finalized and canonical.
 // Note: beacon blocks from the latest finalized epoch return true, whether or not they are
 // considered canonical in the "head view" of the beacon node.
-func (kv *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
+func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
 	defer span.End()
 
 	var exists bool
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		exists = tx.Bucket(finalizedBlockRootsIndexBucket).Get(blockRoot[:]) != nil
 		// Check genesis block root.
 		if !exists {
@@ -182,12 +182,12 @@ func (kv *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
 // FinalizedChildBlock returns the child block of a provided finalized block. If
 // no finalized block or its respective child block exists we return with a nil
 // block.
-func (kv *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBeaconBlock, error) {
+func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.FinalizedChildBlock")
 	defer span.End()
 
 	var blk *ethpb.SignedBeaconBlock
	err := s.db.View(func(tx *bolt.Tx) error {
 		blkBytes := tx.Bucket(finalizedBlockRootsIndexBucket).Get(blockRoot[:])
 		if blkBytes == nil {
 			return nil
@@ -122,26 +122,26 @@ func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*St
 }
 
 // ClearDB removes the previously stored database in the data directory.
-func (kv *Store) ClearDB() error {
-	if _, err := os.Stat(kv.databasePath); os.IsNotExist(err) {
+func (s *Store) ClearDB() error {
+	if _, err := os.Stat(s.databasePath); os.IsNotExist(err) {
 		return nil
 	}
-	prometheus.Unregister(createBoltCollector(kv.db))
-	if err := os.Remove(path.Join(kv.databasePath, databaseFileName)); err != nil {
+	prometheus.Unregister(createBoltCollector(s.db))
+	if err := os.Remove(path.Join(s.databasePath, databaseFileName)); err != nil {
 		return errors.Wrap(err, "could not remove database file")
 	}
 	return nil
 }
 
 // Close closes the underlying BoltDB database.
-func (kv *Store) Close() error {
-	prometheus.Unregister(createBoltCollector(kv.db))
-	return kv.db.Close()
+func (s *Store) Close() error {
+	prometheus.Unregister(createBoltCollector(s.db))
+	return s.db.Close()
 }
 
 // DatabasePath at which this database writes files.
-func (kv *Store) DatabasePath() string {
-	return kv.databasePath
+func (s *Store) DatabasePath() string {
+	return s.databasePath
 }
 
 func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {
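Every Store method in these kv files wraps its bucket access in db.View (read-only) or db.Update (read-write) closures; the receiver rename leaves that structure untouched. For reference, a self-contained sketch of the BoltDB transaction pattern, assuming the go.etcd.io/bbolt module these files appear to import as `bolt`:

	package main

	import (
		"fmt"
		"log"

		bolt "go.etcd.io/bbolt"
	)

	func main() {
		db, err := bolt.Open("/tmp/example.db", 0600, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		bucket := []byte("blocks")

		// Update runs a read-write transaction; returning an error rolls back.
		if err := db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists(bucket)
			if err != nil {
				return err
			}
			return b.Put([]byte("root"), []byte("value"))
		}); err != nil {
			log.Fatal(err)
		}

		// View runs a read-only transaction, mirroring Store.Block and friends.
		if err := db.View(func(tx *bolt.Tx) error {
			fmt.Printf("%s\n", tx.Bucket(bucket).Get([]byte("root")))
			return nil
		}); err != nil {
			log.Fatal(err)
		}
	}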
@@ -9,10 +9,10 @@ import (
 )
 
 // VoluntaryExit retrieval by signing root.
-func (kv *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
+func (s *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.VoluntaryExit")
 	defer span.End()
-	enc, err := kv.voluntaryExitBytes(ctx, exitRoot)
+	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -27,10 +27,10 @@ func (kv *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.V
 }
 
 // HasVoluntaryExit verifies if a voluntary exit is stored in the db by its signing root.
-func (kv *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
+func (s *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
 	defer span.End()
-	enc, err := kv.voluntaryExitBytes(ctx, exitRoot)
+	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
 	if err != nil {
 		panic(err)
 	}
@@ -38,7 +38,7 @@ func (kv *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
 }
 
 // SaveVoluntaryExit to the db by its signing root.
-func (kv *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
+func (s *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveVoluntaryExit")
 	defer span.End()
 	exitRoot, err := exit.HashTreeRoot()
@@ -49,17 +49,17 @@ func (kv *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExi
 	if err != nil {
 		return err
 	}
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(voluntaryExitsBucket)
 		return bucket.Put(exitRoot[:], enc)
 	})
 }
 
-func (kv *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
+func (s *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.voluntaryExitBytes")
 	defer span.End()
 	var dst []byte
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(voluntaryExitsBucket)
 		dst = bkt.Get(exitRoot[:])
 		return nil
@@ -68,10 +68,10 @@ func (kv *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]b
 }
 
 // deleteVoluntaryExit clears a voluntary exit from the db by its signing root.
-func (kv *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
+func (s *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteVoluntaryExit")
 	defer span.End()
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(voluntaryExitsBucket)
 		return bucket.Delete(exitRoot[:])
 	})
@@ -12,7 +12,7 @@ import (
 )
 
 // SavePowchainData saves the pow chain data.
-func (kv *Store) SavePowchainData(ctx context.Context, data *db.ETH1ChainData) error {
+func (s *Store) SavePowchainData(ctx context.Context, data *db.ETH1ChainData) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SavePowchainData")
 	defer span.End()
 
@@ -22,7 +22,7 @@ func (kv *Store) SavePowchainData(ctx context.Context, data *db.ETH1ChainData) e
 		return err
 	}
 
-	err := kv.db.Update(func(tx *bolt.Tx) error {
+	err := s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(powchainBucket)
 		enc, err := proto.Marshal(data)
 		if err != nil {
@@ -35,12 +35,12 @@ func (kv *Store) SavePowchainData(ctx context.Context, data *db.ETH1ChainData) e
 }
 
 // PowchainData retrieves the powchain data.
-func (kv *Store) PowchainData(ctx context.Context) (*db.ETH1ChainData, error) {
+func (s *Store) PowchainData(ctx context.Context) (*db.ETH1ChainData, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.PowchainData")
 	defer span.End()
 
 	var data *db.ETH1ChainData
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(powchainBucket)
 		enc := bkt.Get(powchainDataKey)
 		if len(enc) == 0 {
@@ -9,10 +9,10 @@ import (
 )
 
 // ProposerSlashing retrieval by slashing root.
-func (kv *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
+func (s *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.ProposerSlashing")
 	defer span.End()
-	enc, err := kv.proposerSlashingBytes(ctx, slashingRoot)
+	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -27,10 +27,10 @@ func (kv *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*
 }
 
 // HasProposerSlashing verifies if a slashing is stored in the db.
-func (kv *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
+func (s *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
 	defer span.End()
-	enc, err := kv.proposerSlashingBytes(ctx, slashingRoot)
+	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
 	if err != nil {
 		panic(err)
 	}
@@ -38,7 +38,7 @@ func (kv *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte)
 }
 
 // SaveProposerSlashing to the db by its hash tree root.
-func (kv *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
+func (s *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveProposerSlashing")
 	defer span.End()
 	slashingRoot, err := slashing.HashTreeRoot()
@@ -49,17 +49,17 @@ func (kv *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.Propo
 	if err != nil {
 		return err
 	}
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(proposerSlashingsBucket)
 		return bucket.Put(slashingRoot[:], enc)
 	})
 }
 
-func (kv *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
+func (s *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.proposerSlashingBytes")
 	defer span.End()
 	var dst []byte
-	err := kv.db.View(func(tx *bolt.Tx) error {
+	err := s.db.View(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(proposerSlashingsBucket)
 		dst = bkt.Get(slashingRoot[:])
 		return nil
@@ -68,20 +68,20 @@ func (kv *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byt
 }
 
 // deleteProposerSlashing clears a proposer slashing from the db by its hash tree root.
-func (kv *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
+func (s *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteProposerSlashing")
 	defer span.End()
-	return kv.db.Update(func(tx *bolt.Tx) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(proposerSlashingsBucket)
 		return bucket.Delete(slashingRoot[:])
 	})
 }
 
 // AttesterSlashing retrieval by hash tree root.
-func (kv *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
+func (s *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.AttesterSlashing")
 	defer span.End()
-	enc, err := kv.attesterSlashingBytes(ctx, slashingRoot)
+	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -96,10 +96,10 @@ func (kv *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*
 }
 
 // HasAttesterSlashing verifies if a slashing is stored in the db.
-func (kv *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
+func (s *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
 	defer span.End()
-	enc, err := kv.attesterSlashingBytes(ctx, slashingRoot)
+	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
 	if err != nil {
 		panic(err)
 	}
@@ -107,7 +107,7 @@ func (kv *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte)
 }
 
 // SaveAttesterSlashing to the db by its hash tree root.
-func (kv *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
+func (s *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttesterSlashing")
 	defer span.End()
 	slashingRoot, err := slashing.HashTreeRoot()
@@ -118,17 +118,17 @@ func (kv *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.Attes
 	if err != nil {
 		return err
 	}
-	return kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(attesterSlashingsBucket)
|
||||
return bucket.Put(slashingRoot[:], enc)
|
||||
})
|
||||
}
|
||||
|
||||
func (kv *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
|
||||
func (s *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.attesterSlashingBytes")
|
||||
defer span.End()
|
||||
var dst []byte
|
||||
err := kv.db.View(func(tx *bolt.Tx) error {
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(attesterSlashingsBucket)
|
||||
dst = bkt.Get(slashingRoot[:])
|
||||
return nil
|
||||
@ -137,10 +137,10 @@ func (kv *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byt
|
||||
}
|
||||
|
||||
// deleteAttesterSlashing clears an attester slashing from the db by its hash tree root.
|
||||
func (kv *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
|
||||
func (s *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteAttesterSlashing")
|
||||
defer span.End()
|
||||
return kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(attesterSlashingsBucket)
|
||||
return bucket.Delete(slashingRoot[:])
|
||||
})
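The bulk of these hunks is one mechanical change from the commit's "rename receivers to be consistent" item: every method on Store now uses the receiver name s instead of a mix of kv and s. A hedged before/after sketch of the rule on a toy type (not this repo's Store); the "receiver-naming" label is how revive-style linters and IDE inspections commonly refer to this check, an assumption on my part rather than something the commit states.

package demo

type Store struct{ items map[string][]byte }

// Inconsistent: one method calls the receiver kv, another calls it s,
// which receiver-naming inspections flag:
//
//   func (kv *Store) Get(k string) []byte { return kv.items[k] }
//   func (s *Store) Has(k string) bool    { _, ok := s.items[k]; return ok }

// Consistent: pick one short name and use it on every method.
func (s *Store) Get(k string) []byte { return s.items[k] }
func (s *Store) Has(k string) bool   { _, ok := s.items[k]; return ok }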
@ -15,11 +15,11 @@ import (

// State returns the saved state using block's signing root,
// this particular block was used to generate the state.
func (kv *Store) State(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
func (s *Store) State(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
defer span.End()
var s *pb.BeaconState
enc, err := kv.stateBytes(ctx, blockRoot)
var st *pb.BeaconState
enc, err := s.stateBytes(ctx, blockRoot)
if err != nil {
return nil, err
}

@ -28,19 +28,19 @@ func (kv *Store) State(ctx context.Context, blockRoot [32]byte) (*state.BeaconSt
return nil, nil
}

s, err = createState(ctx, enc)
st, err = createState(ctx, enc)
if err != nil {
return nil, err
}
return state.InitializeFromProtoUnsafe(s)
return state.InitializeFromProtoUnsafe(st)
}

// HeadState returns the latest canonical state in beacon chain.
func (kv *Store) HeadState(ctx context.Context) (*state.BeaconState, error) {
func (s *Store) HeadState(ctx context.Context) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadState")
defer span.End()
var s *pb.BeaconState
err := kv.db.View(func(tx *bolt.Tx) error {
var st *pb.BeaconState
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve head block's signing root from blocks bucket,
// to look up what the head state is.
bucket := tx.Bucket(blocksBucket)

@ -53,28 +53,28 @@ func (kv *Store) HeadState(ctx context.Context) (*state.BeaconState, error) {
}

var err error
s, err = createState(ctx, enc)
st, err = createState(ctx, enc)
return err
})
if err != nil {
return nil, err
}
if s == nil {
if st == nil {
return nil, nil
}
span.AddAttributes(trace.BoolAttribute("exists", s != nil))
if s != nil {
span.AddAttributes(trace.Int64Attribute("slot", int64(s.Slot)))
if st != nil {
span.AddAttributes(trace.Int64Attribute("slot", int64(st.Slot)))
}
return state.InitializeFromProtoUnsafe(s)
return state.InitializeFromProtoUnsafe(st)
}

// GenesisState returns the genesis state in beacon chain.
func (kv *Store) GenesisState(ctx context.Context) (*state.BeaconState, error) {
func (s *Store) GenesisState(ctx context.Context) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
defer span.End()
var s *pb.BeaconState
err := kv.db.View(func(tx *bolt.Tx) error {
var st *pb.BeaconState
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket,
// to look up what the genesis state is.
bucket := tx.Bucket(blocksBucket)

@ -87,28 +87,28 @@ func (kv *Store) GenesisState(ctx context.Context) (*state.BeaconState, error) {
}

var err error
s, err = createState(ctx, enc)
st, err = createState(ctx, enc)
return err
})
if err != nil {
return nil, err
}
if s == nil {
if st == nil {
return nil, nil
}
return state.InitializeFromProtoUnsafe(s)
return state.InitializeFromProtoUnsafe(st)
}
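The local rename from s to st in these methods is forced by the receiver rename: the receiver and the function's top-level locals live in the same block, so redeclaring s would not even compile. A toy illustration of the collision, under that assumption about Go's scoping (which matches the spec's treatment of receivers as block-scoped parameters):

package demo

type Store struct{ name string }

func (s *Store) Name() string {
	// var s string  // would not compile: "s redeclared in this block"
	st := s.name // distinct name, no collision with the receiver
	if len(st) == 0 {
		s := "inner" // shadowing inside a nested block is legal, but best avoided
		return s
	}
	return st
}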
// SaveState stores a state to the db using block's signing root which was used to generate the state.
func (kv *Store) SaveState(ctx context.Context, st *state.BeaconState, blockRoot [32]byte) error {
func (s *Store) SaveState(ctx context.Context, st *state.BeaconState, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveState")
defer span.End()

return kv.SaveStates(ctx, []*state.BeaconState{st}, [][32]byte{blockRoot})
return s.SaveStates(ctx, []*state.BeaconState{st}, [][32]byte{blockRoot})
}

// SaveStates stores multiple states to the db using the provided corresponding roots.
func (kv *Store) SaveStates(ctx context.Context, states []*state.BeaconState, blockRoots [][32]byte) error {
func (s *Store) SaveStates(ctx context.Context, states []*state.BeaconState, blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStates")
defer span.End()
if states == nil {

@ -123,7 +123,7 @@ func (kv *Store) SaveStates(ctx context.Context, states []*state.BeaconState, bl
}
}

return kv.db.Update(func(tx *bolt.Tx) error {
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
for i, rt := range blockRoots {
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())

@ -139,10 +139,10 @@ func (kv *Store) SaveStates(ctx context.Context, states []*state.BeaconState, bl
}

// HasState checks if a state by root exists in the db.
func (kv *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasState")
defer span.End()
enc, err := kv.stateBytes(ctx, blockRoot)
enc, err := s.stateBytes(ctx, blockRoot)
if err != nil {
panic(err)
}

@ -150,10 +150,10 @@ func (kv *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
}

// DeleteState by block root.
func (kv *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteState")
defer span.End()
return kv.DeleteStates(ctx, [][32]byte{blockRoot})
return s.DeleteStates(ctx, [][32]byte{blockRoot})
}

// DeleteStates by block roots.

@ -162,7 +162,7 @@ func (kv *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
// cursor is faster when there are a large set of keys to delete. This method is O(n) deletion where
// n is the number of keys in the database. The alternative of calling bkt.Delete on each key to
// delete would be O(m*log(n)) which would be much slower given a large set of keys to delete.
func (kv *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
func (s *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteStates")
defer span.End()

@ -171,7 +171,7 @@ func (kv *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error
rootMap[blockRoot] = true
}

return kv.db.Update(func(tx *bolt.Tx) error {
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
genesisBlockRoot := bkt.Get(genesisBlockRootKey)
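The comment in DeleteStates above is the rationale for a cursor walk: one pass over the bucket deleting matched keys is O(n), versus O(m*log n) for m point deletes. A minimal sketch of that cursor-deletion idiom, assuming a go.etcd.io/bbolt-style API, a hypothetical "stateDemo" bucket that already exists, and none of the genesis guarding the real method performs:

package demo

import (
	bolt "go.etcd.io/bbolt"
)

// deleteMany removes every key present in roots in a single cursor pass.
func deleteMany(db *bolt.DB, roots map[string]bool) error {
	return db.Update(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("stateDemo")).Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if roots[string(k)] {
				// Cursor.Delete removes the current key; iteration then
				// continues from the next key as usual.
				if err := c.Delete(); err != nil {
					return err
				}
			}
		}
		return nil
	})
}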
@ -225,11 +225,11 @@ func createState(ctx context.Context, enc []byte) (*pb.BeaconState, error) {
}

// HasState checks if a state by root exists in the db.
func (kv *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
defer span.End()
var dst []byte
err := kv.db.View(func(tx *bolt.Tx) error {
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateBucket)
dst = bkt.Get(blockRoot[:])
return nil

@ -287,12 +287,12 @@ func slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []byte) (uint64
// from the db. Ideally there should just be one state per slot, but given a validator
// can double propose, a single slot could have multiple block roots and
// resulting states. This returns a list of states.
func (kv *Store) HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*state.BeaconState, error) {
func (s *Store) HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotStatesBelow")
defer span.End()

var best []byte
if err := kv.db.View(func(tx *bolt.Tx) error {
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
c := bkt.Cursor()
for s, root := c.First(); s != nil; s, root = c.Next() {

@ -313,13 +313,13 @@ func (kv *Store) HighestSlotStatesBelow(ctx context.Context, slot uint64) ([]*st
var st *state.BeaconState
var err error
if best != nil {
st, err = kv.State(ctx, bytesutil.ToBytes32(best))
st, err = s.State(ctx, bytesutil.ToBytes32(best))
if err != nil {
return nil, err
}
}
if st == nil {
st, err = kv.GenesisState(ctx)
st, err = s.GenesisState(ctx)
if err != nil {
return nil, err
}
@ -9,19 +9,19 @@ import (
)

// SaveStateSummary saves a state summary object to the DB.
func (kv *Store) SaveStateSummary(ctx context.Context, summary *pb.StateSummary) error {
func (s *Store) SaveStateSummary(ctx context.Context, summary *pb.StateSummary) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummary")
defer span.End()

return kv.SaveStateSummaries(ctx, []*pb.StateSummary{summary})
return s.SaveStateSummaries(ctx, []*pb.StateSummary{summary})
}

// SaveStateSummaries saves state summary objects to the DB.
func (kv *Store) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSummary) error {
func (s *Store) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSummary) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummaries")
defer span.End()

return kv.db.Update(func(tx *bolt.Tx) error {
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
for _, summary := range summaries {
enc, err := encode(ctx, summary)

@ -37,10 +37,10 @@ func (kv *Store) SaveStateSummaries(ctx context.Context, summaries []*pb.StateSu
}

// StateSummary returns the state summary object from the db using input block root.
func (kv *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.StateSummary")
defer span.End()
enc, err := kv.stateSummaryBytes(ctx, blockRoot)
enc, err := s.stateSummaryBytes(ctx, blockRoot)
if err != nil {
return nil, err
}

@ -55,22 +55,22 @@ func (kv *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*pb.Stat
}

// HasStateSummary returns true if a state summary exists in DB.
func (kv *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()
enc, err := kv.stateSummaryBytes(ctx, blockRoot)
enc, err := s.stateSummaryBytes(ctx, blockRoot)
if err != nil {
panic(err)
}
return len(enc) > 0
}

func (kv *Store) stateSummaryBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
func (s *Store) stateSummaryBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateSummaryBytes")
defer span.End()

var enc []byte
err := kv.db.View(func(tx *bolt.Tx) error {
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
enc = bucket.Get(blockRoot[:])
return nil
@ -54,7 +54,7 @@ func ConfigureGlobalFlags(ctx *cli.Context) {

func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
maxPeers := int(ctx.Int(cmd.P2PMaxPeers.Name))
maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
if cfg.MinimumSyncPeers > maxPeers {
log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
cfg.MinimumSyncPeers = maxPeers
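This hunk is the "remove unnecessary type conversions" item in miniature: cli.Context.Int already returns an int, so the outer int(...) added nothing. The same class of fix appears later where peer.ID("fake") becomes "fake", because an untyped string constant converts implicitly to a named string type. A toy version of both, with invented names:

package demo

// ID mirrors how libp2p's peer.ID is just a named string type.
type ID string

func fakeID() ID {
	// An untyped constant takes the named type implicitly, so an
	// explicit ID("fake") conversion is redundant.
	return "fake"
}

func clampMin(min, max int) int {
	// max already has type int; writing int(max) would be a no-op
	// conversion, exactly like int(ctx.Int(...)) above.
	if min > max {
		return max
	}
	return min
}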
@ -121,7 +121,7 @@ func (s *Service) Status() error {
}

// AllDeposits mocks out the deposit cache functionality for interop.
func (s *Service) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (s *Service) AllDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}

@ -146,22 +146,22 @@ func (s *Service) ClearPreGenesisData() {
}

// DepositByPubkey mocks out the deposit cache functionality for interop.
func (s *Service) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
func (s *Service) DepositByPubkey(_ context.Context, _ []byte) (*ethpb.Deposit, *big.Int) {
return &ethpb.Deposit{}, nil
}

// DepositsNumberAndRootAtHeight mocks out the deposit cache functionality for interop.
func (s *Service) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
func (s *Service) DepositsNumberAndRootAtHeight(_ context.Context, _ *big.Int) (uint64, [32]byte) {
return 0, [32]byte{}
}

// FinalizedDeposits mocks out the deposit cache functionality for interop.
func (s *Service) FinalizedDeposits(ctx context.Context) *depositcache.FinalizedDeposits {
func (s *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDeposits {
return nil
}

// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (s *Service) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (s *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
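These mocks keep their full signatures so they still satisfy the caller's interface, but each unused parameter becomes the blank identifier _, which silences unused-argument inspections and documents that the fake deliberately ignores its inputs. A toy version of the pattern, with a hypothetical interface standing in for the real deposit-cache one:

package demo

import (
	"context"
	"math/big"
)

// DepositFetcher is a stand-in for the interface these mocks satisfy.
type DepositFetcher interface {
	AllDeposits(ctx context.Context, untilBlk *big.Int) []string
}

type mockService struct{}

// The parameters must stay (the interface fixes the signature), but the
// blank identifier makes it explicit that this fake never reads them.
func (mockService) AllDeposits(_ context.Context, _ *big.Int) []string {
	return []string{}
}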
@ -15,14 +15,14 @@ import (
)

func validAttesterSlashingForValIdx(t *testing.T, beaconState *state.BeaconState, privs []bls.SecretKey, valIdx ...uint64) *ethpb.AttesterSlashing {
slashings := []*ethpb.AttesterSlashing{}
var slashings []*ethpb.AttesterSlashing
for _, idx := range valIdx {
slashing, err := testutil.GenerateAttesterSlashingForValidator(beaconState, privs[idx], idx)
require.NoError(t, err)
slashings = append(slashings, slashing)
}
allSig1 := []bls.Signature{}
allSig2 := []bls.Signature{}
var allSig1 []bls.Signature
var allSig2 []bls.Signature
for _, slashing := range slashings {
sig1 := slashing.Attestation_1.Signature
sig2 := slashing.Attestation_2.Signature
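The "replace empty slice declaration" fixes above swap x := []T{} (an allocated, empty slice) for var x []T (a nil slice). Both have length zero and both append identically, so for locals that are only appended to, the nil form avoids a needless allocation. A quick demonstration:

package main

import "fmt"

func main() {
	var a []int  // nil slice: no allocation yet
	b := []int{} // empty but non-nil: allocates immediately

	fmt.Println(len(a), len(b), a == nil, b == nil) // 0 0 true false

	// append treats both the same way.
	a = append(a, 1)
	b = append(b, 1)
	fmt.Println(a, b) // [1] [1]
}

The one observable difference is serialization (a nil slice JSON-encodes as null rather than []), which is presumably why this sweep only touched locals that are built up with append.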
@ -56,7 +56,6 @@ go_library(
"//shared/timeutils:go_default_library",
"//shared/traceutil:go_default_library",
"//shared/version:go_default_library",
"@com_github_btcsuite_btcd//btcec:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@ -20,7 +20,7 @@ const ipLimit = 4
const ipBurst = 8

// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (s *Service) InterceptPeerDial(p peer.ID) (allow bool) {
func (s *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}

@ -51,12 +51,12 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {

// InterceptSecured tests whether a given connection, now authenticated,
// is allowed.
func (s *Service) InterceptSecured(_ network.Direction, _ peer.ID, n network.ConnMultiaddrs) (allow bool) {
func (s *Service) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
return true
}

// InterceptUpgraded tests whether a fully capable connection is allowed.
func (s *Service) InterceptUpgraded(n network.Conn) (allow bool, reason control.DisconnectReason) {
func (s *Service) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

@ -22,7 +22,7 @@ func TestReadVarint_ExceedsMaxLength(t *testing.T) {
fByte := byte(1 << 7)
// Terminating byte.
tByte := byte(1 << 6)
header := []byte{}
var header []byte
for i := 0; i < 9; i++ {
header = append(header, fByte)
}

@ -93,7 +93,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := []ma.Multiaddr{}
var addrs []ma.Multiaddr

for _, n := range nodes {
if s.filterPeer(n) {

@ -182,7 +182,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := []ma.Multiaddr{}
var addrs []ma.Multiaddr

for _, n := range nodes {
if s.filterPeer(n) {

@ -36,7 +36,7 @@ func (mockListener) Lookup(enode.ID) []*enode.Node {
panic("implement me")
}

func (mockListener) ReadRandomNodes([]*enode.Node) int {
func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
panic("implement me")
}

@ -112,7 +112,7 @@ func retrieveAttSubnets(record *enr.Record) ([]uint64, error) {
if err != nil {
return nil, err
}
committeeIdxs := []uint64{}
var committeeIdxs []uint64
for i := uint64(0); i < attestationSubnetCount; i++ {
if bitV.BitAt(i) {
committeeIdxs = append(committeeIdxs, i)
@ -33,22 +33,22 @@ func (p *FakeP2P) Encoding() encoder.NetworkEncoding {
}

// AddConnectionHandler -- fake.
func (p *FakeP2P) AddConnectionHandler(f func(ctx context.Context, id peer.ID) error) {
func (p *FakeP2P) AddConnectionHandler(_ func(ctx context.Context, id peer.ID) error) {

}

// AddDisconnectionHandler -- fake.
func (p *FakeP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error) {
func (p *FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
}

// AddPingMethod -- fake.
func (p *FakeP2P) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) {
func (p *FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {

}

// PeerID -- fake.
func (p *FakeP2P) PeerID() peer.ID {
return peer.ID("fake")
return "fake"
}

// ENR returns the enr of the local peer.

@ -57,7 +57,7 @@ func (p *FakeP2P) ENR() *enr.Record {
}

// FindPeersWithSubnet mocks the p2p func.
func (p *FakeP2P) FindPeersWithSubnet(ctx context.Context, index uint64) (bool, error) {
func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ uint64) (bool, error) {
return false, nil
}

@ -67,7 +67,7 @@ func (p *FakeP2P) RefreshENR() {
}

// LeaveTopic -- fake.
func (p *FakeP2P) LeaveTopic(topic string) error {
func (p *FakeP2P) LeaveTopic(_ string) error {
return nil

}

@ -83,12 +83,12 @@ func (p *FakeP2P) Peers() *peers.Status {
}

// PublishToTopic -- fake.
func (p *FakeP2P) PublishToTopic(ctx context.Context, topic string, data []byte, opts ...pubsub.PubOpt) error {
func (p *FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
return nil
}

// Send -- fake.
func (p *FakeP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) {
func (p *FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
return nil, nil
}

@ -103,17 +103,17 @@ func (p *FakeP2P) MetadataSeq() uint64 {
}

// SetStreamHandler -- fake.
func (p *FakeP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
func (p *FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {

}

// SubscribeToTopic -- fake.
func (p *FakeP2P) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error) {
func (p *FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
return nil, nil
}

// JoinTopic -- fake.
func (p *FakeP2P) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
func (p *FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
return nil, nil
}

@ -123,17 +123,17 @@ func (p *FakeP2P) Host() host.Host {
}

// Disconnect -- fake.
func (p *FakeP2P) Disconnect(pid peer.ID) error {
func (p *FakeP2P) Disconnect(_ peer.ID) error {
return nil
}

// Broadcast -- fake.
func (p *FakeP2P) Broadcast(ctx context.Context, msg proto.Message) error {
func (p *FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
return nil
}

// BroadcastAttestation -- fake.
func (p *FakeP2P) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error {
func (p *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
return nil
}

@ -148,7 +148,7 @@ func (p *FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
}

// InterceptAccept -- fake.
func (p *FakeP2P) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
func (p *FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
return true
}

@ -19,7 +19,7 @@ func (m *MockBroadcaster) Broadcast(context.Context, proto.Message) error {
}

// BroadcastAttestation records a broadcast occurred.
func (m *MockBroadcaster) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error {
func (m *MockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
m.BroadcastCalled = true
return nil
}

@ -39,9 +39,9 @@ func (m MockPeerManager) ENR() *enr.Record {
func (m MockPeerManager) RefreshENR() {}

// FindPeersWithSubnet .
func (m MockPeerManager) FindPeersWithSubnet(ctx context.Context, index uint64) (bool, error) {
func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ uint64) (bool, error) {
return true, nil
}

// AddPingMethod .
func (m MockPeerManager) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) {}
func (m MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}

@ -143,13 +143,13 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
}

// Broadcast a message.
func (p *TestP2P) Broadcast(ctx context.Context, msg proto.Message) error {
func (p *TestP2P) Broadcast(_ context.Context, _ proto.Message) error {
p.BroadcastCalled = true
return nil
}

// BroadcastAttestation broadcasts an attestation.
func (p *TestP2P) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error {
func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
p.BroadcastCalled = true
return nil
}

@ -314,7 +314,7 @@ func (p *TestP2P) Peers() *peers.Status {
}

// FindPeersWithSubnet mocks the p2p func.
func (p *TestP2P) FindPeersWithSubnet(ctx context.Context, index uint64) (bool, error) {
func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ uint64) (bool, error) {
return false, nil
}

@ -337,7 +337,7 @@ func (p *TestP2P) MetadataSeq() uint64 {
}

// AddPingMethod mocks the p2p func.
func (p *TestP2P) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) {
func (p *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
// no-op
}

@ -352,7 +352,7 @@ func (p *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
}

// InterceptAccept .
func (p *TestP2P) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
func (p *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
return true
}

@ -13,7 +13,6 @@ import (
"path"
"time"

"github.com/btcsuite/btcd/btcec"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/pkg/errors"

@ -40,17 +39,17 @@ func SerializeENR(record *enr.Record) (string, error) {
}

func convertFromInterfacePrivKey(privkey crypto.PrivKey) *ecdsa.PrivateKey {
typeAssertedKey := (*ecdsa.PrivateKey)((*btcec.PrivateKey)(privkey.(*crypto.Secp256k1PrivateKey)))
typeAssertedKey := (*ecdsa.PrivateKey)(privkey.(*crypto.Secp256k1PrivateKey))
return typeAssertedKey
}

func convertToInterfacePrivkey(privkey *ecdsa.PrivateKey) crypto.PrivKey {
typeAssertedKey := crypto.PrivKey((*crypto.Secp256k1PrivateKey)((*btcec.PrivateKey)(privkey)))
typeAssertedKey := crypto.PrivKey((*crypto.Secp256k1PrivateKey)(privkey))
return typeAssertedKey
}

func convertToInterfacePubkey(pubkey *ecdsa.PublicKey) crypto.PubKey {
typeAssertedKey := crypto.PubKey((*crypto.Secp256k1PublicKey)((*btcec.PublicKey)(pubkey)))
typeAssertedKey := crypto.PubKey((*crypto.Secp256k1PublicKey)(pubkey))
return typeAssertedKey
}
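These three conversions drop the intermediate (*btcec.PrivateKey) and (*btcec.PublicKey) hops, which also lets the btcec import (and its BUILD dependency above) disappear. The hop was redundant because Go allows a direct pointer conversion whenever the pointed-to types share an identical underlying type, which appears to be the case for these secp256k1 wrappers. The general rule on a toy chain of named types:

package demo

type base struct{ n int }

type mid base // named type over base
type top mid  // named type over mid; underlying type is still struct{ n int }

func direct(t *top) *base {
	// (*base)((*mid)(t)) also compiles, but the intermediate conversion
	// is redundant: *top converts straight to *base because top and base
	// have the same underlying type.
	return (*base)(t)
}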
@ -4,6 +4,7 @@ import (
"context"

"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
)

// ensurePeerConnections will attempt to reestablish connection to the peers

@ -24,13 +25,17 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {

c := h.Network().ConnsToPeer(peer.ID)
if len(c) == 0 {
log.WithField("peer", peer.ID).Debug("No connections to peer, reconnecting")
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()
if err := h.Connect(ctx, *peer); err != nil {
if err := connectWithTimeout(ctx, h, peer); err != nil {
log.WithField("peer", peer.ID).WithField("addrs", peer.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
continue
}
}
}
}

func connectWithTimeout(ctx context.Context, h host.Host, peer *peer.AddrInfo) error {
log.WithField("peer", peer.ID).Debug("No connections to peer, reconnecting")
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()
return h.Connect(ctx, *peer)
}
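This hunk is the "remove defer use in loop" item from the change list: defer cancel() inside the range loop would not run until ensurePeerConnections returned, so every iteration's timeout context and timer stayed alive for the life of the call. Extracting the body into connectWithTimeout gives each defer its own function scope. The shape of the fix, sketched with invented helpers:

package demo

import (
	"context"
	"time"
)

func processAll(items []string) {
	for _, it := range items {
		// Wrong in a loop: a deferred cancel here would only run when
		// processAll returns, piling up live contexts per iteration:
		//
		//   ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		//   defer cancel()

		processOne(it) // right: the defer lives inside the helper
	}
}

func processOne(item string) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel() // released as soon as this iteration's work finishes
	_ = ctx
	_ = item
}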
@ -179,6 +179,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}

// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(obj interface{}) error {
func popProcessNoopFunc(_ interface{}) error {
return nil
}

@ -41,7 +41,7 @@ func TestProcessDeposit_OK(t *testing.T) {

valcount, err := helpers.ActiveValidatorCount(web3Service.preGenesisState, 0)
require.NoError(t, err)
require.Equal(t, int(1), int(valcount), "Did not get correct active validator count")
require.Equal(t, 1, int(valcount), "Did not get correct active validator count")
}

func TestProcessDeposit_InvalidMerkleBranch(t *testing.T) {

@ -230,7 +230,7 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {

valcount, err := helpers.ActiveValidatorCount(web3Service.preGenesisState, 0)
require.NoError(t, err)
require.Equal(t, int(0), int(valcount), "Did not get correct active validator count")
require.Equal(t, 0, int(valcount), "Did not get correct active validator count")
}
}

@ -158,7 +158,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) {
require.NoError(t, err)

pendingDeposits := web3Service.depositCache.PendingDeposits(context.Background(), nil /*blockNum*/)
require.Equal(t, int(2), len(pendingDeposits), "Unexpected number of deposits")
require.Equal(t, 2, len(pendingDeposits), "Unexpected number of deposits")

hook.Reset()
}

@ -74,7 +74,7 @@ type goodFetcher struct {
backend *backends.SimulatedBackend
}

func (g *goodFetcher) HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error) {
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
}

@ -91,7 +91,7 @@ func (g *goodFetcher) HeaderByHash(ctx context.Context, hash common.Hash) (*geth

}

func (g *goodFetcher) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
func (g *goodFetcher) HeaderByNumber(_ context.Context, number *big.Int) (*gethTypes.Header, error) {
if g.backend == nil {
return &gethTypes.Header{
Number: big.NewInt(15),

@ -110,7 +110,7 @@ func (g *goodFetcher) HeaderByNumber(ctx context.Context, number *big.Int) (*get
return header, nil
}

func (g *goodFetcher) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
func (g *goodFetcher) SyncProgress(_ context.Context) (*ethereum.SyncProgress, error) {
return nil, nil
}
beacon-chain/powchain/testing/faulty_mock.go
@ -29,7 +29,7 @@ func (f *FaultyMockPOWChain) LatestBlockHeight() *big.Int {
}

// BlockExists --
func (f *FaultyMockPOWChain) BlockExists(_ context.Context, hash common.Hash) (bool, *big.Int, error) {
func (f *FaultyMockPOWChain) BlockExists(_ context.Context, _ common.Hash) (bool, *big.Int, error) {
if f.HashesByHeight == nil {
return false, big.NewInt(1), errors.New("failed")
}

@ -38,12 +38,12 @@ func (f *FaultyMockPOWChain) BlockExists(_ context.Context, hash common.Hash) (b
}

// BlockHashByHeight --
func (f *FaultyMockPOWChain) BlockHashByHeight(_ context.Context, height *big.Int) (common.Hash, error) {
func (f *FaultyMockPOWChain) BlockHashByHeight(_ context.Context, _ *big.Int) (common.Hash, error) {
return [32]byte{}, errors.New("failed")
}

// BlockTimeByHeight --
func (f *FaultyMockPOWChain) BlockTimeByHeight(_ context.Context, height *big.Int) (uint64, error) {
func (f *FaultyMockPOWChain) BlockTimeByHeight(_ context.Context, _ *big.Int) (uint64, error) {
return 0, errors.New("failed")
}

@ -378,7 +378,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
func (bs *Server) AttestationPool(
ctx context.Context, req *ethpb.AttestationPoolRequest,
_ context.Context, req *ethpb.AttestationPoolRequest,
) (*ethpb.AttestationPoolResponse, error) {
if int(req.PageSize) > cmd.Get().MaxRPCPageSize {
return nil, status.Errorf(

@ -11,7 +11,6 @@ import (
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"

@ -30,7 +29,7 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {

numValidators := 128
ctx := context.Background()
headState := setupActiveValidators(t, db, numValidators)
headState := setupActiveValidators(t, numValidators)

m := &mock.ChainService{
Genesis: timeutils.Now().Add(time.Duration(-1*int64(headState.Slot()*params.BeaconConfig().SecondsPerSlot)) * time.Second),

@ -76,7 +75,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
helpers.ClearCache()

numValidators := 128
headState := setupActiveValidators(t, db, numValidators)
headState := setupActiveValidators(t, numValidators)

mixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(mixes); i++ {

@ -144,7 +143,7 @@ func TestRetrieveCommitteesForRoot(t *testing.T) {
ctx := context.Background()

numValidators := 128
headState := setupActiveValidators(t, db, numValidators)
headState := setupActiveValidators(t, numValidators)

m := &mock.ChainService{
Genesis: timeutils.Now().Add(time.Duration(-1*int64(headState.Slot()*params.BeaconConfig().SecondsPerSlot)) * time.Second),

@ -192,7 +191,7 @@ func TestRetrieveCommitteesForRoot(t *testing.T) {
assert.DeepEqual(t, wantedRes, receivedRes)
}

func setupActiveValidators(t *testing.T, db db.Database, count int) *stateTrie.BeaconState {
func setupActiveValidators(t *testing.T, count int) *stateTrie.BeaconState {
balances := make([]uint64, count)
validators := make([]*ethpb.Validator, 0, count)
for i := 0; i < count; i++ {

@ -208,9 +207,11 @@ func setupActiveValidators(t *testing.T, db db.Database, count int) *stateTrie.B
}
s := testutil.NewBeaconState()
if err := s.SetValidators(validators); err != nil {
t.Error(err)
return nil
}
if err := s.SetBalances(balances); err != nil {
t.Error(err)
return nil
}
return s
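Unlike the interface-bound mocks earlier, setupActiveValidators is a plain test helper, so its unused db.Database parameter can be deleted outright rather than blanked, and the db import goes with it; every call site then shrinks by one argument. The two cases side by side, on toy signatures of my own invention:

package demo

// Interface method: the signature is fixed, so blank the parameter.
type Checker interface{ Check(name string) bool }

type fake struct{}

func (fake) Check(_ string) bool { return true }

// Free helper: nothing pins the signature, so drop the parameter entirely
// (was: func newCount(db *DB, n int) int).
func newCount(n int) int { return n * 2 }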
@ -11,7 +11,7 @@ import (
)

// GetBeaconConfig retrieves the current configuration parameters of the beacon chain.
func (bs *Server) GetBeaconConfig(ctx context.Context, _ *ptypes.Empty) (*ethpb.BeaconConfig, error) {
func (bs *Server) GetBeaconConfig(_ context.Context, _ *ptypes.Empty) (*ethpb.BeaconConfig, error) {
conf := params.BeaconConfig()
val := reflect.ValueOf(conf).Elem()
numFields := val.Type().NumField()

@ -9,7 +9,7 @@ import (
)

// GetProtoArrayForkChoice returns proto array fork choice store.
func (ds *Server) GetProtoArrayForkChoice(ctx context.Context, _ *ptypes.Empty) (*pbrpc.ProtoArrayForkChoiceResponse, error) {
func (ds *Server) GetProtoArrayForkChoice(_ context.Context, _ *ptypes.Empty) (*pbrpc.ProtoArrayForkChoiceResponse, error) {
store := ds.HeadFetcher.ProtoArrayStore()

nodes := store.Nodes()

@ -14,7 +14,7 @@ import (
)

// GetPeer returns the data known about the peer defined by the provided peer id.
func (ds *Server) GetPeer(ctx context.Context, peerReq *ethpb.PeerRequest) (*pbrpc.DebugPeerResponse, error) {
func (ds *Server) GetPeer(_ context.Context, peerReq *ethpb.PeerRequest) (*pbrpc.DebugPeerResponse, error) {
pid, err := peer.Decode(peerReq.PeerId)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Unable to parse provided peer id: %v", err)

@ -24,8 +24,8 @@ func (ds *Server) GetPeer(ctx context.Context, peerReq *ethpb.PeerRequest) (*pbr
// ListPeers returns all peers known to the host node, regardless of whether they are connected/
// disconnected.
func (ds *Server) ListPeers(ctx context.Context, _ *types.Empty) (*pbrpc.DebugPeerResponses, error) {
responses := []*pbrpc.DebugPeerResponse{}
func (ds *Server) ListPeers(_ context.Context, _ *types.Empty) (*pbrpc.DebugPeerResponses, error) {
var responses []*pbrpc.DebugPeerResponse
for _, pid := range ds.PeersFetcher.Peers().All() {
resp, err := ds.getPeer(pid)
if err != nil {

@ -101,7 +101,7 @@ func (ds *Server) getPeer(pid peer.ID) (*pbrpc.DebugPeerResponse, error) {
PeerLatency: uint64(peerStore.LatencyEWMA(pid).Milliseconds()),
}
addresses := peerStore.Addrs(pid)
stringAddrs := []string{}
var stringAddrs []string
if addr != nil {
stringAddrs = append(stringAddrs, addr.String())
}

@ -34,7 +34,7 @@ type Server struct {

// SetLoggingLevel of a beacon node according to a request type,
// either INFO, DEBUG, or TRACE.
func (ds *Server) SetLoggingLevel(ctx context.Context, req *pbrpc.LoggingLevelRequest) (*ptypes.Empty, error) {
func (ds *Server) SetLoggingLevel(_ context.Context, req *pbrpc.LoggingLevelRequest) (*ptypes.Empty, error) {
var verbosity string
switch req.Level {
case pbrpc.LoggingLevelRequest_INFO:

@ -37,7 +37,7 @@ type Server struct {
}

// GetSyncStatus checks the current network sync status of the node.
func (ns *Server) GetSyncStatus(ctx context.Context, _ *ptypes.Empty) (*ethpb.SyncStatus, error) {
func (ns *Server) GetSyncStatus(_ context.Context, _ *ptypes.Empty) (*ethpb.SyncStatus, error) {
return &ethpb.SyncStatus{
Syncing: ns.SyncChecker.Syncing(),
}, nil

@ -71,7 +71,7 @@ func (ns *Server) GetGenesis(ctx context.Context, _ *ptypes.Empty) (*ethpb.Genes
}

// GetVersion checks the version information of the beacon node.
func (ns *Server) GetVersion(ctx context.Context, _ *ptypes.Empty) (*ethpb.Version, error) {
func (ns *Server) GetVersion(_ context.Context, _ *ptypes.Empty) (*ethpb.Version, error) {
return &ethpb.Version{
Version: version.GetVersion(),
}, nil

@ -82,7 +82,7 @@ func (ns *Server) GetVersion(ctx context.Context, _ *ptypes.Empty) (*ethpb.Versi
// Any service not present in this list may return UNIMPLEMENTED or
// PERMISSION_DENIED. The server may also support fetching services by grpc
// reflection.
func (ns *Server) ListImplementedServices(ctx context.Context, _ *ptypes.Empty) (*ethpb.ImplementedServices, error) {
func (ns *Server) ListImplementedServices(_ context.Context, _ *ptypes.Empty) (*ethpb.ImplementedServices, error) {
serviceInfo := ns.Server.GetServiceInfo()
serviceNames := make([]string, 0, len(serviceInfo))
for svc := range serviceInfo {

@ -95,8 +95,8 @@ func (ns *Server) ListImplementedServices(ctx context.Context, _ *ptypes.Empty)
}

// GetHost returns the p2p data on the current local and host peer.
func (ns *Server) GetHost(ctx context.Context, _ *ptypes.Empty) (*ethpb.HostData, error) {
stringAddr := []string{}
func (ns *Server) GetHost(_ context.Context, _ *ptypes.Empty) (*ethpb.HostData, error) {
var stringAddr []string
for _, addr := range ns.PeerManager.Host().Addrs() {
stringAddr = append(stringAddr, addr.String())
}

@ -118,7 +118,7 @@ func (ns *Server) GetHost(ctx context.Context, _ *ptypes.Empty) (*ethpb.HostData
}

// GetPeer returns the data known about the peer defined by the provided peer id.
func (ns *Server) GetPeer(ctx context.Context, peerReq *ethpb.PeerRequest) (*ethpb.Peer, error) {
func (ns *Server) GetPeer(_ context.Context, peerReq *ethpb.PeerRequest) (*ethpb.Peer, error) {
pid, err := peer.Decode(peerReq.PeerId)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Unable to parse provided peer id: %v", err)

@ -17,16 +17,16 @@ import (
// SubmitAggregateSelectionProof is called by a validator when its assigned to be an aggregator.
// The aggregator submits the selection proof to obtain the aggregated attestation
// object to sign over.
func (as *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
ctx, span := trace.StartSpan(ctx, "AggregatorServer.SubmitAggregateSelectionProof")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))

if as.SyncChecker.Syncing() {
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

st, err := as.HeadFetcher.HeadState(ctx)
st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine head state: %v", err)
}

@ -59,14 +59,14 @@ func (as *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
return nil, status.Errorf(codes.InvalidArgument, "Validator is not an aggregator")
}

if err := as.AttPool.AggregateUnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex); err != nil {
if err := vs.AttPool.AggregateUnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex); err != nil {
return nil, status.Errorf(codes.Internal, "Could not aggregate unaggregated attestations")
}
aggregatedAtts := as.AttPool.AggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
aggregatedAtts := vs.AttPool.AggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)

// Filter out the best aggregated attestation (ie. the one with the most aggregated bits).
if len(aggregatedAtts) == 0 {
aggregatedAtts = as.AttPool.UnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
aggregatedAtts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
if len(aggregatedAtts) == 0 {
return nil, status.Errorf(codes.Internal, "Could not find attestation for slot and committee in pool")
}

@ -108,7 +108,7 @@ func (as *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.

// SubmitSignedAggregateSelectionProof is called by a validator to broadcast a signed
// aggregated and proof object.
func (as *Server) SubmitSignedAggregateSelectionProof(ctx context.Context, req *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
func (vs *Server) SubmitSignedAggregateSelectionProof(ctx context.Context, req *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return nil, status.Error(codes.InvalidArgument, "Signed aggregate request can't be nil")

@ -120,11 +120,11 @@ func (as *Server) SubmitSignedAggregateSelectionProof(ctx context.Context, req *
}

// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot, as.GenesisTimeFetcher.GenesisTime()); err != nil {
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot, vs.GenesisTimeFetcher.GenesisTime()); err != nil {
return nil, status.Error(codes.InvalidArgument, "Attestation slot is no longer valid from current time")
}

if err := as.P2P.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
if err := vs.P2P.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast signed aggregated attestation: %v", err)
}

@ -203,7 +203,7 @@ func assignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorStatus) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
assignedIdxs := []uint64{}
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconNetworkConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))

@ -348,7 +348,9 @@ func chosenEth1DataMajorityVote(votes []eth1DataSingleVote) eth1DataAggregatedVo
voteCount = append(voteCount, eth1DataAggregatedVote{data: singleVote, votes: 1})
}
}

if len(voteCount) == 0 {
return eth1DataAggregatedVote{}
}
currentVote := voteCount[0]
for _, aggregatedVote := range voteCount[1:] {
// Choose new eth1data if it has more votes or the same number of votes with a bigger block height.
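The added guard in chosenEth1DataMajorityVote is the commit's "nilness check": with no votes tallied, voteCount[0] would panic with an index-out-of-range, so the function now returns the zero value first. The pattern in isolation, on an invented vote type:

package demo

type vote struct{ votes uint64 }

// best returns the zero value instead of panicking on empty input.
func best(voteCount []vote) vote {
	if len(voteCount) == 0 {
		return vote{} // guard: voteCount[0] below would otherwise panic
	}
	top := voteCount[0]
	for _, v := range voteCount[1:] {
		if v.votes > top.votes {
			top = v
		}
	}
	return top
}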
|
||||
@ -557,7 +559,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1DataHeight *big.
|
||||
}
|
||||
|
||||
upToEth1DataDeposits := vs.DepositFetcher.AllDeposits(ctx, canonicalEth1DataHeight)
|
||||
depositData := [][]byte{}
|
||||
var depositData [][]byte
|
||||
for _, dep := range upToEth1DataDeposits {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
|
@ -131,7 +131,7 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
|
||||
}
|
||||
|
||||
// DomainData fetches the current domain version information from the beacon state.
|
||||
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
func (vs *Server) DomainData(_ context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
fork := vs.ForkFetcher.CurrentFork()
|
||||
headGenesisValidatorRoot := vs.HeadFetcher.HeadGenesisValidatorRoot()
|
||||
dv, err := helpers.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorRoot[:])
|
||||
@ -145,7 +145,7 @@ func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest)
|
||||
|
||||
// CanonicalHead of the current beacon chain. This method is requested on-demand
|
||||
// by a validator when it is their time to propose or attest.
|
||||
func (vs *Server) CanonicalHead(ctx context.Context, req *ptypes.Empty) (*ethpb.SignedBeaconBlock, error) {
|
||||
func (vs *Server) CanonicalHead(ctx context.Context, _ *ptypes.Empty) (*ethpb.SignedBeaconBlock, error) {
|
||||
headBlk, err := vs.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head block: %v", err)
|
||||
@ -157,7 +157,7 @@ func (vs *Server) CanonicalHead(ctx context.Context, req *ptypes.Empty) (*ethpb.
|
||||
// has started its runtime and validators begin their responsibilities. If it has not, it then
|
||||
// subscribes to an event stream triggered by the powchain service whenever the ChainStart log does
|
||||
// occur in the Deposit Contract on ETH 1.0.
|
||||
func (vs *Server) WaitForChainStart(req *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForChainStartServer) error {
|
||||
func (vs *Server) WaitForChainStart(_ *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForChainStartServer) error {
|
||||
head, err := vs.HeadFetcher.HeadState(stream.Context())
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
|
||||
@ -212,7 +212,7 @@ func (vs *Server) WaitForChainStart(req *ptypes.Empty, stream ethpb.BeaconNodeVa
|
||||
|
||||
// WaitForSynced subscribes to the state channel and ends the stream when the state channel
|
||||
// indicates the beacon node has been initialized and is ready
|
||||
func (vs *Server) WaitForSynced(req *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForSyncedServer) error {
|
||||
func (vs *Server) WaitForSynced(_ *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForSyncedServer) error {
|
||||
head, err := vs.HeadFetcher.HeadState(stream.Context())
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
|
||||
|
@ -318,7 +318,7 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator
|
||||
b.sharedFieldReferences[validators] = &reference{refs: 1}
|
||||
}
|
||||
b.lock.Unlock()
|
||||
changedVals := []uint64{}
|
||||
var changedVals []uint64
|
||||
for i, val := range v {
|
||||
changed, err := f(i, val)
|
||||
if err != nil {
|
||||
|
@ -144,7 +144,7 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(obj interface{}) error {
|
||||
func popProcessNoopFunc(_ interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ func (s *State) ForceCheckpoint(ctx context.Context, root []byte) error {
|
||||
|
||||
// SaveStateSummary saves the relevant state summary for a block and its corresponding state slot in the
|
||||
// state summary cache.
|
||||
func (s *State) SaveStateSummary(ctx context.Context, blk *ethpb.SignedBeaconBlock, blockRoot [32]byte) {
|
||||
func (s *State) SaveStateSummary(_ context.Context, blk *ethpb.SignedBeaconBlock, blockRoot [32]byte) {
|
||||
// Save State summary
|
||||
s.stateSummaryCache.Put(blockRoot, &pb.StateSummary{
|
||||
Slot: blk.Block.Slot,
|
||||
|
@@ -70,7 +70,7 @@ func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName st
         changedIndices = append(changedIndices, maxChangedIndex+1)
     }
     for i := 0; i < len(changedIndices); i++ {
-        rt, err = recomputeRoot(changedIndices[i], chunks, length, fieldName, hashFunc)
+        rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
         if err != nil {
             return [32]byte{}, err
         }
@@ -133,8 +133,7 @@ func merkleizeTrieLeaves(layers [][][32]byte, hashLayer [][32]byte,
     return layers, hashLayer
 }

-func recomputeRoot(idx int, chunks [][32]byte, length uint64,
-    fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
+func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
     items, ok := layersCache[fieldName]
     if !ok {
         return [32]byte{}, errors.New("could not recompute root as there was no cache found")
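Where every call site is in hand, as with the unexported `recomputeRoot` losing its unused `length` argument above, the parameter is deleted outright rather than blanked. A hypothetical before/after sketch of that kind of cleanup:

package main

import "fmt"

// Before: an unused size parameter was threaded through every call.
//   func sum(vals []int, size uint64) int
// After: the dead parameter is dropped and each caller loses one argument.
func sum(vals []int) int {
    total := 0
    for _, v := range vals {
        total += v
    }
    return total
}

func main() {
    fmt.Println(sum([]int{1, 2, 3})) // 6
}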
@@ -16,7 +16,7 @@ import (
 // PendingAttestationRoot describes a method from which the hash tree root
 // of a pending attestation is returned.
 func PendingAttestationRoot(hasher htrutils.HashFn, att *pb.PendingAttestation) ([32]byte, error) {
-    fieldRoots := [][32]byte{}
+    var fieldRoots [][32]byte
     if att != nil {
         // Bitfield.
         aggregationRoot, err := htrutils.BitlistRoot(hasher, att.AggregationBits, params.BeaconConfig().MaxValidatorsPerCommittee)
@@ -64,7 +64,7 @@ func ValidatorBalancesRoot(balances []uint64) ([32]byte, error) {
 // ValidatorRoot describes a method from which the hash tree root
 // of a validator is returned.
 func ValidatorRoot(hasher htrutils.HashFn, validator *ethpb.Validator) ([32]byte, error) {
-    fieldRoots := [][32]byte{}
+    var fieldRoots [][32]byte
     if validator != nil {
         pubkey := bytesutil.ToBytes48(validator.PublicKey)
         withdrawCreds := bytesutil.ToBytes32(validator.WithdrawalCredentials)
@@ -54,7 +54,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
     }
     s.pendingAttsLock.RUnlock()

-    pendingRoots := [][32]byte{}
+    var pendingRoots [][32]byte
     randGen := rand.NewGenerator()
     for _, bRoot := range roots {
         s.pendingAttsLock.RLock()
@@ -49,7 +49,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
         return errors.Wrap(err, "could not validate pending slots")
     }
     slots := s.sortedPendingSlots()
-    parentRoots := [][32]byte{}
+    var parentRoots [][32]byte

     span.AddAttributes(
         trace.Int64Attribute("numSlots", int64(len(slots))),
@@ -113,18 +113,18 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
     b1r := [32]byte{'b'}

     r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r)
-    require.Equal(t, int(1), len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was not added to map")
+    require.Equal(t, 1, len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was not added to map")

     r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r)
-    require.Equal(t, int(1), len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was not added to map")
+    require.Equal(t, 1, len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was not added to map")

     // Add duplicate block which should not be saved.
     r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r)
-    require.Equal(t, int(1), len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was added to map")
+    require.Equal(t, 1, len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was added to map")

     // Add duplicate block which should not be saved.
     r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r)
-    require.Equal(t, int(1), len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was added to map")
+    require.Equal(t, 1, len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was added to map")

 }
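The four `int(1)` → `1` edits here (and the later `uint8(expectedCode)` → `expectedCode`) remove conversions that change nothing: `1` is an untyped constant that already adapts to the type the comparison needs, and `expectedCode` already has type `uint8`. A quick illustration:

package main

import "fmt"

func main() {
    m := map[int][]string{7: {"a"}}

    // len returns an int, and the untyped constant 1 compares against it
    // directly; int(1) converts a value to the type it already has.
    fmt.Println(len(m[7]) == 1)      // true
    fmt.Println(len(m[7]) == int(1)) // true, but the conversion is redundant

    var code uint8 = 3
    // Likewise uint8(code) would be a no-op: code is already a uint8.
    fmt.Println(code == 3) // true
}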
@@ -379,7 +379,7 @@ func TestService_BatchRootRequest(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        out := [][32]byte{}
+        var out [][32]byte
         assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, &out))
         assert.DeepEqual(t, expectedRoots, out, "Did not receive expected message")
         response := []*ethpb.SignedBeaconBlock{b2, b3, b4, b5}
@@ -52,7 +52,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
         for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
-            expectSuccess(t, r, stream)
+            expectSuccess(t, stream)
             res := testutil.NewBeaconBlock()
             assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
             if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
@@ -117,7 +117,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
     prevSlot := uint64(0)
     require.Equal(t, uint64(len(expectedRoots)), req.Count, "Number of roots not expected")
     for i, j := req.StartSlot, 0; i < req.StartSlot+req.Count*req.Step; i += req.Step {
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         res := &ethpb.SignedBeaconBlock{}
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
         if res.Block.Slot < prevSlot {
@@ -177,12 +177,12 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
         // check for genesis block
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         res := &ethpb.SignedBeaconBlock{}
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
         assert.Equal(t, uint64(0), res.Block.Slot, "genesis block was not returned")
         for i := req.StartSlot + req.Step; i < req.Count*req.Step; i += req.Step {
-            expectSuccess(t, r, stream)
+            expectSuccess(t, stream)
             res := &ethpb.SignedBeaconBlock{}
             assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
         }
@@ -219,7 +219,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
             return
         }
         for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
-            expectSuccess(t, r, stream)
+            expectSuccess(t, stream)
             res := testutil.NewBeaconBlock()
             assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
             if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
@@ -434,7 +434,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
             name: "Valid Request",
             req: &pb.BeaconBlocksByRangeRequest{
                 Step:      1,
-                Count:     uint64(params.BeaconNetworkConfig().MaxRequestBlocks) - 1,
+                Count:     params.BeaconNetworkConfig().MaxRequestBlocks - 1,
                 StartSlot: 50,
             },
             errorToLog: "validation failed with valid params",
@@ -472,7 +472,7 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) {
         defer wg.Done()
         blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
         for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
-            expectSuccess(t, r, stream)
+            expectSuccess(t, stream)
             blk := testutil.NewBeaconBlock()
             assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, blk))
             if (blk.Block.Slot-req.StartSlot)%req.Step != 0 {
@@ -53,7 +53,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
         for i := range blkRoots {
-            expectSuccess(t, r, stream)
+            expectSuccess(t, stream)
             res := testutil.NewBeaconBlock()
             assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, &res))
             if res.Block.Slot != uint64(i+1) {
@@ -119,7 +119,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        out := [][32]byte{}
+        var out [][32]byte
         assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, &out))
         assert.DeepEqual(t, expectedRoots, out, "Did not receive expected message")
         response := []*ethpb.SignedBeaconBlock{blockB, blockA}
@@ -30,7 +30,7 @@ var goodByes = map[uint64]string{
 const flushDelay = 50 * time.Millisecond

 // goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
-func (s *Service) goodbyeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
+func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
     defer func() {
         if err := stream.Close(); err != nil {
             log.WithError(err).Error("Failed to close stream")
@@ -39,7 +39,7 @@ func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectResetStream(t, r, stream)
+        expectResetStream(t, stream)
     })
     stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
     require.NoError(t, err)
@@ -12,7 +12,7 @@ import (
 )

 // metaDataHandler reads the incoming metadata rpc request from the peer.
-func (s *Service) metaDataHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
+func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
     defer func() {
         if err := stream.Close(); err != nil {
             log.WithError(err).Debug("Failed to close stream")
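The goodbye and metadata handlers above share a close-and-log pattern that these signature changes leave untouched: the stream is closed in a deferred closure, and any Close error is logged rather than returned so it cannot mask the handler's own result. A standalone sketch using an io.ReadCloser as a stand-in for the libp2p stream (the logger here is the stdlib's, not Prysm's):

package main

import (
    "io"
    "log"
    "strings"
)

// handle processes a request and guarantees the stream is closed, logging
// (rather than returning) any error from Close.
func handle(stream io.ReadCloser) error {
    defer func() {
        if err := stream.Close(); err != nil {
            log.Printf("Failed to close stream: %v", err)
        }
    }()
    buf := make([]byte, 8)
    if _, err := stream.Read(buf); err != nil && err != io.EOF {
        return err
    }
    return nil
}

func main() {
    _ = handle(io.NopCloser(strings.NewReader("ping")))
}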
@@ -47,7 +47,7 @@ func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         out := new(pb.MetaData)
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
         assert.DeepEqual(t, p1.LocalMetadata, out, "Metadata unequal")
@@ -15,7 +15,7 @@ import (
 )

 // pingHandler reads the incoming ping rpc message from the peer.
-func (s *Service) pingHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
+func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
     SetRPCStreamDeadlines(stream)

     m, ok := msg.(*uint64)
@@ -53,7 +53,7 @@ func TestPingRPCHandler_ReceivesPing(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         out := new(uint64)
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
         assert.Equal(t, uint64(2), *out)
@@ -60,7 +60,7 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         out := &pb.Status{}
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
         if !bytes.Equal(out.FinalizedRoot, root[:]) {
@@ -126,7 +126,7 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         out := &pb.Status{}
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
         if !bytes.Equal(out.FinalizedRoot, root[:]) {
@@ -206,7 +206,7 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
     wg.Add(1)
     p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
         defer wg.Done()
-        expectSuccess(t, r, stream)
+        expectSuccess(t, stream)
         out := &pb.Status{}
         assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
         expected := &pb.Status{
@@ -23,7 +23,7 @@ func init() {
 }

 // expectSuccess status code from a stream in regular sync.
-func expectSuccess(t *testing.T, r *Service, stream network.Stream) {
+func expectSuccess(t *testing.T, stream network.Stream) {
     code, errMsg, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
     require.NoError(t, err)
     require.Equal(t, uint8(0), code, "Received non-zero response code")
@@ -35,12 +35,12 @@ func expectFailure(t *testing.T, expectedCode uint8, expectedErrorMsg string, st
     code, errMsg, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
     require.NoError(t, err)
     require.NotEqual(t, uint8(0), code, "Expected request to fail but got a 0 response code")
-    require.Equal(t, uint8(expectedCode), code, "Received incorrect response code")
+    require.Equal(t, expectedCode, code, "Received incorrect response code")
     require.Equal(t, expectedErrorMsg, errMsg)
 }

 // expectResetStream status code from a stream in regular sync.
-func expectResetStream(t *testing.T, r *Service, stream network.Stream) {
+func expectResetStream(t *testing.T, stream network.Stream) {
     expectedErr := "stream reset"
     _, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
     require.ErrorContains(t, expectedErr, err)
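Dropping the unused `*Service` argument from `expectSuccess` and `expectResetStream` is what drives the mechanical `expectSuccess(t, r, stream)` → `expectSuccess(t, stream)` rewrites in all the sync tests above. The same cleanup on a generic helper (hypothetical names, in a *_test.go file):

package demo

import "testing"

// Before: func assertPositive(t *testing.T, svc *Service, n int)
// After: the helper only ever used t and n, so svc is removed and every
// call site shrinks by one argument.
func assertPositive(t *testing.T, n int) {
    t.Helper() // report failures at the caller's line
    if n <= 0 {
        t.Fatalf("expected positive value, got %d", n)
    }
}

func TestAssertPositive(t *testing.T) {
    assertPositive(t, 3)
}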
@@ -29,7 +29,7 @@ const pubsubMessageTimeout = 30 * time.Second
 type subHandler func(context.Context, proto.Message) error

 // noopValidator is a no-op that only decodes the message, but does not check its contents.
-func (s *Service) noopValidator(ctx context.Context, _ peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
+func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
     m, err := s.decodePubsubMessage(msg)
     if err != nil {
         log.WithError(err).Debug("Failed to decode message")
@@ -91,6 +91,7 @@ func (s *Service) subscribe(topic string, validator pubsub.ValidatorEx, handle s
     return s.subscribeWithBase(base, s.addDigestToTopic(topic), validator, handle)
 }

+// TODO(7437): Refactor this method to remove unused arg "base".
 func (s *Service) subscribeWithBase(base proto.Message, topic string, validator pubsub.ValidatorEx, handle subHandler) *pubsub.Subscription {
     topic += s.p2p.Encoding().ProtocolSuffix()
     log := log.WithField("topic", topic)
@@ -14,7 +14,7 @@ import (

 // beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
 // attestation pool for processing.
-func (s *Service) beaconAggregateProofSubscriber(ctx context.Context, msg proto.Message) error {
+func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
     a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof)
     if !ok {
         return fmt.Errorf("message was not type *eth.SignedAggregateAttestationAndProof, type=%T", msg)
@@ -15,7 +15,7 @@ import (
     "github.com/prysmaticlabs/prysm/shared/sliceutil"
 )

-func (s *Service) committeeIndexBeaconAttestationSubscriber(ctx context.Context, msg proto.Message) error {
+func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
     a, ok := msg.(*eth.Attestation)
     if !ok {
         return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
@@ -53,7 +53,7 @@ func (s *Service) persistentSubnetIndices() []uint64 {
 func (s *Service) aggregatorSubnetIndices(currentSlot uint64) []uint64 {
     endEpoch := helpers.SlotToEpoch(currentSlot) + 1
     endSlot := endEpoch * params.BeaconConfig().SlotsPerEpoch
-    commIds := []uint64{}
+    var commIds []uint64
     for i := currentSlot; i <= endSlot; i++ {
         commIds = append(commIds, cache.SubnetIDs.GetAggregatorSubnetIDs(i)...)
     }
@@ -63,7 +63,7 @@ func (s *Service) aggregatorSubnetIndices(currentSlot uint64) []uint64 {
 func (s *Service) attesterSubnetIndices(currentSlot uint64) []uint64 {
     endEpoch := helpers.SlotToEpoch(currentSlot) + 1
     endSlot := endEpoch * params.BeaconConfig().SlotsPerEpoch
-    commIds := []uint64{}
+    var commIds []uint64
     for i := currentSlot; i <= endSlot; i++ {
         commIds = append(commIds, cache.SubnetIDs.GetAttesterSubnetIDs(i)...)
     }
@@ -12,8 +12,8 @@ import (
 func TestSortedObj_SortBlocksRoots(t *testing.T) {
     source := rand.NewSource(33)
     randGen := rand.New(source)
-    blks := []*ethpb.SignedBeaconBlock{}
-    roots := [][32]byte{}
+    var blks []*ethpb.SignedBeaconBlock
+    var roots [][32]byte
     randFunc := func() int64 {
         return randGen.Int63n(50)
     }
@@ -45,8 +45,8 @@ func TestSortedObj_SortBlocksRoots(t *testing.T) {
 func TestSortedObj_NoDuplicates(t *testing.T) {
     source := rand.NewSource(33)
     randGen := rand.New(source)
-    blks := []*ethpb.SignedBeaconBlock{}
-    roots := [][32]byte{}
+    var blks []*ethpb.SignedBeaconBlock
+    var roots [][32]byte
     randFunc := func() int64 {
         return randGen.Int63n(50)
     }
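Both sorting tests seed math/rand with a fixed source (`rand.NewSource(33)`), so the randomized input is identical on every run and the assertions stay deterministic. A minimal sketch of the pattern:

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    // A fixed seed makes the generated sequence reproducible across runs,
    // which keeps randomized tests deterministic.
    randGen := rand.New(rand.NewSource(33))
    for i := 0; i < 3; i++ {
        fmt.Println(randGen.Int63n(50))
    }
}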
@@ -29,7 +29,7 @@ const depositGasLimit = 4000000

 // StartValidatorClients starts the configured amount of validators, also sending and mining their validator deposits.
 // Should only be used on initialization.
-func StartValidatorClients(t *testing.T, config *types.E2EConfig, keystorePath string) {
+func StartValidatorClients(t *testing.T, config *types.E2EConfig) {
     // Always using genesis count since using anything else would be difficult to test for.
     validatorNum := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
     beaconNodeNum := e2e.TestParams.BeaconNodeCount
@@ -43,7 +43,7 @@ func runEndToEndTest(t *testing.T, config *types.E2EConfig) {
     go components.SendAndMineDeposits(t, keystorePath, minGenesisActiveCount, 0)
     bootnodeENR := components.StartBootnode(t)
     components.StartBeaconNodes(t, config, bootnodeENR)
-    components.StartValidatorClients(t, config, keystorePath)
+    components.StartValidatorClients(t, config)
     defer helpers.LogOutput(t, config)
     if config.UsePprof {
         defer func() {
@@ -35,7 +35,7 @@ func afterNthEpoch(afterEpoch uint64) func(uint64) bool {
 }

 // All epochs.
-func allEpochs(currentEpoch uint64) bool {
+func allEpochs(_ uint64) bool {
     return true
 }

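`allEpochs` cannot simply lose its parameter the way `recomputeRoot` lost `length`: it has to keep the `func(uint64) bool` shape expected wherever epoch predicates are passed around (compare `afterNthEpoch` above), so the argument is blanked instead. A sketch of that constraint, with the predicate logic simplified:

package main

import "fmt"

// epochPredicate is the shape every policy must satisfy.
type epochPredicate func(epoch uint64) bool

// afterNthEpoch matches only epochs strictly greater than n.
func afterNthEpoch(n uint64) epochPredicate {
    return func(epoch uint64) bool { return epoch > n }
}

// allEpochs ignores its argument but must still accept one to remain a
// valid epochPredicate; the blank identifier makes that explicit.
func allEpochs(_ uint64) bool { return true }

func main() {
    policies := []epochPredicate{afterNthEpoch(2), allEpochs}
    for _, p := range policies {
        fmt.Println(p(1), p(5))
    }
}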
@@ -39,9 +39,6 @@ var BootNodeLogFileName = "bootnode.log"
 // BeaconNodeLogFileName is the file name used for the beacon chain node logs.
 var BeaconNodeLogFileName = "beacon-%d.log"

-// BeaconNodeCPUProfileFileName is the file name used for the beacon chain cpu profiles.
-var BeaconNodeCPUProfileFileName = "beacon-cpu-%d.out"
-
 // SlasherLogFileName is the file name used for the slasher client logs.
 var SlasherLogFileName = "slasher-%d.log"

@@ -1,10 +1,6 @@
 package fuzz

 import (
-    "os"
-    "strings"
-
     stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
     "github.com/prysmaticlabs/prysm/shared/featureconfig"
 )

@@ -13,28 +9,3 @@ func init() {
         SkipBLSVerify: true,
     })
 }
-
-func fail(err error) ([]byte, bool) {
-    shouldPanic := false
-    if val, ok := os.LookupEnv("PANIC_ON_ERROR"); ok {
-        shouldPanic = strings.ToLower(val) == "true"
-    }
-    if shouldPanic {
-        panic(err)
-    }
-    return nil, false
-}
-
-func success(post *stateTrie.BeaconState) ([]byte, bool) {
-    if val, ok := os.LookupEnv("RETURN_SSZ_POST_STATE"); ok {
-        if strings.ToLower(val) != "true" {
-            return nil, true
-        }
-    }
-
-    result, err := post.InnerStateUnsafe().MarshalSSZ()
-    if err != nil {
-        panic(err)
-    }
-    return result, true
-}
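The deleted `fail` and `success` helpers gated their behavior on environment variables via `os.LookupEnv`, which distinguishes "unset" from "set to an empty or false value". The pattern is a common one for fuzzing harness toggles; a self-contained sketch (the helper name here is hypothetical, the PANIC_ON_ERROR variable comes from the removed code):

package main

import (
    "fmt"
    "os"
    "strings"
)

// envBool reports whether the named environment variable is set to "true",
// case-insensitively.
func envBool(name string) bool {
    val, ok := os.LookupEnv(name)
    return ok && strings.ToLower(val) == "true"
}

func main() {
    if envBool("PANIC_ON_ERROR") {
        fmt.Println("would panic on error")
    } else {
        fmt.Println("errors are swallowed")
    }
}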
go.mod (7 changed lines)
@@ -9,7 +9,6 @@ require (
     github.com/aws/aws-sdk-go v1.33.15 // indirect
     github.com/bazelbuild/buildtools v0.0.0-20200528175155-f4e8394f069d
     github.com/bazelbuild/rules_go v0.23.2
     github.com/btcsuite/btcd v0.20.1-beta
     github.com/cespare/cp v1.1.1 // indirect
-    github.com/confluentinc/confluent-kafka-go v1.4.2 // indirect
     github.com/d4l3k/messagediff v1.2.1
@@ -34,6 +33,7 @@ require (
     github.com/golang/protobuf v1.4.2
     github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26
     github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
+    github.com/google/gopacket v1.1.18 // indirect
     github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
     github.com/google/uuid v1.1.1
     github.com/gorilla/websocket v1.4.2
@@ -59,9 +59,6 @@ require (
     github.com/libp2p/go-libp2p-circuit v0.3.1
     github.com/libp2p/go-libp2p-core v0.6.1
-    github.com/libp2p/go-libp2p-crypto v0.1.0
-    github.com/libp2p/go-libp2p-host v0.1.0
     github.com/libp2p/go-libp2p-kad-dht v0.8.3
-    github.com/libp2p/go-libp2p-net v0.1.0
     github.com/libp2p/go-libp2p-noise v0.1.1
     github.com/libp2p/go-libp2p-pubsub v0.3.3
     github.com/libp2p/go-libp2p-secio v0.2.2
@@ -69,8 +66,10 @@ require (
     github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291 // indirect
     github.com/libp2p/go-mplex v0.1.3 // indirect
     github.com/libp2p/go-reuseport-transport v0.0.4 // indirect
+    github.com/libp2p/go-sockaddr v0.1.0 // indirect
     github.com/libp2p/go-yamux v1.3.8 // indirect
     github.com/logrusorgru/aurora v2.0.3+incompatible
+    github.com/lunixbochs/vtclean v1.0.0 // indirect
     github.com/manifoldco/promptui v0.7.0
     github.com/minio/highwayhash v1.0.0
     github.com/minio/sha256-simd v0.1.1
Some files were not shown because too many files have changed in this diff.