prysm-pulse/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go

package sync

import (
"context"
"io"
"sync"
"testing"
"time"
"github.com/kevinms/leakybucket-go"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/protocol"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
db2 "github.com/prysmaticlabs/prysm/beacon-chain/db"
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/interfaces"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
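
// TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks verifies that the
// by-range handler streams back every block in the requested range, and that
// serving the request deducts exactly req.Count from the peer's remaining
// rate-limit capacity.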
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
d := db.SetupDB(t)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 64,
Count: 16,
}
// Populate the database with blocks that would match the request.
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = i
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
}
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
expectSuccess(t, stream)
res := testutil.NewBeaconBlock()
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
}
}
})
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
require.NoError(t, err)
// Ensure the rate limiter deducted only the served block count from the peer's capacity.
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(req.Count*10 - req.Count)
require.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
}
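
// TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack verifies that a request
// for a single block yields exactly one block followed by EOF, even though
// the database holds many more blocks in range.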
func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
d := db.SetupDB(t)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 0,
Step: 1,
Count: 200,
}
genRoot := [32]byte{}
// Populate the database with blocks that would match the request.
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = i
if i == 0 {
rt, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
genRoot = rt
}
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
}
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot))
// Start service with 2000 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
var wg sync.WaitGroup
wg.Add(1)
// Use a new single-block request to check that only one block is returned.
newReq := &pb.BeaconBlocksByRangeRequest{StartSlot: 0, Step: 1, Count: 1}
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
for i := newReq.StartSlot; i < newReq.StartSlot.Add(newReq.Count*newReq.Step); i += types.Slot(newReq.Step) {
expectSuccess(t, stream)
res := testutil.NewBeaconBlock()
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
if res.Block.Slot.SubSlot(newReq.StartSlot).Mod(newReq.Step) != 0 {
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
}
// Expect EOF
b := make([]byte, 1)
_, err := stream.Read(b)
require.ErrorContains(t, io.EOF.Error(), err)
}
})
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
err = r.beaconBlocksByRangeRPCHandler(context.Background(), newReq, stream1)
require.NoError(t, err)
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
}
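
// TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks verifies that
// responses arrive in ascending slot order with the expected roots, even
// though the blocks were written to the database in reverse order.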
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
d := db.SetupDB(t)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 200,
Step: 21,
Count: 33,
}
endSlot := req.StartSlot.Add(req.Step * (req.Count - 1))
expectedRoots := make([][32]byte, req.Count)
// Populate the database, in reverse slot order, with blocks that would match the request.
for i, j := endSlot, req.Count-1; i >= req.StartSlot; i -= types.Slot(req.Step) {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = i
rt, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
expectedRoots[j] = rt
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
j--
}
// Start service with 330 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
prevSlot := types.Slot(0)
require.Equal(t, uint64(len(expectedRoots)), req.Count, "Number of roots not expected")
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
expectSuccess(t, stream)
res := &ethpb.SignedBeaconBlock{}
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
if res.Block.Slot < prevSlot {
t.Errorf("Received block is unsorted with slot %d lower than previous slot %d", res.Block.Slot, prevSlot)
}
rt, err := res.Block.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, expectedRoots[j], rt, "roots not equal")
prevSlot = res.Block.Slot
j++
}
})
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
}
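
// TestRPCBeaconBlocksByRange_ReturnsGenesisBlock verifies that the genesis
// block (slot 0) is served when the requested range starts at slot 0.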
func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
d := db.SetupDB(t)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 0,
Step: 1,
Count: 4,
}
prevRoot := [32]byte{}
// Populate the database with blocks that would match the request.
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i++ {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = i
blk.Block.ParentRoot = prevRoot[:]
rt, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
// Save the genesis block root.
if i == 0 {
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), rt))
}
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
prevRoot = rt
}
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)
var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
// Check that the genesis block is returned first.
expectSuccess(t, stream)
res := &ethpb.SignedBeaconBlock{}
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
assert.Equal(t, types.Slot(0), res.Block.Slot, "genesis block was not returned")
for i := req.StartSlot.Add(req.Step); i < types.Slot(req.Count*req.Step); i += types.Slot(req.Step) {
expectSuccess(t, stream)
res := &ethpb.SignedBeaconBlock{}
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
}
})
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
}
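
// TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow exercises the
// leaky-bucket rate limiter: requests within capacity succeed, while
// requests exceeding it are rejected as rate-limited.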
func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
d := db.SetupDB(t)
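// saveBlocks stores one block per step of the request; for step == 1 the
// blocks are additionally linked into a chain via parent roots.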
saveBlocks := func(req *pb.BeaconBlocksByRangeRequest) {
// Populate the database with blocks that would match the request.
parentRoot := [32]byte{}
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
block := testutil.NewBeaconBlock()
block.Block.Slot = i
if req.Step == 1 {
block.Block.ParentRoot = parentRoot[:]
}
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block)))
rt, err := block.Block.HashTreeRoot()
require.NoError(t, err)
parentRoot = rt
}
}
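// sendRequest issues the request from p1 to p2, optionally decoding and
// validating the streamed blocks on the receiving side.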
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
req *pb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
var wg sync.WaitGroup
wg.Add(1)
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
if !validateBlocks {
return
}
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
if !success {
continue
}
expectSuccess(t, stream)
res := testutil.NewBeaconBlock()
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
}
}
})
stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
return err
}
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
return nil
}
t.Run("high request count param and no overflow", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
capacity := int64(flags.Get().BlockBatchLimit * 3)
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 5,
Count: uint64(capacity),
}
saveBlocks(req)
assert.NoError(t, sendRequest(p1, p2, r, req, true, true))
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
t.Run("high request count param and overflow", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
capacity := int64(flags.Get().BlockBatchLimit * 3)
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 5,
Count: uint64(capacity + 1),
}
saveBlocks(req)
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
err := sendRequest(p1, p2, r, req, false, true)
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
}
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 1,
Count: uint64(flags.Get().BlockBatchLimit),
}
saveBlocks(req)
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
}
// Any further request should overflow the limiter and be rejected as rate-limited.
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
err := sendRequest(p1, p2, r, req, false, false)
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
}
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
expectedCapacity := int64(0) // Whole capacity is used.
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
})
}
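
// TestRPCBeaconBlocksByRange_validateRangeRequest is a table-driven test of
// request validation: zero or oversized counts, steps, and slot bounds must
// be rejected with ErrInvalidRequest.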
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
slotsSinceGenesis := types.Slot(1000)
offset := int64(slotsSinceGenesis.Mul(params.BeaconConfig().SecondsPerSlot))
r := &Service{
cfg: &Config{
Chain: &chainMock.ChainService{
Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)),
},
},
}
tests := []struct {
name string
req *pb.BeaconBlocksByRangeRequest
expectedError error
errorToLog string
}{
{
name: "Zero Count",
req: &pb.BeaconBlocksByRangeRequest{
Count: 0,
Step: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad count",
},
{
name: "Over limit Count",
req: &pb.BeaconBlocksByRangeRequest{
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
Step: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad count",
},
{
name: "Correct Count",
req: &pb.BeaconBlocksByRangeRequest{
Count: params.BeaconNetworkConfig().MaxRequestBlocks - 1,
Step: 1,
},
errorToLog: "validation failed with correct count",
},
{
name: "Zero Step",
req: &pb.BeaconBlocksByRangeRequest{
Step: 0,
Count: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad step",
},
{
name: "Over limit Step",
req: &pb.BeaconBlocksByRangeRequest{
Step: rangeLimit + 1,
Count: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad step",
},
{
name: "Correct Step",
req: &pb.BeaconBlocksByRangeRequest{
Step: rangeLimit - 1,
Count: 2,
},
errorToLog: "validation failed with correct step",
},
{
name: "Over Limit Start Slot",
req: &pb.BeaconBlocksByRangeRequest{
StartSlot: slotsSinceGenesis.Add((2 * rangeLimit) + 1),
Step: 1,
Count: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad start slot",
},
{
name: "Over Limit End Slot",
req: &pb.BeaconBlocksByRangeRequest{
Step: 1,
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad end slot",
},
{
name: "Exceed Range Limit",
req: &pb.BeaconBlocksByRangeRequest{
Step: 3,
Count: uint64(slotsSinceGenesis / 2),
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad range",
},
{
name: "Valid Request",
req: &pb.BeaconBlocksByRangeRequest{
Step: 1,
Count: params.BeaconNetworkConfig().MaxRequestBlocks - 1,
StartSlot: 50,
},
errorToLog: "validation failed with valid params",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.expectedError != nil {
assert.ErrorContains(t, tt.expectedError.Error(), r.validateRangeRequest(tt.req), tt.errorToLog)
} else {
assert.NoError(t, r.validateRangeRequest(tt.req), tt.errorToLog)
}
})
}
}
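
// TestRPCBeaconBlocksByRange_EnforceResponseInvariants verifies that every
// block served by the handler falls within the requested slot range.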
func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) {
d := db.SetupDB(t)
hook := logTest.NewGlobal()
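// saveBlocks populates the database with a linear chain of blocks matching
// the request, linking each block to its predecessor via the parent root.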
saveBlocks := func(req *pb.BeaconBlocksByRangeRequest) {
// Populate the database with blocks that would match the request.
parentRoot := [32]byte{}
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
block := testutil.NewBeaconBlock()
block.Block.Slot = i
block.Block.ParentRoot = parentRoot[:]
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block)))
rt, err := block.Block.HashTreeRoot()
require.NoError(t, err)
parentRoot = rt
}
}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
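// sendRequest issues the request from p1 to p2 and hands the decoded
// response blocks to processBlocks for assertions.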
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
req *pb.BeaconBlocksByRangeRequest, processBlocks func([]*ethpb.SignedBeaconBlock)) error {
var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
expectSuccess(t, stream)
blk := testutil.NewBeaconBlock()
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, blk))
if blk.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
t.Errorf("Received unexpected block slot %d", blk.Block.Slot)
}
blocks = append(blocks, blk)
}
processBlocks(blocks)
})
stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
return err
}
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
return nil
}
t.Run("assert range", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 448,
Step: 1,
Count: 64,
}
saveBlocks(req)
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, req.Count, uint64(len(blocks)))
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
}
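
// TestRPCBeaconBlocksByRange_FilterBlocks verifies that the handler serves
// only canonical blocks forming a linear chain, truncating the response at
// forks, duplicates, and unfinalized segments.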
func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
hook := logTest.NewGlobal()
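// saveBlocks writes a linear chain of blocks for the request and, when
// finalized is true, marks them canonical and finalizes the last one.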
saveBlocks := func(d db2.Database, chain *chainMock.ChainService, req *pb.BeaconBlocksByRangeRequest, finalized bool) {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
previousRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot))
blocks := make([]*ethpb.SignedBeaconBlock, req.Count)
// Populate the database with blocks that would match the request.
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
parentRoot := make([]byte, 32)
copy(parentRoot, previousRoot[:])
blocks[j] = testutil.NewBeaconBlock()
blocks[j].Block.Slot = i
blocks[j].Block.ParentRoot = parentRoot
var err error
previousRoot, err = blocks[j].Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blocks[j])))
j++
}
stateSummaries := make([]*pb.StateSummary, len(blocks))
if finalized {
if chain.CanonicalRoots == nil {
chain.CanonicalRoots = map[[32]byte]bool{}
}
for i, b := range blocks {
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
stateSummaries[i] = &pb.StateSummary{
Slot: b.Block.Slot,
Root: bRoot[:],
}
chain.CanonicalRoots[bRoot] = true
}
require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries))
require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), &ethpb.Checkpoint{
Epoch: helpers.SlotToEpoch(stateSummaries[len(stateSummaries)-1].Slot),
Root: stateSummaries[len(stateSummaries)-1].Root,
}))
}
}
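// saveBadBlocks is like saveBlocks, except the block at index badBlockNum is
// given a parent root that breaks the chain.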
saveBadBlocks := func(d db2.Database, chain *chainMock.ChainService,
req *pb.BeaconBlocksByRangeRequest, badBlockNum uint64, finalized bool) {
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
previousRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
genRoot := previousRoot
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot))
blocks := make([]*ethpb.SignedBeaconBlock, req.Count)
// Populate the database with blocks whose parent roots are non-linear.
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
parentRoot := make([]byte, 32)
copy(parentRoot, previousRoot[:])
blocks[j] = testutil.NewBeaconBlock()
blocks[j].Block.Slot = i
blocks[j].Block.ParentRoot = parentRoot
// Give the block at index badBlockNum a parent root that breaks the chain.
if j == int(badBlockNum) {
blocks[j].Block.ParentRoot = genRoot[:]
}
var err error
previousRoot, err = blocks[j].Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blocks[j])))
j++
}
stateSummaries := make([]*pb.StateSummary, len(blocks))
if finalized {
if chain.CanonicalRoots == nil {
chain.CanonicalRoots = map[[32]byte]bool{}
}
for i, b := range blocks {
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
stateSummaries[i] = &pb.StateSummary{
Slot: b.Block.Slot,
Root: bRoot[:],
}
chain.CanonicalRoots[bRoot] = true
}
require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries))
require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), &ethpb.Checkpoint{
Epoch: helpers.SlotToEpoch(stateSummaries[len(stateSummaries)-1].Slot),
Root: stateSummaries[len(stateSummaries)-1].Root,
}))
}
}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
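// sendRequest issues the request from p1 to p2, decoding response chunks
// until EOF or a non-zero status code, then passes the blocks to processBlocks.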
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
req *pb.BeaconBlocksByRangeRequest, processBlocks func([]*ethpb.SignedBeaconBlock)) error {
var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
code, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
if err != nil && err != io.EOF {
t.Fatal(err)
}
if code != 0 || err == io.EOF {
break
}
blk := testutil.NewBeaconBlock()
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, blk))
if blk.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
t.Errorf("Received unexpected block slot %d", blk.Block.Slot)
}
blocks = append(blocks, blk)
}
processBlocks(blocks)
})
stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
return err
}
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
return nil
}
t.Run("process normal range", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
d := db.SetupDB(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: 64,
}
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, req.Count, uint64(len(blocks)))
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
t.Run("process non linear blocks", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
d := db.SetupDB(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: 64,
}
saveBadBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, 2, true)
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, uint64(2), uint64(len(blocks)))
prevRoot := [32]byte{}
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
}
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
t.Run("process non linear blocks with 2nd bad batch", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
d := db.SetupDB(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: 128,
}
saveBadBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, 65, true)
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, uint64(65), uint64(len(blocks)))
prevRoot := [32]byte{}
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
}
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
t.Run("only return finalized blocks", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
d := db.SetupDB(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: 64,
}
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)
req.StartSlot = 65
req.Step = 1
req.Count = 128
// Save unfinalized chain.
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, false)
req.StartSlot = 1
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, uint64(64), uint64(len(blocks)))
prevRoot := [32]byte{}
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
t.Errorf("Block slot is out of range: %d is not within [%d, 64)",
blk.Block.Slot, req.StartSlot)
}
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
t.Run("reject duplicate and non canonical blocks", func(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
d := db.SetupDB(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: 64,
}
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)
// Create a duplicate set of unfinalized blocks.
req.StartSlot = 1
req.Step = 1
req.Count = 300
// Save unfinalized chain.
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, false)
req.Count = 64
hook.Reset()
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
assert.Equal(t, uint64(64), uint64(len(blocks)))
prevRoot := [32]byte{}
for _, blk := range blocks {
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
t.Errorf("Block slot is out of range: %d is not within [%d, 64)",
blk.Block.Slot, req.StartSlot)
}
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
}
}
})
assert.NoError(t, err)
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
}