2019-08-20 19:06:49 +00:00
|
|
|
package sync
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-10-22 14:33:01 +00:00
|
|
|
"io"
|
2019-08-20 19:06:49 +00:00
|
|
|
"sync"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2020-01-16 01:19:06 +00:00
|
|
|
"github.com/kevinms/leakybucket-go"
|
2019-08-20 19:06:49 +00:00
|
|
|
"github.com/libp2p/go-libp2p-core/network"
|
|
|
|
"github.com/libp2p/go-libp2p-core/protocol"
|
2021-02-16 07:45:34 +00:00
|
|
|
types "github.com/prysmaticlabs/eth2-types"
|
2020-06-09 22:40:48 +00:00
|
|
|
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
2020-10-22 14:33:01 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
|
|
|
db2 "github.com/prysmaticlabs/prysm/beacon-chain/db"
|
2019-08-20 19:06:49 +00:00
|
|
|
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
2021-05-17 19:25:59 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
2020-10-22 14:33:01 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
2019-08-20 19:06:49 +00:00
|
|
|
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
2020-11-18 04:17:42 +00:00
|
|
|
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
2021-03-02 19:36:03 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
2019-08-20 19:06:49 +00:00
|
|
|
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
2021-06-02 23:49:52 +00:00
|
|
|
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
2020-10-22 14:33:01 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
2021-05-26 16:19:54 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/shared/interfaces"
|
2020-08-28 13:50:38 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/shared/params"
|
2019-08-20 19:06:49 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/shared/testutil"
|
2020-07-15 04:41:11 +00:00
|
|
|
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
|
|
|
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
2020-05-15 20:51:23 +00:00
|
|
|
logTest "github.com/sirupsen/logrus/hooks/test"
|
2019-08-20 19:06:49 +00:00
|
|
|
)
|
|
|
|
|
2020-05-15 08:53:19 +00:00
|
|
|
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
|
2019-08-20 19:06:49 +00:00
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
|
|
p1.Connect(p2)
|
2020-07-15 04:41:11 +00:00
|
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
2020-12-16 16:56:21 +00:00
|
|
|
d := db.SetupDB(t)
|
2019-08-20 19:06:49 +00:00
|
|
|
|
2019-09-20 06:27:28 +00:00
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
|
|
StartSlot: 100,
|
2019-11-19 03:56:37 +00:00
|
|
|
Step: 64,
|
|
|
|
Count: 16,
|
2019-08-20 19:06:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Populate the database with blocks that would match the request.
|
2021-02-16 07:45:34 +00:00
|
|
|
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
|
2020-08-27 18:13:32 +00:00
|
|
|
blk := testutil.NewBeaconBlock()
|
|
|
|
blk.Block.Slot = i
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
|
2019-08-20 19:06:49 +00:00
|
|
|
}
|
|
|
|
|
2020-05-14 13:43:36 +00:00
|
|
|
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
|
2021-05-17 19:25:59 +00:00
|
|
|
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
2020-07-17 08:58:51 +00:00
|
|
|
topic := string(pcl)
|
|
|
|
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
|
2019-08-20 19:06:49 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
2020-06-18 03:53:46 +00:00
|
|
|
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
2019-08-20 19:06:49 +00:00
|
|
|
defer wg.Done()
|
2021-02-16 07:45:34 +00:00
|
|
|
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
|
2020-10-12 08:11:05 +00:00
|
|
|
expectSuccess(t, stream)
|
2020-08-27 18:13:32 +00:00
|
|
|
res := testutil.NewBeaconBlock()
|
2021-03-21 19:07:42 +00:00
|
|
|
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
|
2021-02-16 07:45:34 +00:00
|
|
|
if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
|
Update run time to v0.9.3 (#4154)
* Remove custody (#3986)
* Update proto fields
* Updated block operations
* Fixed all block operation tests
* Fixed tests part 1
* Fixed tests part 1
* All tests pass
* Clean up
* Skip spec test
* Fixed ssz test
* Skip ssz test
* Skip mainnet tests
* Update beacon-chain/operations/attestation.go
* Update beacon-chain/operations/attestation.go
* Decoy flip flop check (#3987)
* Bounce attack check (#3989)
* New store values
* Update process block
* Update process attestation
* Update tests
* Helper
* Fixed blockchain package tests
* Update beacon-chain/blockchain/forkchoice/process_block.go
* Conflict
* Unskip mainnet spec tests (#3998)
* Starting
* Fixed attestation mainnet test
* Unskip ssz static and block processing tests
* Fixed workspace
* fixed workspace
* fixed workspace
* Update beacon-chain/core/blocks/block_operations.go
* Unskip minimal spec tests (#3999)
* Starting
* Fixed attestation mainnet test
* Unskip ssz static and block processing tests
* Fixed workspace
* fixed workspace
* fixed workspace
* Update workspace
* Unskip all minimal spec tests
* Update workspace for general test
* Unskip test (#4001)
* Update minimal seconds per slot to 6 (#3978)
* Bounce attack tests (#3993)
* New store values
* Update process block
* Update process attestation
* Update tests
* Helper
* Fixed blockchain package tests
* Slots since epoch starts tests
* Update justified checkpt tests
* Conflict
* Fixed logic
* Update process_block.go
* Use helper
* Conflict
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.1
* Conflict
* Fixed failed tests
* Lower MinGenesisActiveValidatorCount to 16384 (#4100)
* Fork choice beacon block checks (#4107)
* Prevent future blocks check and test
* Removed old code
* Update aggregation proto (#4121)
* Update def
* Update spec test
* Conflict
* Update workspace
* patch
* Resolve conflict
* Patch
* Change workspace
* Update ethereumapis to a forked branch at commit https://github.com/prysmaticlabs/ethereumapis/pull/46/commits/6eb1193e47f66c7dabc33958b1996ec16c1b6e16
* Fixed all the tests
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into conflict
* fix patch
* Need to regenerate test data
* Merge branch 'master' into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Enable snappy compression for all (#4157)
* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* enable snappy compression for all
* Validate aggregate and proof subscriber (#4159)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Conflict
* Update workspace
* Conflict
* Conflict
* Conflict
* Merge branch 'master' into v0.9.2
* Merge branch 'master' into v0.9.2
* Conflict
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Remove migrate to snappy (#4205)
* Feature flag: Deprecate --prune-states, release to all (#4204)
* Deprecated prune-states, release to all
* imports
* remote unused import
* remove unused import
* Rm prune state test
* gaz
* Refactoring for dynamic pubsub subscriptions for non-aggregated attestations (#4189)
* checkpoint progress
* chkpt
* checkpoint progress
* put pipeline in its own file
* remove unused imports
* add test, it's failing though
* fix test
* remove head state issue
* add clear db flag to e2e
* add some more error handling, debug logging
* skip processing if chain has not started
* fix test
* wrap in go routine to see if anything breaks
* remove duplicated topic
* Add a regression test. Thanks @nisdas for finding the original problem. May it never happen again *fingers crossed*
* Comments
* gofmt
* comment out with TODO
* Sync with master
* Sync with master
* RPC servers use attestation pool (#4223)
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Refactor RPC to Fully Utilize Ethereum APIs (#4243)
* include attester as a file in the validator server
* remove old proposer server impl
* include new patch and properly sync changes
* align with public pbs
* ensure matches rpc def
* fix up status tests
* resolve all broken test files in the validator rpc package
* gazelle include
* fix up the duties implementation
* fixed up all get duties functions
* all tests pass
* utilize new ethereum apis
* amend validator client to use the new beacon node validator rpc client
* fix up most of validator items
* added in mock
* fix up test
* readd test
* add chain serv mock
* fix a few more validator methods
* all validator tests passingggg
* fix broken test
* resolve even more broken tests
* all tests passsssss
* fix lint
* try PR
* fix up test
* resolve broken other tests
* Sync with master
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into v0.9.2
* Aggregate and proof subscriber (#4240)
* Added subscribers
* Fixed conflict
* Tests
* fix up patch
* Use upstream pb
* include latest patch
* Fmt
* Save state before head block
* skip tests (#4275)
* Delete block attestations from the pool (#4241)
* Added subscribers
* Clean up
* Fixed conflict
* Delete atts in pool in validate pipeline
* Moved it to subscriber
* Merge branch 'v0.9.2' of https://github.com/prysmaticlabs/prysm into use-att-pool-3
* Test
* Fixed test
* Initial work on voluntary exit (#4207)
* Initial implementation of voluntary exit: RPC call
* Update for recent merges
* Break out validation logic for voluntary exits to core module
* RequestExit -> ProposeExit
* Decrease exit package visibility
* Move to operation feed
* Wrap errors
* Fix critical proposer selection bug #4259 (#4265)
* fix critical proposer selection bug #4259
* gofmt
* add 1 more validator to make it 5
* more tests
* Fixed archivedProposerIndex
* Fixed TestFilterAttestation_OK
* Refactor ComputeProposerIndex, add regression test for potential out of range panic
* handle case of nil validator
* Update validators_test.go
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Leftover merge files, oops
* gaz
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* Fixes Duplicate Validator Bug (#4322)
* Update dict
* Test helper
* Regression test
* Comment
* Reset test cache
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* fixes after PR #4328
* Complete attestation pool for run time (#4286)
* Added subscribers
* Fixed conflict
* Delete atts in pool in validate pipeline
* Moved it to subscriber
* Test
* Fixed test
* New curl for forkchoice attestations
* Starting att pool service for fork choice
* Update pool interface
* Update pool interface
* Update sync and node
* Lint
* Gazelle
* Updated servers, filled in missing functionalities
* RPC working with 1 beacon node 64 validators
* Started writing tests. Yay
* Test to aggregate and save multiple fork choice atts
* Tests for BatchAttestations for fork choice
* Fixed exisiting tests
* Minor fixes
* Fmt
* Added batch saves
* Lint
* Mo tests yay
* Delete test
* Fmt
* Update interval
* Fixed aggregation broadcast
* Clean up based on design review comment
* Fixed setupBeaconChain
* Raul's feedback. s/error/err
* resolve conflicts
* Merge branch 'v0.9.2' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Removed old protos and fixed tests (#4336)
* Merge refs/heads/master into v0.9.2
* Disallow duplicated indices and test (#4339)
* Explicit use of GENESIS_SLOT in fork choice (#4343)
* Update from 2 to 3 (#4345)
* Remove verify unaggregated attestation when aggregating (#4347)
* use slot ticker instead of run every (#4348)
* Add context check for unbounded loop work (#4346)
* Revert "Explicit use of GENESIS_SLOT in fork choice (#4343)" (#4349)
This reverts commit d3f6753c77f8f733563d00ab649c5159b2c2926f.
* Refactor Powchain Service (#4306)
* add data structures
* generate proto
* add in new fields
* add comments
* add new mock state
* add new mock state
* add new methods
* some more changes
* check genesis time properly
* lint
* fix refs
* fix tests
* lint
* lint
* lint
* gaz
* fix lint
* raul's comments
* use one method
* fix test
* raul's comment
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Ensure best better-justification is stored for fork choice (#4342)
* Ensure best better-justification is stored. Minor refactor
* Tests
* Merge refs/heads/v0.9.2 into better-best-justified
* Merge refs/heads/v0.9.2 into better-best-justified
* Ensure that epoch of attestation slot matches the target epoch (#4341)
* Disallow duplicated indices and test
* Add slot to target epoch check to on_attestation
* Add slot to target epoch check to process_attestation
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into no-dup-att-indices
* Fixed TestProcessAttestations_PrevEpochFFGDataMismatches
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Update beacon-chain/blockchain/forkchoice/process_attestation_test.go
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* Merge refs/heads/v0.9.2 into no-dup-att-indices
* Filter viable branches in fork choice (#4355)
* Only activate upon finality (#4359)
* Updated functions
* Tests
* Merge branch 'v0.9.2' of git+ssh://github.com/prysmaticlabs/prysm into queue-fix-on-finality
* Comment
* Merge refs/heads/v0.9.2 into queue-fix-on-finality
* Fixed failing test from 4359 (#4360)
* Fixed
* Skip registry spec tests
* Wait for state to be initialized at least once before running slot ticker based on genesis time (#4364)
* Sync with master
* Fix checkpoint root to use genesis block root (#4368)
* Return an error on nil head state in fork choice (#4369)
* Return error if nil head state
* Fixed tests. Saved childen blocks state
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Update metrics every epoch (#4367)
* return empty slice if state is nil (#4365)
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* Pubsub: Broadcast attestations to committee based subnets (#4316)
* Working on un-aggregated pubsub topics
* update subscriber to call pool
* checkpointing
* fix
* untested message validation
* minor fixes
* rename slotsSinceGenesis to slotsSince
* some progress on a unit test, subscribe is not being called still...
* dont change topic
* need to set the data on the message
* restore topic
* fixes
* some helpful parameter changes for mainnet operations
* lint
* Terence feedback
* unskip e2e
* Unit test for validate committee index beacon attestation
* PR feedbacK
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into resolveConflicts
* remove condition
* Remove unused operation pool (#4361)
* Merge refs/heads/master into v0.9.2
* Aggregate attestations periodically (#4376)
* Persist ETH1 Data to Disk (#4329)
* add data structures
* generate proto
* add in new fields
* add comments
* add new mock state
* add new mock state
* add new methods
* some more changes
* check genesis time properly
* lint
* fix refs
* fix tests
* lint
* lint
* lint
* gaz
* adding in new proto message
* remove outdated vars
* add new changes
* remove latest eth1data
* continue refactoring
* finally works
* lint
* fix test
* fix all tests
* fix all tests again
* fix build
* change back
* add full eth1 test
* fix logs and test
* add constant
* changes
* fix bug
* lint
* fix another bug
* change back
* Apply suggestions from code review
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Fixed VerifyIndexedAttestation (#4382)
* rm signing root (#4381)
* rm signing root
* Fixed VerifyIndexedAttestation
* Check proposer slashed status inside ProcessBlockHeaderNoVerify
* Fixed TestUpdateJustified_CouldUpdateBest
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove Redundant Trie Generation (#4383)
* remove trie generation
* remove deposit hashes
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into v0.9.2
* fix build
* Conflict
* Implement StreamAttestations RPC Endpoint (#4390)
* started attestation stream
* stream attestations test
* on slot tick test passing
* imports
* gaz
* Update beacon-chain/rpc/beacon/attestations_test.go
Co-Authored-By: shayzluf <thezluf@gmail.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
* Fixed goimport (#4394)
* Use custom stateutil ssz for ssz HTR spec tests (#4396)
* Use custom stateutil ssz for ssz HTR spec tests
* gofmt
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge refs/heads/master into v0.9.2
* set mainnet to be the default for build and run (#4398)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* gracefully handle deduplicated registration of topic validators (#4399)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* SSZ: temporarily disable roots cache until cache issues can be resolved (#4407)
* temporarily disable roots cache until cache issues can be resolved
* Also use custom ssz for spectests
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Remove process block attestations as separate routine (#4408)
* Removed old save/process block atts
* Fixed tests
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save Deposit Cache to Disk (#4384)
* change to protos
* fix build
* glue everything together
* fix test
* raul's review
* preston's comments
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Fix activation queue sorting (#4409)
* Removed old save/process block atts
* Fixed tests
* Proper sorting by eligibility epoch then by indices
* Deleted old colde
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Merge branch 'master' into v0.9.2
* Merge refs/heads/master into v0.9.2
* stop recursive lookup if context is cancelled (#4420)
* Fix proposal bug (#4419)
* Add Pending Deposits Safely (#4422)
* safely prune cache
* use proper method
* preston's,terence's reviews and comments
* revert change to build files
* use as feature config instead
* Release custom state ssz (#4421)
* Release custom state ssz, change all HTR of beacon state to use custom method
* typo
* use mainnet config
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Merge branch 'master' of github.com:prysmaticlabs/prysm into v0.9.2
* Update initial sync save justified to align with v0.9.3 (#4432)
* Merge refs/heads/master into v0.9.2
* Merge refs/heads/master into v0.9.2
* fix build
* don't blacklist on pubsub (#4435)
* Fix Flakey Slot Ticker Test (#4434)
* use interface instead for the slot ticker
* fixed up flakey tests
* add gen time
* get duties comment
* fix lifecycle test
* more fixes
* Configurable min genesis delay (#4437)
* Configurable min genesis delay based on https://github.com/ethereum/eth2.0-specs/pull/1557
* remove feature flag for genesis delay
* fix
* demo config feedback
* patch readme
* save keys unencrypted for validators (#4439)
* Add new demo configuration targeting mainnet scale (#4397)
* Add new demo configuration targeting mainnet, with 1/10th of the deposit value
* reduce quotant by 1/10th. Use 1/10th mainnet values
* only change the inactivity quotant
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Save justified checkpoint state (#4433)
* Save justified checkpoint state
* Lint
* Feedback
* Fixed test
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Update shared/testutil/deposits.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update proto/testing/ssz_regression_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/core/epoch/epoch_processing.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/kv/forkchoice.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/service.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber_beacon_blocks_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/sync/subscriber.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/proposer.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/prepare_forkchoice.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/operations/attestations/pool.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/powchain/log_processing_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/aggregator/server.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/rpc/validator/exit_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/cache/depositcache/pending_deposits.go
* Update beacon-chain/cache/depositcache/pending_deposits_test.go
* Update beacon-chain/rpc/validator/proposer.go
* Merge refs/heads/master into v0.9.2
* Fix e2e genesis delay issues (#4442)
* fix e2e genesis delay issues
* register flag
* typo
* Update shared/featureconfig/config.go
Co-Authored-By: Nishant Das <nishdas93@gmail.com>
* Apply suggestions from code review
Co-Authored-By: Nishant Das <nishdas93@gmail.com>
* skip demo e2e
* fix validator
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* Batch Eth1 RPC Calls (#4392)
* add new methods
* get it working
* optimize past deposit logs processing
* revert change
* fix all tests
* use mock
* lint
* lint
* check for nil
* stop panics
* Apply suggestions from code review
Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Terence's Review
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-01-07 18:47:39 +00:00
|
|
|
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
2019-08-20 19:06:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2020-06-18 03:53:46 +00:00
|
|
|
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
2020-07-15 04:41:11 +00:00
|
|
|
require.NoError(t, err)
|
2019-08-20 19:06:49 +00:00
|
|
|
|
2019-09-20 06:27:28 +00:00
|
|
|
err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
|
2020-07-15 04:41:11 +00:00
|
|
|
require.NoError(t, err)
|
2019-08-20 19:06:49 +00:00
|
|
|
|
2020-05-14 13:43:36 +00:00
|
|
|
// Make sure that rate limiter doesn't limit capacity exceedingly.
|
2020-07-17 08:58:51 +00:00
|
|
|
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
2020-05-14 13:43:36 +00:00
|
|
|
expectedCapacity := int64(req.Count*10 - req.Count)
|
2020-07-15 04:41:11 +00:00
|
|
|
require.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
2020-05-14 13:43:36 +00:00
|
|
|
|
2019-08-20 19:06:49 +00:00
|
|
|
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
|
|
|
t.Fatal("Did not receive stream within 1 sec")
|
|
|
|
}
|
|
|
|
}
|
2020-05-04 07:54:31 +00:00
|
|
|
|
2020-10-23 04:23:15 +00:00
|
|
|
func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) {
|
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
|
|
p1.Connect(p2)
|
|
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
2020-12-16 16:56:21 +00:00
|
|
|
d := db.SetupDB(t)
|
2020-10-23 04:23:15 +00:00
|
|
|
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
|
|
StartSlot: 0,
|
|
|
|
Step: 1,
|
|
|
|
Count: 200,
|
|
|
|
}
|
|
|
|
|
|
|
|
genRoot := [32]byte{}
|
|
|
|
// Populate the database with blocks that would match the request.
|
2021-02-16 07:45:34 +00:00
|
|
|
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
|
2020-10-23 04:23:15 +00:00
|
|
|
blk := testutil.NewBeaconBlock()
|
|
|
|
blk.Block.Slot = i
|
|
|
|
if i == 0 {
|
|
|
|
rt, err := blk.Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
genRoot = rt
|
|
|
|
}
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
|
2020-10-23 04:23:15 +00:00
|
|
|
}
|
|
|
|
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot))
|
|
|
|
|
|
|
|
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
|
2021-05-17 19:25:59 +00:00
|
|
|
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
2020-10-23 04:23:15 +00:00
|
|
|
topic := string(pcl)
|
|
|
|
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
|
|
|
|
// Use a new request to test this out
|
|
|
|
newReq := &pb.BeaconBlocksByRangeRequest{StartSlot: 0, Step: 1, Count: 1}
|
|
|
|
|
|
|
|
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
|
|
|
defer wg.Done()
|
2021-02-16 07:45:34 +00:00
|
|
|
for i := newReq.StartSlot; i < newReq.StartSlot.Add(newReq.Count*newReq.Step); i += types.Slot(newReq.Step) {
|
2020-10-23 04:23:15 +00:00
|
|
|
expectSuccess(t, stream)
|
|
|
|
res := testutil.NewBeaconBlock()
|
2021-03-21 19:07:42 +00:00
|
|
|
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
|
2021-02-16 07:45:34 +00:00
|
|
|
if res.Block.Slot.SubSlot(newReq.StartSlot).Mod(newReq.Step) != 0 {
|
2020-10-23 04:23:15 +00:00
|
|
|
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
|
|
|
}
|
|
|
|
// Expect EOF
|
|
|
|
b := make([]byte, 1)
|
|
|
|
_, err := stream.Read(b)
|
|
|
|
require.ErrorContains(t, io.EOF.Error(), err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = r.beaconBlocksByRangeRPCHandler(context.Background(), newReq, stream1)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
|
|
|
t.Fatal("Did not receive stream within 1 sec")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-18 17:58:20 +00:00
|
|
|
// TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks saves blocks to the DB in
// reverse slot order and asserts that the by-range RPC handler streams them back
// sorted ascending by slot, with roots matching the saved blocks.
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d := db.SetupDB(t)

	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 200,
		Step:      21,
		Count:     33,
	}

	// Last slot covered by the request: StartSlot + Step*(Count-1).
	endSlot := req.StartSlot.Add(req.Step * (req.Count - 1))
	expectedRoots := make([][32]byte, req.Count)
	// Populate the database with blocks that would match the request.
	// Insertion is deliberately done from endSlot downward so the handler's
	// output order (ascending) cannot simply mirror insertion order.
	for i, j := endSlot, req.Count-1; i >= req.StartSlot; i -= types.Slot(req.Step) {
		blk := testutil.NewBeaconBlock()
		blk.Block.Slot = i
		rt, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		// expectedRoots is filled back-to-front so index j lines up with ascending slots.
		expectedRoots[j] = rt
		require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
		j--
	}

	// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
	r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
	topic := string(pcl)
	// Near-zero refill rate: the bucket effectively never recovers within the test.
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)

	var wg sync.WaitGroup
	wg.Add(1)
	// p2 acts as the requesting peer: its handler reads each streamed block and
	// verifies ordering and root equality against expectedRoots.
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		prevSlot := types.Slot(0)
		require.Equal(t, uint64(len(expectedRoots)), req.Count, "Number of roots not expected")
		for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
			expectSuccess(t, stream)
			res := &ethpb.SignedBeaconBlock{}
			assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
			if res.Block.Slot < prevSlot {
				t.Errorf("Received block is unsorted with slot %d lower than previous slot %d", res.Block.Slot, prevSlot)
			}
			rt, err := res.Block.HashTreeRoot()
			require.NoError(t, err)
			assert.Equal(t, expectedRoots[j], rt, "roots not equal")
			prevSlot = res.Block.Slot
			j++
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
|
|
|
|
|
2020-05-15 08:53:19 +00:00
|
|
|
// TestRPCBeaconBlocksByRange_ReturnsGenesisBlock verifies that a by-range request
// starting at slot 0 includes the genesis block as the first streamed response.
func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d := db.SetupDB(t)

	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 0,
		Step:      1,
		Count:     4,
	}

	prevRoot := [32]byte{}
	// Populate the database with blocks that would match the request.
	// Blocks are chained via ParentRoot so they form a linear chain from genesis.
	for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i++ {
		blk := testutil.NewBeaconBlock()
		blk.Block.Slot = i
		blk.Block.ParentRoot = prevRoot[:]
		rt, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)

		// Save genesis block
		if i == 0 {
			require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), rt))
		}
		require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
		prevRoot = rt
	}

	r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
	topic := string(pcl)
	// Generous limiter so rate limiting cannot interfere with this test.
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)

	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		// check for genesis block
		expectSuccess(t, stream)
		res := &ethpb.SignedBeaconBlock{}
		assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
		assert.Equal(t, types.Slot(0), res.Block.Slot, "genesis block was not returned")
		// Drain the remaining blocks of the response; only success codes are expected.
		for i := req.StartSlot.Add(req.Step); i < types.Slot(req.Count*req.Step); i += types.Slot(req.Step) {
			expectSuccess(t, stream)
			res := &ethpb.SignedBeaconBlock{}
			assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
|
2020-05-15 20:51:23 +00:00
|
|
|
|
|
|
|
// TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow exercises the handler's
// leaky-bucket rate limiting: requests that exactly fit the capacity succeed, while
// requests exceeding it return p2ptypes.ErrRateLimited. All subtests share one DB.
func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
	d := db.SetupDB(t)
	// saveBlocks seeds the shared DB with blocks matching req. When Step == 1 the
	// blocks are parent-linked into a chain; otherwise ParentRoot is left as created.
	saveBlocks := func(req *pb.BeaconBlocksByRangeRequest) {
		// Populate the database with blocks that would match the request.
		parentRoot := [32]byte{}
		for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
			block := testutil.NewBeaconBlock()
			block.Block.Slot = i
			if req.Step == 1 {
				block.Block.ParentRoot = parentRoot[:]
			}
			require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block)))
			rt, err := block.Block.HashTreeRoot()
			require.NoError(t, err)
			parentRoot = rt
		}
	}
	// sendRequest issues the RPC from p1 to p2 and returns the handler's error.
	// validateBlocks: when false, the receiving side does not read/validate responses.
	// success: when false, response bodies are skipped (used when an error is expected).
	sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
		req *pb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
		var wg sync.WaitGroup
		wg.Add(1)
		pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
		p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
			defer wg.Done()
			if !validateBlocks {
				return
			}
			for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
				if !success {
					continue
				}
				expectSuccess(t, stream)
				res := testutil.NewBeaconBlock()
				assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, res))
				// Every returned slot must lie on the requested stride.
				if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
					t.Errorf("Received unexpected block slot %d", res.Block.Slot)
				}
			}
		})
		stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
		require.NoError(t, err)
		if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
			return err
		}
		if testutil.WaitTimeout(&wg, 1*time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
		return nil
	}

	t.Run("high request count param and no overflow", func(t *testing.T) {
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

		capacity := int64(flags.Get().BlockBatchLimit * 3)
		r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}

		pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
		topic := string(pcl)
		// Near-zero refill rate so capacity is effectively fixed for the test run.
		r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
		// Count exactly equals capacity: the request should fit without overflow.
		req := &pb.BeaconBlocksByRangeRequest{
			StartSlot: 100,
			Step:      5,
			Count:     uint64(capacity),
		}
		saveBlocks(req)

		assert.NoError(t, sendRequest(p1, p2, r, req, true, true))

		remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
		expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
		assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
	})

	t.Run("high request count param and overflow", func(t *testing.T) {
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

		capacity := int64(flags.Get().BlockBatchLimit * 3)
		r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}

		pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
		topic := string(pcl)
		r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)

		// Count exceeds capacity by one: every request must be rate limited.
		req := &pb.BeaconBlocksByRangeRequest{
			StartSlot: 100,
			Step:      5,
			Count:     uint64(capacity + 1),
		}
		saveBlocks(req)

		// Repeat up to the bad-responses threshold; each attempt should be rejected.
		for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
			err := sendRequest(p1, p2, r, req, false, true)
			assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
		}

		remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
		expectedCapacity := int64(0) // Whole capacity is used.
		assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
	})

	t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

		capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
		r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
		pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
		topic := string(pcl)
		r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)

		req := &pb.BeaconBlocksByRangeRequest{
			StartSlot: 100,
			Step:      1,
			Count:     uint64(flags.Get().BlockBatchLimit),
		}
		saveBlocks(req)

		// Burst-factor many batch requests should all pass within capacity.
		for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
			assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
		}

		// One more request should result in overflow.
		for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
			err := sendRequest(p1, p2, r, req, false, false)
			assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
		}

		remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
		expectedCapacity := int64(0) // Whole capacity is used.
		assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
	})
}
|
2020-08-28 13:50:38 +00:00
|
|
|
|
2020-09-02 21:59:57 +00:00
|
|
|
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
2021-02-16 07:45:34 +00:00
|
|
|
slotsSinceGenesis := types.Slot(1000)
|
|
|
|
offset := int64(slotsSinceGenesis.Mul(params.BeaconConfig().SecondsPerSlot))
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{
|
|
|
|
cfg: &Config{
|
|
|
|
Chain: &chainMock.ChainService{
|
|
|
|
Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2020-08-28 13:50:38 +00:00
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
name string
|
|
|
|
req *pb.BeaconBlocksByRangeRequest
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError error
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "Zero Count",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Count: 0,
|
|
|
|
Step: 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad count",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Over limit Count",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
|
|
|
|
Step: 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad count",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Correct Count",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Count: params.BeaconNetworkConfig().MaxRequestBlocks - 1,
|
|
|
|
Step: 1,
|
|
|
|
},
|
|
|
|
errorToLog: "validation failed with correct count",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Zero Step",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: 0,
|
|
|
|
Count: 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad step",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Over limit Step",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: rangeLimit + 1,
|
|
|
|
Count: 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad step",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Correct Step",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: rangeLimit - 1,
|
|
|
|
Count: 2,
|
|
|
|
},
|
|
|
|
errorToLog: "validation failed with correct step",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Over Limit Start Slot",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
2021-02-16 07:45:34 +00:00
|
|
|
StartSlot: slotsSinceGenesis.Add((2 * rangeLimit) + 1),
|
2020-08-28 13:50:38 +00:00
|
|
|
Step: 1,
|
|
|
|
Count: 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad start slot",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Over Limit End Slot",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: 1,
|
|
|
|
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad end slot",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Exceed Range Limit",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: 3,
|
|
|
|
Count: uint64(slotsSinceGenesis / 2),
|
|
|
|
},
|
2020-11-18 04:17:42 +00:00
|
|
|
expectedError: p2ptypes.ErrInvalidRequest,
|
2020-08-28 13:50:38 +00:00
|
|
|
errorToLog: "validation did not fail with bad range",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "Valid Request",
|
|
|
|
req: &pb.BeaconBlocksByRangeRequest{
|
|
|
|
Step: 1,
|
2020-10-12 08:11:05 +00:00
|
|
|
Count: params.BeaconNetworkConfig().MaxRequestBlocks - 1,
|
2020-08-28 13:50:38 +00:00
|
|
|
StartSlot: 50,
|
|
|
|
},
|
|
|
|
errorToLog: "validation failed with valid params",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.name, func(t *testing.T) {
|
2020-11-18 04:17:42 +00:00
|
|
|
if tt.expectedError != nil {
|
|
|
|
assert.ErrorContains(t, tt.expectedError.Error(), r.validateRangeRequest(tt.req), tt.errorToLog)
|
2020-08-28 13:50:38 +00:00
|
|
|
} else {
|
|
|
|
assert.NoError(t, r.validateRangeRequest(tt.req), tt.errorToLog)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2020-09-02 21:59:57 +00:00
|
|
|
|
|
|
|
// TestRPCBeaconBlocksByRange_EnforceResponseInvariants checks that the handler's
// streamed response stays within the requested [StartSlot, StartSlot+Count*Step)
// range and that no "Disconnecting bad peer" log is emitted for a valid exchange.
func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) {
	d := db.SetupDB(t)
	hook := logTest.NewGlobal()
	// saveBlocks seeds the DB with a parent-linked chain of blocks matching req.
	saveBlocks := func(req *pb.BeaconBlocksByRangeRequest) {
		// Populate the database with blocks that would match the request.
		parentRoot := [32]byte{}
		for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
			block := testutil.NewBeaconBlock()
			block.Block.Slot = i
			block.Block.ParentRoot = parentRoot[:]
			require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(block)))
			rt, err := block.Block.HashTreeRoot()
			require.NoError(t, err)
			parentRoot = rt
		}
	}
	pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
	// sendRequest issues the RPC from p1 to p2, collects all streamed blocks on the
	// receiving side, and hands them to processBlocks for caller-defined assertions.
	sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
		req *pb.BeaconBlocksByRangeRequest, processBlocks func([]*ethpb.SignedBeaconBlock)) error {
		var wg sync.WaitGroup
		wg.Add(1)
		p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
			defer wg.Done()
			blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
			for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
				expectSuccess(t, stream)
				blk := testutil.NewBeaconBlock()
				assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, blk))
				// Each returned slot must lie on the requested stride.
				if blk.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
					t.Errorf("Received unexpected block slot %d", blk.Block.Slot)
				}
				blocks = append(blocks, blk)
			}
			processBlocks(blocks)
		})
		stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
		require.NoError(t, err)
		if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
			return err
		}
		if testutil.WaitTimeout(&wg, 1*time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
		return nil
	}

	t.Run("assert range", func(t *testing.T) {
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

		r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
		// Near-zero refill rate; 640 capacity is ample for the 64-block request.
		r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
		req := &pb.BeaconBlocksByRangeRequest{
			StartSlot: 448,
			Step:      1,
			Count:     64,
		}
		saveBlocks(req)

		hook.Reset()
		err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
			assert.Equal(t, req.Count, uint64(len(blocks)))
			for _, blk := range blocks {
				if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
					t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
						blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
				}
			}
		})
		assert.NoError(t, err)
		require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
	})
}
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
|
|
|
|
hook := logTest.NewGlobal()
|
|
|
|
|
|
|
|
saveBlocks := func(d db2.Database, chain *chainMock.ChainService, req *pb.BeaconBlocksByRangeRequest, finalized bool) {
|
|
|
|
blk := testutil.NewBeaconBlock()
|
|
|
|
blk.Block.Slot = 0
|
|
|
|
previousRoot, err := blk.Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
|
2020-10-22 14:33:01 +00:00
|
|
|
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot))
|
|
|
|
blocks := make([]*ethpb.SignedBeaconBlock, req.Count)
|
|
|
|
// Populate the database with blocks that would match the request.
|
2021-02-16 07:45:34 +00:00
|
|
|
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
parentRoot := make([]byte, 32)
|
|
|
|
copy(parentRoot, previousRoot[:])
|
|
|
|
blocks[j] = testutil.NewBeaconBlock()
|
|
|
|
blocks[j].Block.Slot = i
|
|
|
|
blocks[j].Block.ParentRoot = parentRoot
|
|
|
|
var err error
|
|
|
|
previousRoot, err = blocks[j].Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blocks[j])))
|
2020-10-22 14:33:01 +00:00
|
|
|
j++
|
|
|
|
}
|
|
|
|
stateSummaries := make([]*pb.StateSummary, len(blocks))
|
|
|
|
|
|
|
|
if finalized {
|
|
|
|
if chain.CanonicalRoots == nil {
|
|
|
|
chain.CanonicalRoots = map[[32]byte]bool{}
|
|
|
|
}
|
|
|
|
for i, b := range blocks {
|
|
|
|
bRoot, err := b.Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
stateSummaries[i] = &pb.StateSummary{
|
|
|
|
Slot: b.Block.Slot,
|
|
|
|
Root: bRoot[:],
|
|
|
|
}
|
|
|
|
chain.CanonicalRoots[bRoot] = true
|
|
|
|
}
|
|
|
|
require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries))
|
|
|
|
require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{
|
|
|
|
Epoch: helpers.SlotToEpoch(stateSummaries[len(stateSummaries)-1].Slot),
|
|
|
|
Root: stateSummaries[len(stateSummaries)-1].Root,
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
saveBadBlocks := func(d db2.Database, chain *chainMock.ChainService,
|
|
|
|
req *pb.BeaconBlocksByRangeRequest, badBlockNum uint64, finalized bool) {
|
|
|
|
blk := testutil.NewBeaconBlock()
|
|
|
|
blk.Block.Slot = 0
|
|
|
|
previousRoot, err := blk.Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
genRoot := previousRoot
|
|
|
|
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blk)))
|
2020-10-22 14:33:01 +00:00
|
|
|
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot))
|
|
|
|
blocks := make([]*ethpb.SignedBeaconBlock, req.Count)
|
|
|
|
// Populate the database with blocks with non linear roots.
|
2021-02-16 07:45:34 +00:00
|
|
|
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
parentRoot := make([]byte, 32)
|
|
|
|
copy(parentRoot, previousRoot[:])
|
|
|
|
blocks[j] = testutil.NewBeaconBlock()
|
|
|
|
blocks[j].Block.Slot = i
|
|
|
|
blocks[j].Block.ParentRoot = parentRoot
|
|
|
|
// Make the 2nd block have a bad root.
|
|
|
|
if j == int(badBlockNum) {
|
|
|
|
blocks[j].Block.ParentRoot = genRoot[:]
|
|
|
|
}
|
|
|
|
var err error
|
|
|
|
previousRoot, err = blocks[j].Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
2021-05-26 18:33:46 +00:00
|
|
|
require.NoError(t, d.SaveBlock(context.Background(), interfaces.WrappedPhase0SignedBeaconBlock(blocks[j])))
|
2020-10-22 14:33:01 +00:00
|
|
|
j++
|
|
|
|
}
|
|
|
|
stateSummaries := make([]*pb.StateSummary, len(blocks))
|
|
|
|
if finalized {
|
|
|
|
if chain.CanonicalRoots == nil {
|
|
|
|
chain.CanonicalRoots = map[[32]byte]bool{}
|
|
|
|
}
|
|
|
|
for i, b := range blocks {
|
|
|
|
bRoot, err := b.Block.HashTreeRoot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
stateSummaries[i] = &pb.StateSummary{
|
|
|
|
Slot: b.Block.Slot,
|
|
|
|
Root: bRoot[:],
|
|
|
|
}
|
|
|
|
chain.CanonicalRoots[bRoot] = true
|
|
|
|
}
|
|
|
|
require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries))
|
|
|
|
require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{
|
|
|
|
Epoch: helpers.SlotToEpoch(stateSummaries[len(stateSummaries)-1].Slot),
|
|
|
|
Root: stateSummaries[len(stateSummaries)-1].Root,
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
}
|
2021-05-17 19:25:59 +00:00
|
|
|
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
2020-10-22 14:33:01 +00:00
|
|
|
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
|
|
|
|
req *pb.BeaconBlocksByRangeRequest, processBlocks func([]*ethpb.SignedBeaconBlock)) error {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
|
|
|
defer wg.Done()
|
|
|
|
blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
|
2021-02-16 07:45:34 +00:00
|
|
|
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
code, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
|
|
|
|
if err != nil && err != io.EOF {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if code != 0 || err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
blk := testutil.NewBeaconBlock()
|
2021-03-21 19:07:42 +00:00
|
|
|
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, blk))
|
2021-02-16 07:45:34 +00:00
|
|
|
if blk.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
|
2020-10-22 14:33:01 +00:00
|
|
|
t.Errorf("Received unexpected block slot %d", blk.Block.Slot)
|
|
|
|
}
|
|
|
|
blocks = append(blocks, blk)
|
|
|
|
}
|
|
|
|
processBlocks(blocks)
|
|
|
|
})
|
|
|
|
stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
|
|
|
require.NoError(t, err)
|
2021-01-05 13:55:23 +00:00
|
|
|
if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
|
2020-10-22 14:33:01 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
|
|
|
t.Fatal("Did not receive stream within 1 sec")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("process normal range", func(t *testing.T) {
|
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
|
|
p2 := p2ptest.NewTestP2P(t)
|
2020-12-16 16:56:21 +00:00
|
|
|
d := db.SetupDB(t)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
p1.Connect(p2)
|
|
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
|
2020-10-22 14:33:01 +00:00
|
|
|
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
|
|
StartSlot: 1,
|
|
|
|
Step: 1,
|
|
|
|
Count: 64,
|
|
|
|
}
|
2021-03-21 19:07:42 +00:00
|
|
|
saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
hook.Reset()
|
|
|
|
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
|
|
|
|
assert.Equal(t, req.Count, uint64(len(blocks)))
|
|
|
|
for _, blk := range blocks {
|
2021-02-16 07:45:34 +00:00
|
|
|
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
|
2021-02-16 07:45:34 +00:00
|
|
|
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
|
2020-10-22 14:33:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("process non linear blocks", func(t *testing.T) {
|
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
|
|
p2 := p2ptest.NewTestP2P(t)
|
2020-12-16 16:56:21 +00:00
|
|
|
d := db.SetupDB(t)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
p1.Connect(p2)
|
|
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
|
2020-10-22 14:33:01 +00:00
|
|
|
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
|
|
StartSlot: 1,
|
|
|
|
Step: 1,
|
|
|
|
Count: 64,
|
|
|
|
}
|
2021-03-21 19:07:42 +00:00
|
|
|
saveBadBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, 2, true)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
hook.Reset()
|
|
|
|
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
|
|
|
|
assert.Equal(t, uint64(2), uint64(len(blocks)))
|
|
|
|
prevRoot := [32]byte{}
|
|
|
|
for _, blk := range blocks {
|
2021-02-16 07:45:34 +00:00
|
|
|
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
|
2021-02-16 07:45:34 +00:00
|
|
|
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
|
2020-10-22 14:33:01 +00:00
|
|
|
}
|
|
|
|
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
|
|
|
|
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("process non linear blocks with 2nd bad batch", func(t *testing.T) {
|
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
|
|
p2 := p2ptest.NewTestP2P(t)
|
2020-12-16 16:56:21 +00:00
|
|
|
d := db.SetupDB(t)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
p1.Connect(p2)
|
|
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
|
2021-03-21 19:07:42 +00:00
|
|
|
r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
|
2020-10-22 14:33:01 +00:00
|
|
|
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
|
|
StartSlot: 1,
|
|
|
|
Step: 1,
|
|
|
|
Count: 128,
|
|
|
|
}
|
2021-03-21 19:07:42 +00:00
|
|
|
saveBadBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, 65, true)
|
2020-10-22 14:33:01 +00:00
|
|
|
|
|
|
|
hook.Reset()
|
|
|
|
err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
|
|
|
|
assert.Equal(t, uint64(65), uint64(len(blocks)))
|
|
|
|
prevRoot := [32]byte{}
|
|
|
|
for _, blk := range blocks {
|
2021-02-16 07:45:34 +00:00
|
|
|
if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
|
2020-10-22 14:33:01 +00:00
|
|
|
t.Errorf("Block slot is out of range: %d is not within [%d, %d)",
|
2021-02-16 07:45:34 +00:00
|
|
|
blk.Block.Slot, req.StartSlot, req.StartSlot.Add(req.Count*req.Step))
|
2020-10-22 14:33:01 +00:00
|
|
|
}
|
|
|
|
if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
|
|
|
|
t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("only return finalized blocks", func(t *testing.T) {
	// Wire up two in-memory p2p hosts; p2 will issue the by-range request to p1.
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	d := db.SetupDB(t)

	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
	// Effectively unlimited bucket so rate limiting never interferes with this test.
	r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 1,
		Step:      1,
		Count:     64,
	}
	// Save a finalized chain covering slots [1, 64].
	saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)

	req.StartSlot = 65
	req.Step = 1
	req.Count = 128
	// Save unfinalized chain.
	saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, false)

	// Request from slot 1 across both chains; only finalized blocks must come back.
	req.StartSlot = 1
	hook.Reset()
	err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
		// Exactly the 64 finalized blocks (slots 1..64) are expected.
		assert.Equal(t, uint64(64), uint64(len(blocks)))
		prevRoot := [32]byte{}
		for _, blk := range blocks {
			// Valid slots are [StartSlot, StartSlot+Count*Step) = [1, 65).
			if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
				t.Errorf("Block slot is out of range: %d is not within [%d, 65)",
					blk.Block.Slot, req.StartSlot)
			}
			// NOTE(review): prevRoot is never reassigned in this loop, so this
			// linearity check can never fire — confirm whether it should track
			// each block's hash tree root.
			if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
				t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
			}
		}
	})
	assert.NoError(t, err)
	require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
|
|
|
|
t.Run("reject duplicate and non canonical blocks", func(t *testing.T) {
	// Wire up two in-memory p2p hosts; p2 will issue the by-range request to p1.
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	d := db.SetupDB(t)

	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	r := &Service{cfg: &Config{P2P: p1, DB: d, Chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
	// Effectively unlimited bucket so rate limiting never interferes with this test.
	r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, false)
	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 1,
		Step:      1,
		Count:     64,
	}
	// Save a finalized (canonical) chain covering slots [1, 64].
	saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, true)

	// Create a duplicate set of unfinalized blocks.
	req.StartSlot = 1
	req.Step = 1
	req.Count = 300
	// Save unfinalized chain.
	saveBlocks(d, r.cfg.Chain.(*chainMock.ChainService), req, false)

	// Request only the first 64 slots; duplicates and non-canonical blocks
	// must be filtered out of the response.
	req.Count = 64
	hook.Reset()
	err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
		// Exactly one canonical block per slot in [1, 64] is expected.
		assert.Equal(t, uint64(64), uint64(len(blocks)))
		prevRoot := [32]byte{}
		for _, blk := range blocks {
			// Valid slots are [StartSlot, StartSlot+Count*Step) = [1, 65).
			if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
				t.Errorf("Block slot is out of range: %d is not within [%d, 65)",
					blk.Block.Slot, req.StartSlot)
			}
			// NOTE(review): prevRoot is never reassigned in this loop, so this
			// linearity check can never fire — confirm whether it should track
			// each block's hash tree root.
			if prevRoot != [32]byte{} && bytesutil.ToBytes32(blk.Block.ParentRoot) != prevRoot {
				t.Errorf("non linear chain received, expected %#x but got %#x", prevRoot, blk.Block.ParentRoot)
			}
		}
	})
	assert.NoError(t, err)
	require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
})
|
|
|
|
}
|