mirror of
https://gitlab.com/pulsechaincom/prysm-pulse.git
synced 2024-12-29 06:37:17 +00:00
62da94bd0b
* Modify `GetValidatorParticipation` to use new state service (#5409)
* Skip 2 more minimal tests
* Update readme
* gaz
* Fix import and not use
* Update workspace for new spec test
* Fix workspace
* Update workspace with new ethapi commit
* Unblock a few tests
* fixed block op test
* gaz
* Skip gen state test (test setup issue
* Updated hysteresis config
* Updated epoch processing for new hysteresis
* Updated tests
* regen proto beacon
* update state util for state root
* update state types
* update getter and setters
* update compute domain and get domain and tests
* update validators
* Add forkdata proto
* Updated compute domain api, moved it to helper pkg
* Fixed all core tests
* Fixed all the sync tests
* Fixed all the rpc tests
* Conflict fix
* visibility
* Fixed validator tests
* Fixing test util
* Fixed rest of non spec tests
* Fixed a bug proposer index wasn't included
* gaz
* Updated eth1 data voting period to epoch based
* Fixed failed tests
* fix bug
* fix error
* Fixed more misc tests
* Add new SignedAggregateAndProof to pass spec test
* Update minimalConfig.PersistentCommitteePeriod
* allow to rebuild trie
* Skip e2e tests
* Align aggregator action with v0.11 (#5146)
* Remove Head Root from Beacon Block by Range Request (#5165)
* make proto changes
* remove head root
* add back herumi's library
* Update ethapi in workspace, started fixing test. Hand off to Nishant
* fix build
* Align finalized slot check with v0.11 (#5166)
* Add DoS resistance for v0.11 (#5158)
* Add Fork Digest Helper (#5173)
* Extend DoS prevention to rest of operation objects (#5174)
* Update mapping
* Add caches
* Update seen block in validation pipeline
* Update seen att in validation pipeline
* Update seen att in validation pipeline
* Fixed rest of tests
* Gazelle
* Better writes
* Lint
* Preston's feedback
* Switched to LRU cache and fixed tests
* Gazelle
* Fix test
* Update proposer slashing
* Update proposer slashing
* Fixed a block test
* Update exit
* Update attester slashing
* Raul's feedback
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Add remote keymanager (#5133)
* Add remote keymanager
* Add generic signRoot() helper
* Add tests for remote keymanager
* NewRemote -> NewRemoteWallet
* signRoot -> signOject, to increase reuse
* Fix end-to-end compile error
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Add Snappy Framing to the Encoder (#5172)
* change to framing
* more fixes
* fix everything
* add stricter limits
* preston feedback
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: rauljordan <raul@prysmaticlabs.com>
* Move Subnet Functionality to its Own File (#5179)
* move subnets to their own file
* fix build fail
* build
* Update beacon-chain/p2p/discovery_test.go
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Verify proposer signature in sync (#5206)
* Fix Signed Attestation In Sync (#5207)
* Add Eth2 Fork ENR Functionality (#5181)
* add fork entry enr
* add in fork
* add the required fork entry to node
* add and retrieve fork entry
* await state initialized
* utilize new structure
* more progress, utilizing a config map instead
* send the genesis validators root via the event feed
* struct method for discovery
* fix broken builds
* fixed up more tsts using state feed initializer
* fix up most tests
* only one more failing test
* almost done with tests
* p2p tests all pass
* config fix
* fix blockchain test
* gaz
* add in todo
* lint
* add compare func
* ensure fork ENR versions match between peers
* add in test for discovery
* test name
* tests complete
* tests done
* done
* comments
* fix all flakes
* addressed comments
* build using ssz gen
* marshal record
* use custom ssz
* deduplicate import
* fix build
* add enr proto
* p2p tests done
Co-authored-by: nisdas <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Verify aggregator signature in sync (#5208)
* Add Fork Digest For Gossip Topics (#5191)
* update for the day
* fix remaining failing test
* fix one more test
* change message
* Apply suggestions from code review
Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* terence's review
* implement fork digest'
* align digest to interface'
* passed all tests
* spawn in goroutine
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Fix Incorrect Attester Slashing Method (#5229)
* Remove keystore keymanager from validator (#5236)
* Remove keystore keymanager from validator
* Update dependency
* Update validator/flags/flags.go
* Update validator/flags/flags.go
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* fix broadcaster
* update metrics with fork digest for p2p (#5251)
* update metrics with fork digest for p2p
* update p2p metrics
* update metrics using att values
* wrapped up
* fix bug
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Fix incorrect domain type comments (#5250)
* Fix incorrect domain type comments
* fix broken broadcast test
* fix tests
* include protocol suffix
* lint
* fix test
* resolve broken slasher test'
* fix config override
* Remove deprecated parameters (#5249)
* Avoid div by zero in extreme balance case (#5273)
* Return effective balance increment instead of 1
* Update to new spec tests v0.11.1
* Revert "Regen historical states for `new-state-mgmt` compatibility (#5261)"
This reverts commit df9a534826
.
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Revert "Remove deprecated parameters (#5249)" (#5276)
This reverts commit 7d17c9ac3455ee15c67b3645485693309216bc97.
* Verify block proposer index before gossip (#5274)
* Update pipeline
* Update tests
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Add in Proposer Index to Custom HTR (#5269)
* fix test
* Update beacon-chain/state/stateutil/blocks_test.go
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Resolve Flakey P2P Tests (#5285)
* double time for flakey test
* fix test flakeyness in p2p:
* flakey
* time tolerance
* greater tolerance
* release resources correctly (#5287)
* Enable NOISE Handshake by Default v0.11 (#5272)
* noise handshakes by default
* fix build
* noisy noise everywhere
* deprecated noisy noise flag with more noise
* add secio as fallback
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
* new ports
* fix broken build
* Make `new-state-mgmt` canonical (#5289)
* Invert the flags
* Update checking messages
* Fixed all db tests
* Fixed rest of the block chain tests
* Fix chain race tests
* Fixed rpc tests
* Disable sounds better...
* Merge branch 'v0.11' into invert-new-state-mgmt
* Merge refs/heads/v0.11 into invert-new-state-mgmt
* Fix export
* Merge branch 'invert-new-state-mgmt' of github.com:prysmaticlabs/prysm into invert-new-state-mgmt
* Fix conflict tests
* Gazelle
* Merge refs/heads/v0.11 into invert-new-state-mgmt
* Merge refs/heads/v0.11 into invert-new-state-mgmt
* resolve flakeyness
* Detect Proposer Slashing Implementation (#5139)
* detect blocks
* detect blocks
* use stub
* use stub
* use stub
* todo
* fix test
* add tests and utils
* fix imports
* fix imports
* fix comment
* todo
* proposerIndex
* fix broken test
* formatting and simplified if
* Update slasher/detection/service.go
* Update slasher/detection/testing/utils.go
Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* fixed up final comments
* better naming
* Update slasher/detection/service.go
* Update slasher/detection/service.go
* Update slasher/detection/service.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* no more named args
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
* Add Metadata And Ping RPC methods (#5271)
* add new proto files
* add flag and helper
* add initializer
* imports
* add ping method
* add receive/send ping request
* add ping test
* refactor rpc methods and add ping test
* finish adding all tests
* fix up tests
* Apply suggestions from code review
* lint
* imports
* lint
* Update beacon-chain/p2p/service.go
* Update shared/cmd/flags.go
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Updates for remote keymanager (#5260)
* Update to slash by slot instead of epoch (#5297)
* change to slash by slot instead of epoch
* gaz
* fix test
* fix test
* fix infinite loop on error parse
* Update proposer protection to v0.11 (#5292)
* Complete most of changes
* Fix other tests
* Test progress
* Tests
* Finish tests
* update pbs
* Fix mocked tests
* Gazelle
* pt 2
* Fix
* Fixes
* Fix tests wit hwrong copying
* Implement `SubscribeCommitteeSubnet` method (#5299)
* Add client implementation
* Update workspace
* Update server
* Update service
* Gaz
* Mocks
* Fixed validator tests
* Add round trip tests
* Fixed subnet test
* Comment
* Update committee cache
* Comment
* Update RPC
* Fixed test
* Nishant's comment
* Gaz
* Refresh ENR is for epoch
* Needs to be append
* Validator subscribe subnet to next epoch (#5312)
* Alert to subscribe to next epoch
* Fixed tests
* Comments
* Fixed tests
* Update validator/client/validator.go
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Revert "Revert "Remove deprecated parameters (#5249)" (#5276)" (#5277)
This reverts commit 47e5a2cf96f5add151bf135a5352c2dad7922615.
* Aggregate on demand for v0.11 (#5302)
* Add client implementation
* Update workspace
* Update server
* Update service
* Gaz
* Mocks
* Fixed validator tests
* Add round tirp tests
* Fixed subnet test
* Wait 1/3 on validator side
* Lint
* Comment
* Update committee cache
* Comment
* Update RPC
* Fixed test
* Nishant's comment
* Gaz
* Refresh ENR is for epoch
* Needs to be append
* Fixed duplication
* Tests
* Skip e2e
* Update beacon-chain/rpc/validator/aggregator.go
Co-Authored-By: shayzluf <thezluf@gmail.com>
* Apply suggestions from code review
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* Refactor Dynamic Subscriptions (#5318)
* clean up
* comment
* metrics
* fix
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Fix listindexed attestations and detect historic attestations (#5321)
* fix list indexed attestations
* fix tests
* goimports
* names
* Add check for slot == 0 (#5322)
* Change attester protection to return default if DB is empty (#5323)
* Change how default values are set
* Remove unused imports
* Remove wasteful db call
* Fix db tests
* Fix db test
* fix it (#5326)
* V0.11 run time fixes to use interop config (#5324)
* Started testing
* Bunch of fixes
* use-interop
* Sync with v0.11
* Conflict
* Uncomment wait for activation
* Move pending block queue from subscriber to validator pipeline
* Merge branch 'v0.11' into use-interop-config
* passing tests
* Merge refs/heads/v0.11 into use-interop-config
* Merge refs/heads/v0.11 into use-interop-config
* Nil Checks in Process Attestation v0.11 (#5331)
* Started testing
* Bunch of fixes
* use-interop
* Sync with v0.11
* Uncomment wait for activation
* Move pending block queue from subscriber to validator pipeline
* passing tests
* nil checks to prevent panics
* lint
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Validator batch subscribe subnets (#5332)
* Update both beacon node and validator
* Comments
* Tests
* Lint
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Validator smarter subscribe (#5334)
* Fix incorrect proposer index calculation (#5336)
* Use correct parent state
* Fixed test
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* enhance error
* enhance error
* Update P2P Service to Handle Local Metadata (#5319)
* add metadata to ENR
* add new methods
* glue everything
* fix all tests and refs
* add tests
* add more tests
* Apply suggestions from code review
* fix method
* raul's review
* gaz
* fix test setup
* fix all tests
* better naming
* fix broken test
* validate nil
Co-authored-by: rauljordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Revert "Revert "Revert "Remove deprecated parameters (#5249)" (#5276)" (#5277)" (#5343)
This reverts commit e5aef1686e582fc2077767c42187c8527f3a742f.
* Wait for Genesis Event to Start P2P (#5303)
* use event feed for state initialized events
* add in handler for tests
* wait till genesis for p2p
* Apply suggestions from code review
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Avoid duplicated aggregation request (#5346)
* Avoid duplicated aggregation request
* Test and lock
* Gaz
* Fix Validate For Metadata (#5348)
* return true
* shay's review
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Multiple Proposer Slots Allowed Per Epoch for Validators (#5344)
* allow multiple proposer slots
* multi propose
* proposer indices to slots map
* remove deprecated comm assign
* Apply suggestions from code review
* resolve broken tests, add logic in validator client
* fix val tests
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Networking Fixes (#5349)
* close stream later
* add ping method
* add method
* lint
* More efficient aggregation on demand (#5354)
* Return Nil Error if Pre-Genesis in P2P Service Healthz Check (#5355)
* pregenesis healthz check:
* optimal
* right order
* Update beacon-chain/p2p/service.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/p2p/service.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* no comment
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Release DiscoveryV5 for Testnet Restart (#5357)
* release discv5
* fix build
* Fix Overflow in Status Check (#5361)
* fix overflow
* Apply suggestions from code review
* fix after merge
* Make Mainnet Config Default, No More Demo Config (#5367)
* bye bye demo config
* gaz
* fix usage
* fix dep
* gaz
* Update default balance for sendDeposits
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
* Use FastSSZ Marshal/Unmarshal for DB Encodings in v0.11.1 (#5351)
* try
* use marshaler structure for db instead of proto
* white list types
* attempt
* revert
* testutil.NewBeaconState()
* Fully populate fields for round trip ssz marshal
* fix //beacon-chain/db/kv:go_default_test
* more passing tests
* another test target passed
* fixed stategen
* blockchain tests green
* passing sync
* more targets fixed
* more test fixes in rpc/validator
* most rpc val
* validators test fixes
* skip round robin old
* aggregate test
* whitelist done
* Update beacon-chain/rpc/validator/attester_test.go
* edit baz
* Fixed tests
* Fixed getblock test
* Add back init
* reduce test size
* fix broken build
* tests pass
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* Reconnect slasher streams on beacon node shutdown (#5376)
* restart streams on beacon node shutdown
* fix comment
* remove export
* ivan feedback
* ivan feedback
* case insensitive
* Update slasher/beaconclient/receivers.go
* raul feedback
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Amend Faucet to Offer 32.5 ETH for v0.11 (#5378)
* deposit amount in faucet
* fix eth amount
* gas cost
* unskip exec transition test
* Revert "Enable NOISE Handshake by Default v0.11 (#5272)" (#5381)
This reverts commit a8d32d504a8f923cdf7fa9dfc2684f8804fbab92.
* use string for deposit flag
* Update Bootnode to v0.11 (#5387)
* fix bootnode
* add changes
* gaz
* fix docker
* build fix
* fix flaky test
* Unskip E2E for V0.11 (#5386)
* Begin work on fixing e2e for v0.11
* Start bootnode work
* Begin implementing bootnode into e2e
* Fix E2E for v0.11
* Remove extra
* gaz
* Remove unused key gen code
* Remove trailing multiaddr code
* add skip for slashing
* Fix slashing e2e
* Fix docker image build
* Update beacon-chain/p2p/broadcaster_test.go
* Update GetValidatorParticipation
* Update tests
* Gaz
* Pass E2E Tests for v0.11 and Enable Attestation Subnets By Default (#5407)
* Update README.md
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Apply suggestions from code review
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/p2p/config.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update shared/keystore/deposit_input.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update tools/faucet/server.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update beacon-chain/p2p/service.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update shared/benchutil/pregen_test.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update shared/benchutil/pregen_test.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update proto/beacon/p2p/v1/BUILD.bazel
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update shared/benchutil/pregen_test.go
Co-Authored-By: Preston Van Loon <preston@prysmaticlabs.com>
* Update shared/bls/spectest/aggregate_verify_test.go
* Addressed feedback. All test passing
* Update beacon-chain/core/blocks/block_operations_fuzz_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/core/blocks/block_operations_test.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update shared/testutil/helpers.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Update beacon-chain/core/helpers/signing_root.go
Co-Authored-By: Ivan Martinez <ivanthegreatdev@gmail.com>
* Resolve Misc v0.11 Items (Raul) (#5414)
* address all comments
* set faucet
* nishant feedback
* Update beacon-chain/p2p/service.go
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Revert keymanager changes (#5416)
* Revert "Updates for remote keymanager (#5260)"
This reverts commit bbcd895db50ce5e7c0ecb64210471cf56f63b373.
* Revert "Remove keystore keymanager from validator (#5236)"
This reverts commit 46008770c162e741251e13772fd7356b43a9af87.
* Revert "Update eth2 wallet keymanager (#4984)"
This reverts commit 7f7ef43f218598a671aaeb327342d7e5130fe8b1.
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Update BLS and limit visibility (#5415)
* remove duplicated BLS, add golang.org/x/mod
* Update BLS and restrict visibility
* fix build
* Fix eth1data test and fix order of ops (#5413)
* use multiaddr builder (#5419)
* Unskip benchutil and minor v0.11 fixes (#5417)
* Unskip benchutil tests
* Remove protos and gaz
* Fixes
* Networking Fixes (#5421)
* check
* fix test
* fix size
* fix test
* more fixes
* fix test again
* Update ethereum APIs with latest master
* Error handling for v0.11 tests (#5428)
* Proper err handling for tests
* Lint
* Fixed rest of the tests
* Gaz
* Fixed old master tests
* Rm old aggregate_test.go
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
Co-authored-by: Jim McDonald <Jim@mcdee.net>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
* Validator participation with fallback
* Add back the old tests
* Fixed test
* Merge branch 'master' into validator-participation-fallback
* Merge refs/heads/master into validator-participation-fallback
1955 lines
52 KiB
Go
1955 lines
52 KiB
Go
package beacon
|
|
|
|
import (
|
|
"context"
|
|
"encoding/binary"
|
|
"fmt"
|
|
"reflect"
|
|
"sort"
|
|
"strconv"
|
|
"strings"
|
|
"testing"
|
|
|
|
"github.com/gogo/protobuf/proto"
|
|
ptypes "github.com/gogo/protobuf/types"
|
|
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
|
"github.com/prysmaticlabs/go-ssz"
|
|
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
|
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
|
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
|
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
|
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
|
"github.com/prysmaticlabs/prysm/shared/params"
|
|
"github.com/prysmaticlabs/prysm/shared/testutil"
|
|
)
|
|
|
|
func init() {
|
|
// Use minimal config to reduce test setup time.
|
|
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
|
flags.Init(&flags.GlobalFlags{
|
|
MaxPageSize: 250,
|
|
})
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_CannotRequestFutureEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
wanted := "Cannot retrieve information about an epoch in the future"
|
|
if _, err := bs.ListValidatorBalances(
|
|
ctx,
|
|
ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{
|
|
Epoch: 1,
|
|
},
|
|
},
|
|
); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_NoResults(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
wanted := ðpb.ValidatorBalances{
|
|
Balances: make([]*ethpb.ValidatorBalances_Balance, 0),
|
|
TotalSize: int32(0),
|
|
NextPageToken: strconv.Itoa(0),
|
|
}
|
|
res, err := bs.ListValidatorBalances(
|
|
ctx,
|
|
ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(wanted, res) {
|
|
t.Errorf("Wanted %v, received %v", wanted, res)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
numItems := 100
|
|
validators := make([]*ethpb.Validator, numItems)
|
|
balances := make([]uint64, numItems)
|
|
balancesResponse := make([]*ethpb.ValidatorBalances_Balance, numItems)
|
|
for i := 0; i < numItems; i++ {
|
|
validators[i] = ðpb.Validator{
|
|
PublicKey: pubKey(uint64(i)),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
}
|
|
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
balancesResponse[i] = ðpb.ValidatorBalances_Balance{
|
|
PublicKey: pubKey(uint64(i)),
|
|
Index: uint64(i),
|
|
Balance: params.BeaconConfig().MaxEffectiveBalance,
|
|
}
|
|
}
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetBalances(balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
res, err := bs.ListValidatorBalances(
|
|
ctx,
|
|
ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !reflect.DeepEqual(balancesResponse, res.Balances) {
|
|
t.Errorf("Wanted %v, received %v", balancesResponse, res.Balances)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_DefaultResponse_FromArchive(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
currentNumValidators := 100
|
|
numOldBalances := 50
|
|
validators := make([]*ethpb.Validator, currentNumValidators)
|
|
balances := make([]uint64, currentNumValidators)
|
|
oldBalances := make([]uint64, numOldBalances)
|
|
balancesResponse := make([]*ethpb.ValidatorBalances_Balance, numOldBalances)
|
|
for i := 0; i < currentNumValidators; i++ {
|
|
key := make([]byte, 48)
|
|
copy(key, strconv.Itoa(i))
|
|
validators[i] = ðpb.Validator{
|
|
PublicKey: key,
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
}
|
|
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
}
|
|
for i := 0; i < numOldBalances; i++ {
|
|
oldBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
key := make([]byte, 48)
|
|
copy(key, strconv.Itoa(i))
|
|
balancesResponse[i] = ðpb.ValidatorBalances_Balance{
|
|
PublicKey: key,
|
|
Index: uint64(i),
|
|
Balance: params.BeaconConfig().MaxEffectiveBalance,
|
|
}
|
|
}
|
|
// We archive old balances for epoch 50.
|
|
if err := db.SaveArchivedBalances(ctx, 50, oldBalances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(helpers.StartSlot(100) /* epoch 100 */); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetBalances(balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
res, err := bs.ListValidatorBalances(
|
|
ctx,
|
|
ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{
|
|
Epoch: 50,
|
|
},
|
|
},
|
|
)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !reflect.DeepEqual(balancesResponse, res.Balances) {
|
|
t.Errorf("Wanted %v, received %v", balancesResponse, res.Balances)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_PaginationOutOfRange(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
setupValidators(t, db, 3)
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorBalancesRequest{PageToken: strconv.Itoa(1), PageSize: 100}
|
|
wanted := fmt.Sprintf("page start %d >= list %d", req.PageSize, len(headState.Balances()))
|
|
if _, err := bs.ListValidatorBalances(context.Background(), req); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_ExceedsMaxPageSize(t *testing.T) {
|
|
bs := &Server{}
|
|
exceedsMax := int32(flags.Get().MaxPageSize + 1)
|
|
|
|
wanted := fmt.Sprintf(
|
|
"Requested page size %d can not be greater than max size %d",
|
|
exceedsMax,
|
|
flags.Get().MaxPageSize,
|
|
)
|
|
req := ðpb.ListValidatorBalancesRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
|
|
if _, err := bs.ListValidatorBalances(context.Background(), req); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func pubKey(i uint64) []byte {
|
|
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
|
binary.LittleEndian.PutUint64(pubKey, i)
|
|
return pubKey
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
setupValidators(t, db, 100)
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{State: headState},
|
|
}
|
|
|
|
tests := []struct {
|
|
req *ethpb.ListValidatorBalancesRequest
|
|
res *ethpb.ValidatorBalances
|
|
}{
|
|
{req: ðpb.ListValidatorBalancesRequest{PublicKeys: [][]byte{pubKey(99)}},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{Index: 99, PublicKey: pubKey(99), Balance: 99},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: 1,
|
|
},
|
|
},
|
|
{req: ðpb.ListValidatorBalancesRequest{Indices: []uint64{1, 2, 3}},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{Index: 1, PublicKey: pubKey(1), Balance: 1},
|
|
{Index: 2, PublicKey: pubKey(2), Balance: 2},
|
|
{Index: 3, PublicKey: pubKey(3), Balance: 3},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: 3,
|
|
},
|
|
},
|
|
{req: ðpb.ListValidatorBalancesRequest{PublicKeys: [][]byte{pubKey(10), pubKey(11), pubKey(12)}},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{Index: 10, PublicKey: pubKey(10), Balance: 10},
|
|
{Index: 11, PublicKey: pubKey(11), Balance: 11},
|
|
{Index: 12, PublicKey: pubKey(12), Balance: 12},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: 3,
|
|
}},
|
|
{req: ðpb.ListValidatorBalancesRequest{PublicKeys: [][]byte{pubKey(2), pubKey(3)}, Indices: []uint64{3, 4}}, // Duplication
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{Index: 2, PublicKey: pubKey(2), Balance: 2},
|
|
{Index: 3, PublicKey: pubKey(3), Balance: 3},
|
|
{Index: 4, PublicKey: pubKey(4), Balance: 4},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: 3,
|
|
}},
|
|
{req: ðpb.ListValidatorBalancesRequest{PublicKeys: [][]byte{{}}, Indices: []uint64{3, 4}}, // Public key has a blank value
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{Index: 3, PublicKey: pubKey(3), Balance: 3},
|
|
{Index: 4, PublicKey: pubKey(4), Balance: 4},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: 2,
|
|
}},
|
|
}
|
|
for _, test := range tests {
|
|
res, err := bs.ListValidatorBalances(context.Background(), test.req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(res, test.res) {
|
|
t.Errorf("Expected %v, received %v", test.res, res)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
count := 1000
|
|
setupValidators(t, db, count)
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
tests := []struct {
|
|
req *ethpb.ListValidatorBalancesRequest
|
|
res *ethpb.ValidatorBalances
|
|
}{
|
|
{req: ðpb.ListValidatorBalancesRequest{PageToken: strconv.Itoa(1), PageSize: 3},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{PublicKey: pubKey(3), Index: 3, Balance: uint64(3)},
|
|
{PublicKey: pubKey(4), Index: 4, Balance: uint64(4)},
|
|
{PublicKey: pubKey(5), Index: 5, Balance: uint64(5)}},
|
|
NextPageToken: strconv.Itoa(2),
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorBalancesRequest{PageToken: strconv.Itoa(10), PageSize: 5},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{PublicKey: pubKey(50), Index: 50, Balance: uint64(50)},
|
|
{PublicKey: pubKey(51), Index: 51, Balance: uint64(51)},
|
|
{PublicKey: pubKey(52), Index: 52, Balance: uint64(52)},
|
|
{PublicKey: pubKey(53), Index: 53, Balance: uint64(53)},
|
|
{PublicKey: pubKey(54), Index: 54, Balance: uint64(54)}},
|
|
NextPageToken: strconv.Itoa(11),
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorBalancesRequest{PageToken: strconv.Itoa(33), PageSize: 3},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{PublicKey: pubKey(99), Index: 99, Balance: uint64(99)},
|
|
{PublicKey: pubKey(100), Index: 100, Balance: uint64(100)},
|
|
{PublicKey: pubKey(101), Index: 101, Balance: uint64(101)},
|
|
},
|
|
NextPageToken: "34",
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorBalancesRequest{PageSize: 2},
|
|
res: ðpb.ValidatorBalances{
|
|
Balances: []*ethpb.ValidatorBalances_Balance{
|
|
{PublicKey: pubKey(0), Index: 0, Balance: uint64(0)},
|
|
{PublicKey: pubKey(1), Index: 1, Balance: uint64(1)}},
|
|
NextPageToken: strconv.Itoa(1),
|
|
TotalSize: int32(count)}},
|
|
}
|
|
for _, test := range tests {
|
|
res, err := bs.ListValidatorBalances(context.Background(), test.req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(res, test.res) {
|
|
t.Errorf("Expected %v, received %v", test.res, res)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
setupValidators(t, db, 1)
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{State: headState},
|
|
}
|
|
|
|
req := ðpb.ListValidatorBalancesRequest{Indices: []uint64{uint64(1)}}
|
|
wanted := "does not exist"
|
|
if _, err := bs.ListValidatorBalances(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_FromArchive(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
epoch := uint64(0)
|
|
validators, balances := setupValidators(t, db, 100)
|
|
|
|
if err := db.SaveArchivedBalances(ctx, epoch, balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
newerBalances := make([]uint64, len(balances))
|
|
for i := 0; i < len(newerBalances); i++ {
|
|
newerBalances[i] = balances[i] * 2
|
|
}
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch * 3); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetBalances(newerBalances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{Epoch: 0},
|
|
Indices: []uint64{uint64(1)},
|
|
}
|
|
res, err := bs.ListValidatorBalances(context.Background(), req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
// We should expect a response containing the old balance from epoch 0,
|
|
// not the new balance from the current state.
|
|
want := []*ethpb.ValidatorBalances_Balance{
|
|
{
|
|
PublicKey: validators[1].PublicKey,
|
|
Index: 1,
|
|
Balance: balances[1],
|
|
},
|
|
}
|
|
if !reflect.DeepEqual(want, res.Balances) {
|
|
t.Errorf("Wanted %v, received %v", want, res.Balances)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidatorBalances_FromArchive_NewValidatorNotFound(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
epoch := uint64(0)
|
|
_, balances := setupValidators(t, db, 100)
|
|
|
|
if err := db.SaveArchivedBalances(ctx, epoch, balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
newValidators, newBalances := setupValidators(t, db, 200)
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch * 3); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetValidators(newValidators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetBalances(newBalances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{Epoch: 0},
|
|
Indices: []uint64{1, 150, 161},
|
|
}
|
|
if _, err := bs.ListValidatorBalances(context.Background(), req); err == nil || !strings.Contains(err.Error(), "does not exist") {
|
|
t.Errorf("Wanted out of range error for including newer validators in the arguments, received %v", err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_CannotRequestFutureEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
wanted := "Cannot retrieve information about an epoch in the future"
|
|
if _, err := bs.ListValidators(
|
|
ctx,
|
|
ðpb.ListValidatorsRequest{
|
|
QueryFilter: ðpb.ListValidatorsRequest_Epoch{
|
|
Epoch: 1,
|
|
},
|
|
},
|
|
); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_NoResults(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
wanted := ðpb.Validators{
|
|
ValidatorList: make([]*ethpb.Validators_ValidatorContainer, 0),
|
|
TotalSize: int32(0),
|
|
NextPageToken: strconv.Itoa(0),
|
|
}
|
|
res, err := bs.ListValidators(
|
|
ctx,
|
|
ðpb.ListValidatorsRequest{
|
|
QueryFilter: ðpb.ListValidatorsRequest_Epoch{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(wanted, res) {
|
|
t.Errorf("Wanted %v, received %v", wanted, res)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_OnlyActiveValidators(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
count := 100
|
|
balances := make([]uint64, count)
|
|
validators := make([]*ethpb.Validator, count)
|
|
activeValidators := make([]*ethpb.Validators_ValidatorContainer, 0)
|
|
for i := 0; i < count; i++ {
|
|
pubKey := pubKey(uint64(i))
|
|
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
|
|
// We mark even validators as active, and odd validators as inactive.
|
|
if i%2 == 0 {
|
|
val := ðpb.Validator{
|
|
PublicKey: pubKey,
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
}
|
|
validators[i] = val
|
|
activeValidators = append(activeValidators, ðpb.Validators_ValidatorContainer{
|
|
Index: uint64(i),
|
|
Validator: val,
|
|
})
|
|
} else {
|
|
validators[i] = ðpb.Validator{
|
|
PublicKey: pubKey,
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: 0,
|
|
}
|
|
}
|
|
}
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetBalances(balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
received, err := bs.ListValidators(ctx, ðpb.ListValidatorsRequest{
|
|
Active: true,
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if !reflect.DeepEqual(activeValidators, received.ValidatorList) {
|
|
t.Errorf("Wanted %v, received %v", activeValidators, received.ValidatorList)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_NoPagination(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
validators, _ := setupValidators(t, db, 100)
|
|
want := make([]*ethpb.Validators_ValidatorContainer, len(validators))
|
|
for i := 0; i < len(validators); i++ {
|
|
want[i] = ðpb.Validators_ValidatorContainer{
|
|
Index: uint64(i),
|
|
Validator: validators[i],
|
|
}
|
|
}
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
}
|
|
|
|
received, err := bs.ListValidators(context.Background(), ðpb.ListValidatorsRequest{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if !reflect.DeepEqual(want, received.ValidatorList) {
|
|
t.Fatal("Incorrect respond of validators")
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_IndicesPubKeys(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
validators, _ := setupValidators(t, db, 100)
|
|
indicesWanted := []uint64{2, 7, 11, 17}
|
|
pubkeyIndicesWanted := []uint64{3, 5, 9, 15}
|
|
allIndicesWanted := append(indicesWanted, pubkeyIndicesWanted...)
|
|
want := make([]*ethpb.Validators_ValidatorContainer, len(allIndicesWanted))
|
|
for i, idx := range allIndicesWanted {
|
|
want[i] = ðpb.Validators_ValidatorContainer{
|
|
Index: idx,
|
|
Validator: validators[idx],
|
|
}
|
|
}
|
|
sort.Slice(want, func(i int, j int) bool {
|
|
return want[i].Index < want[j].Index
|
|
})
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
}
|
|
|
|
pubKeysWanted := make([][]byte, len(pubkeyIndicesWanted))
|
|
for i, indice := range pubkeyIndicesWanted {
|
|
pubKeysWanted[i] = pubKey(indice)
|
|
}
|
|
req := ðpb.ListValidatorsRequest{
|
|
Indices: indicesWanted,
|
|
PublicKeys: pubKeysWanted,
|
|
}
|
|
received, err := bs.ListValidators(context.Background(), req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if !reflect.DeepEqual(want, received.ValidatorList) {
|
|
t.Fatal("Incorrect respond of validators")
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_Pagination(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
count := 100
|
|
setupValidators(t, db, count)
|
|
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
}
|
|
|
|
tests := []struct {
|
|
req *ethpb.ListValidatorsRequest
|
|
res *ethpb.Validators
|
|
}{
|
|
{req: ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(1), PageSize: 3},
|
|
res: ðpb.Validators{
|
|
ValidatorList: []*ethpb.Validators_ValidatorContainer{
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(3),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 3,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(4),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 4,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(5),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 5,
|
|
},
|
|
},
|
|
NextPageToken: strconv.Itoa(2),
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(10), PageSize: 5},
|
|
res: ðpb.Validators{
|
|
ValidatorList: []*ethpb.Validators_ValidatorContainer{
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(50),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 50,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(51),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 51,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(52),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 52,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(53),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 53,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(54),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 54,
|
|
},
|
|
},
|
|
NextPageToken: strconv.Itoa(11),
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(33), PageSize: 3},
|
|
res: ðpb.Validators{
|
|
ValidatorList: []*ethpb.Validators_ValidatorContainer{
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(99),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 99,
|
|
},
|
|
},
|
|
NextPageToken: "",
|
|
TotalSize: int32(count)}},
|
|
{req: ðpb.ListValidatorsRequest{PageSize: 2},
|
|
res: ðpb.Validators{
|
|
ValidatorList: []*ethpb.Validators_ValidatorContainer{
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(0),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 0,
|
|
},
|
|
{
|
|
Validator: ðpb.Validator{
|
|
PublicKey: pubKey(1),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
Index: 1,
|
|
},
|
|
},
|
|
NextPageToken: strconv.Itoa(1),
|
|
TotalSize: int32(count)}},
|
|
}
|
|
for _, test := range tests {
|
|
res, err := bs.ListValidators(context.Background(), test.req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(res, test.res) {
|
|
t.Errorf("Incorrect validator response, wanted %v, received %v", test.res, res)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_PaginationOutOfRange(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
count := 1
|
|
validators, _ := setupValidators(t, db, count)
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(1), PageSize: 100}
|
|
wanted := fmt.Sprintf("page start %d >= list %d", req.PageSize, len(validators))
|
|
if _, err := bs.ListValidators(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_ExceedsMaxPageSize(t *testing.T) {
|
|
bs := &Server{}
|
|
exceedsMax := int32(flags.Get().MaxPageSize + 1)
|
|
|
|
wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, flags.Get().MaxPageSize)
|
|
req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
|
|
if _, err := bs.ListValidators(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_DefaultPageSize(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
validators, _ := setupValidators(t, db, 1000)
|
|
want := make([]*ethpb.Validators_ValidatorContainer, len(validators))
|
|
for i := 0; i < len(validators); i++ {
|
|
want[i] = ðpb.Validators_ValidatorContainer{
|
|
Index: uint64(i),
|
|
Validator: validators[i],
|
|
}
|
|
}
|
|
headState, err := db.HeadState(context.Background())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorsRequest{}
|
|
res, err := bs.ListValidators(context.Background(), req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
i := 0
|
|
j := params.BeaconConfig().DefaultPageSize
|
|
if !reflect.DeepEqual(res.ValidatorList, want[i:j]) {
|
|
t.Error("Incorrect respond of validators")
|
|
}
|
|
}
|
|
|
|
func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
numEpochs := 30
|
|
validators := make([]*ethpb.Validator, numEpochs)
|
|
for i := 0; i < numEpochs; i++ {
|
|
validators[i] = ðpb.Validator{
|
|
ActivationEpoch: uint64(i),
|
|
PublicKey: make([]byte, 48),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
}
|
|
}
|
|
want := make([]*ethpb.Validators_ValidatorContainer, len(validators))
|
|
for i := 0; i < len(validators); i++ {
|
|
want[i] = ðpb.Validators_ValidatorContainer{
|
|
Index: uint64(i),
|
|
Validator: validators[i],
|
|
}
|
|
}
|
|
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(helpers.StartSlot(30)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorsRequest{
|
|
QueryFilter: ðpb.ListValidatorsRequest_Genesis{
|
|
Genesis: true,
|
|
},
|
|
}
|
|
res, err := bs.ListValidators(context.Background(), req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(res.ValidatorList) != 1 {
|
|
t.Errorf("Wanted 1 validator at genesis, received %d", len(res.ValidatorList))
|
|
}
|
|
|
|
req = ðpb.ListValidatorsRequest{
|
|
QueryFilter: ðpb.ListValidatorsRequest_Epoch{
|
|
Epoch: 20,
|
|
},
|
|
}
|
|
res, err = bs.ListValidators(context.Background(), req)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !reflect.DeepEqual(res.ValidatorList, want[:21]) {
|
|
t.Errorf("Incorrect number of validators, wanted %d received %d", len(want[:21]), len(res.ValidatorList))
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidator(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
count := 30
|
|
validators := make([]*ethpb.Validator, count)
|
|
for i := 0; i < count; i++ {
|
|
validators[i] = ðpb.Validator{
|
|
ActivationEpoch: uint64(i),
|
|
PublicKey: pubKey(uint64(i)),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
}
|
|
}
|
|
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
tests := []struct {
|
|
req *ethpb.GetValidatorRequest
|
|
res *ethpb.Validator
|
|
wantErr bool
|
|
err string
|
|
}{
|
|
{
|
|
req: ðpb.GetValidatorRequest{
|
|
QueryFilter: ðpb.GetValidatorRequest_Index{
|
|
Index: 0,
|
|
},
|
|
},
|
|
res: validators[0],
|
|
wantErr: false,
|
|
},
|
|
{
|
|
req: ðpb.GetValidatorRequest{
|
|
QueryFilter: ðpb.GetValidatorRequest_Index{
|
|
Index: uint64(count - 1),
|
|
},
|
|
},
|
|
res: validators[count-1],
|
|
wantErr: false,
|
|
},
|
|
{
|
|
req: ðpb.GetValidatorRequest{
|
|
QueryFilter: ðpb.GetValidatorRequest_PublicKey{
|
|
PublicKey: pubKey(5),
|
|
},
|
|
},
|
|
res: validators[5],
|
|
wantErr: false,
|
|
},
|
|
{
|
|
req: ðpb.GetValidatorRequest{
|
|
QueryFilter: ðpb.GetValidatorRequest_PublicKey{
|
|
PublicKey: []byte("bad-keyxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
|
|
},
|
|
},
|
|
res: nil,
|
|
wantErr: true,
|
|
err: "No validator matched filter criteria",
|
|
},
|
|
{
|
|
req: ðpb.GetValidatorRequest{
|
|
QueryFilter: ðpb.GetValidatorRequest_Index{
|
|
Index: uint64(len(validators)),
|
|
},
|
|
},
|
|
res: nil,
|
|
wantErr: true,
|
|
err: fmt.Sprintf("there are only %d validators", len(validators)),
|
|
},
|
|
}
|
|
|
|
for _, test := range tests {
|
|
res, err := bs.GetValidator(context.Background(), test.req)
|
|
if test.wantErr && err != nil {
|
|
if !strings.Contains(err.Error(), test.err) {
|
|
t.Fatalf("Wanted %v, received %v", test.err, err)
|
|
}
|
|
} else if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !reflect.DeepEqual(test.res, res) {
|
|
t.Errorf("Wanted %v, got %v", test.res, res)
|
|
}
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
st := testutil.NewBeaconState()
|
|
if err := st.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: st,
|
|
},
|
|
}
|
|
|
|
wanted := "Cannot retrieve information about an epoch in the future"
|
|
if _, err := bs.GetValidatorActiveSetChanges(
|
|
ctx,
|
|
ðpb.GetValidatorActiveSetChangesRequest{
|
|
QueryFilter: ðpb.GetValidatorActiveSetChangesRequest_Epoch{
|
|
Epoch: 1,
|
|
},
|
|
},
|
|
); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
|
|
ctx := context.Background()
|
|
validators := make([]*ethpb.Validator, 8)
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
for i := 0; i < len(validators); i++ {
|
|
activationEpoch := params.BeaconConfig().FarFutureEpoch
|
|
withdrawableEpoch := params.BeaconConfig().FarFutureEpoch
|
|
exitEpoch := params.BeaconConfig().FarFutureEpoch
|
|
slashed := false
|
|
balance := params.BeaconConfig().MaxEffectiveBalance
|
|
// Mark indices divisible by two as activated.
|
|
if i%2 == 0 {
|
|
activationEpoch = 0
|
|
} else if i%3 == 0 {
|
|
// Mark indices divisible by 3 as slashed.
|
|
withdrawableEpoch = params.BeaconConfig().EpochsPerSlashingsVector
|
|
slashed = true
|
|
} else if i%5 == 0 {
|
|
// Mark indices divisible by 5 as exited.
|
|
exitEpoch = 0
|
|
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
|
} else if i%7 == 0 {
|
|
// Mark indices divisible by 7 as ejected.
|
|
exitEpoch = 0
|
|
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
|
balance = params.BeaconConfig().EjectionBalance
|
|
}
|
|
if err := headState.UpdateValidatorAtIndex(uint64(i), ðpb.Validator{
|
|
ActivationEpoch: activationEpoch,
|
|
PublicKey: pubKey(uint64(i)),
|
|
EffectiveBalance: balance,
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
WithdrawableEpoch: withdrawableEpoch,
|
|
Slashed: slashed,
|
|
ExitEpoch: exitEpoch,
|
|
}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
}
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
FinalizationFetcher: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{Epoch: 0},
|
|
},
|
|
}
|
|
res, err := bs.GetValidatorActiveSetChanges(ctx, ðpb.GetValidatorActiveSetChangesRequest{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wantedActive := [][]byte{
|
|
pubKey(0),
|
|
pubKey(2),
|
|
pubKey(4),
|
|
pubKey(6),
|
|
}
|
|
wantedActiveIndices := []uint64{0, 2, 4, 6}
|
|
wantedExited := [][]byte{
|
|
pubKey(5),
|
|
}
|
|
wantedExitedIndices := []uint64{5}
|
|
wantedSlashed := [][]byte{
|
|
pubKey(3),
|
|
}
|
|
wantedSlashedIndices := []uint64{3}
|
|
wantedEjected := [][]byte{
|
|
pubKey(7),
|
|
}
|
|
wantedEjectedIndices := []uint64{7}
|
|
wanted := ðpb.ActiveSetChanges{
|
|
Epoch: 0,
|
|
ActivatedPublicKeys: wantedActive,
|
|
ActivatedIndices: wantedActiveIndices,
|
|
ExitedPublicKeys: wantedExited,
|
|
ExitedIndices: wantedExitedIndices,
|
|
SlashedPublicKeys: wantedSlashed,
|
|
SlashedIndices: wantedSlashedIndices,
|
|
EjectedPublicKeys: wantedEjected,
|
|
EjectedIndices: wantedEjectedIndices,
|
|
}
|
|
if !proto.Equal(wanted, res) {
|
|
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
validators := make([]*ethpb.Validator, 8)
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(helpers.StartSlot(100)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
activatedIndices := make([]uint64, 0)
|
|
exitedIndices := make([]uint64, 0)
|
|
slashedIndices := make([]uint64, 0)
|
|
ejectedIndices := make([]uint64, 0)
|
|
for i := 0; i < len(validators); i++ {
|
|
// Mark indices divisible by two as activated.
|
|
if i%2 == 0 {
|
|
activatedIndices = append(activatedIndices, uint64(i))
|
|
} else if i%3 == 0 {
|
|
// Mark indices divisible by 3 as slashed.
|
|
slashedIndices = append(slashedIndices, uint64(i))
|
|
} else if i%5 == 0 {
|
|
// Mark indices divisible by 5 as exited.
|
|
exitedIndices = append(exitedIndices, uint64(i))
|
|
} else if i%7 == 0 {
|
|
// Mark indices divisible by 7 as ejected.
|
|
ejectedIndices = append(ejectedIndices, uint64(i))
|
|
}
|
|
key := make([]byte, 48)
|
|
copy(key, strconv.Itoa(i))
|
|
if err := headState.UpdateValidatorAtIndex(uint64(i), ðpb.Validator{
|
|
PublicKey: key,
|
|
}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
}
|
|
archivedChanges := &pbp2p.ArchivedActiveSetChanges{
|
|
Activated: activatedIndices,
|
|
Exited: exitedIndices,
|
|
Slashed: slashedIndices,
|
|
Ejected: ejectedIndices,
|
|
}
|
|
// We store the changes during the genesis epoch.
|
|
if err := db.SaveArchivedActiveValidatorChanges(ctx, 0, archivedChanges); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
// We store the same changes during epoch 5 for further testing.
|
|
if err := db.SaveArchivedActiveValidatorChanges(ctx, 5, archivedChanges); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
res, err := bs.GetValidatorActiveSetChanges(ctx, ðpb.GetValidatorActiveSetChangesRequest{
|
|
QueryFilter: ðpb.GetValidatorActiveSetChangesRequest_Genesis{Genesis: true},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wantedKeys := make([][]byte, 8)
|
|
for i := 0; i < len(wantedKeys); i++ {
|
|
k := make([]byte, 48)
|
|
copy(k, strconv.Itoa(i))
|
|
wantedKeys[i] = k
|
|
}
|
|
wantedActive := [][]byte{
|
|
wantedKeys[0],
|
|
wantedKeys[2],
|
|
wantedKeys[4],
|
|
wantedKeys[6],
|
|
}
|
|
wantedActiveIndices := []uint64{0, 2, 4, 6}
|
|
wantedExited := [][]byte{
|
|
wantedKeys[5],
|
|
}
|
|
wantedExitedIndices := []uint64{5}
|
|
wantedSlashed := [][]byte{
|
|
wantedKeys[3],
|
|
}
|
|
wantedSlashedIndices := []uint64{3}
|
|
wantedEjected := [][]byte{
|
|
wantedKeys[7],
|
|
}
|
|
wantedEjectedIndices := []uint64{7}
|
|
wanted := ðpb.ActiveSetChanges{
|
|
Epoch: 0,
|
|
ActivatedPublicKeys: wantedActive,
|
|
ActivatedIndices: wantedActiveIndices,
|
|
ExitedPublicKeys: wantedExited,
|
|
ExitedIndices: wantedExitedIndices,
|
|
SlashedPublicKeys: wantedSlashed,
|
|
SlashedIndices: wantedSlashedIndices,
|
|
EjectedPublicKeys: wantedEjected,
|
|
EjectedIndices: wantedEjectedIndices,
|
|
}
|
|
if !proto.Equal(wanted, res) {
|
|
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
|
|
}
|
|
res, err = bs.GetValidatorActiveSetChanges(ctx, ðpb.GetValidatorActiveSetChangesRequest{
|
|
QueryFilter: ðpb.GetValidatorActiveSetChangesRequest_Epoch{Epoch: 5},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wanted.Epoch = 5
|
|
if !proto.Equal(wanted, res) {
|
|
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorQueue_PendingActivation(t *testing.T) {
|
|
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
|
|
Validators: []*ethpb.Validator{
|
|
{
|
|
ActivationEpoch: helpers.ActivationExitEpoch(0),
|
|
ActivationEligibilityEpoch: 3,
|
|
PublicKey: pubKey(3),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
{
|
|
ActivationEpoch: helpers.ActivationExitEpoch(0),
|
|
ActivationEligibilityEpoch: 2,
|
|
PublicKey: pubKey(2),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
{
|
|
ActivationEpoch: helpers.ActivationExitEpoch(0),
|
|
ActivationEligibilityEpoch: 1,
|
|
PublicKey: pubKey(1),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
},
|
|
FinalizedCheckpoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
res, err := bs.GetValidatorQueue(context.Background(), &ptypes.Empty{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
// We verify the keys are properly sorted by the validators' activation eligibility epoch.
|
|
wanted := [][]byte{
|
|
pubKey(1),
|
|
pubKey(2),
|
|
pubKey(3),
|
|
}
|
|
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.CurrentEpoch(headState))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wantChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if res.ChurnLimit != wantChurn {
|
|
t.Errorf("Wanted churn %d, received %d", wantChurn, res.ChurnLimit)
|
|
}
|
|
if !reflect.DeepEqual(res.ActivationPublicKeys, wanted) {
|
|
t.Errorf("Wanted %v, received %v", wanted, res.ActivationPublicKeys)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) {
|
|
validators := []*ethpb.Validator{
|
|
{
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
PublicKey: []byte("1"),
|
|
},
|
|
{
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: 4,
|
|
WithdrawableEpoch: 6,
|
|
PublicKey: []byte("2"),
|
|
},
|
|
}
|
|
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 0}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
// First we check if validator with index 1 is in the exit queue.
|
|
res, err := bs.GetValidatorQueue(context.Background(), &ptypes.Empty{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wanted := [][]byte{
|
|
[]byte("2"),
|
|
}
|
|
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.CurrentEpoch(headState))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wantChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if res.ChurnLimit != wantChurn {
|
|
t.Errorf("Wanted churn %d, received %d", wantChurn, res.ChurnLimit)
|
|
}
|
|
if !reflect.DeepEqual(res.ExitPublicKeys, wanted) {
|
|
t.Errorf("Wanted %v, received %v", wanted, res.ExitPublicKeys)
|
|
}
|
|
|
|
// Now, we move the state.slot past the exit epoch of the validator, and now
|
|
// the validator should no longer exist in the queue.
|
|
if err := headState.SetSlot(helpers.StartSlot(validators[1].ExitEpoch + 1)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
res, err = bs.GetValidatorQueue(context.Background(), &ptypes.Empty{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(res.ExitPublicKeys) != 0 {
|
|
t.Errorf("Wanted empty exit queue, received %v", res.ExitPublicKeys)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorQueue_PendingExit(t *testing.T) {
|
|
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
|
|
Validators: []*ethpb.Validator{
|
|
{
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: 4,
|
|
WithdrawableEpoch: 3,
|
|
PublicKey: pubKey(3),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
{
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: 4,
|
|
WithdrawableEpoch: 2,
|
|
PublicKey: pubKey(2),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
{
|
|
ActivationEpoch: 0,
|
|
ExitEpoch: 4,
|
|
WithdrawableEpoch: 1,
|
|
PublicKey: pubKey(1),
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
},
|
|
},
|
|
FinalizedCheckpoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
res, err := bs.GetValidatorQueue(context.Background(), &ptypes.Empty{})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
// We verify the keys are properly sorted by the validators' withdrawable epoch.
|
|
wanted := [][]byte{
|
|
pubKey(1),
|
|
pubKey(2),
|
|
pubKey(3),
|
|
}
|
|
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.CurrentEpoch(headState))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
wantChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if res.ChurnLimit != wantChurn {
|
|
t.Errorf("Wanted churn %d, received %d", wantChurn, res.ChurnLimit)
|
|
}
|
|
if !reflect.DeepEqual(res.ExitPublicKeys, wanted) {
|
|
t.Errorf("Wanted %v, received %v", wanted, res.ExitPublicKeys)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_CannotRequestCurrentEpoch(t *testing.T) {
|
|
fc := featureconfig.Get()
|
|
fc.DisableNewStateMgmt = true
|
|
featureconfig.Init(fc)
|
|
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(helpers.StartSlot(2)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
wanted := "Cannot retrieve information about an epoch currently in progress"
|
|
if _, err := bs.GetValidatorParticipation(
|
|
ctx,
|
|
ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{
|
|
Epoch: 2,
|
|
},
|
|
},
|
|
); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
GenesisTimeFetcher: &mock.ChainService{},
|
|
}
|
|
|
|
wanted := "Cannot retrieve information about an epoch in the future"
|
|
if _, err := bs.GetValidatorParticipation(
|
|
ctx,
|
|
ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{
|
|
Epoch: helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
|
|
},
|
|
},
|
|
); err != nil && !strings.Contains(err.Error(), wanted) {
|
|
t.Errorf("Expected error %v, received %v", wanted, err)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_FromArchive(t *testing.T) {
|
|
fc := featureconfig.Get()
|
|
fc.DisableNewStateMgmt = true
|
|
featureconfig.Init(fc)
|
|
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
epoch := uint64(4)
|
|
part := ðpb.ValidatorParticipation{
|
|
GlobalParticipationRate: 1.0,
|
|
VotedEther: 20,
|
|
EligibleEther: 20,
|
|
}
|
|
if err := db.SaveArchivedValidatorParticipation(ctx, epoch-2, part); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(helpers.StartSlot(epoch + 1)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: epoch + 1}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
if _, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{
|
|
Epoch: epoch + 2,
|
|
},
|
|
}); err == nil {
|
|
t.Error("Expected error when requesting future epoch, received nil")
|
|
}
|
|
// We request data from epoch 0, which we didn't archive, so we should expect an error.
|
|
if _, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Genesis{
|
|
Genesis: true,
|
|
},
|
|
}); err == nil {
|
|
t.Error("Expected error when data from archive is not found, received nil")
|
|
}
|
|
|
|
want := ðpb.ValidatorParticipationResponse{
|
|
Epoch: epoch - 2,
|
|
Finalized: true,
|
|
Participation: part,
|
|
}
|
|
res, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{
|
|
Epoch: epoch - 2,
|
|
},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(want, res) {
|
|
t.Errorf("Wanted %v, received %v", want, res)
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_PrevEpoch(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
|
|
ctx := context.Background()
|
|
validatorCount := uint64(100)
|
|
|
|
validators := make([]*ethpb.Validator, validatorCount)
|
|
balances := make([]uint64, validatorCount)
|
|
for i := 0; i < len(validators); i++ {
|
|
validators[i] = ðpb.Validator{
|
|
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
|
}
|
|
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
}
|
|
|
|
atts := []*pbp2p.PendingAttestation{{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}}
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(params.BeaconConfig().SlotsPerEpoch); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetBalances(balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetPreviousEpochAttestations(atts); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: params.BeaconConfig().SlotsPerEpoch}}
|
|
if err := db.SaveBlock(ctx, b); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bRoot, err := ssz.HashTreeRoot(b.Block)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := db.SaveState(ctx, headState, bRoot); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
m := &mock.ChainService{State: headState}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: m,
|
|
ParticipationFetcher: m,
|
|
GenesisTimeFetcher: &mock.ChainService{},
|
|
StateGen: stategen.New(db, cache.NewStateSummaryCache()),
|
|
}
|
|
|
|
res, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{Epoch: 0}})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
wanted := ðpb.ValidatorParticipation{EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance}
|
|
if !reflect.DeepEqual(res.Participation, wanted) {
|
|
t.Error("Incorrect validator participation respond")
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_DoesntExist(t *testing.T) {
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(params.BeaconConfig().SlotsPerEpoch); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: params.BeaconConfig().SlotsPerEpoch}}
|
|
if err := db.SaveBlock(ctx, b); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
bRoot, err := ssz.HashTreeRoot(b.Block)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := db.SaveState(ctx, headState, bRoot); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
m := &mock.ChainService{State: headState}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: m,
|
|
ParticipationFetcher: m,
|
|
GenesisTimeFetcher: &mock.ChainService{},
|
|
StateGen: stategen.New(db, cache.NewStateSummaryCache()),
|
|
}
|
|
|
|
res, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{Epoch: 0}})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if res.Participation.VotedEther != 0 || res.Participation.EligibleEther != 0 {
|
|
t.Error("Incorrect validator participation response")
|
|
}
|
|
}
|
|
|
|
func TestServer_GetValidatorParticipation_FromArchive_FinalizedEpoch(t *testing.T) {
|
|
fc := featureconfig.Get()
|
|
fc.DisableNewStateMgmt = true
|
|
featureconfig.Init(fc)
|
|
|
|
db := dbTest.SetupDB(t)
|
|
defer dbTest.TeardownDB(t, db)
|
|
ctx := context.Background()
|
|
part := ðpb.ValidatorParticipation{
|
|
GlobalParticipationRate: 1.0,
|
|
VotedEther: 20,
|
|
EligibleEther: 20,
|
|
}
|
|
epoch := uint64(1)
|
|
// We archive data for epoch 1.
|
|
if err := db.SaveArchivedValidatorParticipation(ctx, epoch, part); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(helpers.StartSlot(epoch + 10)); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: epoch + 5}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
// 10 epochs into the future.
|
|
State: headState,
|
|
},
|
|
}
|
|
want := ðpb.ValidatorParticipationResponse{
|
|
Epoch: epoch,
|
|
Finalized: true,
|
|
Participation: part,
|
|
}
|
|
// We request epoch 1.
|
|
res, err := bs.GetValidatorParticipation(ctx, ðpb.GetValidatorParticipationRequest{
|
|
QueryFilter: ðpb.GetValidatorParticipationRequest_Epoch{
|
|
Epoch: epoch,
|
|
},
|
|
})
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !proto.Equal(want, res) {
|
|
t.Errorf("Wanted %v, received %v", want, res)
|
|
}
|
|
}
|
|
|
|
func BenchmarkListValidatorBalances(b *testing.B) {
|
|
b.StopTimer()
|
|
db := dbTest.SetupDB(b)
|
|
defer dbTest.TeardownDB(b, db)
|
|
|
|
ctx := context.Background()
|
|
count := 1000
|
|
setupValidators(b, db, count)
|
|
|
|
headState, err := db.HeadState(ctx)
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
|
|
bs := &Server{
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
req := ðpb.ListValidatorBalancesRequest{PageSize: 100}
|
|
b.StartTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
if _, err := bs.ListValidatorBalances(ctx, req); err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
}
|
|
}
|
|
|
|
func BenchmarkListValidatorBalances_FromArchive(b *testing.B) {
|
|
b.StopTimer()
|
|
db := dbTest.SetupDB(b)
|
|
defer dbTest.TeardownDB(b, db)
|
|
|
|
ctx := context.Background()
|
|
currentNumValidators := 1000
|
|
numOldBalances := 50
|
|
validators := make([]*ethpb.Validator, currentNumValidators)
|
|
oldBalances := make([]uint64, numOldBalances)
|
|
for i := 0; i < currentNumValidators; i++ {
|
|
validators[i] = ðpb.Validator{
|
|
PublicKey: []byte(strconv.Itoa(i)),
|
|
}
|
|
}
|
|
for i := 0; i < numOldBalances; i++ {
|
|
oldBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
|
}
|
|
// We archive old balances for epoch 50.
|
|
if err := db.SaveArchivedBalances(ctx, 50, oldBalances); err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
headState := testutil.NewBeaconState()
|
|
if err := headState.SetSlot(helpers.StartSlot(100 /* epoch 100 */)); err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
if err := headState.SetValidators(validators); err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
bs := &Server{
|
|
BeaconDB: db,
|
|
HeadFetcher: &mock.ChainService{
|
|
State: headState,
|
|
},
|
|
}
|
|
|
|
b.StartTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
if _, err := bs.ListValidatorBalances(
|
|
ctx,
|
|
ðpb.ListValidatorBalancesRequest{
|
|
QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{
|
|
Epoch: 50,
|
|
},
|
|
PageSize: 100,
|
|
},
|
|
); err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
}
|
|
}
|
|
|
|
func setupValidators(t testing.TB, db db.Database, count int) ([]*ethpb.Validator, []uint64) {
|
|
ctx := context.Background()
|
|
balances := make([]uint64, count)
|
|
validators := make([]*ethpb.Validator, 0, count)
|
|
for i := 0; i < count; i++ {
|
|
pubKey := pubKey(uint64(i))
|
|
balances[i] = uint64(i)
|
|
validators = append(validators, ðpb.Validator{
|
|
PublicKey: pubKey,
|
|
WithdrawalCredentials: make([]byte, 32),
|
|
})
|
|
}
|
|
blk := ðpb.BeaconBlock{
|
|
Slot: 0,
|
|
}
|
|
blockRoot, err := ssz.HashTreeRoot(blk)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
s := testutil.NewBeaconState()
|
|
if err := s.SetValidators(validators); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := s.SetBalances(balances); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := db.SaveState(
|
|
context.Background(),
|
|
s,
|
|
blockRoot,
|
|
); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := db.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
return validators, balances
|
|
}
|