package sync

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/kevinms/leakybucket-go"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/protocol"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/flags"
	p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

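// TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks sends a
// BeaconBlocksByRangeRequest (StartSlot, Step, Count) to the handler and
// verifies that every returned block falls on the requested step, and that the
// per-peer rate limiter is charged one token per block served.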
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d, _ := db.SetupDB(t)

	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 100,
		Step:      64,
		Count:     16,
	}

	// Populate the database with blocks that would match the request.
	for i := req.StartSlot; i < req.StartSlot+(req.Step*req.Count); i += req.Step {
		blk := testutil.NewBeaconBlock()
		blk.Block.Slot = i
		require.NoError(t, d.SaveBlock(context.Background(), blk))
	}

	// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
	r := &Service{p2p: p1, db: d, chain: &chainMock.ChainService{}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID("/testing")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)
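	// Note: leakybucket.NewCollector(rate, capacity, deleteEmptyBuckets) gives each
	// peer a bucket of `capacity` tokens, and the handler charges one token per block
	// served. With a near-zero refill rate the bucket does not recover during the
	// test, so the capacity check below reflects only what this request consumed.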
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
			expectSuccess(t, r, stream)
			res := testutil.NewBeaconBlock()
			assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
			if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
				t.Errorf("Received unexpected block slot %d", res.Block.Slot)
			}
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)

	err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
	require.NoError(t, err)

	// Make sure that rate limiter doesn't limit capacity exceedingly.
	remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
	expectedCapacity := int64(req.Count*10 - req.Count)
	require.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}

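// TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks saves blocks in
// reverse slot order and checks that the handler streams them back sorted by
// slot, with roots matching the blocks that were stored.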
func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d, _ := db.SetupDB(t)

	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 200,
		Step:      21,
		Count:     33,
	}

	endSlot := req.StartSlot + (req.Step * (req.Count - 1))
	expectedRoots := make([][32]byte, req.Count)
	// Populate the database with blocks that would match the request.
	for i, j := endSlot, req.Count-1; i >= req.StartSlot; i -= req.Step {
		blk := testutil.NewBeaconBlock()
		blk.Block.Slot = i
		rt, err := stateutil.BlockRoot(blk.Block)
		require.NoError(t, err)
		expectedRoots[j] = rt
		require.NoError(t, d.SaveBlock(context.Background(), blk))
		j--
	}

	// Start service with 330 as allowed blocks capacity (and almost zero capacity recovery).
	r := &Service{p2p: p1, db: d, rateLimiter: newRateLimiter(p1),
		chain: &chainMock.ChainService{}}
	pcl := protocol.ID("/testing")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), false)

	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		prevSlot := uint64(0)
		require.Equal(t, uint64(len(expectedRoots)), req.Count, "Number of roots not expected")
		for i, j := req.StartSlot, 0; i < req.StartSlot+req.Count*req.Step; i += req.Step {
			expectSuccess(t, r, stream)
			res := &ethpb.SignedBeaconBlock{}
			assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
			if res.Block.Slot < prevSlot {
				t.Errorf("Received block is unsorted with slot %d lower than previous slot %d", res.Block.Slot, prevSlot)
			}
			rt, err := stateutil.BlockRoot(res.Block)
			require.NoError(t, err)
			assert.Equal(t, expectedRoots[j], rt, "roots not equal")
			prevSlot = res.Block.Slot
			j++
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}

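// TestRPCBeaconBlocksByRange_ReturnsGenesisBlock checks that a request
// beginning at slot 0 returns the genesis block as the first response chunk.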
func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d, _ := db.SetupDB(t)

	req := &pb.BeaconBlocksByRangeRequest{
		StartSlot: 0,
		Step:      1,
		Count:     4,
	}

	// Populate the database with blocks that would match the request.
	for i := req.StartSlot; i < req.StartSlot+(req.Step*req.Count); i++ {
		blk := testutil.NewBeaconBlock()
		blk.Block.Slot = i

		// Save genesis block root.
		if i == 0 {
			rt, err := stateutil.BlockRoot(blk.Block)
			require.NoError(t, err)
			require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), rt))
		}
		require.NoError(t, d.SaveBlock(context.Background(), blk))
	}

	r := &Service{p2p: p1, db: d, chain: &chainMock.ChainService{}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID("/testing")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)

	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		// Check for genesis block.
		expectSuccess(t, r, stream)
		res := &ethpb.SignedBeaconBlock{}
		assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
		assert.Equal(t, uint64(0), res.Block.Slot, "genesis block was not returned")
		for i := req.StartSlot + req.Step; i < req.Count*req.Step; i += req.Step {
			expectSuccess(t, r, stream)
			res := &ethpb.SignedBeaconBlock{}
			assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}

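// TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow covers the rate
// limiter edge cases: requests that exactly exhaust the per-peer capacity
// should succeed, while requests that overflow it are rejected and, after
// enough bad responses, the peer is flagged for disconnection.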
func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
	d, _ := db.SetupDB(t)
	hook := logTest.NewGlobal()
	saveBlocks := func(req *pb.BeaconBlocksByRangeRequest) {
		// Populate the database with blocks that would match the request.
		for i := req.StartSlot; i < req.StartSlot+(req.Step*req.Count); i += req.Step {
			block := testutil.NewBeaconBlock()
			block.Block.Slot = i
			require.NoError(t, d.SaveBlock(context.Background(), block))
		}
	}
	sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
		req *pb.BeaconBlocksByRangeRequest, validateBlocks bool) error {
		var wg sync.WaitGroup
		wg.Add(1)
		pcl := protocol.ID("/testing")
		p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
			defer wg.Done()
			if !validateBlocks {
				return
			}
			for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
				expectSuccess(t, r, stream)
				res := testutil.NewBeaconBlock()
				assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
				if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
					t.Errorf("Received unexpected block slot %d", res.Block.Slot)
				}
			}
		})
		stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
		require.NoError(t, err)
		if err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
			return err
		}
		if testutil.WaitTimeout(&wg, 1*time.Second) {
			t.Fatal("Did not receive stream within 1 sec")
		}
		return nil
	}

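	// Each subtest installs a fresh leaky-bucket collector whose capacity is derived
	// from flags.Get().BlockBatchLimit (and its burst factor), then checks whether
	// serving the request exhausts that capacity and whether overflowing it gets the
	// peer penalized ("Disconnecting bad peer") via the bad-responses scorer.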
t.Run("high request count param and no overflow", func(t *testing.T) {
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
p1.Connect(p2)
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
capacity := int64(flags.Get().BlockBatchLimit * 3)
|
|
r := &Service{p2p: p1, db: d, chain: &chainMock.ChainService{}, rateLimiter: newRateLimiter(p1)}
|
|
|
|
pcl := protocol.ID("/testing")
|
|
topic := string(pcl)
|
|
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
StartSlot: 100,
|
|
Step: 5,
|
|
Count: uint64(capacity),
|
|
}
|
|
saveBlocks(req)
|
|
|
|
hook.Reset()
|
|
assert.NoError(t, sendRequest(p1, p2, r, req, true))
|
|
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
|
|
|
|
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
|
expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
|
|
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
|
})
|
|
|
|
t.Run("high request count param and overflow", func(t *testing.T) {
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
p1.Connect(p2)
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
capacity := int64(flags.Get().BlockBatchLimit * 3)
|
|
r := &Service{p2p: p1, db: d, chain: &chainMock.ChainService{}, rateLimiter: newRateLimiter(p1)}
|
|
|
|
pcl := protocol.ID("/testing")
|
|
topic := string(pcl)
|
|
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
StartSlot: 100,
|
|
Step: 5,
|
|
Count: uint64(capacity + 1),
|
|
}
|
|
saveBlocks(req)
|
|
|
|
hook.Reset()
|
|
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
|
err := sendRequest(p1, p2, r, req, false)
|
|
assert.ErrorContains(t, rateLimitedError, err)
|
|
}
|
|
		// Make sure the peer was indeed rate limited and flagged for disconnection.
		require.LogsContain(t, hook, "Disconnecting bad peer")

		remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
		expectedCapacity := int64(0) // Whole capacity is used.
		assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
	})

t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
p1.Connect(p2)
|
|
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
|
|
|
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
|
|
r := &Service{p2p: p1, db: d, chain: &chainMock.ChainService{}, rateLimiter: newRateLimiter(p1)}
|
|
pcl := protocol.ID("/testing")
|
|
topic := string(pcl)
|
|
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, false)
|
|
|
|
req := &pb.BeaconBlocksByRangeRequest{
|
|
StartSlot: 100,
|
|
Step: 1,
|
|
Count: uint64(flags.Get().BlockBatchLimit),
|
|
}
|
|
saveBlocks(req)
|
|
|
|
hook.Reset()
|
|
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
|
|
assert.NoError(t, sendRequest(p1, p2, r, req, true))
|
|
}
|
|
require.LogsDoNotContain(t, hook, "Disconnecting bad peer")
|
|
|
|
// One more request should result in overflow.
|
|
hook.Reset()
|
|
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
|
err := sendRequest(p1, p2, r, req, false)
|
|
assert.ErrorContains(t, rateLimitedError, err)
|
|
}
|
|
require.LogsContain(t, hook, "Disconnecting bad peer")
|
|
|
|
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
|
expectedCapacity := int64(0) // Whole capacity is used.
|
|
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
|
})
|
|
}
|