Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git
Synced 2025-01-13 21:48:19 +00:00
Commit 7ebb3c1784
* fix issue with rate limiting
* force fetcher to wait for minimum peers
* adds backoff interval
* cap the max blocks requested from a peer
* queue rewritten
* adds docs to fsm
* fix visibility
* updates fsm
* fsm tests added
* optimizes queue resource allocations
* removes debug log
* replace auto-fixed comment
* fixes typo
* better handling of evil peers
* fixes test
* minor fixes to fsm
* better interface for findEpochState func
370 lines
9.4 KiB
Go
package initialsync

import (
	"context"
	"fmt"
	"testing"

	eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
	dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/sliceutil"
)

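// blocksProviderMock is a no-op blocks provider used as a stand-in fetcher in tests.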
type blocksProviderMock struct {
}

func (f *blocksProviderMock) start() error {
	return nil
}

func (f *blocksProviderMock) stop() {
}

func (f *blocksProviderMock) scheduleRequest(ctx context.Context, start, count uint64) error {
	return nil
}

func (f *blocksProviderMock) requestResponses() <-chan *fetchRequestResponse {
	return nil
}

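// TestBlocksQueueInitStartStop exercises the queue lifecycle: stopping before start,
// starting with the default fetcher, stop timeouts, leaked channels, restarting a
// stopped queue, repeated stops, and context cancellation.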
func TestBlocksQueueInitStartStop(t *testing.T) {
	mc, p2p, beaconDB := initializeTestServices(t, []uint64{}, []*peerData{})
	defer dbtest.TeardownDB(t, beaconDB)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
		headFetcher: mc,
		p2p:         p2p,
	})

	t.Run("stop without start", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})

		if err := queue.stop(); err == nil {
			t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
		}
	})

	t.Run("use default fetcher", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})
		if err := queue.start(); err != nil {
			t.Error(err)
		}
	})

	t.Run("stop timeout", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})
		if err := queue.start(); err != nil {
			t.Error(err)
		}
		if err := queue.stop(); err == nil {
			t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
		}
	})

	t.Run("check for leaked goroutines", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			blocksFetcher:       fetcher,
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})

		if err := queue.start(); err != nil {
			t.Error(err)
		}
		// Blocks up until all resources are reclaimed (or timeout is called)
		if err := queue.stop(); err != nil {
			t.Error(err)
		}
		select {
		case <-queue.fetchedBlocks:
		default:
			t.Error("queue.fetchedBlocks channel is leaked")
		}
		select {
		case <-fetcher.fetchResponses:
		default:
			t.Error("fetcher.fetchResponses channel is leaked")
		}
	})

	t.Run("re-starting of stopped queue", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			blocksFetcher:       fetcher,
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})
		if err := queue.start(); err != nil {
			t.Error(err)
		}
		if err := queue.stop(); err != nil {
			t.Error(err)
		}
		if err := queue.start(); err == nil {
			t.Errorf("expected error not returned: %v", errQueueCtxIsDone)
		}
	})

	t.Run("multiple stopping attempts", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			blocksFetcher:       fetcher,
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})
		if err := queue.start(); err != nil {
			t.Error(err)
		}

		if err := queue.stop(); err != nil {
			t.Error(err)
		}
		if err := queue.stop(); err != nil {
			t.Error(err)
		}
	})

	t.Run("cancellation", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.Background())
		queue := newBlocksQueue(ctx, &blocksQueueConfig{
			blocksFetcher:       fetcher,
			headFetcher:         mc,
			highestExpectedSlot: blockBatchSize,
		})
		if err := queue.start(); err != nil {
			t.Error(err)
		}

		cancel()
		if err := queue.stop(); err != nil {
			t.Error(err)
		}
	})
}

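// TestBlocksQueueLoop feeds the queue with blocks from simulated peers and verifies
// that every expected slot is fetched, processed, and reflected in the head slot.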
func TestBlocksQueueLoop(t *testing.T) {
	tests := []struct {
		name                string
		highestExpectedSlot uint64
		expectedBlockSlots  []uint64
		peers               []*peerData
	}{
		{
			name:                "Single peer with all blocks",
			highestExpectedSlot: 251,
			expectedBlockSlots:  makeSequence(1, 251),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
			},
		},
		{
			name:                "Multiple peers with all blocks",
			highestExpectedSlot: 251,
			expectedBlockSlots:  makeSequence(1, 251),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
			},
		},
		{
			name:                "Multiple peers with skipped slots",
			highestExpectedSlot: 576,
			expectedBlockSlots:  append(makeSequence(1, 64), makeSequence(500, 576)...), // up to 18th epoch
			peers: []*peerData{
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
			},
		},
		{
			name:                "Multiple peers with failures",
			highestExpectedSlot: 128,
			expectedBlockSlots:  makeSequence(1, 128),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
					failureSlots:   makeSequence(32*3+1, 32*3+32),
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
					failureSlots:   makeSequence(1, 32*3),
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
			},
		},
	}

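	// Run every scenario: start the queue, drain fetchedBlocks into the chain service,
	// and verify that the expected slots were synced.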
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mc, p2p, beaconDB := initializeTestServices(t, tt.expectedBlockSlots, tt.peers)
			defer dbtest.TeardownDB(t, beaconDB)

			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
				headFetcher: mc,
				p2p:         p2p,
			})
			queue := newBlocksQueue(ctx, &blocksQueueConfig{
				blocksFetcher:       fetcher,
				headFetcher:         mc,
				highestExpectedSlot: tt.highestExpectedSlot,
			})
			if err := queue.start(); err != nil {
				t.Error(err)
			}
			processBlock := func(block *eth.SignedBeaconBlock) error {
				if !beaconDB.HasBlock(ctx, bytesutil.ToBytes32(block.Block.ParentRoot)) {
					return fmt.Errorf("beacon node doesn't have a block in db with root %#x", block.Block.ParentRoot)
				}
				if featureconfig.Get().InitSyncNoVerify {
					if err := mc.ReceiveBlockNoVerify(ctx, block); err != nil {
						return err
					}
				} else {
					if err := mc.ReceiveBlockNoPubsubForkchoice(ctx, block); err != nil {
						return err
					}
				}

				return nil
			}

			var blocks []*eth.SignedBeaconBlock
			for block := range queue.fetchedBlocks {
				if err := processBlock(block); err != nil {
					continue
				}
				blocks = append(blocks, block)
			}

			if err := queue.stop(); err != nil {
				t.Error(err)
			}

			if queue.headFetcher.HeadSlot() < uint64(len(tt.expectedBlockSlots)) {
				t.Errorf("Not enough slots synced, want: %v, got: %v",
					len(tt.expectedBlockSlots), queue.headFetcher.HeadSlot())
			}
			if len(blocks) != len(tt.expectedBlockSlots) {
				t.Errorf("Processed wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(blocks))
			}
			var receivedBlockSlots []uint64
			for _, blk := range blocks {
				receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
			}
			if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots); len(missing) > 0 {
				t.Errorf("Missing blocks at slots %v", missing)
			}
		})
	}
}

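// setBlocksFromCache sends blocks for slots [0, highestSlot] to the mock chain service,
// chaining each block's parent root to the previous block's hash tree root.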
func setBlocksFromCache(ctx context.Context, t *testing.T, mc *mock.ChainService, highestSlot uint64) {
	cache.RLock()
	parentRoot := cache.rootCache[0]
	cache.RUnlock()
	for slot := uint64(0); slot <= highestSlot; slot++ {
		blk := &eth.SignedBeaconBlock{
			Block: &eth.BeaconBlock{
				Slot:       slot,
				ParentRoot: parentRoot[:],
			},
		}
		mc.BlockNotifier().BlockFeed().Send(&feed.Event{
			Type: blockfeed.ReceivedBlock,
			Data: blockfeed.ReceivedBlockData{
				SignedBlock: blk,
			},
		})

		if err := mc.ReceiveBlockNoPubsubForkchoice(ctx, blk); err != nil {
			t.Error(err)
		}

		currRoot, _ := ssz.HashTreeRoot(blk.Block)
		parentRoot = currRoot
	}
}