Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git
Synced 2024-12-23 11:57:18 +00:00
879e310332
* panic in SizeSSZ
* moving slowly
* adapt old code to new interfaces
* return interfaces from factory functions
* replace the rest of WrappedSignedBeaconBlock
* WrappedBeaconBlock
* WrappedBeaconBlockBody
* miscellaneous
* Test_BeaconBlockIsNil
* replace usages of BeaconBlockIsNil
* replace usages of mutator
* fix all build errors
* fix some more issues
* mutator changes
* relax assertions when initializing
* revert changes in object_mapping.go
* allow calling Proto on nil
* Revert "allow calling Proto on nil" (reverts commit ecc84e455381b03d24aec2fa0fa17bddbec71705)
* modify Copy and Proto methods
* remove unused var
* fix block batch tests
* correct BUILD file
* Error when initializing nil objects
* one more error fix
* add missing comma
* rename alias to blocktest
* add logging
* error when SignedBeaconBlock is nil
* fix last test
* import fix
* broken
* working
* test fixes
* reduce complexity of processPendingBlocks
* simplified
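The squashed commits above center on one change: block factory functions now return consensus-type interfaces instead of concrete wrappers, and initializing from a nil object returns an error rather than panicking later in SizeSSZ. A minimal sketch of the pattern, excerpted from how the ReconstructsPayload test below wraps and stores a blinded block (t, d, require, and context come from the test's own scope):

	// Wrap a concrete protobuf block behind the signed-block interface.
	blk := util.NewBlindedBeaconBlockBellatrix()
	wsb, err := blocks.NewSignedBeaconBlock(blk) // errors on nil input instead of panicking later
	require.NoError(t, err)
	require.NoError(t, d.SaveBlock(context.Background(), wsb))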
287 lines
10 KiB
Go
package sync

import (
	"context"
	"math/big"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	gethTypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/kevinms/leakybucket-go"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/protocol"
	gcache "github.com/patrickmn/go-cache"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
	db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	mockExecution "github.com/prysmaticlabs/prysm/beacon-chain/execution/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/consensus-types/blocks"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)
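
// TestRecentBeaconBlocksRPCHandler_ReturnsBlocks saves ten blocks (slots 1 through 10),
// requests them all by root, and verifies that the handler streams each block back
// with a success code, in the order requested.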
func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d := db.SetupDB(t)

	var blkRoots p2pTypes.BeaconBlockByRootsReq
	// Populate the database with blocks that would match the request.
	for i := types.Slot(1); i < 11; i++ {
		blk := util.NewBeaconBlock()
		blk.Block.Slot = i
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		util.SaveBlock(t, context.Background(), d, blk)
		blkRoots = append(blkRoots, root)
	}

	r := &Service{cfg: &config{p2p: p1, beaconDB: d}, rateLimiter: newRateLimiter(p1)}
	r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}}
	pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)
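
	// The remote peer plays the requester: it decodes each streamed response and
	// checks that the blocks come back in ascending slot order (1 through 10).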
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		for i := range blkRoots {
			expectSuccess(t, stream)
			res := util.NewBeaconBlock()
			assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
			if uint64(res.Block.Slot) != uint64(i+1) {
				t.Errorf("Received unexpected block slot %d but wanted %d", res.Block.Slot, i+1)
			}
		}
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	err = r.beaconBlocksRootRPCHandler(context.Background(), &blkRoots, stream1)
	assert.NoError(t, err)

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
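
// TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload is the blinded-block
// variant of the test above: only execution payload headers are stored in the database,
// so the handler must fetch the full payloads from the execution engine before responding.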
func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d := db.SetupDB(t)

	// Build an execution payload and the corresponding header to embed in blinded blocks.
	parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
	stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
	receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
	logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
	tx := gethTypes.NewTransaction(
		0,
		common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
		big.NewInt(0), 0, big.NewInt(0),
		nil,
	)
	txs := []*gethTypes.Transaction{tx}
	encodedBinaryTxs := make([][]byte, 1)
	var err error
	encodedBinaryTxs[0], err = txs[0].MarshalBinary()
	require.NoError(t, err)
	blockHash := bytesutil.ToBytes32([]byte("foo"))
	payload := &enginev1.ExecutionPayload{
		ParentHash:    parent,
		FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
		StateRoot:     stateRoot,
		ReceiptsRoot:  receiptsRoot,
		LogsBloom:     logsBloom,
		PrevRandao:    blockHash[:],
		BlockNumber:   0,
		GasLimit:      0,
		GasUsed:       0,
		Timestamp:     0,
		ExtraData:     make([]byte, 0),
		BlockHash:     blockHash[:],
		BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
		Transactions:  encodedBinaryTxs,
	}
	wrappedPayload, err := blocks.WrappedExecutionPayload(payload)
	require.NoError(t, err)
	header, err := blocks.PayloadToHeader(wrappedPayload)
	require.NoError(t, err)
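
	// Save blinded blocks only: each carries the payload header, and the full
	// payload above is what the handler must reconstruct for every response.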
	var blkRoots p2pTypes.BeaconBlockByRootsReq
	// Populate the database with blocks that would match the request.
	for i := types.Slot(1); i < 11; i++ {
		blk := util.NewBlindedBeaconBlockBellatrix()
		blk.Block.Body.ExecutionPayloadHeader = header
		blk.Block.Slot = i
		root, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.NoError(t, d.SaveBlock(context.Background(), wsb))
		blkRoots = append(blkRoots, root)
	}
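
	// The mock engine serves the payload by block hash, standing in for the
	// execution client during payload reconstruction.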
	mockEngine := &mockExecution.EngineClient{
		ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{
			blockHash: payload,
		},
	}
	r := &Service{cfg: &config{
		p2p:                           p1,
		beaconDB:                      d,
		executionPayloadReconstructor: mockEngine,
	}, rateLimiter: newRateLimiter(p1)}
	r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}}
	pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)

	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		for i := range blkRoots {
			expectSuccess(t, stream)
			res := util.NewBeaconBlockBellatrix()
			assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
			if uint64(res.Block.Slot) != uint64(i+1) {
				t.Errorf("Received unexpected block slot %d but wanted %d", res.Block.Slot, i+1)
			}
		}
		// Every one of the ten blinded blocks should have triggered a payload reconstruction.
		require.Equal(t, uint64(10), mockEngine.NumReconstructedPayloads)
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	err = r.beaconBlocksRootRPCHandler(context.Background(), &blkRoots, stream1)
	assert.NoError(t, err)

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
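
// TestRecentBeaconBlocks_RPCRequestSent exercises the client side of the protocol:
// the service sends a by-root request and the remote peer verifies the roots it
// receives before answering with the matching blocks.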
func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {

	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.DelaySend = true

	blockA := util.NewBeaconBlock()
	blockA.Block.Slot = 111
	blockB := util.NewBeaconBlock()
	blockB.Block.Slot = 40
	// Set up a head state with data we expect.
	blockARoot, err := blockA.Block.HashTreeRoot()
	require.NoError(t, err)
	blockBRoot, err := blockB.Block.HashTreeRoot()
	require.NoError(t, err)
	genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{})
	require.NoError(t, err)
	require.NoError(t, genesisState.SetSlot(111))
	require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), blockARoot))
	finalizedCheckpt := &ethpb.Checkpoint{
		Epoch: 5,
		Root:  blockBRoot[:],
	}

	expectedRoots := p2pTypes.BeaconBlockByRootsReq{blockBRoot, blockARoot}

	r := &Service{
		cfg: &config{
			p2p: p1,
			chain: &mock.ChainService{
				State:               genesisState,
				FinalizedCheckPoint: finalizedCheckpt,
				Root:                blockARoot[:],
				Genesis:             time.Now(),
				ValidatorsRoot:      [32]byte{},
			},
		},
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
		ctx:                 context.Background(),
		rateLimiter:         newRateLimiter(p1),
	}

	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/beacon_blocks_by_root/1/ssz_snappy")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)
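
	// p2 acts as the serving peer: it asserts that the request matches expectedRoots,
	// then writes a success code followed by an encoded block for blockB and blockA.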
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := new(p2pTypes.BeaconBlockByRootsReq)
		assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, out))
		assert.DeepEqual(t, &expectedRoots, out, "Did not receive expected message")
		response := []*ethpb.SignedBeaconBlock{blockB, blockA}
		for _, blk := range response {
			_, err := stream.Write([]byte{responseCodeSuccess})
			assert.NoError(t, err, "Could not write to stream")
			_, err = p2.Encoding().EncodeWithMaxLength(stream, blk)
			assert.NoError(t, err, "Could not send response back")
		}
		assert.NoError(t, stream.Close())
	})

	p1.Connect(p2)
	require.NoError(t, r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
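
// TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks sends an empty root list and
// expects the handler to reject the request while still charging the rate limiter.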
func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) {

	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	d := db.SetupDB(t)

	r := &Service{cfg: &config{p2p: p1, beaconDB: d}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)
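
	// A collector with capacity 1 makes the final assertion meaningful: even a
	// rejected, empty request should consume exactly one token for the peer.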
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		expectFailure(t, 1, "no block roots provided in request", stream)
	})

	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	err = r.beaconBlocksRootRPCHandler(context.Background(), &p2pTypes.BeaconBlockByRootsReq{}, stream1)
	assert.ErrorContains(t, "no block roots provided", err)
	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	r.rateLimiter.RLock() // retrieveCollector requires a lock to be held.
	defer r.rateLimiter.RUnlock()
	lter, err := r.rateLimiter.retrieveCollector(topic)
	require.NoError(t, err)
	assert.Equal(t, 1, int(lter.Count(stream1.Conn().RemotePeer().String())))
}