mirror of
https://gitlab.com/pulsechaincom/prysm-pulse.git
synced 2024-12-26 05:17:22 +00:00
b0128ad894
* bump bitfield dep * add new methods * get it working * add nil check * add check * one more check * add flag * everything works local run * add debug log * more changes * ensuring p2p interface works enough for tests to pass * all tests pass * include proper naming and comments to fix lint * Apply suggestions from code review * discover by peers * cannot figure out why 0 peers * remove keys * fix test * fix it * fix again * remove log * change back * gaz Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
271 lines
9.0 KiB
Go
271 lines
9.0 KiB
Go
package sync
|
|
|
|
import (
|
|
"context"
|
|
"sync"
|
|
"testing"
|
|
|
|
"github.com/libp2p/go-libp2p-core/network"
|
|
"github.com/libp2p/go-libp2p-core/protocol"
|
|
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
|
"github.com/prysmaticlabs/go-ssz"
|
|
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
|
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
|
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
|
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
|
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
|
"github.com/sirupsen/logrus"
|
|
)
|
|
|
|
// init raises the logrus level to Debug so that debug output from the
// sync service is visible while these tests run.
func init() {
	logrus.SetLevel(logrus.DebugLevel)
}
|
|
|
|
// /- b1 - b2
|
|
// b0
|
|
// \- b3
|
|
// Test b1 was missing then received and we can process b0 -> b1 -> b2
|
|
func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
|
db := dbtest.SetupDB(t)
|
|
defer dbtest.TeardownDB(t, db)
|
|
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
r := &Service{
|
|
p2p: p1,
|
|
db: db,
|
|
chain: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
},
|
|
slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock),
|
|
seenPendingBlocks: make(map[[32]byte]bool),
|
|
}
|
|
|
|
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
|
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
b0Root, _ := ssz.HashTreeRoot(b0.Block)
|
|
b3 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}}
|
|
if err := r.db.SaveBlock(context.Background(), b3); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
// Incomplete block link
|
|
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
|
b1Root, _ := ssz.HashTreeRoot(b1.Block)
|
|
b2 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}}
|
|
b2Root, _ := ssz.HashTreeRoot(b1.Block)
|
|
|
|
// Add b2 to the cache
|
|
r.slotToPendingBlocks[b2.Block.Slot] = b2
|
|
r.seenPendingBlocks[b2Root] = true
|
|
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 1 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
if len(r.seenPendingBlocks) != 1 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
|
|
// Add b1 to the cache
|
|
r.slotToPendingBlocks[b1.Block.Slot] = b1
|
|
r.seenPendingBlocks[b1Root] = true
|
|
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
if len(r.seenPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
}
|
|
|
|
// /- b1 - b2 - b5
|
|
// b0
|
|
// \- b3 - b4
|
|
// Test b2 and b3 were missed, after receiving them we can process 2 chains.
|
|
func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks2(t *testing.T) {
|
|
db := dbtest.SetupDB(t)
|
|
defer dbtest.TeardownDB(t, db)
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
p1.Connect(p2)
|
|
if len(p1.Host.Network().Peers()) != 1 {
|
|
t.Error("Expected peers to be connected")
|
|
}
|
|
pcl := protocol.ID("/eth2/beacon_chain/req/hello/1/ssz")
|
|
var wg sync.WaitGroup
|
|
wg.Add(1)
|
|
p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
|
|
defer wg.Done()
|
|
code, errMsg, err := ReadStatusCode(stream, p1.Encoding())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if code == 0 {
|
|
t.Error("Expected a non-zero code")
|
|
}
|
|
if errMsg != errWrongForkVersion.Error() {
|
|
t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkVersion.Error()))
|
|
t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, errWrongForkVersion.Error())
|
|
}
|
|
})
|
|
|
|
r := &Service{
|
|
p2p: p1,
|
|
db: db,
|
|
chain: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 0,
|
|
},
|
|
}, slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock),
|
|
seenPendingBlocks: make(map[[32]byte]bool),
|
|
}
|
|
p1.Peers().Add(p2.PeerID(), nil, network.DirOutbound, []uint64{})
|
|
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
|
p1.Peers().SetChainState(p2.PeerID(), &pb.Status{})
|
|
|
|
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
|
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
b0Root, _ := ssz.HashTreeRoot(b0.Block)
|
|
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
|
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
b1Root, _ := ssz.HashTreeRoot(b1.Block)
|
|
|
|
// Incomplete block links
|
|
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}
|
|
b2Root, _ := ssz.HashTreeRoot(b2)
|
|
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: b2Root[:]}
|
|
b5Root, _ := ssz.HashTreeRoot(b5)
|
|
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}
|
|
b3Root, _ := ssz.HashTreeRoot(b3)
|
|
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: b3Root[:]}
|
|
b4Root, _ := ssz.HashTreeRoot(b4)
|
|
|
|
r.slotToPendingBlocks[b4.Slot] = ðpb.SignedBeaconBlock{Block: b4}
|
|
r.seenPendingBlocks[b4Root] = true
|
|
r.slotToPendingBlocks[b5.Slot] = ðpb.SignedBeaconBlock{Block: b5}
|
|
r.seenPendingBlocks[b5Root] = true
|
|
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 2 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
if len(r.seenPendingBlocks) != 2 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
|
|
// Add b3 to the cache
|
|
r.slotToPendingBlocks[b3.Slot] = ðpb.SignedBeaconBlock{Block: b3}
|
|
r.seenPendingBlocks[b3Root] = true
|
|
if err := r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b3}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 1 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
if len(r.seenPendingBlocks) != 1 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
|
|
// Add b2 to the cache
|
|
r.slotToPendingBlocks[b2.Slot] = ðpb.SignedBeaconBlock{Block: b2}
|
|
r.seenPendingBlocks[b2Root] = true
|
|
|
|
if err := r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b2}); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
t.Log(r.seenPendingBlocks)
|
|
if len(r.seenPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
}
|
|
|
|
func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
|
db := dbtest.SetupDB(t)
|
|
defer dbtest.TeardownDB(t, db)
|
|
p1 := p2ptest.NewTestP2P(t)
|
|
p2 := p2ptest.NewTestP2P(t)
|
|
p1.Connect(p2)
|
|
if len(p1.Host.Network().Peers()) != 1 {
|
|
t.Error("Expected peers to be connected")
|
|
}
|
|
|
|
r := &Service{
|
|
p2p: p1,
|
|
db: db,
|
|
chain: &mock.ChainService{
|
|
FinalizedCheckPoint: ðpb.Checkpoint{
|
|
Epoch: 1,
|
|
},
|
|
}, slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock),
|
|
seenPendingBlocks: make(map[[32]byte]bool),
|
|
}
|
|
p1.Peers().Add(p1.PeerID(), nil, network.DirOutbound, []uint64{})
|
|
p1.Peers().SetConnectionState(p1.PeerID(), peers.PeerConnected)
|
|
p1.Peers().SetChainState(p1.PeerID(), &pb.Status{})
|
|
|
|
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
|
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
b0Root, _ := ssz.HashTreeRoot(b0.Block)
|
|
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
|
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
b1Root, _ := ssz.HashTreeRoot(b1.Block)
|
|
|
|
// Incomplete block links
|
|
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}
|
|
b2Root, _ := ssz.HashTreeRoot(b2)
|
|
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: b2Root[:]}
|
|
b5Root, _ := ssz.HashTreeRoot(b5)
|
|
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}
|
|
b3Root, _ := ssz.HashTreeRoot(b3)
|
|
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: b3Root[:]}
|
|
b4Root, _ := ssz.HashTreeRoot(b4)
|
|
|
|
r.slotToPendingBlocks[b2.Slot] = ðpb.SignedBeaconBlock{Block: b2}
|
|
r.seenPendingBlocks[b2Root] = true
|
|
r.slotToPendingBlocks[b3.Slot] = ðpb.SignedBeaconBlock{Block: b3}
|
|
r.seenPendingBlocks[b3Root] = true
|
|
r.slotToPendingBlocks[b4.Slot] = ðpb.SignedBeaconBlock{Block: b4}
|
|
r.seenPendingBlocks[b4Root] = true
|
|
r.slotToPendingBlocks[b5.Slot] = ðpb.SignedBeaconBlock{Block: b5}
|
|
r.seenPendingBlocks[b5Root] = true
|
|
|
|
if err := r.processPendingBlocks(context.Background()); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(r.slotToPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
|
}
|
|
if len(r.seenPendingBlocks) != 0 {
|
|
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
|
}
|
|
}
|