mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git
synced 2024-12-25 12:57:18 +00:00
commit cc741ed8af
* begin state service
* begin on the state trie idea
* created beacon state structure
* add in the full clone getter
* return by value instead
* add all setters
* new state setters are being completed
* arrays roots exposed
* close to finishing all these headerssss
* functionality complete
* added in proto benchmark test
* test for compatibility
* add test for compat
* comments fixed
* add clone
* add clone
* remove underlying copies
* make it immutable
* integrate it into chainservice
* revert
* wrap up comments for package
* address all comments and godocs
* address all comments
* clone the pending attestation properly
* properly clone remaining items
* tests pass fixed bug
* begin using it instead of head state
* prevent nil pointer exceptions
* begin using new struct in db
* integrated new type into db package
* add proper nil checks
* using new state in archiver
* refactored much of core
* editing all the precompute functions
* done with most core refactor
* fixed up some bugs in the clone comparisons
* append current epoch atts
* add missing setters
* add new setters
* fix other core methods
* fix up transition
* main service and forkchoice
* fix rpc
* integrated to powchain
* some more changes
* fix build
* improve processing of deposits
* fix error
* prevent panic
* comment
* fix process att
* gaz
* fix up att process
* resolve existing review comments
* resolve another batch of gh comments
* resolve broken cpt state
* revise testutil to use the new state
* begin updating the state transition func to pass in more compartmentalized args
* finish editing transition function to return errors
* block operations pretty much done with refactor
* state transition fully refactored
* got epoch processing completed
* fix build in fork choice
* fixing more of the build
* fix up broken sync package
* it builds nowww it buildssss
* revert registry changes
* Recompute on Read (#4627)
* compute on read
* fix up eth1 data votes
* looking into slashings bug introduced in core/
* able to advance more slots
* add logging
* can now sync with testnet yay
* remove the leaves algorithm and other merkle imports
* expose initialize unsafe funcs
* Update beacon-chain/db/kv/state.go
* lint
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* More Optimizations for New State (#4641)
* map optimization
* more optimizations
* use a custom hasher
* comment
* block operations optimizations
* Update beacon-chain/state/types.go
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* fixed up various operations to use the validator index map access
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* archiver tests pass
* fixing cache tests
* cache tests passing
* edited validator tests
* powchain tests passing
* halfway thru sync tests
* more sync test fixes
* add in tests for state/
* working through rpc tests
* assignments tests passed
* almost done with rpc/beacon tests
* resolved painful validator test
* fixed up even more tests
* resolve tests
* fix build
* reduce a randao mixes copy
* fixes under //beacon-chain/blockchain/...
* build //beacon-chain/core/...
* fixes
* Runtime Optimizations (#4648)
* parallelize shuffling
* clean up
* lint
* fix build
* use callback to read from registry
* fix array roots and size map
* new improvements
* reduce hash allocs
* improved shuffling
* terence's review
* use different method
* raul's comment
* new array roots
* remove clone in pre-compute
* Update beacon-chain/state/types.go
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* raul's review
* lint
* fix build issues
* fix visibility
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* fix visibility
* build works for all
* fix blockchain test
* fix a few tests
* fix more tests
* update validator in slashing
* archiver passing
* fixed rpc/validator
* progress on core tests
* resolve broken rpc tests
* blockchain tests passed
* fix up some tests in core
* fix message diff
* remove unnecessary save
* Save validator after slashing
* Update validators one by one
* another update
* fix everything
* fix more precompute tests
* fix blocks tests
* more elegant fix
* more helper fixes
* change back ?
* fix test
* fix skip slot
* fix test
* reset caches
* fix testutil
* raceoff fixed
* passing
* Retrieve cached state in the beginning
* lint
* Fixed tests part 1
* Fixed rest of the tests
* Minor changes to avoid copying, small refactor to reduce duplicated code
* Handle att req for slot 0
* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)
* Only populate merkle layers as needed, copy merkle layers on copy/clone.
* use custom copy
* Make maps of correct size
* slightly fast, doesn't wait for lock
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Target root can't be 0x00
* Don't use cache for current slot (may not be the right fix)
* fixed up tests
* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...
* Align with prev logic for process slots cachedState.Slot() < slot
* Fix Initial Sync Flag (#4692)
* fixes
* fix up some test failures due to lack of nil checks
* fix up some test failures due to lack of nil checks
* fix up imports
* revert some changes
* imports
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* resolving further conflicts
* Better skip slot cache (#4694)
* Return copy of skip slot cache state, disable skip slot cache on sync
* fix
* Fix pruning
* fix up issues with broken tests
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
419 lines
11 KiB
Go
package sync

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/protocol"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
	p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
}
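
// The tests below exercise the eth2 status (hello) handshake as implemented
// by this package's Service. On the wire, an exchange is a single
// request/response over a libp2p stream: the requester sends an SSZ-encoded
// pb.Status, and the responder replies with a one-byte response code followed
// by either an error message or its own SSZ-encoded pb.Status. A rough sketch
// of the responder-side framing these tests rely on (enc stands in for the
// service's p2p.Encoding(); this mirrors the test handlers, not the
// production handler itself):
//
//	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
//		// handle write error
//	}
//	if _, err := enc.EncodeWithLength(stream, resp); err != nil {
//		// handle encode error
//	}

// TestHelloRPCHandler_Disconnects_OnForkVersionMismatch verifies that a
// status request carrying an unknown fork version is answered with a
// non-zero response code and errWrongForkVersion, and that the handler then
// disconnects the offending peer.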
func TestHelloRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	if len(p1.Host.Network().Peers()) != 1 {
		t.Error("Expected peers to be connected")
	}

	r := &Service{p2p: p1}
	pcl := protocol.ID("/testing")

	var wg sync.WaitGroup
	wg.Add(1)
	p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		code, errMsg, err := ReadStatusCode(stream, p1.Encoding())
		if err != nil {
			t.Fatal(err)
		}
		if code == 0 {
			t.Error("Expected a non-zero code")
		}
		if errMsg != errWrongForkVersion.Error() {
			t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkVersion.Error()))
			t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, errWrongForkVersion.Error())
		}
	})
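
	// Open a stream from p1 and feed the handler a fabricated fork version;
	// the handler should write the error response read by p2's stream
	// handler above.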
	stream1, err := p1.Host.NewStream(context.Background(), p2.Host.ID(), pcl)
	if err != nil {
		t.Fatal(err)
	}

	err = r.statusRPCHandler(context.Background(), &pb.Status{HeadForkVersion: []byte("fake")}, stream1)
	if err != errWrongForkVersion {
		t.Errorf("Expected error %v, got %v", errWrongForkVersion, err)
	}

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	if len(p1.Host.Network().Peers()) != 0 {
		t.Error("handler did not disconnect peer")
	}
}
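
// TestHelloRPCHandler_ReturnsHelloMessage verifies the happy path: a status
// request with the correct fork version is answered with a success code and
// a pb.Status reflecting the chain's head slot, head root, and finalized
// checkpoint.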
func TestHelloRPCHandler_ReturnsHelloMessage(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	if len(p1.Host.Network().Peers()) != 1 {
		t.Error("Expected peers to be connected")
	}

	// Set up a head state with data we expect.
	headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
	if err != nil {
		t.Fatal(err)
	}
	finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 40})
	if err != nil {
		t.Fatal(err)
	}
	genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
	if err != nil {
		t.Fatal(err)
	}
	if err := genesisState.SetSlot(111); err != nil {
		t.Fatal(err)
	}
	if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
		t.Fatal(err)
	}
	finalizedCheckpt := &ethpb.Checkpoint{
		Epoch: 5,
		Root:  finalizedRoot[:],
	}

	r := &Service{
		p2p: p1,
		chain: &mock.ChainService{
			State:               genesisState,
			FinalizedCheckPoint: finalizedCheckpt,
			Root:                headRoot[:],
			Fork: &pb.Fork{
				PreviousVersion: params.BeaconConfig().GenesisForkVersion,
				CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			},
		},
	}
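
	// p2's stream handler below asserts that p1's handler replied with a
	// success code and a status derived from the mock chain service above.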
	// Setup streams
	pcl := protocol.ID("/testing")
	var wg sync.WaitGroup
	wg.Add(1)
	p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		expectSuccess(t, r, stream)
		out := &pb.Status{}
		if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil {
			t.Fatal(err)
		}
		expected := &pb.Status{
			HeadForkVersion: params.BeaconConfig().GenesisForkVersion,
			HeadSlot:        genesisState.Slot(),
			HeadRoot:        headRoot[:],
			FinalizedEpoch:  5,
			FinalizedRoot:   finalizedRoot[:],
		}
		if !proto.Equal(out, expected) {
			t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected)
		}
	})
	stream1, err := p1.Host.NewStream(context.Background(), p2.Host.ID(), pcl)
	if err != nil {
		t.Fatal(err)
	}

	err = r.statusRPCHandler(context.Background(), &pb.Status{HeadForkVersion: params.BeaconConfig().GenesisForkVersion}, stream1)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}
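
// TestHandshakeHandlers_Roundtrip verifies the full handshake lifecycle:
// p1 and p2 connect and exchange statuses, p1 marks p2 active, and once p2
// disconnects p1 moves the peer back to the inactive set.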
func TestHandshakeHandlers_Roundtrip(t *testing.T) {
	// Scenario is that p1 and p2 connect, exchange handshakes.
	// p2 disconnects and p1 should forget the handshake status.
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)

	st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
		Slot: 5,
	})
	if err != nil {
		t.Fatal(err)
	}
	r := &Service{
		p2p: p1,
		chain: &mock.ChainService{
			State:               st,
			FinalizedCheckPoint: &ethpb.Checkpoint{},
			Fork: &pb.Fork{
				PreviousVersion: params.BeaconConfig().GenesisForkVersion,
				CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			},
		},
		ctx: context.Background(),
	}

	r.Start()
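
	// Start registers the service's RPC stream handlers and connection
	// handlers on p1, so the status exchange below fires automatically once
	// the peers connect.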
	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz")
	var wg sync.WaitGroup
	wg.Add(1)
	p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := &pb.Status{}
		if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil {
			t.Fatal(err)
		}
		log.WithField("status", out).Warn("received status")

		resp := &pb.Status{HeadSlot: 100, HeadForkVersion: params.BeaconConfig().GenesisForkVersion}

		if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
			t.Fatal(err)
		}
		_, err := r.p2p.Encoding().EncodeWithLength(stream, resp)
		if err != nil {
			t.Fatal(err)
		}
		log.WithField("status", resp).Warn("sending status")
		stream.Close()
	})

	numInactive1 := len(p1.Peers().Inactive())
	numActive1 := len(p1.Peers().Active())

	p1.Connect(p2)

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	// Wait for stream buffer to be read.
	time.Sleep(200 * time.Millisecond)

	numInactive2 := len(p1.Peers().Inactive())
	numActive2 := len(p1.Peers().Active())

	if numInactive2 != numInactive1 {
		t.Errorf("Number of inactive peers changed unexpectedly: was %d, now %d", numInactive1, numInactive2)
	}
	if numActive2 != numActive1+1 {
		t.Errorf("Number of active peers unexpected: wanted %d, found %d", numActive1+1, numActive2)
	}

	if err := p2.Disconnect(p1.PeerID()); err != nil {
		t.Fatal(err)
	}
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerDisconnected)

	// Wait for disconnect event to trigger.
	time.Sleep(200 * time.Millisecond)

	numInactive3 := len(p1.Peers().Inactive())
	numActive3 := len(p1.Peers().Active())
	if numInactive3 != numInactive2+1 {
		t.Errorf("Number of inactive peers unexpected: wanted %d, found %d", numInactive2+1, numInactive3)
	}
	if numActive3 != numActive2-1 {
		t.Errorf("Number of active peers unexpected: wanted %d, found %d", numActive2-1, numActive3)
	}
}
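
// TestStatusRPCRequest_RequestSent verifies the requester side of the
// handshake: when p1 initiates the status request on connect, the outbound
// message matches p1's chain view and the peers remain connected afterwards.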
func TestStatusRPCRequest_RequestSent(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)

	// Set up a head state with data we expect.
	headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
	if err != nil {
		t.Fatal(err)
	}
	finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 40})
	if err != nil {
		t.Fatal(err)
	}
	genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
	if err != nil {
		t.Fatal(err)
	}
	if err := genesisState.SetSlot(111); err != nil {
		t.Fatal(err)
	}
	if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
		t.Fatal(err)
	}
	finalizedCheckpt := &ethpb.Checkpoint{
		Epoch: 5,
		Root:  finalizedRoot[:],
	}

	r := &Service{
		p2p: p1,
		chain: &mock.ChainService{
			State:               genesisState,
			FinalizedCheckPoint: finalizedCheckpt,
			Root:                headRoot[:],
			Fork: &pb.Fork{
				PreviousVersion: params.BeaconConfig().GenesisForkVersion,
				CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			},
		},
		ctx: context.Background(),
	}

	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz")
	var wg sync.WaitGroup
	wg.Add(1)
	p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := &pb.Status{}
		if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil {
			t.Fatal(err)
		}
		expected := &pb.Status{
			HeadForkVersion: params.BeaconConfig().GenesisForkVersion,
			HeadSlot:        genesisState.Slot(),
			HeadRoot:        headRoot[:],
			FinalizedEpoch:  5,
			FinalizedRoot:   finalizedRoot[:],
		}
		if !proto.Equal(out, expected) {
			t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected)
		}
	})
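
	// Register the status request as a connection handler so that simply
	// connecting to p2 triggers the outbound request asserted above.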
	p1.AddConnectionHandler(r.sendRPCStatusRequest)
	p1.Connect(p2)

	if testutil.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	if len(p1.Host.Network().Peers()) != 1 {
		t.Error("Expected peers to continue being connected")
	}
}
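
// TestStatusRPCRequest_BadPeerHandshake verifies that when a peer answers
// the status request with a mismatched fork version, p1 records a bad
// response for that peer and disconnects it.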
func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)

	// Set up a head state with data we expect.
	headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
	if err != nil {
		t.Fatal(err)
	}
	finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 40})
	if err != nil {
		t.Fatal(err)
	}
	genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
	if err != nil {
		t.Fatal(err)
	}
	if err := genesisState.SetSlot(111); err != nil {
		t.Fatal(err)
	}
	if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
		t.Fatal(err)
	}
	finalizedCheckpt := &ethpb.Checkpoint{
		Epoch: 5,
		Root:  finalizedRoot[:],
	}

	r := &Service{
		p2p: p1,
		chain: &mock.ChainService{
			State:               genesisState,
			FinalizedCheckPoint: finalizedCheckpt,
			Root:                headRoot[:],
			Fork: &pb.Fork{
				PreviousVersion: params.BeaconConfig().GenesisForkVersion,
				CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			},
		},
		ctx: context.Background(),
	}

	r.Start()

	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz")
	var wg sync.WaitGroup
	wg.Add(1)
	p2.Host.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := &pb.Status{}
		if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil {
			t.Fatal(err)
		}
		// Reply with a status carrying a fork version that doesn't match
		// p1's, so p1 should treat the handshake as bad.
		expected := &pb.Status{
			HeadForkVersion: []byte{1, 1, 1, 1},
			HeadSlot:        genesisState.Slot(),
			HeadRoot:        headRoot[:],
			FinalizedEpoch:  5,
			FinalizedRoot:   finalizedRoot[:],
		}
		if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
			log.WithError(err).Error("Failed to write to stream")
		}
		_, err := r.p2p.Encoding().EncodeWithLength(stream, expected)
		if err != nil {
			t.Errorf("Could not send status: %v", err)
		}
	})
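
	// Connecting triggers p1's status request; the mismatched response above
	// should leave the peer disconnected with exactly one bad response
	// recorded.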
	p1.Connect(p2)

	if testutil.WaitTimeout(&wg, time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
	time.Sleep(100 * time.Millisecond)

	connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
	if err != nil {
		t.Fatal("Failed to obtain peer connection state")
	}
	if connectionState != peers.PeerDisconnected {
		t.Error("Expected peer to be disconnected")
	}

	badResponses, err := p1.Peers().BadResponses(p2.PeerID())
	if err != nil {
		t.Fatal("Failed to obtain peer bad response count")
	}
	if badResponses != 1 {
		t.Errorf("Bad response was not bumped to one, instead it is %d", badResponses)
	}
}