Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2024-12-25 04:47:18 +00:00.

Commit cc741ed8af:
* begin state service
* begin on the state trie idea
* created beacon state structure
* add in the full clone getter
* return by value instead
* add all setters
* new state setters are being completed
* arrays roots exposed
* close to finishing all these headerssss
* functionality complete
* added in proto benchmark test
* test for compatibility
* add test for compat
* comments fixed
* add clone
* add clone
* remove underlying copies
* make it immutable
* integrate it into chainservice
* revert
* wrap up comments for package
* address all comments and godocs
* address all comments
* clone the pending attestation properly
* properly clone remaining items
* tests pass fixed bug
* begin using it instead of head state
* prevent nil pointer exceptions
* begin using new struct in db
* integrated new type into db package
* add proper nil checks
* using new state in archiver
* refactored much of core
* editing all the precompute functions
* done with most core refactor
* fixed up some bugs in the clone comparisons
* append current epoch atts
* add missing setters
* add new setters
* fix other core methods
* fix up transition
* main service and forkchoice
* fix rpc
* integrated to powchain
* some more changes
* fix build
* improve processing of deposits
* fix error
* prevent panic
* comment
* fix process att
* gaz
* fix up att process
* resolve existing review comments
* resolve another batch of gh comments
* resolve broken cpt state
* revise testutil to use the new state
* begin updating the state transition func to pass in more compartmentalized args
* finish editing transition function to return errors
* block operations pretty much done with refactor
* state transition fully refactored
* got epoch processing completed
* fix build in fork choice
* fixing more of the build
* fix up broken sync package
* it builds nowww it buildssss
* revert registry changes
* Recompute on Read (#4627)
* compute on read
* fix up eth1 data votes
* looking into slashings bug introduced in core/
* able to advance more slots
* add logging
* can now sync with testnet yay
* remove the leaves algorithm and other merkle imports
* expose initialize unsafe funcs
* Update beacon-chain/db/kv/state.go
* lint
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* More Optimizations for New State (#4641)
* map optimization
* more optimizations
* use a custom hasher
* comment
* block operations optimizations
* Update beacon-chain/state/types.go
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* fixed up various operations to use the validator index map access
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* archiver tests pass
* fixing cache tests
* cache tests passing
* edited validator tests
* powchain tests passing
* halfway thru sync tests
* more sync test fixes
* add in tests for state/
* working through rpc tests
* assignments tests passed
* almost done with rpc/beacon tests
* resolved painful validator test
* fixed up even more tests
* resolve tests
* fix build
* reduce a randao mixes copy
* fixes under //beacon-chain/blockchain/...
* build //beacon-chain/core/...
* fixes
* Runtime Optimizations (#4648)
* parallelize shuffling
* clean up
* lint
* fix build
* use callback to read from registry
* fix array roots and size map
* new improvements
* reduce hash allocs
* improved shuffling
* terence's review
* use different method
* raul's comment
* new array roots
* remove clone in pre-compute
* Update beacon-chain/state/types.go
Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* raul's review
* lint
* fix build issues
* fix visibility
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* fix visibility
* build works for all
* fix blockchain test
* fix a few tests
* fix more tests
* update validator in slashing
* archiver passing
* fixed rpc/validator
* progress on core tests
* resolve broken rpc tests
* blockchain tests passed
* fix up some tests in core
* fix message diff
* remove unnecessary save
* Save validator after slashing
* Update validators one by one
* another update
* fix everything
* fix more precompute tests
* fix blocks tests
* more elegant fix
* more helper fixes
* change back ?
* fix test
* fix skip slot
* fix test
* reset caches
* fix testutil
* raceoff fixed
* passing
* Retrieve cached state in the beginning
* lint
* Fixed tests part 1
* Fixed rest of the tests
* Minor changes to avoid copying, small refactor to reduce deplicated code
* Handle att req for slot 0
* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)
* Only populate merkle layers as needed, copy merkle layers on copy/clone.
* use custom copy
* Make maps of correct size
* slightly fast, doesn't wait for lock
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Target root can't be 0x00
* Don't use cache for current slot (may not be the right fix)
* fixed up tests
* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...
* Align with prev logic for process slots cachedState.Slot() < slot
* Fix Initial Sync Flag (#4692)
* fixes
* fix up some test failures due to lack of nil checks
* fix up some test failures due to lack of nil checks
* fix up imports
* revert some changes
* imports
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* resolving further conflicts
* Better skip slot cache (#4694)
* Return copy of skip slot cache state, disable skip slot cache on sync
* fix
* Fix pruning
* fix up issues with broken tests
Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
package blockchain

import (
	"bytes"
	"context"
	"encoding/hex"
	"io/ioutil"
	"reflect"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/gogo/protobuf/proto"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	ssz "github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
	beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)
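
// init configures logrus for these tests: debug level is enabled but all output is discarded.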
func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
}
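
// store is a no-op test double: every method returns zero values except Head,
// which returns the preset headRoot.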
type store struct {
	headRoot []byte
}

func (s *store) OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
	return nil, nil
}

func (s *store) OnBlockCacheFilteredTree(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
	return nil, nil
}

func (s *store) OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
	return nil, nil
}

func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
	return nil, nil
}

func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
	return nil
}

func (s *store) FinalizedCheckpt() *ethpb.Checkpoint {
	return nil
}

func (s *store) JustifiedCheckpt() *ethpb.Checkpoint {
	return nil
}

func (s *store) Head(ctx context.Context) ([]byte, error) {
	return s.headRoot, nil
}
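
// mockBeaconNode mocks the beacon node by exposing a lazily initialized state feed.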
type mockBeaconNode struct {
	stateFeed *event.Feed
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
	if mbn.stateFeed == nil {
		mbn.stateFeed = new(event.Feed)
	}
	return mbn.stateFeed
}
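
// mockBroadcaster records whether Broadcast has been called.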
type mockBroadcaster struct {
	broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
	mb.broadcastCalled = true
	return nil
}
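
// Compile-time check that mockBroadcaster satisfies the p2p.Broadcaster interface.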
var _ = p2p.Broadcaster(&mockBroadcaster{})
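
// setupBeaconChain wires a powchain service and a chain service Config around the
// given test database and returns a Service ready for use in tests.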
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
	endpoint := "ws://127.0.0.1"
	ctx := context.Background()
	var web3Service *powchain.Service
	var err error
	web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
		BeaconDB:        beaconDB,
		ETH1Endpoint:    endpoint,
		DepositContract: common.Address{},
	})
	if err != nil {
		t.Fatalf("unable to set up web3 service: %v", err)
	}

	cfg := &Config{
		BeaconBlockBuf:    0,
		BeaconDB:          beaconDB,
		DepositCache:      depositcache.NewDepositCache(),
		ChainStartFetcher: web3Service,
		P2p:               &mockBroadcaster{},
		StateNotifier:     &mockBeaconNode{},
		AttPool:           attestations.NewPool(),
	}
	if err != nil {
		t.Fatalf("could not register blockchain service: %v", err)
	}
	chainService, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatalf("unable to setup chain service: %v", err)
	}
	chainService.genesisTime = time.Unix(1, 0) // non-zero time

	return chainService
}
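
// TestChainStartStop_Uninitialized starts the service without any persisted genesis
// state, feeds it a ChainStarted event, waits for the Initialized event, and verifies
// that a genesis state is saved and the service shuts down with a canceled context.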
func TestChainStartStop_Uninitialized(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	chainService := setupBeaconChain(t, db)

	// Listen for state events.
	stateSubChannel := make(chan *feed.Event, 1)
	stateSub := chainService.stateNotifier.StateFeed().Subscribe(stateSubChannel)

	// Test the chain start state notifier.
	genesisTime := time.Unix(1, 0)
	chainService.Start()
	event := &feed.Event{
		Type: statefeed.ChainStarted,
		Data: &statefeed.ChainStartedData{
			StartTime: genesisTime,
		},
	}
	// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
	for sent := 1; sent == 1; {
		sent = chainService.stateNotifier.StateFeed().Send(event)
		if sent == 1 {
			// Flush our local subscriber.
			<-stateSubChannel
		}
	}

	// Now wait for notification the state is ready.
	for stateInitialized := false; stateInitialized == false; {
		recv := <-stateSubChannel
		if recv.Type == statefeed.Initialized {
			stateInitialized = true
		}
	}
	stateSub.Unsubscribe()

	beaconState, err := db.HeadState(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if beaconState == nil || beaconState.Slot() != 0 {
		t.Error("Expected canonical state feed to send a state with genesis block")
	}
	if err := chainService.Stop(); err != nil {
		t.Fatalf("Unable to stop chain service: %v", err)
	}
	// The context should have been canceled.
	if chainService.ctx.Err() != context.Canceled {
		t.Error("Context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "Waiting")
	testutil.AssertLogsContain(t, hook, "Initialized beacon chain genesis state")
}
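
// TestChainStartStop_Initialized saves a genesis block, state, and justified checkpoint
// ahead of time, then verifies that the service starts against the existing chain data
// and stops with a canceled context.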
func TestChainStartStop_Initialized(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	chainService := setupBeaconChain(t, db)

	genesisBlk := b.NewGenesisBlock([]byte{})
	blkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(ctx, genesisBlk); err != nil {
		t.Fatal(err)
	}
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, s, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}); err != nil {
		t.Fatal(err)
	}

	// Test the start function.
	chainService.Start()

	if err := chainService.Stop(); err != nil {
		t.Fatalf("unable to stop chain service: %v", err)
	}

	// The context should have been canceled.
	if chainService.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "data already exists")
}
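
// TestChainService_InitializeBeaconChain builds a genesis state from ten deterministic
// deposits, initializes the beacon chain, and checks that the validators, head state,
// head block, and canonical genesis root are all populated.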
func TestChainService_InitializeBeaconChain(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	bc := setupBeaconChain(t, db)
	var err error

	// Set up 10 deposits pre chain start for validators to register.
	count := uint64(10)
	deposits, _, _ := testutil.DeterministicDepositsAndKeys(count)
	trie, _, err := testutil.DepositTrieFromDeposits(deposits)
	if err != nil {
		t.Fatal(err)
	}
	hashTreeRoot := trie.HashTreeRoot()
	genState, err := state.EmptyGenesisState()
	if err != nil {
		t.Fatal(err)
	}
	genState.SetEth1Data(&ethpb.Eth1Data{
		DepositRoot:  hashTreeRoot[:],
		DepositCount: uint64(len(deposits)),
	})
	genState, err = b.ProcessDeposits(ctx, genState, &ethpb.BeaconBlockBody{Deposits: deposits})
	if err != nil {
		t.Fatal(err)
	}
	if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{
		DepositRoot: hashTreeRoot[:],
	}); err != nil {
		t.Fatal(err)
	}

	s, err := bc.beaconDB.State(ctx, bytesutil.ToBytes32(bc.canonicalRoots[0]))
	if err != nil {
		t.Fatal(err)
	}

	for _, v := range s.Validators() {
		if !db.HasValidatorIndex(ctx, v.PublicKey) {
			t.Errorf("Validator %s missing from db", hex.EncodeToString(v.PublicKey))
		}
	}

	if _, err := bc.HeadState(ctx); err != nil {
		t.Error(err)
	}
	if bc.HeadBlock() == nil {
		t.Error("Head block can't be nil after initialize beacon chain")
	}
	if bc.canonicalRoots[0] == nil {
		t.Error("Canonical root for slot 0 can't be nil after initialize beacon chain")
	}
}
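
// TestChainService_InitializeChainInfo persists a genesis block and a finalized head
// block with its state, then verifies that initializeChainInfo restores the head block,
// head state, head slot, head root, and genesis root from the database.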
func TestChainService_InitializeChainInfo(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	genesis := b.NewGenesisBlock([]byte{})
	genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveGenesisBlockRoot(ctx, genesisRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(ctx, genesis); err != nil {
		t.Fatal(err)
	}

	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
	headBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}}
	headState, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: finalizedSlot})
	if err != nil {
		t.Fatal(err)
	}
	headRoot, _ := ssz.HashTreeRoot(headBlock.Block)
	if err := db.SaveState(ctx, headState, headRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(ctx, headBlock); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
		Epoch: helpers.SlotToEpoch(finalizedSlot),
		Root:  headRoot[:],
	}); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(ctx, headBlock); err != nil {
		t.Fatal(err)
	}
	c := &Service{beaconDB: db, canonicalRoots: make(map[uint64][]byte)}
	if err := c.initializeChainInfo(ctx); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
		t.Error("head block incorrect")
	}
	s, err := c.HeadState(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(s, headState) {
		t.Error("head state incorrect")
	}
	if headBlock.Block.Slot != c.HeadSlot() {
		t.Error("head slot incorrect")
	}
	r, err := c.HeadRoot(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(headRoot[:], r) {
		t.Error("head root incorrect")
	}
	if c.genesisRoot != genesisRoot {
		t.Error("genesis block root incorrect")
	}
}
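
// TestChainService_SaveHeadNoDB verifies that saveHeadNoDB does not persist the new
// head block to the database.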
func TestChainService_SaveHeadNoDB(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
	r, _ := ssz.HashTreeRoot(b)
	if err := s.saveHeadNoDB(ctx, b, r); err != nil {
		t.Fatal(err)
	}

	newB, err := s.beaconDB.HeadBlock(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if reflect.DeepEqual(newB, b) {
		t.Error("head block should not be equal")
	}
}