prysm-pulse/beacon-chain/state/stategen/service.go
terence tsao 7f7866ff2a
Micro optimizations on new-state-mgmt service for initial syncing (#5241)
* Starting a quick PoC

* Rate limit to one epoch worth of blocks in memory

* Proof of concept working

* Quick comment out

* Save previous finalized checkpoint

* Test

* Minor fixes

* More run time fixes

* Remove panic

* Feature flag

* Removed unused methods

* Fixed tests

* E2e test

* comment

* Compatible with current initial sync

* Starting

* New cache

* Cache getters and setters

* It should be part of state gen

* Need to use cache for DB

* Don't have to use finalized state

* Rm unused file

* some changes to memory mgmt when using mempool

* More run time fixes

* Can sync to head

* Feedback

* Revert "some changes to memory mgmt when using mempool"

This reverts commit f5b3e7ff4714fef9f0397007f519a45fa259ad24.

* Fixed sync tests

* Fixed existing tests

* Test for state summary getter

* Gaz

* Fix kafka passthrough

* Fixed inputs

* Gaz

* Fixed build

* Fixed visibility

* Trying without the ignore

* Didn't work..

* Fix kafka

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-03-30 17:10:45 -05:00


package stategen

import (
	"context"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)

const archivedInterval = 256
// State represents a management object that handles the internal
// logic of maintaining both hot and cold states in DB.
type State struct {
	beaconDB                db.NoHeadAccessDatabase
	slotsPerArchivedPoint   uint64
	epochBoundarySlotToRoot map[uint64][32]byte
	epochBoundaryLock       sync.RWMutex
	hotStateCache           *cache.HotStateCache
	splitInfo               *splitSlotAndRoot
	stateSummaryCache       *cache.StateSummaryCache
}
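
// Not part of the original file: a minimal sketch of how the epochBoundarySlotToRoot map
// is presumably read and written under epochBoundaryLock. The real accessors live outside
// this file and may differ; the "Sketch" names below are illustrative only.

// setEpochBoundaryRootSketch records the block root observed at an epoch boundary slot.
func (s *State) setEpochBoundaryRootSketch(slot uint64, root [32]byte) {
	s.epochBoundaryLock.Lock()
	defer s.epochBoundaryLock.Unlock()
	s.epochBoundarySlotToRoot[slot] = root
}

// epochBoundaryRootSketch returns the recorded root for an epoch boundary slot, if present.
func (s *State) epochBoundaryRootSketch(slot uint64) ([32]byte, bool) {
	s.epochBoundaryLock.RLock()
	defer s.epochBoundaryLock.RUnlock()
	r, ok := s.epochBoundarySlotToRoot[slot]
	return r, ok
}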
// splitSlotAndRoot tracks the split point: the slot and block root at which the
// cold and hot sections of the DB split.
type splitSlotAndRoot struct {
	slot uint64
	root [32]byte
}
// New returns a new state management object.
func New(db db.NoHeadAccessDatabase, stateSummaryCache *cache.StateSummaryCache) *State {
	return &State{
		beaconDB:                db,
		epochBoundarySlotToRoot: make(map[uint64][32]byte),
		hotStateCache:           cache.NewHotStateCache(),
		splitInfo:               &splitSlotAndRoot{slot: 0, root: params.BeaconConfig().ZeroHash},
		slotsPerArchivedPoint:   archivedInterval,
		stateSummaryCache:       stateSummaryCache,
	}
}
// Resume resumes a new state management object from the previously saved finalized checkpoint in the DB.
func (s *State) Resume(ctx context.Context) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
	defer span.End()

	lastArchivedRoot := s.beaconDB.LastArchivedIndexRoot(ctx)
	lastArchivedState, err := s.beaconDB.State(ctx, lastArchivedRoot)
	if err != nil {
		return nil, err
	}

	// Resume as genesis state if there's no last archived state.
	if lastArchivedState == nil {
		return s.beaconDB.GenesisState(ctx)
	}

	s.splitInfo = &splitSlotAndRoot{slot: lastArchivedState.Slot(), root: lastArchivedRoot}

	// In case the finalized state slot was skipped, use the start slot of the next epoch
	// as the boundary slot and record its root. setEpochBoundaryRoot is assumed to be
	// defined in a sibling file of this package.
	slot := lastArchivedState.Slot()
	if !helpers.IsEpochStart(slot) {
		slot = helpers.StartSlot(helpers.SlotToEpoch(slot) + 1)
	}
	s.setEpochBoundaryRoot(slot, lastArchivedRoot)

	return lastArchivedState, nil
}
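
// Not part of the original file: a minimal sketch, under assumed wiring, of how a caller
// might construct the service at startup and recover the last archived (or genesis) state.
// The function name and the fresh cache.NewStateSummaryCache() are illustrative assumptions.
func resumeAtStartupSketch(ctx context.Context, beaconDB db.NoHeadAccessDatabase) (*state.BeaconState, error) {
	svc := New(beaconDB, cache.NewStateSummaryCache())
	// Resume falls back to the genesis state when no archived state exists yet.
	return svc.Resume(ctx)
}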
// This verifies the archive point frequency is valid. It checks the interval
// is a multiple of the number of slots per epoch. This ensures we have at least one
// archive point within range of our state root history when iterating
// backwards. It also ensures the archive points align with hot state summaries,
// which makes it quicker to migrate hot to cold.
func verifySlotsPerArchivePoint(slotsPerArchivePoint uint64) bool {
	return slotsPerArchivePoint > 0 &&
		slotsPerArchivePoint%params.BeaconConfig().SlotsPerEpoch == 0
}