// Package stategen defines functions to regenerate beacon chain states
// by replaying blocks from a stored state checkpoint, useful for
// optimization and reducing a beacon node's resource consumption.
package stategen

import (
	"context"
	"errors"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)

// defaultHotStateDBInterval is the default interval, in slots, at which hot
// states are saved to the DB.
var defaultHotStateDBInterval uint64 = 128

// State represents a management object that handles the internal
// logic of maintaining both hot and cold states in DB.
type State struct {
	beaconDB                db.NoHeadAccessDatabase
	slotsPerArchivedPoint   uint64
	hotStateCache           *hotStateCache
	finalizedInfo           *finalizedInfo
	epochBoundaryStateCache *epochBoundaryState
	saveHotStateDB          *saveHotStateDbConfig
}

// saveHotStateDbConfig tracks the configuration for saving hot states to the DB
// in the event of long non-finality: whether the feature is enabled, how often
// the node saves hot states, and which hot state roots have been saved so far.
type saveHotStateDbConfig struct {
	enabled         bool
	lock            sync.Mutex
	duration        uint64
	savedStateRoots [][32]byte
}

// finalizedInfo tracks the finalized checkpoint. Its slot and block root also
// mark the split between the cold and hot sections of the DB.
type finalizedInfo struct {
	slot  uint64
	root  [32]byte
	state *state.BeaconState
	lock  sync.RWMutex
}

// New returns a new state management object.
func New(beaconDB db.NoHeadAccessDatabase) *State {
	return &State{
		beaconDB:                beaconDB,
		hotStateCache:           newHotStateCache(),
		finalizedInfo:           &finalizedInfo{slot: 0, root: params.BeaconConfig().ZeroHash},
		slotsPerArchivedPoint:   params.BeaconConfig().SlotsPerArchivedPoint,
		epochBoundaryStateCache: newBoundaryStateCache(),
		saveHotStateDB: &saveHotStateDbConfig{
			duration: defaultHotStateDBInterval,
		},
	}
}
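
// A minimal construction sketch (hedged; beaconDB is assumed to be an already
// opened db.NoHeadAccessDatabase from the caller's scope):
//
//	sg := stategen.New(beaconDB)
//
// The new object anchors finalizedInfo at slot 0 with the zero hash root and uses
// defaultHotStateDBInterval as the hot state save duration.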

// Resume resumes the state management object from the previously saved finalized
// checkpoint in the DB, returning the finalized state (or the genesis state if
// nothing has been finalized yet).
func (s *State) Resume(ctx context.Context) (*state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
	defer span.End()

	c, err := s.beaconDB.FinalizedCheckpoint(ctx)
	if err != nil {
		return nil, err
	}
	fRoot := bytesutil.ToBytes32(c.Root)
	// Resume from the genesis state if the last finalized root is the zero hash.
	if fRoot == params.BeaconConfig().ZeroHash {
		return s.beaconDB.GenesisState(ctx)
	}
	fState, err := s.StateByRoot(ctx, fRoot)
	if err != nil {
		return nil, err
	}
	if fState == nil {
		return nil, errors.New("finalized state not found in disk")
	}

	// Clean up dirty states in the background; a failure here is logged, not fatal.
	go func() {
		if err := s.beaconDB.CleanUpDirtyStates(ctx, s.slotsPerArchivedPoint); err != nil {
			log.WithError(err).Error("Could not clean up dirty states")
		}
	}()

	s.finalizedInfo = &finalizedInfo{slot: fState.Slot(), root: fRoot, state: fState.Copy()}

	return fState, nil
}
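
// A hedged usage sketch at node startup (sg, ctx, and the error handling are
// illustrative):
//
//	fState, err := sg.Resume(ctx)
//	if err != nil {
//		return err
//	}
//
// Before any checkpoint has been finalized, fState is the genesis state; afterwards
// it is the last finalized state, and finalizedInfo holds a copy of it.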

// SaveFinalizedState saves the finalized slot, root, and state in memory for use
// by the state gen service. These values allow migration to start at the correct
// slot and ensure that hot state replay is always lower-bounded by the last
// finalized state.
func (s *State) SaveFinalizedState(fSlot uint64, fRoot [32]byte, fState *state.BeaconState) {
	s.finalizedInfo.lock.Lock()
	defer s.finalizedInfo.lock.Unlock()
	s.finalizedInfo.root = fRoot
	s.finalizedInfo.state = fState.Copy()
	s.finalizedInfo.slot = fSlot
}
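
// A hedged sketch of a caller refreshing the cache after a new finalized
// checkpoint (cp and fState here are illustrative names from the caller's scope):
//
//	fRoot := bytesutil.ToBytes32(cp.Root)
//	sg.SaveFinalizedState(fState.Slot(), fRoot, fState)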

// isFinalizedRoot returns true if the input root equals the cached finalized root.
func (s *State) isFinalizedRoot(r [32]byte) bool {
	s.finalizedInfo.lock.RLock()
	defer s.finalizedInfo.lock.RUnlock()
	return r == s.finalizedInfo.root
}

// finalizedState returns a copy of the cached finalized state.
func (s *State) finalizedState() *state.BeaconState {
	s.finalizedInfo.lock.RLock()
	defer s.finalizedInfo.lock.RUnlock()
	return s.finalizedInfo.state.Copy()
}
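
// A hedged sketch of how the two helpers above might combine on a state lookup
// fast path (blockRoot is an illustrative parameter):
//
//	if s.isFinalizedRoot(blockRoot) {
//		return s.finalizedState(), nil
//	}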