2020-02-16 23:28:20 +00:00
|
|
|
package stategen
|
|
|
|
|
|
|
|
import (
	"context"
	"errors"
	"sync"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)
|
|
|
|
|
|
|
|
// State represents a management object that handles the internal
|
|
|
|
// logic of maintaining both hot and cold states in DB.
|
|
|
|
type State struct {
|
2020-03-03 19:07:34 +00:00
|
|
|
beaconDB db.NoHeadAccessDatabase
|
2020-03-05 16:22:20 +00:00
|
|
|
lastArchivedSlot uint64
|
2020-03-12 00:38:30 +00:00
|
|
|
slotsPerArchivedPoint uint64
|
2020-03-03 19:07:34 +00:00
|
|
|
epochBoundarySlotToRoot map[uint64][32]byte
|
|
|
|
epochBoundaryLock sync.RWMutex
|
2020-03-08 06:24:57 +00:00
|
|
|
hotStateCache *cache.HotStateCache
|
2020-03-12 02:27:16 +00:00
|
|
|
splitInfo *splitSlotAndRoot
|
|
|
|
}
|
|
|
|
|
|
|
|
// splitSlotAndRoot tracks the split point: the slot and the block root at
// which the cold and hot sections of the DB divide.
type splitSlotAndRoot struct {
	slot uint64   // first slot of the hot section
	root [32]byte // block root at the split slot
}
|
|
|
|
|
|
|
|
// New returns a new state management object.
|
|
|
|
func New(db db.NoHeadAccessDatabase) *State {
|
|
|
|
return &State{
|
2020-03-03 19:07:34 +00:00
|
|
|
beaconDB: db,
|
|
|
|
epochBoundarySlotToRoot: make(map[uint64][32]byte),
|
2020-03-08 06:24:57 +00:00
|
|
|
hotStateCache: cache.NewHotStateCache(),
|
2020-03-12 02:27:16 +00:00
|
|
|
splitInfo: &splitSlotAndRoot{slot: 0, root: params.BeaconConfig().ZeroHash},
|
2020-02-16 23:28:20 +00:00
|
|
|
}
|
|
|
|
}
|
2020-03-06 23:06:01 +00:00
|
|
|
|
2020-03-15 16:47:49 +00:00
|
|
|
// Resume resumes a new state management object from previously saved finalized check point in DB.
|
|
|
|
func (s *State) Resume(ctx context.Context, finalizedRoot [32]byte) (*state.BeaconState, error) {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
finalizedState, err := s.beaconDB.State(ctx, finalizedRoot)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.splitInfo = &splitSlotAndRoot{slot: finalizedState.Slot(), root: finalizedRoot}
|
|
|
|
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: finalizedState.Slot(), Root: finalizedRoot[:], BoundaryRoot: finalizedRoot[:]}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// In case the finalized state slot was skipped.
|
|
|
|
slot := finalizedState.Slot()
|
|
|
|
if !helpers.IsEpochStart(slot) {
|
|
|
|
slot = helpers.StartSlot(helpers.SlotToEpoch(slot) + 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
s.setEpochBoundaryRoot(slot, finalizedRoot)
|
|
|
|
|
|
|
|
return finalizedState, nil
|
|
|
|
}
|
|
|
|
|
2020-03-06 23:06:01 +00:00
|
|
|
// This verifies the archive point frequency is valid. It checks the interval
|
|
|
|
// is a divisor of the number of slots per epoch. This ensures we have at least one
|
|
|
|
// archive point within range of our state root history when iterating
|
|
|
|
// backwards. It also ensures the archive points align with hot state summaries
|
|
|
|
// which makes it quicker to migrate hot to cold.
|
|
|
|
func verifySlotsPerArchivePoint(slotsPerArchivePoint uint64) bool {
|
|
|
|
return slotsPerArchivePoint > 0 &&
|
|
|
|
slotsPerArchivePoint%params.BeaconConfig().SlotsPerEpoch == 0
|
|
|
|
}
|