mirror of
https://gitlab.com/pulsechaincom/erigon-pulse.git
synced 2024-12-22 03:30:37 +00:00
d41d523050
"whitelisting" mechanism (list of files - stored in DB) - which protecting us from downloading new files after upgrade/downgrade was broken. And seems it became over-complicated with time. I replacing it by 1 persistent flag inside downloader: "prohibit_new_downloads.lock" Erigon will turn downloader into this mode after downloading/verification of first snapshots. ``` //Corner cases: // - Erigon generated file X with hash H1. User upgraded Erigon. New version has preverified file X with hash H2. Must ignore H2 (don't send to Downloader) // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) // - After "download once" - Erigon will produce and seed new files ``` ------ `downloader --seedbox` is never "prohibit new downloads"
78 lines
2.2 KiB
Go
78 lines
2.2 KiB
Go
package silkworm
|
|
|
|
import (
|
|
"errors"
|
|
"math/big"
|
|
|
|
"github.com/erigontech/silkworm-go"
|
|
"github.com/ledgerwatch/erigon-lib/kv"
|
|
"github.com/ledgerwatch/erigon/consensus"
|
|
)
|
|
|
|
// Type aliases re-exporting silkworm-go types under local names, so the rest
// of the package (and its callers) can refer to them without importing
// silkworm_go directly. These are aliases, not new types: values are fully
// interchangeable with the silkworm_go originals.
type Silkworm = silkworm_go.Silkworm

// SentrySettings configures the Silkworm sentry component (see silkworm-go).
type SentrySettings = silkworm_go.SentrySettings

// MappedHeaderSnapshot is a memory-mapped headers snapshot segment.
type MappedHeaderSnapshot = silkworm_go.MappedHeaderSnapshot

// MappedBodySnapshot is a memory-mapped bodies snapshot segment.
type MappedBodySnapshot = silkworm_go.MappedBodySnapshot

// MappedTxnSnapshot is a memory-mapped transactions snapshot segment.
type MappedTxnSnapshot = silkworm_go.MappedTxnSnapshot

// MappedChainSnapshot groups the header/body/txn snapshots of one chain range.
type MappedChainSnapshot = silkworm_go.MappedChainSnapshot
|
|
|
|
// Constructor functions re-exported from silkworm-go, matching the type
// aliases above so callers never need the silkworm_go import.
var New = silkworm_go.New
var NewMemoryMappedRegion = silkworm_go.NewMemoryMappedRegion
var NewMappedHeaderSnapshot = silkworm_go.NewMappedHeaderSnapshot
var NewMappedBodySnapshot = silkworm_go.NewMappedBodySnapshot
var NewMappedTxnSnapshot = silkworm_go.NewMappedTxnSnapshot

// ErrInterrupted is the silkworm-go sentinel error re-exported for callers;
// compare with errors.Is.
var ErrInterrupted = silkworm_go.ErrInterrupted
|
|
|
|
// RpcDaemonService couples a Silkworm instance with the read-only database
// the embedded RPC daemon serves from. Use NewRpcDaemonService to construct,
// then Start/Stop to control the daemon.
type RpcDaemonService struct {
	silkworm *Silkworm // underlying Silkworm instance; must be non-nil before Start
	db       kv.RoDB   // read-only DB whose C handle is passed to the daemon
}
|
|
|
|
func NewRpcDaemonService(s *Silkworm, db kv.RoDB) RpcDaemonService {
|
|
return RpcDaemonService{
|
|
silkworm: s,
|
|
db: db,
|
|
}
|
|
}
|
|
|
|
func (service RpcDaemonService) Start() error {
|
|
return service.silkworm.StartRpcDaemon(service.db.CHandle())
|
|
}
|
|
|
|
// Stop shuts down the Silkworm RPC daemon started by Start, returning any
// shutdown error from silkworm-go.
func (service RpcDaemonService) Stop() error {
	return service.silkworm.StopRpcDaemon()
}
|
|
|
|
type SentryService struct {
|
|
silkworm *silkworm_go.Silkworm
|
|
settings silkworm_go.SentrySettings
|
|
}
|
|
|
|
func NewSentryService(s *Silkworm, settings silkworm_go.SentrySettings) SentryService {
|
|
return SentryService{
|
|
silkworm: s,
|
|
settings: settings,
|
|
}
|
|
}
|
|
|
|
// Start launches the Silkworm sentry with the service's settings, returning
// any startup error from silkworm-go.
func (service SentryService) Start() error {
	return service.silkworm.SentryStart(service.settings)
}
|
|
|
|
// Stop shuts down the Silkworm sentry started by Start, returning any
// shutdown error from silkworm-go.
func (service SentryService) Stop() error {
	return service.silkworm.SentryStop()
}
|
|
|
|
func ExecuteBlocks(s *Silkworm, txn kv.Tx, chainID *big.Int, startBlock uint64, maxBlock uint64, batchSize uint64, writeChangeSets, writeReceipts, writeCallTraces bool) (uint64, error) {
|
|
lastExecutedBlock, err := s.ExecuteBlocks(txn.CHandle(), chainID, startBlock, maxBlock, batchSize, writeChangeSets, writeReceipts, writeCallTraces)
|
|
if (err != nil) && errors.Is(err, silkworm_go.ErrInvalidBlock) {
|
|
return lastExecutedBlock, consensus.ErrInvalidBlock
|
|
}
|
|
return lastExecutedBlock, err
|
|
}
|
|
|
|
// CanAddSnapshotsToSilkwarm is implemented by snapshot repositories capable of
// registering their snapshots with a Silkworm instance.
//
// NOTE(review): "Silkwarm" looks like a typo for "Silkworm", but the interface
// is exported and renaming it would break implementers elsewhere — rename in a
// dedicated change with a deprecation alias.
type CanAddSnapshotsToSilkwarm interface {
	AddSnapshotsToSilkworm(*Silkworm) error
}
|