mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git, synced 2025-01-08 20:11:21 +00:00
d41d523050
"whitelisting" mechanism (list of files - stored in DB) - which protecting us from downloading new files after upgrade/downgrade was broken. And seems it became over-complicated with time. I replacing it by 1 persistent flag inside downloader: "prohibit_new_downloads.lock" Erigon will turn downloader into this mode after downloading/verification of first snapshots. ``` //Corner cases: // - Erigon generated file X with hash H1. User upgraded Erigon. New version has preverified file X with hash H2. Must ignore H2 (don't send to Downloader) // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) // - After "download once" - Erigon will produce and seed new files ``` ------ `downloader --seedbox` is never "prohibit new downloads"
142 lines · 4.0 KiB · Go
package snapcfg

import (
	_ "embed"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/ledgerwatch/erigon-lib/chain/networkname"
	snapshothashes "github.com/ledgerwatch/erigon-snapshot"
	"github.com/ledgerwatch/erigon-snapshot/webseed"
	"github.com/pelletier/go-toml/v2"
	"golang.org/x/exp/slices"
)

var (
	Mainnet = fromToml(snapshothashes.Mainnet)
	// Holesky = fromToml(snapshothashes.Holesky)
	Sepolia    = fromToml(snapshothashes.Sepolia)
	Goerli     = fromToml(snapshothashes.Goerli)
	Mumbai     = fromToml(snapshothashes.Mumbai)
	Amoy       = fromToml(snapshothashes.Amoy)
	BorMainnet = fromToml(snapshothashes.BorMainnet)
	Gnosis     = fromToml(snapshothashes.Gnosis)
	Chiado     = fromToml(snapshothashes.Chiado)
)

type PreverifiedItem struct {
	Name string
	Hash string
}
type Preverified []PreverifiedItem
type preverified map[string]string

func fromToml(in []byte) (out Preverified) {
	var outMap preverified
	if err := toml.Unmarshal(in, &outMap); err != nil {
		panic(err)
	}
	return doSort(outMap)
}
func doSort(in preverified) Preverified {
	out := make(Preverified, 0, len(in))
	for k, v := range in {
		out = append(out, PreverifiedItem{k, v})
	}
	slices.SortFunc(out, func(i, j PreverifiedItem) int { return strings.Compare(i.Name, j.Name) })
	return out
}

var (
	MainnetChainSnapshotCfg = newCfg(Mainnet)
	// HoleskyChainSnapshotCfg = newCfg(Holesky, HoleskyHistory)
	SepoliaChainSnapshotCfg    = newCfg(Sepolia)
	GoerliChainSnapshotCfg     = newCfg(Goerli)
	MumbaiChainSnapshotCfg     = newCfg(Mumbai)
	AmoyChainSnapshotCfg       = newCfg(Amoy)
	BorMainnetChainSnapshotCfg = newCfg(BorMainnet)
	GnosisChainSnapshotCfg     = newCfg(Gnosis)
	ChiadoChainSnapshotCfg     = newCfg(Chiado)
)

func newCfg(preverified Preverified) *Cfg {
	return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified}
}

func maxBlockNum(preverified Preverified) uint64 {
	max := uint64(0)
	for _, p := range preverified {
		_, fileName := filepath.Split(p.Name)
		ext := filepath.Ext(fileName)
		if ext != ".seg" {
			continue
		}
		onlyName := fileName[:len(fileName)-len(ext)]
		parts := strings.Split(onlyName, "-")
		if parts[3] != "headers" {
			continue
		}
		to, err := strconv.ParseUint(parts[2], 10, 64)
		if err != nil {
			panic(err)
		}
		if max < to {
			max = to
		}
	}
	if max == 0 { // to prevent underflow
		return 0
	}
	return max*1_000 - 1
}

type Cfg struct {
	ExpectBlocks uint64
	Preverified  Preverified
}

var KnownCfgs = map[string]*Cfg{
	networkname.MainnetChainName: MainnetChainSnapshotCfg,
	// networkname.HoleskyChainName: HoleskyChainSnapshotCfg,
	networkname.SepoliaChainName:    SepoliaChainSnapshotCfg,
	networkname.GoerliChainName:     GoerliChainSnapshotCfg,
	networkname.MumbaiChainName:     MumbaiChainSnapshotCfg,
	networkname.AmoyChainName:       AmoyChainSnapshotCfg,
	networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg,
	networkname.GnosisChainName:     GnosisChainSnapshotCfg,
	networkname.ChiadoChainName:     ChiadoChainSnapshotCfg,
}

// KnownCfg returns the list of preverified snapshot hashes for the given network
func KnownCfg(networkName string) *Cfg {
	c, ok := KnownCfgs[networkName]
	if !ok {
		return newCfg(Preverified{})
	}
	return newCfg(c.Preverified)
}

var KnownWebseeds = map[string][]string{
	networkname.MainnetChainName:    webseedsParse(webseed.Mainnet),
	networkname.SepoliaChainName:    webseedsParse(webseed.Sepolia),
	networkname.GoerliChainName:     webseedsParse(webseed.Goerli),
	networkname.MumbaiChainName:     webseedsParse(webseed.Mumbai),
	networkname.AmoyChainName:       webseedsParse(webseed.Amoy),
	networkname.BorMainnetChainName: webseedsParse(webseed.BorMainnet),
	networkname.GnosisChainName:     webseedsParse(webseed.Gnosis),
	networkname.ChiadoChainName:     webseedsParse(webseed.Chiado),
}

func webseedsParse(in []byte) (res []string) {
	a := map[string]string{}
	if err := toml.Unmarshal(in, &a); err != nil {
		panic(err)
	}
	for _, l := range a {
		res = append(res, l)
	}
	slices.Sort(res)
	return res
}
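A short usage sketch (caller code, not part of this file), assuming the package is importable as `github.com/ledgerwatch/erigon-lib/chain/snapcfg`, as in upstream erigon-lib:

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon-lib/chain/networkname"
	"github.com/ledgerwatch/erigon-lib/chain/snapcfg" // assumed import path
)

func main() {
	// Resolve the preverified snapshot list for mainnet.
	cfg := snapcfg.KnownCfg(networkname.MainnetChainName)
	fmt.Println("blocks expected to be covered by snapshots:", cfg.ExpectBlocks)
	for _, p := range cfg.Preverified {
		fmt.Println(p.Name, p.Hash)
	}

	// Webseed URLs the downloader can fetch these files from.
	fmt.Println(snapcfg.KnownWebseeds[networkname.MainnetChainName])
}
```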