Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git, synced 2025-01-03 09:37:38 +00:00
329d18ef6f
Reason:
- Produce and seed snapshots earlier at the chain tip. This reduces dependency on "good peers with history" in the p2p network. Some networks have few archive peers, and Consensus Layer clients are not good (not incentivised) at serving history.
- Avoid having too many files: more files (shards) means more metadata, more lookups for non-indexed queries, more dictionaries, more bittorrent connections, ...; fewer files means small files get removed after merge (no peers for these files).
ToDo:
[x] Recent 500K - merge up to 100K
[x] Older than 500K - merge up to 500K
[x] Start seeding 100k files
[x] Stop seeding 100k files after merge (right before delete)
In next PR:
[ ] Old versions of Erigon must be able to download recent hashes. To achieve this, on first start Erigon will download the preverified hashes .toml from S3; if it's newer than what we have built-in, use it.
298 lines
9.1 KiB
Go
package freezeblocks

import (
	"context"
	"path/filepath"
	"testing"
	"testing/fstest"

	"github.com/ledgerwatch/erigon-lib/chain/networkname"
	"github.com/ledgerwatch/erigon-lib/chain/snapcfg"
	"github.com/ledgerwatch/erigon-lib/compress"
	"github.com/ledgerwatch/erigon-lib/downloader/snaptype"
	"github.com/ledgerwatch/erigon-lib/recsplit"
	"github.com/ledgerwatch/log/v3"
	"github.com/stretchr/testify/require"

	"github.com/ledgerwatch/erigon/common/math"
	"github.com/ledgerwatch/erigon/eth/ethconfig"
	"github.com/ledgerwatch/erigon/params"
)

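// createTestSegmentFile writes a minimal .seg file containing a single word, plus its
// recsplit index (and, for transaction segments, the additional Transactions2Block index),
// so that the tests below can open the segment like a real snapshot file.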
func createTestSegmentFile(t *testing.T, from, to uint64, name snaptype.Type, dir string, logger log.Logger) {
	c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snaptype.SegmentFileName(from, to, name)), dir, 100, 1, log.LvlDebug, logger)
	require.NoError(t, err)
	defer c.Close()
	err = c.AddWord([]byte{1})
	require.NoError(t, err)
	err = c.Compress()
	require.NoError(t, err)
	idx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
		KeyCount:   1,
		BucketSize: 10,
		TmpDir:     dir,
		IndexFile:  filepath.Join(dir, snaptype.IdxFileName(from, to, name.String())),
		LeafSize:   8,
	}, logger)
	require.NoError(t, err)
	defer idx.Close()
	err = idx.AddKey([]byte{1}, 0)
	require.NoError(t, err)
	err = idx.Build(context.Background())
	require.NoError(t, err)
	if name == snaptype.Transactions {
		idx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
			KeyCount:   1,
			BucketSize: 10,
			TmpDir:     dir,
			IndexFile:  filepath.Join(dir, snaptype.IdxFileName(from, to, snaptype.Transactions2Block.String())),
			LeafSize:   8,
		}, logger)
		require.NoError(t, err)
		err = idx.AddKey([]byte{1}, 0)
		require.NoError(t, err)
		err = idx.Build(context.Background())
		require.NoError(t, err)
		defer idx.Close()
	}
}

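// TestFindMergeRange checks the merge planning of Merger.FindMergeRanges: ranges far
// behind the chain tip are planned as 500k merges, while ranges near the tip are only
// merged up to 100k files; the IsRecent sub-test pins down which ranges count as
// "recent" (those within the last 500_000 blocks).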
func TestFindMergeRange(t *testing.T) {
	merger := NewMerger("x", 1, log.LvlInfo, nil, params.MainnetChainConfig, nil)
	t.Run("big", func(t *testing.T) {
		var ranges []Range
		for i := 0; i < 24; i++ {
			ranges = append(ranges, Range{from: uint64(i * 100_000), to: uint64((i + 1) * 100_000)})
		}
		found := merger.FindMergeRanges(ranges, uint64(24*100_000))

		expect := []Range{
			{0, 500_000},
			{500_000, 1_000_000},
			{1_000_000, 1_500_000},
		}
		require.Equal(t, Ranges(expect).String(), Ranges(found).String())
	})

	t.Run("small", func(t *testing.T) {
		var ranges Ranges
		for i := 0; i < 240; i++ {
			ranges = append(ranges, Range{from: uint64(i * 10_000), to: uint64((i + 1) * 10_000)})
		}
		found := merger.FindMergeRanges(ranges, uint64(240*10_000))

		expect := Ranges{
			{0, 500_000},
			{500_000, 1_000_000},
			{1_000_000, 1_500_000},
			{1_500_000, 1_600_000},
			{1_600_000, 1_700_000},
			{1_700_000, 1_800_000},
			{1_800_000, 1_900_000},
			{1_900_000, 2_000_000},
			{2_000_000, 2_100_000},
			{2_100_000, 2_200_000},
			{2_200_000, 2_300_000},
			{2_300_000, 2_400_000},
		}

		require.Equal(t, expect.String(), Ranges(found).String())
	})

	t.Run("IsRecent", func(t *testing.T) {
		require.True(t, Range{500_000, 599_000}.IsRecent(1_000_000))
		require.True(t, Range{500_000, 501_000}.IsRecent(1_000_000))
		require.False(t, Range{499_000, 500_000}.IsRecent(1_000_000))
		require.False(t, Range{400_000, 500_000}.IsRecent(1_000_000))
		require.False(t, Range{400_000, 401_000}.IsRecent(1_000_000))

		require.False(t, Range{500_000, 501_000}.IsRecent(1_100_000))
	})

}

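// TestMergeSnapshots builds one 500k segment plus seventeen 100k segments on disk, runs
// Merger.Merge and checks that the 500_000-1_000_000 transactions segment now contains the
// five merged words, that a second pass finds nothing more to merge, and that recent 100k
// files such as 1_800_000-1_900_000 are left untouched.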
func TestMergeSnapshots(t *testing.T) {
	logger := log.New()
	dir, require := t.TempDir(), require.New(t)
	createFile := func(from, to uint64) {
		for _, snT := range snaptype.BlockSnapshotTypes {
			createTestSegmentFile(t, from, to, snT, dir, logger)
		}
	}

	N := uint64(17)
	createFile(0, snaptype.Erigon2MergeLimit)
	for i := uint64(snaptype.Erigon2MergeLimit); i < snaptype.Erigon2MergeLimit+N*100_000; i += 100_000 {
		createFile(i, i+100_000)
	}
	s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, logger)
	defer s.Close()
	require.NoError(s.ReopenFolder())
	{
		merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger)
		ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax())
		require.True(len(ranges) > 0)
		err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, func(r Range) error {
			return nil
		}, func(l []string) error {
			return nil
		})
		require.NoError(err)
	}

	expectedFileName := snaptype.SegmentFileName(500_000, 1_000_000, snaptype.Transactions)
	d, err := compress.NewDecompressor(filepath.Join(dir, expectedFileName))
	require.NoError(err)
	defer d.Close()
	a := d.Count()
	require.Equal(5, a)

	{
		merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger)
		ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax())
		require.True(len(ranges) == 0)
		err := merger.Merge(context.Background(), s, ranges, s.Dir(), false, func(r Range) error {
			return nil
		}, func(l []string) error {
			return nil
		})
		require.NoError(err)
	}

	expectedFileName = snaptype.SegmentFileName(1_800_000, 1_900_000, snaptype.Transactions)
	d, err = compress.NewDecompressor(filepath.Join(dir, expectedFileName))
	require.NoError(err)
	defer d.Close()
	a = d.Count()
	require.Equal(1, a)
}

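// TestCanRetire table-tests canRetire: for a not-yet-frozen block range it returns the next
// aligned chunk (1k, 100k or 500k, depending on position and alignment) that can be moved
// into snapshot files, or can=false when the range is too small to retire.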
func TestCanRetire(t *testing.T) {
	require := require.New(t)
	cases := []struct {
		inFrom, inTo, outFrom, outTo uint64
		can                          bool
	}{
		{0, 1234, 0, 1000, true},
		{1_000_000, 1_120_000, 1_000_000, 1_100_000, true},
		{2_500_000, 4_100_000, 2_500_000, 3_000_000, true},
		{2_500_000, 2_500_100, 2_500_000, 2_500_000, false},
		{1_001_000, 2_000_000, 1_001_000, 1_002_000, true},
	}
	for _, tc := range cases {
		from, to, can := canRetire(tc.inFrom, tc.inTo)
		require.Equal(int(tc.outFrom), int(from))
		require.Equal(int(tc.outTo), int(to))
		require.Equal(tc.can, can, tc.inFrom, tc.inTo)
	}
}

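// TestOpenAllSnapshot checks RoSnapshots.ReopenFolder: a range only becomes visible once
// header, body and transaction segments all exist for it and there is no gap from block 0,
// and View.TxsSegment resolves a block number to the segment that covers it.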
func TestOpenAllSnapshot(t *testing.T) {
	logger := log.New()
	dir, require := t.TempDir(), require.New(t)
	chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName, nil, nil)
	chainSnapshotCfg.ExpectBlocks = math.MaxUint64
	cfg := ethconfig.BlocksFreezing{Enabled: true}
	createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name, dir, logger) }
	s := NewRoSnapshots(cfg, dir, logger)
	defer s.Close()
	err := s.ReopenFolder()
	require.NoError(err)
	require.Equal(0, len(s.Headers.segments))
	s.Close()

	createFile(500_000, 1_000_000, snaptype.Bodies)
	s = NewRoSnapshots(cfg, dir, logger)
	defer s.Close()
	require.Equal(0, len(s.Bodies.segments)) // because no headers or transactions snapshot files have been created
	s.Close()

	createFile(500_000, 1_000_000, snaptype.Headers)
	createFile(500_000, 1_000_000, snaptype.Transactions)
	s = NewRoSnapshots(cfg, dir, logger)
	err = s.ReopenFolder()
	require.NoError(err)
	require.Equal(0, len(s.Headers.segments))
	s.Close()

	createFile(0, 500_000, snaptype.Bodies)
	createFile(0, 500_000, snaptype.Headers)
	createFile(0, 500_000, snaptype.Transactions)
	s = NewRoSnapshots(cfg, dir, logger)
	defer s.Close()

	err = s.ReopenFolder()
	require.NoError(err)
	require.Equal(2, len(s.Headers.segments))

	view := s.View()
	defer view.Close()

	seg, ok := view.TxsSegment(10)
	require.True(ok)
	require.Equal(int(seg.ranges.to), 500_000)

	seg, ok = view.TxsSegment(500_000)
	require.True(ok)
	require.Equal(int(seg.ranges.to), 1_000_000)

	_, ok = view.TxsSegment(1_000_000)
	require.False(ok)

	// Erigon may create new snapshots by itself, with a block range higher than the hardcoded ExpectBlocks.
	// ExpectBlocks only says how many blocks must come from Torrent.
	chainSnapshotCfg.ExpectBlocks = 500_000 - 1
	s = NewRoSnapshots(cfg, dir, logger)
	err = s.ReopenFolder()
	require.NoError(err)
	defer s.Close()
	require.Equal(2, len(s.Headers.segments))

	createFile(500_000, 900_000, snaptype.Headers)
	createFile(500_000, 900_000, snaptype.Bodies)
	createFile(500_000, 900_000, snaptype.Transactions)
	chainSnapshotCfg.ExpectBlocks = math.MaxUint64
	s = NewRoSnapshots(cfg, dir, logger)
	defer s.Close()
	err = s.ReopenFolder()
	require.NoError(err)
}

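// TestParseCompressedFileName checks snaptype.ParseFileName: unversioned or unknown names
// are rejected, while versioned names like v1-1-2-bodies.seg parse to the Bodies type with
// the 1-2 range in the name expanded to blocks 1_000-2_000.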
func TestParseCompressedFileName(t *testing.T) {
	require := require.New(t)
	fs := fstest.MapFS{
		"a":                 &fstest.MapFile{},
		"1-a":               &fstest.MapFile{},
		"1-2-a":             &fstest.MapFile{},
		"1-2-bodies.info":   &fstest.MapFile{},
		"1-2-bodies.seg":    &fstest.MapFile{},
		"v2-1-2-bodies.seg": &fstest.MapFile{},
		"v0-1-2-bodies.seg": &fstest.MapFile{},
		"v1-1-2-bodies.seg": &fstest.MapFile{},
	}
	stat := func(name string) string {
		s, err := fs.Stat(name)
		require.NoError(err)
		return s.Name()
	}
	_, ok := snaptype.ParseFileName("", stat("a"))
	require.False(ok)
	_, ok = snaptype.ParseFileName("", stat("1-a"))
	require.False(ok)
	_, ok = snaptype.ParseFileName("", stat("1-2-a"))
	require.False(ok)
	_, ok = snaptype.ParseFileName("", stat("1-2-bodies.info"))
	require.False(ok)
	_, ok = snaptype.ParseFileName("", stat("1-2-bodies.seg"))
	require.False(ok)
	_, ok = snaptype.ParseFileName("", stat("v2-1-2-bodies.seg"))
	require.True(ok)
	_, ok = snaptype.ParseFileName("", stat("v0-1-2-bodies.seg"))
	require.True(ok)

	f, ok := snaptype.ParseFileName("", stat("v1-1-2-bodies.seg"))
	require.True(ok)
	require.Equal(f.T, snaptype.Bodies)
	require.Equal(1_000, int(f.From))
	require.Equal(2_000, int(f.To))
}