package snapshotsync

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"math"
	"os"
	"path"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ledgerwatch/erigon/ethdb/kv"
	"github.com/ledgerwatch/erigon/log"

	"github.com/stretchr/testify/require"

	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/common/dbutils"
	"github.com/ledgerwatch/erigon/ethdb"
)

// Testcase plan:
// Step 1. Generate headers from 0 to 11.
// Step 2. Run the snapshot migration in a separate goroutine with an epoch of 10 blocks.
// Step 3. Wait until the snapshot builder passes the first cycle. It must generate a new snapshot and remove the duplicated data from
// the main database. After that, headers 0 to 10 must be in the snapshot and header 11 must be in the main DB.
// Step 4. Begin a new read-only tx and generate data from 11 to 20. The snapshot migration must be blocked by the read-only tx at the replace-snapshot stage.
// After 3 seconds we roll back the read-only tx, and the migration must continue without any errors.
// Step 5. Check that the new snapshot contains headers from 0 to 20, the headers bucket in the main database is empty,
// seeding of the new snapshot has started, and the old one was removed.
func TestSnapshotMigratorStage(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("fix me on win please") // after removing ChainReader from the consensus engine this test can be changed to create fewer databases and then enabled on Windows; for now it times out after 20min
	}

	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	var err error
	dir := t.TempDir()

	defer func() {
		if err != nil {
			t.Log(err, dir)
		}
		err = os.RemoveAll(dir)
		if err != nil {
			t.Log(err)
		}
	}()
	snapshotsDir := path.Join(dir, "snapshots")
	err = os.Mkdir(snapshotsDir, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	btCli, err := New(snapshotsDir, true, "12345123451234512345")
	if err != nil {
		t.Fatal(err)
	}
	btCli.trackers = [][]string{}

	db := kv.NewSnapshotKV().DB(kv.MustOpenKV(path.Join(dir, "chaindata"))).Open()
	quit := make(chan struct{})
	defer func() {
		close(quit)
	}()

	sb := &SnapshotMigrator{
		snapshotsDir: snapshotsDir,
		replaceChan:  make(chan struct{}),
	}
	currentSnapshotBlock := uint64(10)
	tx, err := db.BeginRw(context.Background())
	if err != nil {
		t.Error(err)
		panic(err)
	}
	defer tx.Rollback()
	err = GenerateHeaderData(tx, 0, 11)
	if err != nil {
		t.Error(err)
		panic(err)
	}

	err = tx.Commit()
	if err != nil {
		t.Error(err)
		panic(err)
	}
	generateChan := make(chan int)
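	// StageSyncStep emulates one iteration of staged sync: it first extends the header data if a
	// new height arrived on generateChan, then drives the migrator through its phases in order:
	// AsyncStages (build the new snapshot and hand it to the torrent client), SyncStages (swap it
	// into the SnapshotKV) and Final (drop the migrated data from the main DB). The exact split of
	// work between the phases lives in SnapshotMigrator; this summary follows the testcase plan above.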
	StageSyncStep := func() {
		tx, err := db.BeginRw(context.Background())
		if err != nil {
			t.Error(err)
			panic(err)
		}
		defer tx.Rollback()

		select {
		case newHeight := <-generateChan:
			err = GenerateHeaderData(tx, int(currentSnapshotBlock), newHeight)
			if err != nil {
				t.Error(err)
			}
			currentSnapshotBlock = CalculateEpoch(uint64(newHeight), 10)
		default:
		}

		err = sb.AsyncStages(currentSnapshotBlock, db, tx, btCli, true)
		if err != nil {
			t.Error(err)
			panic(err)
		}

		err = sb.SyncStages(currentSnapshotBlock, db, tx)
		if err != nil {
			t.Error(err)
			panic(err)
		}

		err = sb.Final(tx)
		if err != nil {
			t.Error(err)
			panic(err)
		}

		err = tx.Commit()
		if err != nil {
			t.Error(err)
			panic(err)
		}
		time.Sleep(time.Second)
	}
	wg := sync.WaitGroup{}
	wg.Add(1)

	go func() {
		so := sync.Once{}
		// this goroutine emulates staged sync
		for {
			select {
			case <-quit:
				db.Close()
				return
			default:
				StageSyncStep()
				// mark that the migration has started
				so.Do(func() {
					wg.Done()
				})
			}
		}
	}()
	// wait until the migration starts
	wg.Wait()
	for atomic.LoadUint64(&sb.started) > 0 {
		time.Sleep(time.Millisecond * 100)
	}

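	// Step 3 checks: after the first cycle, headers 0..10 must have been moved into the snapshot
	// and only header 11 should remain in the main (write) database.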
	// Note: here we only need the main database.
	rotx, err := db.WriteDB().BeginRo(context.Background())
	require.NoError(t, err)
	defer rotx.Rollback()
	roc, err := rotx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	var headerNumber uint64
	headerNumber = 11

	err = ethdb.Walk(roc, []byte{}, 0, func(k, v []byte) (bool, error) {
		require.Equal(t, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)}), k)
		headerNumber++
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 12 {
		t.Fatal(headerNumber)
	}
	rotx.Rollback()

	snokv := db.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV)
	snRoTx, err := snokv.BeginRo(context.Background())
	require.NoError(t, err)
	headersCursor, err := snRoTx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	headerNumber = 0
	err = ethdb.Walk(headersCursor, []byte{}, 0, func(k, v []byte) (bool, error) {
		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
			t.Fatal(k)
		}
		headerNumber++

		return true, nil
	})
	snRoTx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 11 {
		t.Fatal(headerNumber)
	}

	headerNumber = 0
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		headersC, err := tx.Cursor(dbutils.HeadersBucket)
		if err != nil {
			return err
		}
		defer headersC.Close()

		return ethdb.ForEach(headersC, func(k, v []byte) (bool, error) {
			if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
				t.Fatal(k)
			}
			headerNumber++
			return true, nil
		})
	})
	if err != nil {
		t.Fatal(err)
	}

	if headerNumber != 12 {
		t.Fatal(headerNumber)
	}

	trnts := btCli.Torrents()
	if len(trnts) != 1 {
		t.Fatal("incorrect len", trnts)
	}

	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(v, trnts[0].Bytes()) {
			t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
		}

		v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
		if err != nil {
			t.Fatal(err)
		}
		if binary.BigEndian.Uint64(v) != 10 {
			t.Fatal("incorrect snapshot")
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	roTX, err := db.BeginRo(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// just start a read transaction over the snapshot;
	// the key can't be an empty slice, but it must not exist in the main db
	_, err = roTX.GetOne(dbutils.HeadersBucket, []byte{112, 3})
	if err != nil {
		t.Fatal(err)
	}
	defer roTX.Rollback()

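	// Step 4: with roTX still open over the snapshot, ask the background cycle to generate headers
	// up to 20. The migration must block at the replace-snapshot stage until roTX is rolled back below.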
	generateChan <- 20

	rollbacked := false
	// the timeout is just to be sure that the migration blocks here before we roll back
	c := time.After(time.Second * 5)
	for atomic.LoadUint64(&sb.started) > 0 || atomic.LoadUint64(&sb.HeadersCurrentSnapshot) != 20 {
		select {
		case <-c:
			roTX.Rollback()
			rollbacked = true
		default:
		}
		time.Sleep(time.Millisecond * 100)
	}
	if !rollbacked {
		t.Log("it is not possible to close the db without a rollback; something went wrong")
	}

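	// Step 5 checks: the headers bucket of the main database must be empty, the new snapshot must
	// contain headers 0..20, and the torrent client must seed exactly one (the new) snapshot.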
	rotx, err = db.WriteDB().BeginRo(context.Background())
	require.NoError(t, err)
	defer rotx.Rollback()
	roc, err = rotx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)

	err = ethdb.Walk(roc, []byte{}, 0, func(k, v []byte) (bool, error) {
		t.Fatal("main db must be empty here", k)
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	headerNumber = 0
	snRoTx, err = db.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV).BeginRo(context.Background())
	require.NoError(t, err)
	headersCursor, err = snRoTx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	err = ethdb.Walk(headersCursor, []byte{}, 0, func(k, v []byte) (bool, error) {
		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
			t.Fatal(k)
		}
		headerNumber++

		return true, nil
	})
	snRoTx.Rollback()
	if err != nil {
		t.Fatal(err)
	}

	if headerNumber != 21 {
		t.Fatal(headerNumber)
	}
	headerNumber = 0
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		c, err := tx.Cursor(dbutils.HeadersBucket)
		if err != nil {
			return err
		}
		defer c.Close()
		return ethdb.ForEach(c, func(k, v []byte) (bool, error) {
			if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
				t.Fatal(k)
			}
			headerNumber++

			return true, nil
		})
	})
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 21 {
		t.Fatal(headerNumber)
	}

	trnts = btCli.Torrents()
	if len(trnts) != 1 {
		t.Fatal("incorrect len", trnts)
	}
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(v, trnts[0].Bytes()) {
			t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
		}

		v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
		if err != nil {
			t.Fatal(err)
		}
		if binary.BigEndian.Uint64(v) != 20 {
			t.Fatal("incorrect snapshot")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err = os.Stat(SnapshotName(snapshotsDir, "headers", 10)); os.IsExist(err) {
		t.Fatal("snapshot exists")
	} else {
		// reset err so the deferred cleanup is not confused by os.Stat's result
		err = nil
	}
}

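// TestSnapshotMigratorStageSyncMode exercises the same migration, but drives it step by step from
// the test itself instead of a background staged-sync goroutine: AsyncStages runs on a read-only
// tx, the test waits for Replaced(), then SyncStages is applied on a write tx and Final on fresh
// read-only transactions until the migrator finishes.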
func TestSnapshotMigratorStageSyncMode(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("fix me on win please") // after removing ChainReader from the consensus engine this test can be changed to create fewer databases and then enabled on Windows; for now it times out after 20min
	}
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	var err error
	dir := t.TempDir()

	defer func() {
		if err != nil {
			t.Log(err, dir)
		}
		err = os.RemoveAll(dir)
		if err != nil {
			t.Log(err)
		}
	}()
	snapshotsDir := path.Join(dir, "snapshots")
	err = os.Mkdir(snapshotsDir, os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	btCli, err := New(snapshotsDir, true, "12345123451234512345")
	if err != nil {
		t.Fatal(err)
	}
	btCli.trackers = [][]string{}

	db := kv.NewSnapshotKV().DB(kv.MustOpenKV(path.Join(dir, "chaindata"))).Open()
	quit := make(chan struct{})
	defer func() {
		close(quit)
	}()

	sb := &SnapshotMigrator{
		snapshotsDir: snapshotsDir,
		replaceChan:  make(chan struct{}),
	}

	tx, err := db.BeginRw(context.Background())
	if err != nil {
		t.Error(err)
		panic(err)
	}
	defer tx.Rollback()
	err = GenerateHeaderData(tx, 0, 11)
	if err != nil {
		t.Error(err)
		panic(err)
	}

	err = tx.Commit()
	if err != nil {
		t.Error(err)
		panic(err)
	}

	writeStep := func(currentSnapshotBlock uint64) {
		writeTX, writeErr := db.BeginRw(context.Background())
		if writeErr != nil {
			t.Fatal(writeErr)
		}
		defer writeTX.Rollback()
		writeErr = sb.SyncStages(currentSnapshotBlock, db, writeTX)
		if writeErr != nil {
			t.Fatal(writeErr)
		}

		writeErr = writeTX.Commit()
		if writeErr != nil {
			t.Fatal(writeErr)
		}
	}

	StageSyncStep := func(currentSnapshotBlock uint64) {
		t.Helper()
		rotx, err := db.BeginRo(context.Background())
		if err != nil {
			t.Error(err)
			panic(err)
		}
		defer rotx.Rollback()

		err = sb.AsyncStages(currentSnapshotBlock, db, rotx, btCli, false)
		if err != nil {
			t.Fatal(err)
		}
		rotx.Rollback()
	}
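	// Drive the first migration by hand: build the snapshot for block 10, wait until every
	// transaction on the old snapshot is closed and the snapshot is replaced, run SyncStages on a
	// write tx (writeStep), then call Final until the migrator reports it is done (sb.started
	// drops back to 0).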
	StageSyncStep(10)
	for !sb.Replaced() {
		// wait until all transactions on the old snapshot are closed
	}
	writeStep(10)

	for atomic.LoadUint64(&sb.started) > 0 {
		roTx, err := db.BeginRo(context.Background())
		if err != nil {
			t.Fatal(err)
		}

		err = sb.Final(roTx)
		if err != nil {
			t.Fatal(err)
		}
		roTx.Rollback()
		time.Sleep(time.Millisecond * 100)
	}

	// Note: here we only need the main database.
	rotx, err := db.WriteDB().BeginRo(context.Background())
	require.NoError(t, err)
	defer rotx.Rollback()
	roc, err := rotx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	var headerNumber uint64
	headerNumber = 11

	err = ethdb.Walk(roc, []byte{}, 0, func(k, v []byte) (bool, error) {
		require.Equal(t, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)}), k)
		headerNumber++
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 12 {
		t.Fatal(headerNumber)
	}
	rotx.Rollback()

	snokv := db.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV)
	snRoTx, err := snokv.BeginRo(context.Background())
	require.NoError(t, err)
	headersCursor, err := snRoTx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	headerNumber = 0
	err = ethdb.Walk(headersCursor, []byte{}, 0, func(k, v []byte) (bool, error) {
		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
			t.Fatal(k)
		}
		headerNumber++

		return true, nil
	})
	snRoTx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 11 {
		t.Fatal(headerNumber)
	}

	headerNumber = 0
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		headersC, err := tx.Cursor(dbutils.HeadersBucket)
		if err != nil {
			return err
		}
		defer headersC.Close()

		return ethdb.ForEach(headersC, func(k, v []byte) (bool, error) {
			if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
				t.Fatal(k)
			}
			headerNumber++
			return true, nil
		})
	})
	if err != nil {
		t.Fatal(err)
	}

	if headerNumber != 12 {
		t.Fatal(headerNumber)
	}

	trnts := btCli.Torrents()
	if len(trnts) != 1 {
		t.Fatal("incorrect len", trnts)
	}

	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(v, trnts[0].Bytes()) {
			t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
		}

		v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
		if err != nil {
			t.Fatal(err)
		}
		if binary.BigEndian.Uint64(v) != 10 {
			t.Fatal("incorrect snapshot")
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	tx, err = db.BeginRw(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	err = GenerateHeaderData(tx, 12, 20)
	if err != nil {
		t.Fatal(err)
	}

	err = tx.Commit()
	if err != nil {
		t.Fatal(err)
	}

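	// Open a long-lived read transaction over the current snapshot in a helper goroutine, so that
	// the second migration has to block at the replace stage until this transaction is rolled back
	// roughly three seconds later.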
	wg := sync.WaitGroup{}
	wg.Add(1)
	rollbacked := false
	go func() {
		c := time.After(time.Second * 3)

		roTX, err := db.BeginRo(context.Background())
		if err != nil {
			wg.Done()
			t.Error(err)
			return
		}
		// just start a read transaction over the snapshot;
		// the key can't be an empty slice, but it must not exist in the main db
		_, err = roTX.GetOne(dbutils.HeadersBucket, []byte{1})
		if err != nil {
			wg.Done()
			t.Error(err)
			return
		}
		wg.Done()
		<-c
		rollbacked = true
		roTX.Rollback()
	}()
	// wait until the read tx has started
	wg.Wait()

	StageSyncStep(20)
	for !sb.Replaced() {
		// wait until all transactions on the old snapshot are closed
	}
	writeStep(20)

	for atomic.LoadUint64(&sb.started) > 0 {
		roTx, err := db.BeginRo(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		err = sb.Final(roTx)
		if err != nil {
			t.Fatal(err)
		}
		roTx.Rollback()
		time.Sleep(time.Millisecond * 1000)
	}

	if !rollbacked {
		t.Log("it is not possible to close the db without a rollback; something went wrong")
	}

	rotx, err = db.WriteDB().BeginRo(context.Background())
	require.NoError(t, err)
	defer rotx.Rollback()
	roc, err = rotx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)

	err = ethdb.Walk(roc, []byte{}, 0, func(k, v []byte) (bool, error) {
		t.Fatal("main db must be empty here", k)
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	rotx.Rollback()
	headerNumber = 0
	snRoTx, err = db.SnapshotKV(dbutils.HeadersBucket).(ethdb.RwKV).BeginRo(context.Background())
	require.NoError(t, err)
	headersCursor, err = snRoTx.Cursor(dbutils.HeadersBucket)
	require.NoError(t, err)
	err = ethdb.Walk(headersCursor, []byte{}, 0, func(k, v []byte) (bool, error) {
		if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
			t.Fatal(k)
		}
		headerNumber++

		return true, nil
	})
	snRoTx.Rollback()
	if err != nil {
		t.Fatal(err)
	}

	if headerNumber != 21 {
		t.Fatal(headerNumber)
	}
	headerNumber = 0
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		c, err := tx.Cursor(dbutils.HeadersBucket)
		if err != nil {
			return err
		}
		defer c.Close()
		return ethdb.ForEach(c, func(k, v []byte) (bool, error) {
			if !bytes.Equal(k, dbutils.HeaderKey(headerNumber, common.Hash{uint8(headerNumber)})) {
				t.Fatal(k)
			}
			headerNumber++

			return true, nil
		})
	})
	if err != nil {
		t.Fatal(err)
	}
	if headerNumber != 21 {
		t.Fatal(headerNumber)
	}

	trnts = btCli.Torrents()
	if len(trnts) != 1 {
		t.Fatal("incorrect len", trnts)
	}
	err = db.View(context.Background(), func(tx ethdb.Tx) error {
		v, err := tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotHash)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(v, trnts[0].Bytes()) {
			t.Fatal("incorrect bytes", common.Bytes2Hex(v), common.Bytes2Hex(trnts[0].Bytes()))
		}

		v, err = tx.GetOne(dbutils.BittorrentInfoBucket, dbutils.CurrentHeadersSnapshotBlock)
		if err != nil {
			t.Fatal(err)
		}
		if binary.BigEndian.Uint64(v) != 20 {
			t.Fatal("incorrect snapshot")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if _, err = os.Stat(SnapshotName(snapshotsDir, "headers", 10)); os.IsExist(err) {
		t.Fatal("snapshot exists")
	} else {
		// reset err so the deferred cleanup is not confused by os.Stat's result
		err = nil
	}
}

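// GenerateHeaderData writes dummy headers for blocks [from, to]: for each block i it stores a
// header under HeaderKey(i, Hash{i}) with a small placeholder value and marks Hash{i} as the
// canonical hash. The range is capped at math.MaxInt8, presumably because the block number is
// also packed into single bytes of the key and value.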
func GenerateHeaderData(tx ethdb.RwTx, from, to int) error {
	var err error
	if to > math.MaxInt8 {
		return errors.New("greater than max int8")
	}
	for i := from; i <= to; i++ {
		err = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(uint64(i), common.Hash{uint8(i)}), []byte{uint8(i), uint8(i), uint8(i)})
		if err != nil {
			return err
		}
		err = tx.Put(dbutils.HeaderCanonicalBucket, dbutils.EncodeBlockNumber(uint64(i)), common.Hash{uint8(i)}.Bytes())
		if err != nil {
			return err
		}
	}
	return nil
}