core/state/snapshot: fix journal recovery from generating old journal (#21775)

* core/state/snapshot: print warning if failed to resolve journal

* core/state/snapshot: fix snapshot recovery

When we encounter a snapshot journal consisting of:
- a disk layer generator in the new format
- a diff layer journal in the old format

the base layer should be returned without error.
The broken diff layers can be reconstructed later,
but we definitely don't want to regenerate the
huge disk layer (see the sketch below).

* core: add tests
# Conflicts:
#	core/state/snapshot/journal.go
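For context, the fallback described in the commit message can be sketched as follows. This is a minimal illustration under stated assumptions, not go-ethereum's actual implementation: the diskLayer type, the loadJournal helper, the journalVersion constant and the fixed 8-byte version header are all made up for the example, while the real fix lives in core/state/snapshot/journal.go (the file named in the conflict marker) and decodes the journal with RLP. The only behaviour being illustrated is the recovery rule: if the diff journal cannot be resolved, log a warning and return the bare disk layer instead of failing, so only the cheap diff layers ever need to be rebuilt.

// Minimal sketch of the recovery fallback. All names here (diskLayer,
// loadJournal, journalVersion, the 8-byte version header) are illustrative
// assumptions, not go-ethereum's real types or encoding.
package main

import (
	"encoding/binary"
	"fmt"
	"log"
)

const journalVersion uint64 = 0 // hypothetical current journal format version

// diskLayer stands in for the persisted base layer of the snapshot.
type diskLayer struct {
	root string
}

// loadJournal tries to decode the diff-layer journal on top of an already
// loaded disk layer. A missing, legacy-format or otherwise unparsable diff
// journal is NOT an error: warn and return the base layer, leaving the diff
// layers to be reconstructed from recent blocks later.
func loadJournal(base *diskLayer, journal []byte) (*diskLayer, error) {
	if len(journal) == 0 {
		log.Println("Snapshot diff journal missing, using disk layer only")
		return base, nil
	}
	if len(journal) < 8 {
		log.Println("Failed to resolve snapshot journal version, dropping diffs")
		return base, nil
	}
	version := binary.BigEndian.Uint64(journal[:8])
	if version != journalVersion {
		// A legacy-format diff journal surfaces as a version mismatch here.
		log.Printf("Discarding snapshot journal with wrong version: have %d, want %d", version, journalVersion)
		return base, nil
	}
	// ... decode the diff layers and stack them on top of base ...
	return base, nil
}

func main() {
	base := &diskLayer{root: "0xdeadbeef"}
	// Feed a legacy/garbage journal: recovery must still return the disk layer.
	layer, err := loadJournal(base, []byte{1, 2, 3, 4, 5, 6, 7, 8})
	fmt.Println(layer == base, err) // true <nil>
}

Treating an unreadable diff journal as a warning rather than a fatal error is what keeps a crash after the journal-format upgrade from forcing a full regeneration of the disk layer.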
gary rong 2020-11-04 19:41:46 +08:00 committed by Igor Mandrigin
parent 4f2558be98
commit d474b21f4b


@@ -45,6 +45,7 @@ import (
type snapshotTest struct {
legacy bool // Flag whether the loaded snapshot is in legacy format
crash bool // Flag whether the Geth restarts from the previous crash
restartCrash int // Number of blocks to insert after the normal stop, then the crash happens
gapped int // Number of blocks to insert without enabling snapshot
setHead uint64 // Block number to set head back to
@@ -565,6 +566,46 @@ func TestSetHeadWithLegacySnapshot(t *testing.T) {
})
}
// Tests that Geth was running with the snapshot (legacy format) enabled and then
// upgraded the disk layer journal (journal generator) to the latest format. After
// that, Geth is restarted from a crash. In this case Geth will find the new-format
// disk layer journal but a legacy-format diff journal (the new format was never
// committed), and the invalid diff journal is expected to be dropped.
func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G
//
// SetHead(0)
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
//
// Expected head header : C10
// Expected head fast block: C10
// Expected head block : C8
// Expected snapshot disk : C10
testSnapshot(t, &snapshotTest{
legacy: true,
crash: false,
restartCrash: 2,
gapped: 0,
setHead: 0,
chainBlocks: 8,
snapshotBlock: 0,
commitBlock: 0,
expCanonicalBlocks: 10,
expHeadHeader: 10,
expHeadFastBlock: 10,
expHeadBlock: 8, // The persisted state from the first run
expSnapshotBottom: 10, // The persisted disk layer from the second run
})
}
func testSnapshot(t *testing.T, tt *snapshotTest) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
@@ -694,6 +735,30 @@ func testSnapshot(t *testing.T, tt *snapshotTest) {
chain.SetHead(tt.setHead)
chain.Stop()
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer chain.Stop()
} else if tt.restartCrash != 0 {
// Firstly, stop the chain properly, with all snapshot journal
// and state committed.
chain.Stop()
// Restart chain, forcibly flush the disk layer journal with new format
newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.restartCrash, func(i int, b *BlockGen) {})
chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
chain.InsertChain(newBlocks)
chain.Snapshot().Cap(newBlocks[len(newBlocks)-1].Root(), 0)
// Simulate a blockchain crash: don't call chain.Stop here,
// so that neither the snapshot journal nor the latest state
// gets committed
// Restart the chain after the crash
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)