mirror of
https://gitlab.com/pulsechaincom/erigon-pulse.git
synced 2024-12-22 03:30:37 +00:00
Tevm stage 1 (#1845)
* convert contracts after block execution * check if has tevm code * after review-1 * handle ErrNotFound * typo * tests * tevm code bucket * testdata * execute pre-stage * after merge * test fix * test fix * fix test after merge * disable translation stage * after merge * rename params * rename to Erigon * parallelize EVM translation * fix * logging and fixes * fix * todos * cleanup * revert erigorn renaming * unwind * tevm unwind * fix AppData * non-parallel version * comments
This commit is contained in:
parent
5085a15b28
commit
7352b81122
76
README.md
76
README.md
@ -8,23 +8,23 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien
|
||||
<!--ts-->
|
||||
- [System Requirements](#system-requirements)
|
||||
- [Usage](#usage)
|
||||
+ [Getting Started](#getting-started)
|
||||
+ [Testnets](#testnets)
|
||||
+ [Mining](#mining)
|
||||
+ [Windows](#windows)
|
||||
+ [GoDoc](https://godoc.org/github.com/ledgerwatch/erigon)
|
||||
+ [Getting Started](#getting-started)
|
||||
+ [Testnets](#testnets)
|
||||
+ [Mining](#mining)
|
||||
+ [Windows](#windows)
|
||||
+ [GoDoc](https://godoc.org/github.com/ledgerwatch/erigon)
|
||||
- [Key features](#key-features)
|
||||
+ [More Efficient State Storage](#more-efficient-state-storage)
|
||||
+ [Faster Initial Sync](#faster-initial-sync)
|
||||
+ [JSON-RPC daemon](#json-rpc-daemon)
|
||||
+ [Run all components by docker-compose](#run-all-components-by-docker-compose)
|
||||
+ [Grafana dashboard](#grafana-dashboard)
|
||||
+ [More Efficient State Storage](#more-efficient-state-storage)
|
||||
+ [Faster Initial Sync](#faster-initial-sync)
|
||||
+ [JSON-RPC daemon](#json-rpc-daemon)
|
||||
+ [Run all components by docker-compose](#run-all-components-by-docker-compose)
|
||||
+ [Grafana dashboard](#grafana-dashboard)
|
||||
- [Getting in touch](#getting-in-touch)
|
||||
+ [Erigon Discord Server](#erigon-discord-server)
|
||||
+ [Reporting security issues/concerns](#reporting-security-issues-concerns)
|
||||
+ [Team](#team)
|
||||
+ [Erigon Discord Server](#erigon-discord-server)
|
||||
+ [Reporting security issues/concerns](#reporting-security-issues-concerns)
|
||||
+ [Team](#team)
|
||||
- [Known issues](#known-issues)
|
||||
+ [`htop` shows incorrect memory usage](#-htop--shows-incorrect-memory-usage)
|
||||
+ [`htop` shows incorrect memory usage](#-htop--shows-incorrect-memory-usage)
|
||||
<!--te-->
|
||||
|
||||
|
||||
@ -41,7 +41,7 @@ The current version is currently based on Go-Ethereum 1.10.1
|
||||
System Requirements
|
||||
===================
|
||||
|
||||
Recommend 2Tb storage space on a single partition: 1Tb state, 200GB temp files (can symlink or mount folder `<datadir>/etl-tmp` to another disk).
|
||||
Recommend 2Tb storage space on a single partition: 1Tb state, 200GB temp files (can symlink or mount folder `<datadir>/etl-tmp` to another disk).
|
||||
|
||||
RAM: 16GB, 64-bit architecture, [Golang version >= 1.16](https://golang.org/doc/install)
|
||||
|
||||
@ -81,11 +81,11 @@ Support only remote-miners.
|
||||
* RPCDaemon supports methods: eth_coinbase , eth_hashrate, eth_mining, eth_getWork, eth_submitWork, eth_submitHashrate
|
||||
* RPCDaemon supports websocket methods: newPendingTransaction
|
||||
* TODO:
|
||||
+ we don't broadcast mined blocks to p2p-network yet, [but it's easy to accomplish](https://github.com/ledgerwatch/erigon/blob/9b8cdc0f2289a7cef78218a15043de5bdff4465e/eth/downloader/downloader.go#L673)
|
||||
+ eth_newPendingTransactionFilter
|
||||
+ eth_newBlockFilter
|
||||
+ eth_newFilter
|
||||
+ websocket Logs
|
||||
+ we don't broadcast mined blocks to p2p-network yet, [but it's easy to accomplish](https://github.com/ledgerwatch/erigon/blob/9b8cdc0f2289a7cef78218a15043de5bdff4465e/eth/downloader/downloader.go#L673)
|
||||
+ eth_newPendingTransactionFilter
|
||||
+ eth_newBlockFilter
|
||||
+ eth_newFilter
|
||||
+ websocket Logs
|
||||
|
||||
<code> 🔬 Detailed mining explanation is [here](/docs/mining.md).</code>
|
||||
|
||||
@ -94,8 +94,8 @@ Support only remote-miners.
|
||||
Windows users may run erigon in 2 possible ways:
|
||||
|
||||
* Build executable binaries natively for Windows using provided `win-build.ps1` PowerShell script which has to be run with local Administrator privileges.
|
||||
The script creates `libmdbx.dll` (MDBX is current default database for Erigon) and copies it into Windows's `system32` folder (generally `C:\Windows\system32`).
|
||||
Though it is still possible to run erigon with an LMDB database, there's a caveat which might make your experience with LMDB on Windows uncomfortable: data file allocation is fixed, so you need to know in advance how much space you want to allocate for the database file using the command line option `--lmdb.mapSize`
|
||||
The script creates `libmdbx.dll` (MDBX is current default database for Erigon) and copies it into Windows's `system32` folder (generally `C:\Windows\system32`).
|
||||
Though it is still possible to run erigon with an LMDB database, there's a caveat which might make your experience with LMDB on Windows uncomfortable: data file allocation is fixed, so you need to know in advance how much space you want to allocate for the database file using the command line option `--lmdb.mapSize`
|
||||
|
||||
* Use Docker : see [docker-compose.yml](./docker-compose.yml)
|
||||
|
||||
@ -109,12 +109,12 @@ Key features
|
||||
### More Efficient State Storage
|
||||
|
||||
**Flat KV storage.** Erigon uses a key-value database, storing accounts and storage in
|
||||
a simple way.
|
||||
a simple way.
|
||||
|
||||
<code> 🔬 See our detailed DB walkthrough [here](./docs/programmers_guide/db_walkthrough.MD).</code>
|
||||
|
||||
**Preprocessing**. For some operations, Erigon uses temporary files to preprocess data before
|
||||
inserting it into the main DB. That reduces write amplification and
|
||||
inserting it into the main DB. That reduces write amplification and
|
||||
DB inserts are orders of magnitude quicker.
|
||||
|
||||
<code> 🔬 See our detailed ETL explanation [here](/common/etl/).</code>
|
||||
@ -160,7 +160,7 @@ Examples of stages are:
|
||||
In Erigon RPC calls are extracted out of the main binary into a separate daemon.
|
||||
This daemon can use both local or remote DBs. That means, that this RPC daemon
|
||||
doesn't have to be running on the same machine as the main Erigon binary or
|
||||
it can run from a snapshot of a database for read-only calls.
|
||||
it can run from a snapshot of a database for read-only calls.
|
||||
|
||||
<code>🔬 See [RPC-Daemon docs](./cmd/rpcdaemon/README.md)</code>
|
||||
|
||||
@ -215,8 +215,8 @@ Getting in touch
|
||||
|
||||
### Erigon Discord Server
|
||||
|
||||
The main discussions are happening on our Discord server.
|
||||
To get an invite, send an email to `tg [at] torquem.ch` with your name, occupation,
|
||||
The main discussions are happening on our Discord server.
|
||||
To get an invite, send an email to `tg [at] torquem.ch` with your name, occupation,
|
||||
a brief explanation of why you want to join the Discord, and how you heard about Erigon.
|
||||
|
||||
### Reporting security issues/concerns
|
||||
@ -265,24 +265,24 @@ Known issues
|
||||
Erigon's internal DB (LMDB) using `MemoryMap` - when OS does manage all `read, write, cache` operations instead of Application
|
||||
([linux](https://linux-kernel-labs.github.io/refs/heads/master/labs/memory_mapping.html), [windows](https://docs.microsoft.com/en-us/windows/win32/memory/file-mapping))
|
||||
|
||||
`htop` on column `res` shows memory of "App + OS used to hold page cache for given App",
|
||||
but it's not informative, because if `htop` says that the app is using 90% of memory, you still
|
||||
`htop` on column `res` shows memory of "App + OS used to hold page cache for given App",
|
||||
but it's not informative, because if `htop` says that the app is using 90% of memory, you still
|
||||
can run 3 more instances of app on the same machine - because most of that `90%` is "OS pages cache".
|
||||
The OS automatically frees this cache any time it needs memory.
|
||||
Smaller "page cache size" may not impact performance of Erigon at all.
|
||||
The OS automatically frees this cache any time it needs memory.
|
||||
Smaller "page cache size" may not impact performance of Erigon at all.
|
||||
|
||||
Next tools show correct memory usage of Erigon:
|
||||
- `vmmap -summary PID | grep -i "Physical footprint"`.
|
||||
Without `grep` you can see details - `section MALLOC ZONE column Resident Size` shows App memory usage, `section REGION TYPE column Resident Size` shows OS pages cache size.
|
||||
Next tools show correct memory usage of Erigon:
|
||||
- `vmmap -summary PID | grep -i "Physical footprint"`.
|
||||
Without `grep` you can see details - `section MALLOC ZONE column Resident Size` shows App memory usage, `section REGION TYPE column Resident Size` shows OS pages cache size.
|
||||
- `Prometheus` dashboard shows memory of Go app without OS pages cache (`make prometheus`, open in browser `localhost:3000`, credentials `admin/admin`)
|
||||
- `cat /proc/<PID>/smaps`
|
||||
|
||||
Erigon uses ~4Gb of RAM during genesis sync and < 1Gb during normal work. OS pages cache can utilize unlimited amount of memory.
|
||||
Erigon uses ~4Gb of RAM during genesis sync and < 1Gb during normal work. OS pages cache can utilize unlimited amount of memory.
|
||||
|
||||
**Warning:** Multiple instances of Erigon on same machine will touch Disk concurrently,
|
||||
it impacts performance - one of main Erigon optimisations: "reduce Disk random access".
|
||||
**Warning:** Multiple instances of Erigon on same machine will touch Disk concurrently,
|
||||
it impacts performance - one of main Erigon optimisations: "reduce Disk random access".
|
||||
"Blocks Execution stage" still does much random reads - this is reason why it's slowest stage.
|
||||
We do not recommend running multiple genesis syncs on the same Disk.
|
||||
We do not recommend running multiple genesis syncs on the same Disk.
|
||||
If genesis sync passed, then it's fine to run multiple Erigon on same Disk.
|
||||
|
||||
### Blocks Execution is slow on cloud-network-drives
|
||||
|
@ -69,6 +69,7 @@ type SimulatedBackend struct {
|
||||
database *ethdb.ObjectDatabase // In memory database to store our testing data
|
||||
engine consensus.Engine
|
||||
getHeader func(hash common.Hash, number uint64) *types.Header
|
||||
checkTEVM func(hash common.Hash) (bool, error)
|
||||
|
||||
mu sync.Mutex
|
||||
prependBlock *types.Block
|
||||
@ -102,7 +103,8 @@ func NewSimulatedBackendWithDatabase(database *ethdb.ObjectDatabase, alloc core.
|
||||
getHeader: func(hash common.Hash, number uint64) *types.Header {
|
||||
return rawdb.ReadHeader(database, hash, number)
|
||||
},
|
||||
config: genesis.Config,
|
||||
checkTEVM: ethdb.GetCheckTEVM(database),
|
||||
config: genesis.Config,
|
||||
}
|
||||
backend.events = filters.NewEventSystem(&filterBackend{database, backend})
|
||||
backend.emptyPendingBlock()
|
||||
@ -125,6 +127,7 @@ func NewSimulatedBackendWithConfig(alloc core.GenesisAlloc, config *params.Chain
|
||||
getHeader: func(hash common.Hash, number uint64) *types.Header {
|
||||
return rawdb.ReadHeader(database, hash, number)
|
||||
},
|
||||
checkTEVM: ethdb.GetCheckTEVM(database),
|
||||
}
|
||||
backend.events = filters.NewEventSystem(&filterBackend{database, backend})
|
||||
backend.emptyPendingBlock()
|
||||
@ -620,7 +623,7 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg
|
||||
msg := callMsg{call}
|
||||
|
||||
txContext := core.NewEVMTxContext(msg)
|
||||
evmContext := core.NewEVMBlockContext(block.Header(), b.getHeader, b.engine, nil)
|
||||
evmContext := core.NewEVMBlockContext(block.Header(), b.getHeader, b.engine, nil, b.checkTEVM)
|
||||
// Create a new environment which holds all relevant information
|
||||
// about the transaction and calling mechanisms.
|
||||
vmEnv := vm.NewEVM(evmContext, txContext, statedb, b.config, vm.Config{})
|
||||
@ -653,7 +656,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac
|
||||
&b.pendingHeader.Coinbase, b.gasPool,
|
||||
b.pendingState, state.NewNoopWriter(),
|
||||
b.pendingHeader, tx,
|
||||
&b.pendingHeader.GasUsed, vm.Config{}); err != nil {
|
||||
&b.pendingHeader.GasUsed, vm.Config{}, b.checkTEVM); err != nil {
|
||||
return err
|
||||
}
|
||||
//fmt.Printf("==== Start producing block %d\n", (b.prependBlock.NumberU64() + 1))
|
||||
|
@ -363,8 +363,7 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) {
|
||||
sim := NewSimulatedBackend(t,
|
||||
core.GenesisAlloc{
|
||||
testAddr: {Balance: big.NewInt(10000000000)},
|
||||
}, 10000000,
|
||||
)
|
||||
}, 10000000)
|
||||
bgCtx := context.Background()
|
||||
|
||||
// create a signed transaction to send
|
||||
|
@ -118,6 +118,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
Transfer: core.Transfer,
|
||||
Coinbase: pre.Env.Coinbase,
|
||||
BlockNumber: pre.Env.Number,
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
Time: pre.Env.Timestamp,
|
||||
Difficulty: pre.Env.Difficulty,
|
||||
GasLimit: pre.Env.GasLimit,
|
||||
|
@ -198,6 +198,7 @@ func NewStagedSync(
|
||||
db,
|
||||
sm.Receipts,
|
||||
sm.CallTraces,
|
||||
sm.TEVM,
|
||||
pruningDistance,
|
||||
batchSize,
|
||||
nil,
|
||||
@ -209,6 +210,13 @@ func NewStagedSync(
|
||||
&vm.Config{NoReceipts: !sm.Receipts},
|
||||
tmpdir,
|
||||
),
|
||||
stagedsync.StageTranspileCfg(
|
||||
db,
|
||||
batchSize,
|
||||
nil,
|
||||
nil,
|
||||
controlServer.chainConfig,
|
||||
),
|
||||
stagedsync.StageHashStateCfg(db, tmpdir),
|
||||
stagedsync.StageTrieCfg(db, true, true, tmpdir),
|
||||
stagedsync.StageHistoryCfg(db, tmpdir),
|
||||
|
@ -212,6 +212,7 @@ func mock(t *testing.T) *MockSentry {
|
||||
db,
|
||||
sm.Receipts,
|
||||
sm.CallTraces,
|
||||
sm.TEVM,
|
||||
0,
|
||||
batchSize,
|
||||
nil,
|
||||
@ -223,6 +224,13 @@ func mock(t *testing.T) *MockSentry {
|
||||
&vm.Config{NoReceipts: !sm.Receipts},
|
||||
mock.tmpdir,
|
||||
),
|
||||
stagedsync.StageTranspileCfg(
|
||||
db,
|
||||
batchSize,
|
||||
nil,
|
||||
nil,
|
||||
mock.chainConfig,
|
||||
),
|
||||
stagedsync.StageHashStateCfg(db, mock.tmpdir),
|
||||
stagedsync.StageTrieCfg(db, true, true, mock.tmpdir),
|
||||
stagedsync.StageHistoryCfg(db, mock.tmpdir),
|
||||
|
@ -34,6 +34,8 @@ var stateBuckets = []string{
|
||||
dbutils.AccountsHistoryBucket,
|
||||
dbutils.StorageHistoryBucket,
|
||||
dbutils.TxLookupPrefix,
|
||||
dbutils.ContractTEVMCodeBucket,
|
||||
dbutils.ContractTEVMCodeStatusBucket,
|
||||
}
|
||||
|
||||
var cmdCompareBucket = &cobra.Command{
|
||||
|
@ -216,6 +216,7 @@ func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string
|
||||
stages.BlockHashes,
|
||||
stages.Bodies,
|
||||
stages.Senders,
|
||||
stages.Translation,
|
||||
stages.AccountHistoryIndex,
|
||||
stages.StorageHistoryIndex,
|
||||
stages.LogIndex,
|
||||
@ -265,7 +266,7 @@ func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string
|
||||
log.Info("Stage4", "progress", stage4.BlockNumber)
|
||||
|
||||
err = stagedsync.SpawnExecuteBlocksStage(stage4, tx, blockNumber, ch,
|
||||
stagedsync.StageExecuteBlocksCfg(db, false, false, 0, batchSize, nil, nil, nil, nil, chainConfig, engine, vmConfig, tmpDir), nil,
|
||||
stagedsync.StageExecuteBlocksCfg(db, false, false, false, 0, batchSize, nil, nil, nil, nil, chainConfig, engine, vmConfig, tmpDir), nil,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("execution err %w", err)
|
||||
|
@ -330,7 +330,7 @@ func init() {
|
||||
rootCmd.AddCommand(cmdRunMigrations)
|
||||
|
||||
withDatadir(cmdSetStorageMode)
|
||||
cmdSetStorageMode.Flags().StringVar(&storageMode, "storage-mode", "htr", "Storage mode to override database")
|
||||
cmdSetStorageMode.Flags().StringVar(&storageMode, "storage-mode", "htre", "Storage mode to override database")
|
||||
rootCmd.AddCommand(cmdSetStorageMode)
|
||||
}
|
||||
|
||||
@ -450,7 +450,7 @@ func stageExec(db ethdb.RwKV, ctx context.Context) error {
|
||||
stage4 := stage(sync, tx, stages.Execution)
|
||||
log.Info("Stage4", "progress", stage4.BlockNumber)
|
||||
ch := ctx.Done()
|
||||
cfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, 0, batchSize, nil, nil, silkwormExecutionFunc(), nil, chainConfig, engine, vmConfig, tmpDBPath)
|
||||
cfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, sm.TEVM, 0, batchSize, nil, nil, silkwormExecutionFunc(), nil, chainConfig, engine, vmConfig, tmpDBPath)
|
||||
if unwind > 0 {
|
||||
u := &stagedsync.UnwindState{Stage: stages.Execution, UnwindPoint: stage4.BlockNumber - unwind}
|
||||
err = stagedsync.UnwindExecutionStage(u, stage4, tx, ch, cfg, nil)
|
||||
|
@ -181,7 +181,7 @@ func syncBySmallSteps(db ethdb.RwKV, miningConfig params.MiningConfig, ctx conte
|
||||
stages.TxPool, // TODO: enable TxPool stage
|
||||
stages.Finish)
|
||||
|
||||
execCfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, 0, batchSize, nil, nil, nil, changeSetHook, chainConfig, engine, vmConfig, tmpDir)
|
||||
execCfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, sm.TEVM, 0, batchSize, nil, nil, nil, changeSetHook, chainConfig, engine, vmConfig, tmpDir)
|
||||
|
||||
execUntilFunc := func(execToBlock uint64) func(stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error {
|
||||
return func(s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error {
|
||||
@ -429,7 +429,7 @@ func loopIh(db ethdb.RwKV, ctx context.Context, unwind uint64) error {
|
||||
}
|
||||
|
||||
_ = clearUnwindStack(tx, context.Background())
|
||||
sync.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxPool, stages.TxLookup, stages.Finish)
|
||||
sync.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.Translation, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxPool, stages.TxLookup, stages.Finish)
|
||||
if err = sync.Run(ethdb.NewObjectDatabase(db), tx); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -514,7 +514,7 @@ func loopExec(db ethdb.RwKV, ctx context.Context, unwind uint64) error {
|
||||
|
||||
from := progress(tx, stages.Execution)
|
||||
to := from + unwind
|
||||
cfg := stagedsync.StageExecuteBlocksCfg(db, true, false, 0, batchSize, nil, nil, silkwormExecutionFunc(), nil, chainConfig, engine, vmConfig, tmpDBPath)
|
||||
cfg := stagedsync.StageExecuteBlocksCfg(db, true, false, false, 0, batchSize, nil, nil, silkwormExecutionFunc(), nil, chainConfig, engine, vmConfig, tmpDBPath)
|
||||
|
||||
// set block limit of execute stage
|
||||
sync.MockExecFunc(stages.Execution, func(stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx ethdb.RwTx) error {
|
||||
|
@ -69,28 +69,30 @@ import (
|
||||
}*/
|
||||
|
||||
var bucketLabels = map[string]string{
|
||||
dbutils.BlockReceiptsPrefix: "Receipts",
|
||||
dbutils.Log: "Event Logs",
|
||||
dbutils.AccountsHistoryBucket: "History Of Accounts",
|
||||
dbutils.StorageHistoryBucket: "History Of Storage",
|
||||
dbutils.HeadersBucket: "Headers",
|
||||
dbutils.HeaderCanonicalBucket: "Canonical headers",
|
||||
dbutils.HeaderTDBucket: "Headers TD",
|
||||
dbutils.BlockBodyPrefix: "Block Bodies",
|
||||
dbutils.HeaderNumberBucket: "Header Numbers",
|
||||
dbutils.TxLookupPrefix: "Transaction Index",
|
||||
dbutils.CodeBucket: "Code Of Contracts",
|
||||
dbutils.SyncStageProgress: "Sync Progress",
|
||||
dbutils.PlainStateBucket: "Plain State",
|
||||
dbutils.HashedAccountsBucket: "Hashed Accounts",
|
||||
dbutils.HashedStorageBucket: "Hashed Storage",
|
||||
dbutils.TrieOfAccountsBucket: "Intermediate Hashes Of Accounts",
|
||||
dbutils.TrieOfStorageBucket: "Intermediate Hashes Of Storage",
|
||||
dbutils.SyncStageUnwind: "Unwind",
|
||||
dbutils.AccountChangeSetBucket: "Account Changes",
|
||||
dbutils.StorageChangeSetBucket: "Storage Changes",
|
||||
dbutils.IncarnationMapBucket: "Incarnations",
|
||||
dbutils.Senders: "Transaction Senders",
|
||||
dbutils.BlockReceiptsPrefix: "Receipts",
|
||||
dbutils.Log: "Event Logs",
|
||||
dbutils.AccountsHistoryBucket: "History Of Accounts",
|
||||
dbutils.StorageHistoryBucket: "History Of Storage",
|
||||
dbutils.HeadersBucket: "Headers",
|
||||
dbutils.HeaderCanonicalBucket: "Canonical headers",
|
||||
dbutils.HeaderTDBucket: "Headers TD",
|
||||
dbutils.BlockBodyPrefix: "Block Bodies",
|
||||
dbutils.HeaderNumberBucket: "Header Numbers",
|
||||
dbutils.TxLookupPrefix: "Transaction Index",
|
||||
dbutils.CodeBucket: "Code Of Contracts",
|
||||
dbutils.SyncStageProgress: "Sync Progress",
|
||||
dbutils.PlainStateBucket: "Plain State",
|
||||
dbutils.HashedAccountsBucket: "Hashed Accounts",
|
||||
dbutils.HashedStorageBucket: "Hashed Storage",
|
||||
dbutils.TrieOfAccountsBucket: "Intermediate Hashes Of Accounts",
|
||||
dbutils.TrieOfStorageBucket: "Intermediate Hashes Of Storage",
|
||||
dbutils.SyncStageUnwind: "Unwind",
|
||||
dbutils.AccountChangeSetBucket: "Account Changes",
|
||||
dbutils.StorageChangeSetBucket: "Storage Changes",
|
||||
dbutils.IncarnationMapBucket: "Incarnations",
|
||||
dbutils.Senders: "Transaction Senders",
|
||||
dbutils.ContractTEVMCodeBucket: "Contract TEVM code",
|
||||
dbutils.ContractTEVMCodeStatusBucket: "Contract TEVM code status",
|
||||
}
|
||||
|
||||
/*dbutils.PlainContractCodeBucket,
|
||||
|
@ -63,7 +63,8 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co
|
||||
return StorageRangeResult{}, err
|
||||
}
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) }
|
||||
_, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, getHeader, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
_, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, getHeader, checkTEVM, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
if err != nil {
|
||||
return StorageRangeResult{}, err
|
||||
}
|
||||
@ -215,10 +216,11 @@ func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
GetHeader := func(hash common.Hash, number uint64) *types.Header {
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header {
|
||||
return rawdb.ReadHeader(tx, hash, number)
|
||||
}
|
||||
_, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, GetHeader, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
_, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, getHeader, checkTEVM, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -38,7 +38,8 @@ func getReceipts(ctx context.Context, tx ethdb.Tx, chainConfig *params.ChainConf
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header {
|
||||
return rawdb.ReadHeader(tx, hash, number)
|
||||
}
|
||||
_, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, getHeader, ethash.NewFaker(), tx, hash, 0)
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
_, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, bc, chainConfig, getHeader, checkTEVM, ethash.NewFaker(), tx, hash, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -49,7 +50,7 @@ func getReceipts(ctx context.Context, tx ethdb.Tx, chainConfig *params.ChainConf
|
||||
for i, txn := range block.Transactions() {
|
||||
ibs.Prepare(txn.Hash(), block.Hash(), i)
|
||||
|
||||
receipt, err := core.ApplyTransaction(chainConfig, getHeader, ethash.NewFaker(), nil, gp, ibs, state.NewNoopWriter(), block.Header(), txn, usedGas, vm.Config{})
|
||||
receipt, err := core.ApplyTransaction(chainConfig, getHeader, ethash.NewFaker(), nil, gp, ibs, state.NewNoopWriter(), block.Header(), txn, usedGas, vm.Config{}, checkTEVM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -42,7 +42,8 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header {
|
||||
return rawdb.ReadHeader(tx, hash, number)
|
||||
}
|
||||
msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, getter, chainConfig, getHeader, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, getter, chainConfig, getHeader, checkTEVM, ethash.NewFaker(), tx, blockHash, txIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -111,7 +111,9 @@ func TestMatreshkaStream(t *testing.T) {
|
||||
t.Fatal(err, currentBlock)
|
||||
}
|
||||
|
||||
_, err = core.ExecuteBlockEphemerally(chainConfig, &vm.Config{NoReceipts: true}, getHeader, ethash.NewFaker(), block, stateReaderWriter, stateReaderWriter)
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
|
||||
_, err = core.ExecuteBlockEphemerally(chainConfig, &vm.Config{NoReceipts: true}, getHeader, ethash.NewFaker(), block, stateReaderWriter, stateReaderWriter, checkTEVM)
|
||||
if err != nil {
|
||||
t.Fatal(err, currentBlock)
|
||||
}
|
||||
|
@ -136,7 +136,8 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h
|
||||
}
|
||||
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(rwtx, hash, number) }
|
||||
receipts, err1 := runBlock(intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, block, vmConfig)
|
||||
checkTEVM := ethdb.GetCheckTEVM(rwtx)
|
||||
receipts, err1 := runBlock(intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, checkTEVM, block, vmConfig)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
|
@ -540,7 +540,8 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB
|
||||
intraBlockState.SetTracer(ot)
|
||||
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(chainDb, hash, number) }
|
||||
receipts, err1 := runBlock(intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, block, vmConfig)
|
||||
checkTEVM := ethdb.GetCheckTEVM(chainDb)
|
||||
receipts, err1 := runBlock(intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, checkTEVM, block, vmConfig)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
@ -655,7 +656,7 @@ func check(e error) {
|
||||
}
|
||||
|
||||
func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter,
|
||||
chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (types.Receipts, error) {
|
||||
chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, checkTEVM func(hash common.Hash) (bool, error), block *types.Block, vmConfig vm.Config) (types.Receipts, error) {
|
||||
header := block.Header()
|
||||
vmConfig.TraceJumpDest = true
|
||||
engine := ethash.NewFullFaker()
|
||||
@ -667,7 +668,7 @@ func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWrit
|
||||
}
|
||||
for i, tx := range block.Transactions() {
|
||||
ibs.Prepare(tx.Hash(), block.Hash(), i)
|
||||
receipt, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig)
|
||||
receipt, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, checkTEVM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not apply tx %d [%x] failed: %v", i, tx.Hash(), err)
|
||||
}
|
||||
|
@ -91,6 +91,16 @@ const (
|
||||
//key - address
|
||||
//value - incarnation of account when it was last deleted
|
||||
IncarnationMapBucket = "incarnationMap"
|
||||
|
||||
//TEVMCodeStatusBucket -
|
||||
//key - encoded timestamp(block number)
|
||||
//value - contract codes hashes: [code_hash1]+[code_hash2]
|
||||
ContractTEVMCodeStatusBucket = "TEVM-code-status"
|
||||
|
||||
//TEVMCodeBucket -
|
||||
//key - contract code hash
|
||||
//value - contract TEVM code
|
||||
ContractTEVMCodeBucket = "TEVM-code"
|
||||
)
|
||||
|
||||
/*TrieOfAccountsBucket and TrieOfStorageBucket
|
||||
@ -280,6 +290,8 @@ var (
|
||||
StorageModeTxIndex = []byte("smTxIndex")
|
||||
//StorageModeCallTraces - does not build index of call traces
|
||||
StorageModeCallTraces = []byte("smCallTraces")
|
||||
//StorageModeTEVM - does not translate EVM to TEVM
|
||||
StorageModeTEVM = []byte("smTEVM")
|
||||
|
||||
DBSchemaVersionKey = []byte("dbVersion")
|
||||
|
||||
@ -311,6 +323,8 @@ var Buckets = []string{
|
||||
BloomBitsIndexPrefix,
|
||||
DatabaseInfoBucket,
|
||||
IncarnationMapBucket,
|
||||
ContractTEVMCodeBucket,
|
||||
ContractTEVMCodeStatusBucket,
|
||||
CliqueSeparateBucket,
|
||||
CliqueLastSnapshotBucket,
|
||||
CliqueSnapshotBucket,
|
||||
|
@ -94,6 +94,7 @@ func ExecuteBlockEphemerally(
|
||||
block *types.Block,
|
||||
stateReader state.StateReader,
|
||||
stateWriter state.WriterWithChangeSets,
|
||||
checkTEVM func(hash common.Hash) (bool, error),
|
||||
) (types.Receipts, error) {
|
||||
defer blockExecutionTimer.UpdateSince(time.Now())
|
||||
block.Uncles()
|
||||
@ -116,7 +117,7 @@ func ExecuteBlockEphemerally(
|
||||
writeTrace = true
|
||||
}
|
||||
|
||||
receipt, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig)
|
||||
receipt, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, checkTEVM)
|
||||
if writeTrace {
|
||||
w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash()))
|
||||
if err1 != nil {
|
||||
|
@ -107,7 +107,8 @@ func (b *BlockGen) AddTxWithChain(getHeader func(hash common.Hash, number uint64
|
||||
b.SetCoinbase(common.Address{})
|
||||
}
|
||||
b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
|
||||
receipt, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{})
|
||||
checkTEVM := func(common.Hash) (bool, error) { return false, nil }
|
||||
receipt, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, checkTEVM)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
21
core/evm.go
21
core/evm.go
@ -27,7 +27,7 @@ import (
|
||||
)
|
||||
|
||||
// NewEVMBlockContext creates a new context for use in the EVM.
|
||||
func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address) vm.BlockContext {
|
||||
func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, checkTEVM func(codeHash common.Hash) (bool, error)) vm.BlockContext {
|
||||
// If we don't have an explicit author (i.e. not mining), extract from the header
|
||||
var beneficiary common.Address
|
||||
if author == nil {
|
||||
@ -39,6 +39,11 @@ func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, n
|
||||
if header.Eip1559 {
|
||||
baseFee.SetFromBig(header.BaseFee)
|
||||
}
|
||||
if checkTEVM == nil {
|
||||
checkTEVM = func(_ common.Hash) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return vm.BlockContext{
|
||||
CanTransfer: CanTransfer,
|
||||
Transfer: Transfer,
|
||||
@ -49,6 +54,7 @@ func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, n
|
||||
Difficulty: new(big.Int).Set(header.Difficulty),
|
||||
BaseFee: &baseFee,
|
||||
GasLimit: header.GasLimit,
|
||||
CheckTEVM: checkTEVM,
|
||||
}
|
||||
}
|
||||
|
||||
@ -60,19 +66,6 @@ func NewEVMTxContext(msg Message) vm.TxContext {
|
||||
}
|
||||
}
|
||||
|
||||
func NewEVMContextByHeader(msg Message, header *types.Header, hashGetter func(n uint64) common.Hash) vm.BlockContext {
|
||||
return vm.BlockContext{
|
||||
CanTransfer: CanTransfer,
|
||||
Transfer: Transfer,
|
||||
GetHash: hashGetter,
|
||||
Coinbase: header.Coinbase,
|
||||
BlockNumber: header.Number.Uint64(),
|
||||
Time: header.Time,
|
||||
Difficulty: new(big.Int).Set(header.Difficulty),
|
||||
GasLimit: header.GasLimit,
|
||||
}
|
||||
}
|
||||
|
||||
// GetHashFn returns a GetHashFunc which retrieves header hashes by number
|
||||
func GetHashFn(ref *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header) func(n uint64) common.Hash {
|
||||
// Cache will initially contain [refHash.parent],
|
||||
|
@ -93,7 +93,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, getHeader f
|
||||
|
||||
ctx := config.WithEIPsFlags(context.Background(), header.Number.Uint64())
|
||||
// Create a new context to be used in the EVM environment
|
||||
context := NewEVMBlockContext(header, getHeader, engine, author)
|
||||
context := NewEVMBlockContext(header, getHeader, engine, author, evm.Context.CheckTEVM)
|
||||
txContext := NewEVMTxContext(msg)
|
||||
if cfg.TraceJumpDest {
|
||||
txContext.TxHash = tx.Hash()
|
||||
@ -147,13 +147,13 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, getHeader f
|
||||
// and uses the input parameters for its environment. It returns the receipt
|
||||
// for the transaction, gas used and an error if the transaction failed,
|
||||
// indicating the block was invalid.
|
||||
func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) {
|
||||
func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, checkTEVM func(hash common.Hash) (bool, error)) (*types.Receipt, error) {
|
||||
msg, err := tx.AsMessage(*types.MakeSigner(config, header.Number.Uint64()), header.BaseFee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a new context to be used in the EVM environment
|
||||
blockContext := NewEVMBlockContext(header, getHeader, engine, author)
|
||||
blockContext := NewEVMBlockContext(header, getHeader, engine, author, checkTEVM)
|
||||
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg)
|
||||
return applyTransaction(msg, config, getHeader, engine, author, gp, ibs, stateWriter, header, tx, usedGas, vmenv, cfg)
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ func BenchmarkJumpDest(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
contract := NewContract(contractRef, contractRef, nil, 0, false /* skipAnalysis */)
|
||||
contract := NewContract(contractRef, contractRef, nil, 0, false /* skipAnalysis */, false)
|
||||
contract.Code = code
|
||||
contract.CodeHash = hash
|
||||
|
||||
|
@ -51,6 +51,7 @@ type Contract struct {
|
||||
jumpdests map[common.Hash][]uint64 // Aggregated result of JUMPDEST analysis.
|
||||
analysis []uint64 // Locally cached result of JUMPDEST analysis
|
||||
skipAnalysis bool
|
||||
vmType VmType
|
||||
|
||||
Code []byte
|
||||
CodeHash common.Hash
|
||||
@ -62,7 +63,7 @@ type Contract struct {
|
||||
}
|
||||
|
||||
// NewContract returns a new contract environment for the execution of EVM.
|
||||
func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas uint64, skipAnalysis bool) *Contract {
|
||||
func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas uint64, skipAnalysis bool, isTEVM bool) *Contract {
|
||||
c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object}
|
||||
|
||||
if parent, ok := caller.(*Contract); ok {
|
||||
@ -80,6 +81,11 @@ func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas
|
||||
|
||||
c.skipAnalysis = skipAnalysis
|
||||
|
||||
c.vmType = EVMType
|
||||
if isTEVM {
|
||||
c.vmType = TEVMType
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
|
103
core/vm/evm.go
103
core/vm/evm.go
@ -77,20 +77,21 @@ func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
|
||||
|
||||
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
|
||||
func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, error) {
|
||||
for _, interpreter := range evm.interpreters {
|
||||
if interpreter.CanRun(contract.Code) {
|
||||
if evm.interpreter != interpreter {
|
||||
// Ensure that the interpreter pointer is set back
|
||||
// to its current value upon return.
|
||||
defer func(i Interpreter) {
|
||||
evm.interpreter = i
|
||||
}(evm.interpreter)
|
||||
evm.interpreter = interpreter
|
||||
}
|
||||
return interpreter.Run(contract, input, readOnly)
|
||||
}
|
||||
interpreter := evm.interpreter
|
||||
defer func() {
|
||||
evm.interpreter = interpreter
|
||||
}()
|
||||
|
||||
switch contract.vmType {
|
||||
case EVMType:
|
||||
evm.interpreter = evm.interpreters[EVMType]
|
||||
case TEVMType:
|
||||
evm.interpreter = evm.interpreters[TEVMType]
|
||||
default:
|
||||
return nil, errors.New("no compatible interpreter")
|
||||
}
|
||||
return nil, errors.New("no compatible interpreter")
|
||||
|
||||
return evm.interpreter.Run(contract, input, readOnly)
|
||||
}
|
||||
|
||||
// BlockContext provides the EVM with auxiliary information. Once provided
|
||||
@ -103,6 +104,8 @@ type BlockContext struct {
|
||||
Transfer TransferFunc
|
||||
// GetHash returns the hash corresponding to n
|
||||
GetHash GetHashFunc
|
||||
// checkTEVM returns true if the contract has TEVM code
|
||||
CheckTEVM func(codeHash common.Hash) (bool, error)
|
||||
|
||||
// Block information
|
||||
Coinbase common.Address // Provides information for COINBASE
|
||||
@ -170,11 +173,13 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, state IntraBlockState, chain
|
||||
vmConfig: vmConfig,
|
||||
chainConfig: chainConfig,
|
||||
chainRules: chainConfig.Rules(blockCtx.BlockNumber),
|
||||
interpreters: make([]Interpreter, 0, 1),
|
||||
}
|
||||
|
||||
evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig))
|
||||
evm.interpreter = evm.interpreters[0]
|
||||
evm.interpreters = []Interpreter{
|
||||
EVMType: NewEVMInterpreter(evm, vmConfig),
|
||||
TEVMType: NewTEVMInterpreter(evm, vmConfig),
|
||||
}
|
||||
evm.interpreter = evm.interpreters[EVMType]
|
||||
|
||||
return evm
|
||||
}
|
||||
@ -253,10 +258,17 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||
addrCopy := addr
|
||||
// If the account has no code, we can abort here
|
||||
// The depth-check is already done, and precompiles handled above
|
||||
contract := NewContract(caller, AccountRef(addrCopy), value, gas, evm.vmConfig.SkipAnalysis)
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), code)
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
codehash := evm.IntraBlockState.GetCodeHash(addrCopy)
|
||||
|
||||
var isTEVM bool
|
||||
isTEVM, err = evm.Context.CheckTEVM(codehash)
|
||||
|
||||
if err == nil {
|
||||
contract := NewContract(caller, AccountRef(addrCopy), value, gas, evm.vmConfig.SkipAnalysis, isTEVM)
|
||||
contract.SetCallCode(&addrCopy, codehash, code)
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
}
|
||||
}
|
||||
}
|
||||
// When an error was returned by the EVM or when setting the creation code
|
||||
@ -315,10 +327,17 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
|
||||
addrCopy := addr
|
||||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), value, gas, evm.vmConfig.SkipAnalysis)
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), evm.IntraBlockState.GetCode(addrCopy))
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
var isTEVM bool
|
||||
|
||||
codeHash := evm.IntraBlockState.GetCodeHash(addrCopy)
|
||||
isTEVM, err = evm.Context.CheckTEVM(codeHash)
|
||||
|
||||
if err == nil {
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), value, gas, evm.vmConfig.SkipAnalysis, isTEVM)
|
||||
contract.SetCallCode(&addrCopy, codeHash, evm.IntraBlockState.GetCode(addrCopy))
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
evm.IntraBlockState.RevertToSnapshot(snapshot)
|
||||
@ -358,10 +377,16 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
|
||||
} else {
|
||||
addrCopy := addr
|
||||
// Initialise a new contract and make initialise the delegate values
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), nil, gas, evm.vmConfig.SkipAnalysis).AsDelegate()
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), evm.IntraBlockState.GetCode(addrCopy))
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
var isTEVM bool
|
||||
codeHash := evm.IntraBlockState.GetCodeHash(addrCopy)
|
||||
isTEVM, err = evm.Context.CheckTEVM(codeHash)
|
||||
|
||||
if err == nil {
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), nil, gas, evm.vmConfig.SkipAnalysis, isTEVM).AsDelegate()
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), evm.IntraBlockState.GetCode(addrCopy))
|
||||
ret, err = run(evm, contract, input, false)
|
||||
gas = contract.Gas
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
evm.IntraBlockState.RevertToSnapshot(snapshot)
|
||||
@ -414,13 +439,19 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
|
||||
addrCopy := addr
|
||||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas, evm.vmConfig.SkipAnalysis)
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), evm.IntraBlockState.GetCode(addrCopy))
|
||||
// When an error was returned by the EVM or when setting the creation code
|
||||
// above we revert to the snapshot and consume any gas remaining. Additionally
|
||||
// when we're in Homestead this also counts for code storage gas errors.
|
||||
ret, err = run(evm, contract, input, true)
|
||||
gas = contract.Gas
|
||||
var isTEVM bool
|
||||
codeHash := evm.IntraBlockState.GetCodeHash(addrCopy)
|
||||
isTEVM, err = evm.Context.CheckTEVM(codeHash)
|
||||
|
||||
if err == nil {
|
||||
contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas, evm.vmConfig.SkipAnalysis, isTEVM)
|
||||
contract.SetCallCode(&addrCopy, evm.IntraBlockState.GetCodeHash(addrCopy), evm.IntraBlockState.GetCode(addrCopy))
|
||||
// When an error was returned by the EVM or when setting the creation code
|
||||
// above we revert to the snapshot and consume any gas remaining. Additionally
|
||||
// when we're in Homestead this also counts for code storage gas errors.
|
||||
ret, err = run(evm, contract, input, true)
|
||||
gas = contract.Gas
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
evm.IntraBlockState.RevertToSnapshot(snapshot)
|
||||
@ -484,7 +515,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
|
||||
|
||||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, AccountRef(address), value, gas, evm.vmConfig.SkipAnalysis)
|
||||
contract := NewContract(caller, AccountRef(address), value, gas, evm.vmConfig.SkipAnalysis, false)
|
||||
contract.SetCodeOptionalHash(&address, codeAndHash)
|
||||
|
||||
if evm.vmConfig.NoRecursion && evm.depth > 0 {
|
||||
|
@ -99,6 +99,7 @@ func TestEIP2200(t *testing.T) {
|
||||
vmctx := BlockContext{
|
||||
CanTransfer: func(IntraBlockState, common.Address, *uint256.Int) bool { return true },
|
||||
Transfer: func(IntraBlockState, common.Address, common.Address, *uint256.Int, bool) {},
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}
|
||||
vmenv := NewEVM(vmctx, TxContext{}, s, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}})
|
||||
|
||||
|
@ -95,7 +95,9 @@ func init() {
|
||||
|
||||
func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFunc, name string) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(h common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
pc = uint64(0)
|
||||
evmInterpreter = env.interpreter.(*EVMInterpreter)
|
||||
@ -194,7 +196,9 @@ func TestSAR(t *testing.T) {
|
||||
|
||||
func TestAddMod(t *testing.T) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(h common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
evmInterpreter = NewEVMInterpreter(env, env.vmConfig)
|
||||
pc = uint64(0)
|
||||
@ -281,7 +285,9 @@ func TestJsonTestcases(t *testing.T) {
|
||||
|
||||
func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(h common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
evmInterpreter = NewEVMInterpreter(env, env.vmConfig)
|
||||
)
|
||||
@ -515,7 +521,9 @@ func BenchmarkOpIsZero(b *testing.B) {
|
||||
|
||||
func TestOpMstore(t *testing.T) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(h common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
mem = NewMemory()
|
||||
evmInterpreter = NewEVMInterpreter(env, env.vmConfig)
|
||||
@ -539,7 +547,9 @@ func TestOpMstore(t *testing.T) {
|
||||
|
||||
func BenchmarkOpMstore(bench *testing.B) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(h common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
mem = NewMemory()
|
||||
evmInterpreter = NewEVMInterpreter(env, env.vmConfig)
|
||||
@ -560,7 +570,9 @@ func BenchmarkOpMstore(bench *testing.B) {
|
||||
|
||||
func BenchmarkOpSHA3(bench *testing.B) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = stack.New()
|
||||
mem = NewMemory()
|
||||
evmInterpreter = NewEVMInterpreter(env, env.vmConfig)
|
||||
|
@ -48,18 +48,6 @@ type Interpreter interface {
|
||||
// Run loops and evaluates the contract's code with the given input data and returns
|
||||
// the return byte-slice and an error if one occurred.
|
||||
Run(contract *Contract, input []byte, static bool) ([]byte, error)
|
||||
// CanRun tells if the contract, passed as an argument, can be
|
||||
// run by the current interpreter. This is meant so that the
|
||||
// caller can do something like:
|
||||
//
|
||||
// ```golang
|
||||
// for _, interpreter := range interpreters {
|
||||
// if interpreter.CanRun(contract.code) {
|
||||
// interpreter.Run(contract.code, input)
|
||||
// }
|
||||
// }
|
||||
// ```
|
||||
CanRun([]byte) bool
|
||||
}
|
||||
|
||||
// callCtx contains the things that are per-call, such as stack and memory,
|
||||
@ -301,9 +289,3 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// CanRun tells if the contract, passed as an argument, can be
|
||||
// run by the current interpreter.
|
||||
func (in *EVMInterpreter) CanRun(code []byte) bool {
|
||||
return true
|
||||
}
|
||||
|
@ -53,11 +53,13 @@ func (*dummyStatedb) GetRefund() uint64 { return 1337 }
|
||||
|
||||
func TestStoreCapture(t *testing.T) {
|
||||
var (
|
||||
env = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
|
||||
env = NewEVM(BlockContext{
|
||||
CheckTEVM: func(hash common.Hash) (bool, error) { return false, nil },
|
||||
}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
|
||||
logger = NewStructLogger(nil)
|
||||
mem = NewMemory()
|
||||
stack = stack.New()
|
||||
contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 0, false /* skipAnalysis */)
|
||||
contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 0, false /* skipAnalysis */, false)
|
||||
)
|
||||
stack.Push(uint256.NewInt().SetUint64(1))
|
||||
stack.Push(uint256.NewInt())
|
||||
|
@ -30,6 +30,7 @@ func NewEnv(cfg *Config) *vm.EVM {
|
||||
CanTransfer: core.CanTransfer,
|
||||
Transfer: core.Transfer,
|
||||
GetHash: cfg.GetHashFn,
|
||||
CheckTEVM: cfg.CheckTEVM,
|
||||
Coinbase: cfg.Coinbase,
|
||||
BlockNumber: cfg.BlockNumber.Uint64(),
|
||||
Time: cfg.Time.Uint64(),
|
||||
|
@ -50,6 +50,7 @@ type Config struct {
|
||||
r state.StateReader
|
||||
w state.StateWriter
|
||||
GetHashFn func(n uint64) common.Hash
|
||||
CheckTEVM func(hash common.Hash) (bool, error)
|
||||
}
|
||||
|
||||
// sets defaults on the config
|
||||
@ -96,6 +97,11 @@ func setDefaults(cfg *Config) {
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(new(big.Int).SetUint64(n).String())))
|
||||
}
|
||||
}
|
||||
if cfg.CheckTEVM == nil {
|
||||
cfg.CheckTEVM = func(hash common.Hash) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Execute executes the code using the input as call data during the execution.
|
||||
|
20
core/vm/tevm_interpreter.go
Normal file
20
core/vm/tevm_interpreter.go
Normal file
@ -0,0 +1,20 @@
|
||||
package vm
|
||||
|
||||
// todo: TBD actual TEVM interpreter
|
||||
|
||||
// TEVMInterpreter represents an TEVM interpreter
|
||||
type TEVMInterpreter struct {
|
||||
*EVMInterpreter
|
||||
}
|
||||
|
||||
type VmType int8
|
||||
|
||||
const (
|
||||
EVMType VmType = 0
|
||||
TEVMType VmType = 1
|
||||
)
|
||||
|
||||
// NewTEVMInterpreter returns a new instance of the Interpreter.
|
||||
func NewTEVMInterpreter(evm *EVM, cfg Config) *TEVMInterpreter {
|
||||
return &TEVMInterpreter{NewEVMInterpreter(evm, cfg)}
|
||||
}
|
@ -212,7 +212,7 @@ func New(stack *node.Node, config *ethconfig.Config, gitCommit string) (*Ethereu
|
||||
return nil, err
|
||||
}
|
||||
if config.StorageMode.Initialised {
|
||||
// If storage mode is not explicitely specified, we take whatever is in the database
|
||||
// If storage mode is not explicitly specified, we take whatever is in the database
|
||||
if !reflect.DeepEqual(sm, config.StorageMode) {
|
||||
return nil, errors.New("mode is " + config.StorageMode.ToString() + " original mode is " + sm.ToString())
|
||||
}
|
||||
|
@ -93,7 +93,7 @@ func init() {
|
||||
if localappdata != "" {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(localappdata, "erigon-thash")
|
||||
} else {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "eriogn-ethash")
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "erigon-ethash")
|
||||
}
|
||||
} else {
|
||||
if xdgDataDir := os.Getenv("XDG_DATA_HOME"); xdgDataDir != "" {
|
||||
|
@ -72,6 +72,7 @@ func createStageBuilders(blocks []*types.Block, blockNum uint64, checkRoot bool)
|
||||
world.DB.RwKV(),
|
||||
world.storageMode.Receipts,
|
||||
world.storageMode.CallTraces,
|
||||
world.storageMode.TEVM,
|
||||
0,
|
||||
world.BatchSize,
|
||||
world.stateReaderBuilder,
|
||||
@ -368,5 +369,11 @@ func UpdateMetrics(db ethdb.Getter) error {
|
||||
return err
|
||||
}
|
||||
stageExecutionGauge.Update(int64(progress))
|
||||
|
||||
progress, err = stages.GetStageProgress(db, stages.Translation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stageTranspileGauge.Update(int64(progress))
|
||||
return nil
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ func ReplacementStages(ctx context.Context,
|
||||
bodies BodiesCfg,
|
||||
senders SendersCfg,
|
||||
exec ExecuteBlockCfg,
|
||||
trans TranspileCfg,
|
||||
hashState HashStateCfg,
|
||||
trieCfg TrieCfg,
|
||||
history HistoryCfg,
|
||||
@ -172,6 +173,23 @@ func ReplacementStages(ctx context.Context,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: stages.Translation,
|
||||
Build: func(world StageParameters) *Stage {
|
||||
return &Stage{
|
||||
ID: stages.Translation,
|
||||
Description: "Transpile marked EVM contracts to TEVM",
|
||||
Disabled: !sm.TEVM,
|
||||
DisabledDescription: "Enable by adding `e` to --storage-mode",
|
||||
ExecFunc: func(s *StageState, u Unwinder, tx ethdb.RwTx) error {
|
||||
return SpawnTranspileStage(s, tx, 0, ctx.Done(), trans)
|
||||
},
|
||||
UnwindFunc: func(u *UnwindState, s *StageState, tx ethdb.RwTx) error {
|
||||
return UnwindTranspileStage(u, s, tx, ctx.Done(), trans)
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: stages.CreateStateSnapshot,
|
||||
Build: func(world StageParameters) *Stage {
|
||||
@ -343,13 +361,13 @@ func ReplacementUnwindOrder() UnwindOrder {
|
||||
0, 1, 2, 3, 4, // download headers/bodies + haders&body snapshots
|
||||
// Unwinding of tx pool (reinjecting transactions into the pool needs to happen after unwinding execution)
|
||||
// also tx pool is before senders because senders unwind is inside cycle transaction
|
||||
15,
|
||||
5, 6, 7, // senders, exec, state snapshot
|
||||
9, 8, // Unwinding of IHashes needs to happen after unwinding HashState
|
||||
10, // call traces
|
||||
11, 12, // history
|
||||
13, // log index
|
||||
14, // tx lookup
|
||||
16, // finish
|
||||
16,
|
||||
5, 6, 7, 8, // senders, exec, state snapshot
|
||||
10, 9, // Unwinding of IHashes needs to happen after unwinding HashState
|
||||
11, // call traces
|
||||
12, 13, // history
|
||||
14, // log index
|
||||
15, // tx lookup
|
||||
17, // finish
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,8 @@ import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/holiman/uint256"
|
||||
|
||||
"github.com/ledgerwatch/erigon/common"
|
||||
"github.com/ledgerwatch/erigon/common/changeset"
|
||||
"github.com/ledgerwatch/erigon/common/dbutils"
|
||||
@ -52,6 +54,7 @@ type ExecuteBlockCfg struct {
|
||||
db ethdb.RwKV
|
||||
writeReceipts bool
|
||||
writeCallTraces bool
|
||||
writeTEVM bool
|
||||
pruningDistance uint64
|
||||
batchSize datasize.ByteSize
|
||||
changeSetHook ChangeSetHook
|
||||
@ -68,6 +71,7 @@ func StageExecuteBlocksCfg(
|
||||
kv ethdb.RwKV,
|
||||
WriteReceipts bool,
|
||||
WriteCallTraces bool,
|
||||
writeTEVM bool,
|
||||
pruningDistance uint64,
|
||||
BatchSize datasize.ByteSize,
|
||||
ReaderBuilder StateReaderBuilder,
|
||||
@ -83,6 +87,7 @@ func StageExecuteBlocksCfg(
|
||||
db: kv,
|
||||
writeReceipts: WriteReceipts,
|
||||
writeCallTraces: WriteCallTraces,
|
||||
writeTEVM: writeTEVM,
|
||||
pruningDistance: pruningDistance,
|
||||
batchSize: BatchSize,
|
||||
changeSetHook: ChangeSetHook,
|
||||
@ -113,29 +118,11 @@ func executeBlockWithGo(
|
||||
writeChangesets bool,
|
||||
traceCursor ethdb.RwCursorDupSort,
|
||||
accumulator *shards.Accumulator,
|
||||
readerWriterWrapper func(r state.StateReader, w state.WriterWithChangeSets) *TouchReaderWriter,
|
||||
checkTEVM func(hash common.Hash) (bool, error),
|
||||
) error {
|
||||
blockNum := block.NumberU64()
|
||||
var stateReader state.StateReader
|
||||
var stateWriter state.WriterWithChangeSets
|
||||
|
||||
if params.readerBuilder != nil {
|
||||
stateReader = params.readerBuilder(batch)
|
||||
} else {
|
||||
stateReader = state.NewPlainStateReader(batch)
|
||||
}
|
||||
|
||||
if params.writerBuilder != nil {
|
||||
stateWriter = params.writerBuilder(batch, tx, blockNum)
|
||||
} else {
|
||||
if accumulator != nil {
|
||||
accumulator.StartChange(blockNum, block.Hash(), false /* unwind */)
|
||||
}
|
||||
if writeChangesets {
|
||||
stateWriter = state.NewPlainStateWriter(batch, tx, blockNum).SetAccumulator(accumulator)
|
||||
} else {
|
||||
stateWriter = state.NewPlainStateWriterNoHistory(batch, blockNum).SetAccumulator(accumulator)
|
||||
}
|
||||
}
|
||||
stateReader, stateWriter := newStateReaderWriter(params, batch, tx, blockNum, block.Hash(), writeChangesets, accumulator, readerWriterWrapper)
|
||||
|
||||
// where the magic happens
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) }
|
||||
@ -145,7 +132,7 @@ func executeBlockWithGo(
|
||||
params.vmConfig.Debug = true
|
||||
params.vmConfig.Tracer = callTracer
|
||||
}
|
||||
receipts, err := core.ExecuteBlockEphemerally(params.chainConfig, params.vmConfig, getHeader, params.engine, block, stateReader, stateWriter)
|
||||
receipts, err := core.ExecuteBlockEphemerally(params.chainConfig, params.vmConfig, getHeader, params.engine, block, stateReader, stateWriter, checkTEVM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -210,6 +197,48 @@ func executeBlockWithGo(
|
||||
return nil
|
||||
}
|
||||
|
||||
func newStateReaderWriter(
|
||||
params ExecuteBlockCfg,
|
||||
batch ethdb.Database,
|
||||
tx ethdb.RwTx,
|
||||
blockNum uint64,
|
||||
blockHash common.Hash,
|
||||
writeChangesets bool,
|
||||
accumulator *shards.Accumulator,
|
||||
readerWriterWrapper func(r state.StateReader, w state.WriterWithChangeSets) *TouchReaderWriter,
|
||||
) (state.StateReader, state.WriterWithChangeSets) {
|
||||
|
||||
var stateReader state.StateReader
|
||||
var stateWriter state.WriterWithChangeSets
|
||||
|
||||
if params.readerBuilder != nil {
|
||||
stateReader = params.readerBuilder(batch)
|
||||
} else {
|
||||
stateReader = state.NewPlainStateReader(batch)
|
||||
}
|
||||
|
||||
if params.writerBuilder != nil {
|
||||
stateWriter = params.writerBuilder(batch, tx, blockNum)
|
||||
} else {
|
||||
if accumulator != nil {
|
||||
accumulator.StartChange(blockNum, blockHash, false)
|
||||
}
|
||||
if writeChangesets {
|
||||
stateWriter = state.NewPlainStateWriter(batch, tx, blockNum).SetAccumulator(accumulator)
|
||||
} else {
|
||||
stateWriter = state.NewPlainStateWriterNoHistory(batch, blockNum).SetAccumulator(accumulator)
|
||||
}
|
||||
}
|
||||
|
||||
if readerWriterWrapper != nil {
|
||||
wrapper := readerWriterWrapper(stateReader, stateWriter)
|
||||
stateReader = wrapper
|
||||
stateWriter = wrapper
|
||||
}
|
||||
|
||||
return stateReader, stateWriter
|
||||
}
|
||||
|
||||
func SpawnExecuteBlocksStage(s *StageState, tx ethdb.RwTx, toBlock uint64, quit <-chan struct{}, cfg ExecuteBlockCfg, accumulator *shards.Accumulator) error {
|
||||
useExternalTx := tx != nil
|
||||
if !useExternalTx {
|
||||
@ -247,6 +276,15 @@ func SpawnExecuteBlocksStage(s *StageState, tx ethdb.RwTx, toBlock uint64, quit
|
||||
defer traceCursor.Close()
|
||||
}
|
||||
|
||||
var tevmStatusCursor ethdb.RwCursorDupSort
|
||||
if cfg.writeTEVM {
|
||||
var err error
|
||||
if tevmStatusCursor, err = tx.RwCursorDupSort(dbutils.ContractTEVMCodeStatusBucket); err != nil {
|
||||
return fmt.Errorf("%s: failed to create cursor for TEVM status: %v", logPrefix, err)
|
||||
}
|
||||
defer tevmStatusCursor.Close()
|
||||
}
|
||||
|
||||
useSilkworm := cfg.silkwormExecutionFunc != nil
|
||||
if useSilkworm && cfg.changeSetHook != nil {
|
||||
panic("ChangeSetHook is not supported with Silkworm")
|
||||
@ -285,13 +323,62 @@ func SpawnExecuteBlocksStage(s *StageState, tx ethdb.RwTx, toBlock uint64, quit
|
||||
log.Error(fmt.Sprintf("[%s] Empty block", logPrefix), "blocknum", blockNum)
|
||||
break
|
||||
}
|
||||
|
||||
writeChangesets := true
|
||||
if cfg.pruningDistance > 0 && to-blockNum > cfg.pruningDistance {
|
||||
writeChangesets = false
|
||||
}
|
||||
if err = executeBlockWithGo(block, tx, batch, cfg, writeChangesets, traceCursor, accumulator); err != nil {
|
||||
|
||||
var (
|
||||
stateReaderWriter *TouchReaderWriter
|
||||
checkTEVMCode func(codeHash common.Hash) (bool, error)
|
||||
)
|
||||
|
||||
if cfg.writeTEVM {
|
||||
checkTEVMCode = ethdb.GetCheckTEVM(tx)
|
||||
} else {
|
||||
checkTEVMCode = nil
|
||||
}
|
||||
readerWriterWrapper := func(r state.StateReader, w state.WriterWithChangeSets) *TouchReaderWriter {
|
||||
stateReaderWriter = NewTouchCreateWatcher(r, w, checkTEVMCode)
|
||||
return stateReaderWriter
|
||||
}
|
||||
|
||||
if err = executeBlockWithGo(block, tx, batch, cfg, writeChangesets, traceCursor, accumulator, readerWriterWrapper, checkTEVMCode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TEVM marking new contracts sub-stage
|
||||
if cfg.writeTEVM {
|
||||
codeHashes := stateReaderWriter.AllTouches()
|
||||
touchedСontracts := make(common.Hashes, 0, len(codeHashes))
|
||||
|
||||
for codeHash := range codeHashes {
|
||||
touchedСontracts = append(touchedСontracts, codeHash)
|
||||
}
|
||||
sort.Sort(touchedСontracts)
|
||||
|
||||
var blockNumEnc [8]byte
|
||||
binary.BigEndian.PutUint64(blockNumEnc[:], blockNum)
|
||||
|
||||
var prev common.Hash
|
||||
for i, hash := range touchedСontracts {
|
||||
var h [common.HashLength]byte
|
||||
copy(h[:], hash[:])
|
||||
|
||||
if i == 0 {
|
||||
if err = tevmStatusCursor.Append(blockNumEnc[:], h[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err = tevmStatusCursor.AppendDup(blockNumEnc[:], h[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
copy(prev[:], h[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stageProgress = blockNum
|
||||
@ -319,11 +406,17 @@ func SpawnExecuteBlocksStage(s *StageState, tx ethdb.RwTx, toBlock uint64, quit
|
||||
}
|
||||
// TODO: This creates stacked up deferrals
|
||||
defer tx.Rollback()
|
||||
|
||||
if cfg.writeCallTraces {
|
||||
if traceCursor, err = tx.RwCursorDupSort(dbutils.CallTraceSet); err != nil {
|
||||
return fmt.Errorf("%s: failed to create cursor for call traces: %v", logPrefix, err)
|
||||
}
|
||||
}
|
||||
if cfg.writeTEVM {
|
||||
if tevmStatusCursor, err = tx.RwCursorDupSort(dbutils.ContractTEVMCodeStatusBucket); err != nil {
|
||||
return fmt.Errorf("%s: failed to create cursor for tevm statuses: %v", logPrefix, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
batch = ethdb.NewBatch(tx)
|
||||
// TODO: This creates stacked up deferrals
|
||||
@ -581,6 +674,25 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx ethdb.RwTx, quit <-c
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.writeTEVM {
|
||||
keyStart := dbutils.EncodeBlockNumber(u.UnwindPoint + 1)
|
||||
tevmStatusCursor, err := tx.RwCursorDupSort(dbutils.ContractTEVMCodeStatusBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tevmStatusCursor.Close()
|
||||
|
||||
for k, _, err := tevmStatusCursor.Seek(keyStart); k != nil; k, _, err = tevmStatusCursor.NextNoDup() {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = tevmStatusCursor.DeleteCurrentDuplicates()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -625,3 +737,107 @@ func min(a, b uint64) uint64 {
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func NewTouchCreateWatcher(r state.StateReader, w state.WriterWithChangeSets, check func(hash common.Hash) (bool, error)) *TouchReaderWriter {
|
||||
return &TouchReaderWriter{
|
||||
r: r,
|
||||
w: w,
|
||||
readCodes: make(map[common.Hash]struct{}),
|
||||
updatedCodes: make(map[common.Hash]struct{}),
|
||||
check: check,
|
||||
}
|
||||
}
|
||||
|
||||
// TouchReaderWriter delegates all state reads and writes to the wrapped
// reader/writer pair while recording the code hashes "touched" by
// ReadAccountCode and UpdateAccountCode — specifically those for which the
// configured check function returned false.
type TouchReaderWriter struct {
	r state.StateReader          // underlying state reader; all Read* calls delegate here
	w state.WriterWithChangeSets // underlying state writer; all write calls delegate here
	readCodes map[common.Hash]struct{}    // code hashes seen via ReadAccountCode with check == false
	updatedCodes map[common.Hash]struct{} // code hashes seen via UpdateAccountCode with check == false
	check func(hash common.Hash) (bool, error) // optional; nil disables touch tracking
}
|
||||
|
||||
// ReadAccountData delegates to the wrapped reader; account reads are not tracked.
func (d *TouchReaderWriter) ReadAccountData(address common.Address) (*accounts.Account, error) {
	return d.r.ReadAccountData(address)
}
|
||||
|
||||
// ReadAccountStorage delegates to the wrapped reader; storage reads are not tracked.
func (d *TouchReaderWriter) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) {
	return d.r.ReadAccountStorage(address, incarnation, key)
}
|
||||
|
||||
func (d *TouchReaderWriter) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) {
|
||||
if d.check != nil && codeHash != (common.Hash{}) {
|
||||
_, ok := d.readCodes[codeHash]
|
||||
if !ok {
|
||||
ok, err := d.check(codeHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
d.readCodes[codeHash] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return d.r.ReadAccountCode(address, incarnation, codeHash)
|
||||
}
|
||||
|
||||
// ReadAccountCodeSize delegates to the wrapped reader; unlike ReadAccountCode,
// size reads do not record the code hash as touched.
func (d *TouchReaderWriter) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) {
	return d.r.ReadAccountCodeSize(address, incarnation, codeHash)
}
|
||||
|
||||
// ReadAccountIncarnation delegates to the wrapped reader; not tracked.
func (d *TouchReaderWriter) ReadAccountIncarnation(address common.Address) (uint64, error) {
	return d.r.ReadAccountIncarnation(address)
}
|
||||
|
||||
// WriteChangeSets delegates to the wrapped writer.
func (d *TouchReaderWriter) WriteChangeSets() error {
	return d.w.WriteChangeSets()
}
|
||||
|
||||
// WriteHistory delegates to the wrapped writer.
func (d *TouchReaderWriter) WriteHistory() error {
	return d.w.WriteHistory()
}
|
||||
|
||||
// UpdateAccountData delegates to the wrapped writer; account updates are not tracked.
func (d *TouchReaderWriter) UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error {
	return d.w.UpdateAccountData(ctx, address, original, account)
}
|
||||
|
||||
func (d *TouchReaderWriter) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error {
|
||||
if d.check != nil && codeHash != (common.Hash{}) {
|
||||
_, ok := d.updatedCodes[codeHash]
|
||||
if !ok {
|
||||
ok, err := d.check(codeHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
d.updatedCodes[codeHash] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return d.w.UpdateAccountCode(address, incarnation, codeHash, code)
|
||||
}
|
||||
|
||||
// DeleteAccount delegates to the wrapped writer; deletions are not tracked.
func (d *TouchReaderWriter) DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error {
	return d.w.DeleteAccount(ctx, address, original)
}
|
||||
|
||||
// WriteAccountStorage delegates to the wrapped writer; storage writes are not tracked.
func (d *TouchReaderWriter) WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error {
	return d.w.WriteAccountStorage(ctx, address, incarnation, key, original, value)
}
|
||||
|
||||
// CreateContract delegates to the wrapped writer; contract creation itself is
// not tracked here — the code hash is captured later via UpdateAccountCode.
func (d *TouchReaderWriter) CreateContract(address common.Address) error {
	return d.w.CreateContract(address)
}
|
||||
|
||||
func (d *TouchReaderWriter) AllTouches() map[common.Hash]struct{} {
|
||||
c := make(map[common.Hash]struct{}, len(d.readCodes))
|
||||
|
||||
for h := range d.readCodes {
|
||||
c[h] = struct{}{}
|
||||
}
|
||||
for h := range d.updatedCodes {
|
||||
c[h] = struct{}{}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ func TestUnwindExecutionStagePlainStatic(t *testing.T) {
|
||||
t.Errorf("error while unwinding state: %v", err)
|
||||
}
|
||||
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket)
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.ContractTEVMCodeBucket)
|
||||
}
|
||||
|
||||
func TestUnwindExecutionStagePlainWithIncarnationChanges(t *testing.T) {
|
||||
@ -47,7 +47,7 @@ func TestUnwindExecutionStagePlainWithIncarnationChanges(t *testing.T) {
|
||||
t.Errorf("error while unwinding state: %v", err)
|
||||
}
|
||||
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket)
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.ContractTEVMCodeStatusBucket, dbutils.ContractTEVMCodeBucket)
|
||||
}
|
||||
|
||||
func TestUnwindExecutionStagePlainWithCodeChanges(t *testing.T) {
|
||||
@ -69,5 +69,5 @@ func TestUnwindExecutionStagePlainWithCodeChanges(t *testing.T) {
|
||||
t.Errorf("error while unwinding state: %v", err)
|
||||
}
|
||||
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket)
|
||||
compareCurrentState(t, tx1, tx2, dbutils.PlainStateBucket, dbutils.PlainContractCodeBucket, dbutils.ContractTEVMCodeStatusBucket, dbutils.ContractTEVMCodeBucket)
|
||||
}
|
||||
|
@ -69,13 +69,14 @@ func SpawnMiningExecStage(s *StageState, tx ethdb.RwTx, cfg MiningExecCfg, curre
|
||||
}
|
||||
|
||||
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) }
|
||||
checkTEVM := ethdb.GetCheckTEVM(tx)
|
||||
|
||||
// Short circuit if there is no available pending transactions.
|
||||
// But if we disable empty precommit already, ignore it. Since
|
||||
// empty block is necessary to keep the liveness of the network.
|
||||
if noempty {
|
||||
if !localTxs.Empty() {
|
||||
logs, err := addTransactionsToMiningBlock(current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, localTxs, cfg.mining.Etherbase, ibs, quit)
|
||||
logs, err := addTransactionsToMiningBlock(current, cfg.chainConfig, cfg.vmConfig, getHeader, checkTEVM, cfg.engine, localTxs, cfg.mining.Etherbase, ibs, quit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -87,7 +88,7 @@ func SpawnMiningExecStage(s *StageState, tx ethdb.RwTx, cfg MiningExecCfg, curre
|
||||
//}
|
||||
}
|
||||
if !remoteTxs.Empty() {
|
||||
logs, err := addTransactionsToMiningBlock(current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, remoteTxs, cfg.mining.Etherbase, ibs, quit)
|
||||
logs, err := addTransactionsToMiningBlock(current, cfg.chainConfig, cfg.vmConfig, getHeader, checkTEVM, cfg.engine, remoteTxs, cfg.mining.Etherbase, ibs, quit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -140,7 +141,7 @@ func SpawnMiningExecStage(s *StageState, tx ethdb.RwTx, cfg MiningExecCfg, curre
|
||||
return nil
|
||||
}
|
||||
|
||||
func addTransactionsToMiningBlock(current *miningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}) (types.Logs, error) {
|
||||
func addTransactionsToMiningBlock(current *miningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, checkTEVM func(hash common.Hash) (bool, error), engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}) (types.Logs, error) {
|
||||
header := current.Header
|
||||
tcount := 0
|
||||
gasPool := new(core.GasPool).AddGas(current.Header.GasLimit)
|
||||
@ -151,7 +152,7 @@ func addTransactionsToMiningBlock(current *miningBlock, chainConfig params.Chain
|
||||
|
||||
var miningCommitTx = func(txn types.Transaction, coinbase common.Address, vmConfig *vm.Config, chainConfig params.ChainConfig, ibs *state.IntraBlockState, current *miningBlock) ([]*types.Log, error) {
|
||||
snap := ibs.Snapshot()
|
||||
receipt, err := core.ApplyTransaction(&chainConfig, getHeader, engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig)
|
||||
receipt, err := core.ApplyTransaction(&chainConfig, getHeader, engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, checkTEVM)
|
||||
if err != nil {
|
||||
ibs.RevertToSnapshot(snap)
|
||||
return nil, err
|
||||
|
271
eth/stagedsync/stage_tevm.go
Normal file
271
eth/stagedsync/stage_tevm.go
Normal file
@ -0,0 +1,271 @@
|
||||
package stagedsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/ledgerwatch/erigon/common"
|
||||
"github.com/ledgerwatch/erigon/common/dbutils"
|
||||
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
|
||||
"github.com/ledgerwatch/erigon/ethdb"
|
||||
"github.com/ledgerwatch/erigon/log"
|
||||
"github.com/ledgerwatch/erigon/metrics"
|
||||
"github.com/ledgerwatch/erigon/params"
|
||||
)
|
||||
|
||||
// stageTranspileGauge is incremented with the byte size of each committed
// translation batch (see transpileBatch).
var stageTranspileGauge = metrics.NewRegisteredGauge("stage/tevm", nil)
|
||||
|
||||
// TranspileCfg carries the dependencies of the EVM-to-TEVM translation stage.
type TranspileCfg struct {
	db ethdb.RwKV                 // used to open transactions when the caller supplies none
	batchSize datasize.ByteSize   // flush threshold for the pending write batch
	readerBuilder StateReaderBuilder // NOTE(review): not referenced by the visible stage code
	writerBuilder StateWriterBuilder // NOTE(review): not referenced by the visible stage code
	chainConfig *params.ChainConfig
}
|
||||
|
||||
func StageTranspileCfg(
|
||||
kv ethdb.RwKV,
|
||||
batchSize datasize.ByteSize,
|
||||
readerBuilder StateReaderBuilder,
|
||||
writerBuilder StateWriterBuilder,
|
||||
chainConfig *params.ChainConfig,
|
||||
) TranspileCfg {
|
||||
return TranspileCfg{
|
||||
db: kv,
|
||||
batchSize: batchSize,
|
||||
readerBuilder: readerBuilder,
|
||||
writerBuilder: writerBuilder,
|
||||
chainConfig: chainConfig,
|
||||
}
|
||||
}
|
||||
|
||||
// transpileBatch walks the ContractTEVMCodeStatusBucket from fromBlock+1,
// translates each referenced contract's EVM code via transpileCode, and stores
// the result in ContractTEVMCodeBucket through the write batch. When the batch
// reaches cfg.batchSize it is committed; if the stage owns the transaction
// (!useExternalTx), the transaction is also committed and reopened.
// Returns nil when the cursor is exhausted or a key beyond toBlock is reached.
func transpileBatch(logPrefix string, s *StageState, fromBlock uint64, toBlock uint64, tx ethdb.RwTx, batch ethdb.DbWithPendingMutations, cfg TranspileCfg, useExternalTx bool, quitCh <-chan struct{}) error {
	logEvery := time.NewTicker(logInterval)
	defer logEvery.Stop()

	// stageProgress counts translated contracts, not blocks.
	// NOTE(review): it is passed to s.Update below, where other stages record
	// a block number — confirm this is intentional.
	stageProgress := uint64(0)
	logBlock := stageProgress
	logTime := time.Now()

	// read contracts pending for translation
	keyStart := dbutils.EncodeBlockNumber(fromBlock + 1)
	c, err := tx.CursorDupSort(dbutils.ContractTEVMCodeStatusBucket)
	if err != nil {
		return err
	}
	defer c.Close()

	// NOTE(review): the cursor c is bound to the initial tx; if the
	// commit-and-reopen path below runs, iteration continues on a cursor whose
	// transaction was committed — verify the cursor remains valid.
	for k, hash, err := c.Seek(keyStart); k != nil; k, hash, err = c.Next() {
		if err != nil {
			return fmt.Errorf("can't read pending code translations: %w", err)
		}
		if err = common.Stopped(quitCh); err != nil {
			return fmt.Errorf("can't read pending code translations: %w", err)
		}

		// Non-blocking progress log on the ticker.
		select {
		case <-logEvery.C:
			logBlock, logTime = logTEVMProgress(logPrefix, logBlock, logTime, stageProgress)
			if hasTx, ok := tx.(ethdb.HasTx); ok {
				hasTx.Tx().CollectMetrics()
			}
		default:
		}

		// Keys are big-endian block numbers; stop past toBlock.
		block, err := dbutils.DecodeBlockNumber(k)
		if err != nil {
			return fmt.Errorf("can't read pending code translations: %w", err)
		}

		if block > toBlock {
			return nil
		}

		// load the contract code. don't use batch to prevent a data race on creating a new batch variable.
		evmContract, err := batch.GetOne(dbutils.CodeBucket, hash)
		if err != nil {
			return fmt.Errorf("can't read pending code translations: %w", err)
		}

		// call a transpiler
		transpiledCode, err := transpileCode(evmContract)
		if err != nil {
			return fmt.Errorf("contract %q cannot be translated: %w",
				common.BytesToHash(hash).String(), err)
		}

		// store TEVM contract code
		err = batch.Put(dbutils.ContractTEVMCodeBucket, hash, transpiledCode)
		if err != nil {
			return fmt.Errorf("cannot store TEVM code %q: %w", common.BytesToHash(hash), err)
		}

		stageProgress++

		currentSize := batch.BatchSize()
		updateProgress := currentSize >= int(cfg.batchSize)

		if updateProgress {
			if err = batch.Commit(); err != nil {
				return fmt.Errorf("cannot commit the batch of translations on %q: %w",
					common.BytesToHash(hash), err)
			}

			// Only commit/reopen the transaction when this stage owns it.
			if !useExternalTx {
				if err = s.Update(tx, stageProgress); err != nil {
					return fmt.Errorf("cannot update the stage status on %q: %w",
						common.BytesToHash(hash), err)
				}
				if err = tx.Commit(); err != nil {
					return fmt.Errorf("cannot commit the external transation on %q: %w",
						common.BytesToHash(hash), err)
				}

				tx, err = cfg.db.BeginRw(context.Background())
				if err != nil {
					return fmt.Errorf("cannot begin the batch transaction on %q: %w",
						common.BytesToHash(hash), err)
				}

				// TODO: This creates stacked up deferrals
				defer tx.Rollback()
			}

			// Rebind the batch to the (possibly new) transaction.
			batch = ethdb.NewBatch(tx)
			// TODO: This creates stacked up deferrals
			defer batch.Rollback()

			stageTranspileGauge.Inc(int64(currentSize))
		}
	}

	log.Info(fmt.Sprintf("[%s] Completed on", logPrefix), "contracts", stageProgress)

	return nil
}
|
||||
|
||||
func logTEVMProgress(logPrefix string, prevContract uint64, prevTime time.Time, currentContract uint64) (uint64, time.Time) {
|
||||
currentTime := time.Now()
|
||||
interval := currentTime.Sub(prevTime)
|
||||
speed := float64(currentContract-prevContract) / float64(interval/time.Second)
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
var logpairs = []interface{}{
|
||||
"number", currentContract,
|
||||
"contracts/second", speed,
|
||||
}
|
||||
logpairs = append(logpairs, "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys), "numGC", int(m.NumGC))
|
||||
log.Info(fmt.Sprintf("[%s] Translated contracts", logPrefix), logpairs...)
|
||||
|
||||
return currentContract, currentTime
|
||||
}
|
||||
|
||||
// SpawnTranspileStage runs the EVM-to-TEVM translation stage from the stage's
// current block up to min(Execution progress, toBlock). It opens its own
// transaction when tx is nil and records stage progress equal to the
// Execution stage's progress on completion.
func SpawnTranspileStage(s *StageState, tx ethdb.RwTx, toBlock uint64, quit <-chan struct{}, cfg TranspileCfg) error {
	useExternalTx := tx != nil
	if !useExternalTx {
		var err error
		tx, err = cfg.db.BeginRw(context.Background())
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// This stage never advances past the Execution stage.
	prevStageProgress, errStart := stages.GetStageProgress(tx, stages.Execution)
	if errStart != nil {
		return errStart
	}

	var to = prevStageProgress
	if toBlock > 0 {
		to = min(prevStageProgress, toBlock)
	}

	// Nothing to do — already at or beyond the target.
	if to <= s.BlockNumber {
		s.Done()
		return nil
	}

	logPrefix := s.state.LogPrefix()
	log.Info(fmt.Sprintf("[%s] Contract translation", logPrefix), "from", s.BlockNumber, "to", to)

	batch := ethdb.NewBatch(tx)
	defer batch.Rollback()

	err := common.Stopped(quit)
	if err != nil {
		return err
	}

	// NOTE(review): transpileBatch may internally commit and reopen its local
	// copy of tx; the tx/batch committed below are still the originals bound
	// here — confirm this interaction is sound.
	if err = transpileBatch(logPrefix, s, s.BlockNumber, to, tx, batch, cfg, useExternalTx, quit); err != nil {
		return err
	}

	// commit the same number as execution
	if err := s.Update(batch, prevStageProgress); err != nil {
		return err
	}
	if err := batch.Commit(); err != nil {
		return fmt.Errorf("%s: failed to write batch commit: %v", logPrefix, err)
	}

	if !useExternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}

	log.Info(fmt.Sprintf("[%s] Completed on", logPrefix), "block", prevStageProgress)
	s.Done()
	return nil
}
|
||||
|
||||
// UnwindTranspileStage removes translated TEVM code for every code hash
// recorded in ContractTEVMCodeStatusBucket at blocks above the unwind point.
// The quit channel parameter is intentionally ignored.
func UnwindTranspileStage(u *UnwindState, s *StageState, tx ethdb.RwTx, _ <-chan struct{}, cfg TranspileCfg) error {
	useExternalTx := tx != nil
	if !useExternalTx {
		var err error
		tx, err = cfg.db.BeginRw(context.Background())
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	keyStart := dbutils.EncodeBlockNumber(u.UnwindPoint + 1)
	c, err := tx.CursorDupSort(dbutils.ContractTEVMCodeStatusBucket)
	if err != nil {
		return err
	}
	defer c.Close()

	// Each dup value `hash` under a block-number key is a code hash; delete
	// the corresponding translated code.
	// NOTE(review): the status entries themselves are not deleted here —
	// presumably the execution-stage unwind clears them; verify.
	for k, hash, err := c.Seek(keyStart); k != nil; k, hash, err = c.Next() {
		if err != nil {
			return err
		}

		if err = tx.Delete(dbutils.ContractTEVMCodeBucket, hash, nil); err != nil {
			return err
		}
	}

	err = u.Done(tx)
	logPrefix := s.state.LogPrefix()
	if err != nil {
		return fmt.Errorf("%s: reset: %v", logPrefix, err)
	}
	if !useExternalTx {
		err = tx.Commit()
		if err != nil {
			return fmt.Errorf("%s: failed to write db commit: %v", logPrefix, err)
		}
	}
	return nil
}
|
||||
|
||||
// todo: TBD actual TEVM translator
|
||||
func transpileCode(code []byte) ([]byte, error) {
|
||||
return append(make([]byte, 0, len(code)), code...), nil
|
||||
}
|
@ -263,6 +263,7 @@ func DefaultStages() StageBuilders {
|
||||
world.DB.RwKV(),
|
||||
world.storageMode.Receipts,
|
||||
world.storageMode.CallTraces,
|
||||
world.storageMode.TEVM,
|
||||
0,
|
||||
world.BatchSize,
|
||||
world.stateReaderBuilder,
|
||||
|
@ -35,6 +35,7 @@ var (
|
||||
Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
|
||||
Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written
|
||||
Execution SyncStage = "Execution" // Executing each block w/o buildinf a trie
|
||||
Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM)
|
||||
IntermediateHashes SyncStage = "IntermediateHashes" // Generate intermediate hashes, calculate the state root hash
|
||||
HashState SyncStage = "HashState" // Apply Keccak256 to all the keys in the state
|
||||
AccountHistoryIndex SyncStage = "AccountHistoryIndex" // Generating history index for accounts
|
||||
@ -60,6 +61,7 @@ var AllStages = []SyncStage{
|
||||
Bodies,
|
||||
Senders,
|
||||
Execution,
|
||||
Translation,
|
||||
IntermediateHashes,
|
||||
HashState,
|
||||
AccountHistoryIndex,
|
||||
|
@ -58,7 +58,7 @@ func compareBucket(t *testing.T, db1, db2 ethdb.Tx, bucketName string) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, bucket1 /*expected*/, bucket2 /*actual*/)
|
||||
assert.Equalf(t, bucket1 /*expected*/, bucket2 /*actual*/, "bucket %q", bucketName)
|
||||
}
|
||||
|
||||
type stateWriterGen func(uint64) state.WriterWithChangeSets
|
||||
|
@ -56,7 +56,10 @@ type vmContext struct {
|
||||
}
|
||||
|
||||
func testCtx() *vmContext {
|
||||
return &vmContext{blockCtx: vm.BlockContext{BlockNumber: 1}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
||||
return &vmContext{blockCtx: vm.BlockContext{
|
||||
BlockNumber: 1,
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
||||
}
|
||||
|
||||
func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
|
||||
@ -65,7 +68,7 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
|
||||
startGas uint64 = 10000
|
||||
value = uint256.NewInt()
|
||||
)
|
||||
contract := vm.NewContract(account{}, account{}, value, startGas, false)
|
||||
contract := vm.NewContract(account{}, account{}, value, startGas, false, false)
|
||||
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
|
||||
|
||||
if err := tracer.CaptureStart(0, contract.Caller(), contract.Address(), false, false, vm.CallType(0), []byte{}, startGas, big.NewInt(int64(value.Uint64()))); err != nil {
|
||||
@ -84,7 +87,10 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
|
||||
func TestTracer(t *testing.T) {
|
||||
execTracer := func(code string) []byte {
|
||||
t.Helper()
|
||||
ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: 1}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
||||
ctx := &vmContext{blockCtx: vm.BlockContext{
|
||||
BlockNumber: 1,
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
||||
tracer, err := New(code, ctx.txCtx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -154,8 +160,11 @@ func TestHaltBetweenSteps(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
env := vm.NewEVM(vm.BlockContext{BlockNumber: 1}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
contract := vm.NewContract(&account{}, &account{}, uint256.NewInt(), 0, false)
|
||||
env := vm.NewEVM(vm.BlockContext{
|
||||
BlockNumber: 1,
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
|
||||
contract := vm.NewContract(&account{}, &account{}, uint256.NewInt(), 0, false, false)
|
||||
|
||||
tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, contract, 0, nil) //nolint:errcheck
|
||||
timeout := errors.New("stahp")
|
||||
|
@ -156,6 +156,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
|
||||
CanTransfer: core.CanTransfer,
|
||||
Transfer: core.Transfer,
|
||||
Coinbase: common.Address{},
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
BlockNumber: 8000000,
|
||||
Time: 5,
|
||||
Difficulty: big.NewInt(0x30000),
|
||||
@ -251,6 +252,7 @@ func TestCallTracer(t *testing.T) {
|
||||
Time: uint64(test.Context.Time),
|
||||
Difficulty: (*big.Int)(test.Context.Difficulty),
|
||||
GasLimit: uint64(test.Context.GasLimit),
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
}
|
||||
statedb, _ := tests.MakePreState(ctx, ethdb.NewTestDB(t), test.Genesis.Alloc, uint64(test.Context.Number))
|
||||
|
||||
|
@ -2,11 +2,14 @@ package ethdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ledgerwatch/erigon/common"
|
||||
"github.com/ledgerwatch/erigon/common/dbutils"
|
||||
"github.com/ledgerwatch/erigon/log"
|
||||
)
|
||||
|
||||
@ -135,3 +138,35 @@ func testKVPath() string {
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// todo: return TEVM code and use it
|
||||
// GetCheckTEVM returns a closure reporting whether a code hash should be
// treated as having TEVM code, backed by the status and code buckets and a
// small in-memory cache.
//
// NOTE(review): the returned closure is not safe for concurrent use — it
// mutates the shared `checked` map and the captured `ok` variable.
// NOTE(review): the error handling assumes db.Has surfaces ErrKeyNotFound for
// absent keys (any other non-nil or nil error takes the early return) —
// confirm against the KVGetter contract.
// NOTE(review): the cache stores hashes for which the code bucket lookup
// returned !ok, yet a cache hit returns true — these look inconsistent;
// verify the intended polarity.
// todo: return TEVM code and use it
func GetCheckTEVM(db KVGetter) func(codeHash common.Hash) (bool, error) {
	checked := map[common.Hash]struct{}{}
	var ok bool

	return func(codeHash common.Hash) (bool, error) {
		// Cached hashes short-circuit to true.
		if _, ok = checked[codeHash]; ok {
			return true, nil
		}

		// A hash still present in the status bucket is pending translation.
		ok, err := db.Has(dbutils.ContractTEVMCodeStatusBucket, codeHash.Bytes())
		if !errors.Is(err, ErrKeyNotFound) {
			return false, err
		}

		if ok {
			return false, ErrKeyNotFound
		}

		// Otherwise consult the translated-code bucket.
		ok, err = db.Has(dbutils.ContractTEVMCodeBucket, codeHash.Bytes())
		if !errors.Is(err, ErrKeyNotFound) {
			return false, err
		}

		if !ok {
			checked[codeHash] = struct{}{}
		}

		return ok, nil
	}
}
|
||||
|
@ -12,9 +12,17 @@ type StorageMode struct {
|
||||
Receipts bool
|
||||
TxIndex bool
|
||||
CallTraces bool
|
||||
TEVM bool
|
||||
}
|
||||
|
||||
var DefaultStorageMode = StorageMode{Initialised: true, History: true, Receipts: true, TxIndex: true, CallTraces: true}
|
||||
var DefaultStorageMode = StorageMode{
|
||||
Initialised: true,
|
||||
History: true,
|
||||
Receipts: true,
|
||||
TxIndex: true,
|
||||
CallTraces: true,
|
||||
TEVM: false,
|
||||
}
|
||||
|
||||
func (m StorageMode) ToString() string {
|
||||
if !m.Initialised {
|
||||
@ -33,6 +41,9 @@ func (m StorageMode) ToString() string {
|
||||
if m.CallTraces {
|
||||
modeString += "c"
|
||||
}
|
||||
if m.TEVM {
|
||||
modeString += "e"
|
||||
}
|
||||
return modeString
|
||||
}
|
||||
|
||||
@ -52,6 +63,8 @@ func StorageModeFromString(flags string) (StorageMode, error) {
|
||||
mode.TxIndex = true
|
||||
case 'c':
|
||||
mode.CallTraces = true
|
||||
case 'e':
|
||||
mode.TEVM = true
|
||||
default:
|
||||
return mode, fmt.Errorf("unexpected flag found: %c", flag)
|
||||
}
|
||||
@ -91,6 +104,12 @@ func GetStorageModeFromDB(db KVGetter) (StorageMode, error) {
|
||||
return StorageMode{}, err
|
||||
}
|
||||
sm.CallTraces = len(v) == 1 && v[0] == 1
|
||||
|
||||
v, err = db.GetOne(dbutils.DatabaseInfoBucket, dbutils.StorageModeTEVM)
|
||||
if err != nil {
|
||||
return StorageMode{}, err
|
||||
}
|
||||
sm.TEVM = len(v) == 1 && v[0] == 1
|
||||
return sm, nil
|
||||
}
|
||||
|
||||
@ -119,6 +138,11 @@ func OverrideStorageMode(db RwTx, sm StorageMode) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = setMode(db, dbutils.StorageModeTEVM, sm.TEVM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -150,6 +174,11 @@ func SetStorageModeIfNotExist(db RwTx, sm StorageMode) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = setModeOnEmpty(db, dbutils.StorageModeTEVM, sm.TEVM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -24,6 +24,7 @@ func TestSetStorageModeIfNotExist(t *testing.T) {
|
||||
true,
|
||||
true,
|
||||
true,
|
||||
false,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -40,6 +41,7 @@ func TestSetStorageModeIfNotExist(t *testing.T) {
|
||||
true,
|
||||
true,
|
||||
true,
|
||||
false,
|
||||
}) {
|
||||
spew.Dump(sm)
|
||||
t.Fatal("not equal")
|
||||
|
@ -201,7 +201,8 @@ func (t *StateTest) RunNoVerify(ctx context.Context, kvtx ethdb.RwTx, subtest St
|
||||
|
||||
// Prepare the EVM.
|
||||
txContext := core.NewEVMTxContext(msg)
|
||||
context := core.NewEVMBlockContext(block.Header(), nil, nil, &t.json.Env.Coinbase)
|
||||
checkTEVM := func(common.Hash) (bool, error) { return false, nil }
|
||||
context := core.NewEVMBlockContext(block.Header(), nil, nil, &t.json.Env.Coinbase, checkTEVM)
|
||||
context.GetHash = vmTestBlockHash
|
||||
evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
|
||||
|
||||
|
@ -155,6 +155,7 @@ func (t *VMTest) newEVM(state vm.IntraBlockState, vmconfig vm.Config) *vm.EVM {
|
||||
CanTransfer: canTransfer,
|
||||
Transfer: transfer,
|
||||
GetHash: vmTestBlockHash,
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
Coinbase: t.json.Env.Coinbase,
|
||||
BlockNumber: t.json.Env.Number,
|
||||
Time: t.json.Env.Timestamp,
|
||||
|
@ -61,7 +61,8 @@ var (
|
||||
* h - write history to the DB
|
||||
* r - write receipts to the DB
|
||||
* t - write tx lookup index to the DB
|
||||
* c - write call traces index to the DB`,
|
||||
* c - write call traces index to the DB,
|
||||
* e - write TEVM translated code to the DB`,
|
||||
Value: "default",
|
||||
}
|
||||
SnapshotModeFlag = cli.StringFlag{
|
||||
|
@ -33,6 +33,7 @@ func NewStagedSync(
|
||||
bodies stagedsync.BodiesCfg,
|
||||
senders stagedsync.SendersCfg,
|
||||
exec stagedsync.ExecuteBlockCfg,
|
||||
trans stagedsync.TranspileCfg,
|
||||
hashState stagedsync.HashStateCfg,
|
||||
trieCfg stagedsync.TrieCfg,
|
||||
history stagedsync.HistoryCfg,
|
||||
@ -43,7 +44,7 @@ func NewStagedSync(
|
||||
finish stagedsync.FinishCfg,
|
||||
) *stagedsync.StagedSync {
|
||||
return stagedsync.New(
|
||||
stagedsync.ReplacementStages(ctx, sm, headers, blockHashes, bodies, senders, exec, hashState, trieCfg, history, logIndex, callTraces, txLookup, txPool, finish),
|
||||
stagedsync.ReplacementStages(ctx, sm, headers, blockHashes, bodies, senders, exec, trans, hashState, trieCfg, history, logIndex, callTraces, txLookup, txPool, finish),
|
||||
stagedsync.ReplacementUnwindOrder(),
|
||||
stagedsync.OptionalParameters{},
|
||||
)
|
||||
|
@ -127,6 +127,7 @@ func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool
|
||||
CanTransfer: core.CanTransfer,
|
||||
Transfer: core.Transfer,
|
||||
GetHash: getHashGetter(requireCanonical, tx),
|
||||
CheckTEVM: func(common.Hash) (bool, error) { return false, nil },
|
||||
Coinbase: header.Coinbase,
|
||||
BlockNumber: header.Number.Uint64(),
|
||||
Time: header.Time,
|
||||
|
@ -36,7 +36,7 @@ type BlockGetter interface {
|
||||
}
|
||||
|
||||
// computeTxEnv returns the execution environment of a certain transaction.
|
||||
func ComputeTxEnv(ctx context.Context, blockGetter BlockGetter, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, dbtx ethdb.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainKVState, error) {
|
||||
func ComputeTxEnv(ctx context.Context, blockGetter BlockGetter, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, checkTEVM func(hash common.Hash) (bool, error), engine consensus.Engine, dbtx ethdb.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainKVState, error) {
|
||||
// Create the parent state database
|
||||
block, err := blockGetter.GetBlockByHash(blockHash)
|
||||
if err != nil {
|
||||
@ -69,7 +69,7 @@ func ComputeTxEnv(ctx context.Context, blockGetter BlockGetter, cfg *params.Chai
|
||||
|
||||
// Assemble the transaction call message and return if the requested offset
|
||||
msg, _ := tx.AsMessage(*signer, block.Header().BaseFee)
|
||||
BlockContext := core.NewEVMBlockContext(block.Header(), getHeader, engine, nil)
|
||||
BlockContext := core.NewEVMBlockContext(block.Header(), getHeader, engine, nil, checkTEVM)
|
||||
TxContext := core.NewEVMTxContext(msg)
|
||||
if idx == int(txIndex) {
|
||||
return msg, BlockContext, TxContext, statedb, reader, nil
|
||||
|
Loading…
Reference in New Issue
Block a user