Aggregator22.Unwind() (#5039)

* save

* save
Alex Sharov 2022-08-13 18:51:25 +07:00 committed by GitHub
parent 64bc837ccf
commit 52fd0d0e8b
37 changed files with 158 additions and 62 deletions

View File

@@ -105,7 +105,7 @@ func New(snapDir string, verbosity lg.Level, dbg bool, natif nat.Interface, down
// rates are divided by 2 - I don't know why it works, maybe bug inside torrent lib accounting
torrentConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(uploadRate.Bytes()), 2*DefaultNetworkChunkSize) // default: unlimited
if downloadRate.Bytes() < 500_000_000 {
-b := int(2 * DefaultNetworkChunkSize)
+b := 2 * DefaultNetworkChunkSize
if downloadRate.Bytes() > DefaultNetworkChunkSize {
b = int(2 * downloadRate.Bytes())
}
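For context, a minimal standalone sketch of the pattern in the hunk above, using golang.org/x/time/rate: the limiter's burst is derived from the configured download rate, with a floor of two network chunks. The chunk-size constant and helper name are illustrative, not Erigon's.

package main

import (
    "fmt"

    "golang.org/x/time/rate"
)

const defaultNetworkChunkSize = 1 << 20 // assumed 1 MiB chunk size, for illustration only

// newDownloadLimiter mirrors the logic above: burst is at least two chunks,
// or twice the configured per-second rate when that rate exceeds one chunk.
func newDownloadLimiter(downloadRateBytes uint64) *rate.Limiter {
    burst := 2 * defaultNetworkChunkSize
    if downloadRateBytes > defaultNetworkChunkSize {
        burst = int(2 * downloadRateBytes)
    }
    return rate.NewLimiter(rate.Limit(downloadRateBytes), burst)
}

func main() {
    l := newDownloadLimiter(8 * 1 << 20) // 8 MiB/s
    fmt.Println("limit:", l.Limit(), "burst:", l.Burst())
}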

View File

@@ -193,7 +193,7 @@ func (api *BorImpl) GetCurrentValidators() ([]*bor.Validator, error) {
// GetRootHash returns the merkle root of the start to end block headers
func (api *BorImpl) GetRootHash(start, end uint64) (string, error) {
-length := uint64(end - start + 1)
+length := end - start + 1
if length > bor.MaxCheckpointLength {
return "", &bor.MaxCheckpointLengthExceededError{Start: start, End: end}
}

View File

@@ -72,7 +72,7 @@ func TestTraceBlockByNumber(t *testing.T) {
}
var buf bytes.Buffer
stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096)
-err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(rpc.LatestBlockNumber), &tracers.TraceConfig{}, stream)
+err := api.TraceBlockByNumber(context.Background(), rpc.LatestBlockNumber, &tracers.TraceConfig{}, stream)
if err != nil {
t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err)
}
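For context, a minimal sketch of the jsoniter streaming pattern this test relies on: values are encoded into a *jsoniter.Stream's internal buffer and flushed to the underlying writer. The payload here is made up for illustration; it is not Erigon's trace format.

package main

import (
    "bytes"
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    var buf bytes.Buffer
    stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096)

    // Encode an arbitrary value into the stream's internal buffer.
    stream.WriteVal(map[string]any{"block": "latest", "traces": []int{}})

    // Flush moves the buffered bytes into the underlying io.Writer (buf).
    if err := stream.Flush(); err != nil {
        panic(err)
    }
    fmt.Println(buf.String())
}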

View File

@@ -158,14 +158,14 @@ func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload
// Convert slice of hexutil.Bytes to a slice of slice of bytes
transactions := make([][]byte, len(payload.Transactions))
for i, transaction := range payload.Transactions {
-transactions[i] = ([]byte)(transaction)
+transactions[i] = transaction
}
res, err := e.api.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{
ParentHash: gointerfaces.ConvertHashToH256(payload.ParentHash),
Coinbase: gointerfaces.ConvertAddressToH160(payload.FeeRecipient),
StateRoot: gointerfaces.ConvertHashToH256(payload.StateRoot),
ReceiptRoot: gointerfaces.ConvertHashToH256(payload.ReceiptsRoot),
-LogsBloom: gointerfaces.ConvertBytesToH2048(([]byte)(payload.LogsBloom)),
+LogsBloom: gointerfaces.ConvertBytesToH2048(payload.LogsBloom),
PrevRandao: gointerfaces.ConvertHashToH256(payload.PrevRandao),
BlockNumber: uint64(payload.BlockNumber),
GasLimit: uint64(payload.GasLimit),

View File

@@ -312,7 +312,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str
it := allBlocks.Iterator()
for it.HasNext() {
-b := uint64(it.Next())
+b := it.Next()
// Extract transactions from block
hash, hashErr := rawdb.ReadCanonicalHash(dbtx, b)
if hashErr != nil {

View File

@@ -193,7 +193,7 @@ func (api *BorImpl) GetCurrentValidators() ([]*bor.Validator, error) {
// GetRootHash returns the merkle root of the start to end block headers
func (api *BorImpl) GetRootHash(start, end uint64) (string, error) {
-length := uint64(end - start + 1)
+length := end - start + 1
if length > bor.MaxCheckpointLength {
return "", &bor.MaxCheckpointLengthExceededError{Start: start, End: end}
}

View File

@@ -72,7 +72,7 @@ func TestTraceBlockByNumber(t *testing.T) {
}
var buf bytes.Buffer
stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096)
-err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(rpc.LatestBlockNumber), &tracers.TraceConfig{}, stream)
+err := api.TraceBlockByNumber(context.Background(), rpc.LatestBlockNumber, &tracers.TraceConfig{}, stream)
if err != nil {
t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err)
}

View File

@@ -158,14 +158,14 @@ func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload
// Convert slice of hexutil.Bytes to a slice of slice of bytes
transactions := make([][]byte, len(payload.Transactions))
for i, transaction := range payload.Transactions {
-transactions[i] = ([]byte)(transaction)
+transactions[i] = transaction
}
res, err := e.api.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{
ParentHash: gointerfaces.ConvertHashToH256(payload.ParentHash),
Coinbase: gointerfaces.ConvertAddressToH160(payload.FeeRecipient),
StateRoot: gointerfaces.ConvertHashToH256(payload.StateRoot),
ReceiptRoot: gointerfaces.ConvertHashToH256(payload.ReceiptsRoot),
-LogsBloom: gointerfaces.ConvertBytesToH2048(([]byte)(payload.LogsBloom)),
+LogsBloom: gointerfaces.ConvertBytesToH2048(payload.LogsBloom),
PrevRandao: gointerfaces.ConvertHashToH256(payload.PrevRandao),
BlockNumber: uint64(payload.BlockNumber),
GasLimit: uint64(payload.GasLimit),

View File

@@ -319,7 +319,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str
stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */)
noop := state.NewNoopWriter()
for it.HasNext() {
-txNum := uint64(it.Next())
+txNum := it.Next()
// Find block number
blockNum := uint64(sort.Search(len(api._txNums), func(i int) bool {
return api._txNums[i] > txNum

View File

@@ -166,7 +166,7 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64,
return h
}
contractHasTEVM := ethdb.GetHasTEVM(rwtx)
-receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, contractHasTEVM, b, vmConfig, blockNum == uint64(block))
+receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, contractHasTEVM, b, vmConfig, blockNum == block)
if err1 != nil {
return err1
}

View File

@@ -5,7 +5,7 @@ import "encoding/binary"
type Suffix []byte
func ToSuffix(b []byte) Suffix {
-return Suffix(b)
+return b
}
func (s Suffix) Add(key []byte) Suffix {
@@ -20,7 +20,7 @@ func (s Suffix) Add(key []byte) Suffix {
binary.BigEndian.PutUint32(dv, 1+s.KeyCount()) // Increment the counter of keys
dv[l] = byte(len(key))
copy(dv[l+1:], key)
-return Suffix(dv)
+return dv
}
func (s Suffix) MultiAdd(keys [][]byte) Suffix {
var l int
@@ -43,7 +43,7 @@ func (s Suffix) MultiAdd(keys [][]byte) Suffix {
copy(dv[i:], key)
i += len(key)
}
-return Suffix(dv)
+return dv
}
func (s Suffix) KeyCount() uint32 {
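From the Add/MultiAdd code above, the Suffix layout appears to be a 4-byte big-endian key counter followed by length-prefixed keys. A small decoding sketch under that assumption (not part of the package, and the helper name is mine):

package main

import (
    "encoding/binary"
    "fmt"
)

// decodeSuffix walks a buffer assumed to be laid out as:
// [4-byte big-endian key count][1-byte len][key bytes]...[1-byte len][key bytes]
func decodeSuffix(s []byte) [][]byte {
    count := binary.BigEndian.Uint32(s)
    keys := make([][]byte, 0, count)
    i := 4
    for k := uint32(0); k < count && i < len(s); k++ {
        l := int(s[i])
        keys = append(keys, s[i+1:i+1+l])
        i += 1 + l
    }
    return keys
}

func main() {
    // Hand-built example buffer with two keys: "ab" and "xyz".
    buf := []byte{0, 0, 0, 2, 2, 'a', 'b', 3, 'x', 'y', 'z'}
    for _, k := range decodeSuffix(buf) {
        fmt.Printf("%s\n", k)
    }
}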

View File

@@ -120,6 +120,6 @@ func TestMustParseUint64Panic(t *testing.T) {
func TestAbsoluteDifference(t *testing.T) {
x1 := uint64(99)
x2 := uint64(45)
-assert.Equal(t, AbsoluteDifference(x1, x2), uint64(x1-x2))
-assert.Equal(t, AbsoluteDifference(x2, x1), uint64(x1-x2))
+assert.Equal(t, AbsoluteDifference(x1, x2), x1-x2)
+assert.Equal(t, AbsoluteDifference(x2, x1), x1-x2)
}
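A plausible AbsoluteDifference consistent with this test, as a standalone sketch (the real helper lives in Erigon's math utilities; this version is just for illustration):

package main

import "fmt"

// AbsoluteDifference returns |x - y| for unsigned inputs without overflowing.
func AbsoluteDifference(x, y uint64) uint64 {
    if x > y {
        return x - y
    }
    return y - x
}

func main() {
    fmt.Println(AbsoluteDifference(99, 45), AbsoluteDifference(45, 99)) // 54 54
}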

View File

@@ -132,7 +132,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
if root, known := api.rootHashCache.Get(key); known {
return root.(string), nil
}
-length := uint64(end - start + 1)
+length := end - start + 1
if length > MaxCheckpointLength {
return "", &MaxCheckpointLengthExceededError{start, end}
}
@@ -147,7 +147,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
wg.Add(1)
concurrent <- true
go func(number uint64) {
-blockHeaders[number-start] = api.chain.GetHeaderByNumber(uint64(number))
+blockHeaders[number-start] = api.chain.GetHeaderByNumber(number)
<-concurrent
wg.Done()
}(i)
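For context, a minimal standalone sketch of the bounded-concurrency pattern visible in this hunk: a buffered channel acts as a semaphore, a WaitGroup joins the workers, and results go into pre-sized slots so no mutex is needed. The names and the worker body are illustrative, not Erigon's.

package main

import (
    "fmt"
    "sync"
)

func fetchAll(start, end uint64, fetch func(uint64) string) []string {
    results := make([]string, end-start+1)
    concurrent := make(chan bool, 4) // at most 4 in-flight fetches
    var wg sync.WaitGroup

    for i := start; i <= end; i++ {
        wg.Add(1)
        concurrent <- true // acquire a slot
        go func(number uint64) {
            results[number-start] = fetch(number) // each goroutine owns one element
            <-concurrent                          // release the slot
            wg.Done()
        }(i)
    }
    wg.Wait()
    return results
}

func main() {
    headers := fetchAll(10, 14, func(n uint64) string { return fmt.Sprintf("header-%d", n) })
    fmt.Println(headers)
}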

View File

@@ -39,7 +39,7 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) {
h := &HeimdallClient{
urlString: urlString,
client: http.Client{
-Timeout: time.Duration(5 * time.Second),
+Timeout: 5 * time.Second,
},
}
return h, nil

View File

@@ -12,6 +12,7 @@ import (
"github.com/google/btree"
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/kv"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/common"
@@ -109,6 +110,10 @@ func NewState22() *State22 {
}
func (rs *State22) put(table string, key, val []byte) {
+if table == kv.PlainState {
+fmt.Printf("table: %s, %s\n", table, dbg.Stack())
+}
t, ok := rs.changes[table]
if !ok {
t = btree.NewG[StateItem](32, stateItemLess)

View File

@@ -394,7 +394,7 @@ func (tx LegacyTx) EncodeRLP(w io.Writer) error {
// DecodeRLP decodes LegacyTx but with the list token already consumed and encodingSize being presented
func (tx *LegacyTx) DecodeRLP(s *rlp.Stream, encodingSize uint64) error {
var err error
-s.NewList(uint64(encodingSize))
+s.NewList(encodingSize)
if tx.Nonce, err = s.Uint(); err != nil {
return fmt.Errorf("read Nonce: %w", err)
}

View File

@@ -599,7 +599,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *uint2
// DESCRIBED: docs/programmers_guide/guide.md#nonce
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
codeAndHash := &codeAndHash{code: code}
-contractAddr = crypto.CreateAddress2(caller.Address(), common.Hash(salt.Bytes32()), codeAndHash.Hash().Bytes())
+contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes())
return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2T)
}
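The address that crypto.CreateAddress2 computes follows the EIP-1014 CREATE2 rule: keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode)), keeping the last 20 bytes. A hedged standalone sketch of that derivation (helper names are mine, not Erigon's):

package main

import (
    "fmt"

    "golang.org/x/crypto/sha3"
)

func keccak256(data ...[]byte) []byte {
    h := sha3.NewLegacyKeccak256()
    for _, d := range data {
        h.Write(d)
    }
    return h.Sum(nil)
}

// create2Address computes keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode))[12:].
func create2Address(deployer [20]byte, salt [32]byte, initCode []byte) [20]byte {
    sum := keccak256([]byte{0xff}, deployer[:], salt[:], keccak256(initCode))
    var addr [20]byte
    copy(addr[:], sum[12:])
    return addr
}

func main() {
    var deployer [20]byte
    var salt [32]byte
    fmt.Printf("0x%x\n", create2Address(deployer, salt, nil))
}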

View File

@@ -371,7 +371,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.Peek()
-slot.SetUint64(uint64(interpreter.evm.IntraBlockState().GetCodeSize(common.Address(slot.Bytes20()))))
+slot.SetUint64(uint64(interpreter.evm.IntraBlockState().GetCodeSize(slot.Bytes20())))
return nil, nil
}
@@ -869,7 +869,7 @@ func makeLog(size int) executionFunc {
mStart, mSize := stack.Pop(), stack.Pop()
for i := 0; i < size; i++ {
addr := stack.Pop()
-topics[i] = common.Hash(addr.Bytes32())
+topics[i] = addr.Bytes32()
}
d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64())

View File

@@ -31,7 +31,7 @@ func NewKeyFormat(prefix byte, layout ...int) *KeyFormat {
// For prefix byte
length := 1
for _, l := range layout {
-length += int(l)
+length += l
}
return &KeyFormat{
prefix: prefix,

View File

@@ -75,7 +75,7 @@ func (op IAVLAbsenceOp) Run(args [][]byte) ([][]byte, error) {
// XXX What is the encoding for keys?
// We should decode the key depending on whether it's a string or hex,
// maybe based on quotes and 0x prefix?
-err = op.Proof.VerifyAbsence([]byte(op.key))
+err = op.Proof.VerifyAbsence(op.key)
if err != nil {
return nil, cmn.ErrorWrap(err, "verifying absence")
}

View File

@@ -74,7 +74,7 @@ func (op IAVLValueOp) Run(args [][]byte) ([][]byte, error) {
// XXX What is the encoding for keys?
// We should decode the key depending on whether it's a string or hex,
// maybe based on quotes and 0x prefix?
-err = op.Proof.VerifyItem([]byte(op.key), value)
+err = op.Proof.VerifyItem(op.key, value)
if err != nil {
return nil, cmn.ErrorWrap(err, "verifying value")
}

View File

@@ -125,7 +125,7 @@ func (pl PathToLeaf) dropRoot() PathToLeaf {
if pl.isEmpty() {
return pl
}
-return PathToLeaf(pl[:len(pl)-1])
+return pl[:len(pl)-1]
}
// TODO: (leonard) unused linter complains these are unused methods

View File

@@ -222,7 +222,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
value uint256.Int
)
env.IntraBlockState().GetState(contract.Address(), &address, &value)
-l.storage[contract.Address()][address] = common.Hash(value.Bytes32())
+l.storage[contract.Address()][address] = value.Bytes32()
}
// capture SSTORE opcodes and record the written entry in the local storage.
if op == SSTORE && stack.Len() >= 2 {

View File

@@ -83,7 +83,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
if bf.baseFee = bf.header.BaseFee; bf.baseFee == nil {
bf.baseFee = new(big.Int)
}
-if chainconfig.IsLondon(uint64(bf.blockNumber + 1)) {
+if chainconfig.IsLondon(bf.blockNumber + 1) {
bf.nextBaseFee = misc.CalcBaseFee(chainconfig, bf.header)
} else {
bf.nextBaseFee = new(big.Int)
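misc.CalcBaseFee implements the EIP-1559 base-fee update used above. A simplified, integer-only sketch of that rule (the real implementation uses big.Int and reads its parameters from the chain config):

package main

import "fmt"

// nextBaseFee applies the EIP-1559 adjustment: unchanged at the gas target,
// up to +12.5% when the parent block was full, down to -12.5% when it was empty.
func nextBaseFee(parentBaseFee, parentGasUsed, parentGasLimit uint64) uint64 {
    const elasticity = 2
    const maxChangeDenominator = 8
    gasTarget := parentGasLimit / elasticity

    switch {
    case parentGasUsed == gasTarget:
        return parentBaseFee
    case parentGasUsed > gasTarget:
        delta := parentBaseFee * (parentGasUsed - gasTarget) / gasTarget / maxChangeDenominator
        if delta < 1 {
            delta = 1
        }
        return parentBaseFee + delta
    default:
        delta := parentBaseFee * (gasTarget - parentGasUsed) / gasTarget / maxChangeDenominator
        return parentBaseFee - delta
    }
}

func main() {
    fmt.Println(nextBaseFee(1_000_000_000, 30_000_000, 30_000_000)) // full block: 1125000000
    fmt.Println(nextBaseFee(1_000_000_000, 0, 30_000_000))          // empty block: 875000000
}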

View File

@@ -235,6 +235,8 @@ func newStateReaderWriter(
return stateReader, stateWriter, nil
}
+// ================ Erigon22 ================
func ExecBlock22(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
@@ -262,7 +264,6 @@ func ExecBlock22(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont
var prevStageProgress uint64
// Compute mapping blockNum -> last TxNum in that block
var txNums []uint64
if tx != nil {
prevStageProgress, err = stages.GetStageProgress(tx, stages.Senders)
if err != nil {
@@ -335,6 +336,92 @@ func ExecBlock22(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont
return nil
}
+func UnwindExec22(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) {
+if u.UnwindPoint >= s.BlockNumber {
+return nil
+}
+useExternalTx := tx != nil
+if !useExternalTx {
+tx, err = cfg.db.BeginRw(context.Background())
+if err != nil {
+return err
+}
+defer tx.Rollback()
+}
+logPrefix := u.LogPrefix()
+log.Info(fmt.Sprintf("[%s] Unwind Execution", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint)
+//rs := state.NewState22()
+aggDir := path.Join(cfg.dirs.DataDir, "agg22")
+dir.MustExist(aggDir)
+agg, err := libstate.NewAggregator22(aggDir, AggregationStep)
+if err != nil {
+return err
+}
+defer agg.Close()
+allSnapshots := cfg.blockReader.(WithSnapshots).Snapshots()
+var prevStageProgress uint64
+// Compute mapping blockNum -> last TxNum in that block
+var txNums []uint64
+if tx != nil {
+prevStageProgress, err = stages.GetStageProgress(tx, stages.Senders)
+if err != nil {
+return err
+}
+txNums = make([]uint64, prevStageProgress+1)
+if err := (snapshotsync.BodiesIterator{}).ForEach(tx, allSnapshots, 0, func(blockNum, baseTxNum, txAmount uint64) error {
+if blockNum > prevStageProgress {
+return nil
+}
+txNums[blockNum] = baseTxNum + txAmount
+return nil
+}); err != nil {
+return fmt.Errorf("build txNum => blockNum mapping: %w", err)
+}
+} else {
+if err = cfg.db.View(ctx, func(tx kv.Tx) error {
+prevStageProgress, err = stages.GetStageProgress(tx, stages.Senders)
+if err != nil {
+return err
+}
+txNums = make([]uint64, prevStageProgress)
+if err := (snapshotsync.BodiesIterator{}).ForEach(tx, allSnapshots, 0, func(blockNum, baseTxNum, txAmount uint64) error {
+if blockNum > prevStageProgress {
+return nil
+}
+txNums[blockNum] = baseTxNum + txAmount
+return nil
+}); err != nil {
+return fmt.Errorf("build txNum => blockNum mapping: %w", err)
+}
+return nil
+}); err != nil {
+return err
+}
+}
+agg.SetTx(tx)
+agg.SetTxNum(txNums[prevStageProgress])
+if err := agg.Unwind(txNums[u.UnwindPoint]); err != nil {
+return err
+}
+if err = u.Done(tx); err != nil {
+return err
+}
+if !useExternalTx {
+if err = tx.Commit(); err != nil {
+return err
+}
+}
+return nil
+}
+// ================ Erigon22 End ================
func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) {
if cfg.exec22 {
return ExecBlock22(s, u, tx, toBlock, ctx, cfg, initialCycle)
@@ -547,6 +634,10 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current
}
func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) {
+if cfg.exec22 {
+return UnwindExec22(u, s, tx, ctx, cfg, initialCycle)
+}
quit := ctx.Done()
if u.UnwindPoint >= s.BlockNumber {
return nil
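For context, a small standalone sketch (not Erigon's API) of the blockNum-to-txNum mapping built in ExecBlock22 and UnwindExec22 above: txNums[blockNum] holds the last txNum of each block, the aggregator is unwound to txNums[u.UnwindPoint], and the reverse lookup used elsewhere in this commit is a binary search over the same slice. The per-block transaction counts below are made up.

package main

import (
    "fmt"
    "sort"
)

func main() {
    // Hypothetical per-block transaction counts.
    txAmounts := []uint64{3, 5, 2, 7}

    // Build blockNum -> last txNum in that block (cumulative sum).
    txNums := make([]uint64, len(txAmounts))
    var base uint64
    for blockNum, amount := range txAmounts {
        base += amount
        txNums[blockNum] = base
    }

    // Unwind target: rewind to the last txNum of the unwind-point block.
    unwindPoint := 1
    fmt.Println("unwind to txNum:", txNums[unwindPoint]) // 8

    // Reverse lookup: which block contains a given txNum?
    txNum := uint64(9)
    blockNum := sort.Search(len(txNums), func(i int) bool { return txNums[i] > txNum })
    fmt.Println("txNum", txNum, "is in block", blockNum) // block 2
}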

View File

@@ -391,7 +391,7 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H
}
// Check if we already reached TTD.
if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 {
-log.Warn(fmt.Sprintf("[%s] TTD not reached yet", prefix), "hash", common.Hash(blockHash))
+log.Warn(fmt.Sprintf("[%s] TTD not reached yet", prefix), "hash", blockHash)
return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: common.Hash{}}, nil
}

go.mod
View File

@@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon
go 1.18
require (
-github.com/ledgerwatch/erigon-lib v0.0.0-20220812160240-8e9ea275cd81
+github.com/ledgerwatch/erigon-lib v0.0.0-20220813104409-4999fb010739
github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220809023834-6309df4da4b1
github.com/ledgerwatch/log/v3 v3.4.1
github.com/ledgerwatch/secp256k1 v1.0.0

go.sum
View File

@@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/ledgerwatch/erigon-lib v0.0.0-20220812160240-8e9ea275cd81 h1:VlM9UKWtDCFo+JLSXBQ1/08bx9sk3zZx1XrVa7W8Nqs=
-github.com/ledgerwatch/erigon-lib v0.0.0-20220812160240-8e9ea275cd81/go.mod h1:Sb+16YoG+izjT/6ZPxUZixa2gI2jLbi1rrxBCtc2QKg=
+github.com/ledgerwatch/erigon-lib v0.0.0-20220813104409-4999fb010739 h1:J0X0kujYuZKkzbHw/IamcjC51nNPqLrrc+92TBrWuWk=
+github.com/ledgerwatch/erigon-lib v0.0.0-20220813104409-4999fb010739/go.mod h1:Sb+16YoG+izjT/6ZPxUZixa2gI2jLbi1rrxBCtc2QKg=
github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220809023834-6309df4da4b1 h1:qRIJu6cs6fbI8L52DSdPF27j3sOrEriXz1zQSuQvYpA=
github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220809023834-6309df4da4b1/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo=
github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc=

View File

@@ -224,7 +224,7 @@ func (h *handler) handleMsg(msg *jsonrpcMessage, stream *jsoniter.Stream) {
h.addSubscriptions(cp.notifiers)
if answer != nil {
buffer, _ := json.Marshal(answer)
-stream.Write(json.RawMessage(buffer))
+stream.Write(buffer)
}
if needWriteStream {
h.conn.writeJSON(cp.ctx, json.RawMessage(stream.Buffer()))

View File

@@ -139,10 +139,10 @@ func (t *StateTest) Run(rules *params.Rules, tx kv.RwTx, subtest StateSubtest, v
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
-if root != common.Hash(post.Root) {
+if root != post.Root {
return state, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
-if logs := rlpHash(state.Logs()); logs != common.Hash(post.Logs) {
+if logs := rlpHash(state.Logs()); logs != post.Logs {
return state, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
return state, nil

View File

@@ -131,15 +131,15 @@ func (tt *TransactionTest) Run(chainID *big.Int) error {
return fmt.Errorf("got error, expected none: %w", err)
}
if sender == nil {
-return fmt.Errorf("sender was nil, should be %x", common.Address(testcase.fork.Sender))
+return fmt.Errorf("sender was nil, should be %x", testcase.fork.Sender)
}
-if *sender != common.Address(testcase.fork.Sender) {
+if *sender != testcase.fork.Sender {
return fmt.Errorf("sender mismatch: got %x, want %x", sender, testcase.fork.Sender)
}
if txhash == nil {
-return fmt.Errorf("txhash was nil, should be %x", common.Hash(testcase.fork.Hash))
+return fmt.Errorf("txhash was nil, should be %x", testcase.fork.Hash)
}
-if *txhash != common.Hash(testcase.fork.Hash) {
+if *txhash != testcase.fork.Hash {
return fmt.Errorf("hash mismatch: got %x, want %x", *txhash, testcase.fork.Hash)
}
if new(big.Int).SetUint64(intrinsicGas).Cmp((*big.Int)(testcase.fork.IntrinsicGas)) != 0 {

View File

@@ -56,7 +56,7 @@ func TestHeaderStep(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed
initialCycle := true
-highestSeenHeader := uint64(chain.TopBlock.NumberU64())
+highestSeenHeader := chain.TopBlock.NumberU64()
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
}
@@ -95,7 +95,7 @@ func TestMineBlockWith1Tx(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle := true
-highestSeenHeader := uint64(chain.TopBlock.NumberU64())
+highestSeenHeader := chain.TopBlock.NumberU64()
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
}
@@ -164,7 +164,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle := true
-highestSeenHeader := uint64(chain.TopBlock.NumberU64())
+highestSeenHeader := chain.TopBlock.NumberU64()
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
}
@@ -217,7 +217,7 @@ func TestReorg(t *testing.T) {
}
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
-highestSeenHeader = uint64(short.TopBlock.NumberU64())
+highestSeenHeader = short.TopBlock.NumberU64()
initialCycle = false
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
@@ -262,7 +262,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
// This is unwind step
-highestSeenHeader = uint64(long1.TopBlock.NumberU64())
+highestSeenHeader = long1.TopBlock.NumberU64()
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
}
@@ -299,7 +299,7 @@ func TestReorg(t *testing.T) {
}
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
-highestSeenHeader = uint64(short2.TopBlock.NumberU64())
+highestSeenHeader = short2.TopBlock.NumberU64()
initialCycle = false
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
@@ -396,7 +396,7 @@ func TestAnchorReplace(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
-highestSeenHeader := uint64(long.TopBlock.NumberU64())
+highestSeenHeader := long.TopBlock.NumberU64()
initialCycle := true
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)
@@ -501,7 +501,7 @@ func TestAnchorReplace2(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
-highestSeenHeader := uint64(long.TopBlock.NumberU64())
+highestSeenHeader := long.TopBlock.NumberU64()
initialCycle := true
if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil {
t.Fatal(err)

View File

@@ -250,7 +250,7 @@ func (l *JsonStreamLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, ga
value uint256.Int
)
env.IntraBlockState().GetState(contract.Address(), &address, &value)
-l.storage[contract.Address()][address] = common.Hash(value.Bytes32())
+l.storage[contract.Address()][address] = value.Bytes32()
outputStorage = true
}
// capture SSTORE opcodes and record the written entry in the local storage.

View File

@@ -52,7 +52,7 @@ func FullNode4() {
}
func ShortNode1() {
-s := NewShortNode([]byte("1"), valueNode([]byte("2")))
+s := NewShortNode([]byte("1"), valueNode("2"))
b, err := rlp.EncodeToBytes(s)
if err != nil {
panic(err)
@@ -61,7 +61,7 @@ func ShortNode1() {
}
func ShortNode2() {
-s := NewShortNode([]byte("1"), valueNode([]byte("123456789012345678901234567890123456789012345678901234567890")))
+s := NewShortNode([]byte("1"), valueNode("123456789012345678901234567890123456789012345678901234567890"))
b, err := rlp.EncodeToBytes(s)
if err != nil {
panic(err)
@@ -99,17 +99,17 @@ func Hash2() {
}
func Hash3() {
-s := NewShortNode([]byte("12"), valueNode([]byte("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012")))
+s := NewShortNode([]byte("12"), valueNode("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012"))
hashRoot(s, "Hash3")
}
func Hash4() {
-s := NewShortNode([]byte("12345678901234567890123456789012"), valueNode([]byte("12345678901234567890")))
+s := NewShortNode([]byte("12345678901234567890123456789012"), valueNode("12345678901234567890"))
hashRoot(s, "Hash4")
}
func Hash5() {
-s := NewShortNode([]byte("1234567890123456789012345678901"), valueNode([]byte("1")))
+s := NewShortNode([]byte("1234567890123456789012345678901"), valueNode("1"))
hashRoot(s, "Hash5")
}

View File

@@ -12,7 +12,7 @@ func TestValue(t *testing.T) {
h := newHasher(false)
var hn common.Hash
-h.hash(valueNode([]byte("BLAH")), false, hn[:])
+h.hash(valueNode("BLAH"), false, hn[:])
expected := "0x0"
actual := fmt.Sprintf("0x%x", hn[:])
if actual != expected {

View File

@@ -50,9 +50,9 @@ func TestV2HashBuilding(t *testing.T) {
valueShort := []byte("VAL")
for i, key := range keys {
if i%2 == 0 {
-tr.Update([]byte(key), valueNode(valueLong))
+tr.Update([]byte(key), valueLong)
} else {
-tr.Update([]byte(key), valueNode(valueShort))
+tr.Update([]byte(key), valueShort)
}
}
trieHash := tr.Hash()
@@ -110,7 +110,7 @@ func TestV2Resolution(t *testing.T) {
tr := New(common.Hash{})
value := []byte("VALUE123985903485903489043859043859043859048590485904385903485940385439058934058439058439058439058940385904358904385438809348908345")
for _, key := range keys {
-tr.Update([]byte(key), valueNode(value))
+tr.Update([]byte(key), value)
}
trieHash := tr.Hash()
@@ -201,7 +201,7 @@ func TestEmbeddedStorage(t *testing.T) {
tr := New(common.Hash{})
valueShort := []byte("VAL")
for _, key := range keys {
-tr.Update([]byte(key)[common.HashLength:], valueNode(valueShort))
+tr.Update([]byte(key)[common.HashLength:], valueShort)
}
trieHash := tr.Hash()

View File

@@ -24,7 +24,7 @@ func transformSubTrie(nd node, hex []byte, newTrie *Trie, transformFunc keyTrans
code = make([]byte, len(n.code))
copy(code, n.code)
}
-_, newTrie.root = newTrie.insert(newTrie.root, transformFunc(hex), &accountNode{accountCopy, nil, true, codeNode(code), n.codeSize})
+_, newTrie.root = newTrie.insert(newTrie.root, transformFunc(hex), &accountNode{accountCopy, nil, true, code, n.codeSize})
aHex := hex
if aHex[len(aHex)-1] == 16 {
aHex = aHex[:len(aHex)-1]