2020-08-01 07:39:04 +00:00
|
|
|
package commands
|
|
|
|
|
|
|
|
import (
|
2021-06-23 22:33:45 +00:00
|
|
|
"bytes"
|
2020-08-01 07:39:04 +00:00
|
|
|
"context"
|
2021-06-23 22:33:45 +00:00
|
|
|
"encoding/binary"
|
2020-08-01 07:39:04 +00:00
|
|
|
"fmt"
|
2020-10-07 20:50:03 +00:00
|
|
|
"math/big"
|
|
|
|
|
2022-10-28 01:47:45 +00:00
|
|
|
"github.com/RoaringBitmap/roaring"
|
2022-09-05 04:33:55 +00:00
|
|
|
"github.com/RoaringBitmap/roaring/roaring64"
|
2021-07-02 13:34:20 +00:00
|
|
|
"github.com/holiman/uint256"
|
2022-12-19 08:38:54 +00:00
|
|
|
common2 "github.com/ledgerwatch/erigon-lib/common"
|
2021-07-29 11:53:13 +00:00
|
|
|
"github.com/ledgerwatch/erigon-lib/kv"
|
2022-10-28 01:47:45 +00:00
|
|
|
"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
|
2022-09-05 04:33:55 +00:00
|
|
|
libstate "github.com/ledgerwatch/erigon-lib/state"
|
2022-12-22 02:37:32 +00:00
|
|
|
"github.com/ledgerwatch/erigon/core/state/temporal"
|
2022-12-07 17:45:44 +00:00
|
|
|
"github.com/ledgerwatch/log/v3"
|
|
|
|
|
2021-05-20 18:25:53 +00:00
|
|
|
"github.com/ledgerwatch/erigon/common"
|
|
|
|
"github.com/ledgerwatch/erigon/common/hexutil"
|
|
|
|
"github.com/ledgerwatch/erigon/core"
|
|
|
|
"github.com/ledgerwatch/erigon/core/rawdb"
|
|
|
|
"github.com/ledgerwatch/erigon/core/state"
|
|
|
|
"github.com/ledgerwatch/erigon/core/types"
|
|
|
|
"github.com/ledgerwatch/erigon/core/vm"
|
2022-11-30 01:31:13 +00:00
|
|
|
"github.com/ledgerwatch/erigon/core/vm/evmtypes"
|
2021-05-20 18:25:53 +00:00
|
|
|
"github.com/ledgerwatch/erigon/eth/filters"
|
2021-06-23 22:33:45 +00:00
|
|
|
"github.com/ledgerwatch/erigon/ethdb/cbor"
|
2021-05-20 18:25:53 +00:00
|
|
|
"github.com/ledgerwatch/erigon/params"
|
|
|
|
"github.com/ledgerwatch/erigon/rpc"
|
2022-06-14 08:07:46 +00:00
|
|
|
"github.com/ledgerwatch/erigon/turbo/rpchelper"
|
2021-05-20 18:25:53 +00:00
|
|
|
"github.com/ledgerwatch/erigon/turbo/transactions"
|
2020-08-01 07:39:04 +00:00
|
|
|
)
|
|
|
|
|
2022-06-04 22:06:30 +00:00
|
|
|
func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *params.ChainConfig, block *types.Block, senders []common.Address) (types.Receipts, error) {
|
2021-06-04 12:28:18 +00:00
|
|
|
if cached := rawdb.ReadReceipts(tx, block, senders); cached != nil {
|
2020-08-01 07:39:04 +00:00
|
|
|
return cached, nil
|
|
|
|
}
|
Fix trace error in Polygon | Pass Engin to the Base API (#6131)
So there is an issue with tracing certain blocks/transactions on
Polygon, for example:
```
> '{"method": "trace_transaction","params":["0xb198d93f640343a98f90d93aa2b74b4fc5c64f3a649f1608d2bfd1004f9dee0e"],"id":1,"jsonrpc":"2.0"}'
```
gives the error `first run for txIndex 1 error: insufficient funds for
gas * price + value: address 0x10AD27A96CDBffC90ab3b83bF695911426A69f5E
have 16927727762862809 want 17594166808296934`
The reason is that this transaction is from the author of the block,
which doesn't have enough ETH to pay for the gas fee + tx value if he's
not the block author receiving transactions fees.
The issue is that currently the APIs are using `ethash.NewFaker()`
Engine for running traces, etc. which doesn't know how to get the author
for a specific block (which is consensus dependant); as it was noting in
several TODO comments.
The fix is to pass the Engine to the BaseAPI, which can then be used to
create the right Block Context. I chose to split the current Engine
interface in 2, with Reader and Writer, so that the BaseAPI only
receives the Reader one, which might be safer (even though it's only
used for getting the block Author).
2022-12-04 05:17:39 +00:00
|
|
|
engine := api.engine()
|
2020-08-01 07:39:04 +00:00
|
|
|
|
Fix trace error in Polygon | Pass Engin to the Base API (#6131)
So there is an issue with tracing certain blocks/transactions on
Polygon, for example:
```
> '{"method": "trace_transaction","params":["0xb198d93f640343a98f90d93aa2b74b4fc5c64f3a649f1608d2bfd1004f9dee0e"],"id":1,"jsonrpc":"2.0"}'
```
gives the error `first run for txIndex 1 error: insufficient funds for
gas * price + value: address 0x10AD27A96CDBffC90ab3b83bF695911426A69f5E
have 16927727762862809 want 17594166808296934`
The reason is that this transaction is from the author of the block,
which doesn't have enough ETH to pay for the gas fee + tx value if he's
not the block author receiving transactions fees.
The issue is that currently the APIs are using `ethash.NewFaker()`
Engine for running traces, etc. which doesn't know how to get the author
for a specific block (which is consensus dependant); as it was noting in
several TODO comments.
The fix is to pass the Engine to the BaseAPI, which can then be used to
create the right Block Context. I chose to split the current Engine
interface in 2, with Reader and Writer, so that the BaseAPI only
receives the Reader one, which might be safer (even though it's only
used for getting the block Author).
2022-12-04 05:17:39 +00:00
|
|
|
_, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api._agg, api.historyV3(tx))
|
2020-08-01 07:39:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-10-11 14:24:21 +00:00
|
|
|
usedGas := new(uint64)
|
2020-08-01 07:39:04 +00:00
|
|
|
gp := new(core.GasPool).AddGas(block.GasLimit())
|
2021-10-11 14:24:21 +00:00
|
|
|
|
|
|
|
noopWriter := state.NewNoopWriter()
|
|
|
|
|
|
|
|
receipts := make(types.Receipts, len(block.Transactions()))
|
|
|
|
|
2022-10-31 05:31:38 +00:00
|
|
|
getHeader := func(hash common.Hash, number uint64) *types.Header {
|
|
|
|
h, e := api._blockReader.Header(ctx, tx, hash, number)
|
|
|
|
if e != nil {
|
|
|
|
log.Error("getHeader error", "number", number, "hash", hash, "err", e)
|
|
|
|
}
|
|
|
|
return h
|
|
|
|
}
|
2020-09-28 17:18:36 +00:00
|
|
|
for i, txn := range block.Transactions() {
|
|
|
|
ibs.Prepare(txn.Hash(), block.Hash(), i)
|
2022-07-07 11:47:00 +00:00
|
|
|
header := block.Header()
|
Fix trace error in Polygon | Pass Engin to the Base API (#6131)
So there is an issue with tracing certain blocks/transactions on
Polygon, for example:
```
> '{"method": "trace_transaction","params":["0xb198d93f640343a98f90d93aa2b74b4fc5c64f3a649f1608d2bfd1004f9dee0e"],"id":1,"jsonrpc":"2.0"}'
```
gives the error `first run for txIndex 1 error: insufficient funds for
gas * price + value: address 0x10AD27A96CDBffC90ab3b83bF695911426A69f5E
have 16927727762862809 want 17594166808296934`
The reason is that this transaction is from the author of the block,
which doesn't have enough ETH to pay for the gas fee + tx value if he's
not the block author receiving transactions fees.
The issue is that currently the APIs are using `ethash.NewFaker()`
Engine for running traces, etc. which doesn't know how to get the author
for a specific block (which is consensus dependant); as it was noting in
several TODO comments.
The fix is to pass the Engine to the BaseAPI, which can then be used to
create the right Block Context. I chose to split the current Engine
interface in 2, with Reader and Writer, so that the BaseAPI only
receives the Reader one, which might be safer (even though it's only
used for getting the block Author).
2022-12-04 05:17:39 +00:00
|
|
|
receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{})
|
2020-08-01 07:39:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-07-02 13:34:20 +00:00
|
|
|
receipt.BlockHash = block.Hash()
|
2021-10-11 14:24:21 +00:00
|
|
|
receipts[i] = receipt
|
2020-08-01 07:39:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return receipts, nil
|
|
|
|
}
|
|
|
|
|
2020-10-24 17:03:52 +00:00
|
|
|
// GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object.
//
// The block range is resolved either from crit.BlockHash (single block) or
// from crit.FromBlock/crit.ToBlock (defaulting to the latest executed block).
// Candidate block numbers are then narrowed with the topic and address
// inverted indexes (roaring bitmaps) before the logs themselves are read and
// filtered per block.
func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (types.Logs, error) {
	var begin, end uint64
	logs := types.Logs{}

	tx, beginErr := api.db.BeginRo(ctx)
	if beginErr != nil {
		return logs, beginErr
	}
	defer tx.Rollback()

	if crit.BlockHash != nil {
		// Single-block query: both bounds collapse to the block's number.
		header, err := api._blockReader.HeaderByHash(ctx, tx, *crit.BlockHash)
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, fmt.Errorf("block not found: %x", *crit.BlockHash)
		}
		begin = header.Number.Uint64()
		end = header.Number.Uint64()
	} else {
		// Convert the RPC block numbers into internal representations
		latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil)
		if err != nil {
			return nil, err
		}

		// Negative values are only accepted when they encode "latest".
		begin = latest
		if crit.FromBlock != nil {
			if crit.FromBlock.Sign() >= 0 {
				begin = crit.FromBlock.Uint64()
			} else if !crit.FromBlock.IsInt64() || crit.FromBlock.Int64() != int64(rpc.LatestBlockNumber) {
				return nil, fmt.Errorf("negative value for FromBlock: %v", crit.FromBlock)
			}
		}
		end = latest
		if crit.ToBlock != nil {
			if crit.ToBlock.Sign() >= 0 {
				end = crit.ToBlock.Uint64()
			} else if !crit.ToBlock.IsInt64() || crit.ToBlock.Int64() != int64(rpc.LatestBlockNumber) {
				return nil, fmt.Errorf("negative value for ToBlock: %v", crit.ToBlock)
			}
		}
	}
	if end < begin {
		return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin)
	}
	// The 32-bit roaring indexes cannot address block numbers above
	// MaxUint32; clamp an oversized end bound to the actual latest block.
	if end > roaring.MaxUint32 {
		latest, err := rpchelper.GetLatestBlockNumber(tx)
		if err != nil {
			return nil, err
		}
		if begin > latest {
			return nil, fmt.Errorf("begin (%d) > latest (%d)", begin, latest)
		}
		end = latest
	}

	// History-v3 storage uses a tx-number based path instead of block bitmaps.
	if api.historyV3(tx) {
		return api.getLogsV3(ctx, tx, begin, end, crit)
	}

	blockNumbers := bitmapdb.NewBitmap()
	defer bitmapdb.ReturnToPool(blockNumbers)
	blockNumbers.AddRange(begin, end+1) // [min,max)
	topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, uint32(begin), uint32(end))
	if err != nil {
		return nil, err
	}

	// nil topicsBitmap means "no topic filter" — keep the full range.
	if topicsBitmap != nil {
		blockNumbers.And(topicsBitmap)
	}

	rx := make([]*roaring.Bitmap, len(crit.Addresses))
	for idx, addr := range crit.Addresses {
		m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end))
		if err != nil {
			return nil, err
		}
		rx[idx] = m
	}

	// Union of per-address block bitmaps; only applied when addresses were
	// actually given (an empty filter must not narrow the range).
	addrBitmap := roaring.FastOr(rx...)

	if len(rx) > 0 {
		blockNumbers.And(addrBitmap)
	}

	if blockNumbers.GetCardinality() == 0 {
		return logs, nil
	}
	// Precomputed address set so per-log filtering is O(1) per address.
	addrMap := make(map[common.Address]struct{}, len(crit.Addresses))
	for _, v := range crit.Addresses {
		addrMap[v] = struct{}{}
	}
	iter := blockNumbers.Iterator()
	for iter.HasNext() {
		if err = ctx.Err(); err != nil {
			return nil, err
		}

		blockNumber := uint64(iter.Next())
		var logIndex uint
		var txIndex uint
		var blockLogs []*types.Log

		// Iterate all log records of the block (keyed by block number + tx
		// index); logIndex is assigned across the whole block, in order.
		err := tx.ForPrefix(kv.Log, common2.EncodeTs(blockNumber), func(k, v []byte) error {
			var logs types.Logs
			if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil {
				return fmt.Errorf("receipt unmarshal failed: %w", err)
			}
			for _, log := range logs {
				log.Index = logIndex
				logIndex++
			}
			filtered := logs.Filter(addrMap, crit.Topics)
			if len(filtered) == 0 {
				return nil
			}
			// The tx index is the big-endian suffix of the key after the
			// 8-byte block number.
			txIndex = uint(binary.BigEndian.Uint32(k[8:]))
			for _, log := range filtered {
				log.TxIndex = txIndex
			}
			blockLogs = append(blockLogs, filtered...)

			return nil
		})
		if err != nil {
			return logs, err
		}
		if len(blockLogs) == 0 {
			continue
		}

		blockHash, err := rawdb.ReadCanonicalHash(tx, blockNumber)
		if err != nil {
			return nil, err
		}

		body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber)
		if err != nil {
			return nil, err
		}
		if body == nil {
			return nil, fmt.Errorf("block not found %d", blockNumber)
		}
		for _, log := range blockLogs {
			log.BlockNumber = blockNumber
			log.BlockHash = blockHash
			// bor transactions are at the end of the bodies transactions (added manually but not actually part of the block)
			if log.TxIndex == uint(len(body.Transactions)) {
				log.TxHash = types.ComputeBorTxHash(blockNumber, blockHash)
			} else {
				log.TxHash = body.Transactions[log.TxIndex].Hash()
			}
		}
		logs = append(logs, blockLogs...)
	}

	return logs, nil
}
|
2020-09-03 07:51:19 +00:00
|
|
|
|
2020-09-28 17:18:36 +00:00
|
|
|
// The Topic list restricts matches to particular event topics. Each event has a list
|
|
|
|
// of topics. Topics matches a prefix of that list. An empty element slice matches any
|
|
|
|
// topic. Non-empty elements represent an alternative that matches any of the
|
|
|
|
// contained topics.
|
|
|
|
//
|
|
|
|
// Examples:
|
|
|
|
// {} or nil matches any topic list
|
|
|
|
// {{A}} matches topic A in first position
|
|
|
|
// {{}, {B}} matches any topic in first position AND B in second position
|
|
|
|
// {{A}, {B}} matches topic A in first position AND B in second position
|
|
|
|
// {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position
|
2021-07-28 02:47:38 +00:00
|
|
|
func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring.Bitmap, error) {
|
2020-10-02 08:16:21 +00:00
|
|
|
var result *roaring.Bitmap
|
2020-09-28 17:18:36 +00:00
|
|
|
for _, sub := range topics {
|
2020-10-02 08:16:21 +00:00
|
|
|
var bitmapForORing *roaring.Bitmap
|
2020-09-28 17:18:36 +00:00
|
|
|
for _, topic := range sub {
|
2021-07-28 02:47:38 +00:00
|
|
|
m, err := bitmapdb.Get(c, kv.LogTopicIndex, topic[:], from, to)
|
2020-09-28 17:18:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if bitmapForORing == nil {
|
|
|
|
bitmapForORing = m
|
2022-03-25 04:17:23 +00:00
|
|
|
continue
|
2020-09-28 17:18:36 +00:00
|
|
|
}
|
2022-03-25 04:17:23 +00:00
|
|
|
bitmapForORing.Or(m)
|
2020-09-28 17:18:36 +00:00
|
|
|
}
|
2020-09-03 07:51:19 +00:00
|
|
|
|
2022-03-25 04:17:23 +00:00
|
|
|
if bitmapForORing == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if result == nil {
|
|
|
|
result = bitmapForORing
|
|
|
|
continue
|
2020-09-28 17:18:36 +00:00
|
|
|
}
|
2022-08-10 16:03:22 +00:00
|
|
|
|
2022-03-25 04:17:23 +00:00
|
|
|
result = roaring.And(bitmapForORing, result)
|
2020-09-28 17:18:36 +00:00
|
|
|
}
|
|
|
|
return result, nil
|
2020-09-03 07:51:19 +00:00
|
|
|
}
|
|
|
|
|
2022-09-27 09:08:45 +00:00
|
|
|
// getLogsV3 is the history-v3 implementation of eth_getLogs: it resolves the
// block range [begin, end] to a transaction-number range, narrows the set of
// candidate tx numbers with the topic and address inverted indexes, then
// re-executes each remaining transaction against historical state to recover
// and filter its logs.
func (api *APIImpl) getLogsV3(ctx context.Context, tx kv.Tx, begin, end uint64, crit filters.FilterCriteria) ([]*types.Log, error) {
	logs := []*types.Log{}

	var fromTxNum, toTxNum uint64
	var err error
	if begin > 0 {
		fromTxNum, err = rawdb.TxNums.Min(tx, begin)
		if err != nil {
			return nil, err
		}
	}
	toTxNum, err = rawdb.TxNums.Max(tx, end) // end is an inclusive bound
	if err != nil {
		return nil, err
	}

	txNumbers := roaring64.New()
	// NOTE(review): AddRange is half-open [min,max), while toTxNum above is
	// described as an inclusive bound — confirm the last tx of `end` is not
	// meant to be included (would need toTxNum+1 otherwise).
	txNumbers.AddRange(fromTxNum, toTxNum) // [min,max)

	ac := api._agg.MakeContext()
	ac.SetTx(tx)

	topicsBitmap, err := getTopicsBitmapV3(ac, tx, crit.Topics, fromTxNum, toTxNum)
	if err != nil {
		return nil, err
	}

	// nil topicsBitmap means "no topic filter".
	if topicsBitmap != nil {
		txNumbers.And(topicsBitmap)
	}

	// OR together the per-address tx-number bitmaps, then AND into the
	// candidate set (only when addresses were given).
	var addrBitmap *roaring64.Bitmap
	for _, addr := range crit.Addresses {
		var bitmapForORing roaring64.Bitmap
		it := ac.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, tx)
		for it.HasNext() {
			n, err := it.NextBatch()
			if err != nil {
				return nil, err
			}
			bitmapForORing.AddMany(n)
		}
		if addrBitmap == nil {
			addrBitmap = &bitmapForORing
			continue
		}
		addrBitmap = roaring64.Or(addrBitmap, &bitmapForORing)
	}

	if addrBitmap != nil {
		txNumbers.And(addrBitmap)
	}

	if txNumbers.GetCardinality() == 0 {
		return logs, nil
	}
	// Per-block state, refreshed only when the iteration crosses into a new
	// block (tx numbers are visited in ascending order).
	var lastBlockNum uint64
	var blockHash common.Hash
	var header *types.Header
	var signer *types.Signer
	var rules *params.Rules
	var skipAnalysis bool
	stateReader := state.NewHistoryReaderV3(ac)
	stateReader.SetTx(tx)
	ibs := state.New(stateReader)

	//stateReader.SetTrace(true)
	iter := txNumbers.Iterator()

	chainConfig, err := api.chainConfig(tx)
	if err != nil {
		return nil, err
	}
	engine := api.engine()

	addrMap := make(map[common.Address]struct{}, len(crit.Addresses))
	for _, v := range crit.Addresses {
		addrMap[v] = struct{}{}
	}

	// One EVM instance is reused across all transactions; block/tx contexts
	// are swapped in via ResetBetweenBlocks below.
	evm := vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, chainConfig, vm.Config{})
	vmConfig := vm.Config{SkipAnalysis: skipAnalysis}
	var blockCtx evmtypes.BlockContext

	var minTxNumInBlock, maxTxNumInBlock uint64 // end is an inclusive bound
	var blockNum uint64
	var ok bool
	for iter.HasNext() {
		txNum := iter.Next()

		// txNums are sorted, it means blockNum will not change until `txNum < maxTxNum`

		if maxTxNumInBlock == 0 || txNum > maxTxNumInBlock {
			// Find block number
			ok, blockNum, err = rawdb.TxNums.FindBlockNum(tx, txNum)
			if err != nil {
				return nil, err
			}
		}
		if !ok {
			return nil, nil
		}

		// if block number changed, calculate all related field
		if blockNum > lastBlockNum {
			if header, err = api._blockReader.HeaderByNumber(ctx, tx, blockNum); err != nil {
				return nil, err
			}
			lastBlockNum = blockNum
			blockHash = header.Hash()
			signer = types.MakeSigner(chainConfig, blockNum)
			rules = chainConfig.Rules(blockNum, header.Time)
			vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum)

			minTxNumInBlock, err = rawdb.TxNums.Min(tx, blockNum)
			if err != nil {
				return nil, err
			}
			maxTxNumInBlock, err = rawdb.TxNums.Max(tx, blockNum)
			if err != nil {
				return nil, err
			}
			blockCtx = transactions.NewEVMBlockContext(engine, header, true /* requireCanonical */, tx, api._blockReader)
		}

		// txNum -> in-block index; the -1 accounts for the block's system tx.
		txIndex := int(txNum) - int(minTxNumInBlock) - 1
		//fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex)
		txn, err := api._txnReader.TxnByIdxInBlock(ctx, tx, blockNum, txIndex)
		if err != nil {
			return nil, err
		}
		if txn == nil {
			continue
		}
		stateReader.SetTxNum(txNum)
		txHash := txn.Hash()
		msg, err := txn.AsMessage(*signer, header.BaseFee, rules)
		if err != nil {
			return nil, err
		}

		ibs.Reset()
		ibs.Prepare(txHash, blockHash, txIndex)

		evm.ResetBetweenBlocks(blockCtx, core.NewEVMTxContext(msg), ibs, vmConfig, rules)

		gp := new(core.GasPool).AddGas(msg.Gas())
		_, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */)
		if err != nil {
			return nil, fmt.Errorf("%w: blockNum=%d, txNum=%d", err, blockNum, txNum)
		}
		rawLogs := ibs.GetLogs(txHash)
		// logIndex here is per-transaction, not per-block — TODO confirm this
		// matches the v2 path, which numbers logs across the whole block.
		var logIndex uint
		for _, log := range rawLogs {
			log.Index = logIndex
			logIndex++
		}
		filtered := types.Logs(rawLogs).Filter(addrMap, crit.Topics)
		for _, log := range filtered {
			log.BlockNumber = blockNum
			log.BlockHash = blockHash
			log.TxHash = txHash
		}
		logs = append(logs, filtered...)
	}

	//stats := api._agg.GetAndResetStats()
	//log.Info("Finished", "duration", time.Since(start), "history queries", stats.HistoryQueries, "ef search duration", stats.EfSearchTime)
	return logs, nil
}
|
|
|
|
|
|
|
|
// The Topic list restricts matches to particular event topics. Each event has a list
|
|
|
|
// of topics. Topics matches a prefix of that list. An empty element slice matches any
|
|
|
|
// topic. Non-empty elements represent an alternative that matches any of the
|
|
|
|
// contained topics.
|
|
|
|
//
|
|
|
|
// Examples:
|
|
|
|
// {} or nil matches any topic list
|
|
|
|
// {{A}} matches topic A in first position
|
|
|
|
// {{}, {B}} matches any topic in first position AND B in second position
|
|
|
|
// {{A}, {B}} matches topic A in first position AND B in second position
|
|
|
|
// {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position
|
2022-10-05 10:54:54 +00:00
|
|
|
func getTopicsBitmapV3(ac *libstate.Aggregator22Context, tx kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) {
|
2022-09-05 04:33:55 +00:00
|
|
|
var result *roaring64.Bitmap
|
|
|
|
for _, sub := range topics {
|
|
|
|
var bitmapForORing roaring64.Bitmap
|
|
|
|
for _, topic := range sub {
|
2022-12-22 02:37:32 +00:00
|
|
|
if ttx, casted := tx.(kv.TemporalTx); casted {
|
|
|
|
it, err := ttx.InvertedIndexRange(temporal.LogTopic, topic.Bytes(), from, to)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for it.HasNext() {
|
|
|
|
n, err := it.NextBatch()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
bitmapForORing.AddMany(n)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
it := ac.LogTopicIterator(topic.Bytes(), from, to, tx)
|
|
|
|
bitmapForORing.Or(it.ToBitamp())
|
2022-09-05 04:33:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if bitmapForORing.GetCardinality() == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if result == nil {
|
|
|
|
result = &bitmapForORing
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
result = roaring64.And(&bitmapForORing, result)
|
|
|
|
}
|
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
2020-10-24 17:03:52 +00:00
|
|
|
// GetTransactionReceipt implements eth_getTransactionReceipt. Returns the receipt of a transaction given the transaction's hash.
//
// A nil, nil return means "transaction not found" (rendered as JSON null),
// which is the expected RPC answer rather than an error. On Polygon/Bor
// chains the hash may also refer to a state-sync (bor) transaction, which is
// looked up and marshalled separately because it carries no signature.
func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Hash) (map[string]interface{}, error) {
	tx, err := api.db.BeginRo(ctx)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()

	var blockNum uint64
	var ok bool

	// Map the transaction hash to its block number via the txn lookup index.
	blockNum, ok, err = api.txnLookup(ctx, tx, txnHash)
	if err != nil {
		return nil, err
	}

	cc, err := api.chainConfig(tx)
	if err != nil {
		return nil, err
	}

	// On non-Bor chains a failed lookup is definitive: the txn does not exist.
	if !ok && cc.Bor == nil {
		return nil, nil
	}

	// if not ok and cc.Bor != nil then we might have a bor transaction.
	// Note that Private API returns 0 if transaction is not found.
	if !ok || blockNum == 0 {
		blockNumPtr, err := rawdb.ReadBorTxLookupEntry(tx, txnHash)
		if err != nil {
			return nil, err
		}
		if blockNumPtr == nil {
			return nil, nil
		}

		blockNum = *blockNumPtr
	}

	block, err := api.blockByNumberWithSenders(tx, blockNum)
	if err != nil {
		return nil, err
	}
	if block == nil {
		return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645
	}

	// Locate the transaction inside the block body; txn stays nil when the
	// hash belongs to a bor state-sync transaction (not part of the body).
	var txnIndex uint64
	var txn types.Transaction
	for idx, transaction := range block.Transactions() {
		if transaction.Hash() == txnHash {
			txn = transaction
			txnIndex = uint64(idx)
			break
		}
	}

	var borTx types.Transaction
	if txn == nil {
		// Not in the body: try the synthetic bor transaction for this block.
		borTx, _, _, _ = rawdb.ReadBorTransactionForBlock(tx, block)
		if borTx == nil {
			return nil, nil
		}
	}

	receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs())
	if err != nil {
		return nil, fmt.Errorf("getReceipts error: %w", err)
	}
	// txnIndex is 0 for the bor path, so this also guards the indexing below.
	if len(receipts) <= int(txnIndex) {
		return nil, fmt.Errorf("block has less receipts than expected: %d <= %d, block: %d", len(receipts), int(txnIndex), blockNum)
	}

	if txn == nil {
		// Bor receipts live outside the regular receipts list.
		borReceipt, err := rawdb.ReadBorReceipt(tx, block.Hash(), blockNum, receipts)
		if err != nil {
			return nil, err
		}
		if borReceipt == nil {
			return nil, nil
		}
		// signed=false: state-sync transactions have no recoverable sender.
		return marshalReceipt(borReceipt, borTx, cc, block, txnHash, false), nil
	}

	return marshalReceipt(receipts[txnIndex], block.Transactions()[txnIndex], cc, block, txnHash, true), nil
}
|
|
|
|
|
|
|
|
// GetBlockReceipts - receipts for individual block
|
2022-02-07 21:30:46 +00:00
|
|
|
// func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) {
|
2021-04-18 05:05:54 +00:00
|
|
|
func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) {
|
|
|
|
tx, err := api.db.BeginRo(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer tx.Rollback()
|
2020-08-01 07:39:04 +00:00
|
|
|
|
2022-06-14 13:29:49 +00:00
|
|
|
blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters)
|
2021-04-18 05:05:54 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-29 06:51:51 +00:00
|
|
|
block, err := api.blockByNumberWithSenders(tx, blockNum)
|
2021-04-18 05:05:54 +00:00
|
|
|
if err != nil {
|
2021-06-04 12:28:18 +00:00
|
|
|
return nil, err
|
2021-04-18 05:05:54 +00:00
|
|
|
}
|
|
|
|
if block == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
chainConfig, err := api.chainConfig(tx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2022-06-04 22:06:30 +00:00
|
|
|
receipts, err := api.getReceipts(ctx, tx, chainConfig, block, block.Body().SendersFromTxs())
|
2021-04-18 05:05:54 +00:00
|
|
|
if err != nil {
|
2021-10-04 15:16:52 +00:00
|
|
|
return nil, fmt.Errorf("getReceipts error: %w", err)
|
2021-04-18 05:05:54 +00:00
|
|
|
}
|
|
|
|
result := make([]map[string]interface{}, 0, len(receipts))
|
|
|
|
for _, receipt := range receipts {
|
|
|
|
txn := block.Transactions()[receipt.TransactionIndex]
|
2022-07-09 03:15:22 +00:00
|
|
|
result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash(), true))
|
2021-04-18 05:05:54 +00:00
|
|
|
}
|
|
|
|
|
2022-07-07 08:40:50 +00:00
|
|
|
if chainConfig.Bor != nil {
|
2022-07-09 03:15:22 +00:00
|
|
|
borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block)
|
2022-07-07 08:40:50 +00:00
|
|
|
if borTx != nil {
|
2022-10-20 18:25:46 +00:00
|
|
|
borReceipt, err := rawdb.ReadBorReceipt(tx, block.Hash(), block.NumberU64(), receipts)
|
2022-09-27 10:39:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2022-07-07 08:40:50 +00:00
|
|
|
if borReceipt != nil {
|
2022-07-09 03:15:22 +00:00
|
|
|
result = append(result, marshalReceipt(borReceipt, borTx, chainConfig, block, borReceipt.TxHash, false))
|
2022-07-07 08:40:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-18 05:05:54 +00:00
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
2022-07-09 03:15:22 +00:00
|
|
|
func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig *params.ChainConfig, block *types.Block, txnHash common.Hash, signed bool) map[string]interface{} {
|
2021-04-23 19:48:00 +00:00
|
|
|
var chainId *big.Int
|
2021-04-22 17:11:37 +00:00
|
|
|
switch t := txn.(type) {
|
|
|
|
case *types.LegacyTx:
|
|
|
|
if t.Protected() {
|
2021-04-23 19:48:00 +00:00
|
|
|
chainId = types.DeriveChainId(&t.V).ToBig()
|
2021-04-22 17:11:37 +00:00
|
|
|
}
|
|
|
|
case *types.AccessListTx:
|
2021-04-23 19:48:00 +00:00
|
|
|
chainId = t.ChainID.ToBig()
|
2021-04-22 17:11:37 +00:00
|
|
|
case *types.DynamicFeeTransaction:
|
2021-04-23 19:48:00 +00:00
|
|
|
chainId = t.ChainID.ToBig()
|
2020-08-01 07:39:04 +00:00
|
|
|
}
|
2022-07-09 03:15:22 +00:00
|
|
|
|
|
|
|
var from common.Address
|
|
|
|
if signed {
|
|
|
|
signer := types.LatestSignerForChainID(chainId)
|
|
|
|
from, _ = txn.Sender(*signer)
|
|
|
|
}
|
2020-08-01 07:39:04 +00:00
|
|
|
|
|
|
|
fields := map[string]interface{}{
|
2021-04-18 05:05:54 +00:00
|
|
|
"blockHash": receipt.BlockHash,
|
|
|
|
"blockNumber": hexutil.Uint64(receipt.BlockNumber.Uint64()),
|
2022-07-09 03:15:22 +00:00
|
|
|
"transactionHash": txnHash,
|
2021-04-18 05:05:54 +00:00
|
|
|
"transactionIndex": hexutil.Uint64(receipt.TransactionIndex),
|
2020-08-01 07:39:04 +00:00
|
|
|
"from": from,
|
2021-04-22 17:11:37 +00:00
|
|
|
"to": txn.GetTo(),
|
2021-04-18 05:05:54 +00:00
|
|
|
"type": hexutil.Uint(txn.Type()),
|
2020-08-01 07:39:04 +00:00
|
|
|
"gasUsed": hexutil.Uint64(receipt.GasUsed),
|
|
|
|
"cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed),
|
|
|
|
"contractAddress": nil,
|
|
|
|
"logs": receipt.Logs,
|
2020-09-28 17:18:36 +00:00
|
|
|
"logsBloom": types.CreateBloom(types.Receipts{receipt}),
|
2020-08-01 07:39:04 +00:00
|
|
|
}
|
|
|
|
|
2021-06-30 02:50:39 +00:00
|
|
|
if !chainConfig.IsLondon(block.NumberU64()) {
|
|
|
|
fields["effectiveGasPrice"] = hexutil.Uint64(txn.GetPrice().Uint64())
|
|
|
|
} else {
|
|
|
|
baseFee, _ := uint256.FromBig(block.BaseFee())
|
|
|
|
gasPrice := new(big.Int).Add(block.BaseFee(), txn.GetEffectiveGasTip(baseFee).ToBig())
|
|
|
|
fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64())
|
|
|
|
}
|
2021-06-23 14:52:31 +00:00
|
|
|
// Assign receipt status.
|
2021-06-30 11:56:00 +00:00
|
|
|
fields["status"] = hexutil.Uint64(receipt.Status)
|
2020-08-01 07:39:04 +00:00
|
|
|
if receipt.Logs == nil {
|
|
|
|
fields["logs"] = [][]*types.Log{}
|
|
|
|
}
|
|
|
|
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
|
|
|
|
if receipt.ContractAddress != (common.Address{}) {
|
|
|
|
fields["contractAddress"] = receipt.ContractAddress
|
|
|
|
}
|
2021-04-18 05:05:54 +00:00
|
|
|
return fields
|
2020-08-01 07:39:04 +00:00
|
|
|
}
|
2020-09-03 07:51:19 +00:00
|
|
|
|
|
|
|
func includes(addresses []common.Address, a common.Address) bool {
|
|
|
|
for _, addr := range addresses {
|
|
|
|
if addr == a {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// filterLogs creates a slice of logs matching the given criteria.
|
large performance optimization for filterLogs (getLogs) when doing queries with many addresses and logs. (#5805)
this pr changes filterLogs to use a pre computed hashset of addresses,
instead of iterating across the list of addresses once per log.
this greatly increases the speed of filter queries that use many
addresses and also return a large number of logs. In our case, we are
performing a query for all the trades performed in a uniswap v3 pool in
a 250 block range.
my benchmarks were performed with the data & code below:
address list gist is here
[addrs](https://gist.githubusercontent.com/elee1766/861c6a55838c88522beae651d9d3b584/raw/2c30b0df439b9c8114ee8aea4083a8039ffcb93b/gistfile1.txt)
```
c := NewRpcClient()
addrs := []common.Address{AddressListGist}
logs, err := c.FilterLogs(context.TODO(), ethereum.FilterQuery{
FromBlock:big.NewInt(15640000),
ToBlock: big.NewInt(15640250),
Addresses: addrs,
Topics: [][]common.Hash{
{
common.HexToHash("c42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67"),
},
},
```
the query contains 8442 addresses, while the response contains 1277 logs
On average, current devel averages a 15.57 second response time on my machine after 10 runs, while the new filterLogs averages 1.05 seconds.
for CURRENT DEVEL, the profile is here: https://pprof.aaaaa.news/cd8dkv0tidul37sctmi0/flamegraph
for the filterLogs branch, the profile is here: https://pprof.aaaaa.news/cd8dlmgtidul37sctmig/flamegraph
while the tests pass with this branch, I am not really sure why filterLogs was originally programmed the way it was. Is there some sort of edge case / compatibility thing that I am missing with this change?
Co-authored-by: a <a@a.a>
2022-10-20 10:34:20 +00:00
|
|
|
func filterLogsOld(logs []*types.Log, addresses []common.Address, topics [][]common.Hash) []*types.Log {
|
2022-03-25 04:17:23 +00:00
|
|
|
result := make(types.Logs, 0, len(logs))
|
2020-09-03 07:51:19 +00:00
|
|
|
Logs:
|
|
|
|
for _, log := range logs {
|
|
|
|
if len(addresses) > 0 && !includes(addresses, log.Address) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// If the to filtered topics is greater than the amount of topics in logs, skip.
|
|
|
|
if len(topics) > len(log.Topics) {
|
|
|
|
continue Logs
|
|
|
|
}
|
|
|
|
for i, sub := range topics {
|
|
|
|
match := len(sub) == 0 // empty rule set == wildcard
|
|
|
|
for _, topic := range sub {
|
|
|
|
if log.Topics[i] == topic {
|
|
|
|
match = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !match {
|
|
|
|
continue Logs
|
|
|
|
}
|
|
|
|
}
|
2022-03-25 04:17:23 +00:00
|
|
|
result = append(result, log)
|
2020-09-03 07:51:19 +00:00
|
|
|
}
|
2022-03-25 04:17:23 +00:00
|
|
|
return result
|
2020-09-03 07:51:19 +00:00
|
|
|
}
|