Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git

snapshots: read block from snapshots, add sender to txs file (#2996)

commit e55256296b
parent d4850b6adc
@@ -1624,7 +1624,7 @@ func compress1(chaindata string, fileName, segmentFileName string) error {
 		select {
 		default:
 		case <-logEvery.C:
-			log.Info("Dictionary preprocessing", "millions", i/1_000_000)
+			log.Info("Dictionary preprocessing", "processed", fmt.Sprintf("%dK", i/1_000))
 		}
 		return nil
 	}); err != nil {
@@ -1874,7 +1874,7 @@ func optimiseCluster(trace bool, numBuf []byte, input []byte, trie *patricia.Pat
 	return output, patterns, uncovered
 }
 
-func reduceDictWorker(inputCh chan []byte, completion *sync.WaitGroup, trie *patricia.PatriciaTree, collector *etl.Collector, inputSize, outputSize atomic2.Uint64, posMap map[uint64]uint64) {
+func reduceDictWorker(inputCh chan []byte, completion *sync.WaitGroup, trie *patricia.PatriciaTree, collector *etl.Collector, inputSize, outputSize *atomic2.Uint64, posMap map[uint64]uint64) {
 	defer completion.Done()
 	var output = make([]byte, 0, 256)
 	var uncovered = make([]int, 256)
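Editorial note: the signature change above (atomic2.Uint64 becomes *atomic2.Uint64) matters because every worker goroutine must bump the same counter; a value parameter would hand each goroutine its own copy. A minimal self-contained sketch of the pattern, using the standard library's sync/atomic as a stand-in (an assumption; erigon-lib's atomic2 wrapper exposes a similar Load/Add surface):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Each worker increments the *shared* counter through a pointer; with a
// value parameter every goroutine would update an independent copy and
// the observed total would stay at zero.
func worker(words <-chan []byte, wg *sync.WaitGroup, inputSize *atomic.Uint64) {
	defer wg.Done()
	for w := range words {
		inputSize.Add(uint64(len(w)))
	}
}

func main() {
	var inputSize atomic.Uint64
	words := make(chan []byte, 4)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go worker(words, &wg, &inputSize)
	}
	words <- []byte("abc")
	words <- []byte("de")
	close(words)
	wg.Wait()
	fmt.Println(inputSize.Load()) // 5
}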
@@ -2163,7 +2163,7 @@ func reducedict(name string, segmentFileName string) error {
 	log.Info("dictionary file parsed", "entries", len(code2pattern))
 	tmpDir := ""
 	ch := make(chan []byte, 10000)
-	var inputSize, outputSize atomic2.Uint64
+	inputSize, outputSize := atomic2.NewUint64(0), atomic2.NewUint64(0)
 	var wg sync.WaitGroup
 	workers := runtime.NumCPU() / 2
 	var collectors []*etl.Collector
@@ -2188,7 +2188,7 @@ func reducedict(name string, segmentFileName string) error {
 		case <-logEvery.C:
 			var m runtime.MemStats
 			runtime.ReadMemStats(&m)
-			log.Info("Replacement preprocessing", "millions", i/1_000_000, "input", common.StorageSize(inputSize.Load()), "output", common.StorageSize(outputSize.Load()), "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys))
+			log.Info("Replacement preprocessing", "processed", fmt.Sprintf("%dK", i/1_000), "input", common.StorageSize(inputSize.Load()), "output", common.StorageSize(outputSize.Load()), "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys))
 		}
 		return nil
 	}); err != nil {
@@ -2564,11 +2564,12 @@ func recsplitWholeChain(chaindata string) error {
 			return 0, err
 		}
 		last := binary.BigEndian.Uint64(k)
-		if last > params.FullImmutabilityThreshold {
-			last -= params.FullImmutabilityThreshold
-		} else {
-			last = 0
-		}
+		// TODO: enable next condition (disabled for tests)
+		//if last > params.FullImmutabilityThreshold {
+		//	last -= params.FullImmutabilityThreshold
+		//} else {
+		//	last = 0
+		//}
 		last = last - last%blocksPerFile
 		return last, nil
 	}
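A quick worked check of the rounding line above: `last - last%blocksPerFile` truncates to the highest complete snapshot-file boundary, so only whole [from:to) files get built. (The 500_000 blocks-per-file figure below is hypothetical, not taken from this diff.)

package main

import "fmt"

func main() {
	const blocksPerFile = 500_000 // hypothetical file size in blocks
	last := uint64(13_764_321)
	last = last - last%blocksPerFile // truncate to a whole-file boundary
	fmt.Println(last)                // 13500000
}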
@@ -2604,7 +2605,7 @@ func recsplitWholeChain(chaindata string) error {
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-		if err := snapshotsync.TransactionsIdx(*chainID, snapshotDir, fileName); err != nil {
+		if err := snapshotsync.TransactionsIdx(*chainID, segmentFile); err != nil {
 			panic(err)
 		}
 		_ = os.Remove(fileName + ".dat")
@@ -2620,7 +2621,8 @@ func recsplitWholeChain(chaindata string) error {
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-		if err := snapshotsync.BodiesIdx(snapshotDir, fileName); err != nil {
+
+		if err := snapshotsync.HeadersIdx(segmentFile); err != nil {
 			panic(err)
 		}
 		_ = os.Remove(fileName + ".dat")
@@ -2636,10 +2638,13 @@ func recsplitWholeChain(chaindata string) error {
 		if err := compress1(chaindata, fileName, segmentFile); err != nil {
 			panic(err)
 		}
-		if err := snapshotsync.BodiesIdx(snapshotDir, fileName); err != nil {
-			return err
+		if err := snapshotsync.BodiesIdx(segmentFile); err != nil {
+			panic(err)
 		}
 		_ = os.Remove(fileName + ".dat")
+
+		//nolint
+		break // TODO: remove me - useful for tests
 	}
 	return nil
 }
@@ -23,6 +23,7 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/services"
 	"github.com/ledgerwatch/erigon/cmd/utils"
 	"github.com/ledgerwatch/erigon/common/paths"
+	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/internal/debug"
 	"github.com/ledgerwatch/erigon/node"
 	"github.com/ledgerwatch/erigon/rpc"
@@ -59,6 +60,7 @@ type Flags struct {
 	TxPoolApiAddr string
 	TevmEnabled   bool
 	StateCache    kvcache.CoherentConfig
+	Snapshot      ethconfig.Snapshot
 }
 
 var rootCmd = &cobra.Command{
@@ -91,6 +93,7 @@ func RootCommand() (*cobra.Command, *Flags) {
 	rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines")
 	rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "127.0.0.1:9090", "txpool api network address, for example: 127.0.0.1:9090")
 	rootCmd.PersistentFlags().BoolVar(&cfg.TevmEnabled, "tevm", false, "Enables Transpiled EVM experiment")
+	rootCmd.PersistentFlags().BoolVar(&cfg.Snapshot.Enabled, "experimental.snapshot", false, "Enables Snapshot Sync")
 	rootCmd.PersistentFlags().IntVar(&cfg.StateCache.KeysLimit, "state.cache", kvcache.DefaultCoherentConfig.KeysLimit, "Amount of keys to store in StateCache (enabled if no --datadir set). Set 0 to disable StateCache. 1_000_000 keys ~ equal to 2Gb RAM (maybe we will add RAM accounting in future versions).")
 
 	if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil {
@@ -115,6 +118,7 @@ func RootCommand() (*cobra.Command, *Flags) {
 		if cfg.Chaindata == "" {
 			cfg.Chaindata = path.Join(cfg.Datadir, "chaindata")
 		}
+		cfg.Snapshot.Dir = path.Join(cfg.Datadir, "snapshots")
 	}
 	cfg.TxPoolV2 = true
 	return nil
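Taken together, the three rpcdaemon hunks above register the flag, carry it in Flags, and derive the snapshot directory from the datadir, so a snapshot-aware daemon would be started along the lines of `rpcdaemon --datadir=<datadir> --experimental.snapshot` (a hypothetical invocation; all other flags elided).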
@@ -233,7 +237,6 @@ func RemoteServices(ctx context.Context, cfg Flags, logger log.Logger, rootCance
 		}
 		db = rwKv
 		stateCache = kvcache.NewDummy()
-		blockReader = snapshotsync.NewBlockReader()
 	} else {
 		if cfg.StateCache.KeysLimit > 0 {
 			stateCache = kvcache.New(cfg.StateCache)
@@ -243,6 +246,17 @@ func RemoteServices(ctx context.Context, cfg Flags, logger log.Logger, rootCance
 			log.Info("if you run RPCDaemon on same machine with Erigon add --datadir option")
 		}
 
+	if cfg.SingleNodeMode {
+		if cfg.Snapshot.Enabled {
+			allSnapshots, err := snapshotsync.OpenAll(cfg.Snapshot.Dir)
+			if err != nil {
+				return nil, nil, nil, nil, nil, nil, err
+			}
+			blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
+		} else {
+			blockReader = snapshotsync.NewBlockReader()
+		}
+	}
 	if cfg.PrivateApiAddr == "" {
 		return db, eth, txPool, mining, stateCache, blockReader, nil
 	}
@@ -264,7 +278,10 @@ func RemoteServices(ctx context.Context, cfg Flags, logger log.Logger, rootCance
 
 	subscribeToStateChangesLoop(ctx, kvClient, stateCache)
 
-	remoteEth := services.NewRemoteBackend(conn, db)
+	if !cfg.SingleNodeMode {
+		blockReader = snapshotsync.NewRemoteBlockReader(remote.NewETHBACKENDClient(conn))
+	}
+	remoteEth := services.NewRemoteBackend(conn, db, blockReader)
 	blockReader = remoteEth
 
 	txpoolConn := conn
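For orientation: all the readers wired up here (NewBlockReader, NewBlockReaderWithSnapshots, NewRemoteBlockReader, and RemoteBackend itself) satisfy one small contract. A sketch of interfaces.BlockReader as inferred from the call sites in this diff (the package body is not shown in this excerpt, so treat the exact declaration as an assumption):

package interfaces

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/core/types"
)

// BlockReader abstracts where a block comes from: the local DB, frozen
// snapshot files, or a remote gRPC backend.
type BlockReader interface {
	BlockWithSenders(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (*types.Block, []common.Address, error)
}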
@@ -12,6 +12,7 @@ import (
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/eth/protocols/eth"
 	"github.com/ledgerwatch/erigon/rlp"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/erigon/turbo/stages"
 	"github.com/stretchr/testify/require"
 )
@@ -36,7 +37,7 @@ func TestEthSubscribe(t *testing.T) {
 	m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
 
 	ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m)
-	backend := services.NewRemoteBackend(conn, m.DB)
+	backend := services.NewRemoteBackend(conn, m.DB, snapshotsync.NewBlockReader())
 	ff := filters.New(ctx, backend, nil, nil)
 
 	newHeads := make(chan *types.Header)
@@ -21,6 +21,7 @@ import (
 	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/ethdb/privateapi"
 	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/erigon/turbo/stages"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/test/bufconn"
@@ -220,7 +221,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g
 	ethashApi := apis[1].Service.(*ethash.API)
 	server := grpc.NewServer()
 
-	remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events))
+	remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader()))
 	txpool.RegisterTxpoolServer(server, m.TxPoolV2GrpcServer)
 	txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi))
 	listener := bufconn.Listen(1024 * 1024)
@@ -1,7 +1,6 @@
 package services
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -10,10 +9,10 @@ import (
 	"github.com/ledgerwatch/erigon-lib/gointerfaces"
 	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/ethdb/privateapi"
-	"github.com/ledgerwatch/erigon/rlp"
 	"github.com/ledgerwatch/log/v3"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/status"
@@ -38,14 +37,16 @@ type RemoteBackend struct {
 	log              log.Logger
 	version          gointerfaces.Version
 	db               kv.RoDB
+	blockReader      interfaces.BlockReader
 }
 
-func NewRemoteBackend(cc grpc.ClientConnInterface, db kv.RoDB) *RemoteBackend {
+func NewRemoteBackend(cc grpc.ClientConnInterface, db kv.RoDB, blockReader interfaces.BlockReader) *RemoteBackend {
 	return &RemoteBackend{
 		remoteEthBackend: remote.NewETHBACKENDClient(cc),
 		version:          gointerfaces.VersionFromProto(privateapi.EthBackendAPIVersion),
 		log:              log.New("remote_service", "eth_backend"),
 		db:               db,
+		blockReader:      blockReader,
 	}
 }
@@ -149,22 +150,6 @@ func (back *RemoteBackend) Subscribe(ctx context.Context, onNewEvent func(*remot
 	return nil
 }
 
-func (back *RemoteBackend) BlockWithSenders(ctx context.Context, _ kv.Tx, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) {
-	reply, err := back.remoteEthBackend.Block(ctx, &remote.BlockRequest{BlockHash: gointerfaces.ConvertHashToH256(hash), BlockHeight: blockHeight})
-	if err != nil {
-		return nil, nil, err
-	}
-	block = &types.Block{}
-	err = rlp.Decode(bytes.NewReader(reply.BlockRlp), block)
-	if err != nil {
-		return nil, nil, err
-	}
-	senders = make([]common.Address, len(reply.Senders)/20)
-	for i := range senders {
-		senders[i].SetBytes(reply.Senders[i*20 : (i+1)*20])
-	}
-	if len(senders) == block.Transactions().Len() { //it's fine if no senders provided - they can be lazy recovered
-		block.SendersToTxs(senders)
-	}
-	return block, senders, nil
+func (back *RemoteBackend) BlockWithSenders(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) {
+	return back.blockReader.BlockWithSenders(ctx, tx, hash, blockHeight)
 }
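A hedged usage sketch of the new constructor (conn and the nil db/tx are placeholders, not from the diff; in the rpcdaemon wiring above the reader is chosen per deployment mode first):

package main

import (
	"context"

	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/services"
	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
	"google.golang.org/grpc"
)

func readOneBlock(ctx context.Context, conn *grpc.ClientConn) error {
	// The third argument is new in this commit: the caller now decides
	// which BlockReader implementation backs BlockWithSenders.
	backend := services.NewRemoteBackend(conn, nil /* kv.RoDB */, snapshotsync.NewBlockReader())
	_, _, err := backend.BlockWithSenders(ctx, nil /* kv.Tx */, common.Hash{}, 1_000_000)
	return err
}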
@@ -532,6 +532,11 @@ var (
 		Usage: "a path to clique db folder",
 		Value: "",
 	}
+
+	SnapshotSyncFlag = cli.BoolFlag{
+		Name:  "experimental.snapshot",
+		Usage: "Enabling experimental snapshot sync",
+	}
 )
 
 var MetricFlags = []cli.Flag{MetricsEnabledFlag, MetricsEnabledExpensiveFlag, MetricsHTTPFlag, MetricsPortFlag}
@@ -939,6 +944,9 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *node.Config) {
 	} else {
 		cfg.DataDir = DataDirForNetwork(cfg.DataDir, chain)
 	}
+
+	cfg.DataDir = DataDirForNetwork(cfg.DataDir, chain)
+
 }
 
 func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
@@ -1202,6 +1210,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
 
 // SetEthConfig applies eth-related command line flags to the config.
 func SetEthConfig(ctx *cli.Context, nodeConfig *node.Config, cfg *ethconfig.Config) {
+	if ctx.GlobalBool(SnapshotSyncFlag.Name) {
+		cfg.Snapshot.Enabled = true
+		cfg.Snapshot.Dir = path.Join(nodeConfig.DataDir, "snapshots")
+	}
+
 	CheckExclusive(ctx, MinerSigningKeyFileFlag, MinerEtherbaseFlag)
 	setEtherbase(ctx, cfg)
 	setGPO(ctx, &cfg.GPO)
@@ -41,6 +41,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
 	txpool2 "github.com/ledgerwatch/erigon-lib/txpool"
 	"github.com/ledgerwatch/erigon-lib/txpool/txpooluitl"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/cmd/sentry/download"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/debug"
@@ -374,7 +375,17 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 		ethashApi = casted.APIs(nil)[1].Service.(*ethash.API)
 	}
 
-	ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events)
+	var blockReader interfaces.BlockReader
+	if config.Snapshot.Enabled {
+		allSnapshots, err := snapshotsync.OpenAll(config.Snapshot.Dir)
+		if err != nil {
+			return nil, err
+		}
+		blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
+	} else {
+		blockReader = snapshotsync.NewBlockReader()
+	}
+	ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader)
 	miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi)
 	if stack.Config().PrivateApiAddr != "" {
 		var creds credentials.TransportCredentials
@@ -39,7 +39,6 @@ import (
 	"github.com/ledgerwatch/erigon/core"
 	"github.com/ledgerwatch/erigon/eth/gasprice"
 	"github.com/ledgerwatch/erigon/params"
-	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/log/v3"
 )
@@ -117,7 +116,6 @@ func init() {
 
 type Snapshot struct {
 	Enabled bool
-	Mode    snapshotsync.SnapshotMode
 	Dir     string
 	Seeding bool
 }
@@ -8,11 +8,11 @@ import (
 	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
 	types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/rlp"
-	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 	"github.com/ledgerwatch/log/v3"
 	"google.golang.org/protobuf/types/known/emptypb"
 )
@@ -29,7 +29,7 @@ type EthBackendServer struct {
 	eth         EthBackend
 	events      *Events
 	db          kv.RoDB
-	blockReader *snapshotsync.BlockReader
+	blockReader interfaces.BlockReader
 }
 
 type EthBackend interface {
@@ -38,8 +38,8 @@ type EthBackend interface {
 	NetPeerCount() (uint64, error)
 }
 
-func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RoDB, events *Events) *EthBackendServer {
-	return &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: snapshotsync.NewBlockReader()}
+func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RoDB, events *Events, blockReader interfaces.BlockReader) *EthBackendServer {
+	return &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader}
 }
 
 func (s *EthBackendServer) Version(context.Context, *emptypb.Empty) (*types2.VersionReply, error) {
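The RemoteBlockReader shown later in this diff consumes reply.BlockRlp and reply.Senders from the ETHBACKEND Block RPC; the serving side (not part of this excerpt) can now produce that reply from the injected reader. A sketch, under the assumption that the reply message is remote.BlockReply with BlockRlp/Senders fields as the client-side code implies:

package privateapi

import (
	"bytes"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/core/types"
)

// blockReply is a hypothetical helper; only the field names BlockRlp
// and Senders are grounded in the RemoteBlockReader client code.
func blockReply(block *types.Block, senders []common.Address) (*remote.BlockReply, error) {
	var buf bytes.Buffer
	if err := block.EncodeRLP(&buf); err != nil {
		return nil, err
	}
	sendersFlat := make([]byte, 20*len(senders)) // 20 bytes per address
	for i, s := range senders {
		copy(sendersFlat[i*20:], s[:])
	}
	return &remote.BlockReply{BlockRlp: buf.Bytes(), Senders: sendersFlat}, nil
}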
@@ -22,3 +22,20 @@ var dbSchemaVersion = Migration{
 		return tx.Commit()
 	},
 }
+
+var dbSchemaVersion5 = Migration{
+	Name: "db_schema_version5",
+	Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) {
+		tx, err := db.BeginRw(context.Background())
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+
+		// This migration is no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info
+		if err := BeforeCommit(tx, nil, true); err != nil {
+			return err
+		}
+		return tx.Commit()
+	},
+}
@@ -37,6 +37,7 @@ var migrations = map[kv.Label][]Migration{
 		fixSequences,
 		storageMode,
 		setPruneType,
+		dbSchemaVersion5,
 	},
 	kv.TxPoolDB: {},
 	kv.SentryDB: {},
@@ -46,6 +46,7 @@ var DefaultFlags = []cli.Flag{
 	StateStreamDisableFlag,
 	SyncLoopThrottleFlag,
 	BadBlockFlag,
+	utils.SnapshotSyncFlag,
 	utils.ListenPortFlag,
 	utils.NATFlag,
 	utils.NoDiscoverFlag,
@@ -205,6 +205,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 			cfg.BadBlockHash = common.BytesToHash(bytes)
 		}
 	}
+
 }
 
 func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) {
@@ -275,7 +276,7 @@ func ApplyFlagsForNodeConfig(ctx *cli.Context, cfg *node.Config) {
 }
 
 // setPrivateApi populates configuration fields related to the remote
-// read-only interface to the databae
+// read-only interface to the database
 func setPrivateApi(ctx *cli.Context, cfg *node.Config) {
 	cfg.PrivateApiAddr = ctx.GlobalString(PrivateApiAddr.Name)
 	cfg.PrivateApiRateLimit = uint32(ctx.GlobalUint64(PrivateApiRateLimit.Name))
@@ -1,13 +1,18 @@
 package snapshotsync
 
 import (
+	"bytes"
 	"context"
+	"encoding/binary"
+	"fmt"
+
+	"github.com/ledgerwatch/erigon-lib/gointerfaces"
+	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/rlp"
 )
 
 // BlockReader can read blocks from db and snapshots
@@ -33,3 +38,109 @@ func (back *BlockReader) BlockWithSenders(ctx context.Context, tx kv.Tx, hash co
 
 	return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)
 }
+
+type RemoteBlockReader struct {
+	client remote.ETHBACKENDClient
+}
+
+func NewRemoteBlockReader(client remote.ETHBACKENDClient) *RemoteBlockReader {
+	return &RemoteBlockReader{client}
+}
+
+func (back *RemoteBlockReader) BlockWithSenders(ctx context.Context, _ kv.Tx, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) {
+	reply, err := back.client.Block(ctx, &remote.BlockRequest{BlockHash: gointerfaces.ConvertHashToH256(hash), BlockHeight: blockHeight})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	block = &types.Block{}
+	err = rlp.Decode(bytes.NewReader(reply.BlockRlp), block)
+	if err != nil {
+		return nil, nil, err
+	}
+	senders = make([]common.Address, len(reply.Senders)/20)
+	for i := range senders {
+		senders[i].SetBytes(reply.Senders[i*20 : (i+1)*20])
+	}
+	if len(senders) == block.Transactions().Len() { //it's fine if no senders provided - they can be lazy recovered
+		block.SendersToTxs(senders)
+	}
+	return block, senders, nil
+}
+
+// BlockReaderWithSnapshots can read blocks from db and snapshots
+type BlockReaderWithSnapshots struct {
+	sn *AllSnapshots
+}
+
+func NewBlockReaderWithSnapshots(snapshots *AllSnapshots) *BlockReaderWithSnapshots {
+	return &BlockReaderWithSnapshots{sn: snapshots}
+}
+
+func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx kv.Tx, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) {
+	sn, ok := back.sn.Blocks(blockHeight)
+	if !ok {
+		canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
+		if err != nil {
+			return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
+		}
+		if canonicalHash == hash {
+			block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
+			if err != nil {
+				return nil, nil, err
+			}
+			return block, senders, nil
+		}
+		return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)
+	}
+
+	buf := make([]byte, 16)
+
+	n := binary.PutUvarint(buf, blockHeight)
+	headerOffset := sn.Headers.Idx.Lookup2(sn.Headers.Idx.Lookup(buf[:n]))
+	bodyOffset := sn.Bodies.Idx.Lookup2(sn.Bodies.Idx.Lookup(buf[:n]))
+
+	gg := sn.Headers.Segment.MakeGetter()
+	gg.Reset(headerOffset)
+	buf, _ = gg.Next(buf[:0]) //TODO: use gg.Current here
+	h := &types.Header{}
+	if err = rlp.DecodeBytes(buf, h); err != nil {
+		return nil, nil, err
+	}
+
+	gg = sn.Bodies.Segment.MakeGetter()
+	gg.Reset(bodyOffset)
+	buf, _ = gg.Next(buf[:0]) //TODO: use gg.Current here
+	b := &types.BodyForStorage{}
+	if err = rlp.DecodeBytes(buf, b); err != nil {
+		return nil, nil, err
+	}
+
+	n = binary.PutUvarint(buf, b.BaseTxId)
+	txnOffset := sn.Transactions.Idx.Lookup2(sn.Transactions.Idx.Lookup(buf[:n]))
+	gg = sn.Transactions.Segment.MakeGetter()
+	gg.Reset(txnOffset)
+	reader := bytes.NewReader(nil)
+	stream := rlp.NewStream(reader, 0)
+	txs := make([]types.Transaction, b.TxAmount)
+	senders = make([]common.Address, b.TxAmount)
+	//TODO: use gg.Current here
+	for i := uint32(0); i < b.TxAmount; i++ {
+		buf, _ = gg.Next(buf[:0])
+		senders[i].SetBytes(buf[1 : 1+20])
+		txRlp := buf[1+20:]
+		reader.Reset(txRlp)
+		stream.Reset(reader, 0)
+		txs[i], err = types.DecodeTransaction(stream)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	block = types.NewBlockFromStorage(hash, h, txs, b.Uncles)
+	if len(senders) != block.Transactions().Len() {
+		return block, senders, nil // no senders is fine - will recover them on the fly
+	}
+	block.SendersToTxs(senders)
+	return block, senders, nil
+}
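The transaction loop above relies on the per-txn record layout introduced by this commit: one byte of the txn hash, then the 20-byte sender address, then the raw txn RLP. A small self-contained splitter for that layout (splitTxnRecord is a hypothetical name, not from the diff):

package main

import "fmt"

// splitTxnRecord unpacks the snapshot txn record written by DumpTxs:
// record[0] is the first byte of the txn hash, record[1:21] the sender
// address, record[21:] the raw transaction RLP.
func splitTxnRecord(record []byte) (hashPrefix byte, sender [20]byte, txnRlp []byte) {
	hashPrefix = record[0]
	copy(sender[:], record[1:21])
	txnRlp = record[21:]
	return
}

func main() {
	record := append(append([]byte{0xab}, make([]byte, 20)...), 0xc0) // fake 22-byte record
	prefix, sender, rlp := splitTxnRecord(record)
	fmt.Println(prefix, sender[:2], rlp)
}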
@@ -25,6 +25,7 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/hack/tool"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/dbutils"
+	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/rlp"
 	"github.com/ledgerwatch/log/v3"
@@ -59,11 +60,11 @@ func IdxFileName(from, to uint64, name SnapshotType) string {
 }
 
 type Snapshot struct {
-	File         string
-	Idx          *recsplit.Index
-	Decompressor *compress.Decompressor
-	From         uint64 // included
-	To           uint64 // excluded
+	File    string
+	Idx     *recsplit.Index
+	Segment *compress.Decompressor
+	From    uint64 // included
+	To      uint64 // excluded
 }
 
 func (s Snapshot) Has(block uint64) bool { return block >= s.From && block < s.To }
@@ -99,7 +100,7 @@ func MustOpenAll(dir string) *AllSnapshots {
 // - segment have [from:to) semantic
 func OpenAll(dir string) (*AllSnapshots, error) {
 	all := &AllSnapshots{dir: dir}
-	files, err := onlyCompressedFilesList(dir)
+	files, err := headersSegments(dir)
 	if err != nil {
 		return nil, err
 	}
@@ -116,9 +117,9 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 			continue
 		}
 		if from != prevTo { // no gaps
 			log.Debug("[open snapshots] snapshot missed before", "file", f)
+			break
 		}
-
 		prevTo = to
 
 		blocksSnapshot := &BlocksSnapshot{From: from, To: to}
@@ -139,7 +140,7 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 			}
 			return nil, err
 		}
-		blocksSnapshot.Bodies = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Decompressor: d, Idx: idx}
+		blocksSnapshot.Bodies = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
 	}
 	{
 		fileName := SegmentFileName(from, to, Headers)
@@ -158,7 +159,7 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 			return nil, err
 		}
 
-		blocksSnapshot.Headers = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Decompressor: d, Idx: idx}
+		blocksSnapshot.Headers = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
 	}
 	{
 		fileName := SegmentFileName(from, to, Transactions)
@@ -176,7 +177,7 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 			}
 			return nil, err
 		}
-		blocksSnapshot.Transactions = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Decompressor: d, Idx: idx}
+		blocksSnapshot.Transactions = &Snapshot{From: from, To: to, File: path.Join(dir, fileName), Segment: d, Idx: idx}
 	}
 
 	all.blocks = append(all.blocks, blocksSnapshot)
@@ -188,11 +189,11 @@ func OpenAll(dir string) (*AllSnapshots, error) {
 func (s AllSnapshots) Close() {
 	for _, s := range s.blocks {
 		s.Headers.Idx.Close()
-		s.Headers.Decompressor.Close()
+		s.Headers.Segment.Close()
 		s.Bodies.Idx.Close()
-		s.Bodies.Decompressor.Close()
+		s.Bodies.Segment.Close()
 		s.Transactions.Idx.Close()
-		s.Transactions.Decompressor.Close()
+		s.Transactions.Segment.Close()
 	}
 }
@@ -208,7 +209,7 @@ func (s AllSnapshots) Blocks(blockNumber uint64) (snapshot *BlocksSnapshot, foun
 	return snapshot, false
 }
 
-func onlyCompressedFilesList(dir string) ([]string, error) {
+func headersSegments(dir string) ([]string, error) {
 	files, err := ioutil.ReadDir(dir)
 	if err != nil {
 		return nil, err
@@ -224,6 +225,9 @@ func onlyCompressedFilesList(dir string) ([]string, error) {
 		if filepath.Ext(f.Name()) != ".seg" { // filter out only compressed files
 			continue
 		}
+		if !strings.Contains(f.Name(), string(Headers)) {
+			continue
+		}
 		res = append(res, f.Name())
 	}
 	sort.Strings(res)
@@ -270,6 +274,8 @@ func ParseCompressedFileName(name string) (from, to uint64, snapshotType Snapsho
 	return from * 1_000, to * 1_000, snapshotType, nil
 }
 
+// DumpTxs -
+// Format: hash[0]_1byte + sender_address_20bytes + txnRlp
 func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) error {
 	tmpFileName := TmpFileName(fromBlock, fromBlock+uint64(blocksAmount), Transactions)
 	tmpFileName = path.Join(tmpdir, tmpFileName)
@@ -293,31 +299,49 @@ func DumpTxs(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) erro
 	slot := txpool.TxSlot{}
 	valueBuf := make([]byte, 16*4096)
 	from := dbutils.EncodeBlockNumber(fromBlock)
-	if err := kv.BigChunks(db, kv.BlockBody, from, func(tx kv.Tx, k, v []byte) (bool, error) {
+	if err := kv.BigChunks(db, kv.HeaderCanonical, from, func(tx kv.Tx, k, v []byte) (bool, error) {
 		blockNum := binary.BigEndian.Uint64(k)
 		if blockNum >= fromBlock+uint64(blocksAmount) {
 			return false, nil
 		}
+
+		h := common.BytesToHash(v)
+		dataRLP := rawdb.ReadStorageBodyRLP(tx, h, blockNum)
 		var body types.BodyForStorage
-		if e := rlp.DecodeBytes(v, &body); e != nil {
+		if e := rlp.DecodeBytes(dataRLP, &body); e != nil {
 			return false, e
 		}
 		if body.TxAmount == 0 {
 			return true, nil
 		}
+		senders, err := rawdb.ReadSenders(tx, h, blockNum)
+		if err != nil {
+			return false, err
+		}
+
 		binary.BigEndian.PutUint64(numBuf, body.BaseTxId)
+		j := 0
 		if err := tx.ForAmount(kv.EthTx, numBuf[:8], body.TxAmount, func(tk, tv []byte) error {
 			if _, err := parseCtx.ParseTransaction(tv, 0, &slot, nil); err != nil {
 				return err
 			}
+			var sender []byte
+			if len(senders) > 0 {
+				sender = senders[j][:]
+			} else {
+				sender = make([]byte, 20) // TODO: return error here
+				//panic("not implemented")
+			}
+			_ = sender
 			valueBuf = valueBuf[:0]
-			valueBuf = append(append(valueBuf, slot.IdHash[:1]...), tv...)
+			valueBuf = append(valueBuf, slot.IdHash[:1]...)
+			valueBuf = append(valueBuf, sender...)
+			valueBuf = append(valueBuf, tv...)
 			if err := f.Append(valueBuf); err != nil {
 				return err
 			}
 			i++
+			j++
 
 			select {
 			default:
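The three appends above replace the old two-append form; written out, each record is assembled as in this sketch (buildTxnRecord is a hypothetical helper mirroring the appends in DumpTxs; baking the sender in at dump time lets snapshot readers skip expensive signature recovery later):

package main

import "fmt"

// buildTxnRecord mirrors the appends in DumpTxs: 1-byte txn-hash
// prefix, then the 20-byte sender, then the raw txn RLP.
func buildTxnRecord(buf []byte, idHash0 byte, sender [20]byte, txnRlp []byte) []byte {
	buf = buf[:0]
	buf = append(buf, idHash0)
	buf = append(buf, sender[:]...)
	buf = append(buf, txnRlp...)
	return buf
}

func main() {
	var sender [20]byte
	rec := buildTxnRecord(nil, 0xab, sender, []byte{0xc0})
	fmt.Println(len(rec)) // 22 = 1 + 20 + 1
}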
@@ -448,76 +472,56 @@ func DumpBodies(db kv.RoDB, tmpdir string, fromBlock uint64, blocksAmount int) e
 	}); err != nil {
 		return err
 	}
 
 	w.Flush()
 	s, _ := f.Stat()
 	fmt.Printf("%dKb\n", s.Size()/1024)
 	return nil
 }
 
-func TransactionsIdx(chainID uint256.Int, dir, name string) error {
-	d, err := compress.NewDecompressor(path.Join(dir, name+".seg"))
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-	logEvery := time.NewTicker(20 * time.Second)
-	defer logEvery.Stop()
-
-	rs, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
-		KeyCount:   d.Count(),
-		Enums:      true,
-		BucketSize: 2000,
-		Salt:       0,
-		LeafSize:   8,
-		TmpDir:     "",
-		IndexFile:  path.Join(dir, name+".idx"),
-	})
-	if err != nil {
-		return err
-	}
-
-RETRY:
-	g := d.MakeGetter()
-	wc := 0
-	var pos uint64
+func TransactionsIdx(chainID uint256.Int, segmentFileName string) error {
 	parseCtx := txpool.NewTxParseContext(chainID)
 	parseCtx.WithSender(false)
 	slot := txpool.TxSlot{}
 	var sender [20]byte
-	word := make([]byte, 0, 4*1024)
-
-	for g.HasNext() {
-		word, pos = g.Next(word[:0])
-		if _, err := parseCtx.ParseTransaction(word[1:], 0, &slot, sender[:]); err != nil {
+	if err := Idx(segmentFileName, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
+		if _, err := parseCtx.ParseTransaction(word[1+20:], 0, &slot, sender[:]); err != nil {
 			return err
 		}
-		if err := rs.AddKey(slot.IdHash[:], pos); err != nil {
+		if err := idx.AddKey(slot.IdHash[:], offset); err != nil {
 			return err
 		}
-		wc++
-		select {
-		default:
-		case <-logEvery.C:
-			log.Info("[Filling recsplit] Processed", "millions", wc/1_000_000)
-		}
+		return nil
+	}); err != nil {
+		return fmt.Errorf("TransactionsIdx: %w", err)
 	}
-
-	if err = rs.Build(); err != nil {
-		if errors.Is(err, recsplit.ErrCollision) {
-			log.Info("Building recsplit. Collision happened. It's ok. Restarting...", "err", err)
-			rs.ResetNextSalt()
-			goto RETRY
-		}
-		return err
-	}
-
 	return nil
 }
 
-func BodiesIdx(dir, name string) error {
-	d, err := compress.NewDecompressor(path.Join(dir, name+".seg"))
+func HeadersIdx(segmentFileName string) error {
+	num := make([]byte, 8)
+	if err := Idx(segmentFileName, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
+		n := binary.PutUvarint(num, i)
+		return idx.AddKey(num[:n], offset)
+	}); err != nil {
+		return fmt.Errorf("HeadersIdx: %w", err)
+	}
+	return nil
+}
+
+func BodiesIdx(segmentFileName string) error {
+	num := make([]byte, 8)
+	if err := Idx(segmentFileName, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
+		n := binary.PutUvarint(num, i)
+		return idx.AddKey(num[:n], offset)
+	}); err != nil {
+		return fmt.Errorf("BodiesIdx: %w", err)
+	}
+	return nil
+}
+
+// Idx - iterate over segment and building .idx file
+func Idx(segmentFileName string, walker func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error) error {
+	var extension = filepath.Ext(segmentFileName)
+	var idxFileName = segmentFileName[0:len(segmentFileName)-len(extension)] + ".idx"
+
+	d, err := compress.NewDecompressor(segmentFileName)
 	if err != nil {
 		return err
 	}
@@ -532,7 +536,7 @@ func BodiesIdx(dir, name string) error {
 		Salt:       0,
 		LeafSize:   8,
 		TmpDir:     "",
-		IndexFile:  path.Join(dir, name+".idx"),
+		IndexFile:  idxFileName,
 	})
 	if err != nil {
 		return err
@@ -541,14 +545,11 @@ func BodiesIdx(dir, name string) error {
 RETRY:
 
 	g := d.MakeGetter()
-	wc := 0
-	var pos uint64
+	var wc, pos uint64
 	word := make([]byte, 0, 4096)
-	num := make([]byte, 8)
 	for g.HasNext() {
 		word, pos = g.Next(word[:0])
-		binary.BigEndian.PutUint64(num, uint64(wc))
-		if err := rs.AddKey(num, pos); err != nil {
+		if err := walker(rs, wc, pos, word); err != nil {
 			return err
 		}
 		wc++
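The Idx helper factored out above turns index construction into a one-line walker over segment words. A usage sketch that mirrors HeadersIdx/BodiesIdx exactly (buildOrdinalIdx is a hypothetical name, and the recsplit import path is assumed from the decompressor imports in this diff):

package snapshotsync

import (
	"encoding/binary"

	"github.com/ledgerwatch/erigon-lib/recsplit"
)

// buildOrdinalIdx keys each word by its ordinal i (uvarint-encoded),
// mapping it to the word's offset inside the segment file - the same
// scheme HeadersIdx and BodiesIdx use.
func buildOrdinalIdx(segmentFileName string) error {
	num := make([]byte, 8)
	return Idx(segmentFileName, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
		n := binary.PutUvarint(num, i)
		return idx.AddKey(num[:n], offset)
	})
}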