// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package eth implements the Ethereum protocol.
package eth

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"math/big"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru/arc/v2"
	"github.com/holiman/uint256"
	"github.com/ledgerwatch/log/v3"
	"golang.org/x/exp/slices"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon-lib/chain/networkname"
	"github.com/ledgerwatch/erigon-lib/chain/snapcfg"
	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/datadir"
	"github.com/ledgerwatch/erigon-lib/diagnostics"
	"github.com/ledgerwatch/erigon-lib/direct"
	"github.com/ledgerwatch/erigon-lib/downloader"
	"github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
	"github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc"
	proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
	rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
	proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
	txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
	prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
	"github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
	libstate "github.com/ledgerwatch/erigon-lib/state"
	"github.com/ledgerwatch/erigon-lib/txpool"
	"github.com/ledgerwatch/erigon-lib/txpool/txpooluitl"
	types2 "github.com/ledgerwatch/erigon-lib/types"
	"github.com/ledgerwatch/erigon-lib/wrap"

	"github.com/ledgerwatch/erigon/cl/clparams"
	"github.com/ledgerwatch/erigon/cl/cltypes"
	"github.com/ledgerwatch/erigon/cl/fork"
	"github.com/ledgerwatch/erigon/cl/persistence"
	"github.com/ledgerwatch/erigon/cl/persistence/db_config"
	"github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters"
	clcore "github.com/ledgerwatch/erigon/cl/phase1/core"
	"github.com/ledgerwatch/erigon/cl/phase1/execution_client"
	"github.com/ledgerwatch/erigon/cl/sentinel"
	"github.com/ledgerwatch/erigon/cl/sentinel/service"
	"github.com/ledgerwatch/erigon/cmd/caplin/caplin1"
	"github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli"
	"github.com/ledgerwatch/erigon/common/debug"
	"github.com/ledgerwatch/erigon/consensus"
	"github.com/ledgerwatch/erigon/consensus/clique"
	"github.com/ledgerwatch/erigon/consensus/ethash"
	"github.com/ledgerwatch/erigon/consensus/merge"
	"github.com/ledgerwatch/erigon/consensus/misc"
	"github.com/ledgerwatch/erigon/core"
	"github.com/ledgerwatch/erigon/core/rawdb"
	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
	"github.com/ledgerwatch/erigon/core/state/temporal"
	"github.com/ledgerwatch/erigon/core/systemcontracts"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/core/vm"
	"github.com/ledgerwatch/erigon/crypto"
	"github.com/ledgerwatch/erigon/eth/ethconfig"
	"github.com/ledgerwatch/erigon/eth/ethconsensusconfig"
	"github.com/ledgerwatch/erigon/eth/ethutils"
	"github.com/ledgerwatch/erigon/eth/protocols/eth"
	"github.com/ledgerwatch/erigon/eth/stagedsync"
	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
	"github.com/ledgerwatch/erigon/ethdb/privateapi"
	"github.com/ledgerwatch/erigon/ethdb/prune"
	"github.com/ledgerwatch/erigon/ethstats"
	"github.com/ledgerwatch/erigon/node"
	"github.com/ledgerwatch/erigon/p2p"
	"github.com/ledgerwatch/erigon/p2p/enode"
	"github.com/ledgerwatch/erigon/p2p/sentry"
	"github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client"
	"github.com/ledgerwatch/erigon/params"
	"github.com/ledgerwatch/erigon/polygon/bor"
	"github.com/ledgerwatch/erigon/polygon/bor/finality/flags"
	"github.com/ledgerwatch/erigon/polygon/bor/valset"
	"github.com/ledgerwatch/erigon/polygon/heimdall"
	"github.com/ledgerwatch/erigon/rpc"
	"github.com/ledgerwatch/erigon/turbo/builder"
	"github.com/ledgerwatch/erigon/turbo/engineapi"
	"github.com/ledgerwatch/erigon/turbo/engineapi/engine_block_downloader"
	"github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers"
	"github.com/ledgerwatch/erigon/turbo/execution/eth1"
	"github.com/ledgerwatch/erigon/turbo/execution/eth1/eth1_chain_reader.go"
	"github.com/ledgerwatch/erigon/turbo/jsonrpc"
	"github.com/ledgerwatch/erigon/turbo/services"
	"github.com/ledgerwatch/erigon/turbo/shards"
	"github.com/ledgerwatch/erigon/turbo/silkworm"
	"github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
	"github.com/ledgerwatch/erigon/turbo/snapshotsync/snap"
	stages2 "github.com/ledgerwatch/erigon/turbo/stages"
	"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
)

// Config contains the configuration options of the ETH protocol.
// Deprecated: use ethconfig.Config instead.
type Config = ethconfig.Config

// Ethereum implements the Ethereum full node service.
type Ethereum struct {
	config *ethconfig.Config

	// DB interfaces
	chainDB    kv.RwDB
	privateAPI *grpc.Server

	engine consensus.Engine

	gasPrice  *uint256.Int
	etherbase libcommon.Address

	networkID uint64

	lock         sync.RWMutex // Protects the mutable fields (e.g. gas price and etherbase)
	chainConfig  *chain.Config
	apiList      []rpc.API
	genesisBlock *types.Block
	genesisHash  libcommon.Hash

	eth1ExecutionServer *eth1.EthereumExecutionModule

	ethBackendRPC      *privateapi.EthBackendServer
	engineBackendRPC   *engineapi.EngineServer
	miningRPC          txpool_proto.MiningServer
	stateChangesClient txpool.StateChangesClient

	miningSealingQuit chan struct{}
	pendingBlocks     chan *types.Block
	minedBlocks       chan *types.Block

	// downloader fields
	sentryCtx      context.Context
	sentryCancel   context.CancelFunc
	sentriesClient *sentry_multi_client.MultiClient
	sentryServers  []*sentry.GrpcServer

	stagedSync         *stagedsync.Sync
	pipelineStagedSync *stagedsync.Sync
	syncStages         []*stagedsync.Stage
	syncUnwindOrder    stagedsync.UnwindOrder
	syncPruneOrder     stagedsync.PruneOrder

	downloaderClient proto_downloader.DownloaderClient

	notifications      *shards.Notifications
	unsubscribeEthstat func()

	waitForStageLoopStop chan struct{}
	waitForMiningStop    chan struct{}

	txPoolDB                kv.RwDB
	txPool                  *txpool.TxPool
	newTxs                  chan types2.Announcements
	txPoolFetch             *txpool.Fetch
	txPoolSend              *txpool.Send
	txPoolGrpcServer        txpool_proto.TxpoolServer
	notifyMiningAboutNewTxs chan struct{}
	forkValidator           *engine_helpers.ForkValidator
	downloader              *downloader.Downloader

	agg            *libstate.AggregatorV3
	blockSnapshots *freezeblocks.RoSnapshots
	blockReader    services.FullBlockReader
	blockWriter    *blockio.BlockWriter
	kvRPC          *remotedbserver.KvServer
	logger         log.Logger

	sentinel rpcsentinel.SentinelClient

	silkworm                 *silkworm.Silkworm
	silkwormRPCDaemonService *silkworm.RpcDaemonService
	silkwormSentryService    *silkworm.SentryService
}
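
// splitAddrIntoHostAndPort splits a "host:port" listen address at the last ':'
// and parses the port part; addresses without a port part yield an error.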
func splitAddrIntoHostAndPort(addr string) (host string, port int, err error) {
	idx := strings.LastIndexByte(addr, ':')
	if idx < 0 {
		return "", 0, errors.New("invalid address format")
	}
	host = addr[:idx]
	port, err = strconv.Atoi(addr[idx+1:])
	return
}
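
// blockBufferSize is the block buffer size handed to sentry_multi_client.NewMultiClient below.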
const blockBufferSize = 128

// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) {
	config.Snapshot.Enabled = config.Sync.UseSnapshots
	if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 {
		logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
		config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
	}

	dirs := stack.Config().Dirs
	tmpdir := dirs.Tmp
	if err := RemoveContents(tmpdir); err != nil { // clean it on startup
		return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err)
	}

	// Assemble the Ethereum object
	chainKv, err := node.OpenDatabase(ctx, stack.Config(), kv.ChainDB, "", false, logger)
	if err != nil {
		return nil, err
	}
	latestBlockBuiltStore := builder.NewLatestBlockBuiltStore()
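
	// Persist and verify node-level settings (prune mode, history.v3, snapshot usage) so they
	// cannot silently change between restarts of the same datadir.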
	if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error {
		if err = stagedsync.UpdateMetrics(tx); err != nil {
			return err
		}

		config.Prune, err = prune.EnsureNotChanged(tx, config.Prune)
		if err != nil {
			return err
		}

		config.HistoryV3, err = kvcfg.HistoryV3.WriteOnce(tx, config.HistoryV3)
		if err != nil {
			return err
		}

		isCorrectSync, useSnapshots, err := snap.EnsureNotChanged(tx, config.Snapshot)
		if err != nil {
			return err
		}
		// if we are in the incorrect syncmode then we change it to the appropriate one
		if !isCorrectSync {
			config.Sync.UseSnapshots = useSnapshots
			config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(config.Genesis.Config.ChainName) && useSnapshots
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if !config.Sync.UseSnapshots {
		if err := downloader.CreateProhibitNewDownloadsFile(dirs.Snap); err != nil {
			return nil, err
		}
	}

	ctx, ctxCancel := context.WithCancel(context.Background())

	// kv_remote architecture blocks on stream.Send - meaning the current architecture requires an unlimited amount of txs to provide good throughput
	backend := &Ethereum{
		sentryCtx:            ctx,
		sentryCancel:         ctxCancel,
		config:               config,
		chainDB:              chainKv,
		networkID:            config.NetworkID,
		etherbase:            config.Miner.Etherbase,
		waitForStageLoopStop: make(chan struct{}),
		waitForMiningStop:    make(chan struct{}),
		notifications: &shards.Notifications{
			Events:      shards.NewEvents(),
			Accumulator: shards.NewAccumulator(),
		},
		logger: logger,
	}
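
	// Write the genesis block on first start-up; if block 0 is already in the DB, fall back to the
	// stored chain config instead of config.Genesis (genesisSpec is set to nil below).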
	var chainConfig *chain.Config
	var genesis *types.Block
	if err := backend.chainDB.Update(context.Background(), func(tx kv.RwTx) error {
		h, err := rawdb.ReadCanonicalHash(tx, 0)
		if err != nil {
			panic(err)
		}
		genesisSpec := config.Genesis
		if h != (libcommon.Hash{}) { // fallback to db content
			genesisSpec = nil
		}
		var genesisErr error
		chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideCancunTime, tmpdir, logger)
		if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok {
			return genesisErr
		}

		return nil
	}); err != nil {
		panic(err)
	}
	backend.chainConfig = chainConfig
	backend.genesisBlock = genesis
	backend.genesisHash = genesis.Hash()

	logger.Info("Initialised chain configuration", "config", chainConfig, "genesis", genesis.Hash())

	snapshotVersion := snapcfg.KnownCfg(chainConfig.ChainName, 0).Version

	// Check if we have an already initialized chain and fall back to
	// that if so. Otherwise we need to generate a new genesis spec.
	blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, snapshotVersion, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger)
	if err != nil {
		return nil, err
	}
	backend.agg, backend.blockSnapshots, backend.blockReader, backend.blockWriter = agg, allSnapshots, blockReader, blockWriter

	if config.HistoryV3 {
		backend.chainDB, err = temporal.New(backend.chainDB, agg, systemcontracts.SystemContractCodeLookup[config.Genesis.Config.ChainName])
		if err != nil {
			return nil, err
		}
		chainKv = backend.chainDB //nolint
	}

	if err := backend.setUpSnapDownloader(ctx, config.Downloader); err != nil {
		return nil, err
	}

	kvRPC := remotedbserver.NewKvServer(ctx, backend.chainDB, allSnapshots, allBorSnapshots, agg, logger)
	backend.notifications.StateChangesConsumer = kvRPC
	backend.kvRPC = kvRPC

	backend.gasPrice, _ = uint256.FromBig(config.Miner.GasPrice)

	if config.SilkwormExecution || config.SilkwormRpcDaemon || config.SilkwormSentry {
		backend.silkworm, err = silkworm.New(config.Dirs.DataDir)
		if err != nil {
			return nil, err
		}
	}
	var sentries []direct.SentryClient
	if len(stack.Config().P2P.SentryAddr) > 0 {
		for _, addr := range stack.Config().P2P.SentryAddr {
			sentryClient, err := sentry_multi_client.GrpcClient(backend.sentryCtx, addr)
			if err != nil {
				return nil, err
			}
			sentries = append(sentries, sentryClient)
		}
	} else if config.SilkwormSentry {
		apiPort := 53774
		apiAddr := fmt.Sprintf("127.0.0.1:%d", apiPort)
		p2pConfig := stack.Config().P2P

		collectNodeURLs := func(nodes []*enode.Node) []string {
			var urls []string
			for _, n := range nodes {
				urls = append(urls, n.URLv4())
			}
			return urls
		}

		settings := silkworm.SentrySettings{
			ClientId:    p2pConfig.Name,
			ApiPort:     apiPort,
			Port:        p2pConfig.ListenPort(),
			Nat:         p2pConfig.NATSpec,
			NetworkId:   config.NetworkID,
			NodeKey:     crypto.FromECDSA(p2pConfig.PrivateKey),
			StaticPeers: collectNodeURLs(p2pConfig.StaticNodes),
			Bootnodes:   collectNodeURLs(p2pConfig.BootstrapNodes),
			NoDiscover:  p2pConfig.NoDiscovery,
			MaxPeers:    p2pConfig.MaxPeers,
		}

		silkwormSentryService := silkworm.NewSentryService(backend.silkworm, settings)
		backend.silkwormSentryService = &silkwormSentryService

		sentryClient, err := sentry_multi_client.GrpcClient(backend.sentryCtx, apiAddr)
		if err != nil {
			return nil, err
		}
		sentries = append(sentries, sentryClient)
	} else {
		var readNodeInfo = func() *eth.NodeInfo {
			var res *eth.NodeInfo
			_ = backend.chainDB.View(context.Background(), func(tx kv.Tx) error {
				res = eth.ReadNodeInfo(tx, backend.chainConfig, backend.genesisHash, backend.networkID)
				return nil
			})

			return res
		}

		discovery := func() enode.Iterator {
			d, err := setupDiscovery(backend.config.EthDiscoveryURLs)
			if err != nil {
				panic(err)
			}
			return d
		}

		refCfg := stack.Config().P2P
		listenHost, listenPort, err := splitAddrIntoHostAndPort(refCfg.ListenAddr)
		if err != nil {
			return nil, err
		}

		var pi int // points to next port to be picked from refCfg.AllowedPorts
		for _, protocol := range refCfg.ProtocolVersion {
			cfg := refCfg
			cfg.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, eth.ProtocolToString[protocol])

			// pick port from allowed list
			var picked bool
			for ; pi < len(refCfg.AllowedPorts) && !picked; pi++ {
				pc := int(refCfg.AllowedPorts[pi])
				if pc == 0 {
					// For ephemeral ports probing to see if the port is taken does not
					// make sense.
					picked = true
					break
				}
				if !checkPortIsFree(fmt.Sprintf("%s:%d", listenHost, pc)) {
					logger.Warn("bind protocol to port has failed: port is busy", "protocols", fmt.Sprintf("eth/%d", refCfg.ProtocolVersion), "port", pc)
					continue
				}
				if listenPort != pc {
					listenPort = pc
				}
				pi++
				picked = true
				break
			}
			if !picked {
				return nil, fmt.Errorf("run out of allowed ports for p2p eth protocols %v. Extend allowed port list via --p2p.allowed-ports", cfg.AllowedPorts)
			}

			cfg.ListenAddr = fmt.Sprintf("%s:%d", listenHost, listenPort)

			server := sentry.NewGrpcServer(backend.sentryCtx, discovery, readNodeInfo, &cfg, protocol, logger)
			backend.sentryServers = append(backend.sentryServers, server)
			sentries = append(sentries, direct.NewSentryClientDirect(protocol, server))
		}

		go func() {
			logEvery := time.NewTicker(180 * time.Second)
			defer logEvery.Stop()

			var logItems []interface{}

			for {
				select {
				case <-backend.sentryCtx.Done():
					return
				case <-logEvery.C:
					logItems = logItems[:0]
					peerCountMap := map[uint]int{}
					for _, srv := range backend.sentryServers {
						counts := srv.SimplePeerCount()
						for protocol, count := range counts {
							peerCountMap[protocol] += count
						}
					}
					for protocol, count := range peerCountMap {
						logItems = append(logItems, eth.ProtocolToString[protocol], strconv.Itoa(count))
					}
					logger.Info("[p2p] GoodPeers", logItems...)
				}
			}
		}()
	}

	var currentBlock *types.Block
	if err := chainKv.View(context.Background(), func(tx kv.Tx) error {
		currentBlock, err = blockReader.CurrentBlock(tx)
		return err
	}); err != nil {
		panic(err)
	}

	currentBlockNumber := uint64(0)
	if currentBlock != nil {
		currentBlockNumber = currentBlock.NumberU64()
	}

	logger.Info("Initialising Ethereum protocol", "network", config.NetworkID)
	var consensusConfig interface{}

	if chainConfig.Clique != nil {
		consensusConfig = &config.Clique
	} else if chainConfig.Aura != nil {
		consensusConfig = &config.Aura
	} else if chainConfig.Bor != nil {
		consensusConfig = chainConfig.Bor
	} else {
		consensusConfig = &config.Ethash
	}
	var heimdallClient heimdall.HeimdallClient
	if chainConfig.Bor != nil {
		if !config.WithoutHeimdall {
			heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger)
		}

		flags.Milestone = config.WithHeimdallMilestones
	}

	backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger)
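
	// inMemoryExecution validates a candidate block by executing it in memory (with its own
	// notifications, so RPC daemon and txpool are not updated); it backs the fork validator below.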
	inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
		notifications *shards.Notifications) error {
		terseLogger := log.New()
		terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
		// Needs its own notifications to not update RPC daemon and txpool about pending blocks
		stateSync := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient,
			dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger)
		chainReader := stagedsync.NewChainReaderImpl(chainConfig, txc.Tx, blockReader, logger)
		// We start the mining step
		if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil {
			logger.Warn("Could not validate block", "err", err)
			return err
		}
		progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
		if err != nil {
			return err
		}
		if progress < header.Number.Uint64() {
			return fmt.Errorf("unsuccessful execution, progress %d < expected %d", progress, header.Number.Uint64())
		}
		return nil
	}
	backend.forkValidator = engine_helpers.NewForkValidator(ctx, currentBlockNumber, inMemoryExecution, tmpdir, backend.blockReader)

	// limit "new block" broadcasts to at most 10 random peers at time
	maxBlockBroadcastPeers := func(header *types.Header) uint { return 10 }

	// unlimited "new block" broadcasts to all peers for blocks announced by Bor validators
	if borEngine, ok := backend.engine.(*bor.Bor); ok {
		defaultValue := maxBlockBroadcastPeers(nil)
		maxBlockBroadcastPeers = func(header *types.Header) uint {
			isValidator, err := borEngine.IsValidator(header)
			if err != nil {
				logger.Warn("maxBlockBroadcastPeers: borEngine.IsValidator has failed", "err", err)
				return defaultValue
			}
			if isValidator {
				// 0 means send to all
				return 0
			}
			return defaultValue
		}
	}

	backend.sentriesClient, err = sentry_multi_client.NewMultiClient(
		chainKv,
		stack.Config().NodeName(),
		chainConfig,
		genesis.Hash(),
		genesis.Time(),
		backend.engine,
		backend.config.NetworkID,
		sentries,
		config.Sync,
		blockReader,
		blockBufferSize,
		stack.Config().SentryLogPeerInfo,
		backend.forkValidator,
		maxBlockBroadcastPeers,
		logger,
	)
	if err != nil {
		return nil, err
	}
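
	// Transaction pool wiring: either a disabled gRPC stub, or the full erigon-lib txpool components.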
	config.TxPool.NoGossip = config.DisableTxPoolGossip
	var miningRPC txpool_proto.MiningServer
	stateDiffClient := direct.NewStateDiffClientDirect(kvRPC)
	if config.DeprecatedTxPool.Disable {
		backend.txPoolGrpcServer = &txpool.GrpcDisabled{}
	} else {
		//cacheConfig := kvcache.DefaultCoherentCacheConfig
		//cacheConfig.MetricsLabel = "txpool"

		backend.newTxs = make(chan types2.Announcements, 1024)
		//defer close(newTxs)
		backend.txPoolDB, backend.txPool, backend.txPoolFetch, backend.txPoolSend, backend.txPoolGrpcServer, err = txpooluitl.AllComponents(
			ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, backend.chainDB, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger,
		)
		if err != nil {
			return nil, err
		}
	}
|
2021-09-02 10:04:30 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
backend.notifyMiningAboutNewTxs = make(chan struct{}, 1)
|
|
|
|
backend.miningSealingQuit = make(chan struct{})
|
|
|
|
backend.pendingBlocks = make(chan *types.Block, 1)
|
|
|
|
backend.minedBlocks = make(chan *types.Block, 1)
|
2021-09-02 10:04:30 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
miner := stagedsync.NewMiningState(&config.Miner)
|
|
|
|
backend.pendingBlocks = miner.PendingResultCh
|
2021-06-29 10:00:22 +00:00
|
|
|
|
2023-11-04 17:44:34 +00:00
|
|
|
var (
|
|
|
|
snapDb kv.RwDB
|
|
|
|
recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot]
|
|
|
|
signatures *lru.ARCCache[libcommon.Hash, libcommon.Address]
|
|
|
|
)
|
|
|
|
if bor, ok := backend.engine.(*bor.Bor); ok {
|
|
|
|
snapDb = bor.DB
|
|
|
|
recents = bor.Recents
|
|
|
|
signatures = bor.Signatures
|
|
|
|
}

	// proof-of-work mining
	mining := stagedsync.New(
		config.Sync,
		stagedsync.MiningStages(backend.sentryCtx,
			stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader),
			stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures),
			stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool, backend.txPoolDB, blockReader),
			stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3),
			stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
			stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
		), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
		logger)

	var ethashApi *ethash.API
	if casted, ok := backend.engine.(*ethash.Ethash); ok {
		ethashApi = casted.APIs(nil)[1].Service.(*ethash.API)
	}

	// proof-of-stake mining
	assembleBlockPOS := func(param *core.BlockBuilderParameters, interrupt *int32) (*types.BlockWithReceipts, error) {
		miningStatePos := stagedsync.NewProposingState(&config.Miner)
		miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient
		proposingSync := stagedsync.New(
			config.Sync,
			stagedsync.MiningStages(backend.sentryCtx,
				stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPoolDB, param, tmpdir, backend.blockReader),
				stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures),
				stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool, backend.txPoolDB, blockReader),
				stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3),
				stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
				stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
			), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
			logger)
		// We start the mining step
		if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil {
			return nil, err
		}
		block := <-miningStatePos.MiningResultPOSCh
		return block, nil
	}
|
2022-03-22 16:49:12 +00:00
|
|
|
|
2022-01-04 17:37:36 +00:00
|
|
|
// Initialize ethbackend
|
2023-07-10 17:22:03 +00:00
|
|
|
ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader, logger, latestBlockBuiltStore)
|
2023-07-06 16:09:52 +00:00
|
|
|
// intiialize engine backend
|
2023-07-24 16:04:36 +00:00
|
|
|
|
2023-12-18 10:46:50 +00:00
|
|
|
blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, backend.chainDB, backend.chainConfig, backend.notifications.Events, logger)
|
2023-07-28 22:22:38 +00:00
|
|
|
|
2023-05-20 13:48:16 +00:00
|
|
|
miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi, logger)
|
2022-06-13 13:43:09 +00:00
|
|
|
|
2022-11-11 22:22:47 +00:00
|
|
|
var creds credentials.TransportCredentials
|
|
|
|
if stack.Config().PrivateApiAddr != "" {
|
|
|
|
if stack.Config().TLSConnection {
|
|
|
|
creds, err = grpcutil.TLS(stack.Config().TLSCACert, stack.Config().TLSCertFile, stack.Config().TLSKeyFile)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
backend.privateAPI, err = privateapi.StartGrpc(
|
|
|
|
kvRPC,
|
|
|
|
ethBackendRPC,
|
2023-06-15 06:11:51 +00:00
|
|
|
backend.txPoolGrpcServer,
|
2022-11-11 22:22:47 +00:00
|
|
|
miningRPC,
|
|
|
|
stack.Config().PrivateApiAddr,
|
|
|
|
stack.Config().PrivateApiRateLimit,
|
|
|
|
creds,
|
2023-05-17 16:36:15 +00:00
|
|
|
stack.Config().HealthCheck,
|
|
|
|
logger)
|
2022-11-11 22:22:47 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("private api: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|

	if currentBlock == nil {
		currentBlock = genesis
	}
	// We start the transaction pool on startup, for a couple of reasons:
	// 1) Hive tests require us to do so, and starting it from eth_sendRawTransaction is not viable as we do not have enough data
	// to initialize it properly.
	// 2) we cannot propose for block 1 regardless.

	if !config.DeprecatedTxPool.Disable {
		backend.txPoolFetch.ConnectCore()
		backend.txPoolFetch.ConnectSentries()
		var newTxsBroadcaster *txpool.NewSlotsStreams
		if casted, ok := backend.txPoolGrpcServer.(*txpool.GrpcServer); ok {
			newTxsBroadcaster = casted.NewSlotsStreams
		}
		go txpool.MainLoop(backend.sentryCtx,
			backend.txPoolDB, backend.txPool, backend.newTxs, backend.txPoolSend, newTxsBroadcaster,
			func() {
				select {
				case backend.notifyMiningAboutNewTxs <- struct{}{}:
				default:
				}
			})
	}
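
	// Forward locally mined and pending blocks to the header/body downloaders and to RPC subscribers.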
|
2024-01-13 10:33:34 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
go func() {
|
|
|
|
defer debug.LogPanic()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case b := <-backend.minedBlocks:
|
eth, consensus/bor: fixes and improvements related to mining (#6051)
In context of https://github.com/ledgerwatch/erigon/issues/5694, this PR
adds some fixes and improvement in the mining flow. Also, a relevant
change in txpool (present in erigon-lib) is made here:
https://github.com/ledgerwatch/erigon-lib/pull/737
#### Changes in triggering mining in `startMining()`
The mining module didn't honour the block time as a simple 3 second
timer and a notifier from txpool was used to trigger mining. This would
cause inconsistencies, at least with the bor consensus. Hence, a geth
like approach is used instead for simplicity. A new head channel
subscription is added in the `startMining()` loop which would notify the
addition of new block. Hence, this would make sure that the block time
is being honoured. Moreover, the fixed 3 second timer is replaced by the
`miner.recommit` value set using flags.
#### Changes in the arrangement of calls made post mining
When all the mining stages are completed, erigon writes all the data in
a cache. It then processes the block through all the stages as it would
process a block received from P2P. In this case, some of the stages
aren't really required. Like the block header and body download stage is
not required as the block was mined locally. Even execution stage is not
required as it already went through it in the mining stages.
Now, we encountered an issue where the chain was halted and kept mining
the same block again and again (liveness issue). The root cause is
because of an error in a stage of it's parent block. This stage turns
out to be the 4th stage which is "Block body download" stage. This stage
tries to download the block body from peers using the headers. As, we
mined this block locally we don't really need to download anything (or
process anything again). Hence, it reaches out to the cache which we
store for the block body.
Interestingly that cache turned out to be empty for some blocks. This
was because post mining, before adding block header and body to a cache,
we call the broadcast method which starts the staged sync. So,
technically it’s a bit uncertain at any stage if the block header and
body has been written or not.(see
[this](https://github.com/ledgerwatch/erigon/blob/devel/eth/backend.go#L553-L572)).
To achieve complete certainty, we rearranged the calls with the write to
cache being called first and broadcast next. This pretty much solves the
issue as now we’re sure that we’d always have a block body in the cache
when we reach the body download stage.
#### Misc changes
This PR also adds some logs in bor consensus.
2022-11-17 23:39:16 +00:00
|
|
|
// Add mined header and block body before broadcast. This is because the broadcast call
|
|
|
|
// will trigger the staged sync which will require headers and blocks to be available
|
|
|
|
// in their respective cache in the download stage. If not found, it would cause a
|
|
|
|
// liveness issue for the chain.
|
|
|
|
if err := backend.sentriesClient.Hd.AddMinedHeader(b.Header()); err != nil {
|
2023-05-07 06:28:15 +00:00
|
|
|
logger.Error("add mined block to header downloader", "err", err)
|
eth, consensus/bor: fixes and improvements related to mining (#6051)
In context of https://github.com/ledgerwatch/erigon/issues/5694, this PR
adds some fixes and improvement in the mining flow. Also, a relevant
change in txpool (present in erigon-lib) is made here:
https://github.com/ledgerwatch/erigon-lib/pull/737
#### Changes in triggering mining in `startMining()`
The mining module didn't honour the block time as a simple 3 second
timer and a notifier from txpool was used to trigger mining. This would
cause inconsistencies, at least with the bor consensus. Hence, a geth
like approach is used instead for simplicity. A new head channel
subscription is added in the `startMining()` loop which would notify the
addition of new block. Hence, this would make sure that the block time
is being honoured. Moreover, the fixed 3 second timer is replaced by the
`miner.recommit` value set using flags.
#### Changes in the arrangement of calls made post mining
When all the mining stages are completed, erigon writes all the data in
a cache. It then processes the block through all the stages as it would
process a block received from P2P. In this case, some of the stages
aren't really required. Like the block header and body download stage is
not required as the block was mined locally. Even execution stage is not
required as it already went through it in the mining stages.
Now, we encountered an issue where the chain was halted and kept mining
the same block again and again (liveness issue). The root cause is
because of an error in a stage of it's parent block. This stage turns
out to be the 4th stage which is "Block body download" stage. This stage
tries to download the block body from peers using the headers. As, we
mined this block locally we don't really need to download anything (or
process anything again). Hence, it reaches out to the cache which we
store for the block body.
Interestingly that cache turned out to be empty for some blocks. This
was because post mining, before adding block header and body to a cache,
we call the broadcast method which starts the staged sync. So,
technically it’s a bit uncertain at any stage if the block header and
body has been written or not.(see
[this](https://github.com/ledgerwatch/erigon/blob/devel/eth/backend.go#L553-L572)).
To achieve complete certainty, we rearranged the calls with the write to
cache being called first and broadcast next. This pretty much solves the
issue as now we’re sure that we’d always have a block body in the cache
when we reach the body download stage.
#### Misc changes
This PR also adds some logs in bor consensus.
2022-11-17 23:39:16 +00:00
|
|
|
}
|
2023-01-06 12:43:46 +00:00
|
|
|
backend.sentriesClient.Bd.AddToPrefetch(b.Header(), b.RawBody())
|
eth, consensus/bor: fixes and improvements related to mining (#6051)
In context of https://github.com/ledgerwatch/erigon/issues/5694, this PR
adds some fixes and improvement in the mining flow. Also, a relevant
change in txpool (present in erigon-lib) is made here:
https://github.com/ledgerwatch/erigon-lib/pull/737
#### Changes in triggering mining in `startMining()`
The mining module didn't honour the block time as a simple 3 second
timer and a notifier from txpool was used to trigger mining. This would
cause inconsistencies, at least with the bor consensus. Hence, a geth
like approach is used instead for simplicity. A new head channel
subscription is added in the `startMining()` loop which would notify the
addition of new block. Hence, this would make sure that the block time
is being honoured. Moreover, the fixed 3 second timer is replaced by the
`miner.recommit` value set using flags.
#### Changes in the arrangement of calls made post mining
When all the mining stages are completed, erigon writes all the data in
a cache. It then processes the block through all the stages as it would
process a block received from P2P. In this case, some of the stages
aren't really required. Like the block header and body download stage is
not required as the block was mined locally. Even execution stage is not
required as it already went through it in the mining stages.
Now, we encountered an issue where the chain was halted and kept mining
the same block again and again (liveness issue). The root cause is
because of an error in a stage of it's parent block. This stage turns
out to be the 4th stage which is "Block body download" stage. This stage
tries to download the block body from peers using the headers. As, we
mined this block locally we don't really need to download anything (or
process anything again). Hence, it reaches out to the cache which we
store for the block body.
Interestingly that cache turned out to be empty for some blocks. This
was because post mining, before adding block header and body to a cache,
we call the broadcast method which starts the staged sync. So,
technically it’s a bit uncertain at any stage if the block header and
body has been written or not.(see
[this](https://github.com/ledgerwatch/erigon/blob/devel/eth/backend.go#L553-L572)).
To achieve complete certainty, we rearranged the calls with the write to
cache being called first and broadcast next. This pretty much solves the
issue as now we’re sure that we’d always have a block body in the cache
when we reach the body download stage.
#### Misc changes
This PR also adds some logs in bor consensus.
2022-11-17 23:39:16 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
//p2p
|
2022-05-10 05:17:44 +00:00
|
|
|
//backend.sentriesClient.BroadcastNewBlock(context.Background(), b, b.Difficulty())
|
2021-09-08 05:31:51 +00:00
|
|
|
//rpcdaemon
|
|
|
|
if err := miningRPC.(*privateapi.MiningServer).BroadcastMinedBlock(b); err != nil {
|
2023-05-07 06:28:15 +00:00
|
|
|
logger.Error("txpool rpc mined block broadcast", "err", err)
|
2021-09-08 05:31:51 +00:00
|
|
|
}
|
2023-05-07 06:28:15 +00:00
|
|
|
logger.Trace("BroadcastMinedBlock successful", "number", b.Number(), "GasUsed", b.GasUsed(), "txn count", b.Transactions().Len())
|
2022-08-11 17:55:59 +00:00
|
|
|
backend.sentriesClient.PropagateNewBlockHashes(ctx, []headerdownload.Announce{
|
|
|
|
{
|
|
|
|
Number: b.NumberU64(),
|
|
|
|
Hash: b.Hash(),
|
|
|
|
},
|
|
|
|
})
|
2021-09-02 10:04:30 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
case b := <-backend.pendingBlocks:
|
|
|
|
if err := miningRPC.(*privateapi.MiningServer).BroadcastPendingBlock(b); err != nil {
|
2023-05-07 06:28:15 +00:00
|
|
|
logger.Error("txpool rpc pending block broadcast", "err", err)
|
2021-09-08 05:31:51 +00:00
|
|
|
}
|
2022-06-29 15:39:12 +00:00
|
|
|
case <-backend.sentriesClient.Hd.QuitPoWMining:
|
2021-09-08 05:31:51 +00:00
|
|
|
return
|
|
|
|
}
|
2021-05-17 12:15:19 +00:00
|
|
|
}
|
2021-09-08 05:31:51 +00:00
|
|
|
}()
|
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
if err := backend.StartMining(context.Background(), backend.chainDB, stateDiffClient, mining, miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir, logger); err != nil {
|
2021-09-08 05:31:51 +00:00
|
|
|
return nil, err
|
2021-09-02 10:04:30 +00:00
|
|
|
}
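
	// Assemble the staged-sync pipelines and the execution module that the Engine API server drives.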
|
2022-04-22 06:23:52 +00:00
|
|
|
|
2023-01-17 06:20:31 +00:00
|
|
|
backend.ethBackendRPC, backend.miningRPC, backend.stateChangesClient = ethBackendRPC, miningRPC, stateDiffClient
|
|
|
|
|
2023-11-04 17:44:34 +00:00
|
|
|
backend.syncStages = stages2.NewDefaultStages(backend.sentryCtx, backend.chainDB, snapDb, stack.Config().P2P, config, backend.sentriesClient, backend.notifications, backend.downloaderClient,
|
|
|
|
blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger)
|
2023-01-17 06:20:31 +00:00
|
|
|
backend.syncUnwindOrder = stagedsync.DefaultUnwindOrder
|
|
|
|
backend.syncPruneOrder = stagedsync.DefaultPruneOrder
|
2023-12-27 22:05:09 +00:00
|
|
|
backend.stagedSync = stagedsync.New(config.Sync, backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger)
|
2023-01-17 06:20:31 +00:00
|
|
|
|
2023-09-29 02:03:19 +00:00
|
|
|
hook := stages2.NewHook(backend.sentryCtx, backend.chainDB, backend.notifications, backend.stagedSync, backend.blockReader, backend.chainConfig, backend.logger, backend.sentriesClient.UpdateHead)
|
2023-08-01 00:08:15 +00:00
|
|
|
|
2023-08-08 13:01:35 +00:00
|
|
|
checkStateRoot := true
|
2023-12-27 22:05:09 +00:00
|
|
|
pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, stack.Config().P2P, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot)
|
|
|
|
backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
|
2023-10-07 20:30:10 +00:00
|
|
|
backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, chainKv, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3)
|
2023-08-04 12:42:35 +00:00
|
|
|
executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer)
|
2023-08-05 21:33:10 +00:00
|
|
|
engineBackendRPC := engineapi.NewEngineServer(
|
|
|
|
ctx,
|
|
|
|
logger,
|
|
|
|
chainConfig,
|
|
|
|
executionRpc,
|
|
|
|
backend.sentriesClient.Hd,
|
|
|
|
engine_block_downloader.NewEngineBlockDownloader(ctx, logger, backend.sentriesClient.Hd, executionRpc,
|
|
|
|
backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader,
|
|
|
|
chainKv, chainConfig, tmpdir, config.Sync.BodyDownloadTimeoutSeconds),
|
|
|
|
false,
|
|
|
|
config.Miner.EnabledPOS)
|
|
|
|
backend.engineBackendRPC = engineBackendRPC

	var engine execution_client.ExecutionEngine

	// Gnosis has too few blocks on its network for phase2 to work. Once we have proper snapshot automation, it can go back to normal.
	if config.NetworkID == uint64(clparams.GnosisNetwork) {
		// Read the jwt secret
		jwtSecret, err := cli.ObtainJWTSecret(&stack.Config().Http, logger)
		if err != nil {
			return nil, err
		}
		engine, err = execution_client.NewExecutionClientRPC(ctx, jwtSecret, stack.Config().Http.AuthRpcHTTPListenAddress, stack.Config().Http.AuthRpcPort)
		if err != nil {
			return nil, err
		}
	} else {
		engine, err = execution_client.NewExecutionClientDirect(ctx, eth1_chain_reader.NewChainReaderEth1(ctx, chainConfig, executionRpc, 1000))
		if err != nil {
			return nil, err
		}
	}

	// If we choose not to run a consensus layer, run our embedded.
	if config.InternalCL && clparams.EmbeddedSupported(config.NetworkID) {
		genesisCfg, networkCfg, beaconCfg := clparams.GetConfigsByNetwork(clparams.NetworkType(config.NetworkID))
		if err != nil {
			return nil, err
		}
		state, err := clcore.RetrieveBeaconState(ctx, beaconCfg, genesisCfg,
			clparams.GetCheckpointSyncEndpoint(clparams.NetworkType(config.NetworkID)))
		if err != nil {
			return nil, err
		}
		forkDigest, err := fork.ComputeForkDigest(beaconCfg, genesisCfg)
		if err != nil {
			return nil, err
		}

		rawBeaconBlockChainDb, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconCfg, dirs.CaplinHistory)
		historyDB, indiciesDB, err := caplin1.OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, beaconCfg, rawBeaconBlockChainDb, dirs.CaplinIndexing, engine, false)
		if err != nil {
			return nil, err
		}

		client, err := service.StartSentinelService(&sentinel.SentinelConfig{
			IpAddr:        config.LightClientDiscoveryAddr,
			Port:          int(config.LightClientDiscoveryPort),
			TCPPort:       uint(config.LightClientDiscoveryTCPPort),
			GenesisConfig: genesisCfg,
			NetworkConfig: networkCfg,
			BeaconConfig:  beaconCfg,
			TmpDir:        tmpdir,
		}, rawBeaconBlockChainDb, indiciesDB, &service.ServerConfig{Network: "tcp", Addr: fmt.Sprintf("%s:%d", config.SentinelAddr, config.SentinelPort)}, creds, &cltypes.Status{
			ForkDigest:     forkDigest,
			FinalizedRoot:  state.FinalizedCheckpoint().BlockRoot(),
			FinalizedEpoch: state.FinalizedCheckpoint().Epoch(),
			HeadSlot:       state.FinalizedCheckpoint().Epoch() * beaconCfg.SlotsPerEpoch,
			HeadRoot:       state.FinalizedCheckpoint().BlockRoot(),
		}, logger)
		if err != nil {
			return nil, err
		}

		backend.sentinel = client

		go func() {
			eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconCfg, blockReader, backend.chainDB)
			if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, snapshotVersion, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive, historyDB, indiciesDB); err != nil {
				logger.Error("could not start caplin", "err", err)
			}
			ctxCancel()
		}()
	}

	return backend, nil
}
|
2023-07-06 16:09:52 +00:00
|
|
|
|
2023-05-26 10:12:33 +00:00
|
|
|
func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config) error {
|
|
|
|
ethBackendRPC, miningRPC, stateDiffClient := s.ethBackendRPC, s.miningRPC, s.stateChangesClient
|
|
|
|
blockReader := s.blockReader
|
|
|
|
ctx := s.sentryCtx
|
|
|
|
chainKv := s.chainDB
|
2023-01-17 06:20:31 +00:00
|
|
|
var err error
|
|
|
|
|
2024-01-12 16:07:59 +00:00
|
|
|
if config.Genesis.Config.Bor == nil {
|
|
|
|
s.sentriesClient.Hd.StartPoSDownloader(s.sentryCtx, s.sentriesClient.SendHeaderRequest, s.sentriesClient.Penalize)
|
|
|
|
}
|
2022-03-22 16:49:12 +00:00
|
|
|
|
2023-01-13 18:12:18 +00:00
|
|
|
emptyBadHash := config.BadBlockHash == libcommon.Hash{}
|
2021-09-10 03:06:23 +00:00
|
|
|
if !emptyBadHash {
|
|
|
|
var badBlockHeader *types.Header
|
|
|
|
if err = chainKv.View(context.Background(), func(tx kv.Tx) error {
|
|
|
|
header, hErr := rawdb.ReadHeaderByHash(tx, config.BadBlockHash)
|
|
|
|
badBlockHeader = header
|
|
|
|
return hErr
|
|
|
|
}); err != nil {
|
2023-01-17 06:20:31 +00:00
|
|
|
return err
|
2021-09-10 03:06:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if badBlockHeader != nil {
|
|
|
|
unwindPoint := badBlockHeader.Number.Uint64() - 1
|
2023-10-10 14:17:51 +00:00
|
|
|
s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, fmt.Errorf("Init unwind")))
|
2021-09-10 03:06:23 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-19 07:56:44 +00:00
|
|
|
//eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
|
2020-08-03 17:40:46 +00:00
|
|
|
gpoParams := config.GPO
|
|
|
|
if gpoParams.Default == nil {
|
|
|
|
gpoParams.Default = config.Miner.GasPrice
|
2020-07-09 12:53:35 +00:00
|
|
|
}
|
2021-04-19 07:56:44 +00:00
|
|
|
//eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
|
2022-04-22 06:23:52 +00:00
|
|
|
if config.Ethstats != "" {
|
2022-09-18 10:41:01 +00:00
|
|
|
var headCh chan [][]byte
|
2023-05-26 10:12:33 +00:00
|
|
|
headCh, s.unsubscribeEthstat = s.notifications.Events.AddHeaderSubscription()
|
|
|
|
if err := ethstats.New(stack, s.sentryServers, chainKv, s.blockReader, s.engine, config.Ethstats, s.networkID, ctx.Done(), headCh); err != nil {
|
2023-01-17 06:20:31 +00:00
|
|
|
return err
|
2022-04-22 06:23:52 +00:00
|
|
|
}
|
|
|
|
}
|
2022-02-16 17:38:54 +00:00
|
|
|
// start HTTP API
|
|
|
|
httpRpcCfg := stack.Config().Http
|
2023-07-12 16:11:41 +00:00
|
|
|
ethRpcClient, txPoolRpcClient, miningRpcClient, stateCache, ff, err := cli.EmbeddedServices(ctx, chainKv, httpRpcCfg.StateCache, blockReader, ethBackendRPC,
|
|
|
|
s.txPoolGrpcServer, miningRPC, stateDiffClient, s.logger)
|
2022-07-31 10:16:19 +00:00
|
|
|
if err != nil {
|
2023-01-17 06:20:31 +00:00
|
|
|
return err
|
2022-07-31 10:16:19 +00:00
|
|
|
}
|
2022-02-16 17:38:54 +00:00
|
|
|
|
2023-11-28 09:11:39 +00:00
|
|
|
s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, s.logger)
|
2023-11-02 01:35:13 +00:00
|
|
|
|
|
|
|
if config.SilkwormRpcDaemon && httpRpcCfg.Enabled {
|
2023-11-30 11:45:02 +00:00
|
|
|
silkwormRPCDaemonService := silkworm.NewRpcDaemonService(s.silkworm, chainKv)
|
2023-11-02 01:35:13 +00:00
|
|
|
s.silkwormRPCDaemonService = &silkwormRPCDaemonService
|
|
|
|
} else {
|
|
|
|
go func() {
|
2023-11-28 09:11:39 +00:00
|
|
|
if err := cli.StartRpcServer(ctx, &httpRpcCfg, s.apiList, s.logger); err != nil {
|
2023-11-02 01:35:13 +00:00
|
|
|
s.logger.Error("cli.StartRpcServer error", "err", err)
|
2023-10-17 23:37:16 +00:00
|
|
|
}
|
2023-11-02 01:35:13 +00:00
|
|
|
}()
|
|
|
|
}
|
2023-09-06 03:23:59 +00:00
|
|
|
|
2024-01-12 16:07:59 +00:00
|
|
|
if config.Genesis.Config.Bor == nil {
|
|
|
|
go s.engineBackendRPC.Start(&httpRpcCfg, s.chainDB, s.blockReader, ff, stateCache, s.agg, s.engine, ethRpcClient, txPoolRpcClient, miningRpcClient)
|
|
|
|
}
|
2022-02-12 12:47:19 +00:00
|
|
|
|
2020-08-03 17:40:46 +00:00
|
|
|
// Register the backend on the node
|
2023-05-26 10:12:33 +00:00
|
|
|
stack.RegisterLifecycle(s)
|
2023-01-17 06:20:31 +00:00
|
|
|
return nil
|
2014-12-14 18:03:24 +00:00
|
|
|
}
|
|
|
|
|
2021-04-19 07:56:44 +00:00
|
|
|
func (s *Ethereum) APIs() []rpc.API {
|
2023-09-13 10:49:49 +00:00
|
|
|
return s.apiList
|
2021-04-19 07:56:44 +00:00
|
|
|
}
|
|
|
|
|
2023-01-13 18:12:18 +00:00
|
|
|
func (s *Ethereum) Etherbase() (eb libcommon.Address, err error) {
|
2017-05-29 07:21:34 +00:00
|
|
|
s.lock.RLock()
|
|
|
|
etherbase := s.etherbase
|
|
|
|
s.lock.RUnlock()
|
|
|
|
|
2023-01-13 18:12:18 +00:00
|
|
|
if etherbase != (libcommon.Address{}) {
|
2017-05-29 07:21:34 +00:00
|
|
|
return etherbase, nil
|
2017-01-24 09:49:20 +00:00
|
|
|
}
|
2023-01-13 18:12:18 +00:00
|
|
|
return libcommon.Address{}, fmt.Errorf("etherbase must be explicitly specified")
|
2015-03-26 21:49:22 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 17:02:15 +00:00
|
|
|
// isLocalBlock checks whether the specified block is mined
|
|
|
|
// by local miner accounts.
|
2018-09-20 12:09:30 +00:00
|
|
|
//
|
2018-09-20 17:02:15 +00:00
|
|
|
// We regard two types of accounts as local miner accounts: etherbase
|
|
|
|
// and accounts specified via the `txpool.locals` flag.
|
2021-04-20 14:41:46 +00:00
|
|
|
func (s *Ethereum) isLocalBlock(block *types.Block) bool { //nolint
|
2018-09-20 12:09:30 +00:00
|
|
|
s.lock.RLock()
|
|
|
|
etherbase := s.etherbase
|
|
|
|
s.lock.RUnlock()
|
2022-05-26 05:27:44 +00:00
|
|
|
return ethutils.IsLocalBlock(s.engine, etherbase, s.config.DeprecatedTxPool.Locals, block.Header())
|
2018-09-20 12:09:30 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 17:02:15 +00:00
|
|
|
// shouldPreserve checks whether we should preserve the given block
|
|
|
|
// during the chain reorg depending on whether the author of block
|
|
|
|
// is a local account.
|
2021-04-20 14:41:46 +00:00
|
|
|
func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint
|
2018-09-20 17:02:15 +00:00
|
|
|
// The reason we need to disable self-reorg preservation for clique
|
|
|
|
// is that it can introduce a deadlock.
|
|
|
|
//
|
|
|
|
// e.g. If there are 7 available signers
|
|
|
|
//
|
|
|
|
// r1 A
|
|
|
|
// r2 B
|
|
|
|
// r3 C
|
|
|
|
// r4 D
|
|
|
|
// r5 A [X] F G
|
|
|
|
// r6 [X]
|
|
|
|
//
|
|
|
|
// In round 5, the in-turn signer E is offline, so the worst case
|
|
|
|
// is that A, F and G sign the block of round 5 and reject the blocks of their opponents,
|
|
|
|
// and in round 6 the last available signer B is offline, so the whole
|
|
|
|
// network is stuck.
|
2021-03-05 20:34:23 +00:00
|
|
|
if _, ok := s.engine.(*clique.Clique); ok {
|
2018-09-20 17:02:15 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
return s.isLocalBlock(block)
|
|
|
|
}
|
|
|
|
|
2018-08-23 10:02:36 +00:00
|
|
|
// StartMining starts the miner with the given number of CPU threads. If mining
|
|
|
|
// is already running, this method adjusts the number of threads allowed to be used
|
|
|
|
// and updates the minimum price required by the transaction pool.
|
2024-01-10 17:12:15 +00:00
|
|
|
func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, stateDiffClient *direct.StateDiffClientDirect, mining *stagedsync.Sync, miner stagedsync.MiningState, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string, logger log.Logger) error {
|
|
|
|
|
2023-07-18 08:47:04 +00:00
|
|
|
var borcfg *bor.Bor
|
|
|
|
if b, ok := s.engine.(*bor.Bor); ok {
|
|
|
|
borcfg = b
|
2023-10-12 18:08:05 +00:00
|
|
|
b.HeaderProgress(s.sentriesClient.Hd)
|
2023-07-18 08:47:04 +00:00
|
|
|
} else if br, ok := s.engine.(*merge.Merge); ok {
|
|
|
|
if b, ok := br.InnerEngine().(*bor.Bor); ok {
|
|
|
|
borcfg = b
|
2023-10-12 18:08:05 +00:00
|
|
|
b.HeaderProgress(s.sentriesClient.Hd)
|
2023-07-18 08:47:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-22 07:17:34 +00:00
|
|
|
//if borcfg == nil {
|
2024-01-10 17:12:15 +00:00
|
|
|
if !miner.MiningConfig.Enabled {
|
2023-07-22 07:17:34 +00:00
|
|
|
return nil
|
2021-04-20 14:41:46 +00:00
|
|
|
}
|
2023-07-22 07:17:34 +00:00
|
|
|
//}
|
2021-04-21 05:01:25 +00:00
|
|
|
|
|
|
|
// Configure the local mining address
|
|
|
|
eb, err := s.Etherbase()
|
|
|
|
if err != nil {
|
2023-05-09 17:11:31 +00:00
|
|
|
s.logger.Error("Cannot start mining without etherbase", "err", err)
|
2021-10-04 15:16:52 +00:00
|
|
|
return fmt.Errorf("etherbase missing: %w", err)
|
2021-04-21 05:01:25 +00:00
|
|
|
}
|
2022-06-03 16:14:49 +00:00
|
|
|
|
2023-07-18 08:47:04 +00:00
|
|
|
if borcfg != nil {
|
2024-01-10 17:12:15 +00:00
|
|
|
if miner.MiningConfig.Enabled {
|
|
|
|
if miner.MiningConfig.SigKey == nil {
|
2023-07-18 08:47:04 +00:00
|
|
|
s.logger.Error("Etherbase account unavailable locally", "err", err)
|
|
|
|
return fmt.Errorf("signer missing: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
borcfg.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) {
|
2024-01-10 17:12:15 +00:00
|
|
|
return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey)
|
2023-07-18 08:47:04 +00:00
|
|
|
})
|
2024-01-09 11:37:39 +00:00
|
|
|
|
2024-01-09 15:41:00 +00:00
|
|
|
if !s.config.WithoutHeimdall {
|
|
|
|
err := stagedsync.FetchSpanZeroForMiningIfNeeded(
|
|
|
|
ctx,
|
|
|
|
s.chainDB,
|
|
|
|
s.blockReader,
|
|
|
|
borcfg.HeimdallClient,
|
|
|
|
logger,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2024-01-09 11:37:39 +00:00
|
|
|
}
|
2023-07-18 08:47:04 +00:00
|
|
|
} else {
|
|
|
|
// For the bor dev network without Heimdall we need the authorizer to be set, otherwise there is no
|
|
|
|
// validator defined in the bor validator set and non-mining nodes will reject all blocks.
|
|
|
|
// This assumes that in this mode we're only running a single validator.
|
|
|
|
|
|
|
|
if s.chainConfig.ChainName == networkname.BorDevnetChainName && s.config.WithoutHeimdall {
|
|
|
|
borcfg.Authorize(eb, func(addr libcommon.Address, _ string, _ []byte) ([]byte, error) {
|
2024-01-12 15:11:01 +00:00
|
|
|
return nil, &valset.UnauthorizedSignerError{Number: 0, Signer: addr.Bytes()}
|
2023-07-18 08:47:04 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-03 16:14:49 +00:00
|
|
|
var clq *clique.Clique
|
2022-01-14 19:06:35 +00:00
|
|
|
if c, ok := s.engine.(*clique.Clique); ok {
|
2022-06-03 16:14:49 +00:00
|
|
|
clq = c
|
2023-05-09 17:45:33 +00:00
|
|
|
} else if cl, ok := s.engine.(*merge.Merge); ok {
|
2022-06-03 16:14:49 +00:00
|
|
|
if c, ok := cl.InnerEngine().(*clique.Clique); ok {
|
|
|
|
clq = c
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if clq != nil {
|
2024-01-10 17:12:15 +00:00
|
|
|
if miner.MiningConfig.SigKey == nil {
|
2023-05-09 17:11:31 +00:00
|
|
|
s.logger.Error("Etherbase account unavailable locally", "err", err)
|
2021-10-04 15:16:52 +00:00
|
|
|
return fmt.Errorf("signer missing: %w", err)
|
2021-04-21 05:01:25 +00:00
|
|
|
}
|
|
|
|
|
2023-01-13 18:12:18 +00:00
|
|
|
clq.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) {
|
2024-01-10 17:12:15 +00:00
|
|
|
return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey)
|
2021-04-21 05:01:25 +00:00
|
|
|
})
|
|
|
|
}
|
2022-06-03 16:14:49 +00:00
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
streamCtx, streamCancel := context.WithCancel(ctx)
|
|
|
|
stream, err := stateDiffClient.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: false, WithTransactions: true}, grpc.WaitForReady(true))
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
streamCancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
stateChangeCh := make(chan *remote.StateChange)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for req, err := stream.Recv(); ; req, err = stream.Recv() {
|
|
|
|
if err == nil {
|
|
|
|
for _, change := range req.ChangeBatch {
|
|
|
|
stateChangeCh <- change
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2021-04-20 14:41:46 +00:00
|
|
|
go func() {
|
2021-06-22 10:09:45 +00:00
|
|
|
defer debug.LogPanic()
|
2021-05-20 05:33:55 +00:00
|
|
|
defer close(s.waitForMiningStop)
|
2024-01-10 17:12:15 +00:00
|
|
|
defer streamCancel()
|
2021-05-17 12:15:19 +00:00
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
mineEvery := time.NewTicker(miner.MiningConfig.Recommit)
|
2021-09-18 13:58:23 +00:00
|
|
|
defer mineEvery.Stop()
|
|
|
|
|
2023-05-29 19:35:45 +00:00
|
|
|
s.logger.Info("Starting to mine", "etherbase", eb)
|
2023-05-23 06:16:55 +00:00
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
var working bool
|
|
|
|
var waiting atomic.Bool
|
|
|
|
|
2023-05-23 06:16:55 +00:00
|
|
|
hasWork := true // Start mining immediately
|
2021-05-17 12:15:19 +00:00
|
|
|
errc := make(chan error, 1)
|
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
workCtx, workCancel := context.WithCancel(ctx)
|
|
|
|
defer workCancel()
|
|
|
|
|
2021-05-17 12:15:19 +00:00
|
|
|
for {
|
2023-08-30 09:25:02 +00:00
|
|
|
// Only reset if some work was done previously, as we'd like to rely
|
|
|
|
// on `miner.recommit` as a backup.
|
|
|
|
if hasWork {
|
2024-01-10 17:12:15 +00:00
|
|
|
mineEvery.Reset(miner.MiningConfig.Recommit)
|
2023-08-30 09:25:02 +00:00
|
|
|
}
|
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
// Only check these cases if you're already mining (i.e. working = true) and
|
2023-05-23 06:16:55 +00:00
|
|
|
// waiting for an error, or you don't have any work yet (i.e. hasWork = false).
|
2024-01-10 17:12:15 +00:00
|
|
|
if working || !hasWork {
|
2023-05-23 06:16:55 +00:00
|
|
|
select {
|
2024-01-10 17:12:15 +00:00
|
|
|
case stateChanges := <-stateChangeCh:
|
|
|
|
block := stateChanges.BlockHeight
|
2024-01-11 10:03:41 +00:00
|
|
|
s.logger.Debug("Start mining based on previous block", "block", block)
|
2024-01-10 17:12:15 +00:00
|
|
|
// TODO - can do mining clean up here as we have previous
|
|
|
|
// block info in the state channel
|
2023-05-23 06:16:55 +00:00
|
|
|
hasWork = true
|
2024-01-10 17:12:15 +00:00
|
|
|
|
2023-05-23 06:16:55 +00:00
|
|
|
case <-s.notifyMiningAboutNewTxs:
|
|
|
|
// Skip mining based on new tx notifications for bor consensus
|
|
|
|
hasWork = s.chainConfig.Bor == nil
|
|
|
|
if hasWork {
|
2024-01-11 10:03:41 +00:00
|
|
|
s.logger.Debug("Start mining based on txpool notif")
|
2023-05-23 06:16:55 +00:00
|
|
|
}
|
|
|
|
case <-mineEvery.C:
|
2024-01-10 17:12:15 +00:00
|
|
|
if !(working || waiting.Load()) {
|
2024-01-11 10:03:41 +00:00
|
|
|
s.logger.Debug("Start mining based on miner.recommit", "duration", miner.MiningConfig.Recommit)
|
2024-01-10 17:12:15 +00:00
|
|
|
}
|
|
|
|
hasWork = !(working || waiting.Load())
|
2023-05-23 06:16:55 +00:00
|
|
|
case err := <-errc:
|
2024-01-10 17:12:15 +00:00
|
|
|
working = false
|
2023-05-23 06:16:55 +00:00
|
|
|
hasWork = false
|
|
|
|
if errors.Is(err, libcommon.ErrStopped) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err != nil {
|
2023-05-29 19:35:45 +00:00
|
|
|
s.logger.Warn("mining", "err", err)
|
2023-05-23 06:16:55 +00:00
|
|
|
}
|
|
|
|
case <-quitCh:
|
2021-05-26 07:02:19 +00:00
|
|
|
return
|
|
|
|
}
|
2022-01-04 17:37:36 +00:00
|
|
|
}
|
2021-04-20 14:41:46 +00:00
|
|
|
|
2024-01-10 17:12:15 +00:00
|
|
|
if !working && hasWork {
|
|
|
|
working = true
|
2023-05-23 06:16:55 +00:00
|
|
|
hasWork = false
|
2024-01-10 17:12:15 +00:00
|
|
|
mineEvery.Reset(miner.MiningConfig.Recommit)
|
|
|
|
go func() {
|
|
|
|
err := stages2.MiningStep(ctx, db, mining, tmpDir, logger)
|
|
|
|
|
|
|
|
waiting.Store(true)
|
|
|
|
defer waiting.Store(false)
|
|
|
|
|
|
|
|
errc <- err
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case block := <-miner.MiningResultCh:
|
|
|
|
if block != nil {
|
|
|
|
s.logger.Debug("Mined block", "block", block.Number())
|
|
|
|
s.minedBlocks <- block
|
|
|
|
}
|
|
|
|
return
|
|
|
|
case <-workCtx.Done():
|
|
|
|
errc <- workCtx.Err()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2021-04-20 14:41:46 +00:00
|
|
|
}
|
|
|
|
}
|
2021-05-17 12:15:19 +00:00
|
|
|
}()
|
2021-04-20 14:41:46 +00:00
|
|
|
|
2021-05-17 12:15:19 +00:00
|
|
|
return nil
|
2021-04-20 14:41:46 +00:00
|
|
|
}
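// exampleRecommitLoop is an illustrative sketch (assumed helper, not used elsewhere) that
// condenses the scheduling pattern StartMining uses above: work is triggered either by an
// external notification or by a recommit ticker, and the ticker is pushed out whenever new
// work is scheduled. The channel and callback names are placeholders.
func exampleRecommitLoop(ctx context.Context, notify <-chan struct{}, recommit time.Duration, doWork func()) {
	mineEvery := time.NewTicker(recommit)
	defer mineEvery.Stop()
	for {
		select {
		case <-notify: // new work arrived: run immediately and push the backup timer out
			doWork()
			mineEvery.Reset(recommit)
		case <-mineEvery.C: // nothing happened for a full recommit interval: retry anyway
			doWork()
		case <-ctx.Done():
			return
		}
	}
}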
|
|
|
|
|
2021-04-03 06:09:31 +00:00
|
|
|
func (s *Ethereum) IsMining() bool { return s.config.Miner.Enabled }
|
2016-06-30 10:03:26 +00:00
|
|
|
|
2021-09-08 05:31:51 +00:00
|
|
|
func (s *Ethereum) ChainKV() kv.RwDB { return s.chainDB }
|
2021-04-20 14:41:46 +00:00
|
|
|
func (s *Ethereum) NetVersion() (uint64, error) { return s.networkID, nil }
|
2021-06-17 21:55:20 +00:00
|
|
|
func (s *Ethereum) NetPeerCount() (uint64, error) {
|
2021-06-18 13:34:15 +00:00
|
|
|
var sentryPc uint64 = 0
|
|
|
|
|
2023-05-09 17:11:31 +00:00
|
|
|
s.logger.Trace("sentry", "peer count", sentryPc)
|
2022-05-10 05:17:44 +00:00
|
|
|
for _, sc := range s.sentriesClient.Sentries() {
|
2021-06-18 13:34:15 +00:00
|
|
|
ctx := context.Background()
|
2021-12-14 10:13:17 +00:00
|
|
|
reply, err := sc.PeerCount(ctx, &proto_sentry.PeerCountRequest{})
|
2021-06-18 13:34:15 +00:00
|
|
|
if err != nil {
|
2023-05-09 17:11:31 +00:00
|
|
|
s.logger.Warn("sentry", "err", err)
|
2021-06-18 13:34:15 +00:00
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
sentryPc += reply.Count
|
2021-06-17 21:55:20 +00:00
|
|
|
}
|
|
|
|
|
2021-06-18 13:34:15 +00:00
|
|
|
return sentryPc, nil
|
2021-06-17 21:55:20 +00:00
|
|
|
}
|
2015-01-28 17:14:28 +00:00
|
|
|
|
2021-11-30 22:42:12 +00:00
|
|
|
func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) {
|
2022-05-10 05:17:44 +00:00
|
|
|
if limit == 0 || limit > len(s.sentriesClient.Sentries()) {
|
|
|
|
limit = len(s.sentriesClient.Sentries())
|
2021-11-30 22:42:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
nodes := make([]*prototypes.NodeInfoReply, 0, limit)
|
|
|
|
for i := 0; i < limit; i++ {
|
2022-05-10 05:17:44 +00:00
|
|
|
sc := s.sentriesClient.Sentries()[i]
|
2021-11-30 22:42:12 +00:00
|
|
|
|
|
|
|
nodeInfo, err := sc.NodeInfo(context.Background(), nil)
|
|
|
|
if err != nil {
|
2023-05-09 17:11:31 +00:00
|
|
|
s.logger.Error("sentry nodeInfo", "err", err)
|
2023-07-18 08:47:04 +00:00
|
|
|
continue
|
2021-11-30 22:42:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
nodes = append(nodes, nodeInfo)
|
|
|
|
}
|
|
|
|
|
|
|
|
nodesInfo := &remote.NodesInfoReply{NodesInfo: nodes}
|
2023-09-29 20:11:13 +00:00
|
|
|
slices.SortFunc(nodesInfo.NodesInfo, remote.NodeInfoReplyCmp)
|
2021-11-30 22:42:12 +00:00
|
|
|
|
|
|
|
return nodesInfo, nil
|
|
|
|
}
|
|
|
|
|
2022-06-03 08:38:10 +00:00
|
|
|
// setUpSnapDownloader sets up the snapshot Downloader client (external or embedded) and notifies it when new frozen files appear
|
2023-05-23 07:49:17 +00:00
|
|
|
func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downloadercfg.Cfg) error {
|
2022-08-23 09:28:07 +00:00
|
|
|
var err error
|
2023-05-23 07:49:17 +00:00
|
|
|
if s.config.Snapshot.NoDownloader {
|
|
|
|
return nil
|
2022-11-04 12:43:59 +00:00
|
|
|
}
|
2023-05-23 07:49:17 +00:00
|
|
|
if s.config.Snapshot.DownloaderAddr != "" {
|
|
|
|
// connect to external Downloader
|
|
|
|
s.downloaderClient, err = downloadergrpc.NewClient(ctx, s.config.Snapshot.DownloaderAddr)
|
|
|
|
} else {
|
|
|
|
// start embedded Downloader
|
2023-12-27 22:05:09 +00:00
|
|
|
if uploadFs := s.config.Sync.UploadLocation; len(uploadFs) > 0 {
|
|
|
|
downloaderCfg.AddTorrentsFromDisk = false
|
|
|
|
}
|
|
|
|
|
2023-12-22 03:35:48 +00:00
|
|
|
discover := true
|
2023-12-27 22:05:09 +00:00
|
|
|
s.downloader, err = downloader.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover)
|
2023-05-23 07:49:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-09-15 06:46:59 +00:00
|
|
|
s.downloader.MainLoopInBackground(true)
|
2023-12-27 22:05:09 +00:00
|
|
|
bittorrentServer, err := downloader.NewGrpcServer(s.downloader)
|
2023-05-23 07:49:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("new server: %w", err)
|
|
|
|
}
|
2022-07-28 09:57:38 +00:00
|
|
|
|
2023-05-23 07:49:17 +00:00
|
|
|
s.downloaderClient = direct.NewDownloaderClient(bittorrentServer)
|
|
|
|
}
|
|
|
|
s.agg.OnFreeze(func(frozenFileNames []string) {
|
|
|
|
events := s.notifications.Events
|
|
|
|
events.OnNewSnapshot()
|
|
|
|
if s.downloaderClient != nil {
|
2023-12-12 09:05:56 +00:00
|
|
|
req := &proto_downloader.AddRequest{Items: make([]*proto_downloader.AddItem, 0, len(frozenFileNames))}
|
2023-05-23 07:49:17 +00:00
|
|
|
for _, fName := range frozenFileNames {
|
2023-12-12 09:05:56 +00:00
|
|
|
req.Items = append(req.Items, &proto_downloader.AddItem{
|
2023-05-23 07:49:17 +00:00
|
|
|
Path: filepath.Join("history", fName),
|
|
|
|
})
|
2022-06-03 08:38:10 +00:00
|
|
|
}
|
2023-12-12 09:05:56 +00:00
|
|
|
if _, err := s.downloaderClient.Add(ctx, req); err != nil {
|
2023-05-23 07:49:17 +00:00
|
|
|
s.logger.Warn("[snapshots] notify downloader", "err", err)
|
2022-07-28 09:57:38 +00:00
|
|
|
}
|
2022-06-03 08:38:10 +00:00
|
|
|
}
|
2023-05-23 07:49:17 +00:00
|
|
|
})
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2024-01-09 14:48:01 +00:00
|
|
|
func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snapshotVersion uint8, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3, error) {
|
2023-12-27 22:05:09 +00:00
|
|
|
allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
|
|
|
|
|
2023-08-18 16:10:35 +00:00
|
|
|
var allBorSnapshots *freezeblocks.BorRoSnapshots
|
|
|
|
if isBor {
|
2023-12-27 22:05:09 +00:00
|
|
|
allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
|
2023-08-18 16:10:35 +00:00
|
|
|
}
|
2023-12-27 22:05:09 +00:00
|
|
|
|
2023-05-23 07:49:17 +00:00
|
|
|
var err error
|
2023-12-27 22:05:09 +00:00
|
|
|
if snConfig.NoDownloader {
|
|
|
|
allSnapshots.ReopenFolder()
|
|
|
|
if isBor {
|
|
|
|
allBorSnapshots.ReopenFolder()
|
|
|
|
}
|
|
|
|
} else {
|
2023-05-23 07:49:17 +00:00
|
|
|
allSnapshots.OptimisticalyReopenWithDB(db)
|
2023-08-18 16:10:35 +00:00
|
|
|
if isBor {
|
|
|
|
allBorSnapshots.OptimisticalyReopenWithDB(db)
|
|
|
|
}
|
2022-06-03 08:38:10 +00:00
|
|
|
}
|
2023-08-18 16:10:35 +00:00
|
|
|
blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
|
2023-06-03 08:54:27 +00:00
|
|
|
blockWriter := blockio.NewBlockWriter(histV3)
|
2022-09-18 10:41:01 +00:00
|
|
|
|
2023-05-23 07:49:17 +00:00
|
|
|
agg, err := libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger)
|
2022-09-18 10:41:01 +00:00
|
|
|
if err != nil {
|
2024-01-09 14:48:01 +00:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2022-09-18 10:41:01 +00:00
|
|
|
}
|
2023-02-13 05:17:01 +00:00
|
|
|
if err = agg.OpenFolder(); err != nil {
|
2024-01-09 14:48:01 +00:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2022-10-05 10:54:54 +00:00
|
|
|
}
|
2024-01-09 14:48:01 +00:00
|
|
|
return blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, nil
|
2022-06-03 08:38:10 +00:00
|
|
|
}
|
|
|
|
|
2022-04-25 13:40:04 +00:00
|
|
|
func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) {
|
|
|
|
var reply remote.PeersReply
|
2022-05-10 05:17:44 +00:00
|
|
|
for _, sentryClient := range s.sentriesClient.Sentries() {
|
2022-04-25 13:40:04 +00:00
|
|
|
peers, err := sentryClient.Peers(ctx, &emptypb.Empty{})
|
|
|
|
if err != nil {
|
2022-07-28 09:57:38 +00:00
|
|
|
return nil, fmt.Errorf("ethereum backend MultiClient.Peers error: %w", err)
|
2022-04-25 13:40:04 +00:00
|
|
|
}
|
|
|
|
reply.Peers = append(reply.Peers, peers.Peers...)
|
|
|
|
}
|
2023-10-23 14:33:08 +00:00
|
|
|
|
2022-04-25 13:40:04 +00:00
|
|
|
return &reply, nil
|
|
|
|
}
|
|
|
|
|
2023-10-27 19:30:28 +00:00
|
|
|
func (s *Ethereum) DiagnosticsPeersData() map[string]*diagnostics.PeerStatistics {
|
|
|
|
reply := make(map[string]*diagnostics.PeerStatistics)
|
2023-10-23 14:33:08 +00:00
|
|
|
for _, sentryServer := range s.sentryServers {
|
|
|
|
peers := sentryServer.DiagnosticsPeersData()
|
2023-10-27 19:30:28 +00:00
|
|
|
|
|
|
|
for key, value := range peers {
|
|
|
|
reply[key] = value
|
|
|
|
}
|
2023-10-23 14:33:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return reply
|
|
|
|
}
|
|
|
|
|
Add addPeer RPC (#7804)
This PR mirrors https://github.com/testinprod-io/op-erigon/pull/54.
It is the actual implementation of the `admin_addPeer` method.
RPC Spec: Refer to
https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-admin.
> The addPeer administrative method requests adding a new remote node to
the list of tracked static nodes. The node will try to maintain
connectivity to these nodes at all times, reconnecting every once in a
while if the remote connection goes down.
Requires https://github.com/ledgerwatch/erigon-lib/pull/1033/
After https://github.com/ledgerwatch/erigon-lib/pull/1033 is merged,
the erigon-lib version will be updated and the replace directive removed from go.mod.
Note that even if the RPC response returns `true`, it does not guarantee
that an RLPx connection is established between the peers. It just adds the node's
entrypoint to its static peer list and periodically attempts connections.
## Testing
This RPC needs integration testing, so I put together a scenario.
Use the commands below for testing:
Spin up two dev nodes with p2p enabled:
Start Node 1: RPC running at port 8545:
```sh
./build/bin/erigon --datadir=dev --chain=dev --port=30303 --http.port=8545 --authrpc.port=8551 --torrent.port=42069 --no-downloader --nodiscover --private.api.addr=127.0.0.1:9090 --http --ws --http.api=admin --p2p.allowed-ports=30306,30307,30308 --authrpc.jwtsecret=/tmp/jwt1 --p2p.protocol=67,68 --log.console.verbosity=5
```
Start Node 2: RPC running at port 8546:
```sh
./build/bin/erigon --datadir=dev2 --chain=dev --port=30304 --http.port=8546 --authrpc.port=8552 --torrent.port=42068 --no-downloader --nodiscover --private.api.addr=127.0.0.1:9091 --http --ws --http.api=admin --p2p.allowed-ports=30309,30310,30311 --authrpc.jwtsecret=/tmp/jwt2 --p2p.protocol=67,68 --log.console.verbosity=5
```
Get nodeInfo of node 1 using `admin_nodeInfo` RPC:
```sh
curl --location 'localhost:8545/' \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc":"2.0",
"method":"admin_nodeInfo",
"params":[],
"id":1
}'
```
Example response:
```
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"id": "b75e0c4d2113b6f144ea8fd356a8f90e612a2a5f48a13c78d7e0e176e5724eb2",
"name": "erigon/v2.47.0-dev-5d86cdb5/darwin-arm64/go1.19.6",
"enode": "enode://05ab575d947f2d73065ea0f795dc2d96ed0ad603f3e730ab90dc881122d552c9f59ffcb148fe50546bec8b319daeb3c22ec02e7d12a7c4f2ac4cd26456a04a7c@127.0.0.1:30303?discport=0",
...
```
Get nodeInfo of node 2 using `admin_nodeInfo` RPC:
```sh
curl --location 'localhost:8546/' \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc":"2.0",
"method":"admin_nodeInfo",
"params":[],
"id":2
}'
```
Example response:
```
{
"jsonrpc": "2.0",
"id": 2,
"result": {
"id": "32d721e4d75219b021d7f83235f1f1eb8b705d6f85e634bccde564b8f7f94d78",
"name": "erigon/v2.47.0-dev-5d86cdb5/darwin-arm64/go1.19.6",
"enode": "enode://1abb8579647779e13b7f68d18f9c776cbd29281841c7f950e9cf9afa996e31120a6f481cea8e90e0f42a0eb1aa00aeafee81c4bae6c31aa16810b795c6d6e069@127.0.0.1:30304?discport=0",
...
```
Call `admin_addPeer` RPC to node 2:
```sh
curl --location 'localhost:8546/' \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc":"2.0",
"method":"admin_addPeer",
"params":["enode://05ab575d947f2d73065ea0f795dc2d96ed0ad603f3e730ab90dc881122d552c9f59ffcb148fe50546bec8b319daeb3c22ec02e7d12a7c4f2ac4cd26456a04a7c@127.0.0.1:30303"],
"id":2
}'
```
Example response:
```
{
"jsonrpc": "2.0",
"id": 2,
"result": true
}
```
Check peer info of node 1 using `admin_peers` RPC:
```sh
curl --location 'localhost:8545/' \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc":"2.0",
"method":"admin_peers",
"params":[],
"id":1
}'
```
Example response:
```
{
"jsonrpc": "2.0",
"id": 1,
"result": [
{
"enode": "enode://1abb8579647779e13b7f68d18f9c776cbd29281841c7f950e9cf9afa996e31120a6f481cea8e90e0f42a0eb1aa00aeafee81c4bae6c31aa16810b795c6d6e069@127.0.0.1:55426",
"id": "32d721e4d75219b021d7f83235f1f1eb8b705d6f85e634bccde564b8f7f94d78",
"name": "erigon/v2.47.0-dev-5d86cdb5/darwin-arm64/go1.19.6",
"caps": [
"eth/66",
"eth/67"
],
"network": {
"localAddress": "127.0.0.1:30303",
"remoteAddress": "127.0.0.1:55426",
"inbound": true,
"trusted": false,
"static": false
},
"protocols": null
}
]
}
```
---------
Co-authored-by: alex.sharov <AskAlexSharov@gmail.com>
2023-09-06 08:31:02 +00:00
|
|
|
func (s *Ethereum) AddPeer(ctx context.Context, req *remote.AddPeerRequest) (*remote.AddPeerReply, error) {
|
|
|
|
for _, sentryClient := range s.sentriesClient.Sentries() {
|
|
|
|
_, err := sentryClient.AddPeer(ctx, &proto_sentry.AddPeerRequest{Url: req.Url})
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("ethereum backend MultiClient.AddPeers error: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return &remote.AddPeerReply{Success: true}, nil
|
|
|
|
}
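// exampleAddPeer is an illustrative sketch (assumed helper) showing how the AddPeer API above
// could be driven from Go instead of the admin_addPeer curl call shown in the commit message;
// the enode URL is a placeholder, not a real node.
func exampleAddPeer(ctx context.Context, s *Ethereum) error {
	reply, err := s.AddPeer(ctx, &remote.AddPeerRequest{
		Url: "enode://<node-id>@127.0.0.1:30303", // placeholder enode URL
	})
	if err != nil {
		return err
	}
	if !reply.Success {
		return fmt.Errorf("addPeer was not accepted by the sentries")
	}
	return nil
}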
|
|
|
|
|
2020-08-03 17:40:46 +00:00
|
|
|
// Protocols returns all the currently configured
|
2015-11-17 16:33:25 +00:00
|
|
|
// network protocols to start.
|
2016-06-30 10:03:26 +00:00
|
|
|
func (s *Ethereum) Protocols() []p2p.Protocol {
|
2022-08-26 03:04:36 +00:00
|
|
|
protocols := make([]p2p.Protocol, 0, len(s.sentryServers))
|
2021-05-30 02:53:30 +00:00
|
|
|
for i := range s.sentryServers {
|
2023-02-01 22:21:31 +00:00
|
|
|
protocols = append(protocols, s.sentryServers[i].Protocols...)
|
2021-05-30 02:53:30 +00:00
|
|
|
}
|
|
|
|
return protocols
|
2015-11-17 16:33:25 +00:00
|
|
|
}
|
2015-04-22 10:46:41 +00:00
|
|
|
|
2020-08-03 17:40:46 +00:00
|
|
|
// Start implements node.Lifecycle, starting all internal goroutines needed by the
|
2016-06-30 10:03:26 +00:00
|
|
|
// Ethereum protocol implementation.
|
2020-08-03 17:40:46 +00:00
|
|
|
func (s *Ethereum) Start() error {
|
2022-05-26 03:45:35 +00:00
|
|
|
s.sentriesClient.StartStreamLoops(s.sentryCtx)
|
2022-02-10 07:40:29 +00:00
|
|
|
time.Sleep(10 * time.Millisecond) // just to reduce log-ordering confusion
|
2021-06-01 10:41:10 +00:00
|
|
|
|
2023-09-29 02:03:19 +00:00
|
|
|
hook := stages2.NewHook(s.sentryCtx, s.chainDB, s.notifications, s.stagedSync, s.blockReader, s.chainConfig, s.logger, s.sentriesClient.UpdateHead)
|
2023-08-22 11:40:36 +00:00
|
|
|
|
|
|
|
currentTDProvider := func() *big.Int {
|
|
|
|
currentTD, err := readCurrentTotalDifficulty(s.sentryCtx, s.chainDB, s.blockReader)
|
2023-08-18 20:16:30 +00:00
|
|
|
if err != nil {
|
2023-08-22 11:40:36 +00:00
|
|
|
panic(err)
|
2023-08-18 16:10:35 +00:00
|
|
|
}
|
2023-08-22 11:40:36 +00:00
|
|
|
return currentTD
|
2023-08-05 21:33:10 +00:00
|
|
|
}
|
2023-09-18 17:05:33 +00:00
|
|
|
|
2023-08-22 11:40:36 +00:00
|
|
|
if params.IsChainPoS(s.chainConfig, currentTDProvider) {
|
2023-09-06 03:23:59 +00:00
|
|
|
s.waitForStageLoopStop = nil // TODO: Ethereum.Stop should wait for execution_server shutdown
|
2023-08-04 12:42:35 +00:00
|
|
|
go s.eth1ExecutionServer.Start(s.sentryCtx)
|
2023-07-30 21:35:55 +00:00
|
|
|
} else {
|
2023-08-30 01:49:16 +00:00
|
|
|
go stages2.StageLoop(s.sentryCtx, s.chainDB, s.stagedSync, s.sentriesClient.Hd, s.waitForStageLoopStop, s.config.Sync.LoopThrottle, s.logger, s.blockReader, hook, s.config.ForcePartialCommit)
|
2023-07-30 21:35:55 +00:00
|
|
|
}
|
2021-07-29 15:37:48 +00:00
|
|
|
|
2023-09-18 17:05:33 +00:00
|
|
|
if s.chainConfig.Bor != nil {
|
2023-10-02 17:55:31 +00:00
|
|
|
s.engine.(*bor.Bor).Start(s.chainDB)
|
2023-09-18 17:05:33 +00:00
|
|
|
}
|
|
|
|
|
2023-11-02 01:35:13 +00:00
|
|
|
if s.silkwormRPCDaemonService != nil {
|
|
|
|
if err := s.silkwormRPCDaemonService.Start(); err != nil {
|
|
|
|
s.logger.Error("silkworm.StartRpcDaemon error", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s.silkwormSentryService != nil {
|
|
|
|
if err := s.silkwormSentryService.Start(); err != nil {
|
|
|
|
s.logger.Error("silkworm.SentryStart error", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-09 12:53:35 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-11-17 16:33:25 +00:00
|
|
|
// Stop implements node.Service, terminating all internal goroutines used by the
|
|
|
|
// Ethereum protocol.
|
2016-06-30 10:03:26 +00:00
|
|
|
func (s *Ethereum) Stop() error {
|
geth 1.9.13 (#469)
* core: initial version of state snapshots
* core/state: lazy sorting, snapshot invalidation
* core/state/snapshot: extract and split cap method, cover corners
* snapshot: iteration and buffering optimizations
* core/state/snapshot: unlink snapshots from blocks, quad->linear cleanup
* 123
* core/rawdb, core/state/snapshot: runtime snapshot generation
* core/state/snapshot: fix difflayer origin-initalization after flatten
* add "to merge"
* core/state/snapshot: implement snapshot layer iteration
* core/state/snapshot: node behavioural difference on bloom content
* core: journal the snapshot inside leveldb, not a flat file
* core/state/snapshot: bloom, metrics and prefetcher fixes
* core/state/snapshot: move iterator out into its own files
* core/state/snapshot: implement iterator priority for fast direct data lookup
* core/state/snapshot: full featured account iteration
* core/state/snapshot: faster account iteration, CLI integration
* core: fix broken tests due to API changes + linter
* core/state: fix an account resurrection issue
* core/tests: test for destroy+recreate contract with storage
* squashme
* core/state/snapshot, tests: sync snap gen + snaps in consensus tests
* core/state: extend snapshotter to handle account resurrections
* core/state: fix account root hash update point
* core/state: fix resurrection state clearing and access
* core/state/snapshot: handle deleted accounts in fast iterator
* core: more blockchain tests
* core/state/snapshot: fix various iteration issues due to destruct set
* core: fix two snapshot iterator flaws, decollide snap storage prefix
* core/state/snapshot/iterator: fix two disk iterator flaws
* core/rawdb: change SnapshotStoragePrefix to avoid prefix collision with preimagePrefix
* params: begin v1.9.13 release cycle
* cmd/checkpoint-admin: add some documentation (#20697)
* go.mod: update duktape to fix sprintf warnings (#20777)
This revision of go-duktype fixes the following warning
```
duk_logging.c: In function ‘duk__logger_prototype_log_shared’:
duk_logging.c:184:64: warning: ‘Z’ directive writing 1 byte into a region of size between 0 and 9 [-Wformat-overflow=]
184 | sprintf((char *) date_buf, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ",
| ^
In file included from /usr/include/stdio.h:867,
from duk_logging.c:5:
/usr/include/x86_64-linux-gnu/bits/stdio2.h:36:10: note: ‘__builtin___sprintf_chk’ output between 25 and 85 bytes into a destination of size 32
36 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
37 | __bos (__s), __fmt, __va_arg_pack ());
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
```
* core/rawdb: fix freezer table test error check
Fixes: Condition is always 'false' because 'err' is always 'nil'
* core/rawdb: improve table database (#20703)
This PR fixes issues in TableDatabase.
TableDatabase is a wrapper of underlying ethdb.Database with an additional prefix.
The prefix is applied to all entries it maintains. However when we try to retrieve entries
from it we don't handle the key properly. In theory the prefix should be truncated and
only user key is returned. But we don't do it in some cases, e.g. the iterator and batch
replayer created from it. So this PR is the fix to these issues.
* eth: when triggering a sync, check the head header TD, not block
* internal/web3ext: fix clique console apis to work on missing arguments
* rpc: dont log an error if user configures --rpcapi=rpc... (#20776)
This just prevents a false negative ERROR warning when, for some unknown
reason, a user attempts to turn on the module rpc even though it's already going
to be on.
* node, cmd/clef: report actual port used for http rpc (#20789)
* internal/ethapi: don't set sender-balance to maxuint, fixes #16999 (#20783)
Prior to this change, eth_call changed the balance of the sender account in the
EVM environment to 2^256 wei to cover the gas cost of the call execution.
We've had this behavior for a long time even though it's super confusing.
This commit sets the default call gasprice to zero instead of updating the balance,
which is better because it makes eth_call semantics less surprising. Removing
the built-in balance assignment also makes balance overrides work as expected.
* metrics: disable CPU stats (gosigar) on iOS
* cmd/devp2p: tweak DNS TTLs (#20801)
* cmd/devp2p: tweak DNS TTLs
* cmd/devp2p: bump treeNodeTTL to four weeks
* cmd/devp2p: lower route53 change limit again (#20819)
* cmd/devp2p: be very correct about route53 change splitting (#20820)
Turns out the way RDATA limits work is documented after all,
I just didn't search right. The trick to make it work is to
count UPSERTs twice.
This also adds an additional check to ensure TTL changes are
applied on existing records.
* graphql, node, rpc: fix typos in comments (#20824)
* eth: improve shutdown synchronization (#20695)
* eth: improve shutdown synchronization
Most goroutines started by eth.Ethereum didn't have any shutdown sync at
all, which lead to weird error messages when quitting the client.
This change improves the clean shutdown path by stopping all internal
components in dependency order and waiting for them to actually be
stopped before shutdown is considered done. In particular, we now stop
everything related to peers before stopping 'resident' parts such as
core.BlockChain.
* eth: rewrite sync controller
* eth: remove sync start debug message
* eth: notify chainSyncer about new peers after handshake
* eth: move downloader.Cancel call into chainSyncer
* eth: make post-sync block broadcast synchronous
* eth: add comments
* core: change blockchain stop message
* eth: change closeBloomHandler channel type
* eth/filters: fix typo on unindexedLogs function's comment (#20827)
* core: bump txpool tx max size to 128KB
* snapshotter/tests: verify snapdb post-state against trie (#20812)
* core/state/snapshot: basic trie-to-hash implementation
* tests: validate snapshot after test
* core/state/snapshot: fix review concerns
* cmd, consensus: add option to disable mmap for DAG caches/datasets (#20484)
* cmd, consensus: add option to disable mmap for DAG caches/datasets
* consensus: add benchmarks for mmap with/with lock
* cmd/clef: add newaccount command (#20782)
* cmd/clef: add newaccount command
* cmd/clef: document clef_New, update API versioning
* Update cmd/clef/intapi_changelog.md
Co-Authored-By: ligi <ligi@ligi.de>
* Update signer/core/uiapi.go
Co-Authored-By: ligi <ligi@ligi.de>
Co-authored-by: ligi <ligi@ligi.de>
* eth: add debug_accountRange API (#19645)
This new API allows reading accounts and their content by address range.
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Felix Lange <fjl@twurst.com>
* travis: allow cocoapods deploy to fail (#20833)
* metrics: improve TestTimerFunc (#20818)
The test failed due to what appears to be fluctuations in time.Sleep, which is
not the actual method under test. This change modifies it so we compare the
metered Max to the actual time instead of the desired time.
* README: update private network genesis spec with istanbul (#20841)
* add istanbul and muirGlacier to genesis states in README
* remove muirGlacier, relocate istanbul
* cmd/evm: Rework execution stats (#20792)
- Dump stats also for --bench flag.
- From memory stats only show number and size of allocations. This is what `test -bench` shows. I doubt others like number of GC runs are any useful, but can be added if requested.
- Now the mem stats are for single execution in case of --bench.
* cmd/devp2p, cmd/wnode, whisper: add missing calls to Timer.Stop (#20843)
* p2p/server: add UDP port mapping goroutine to wait group (#20846)
* accounts/abi faster unpacking of int256 (#20850)
* p2p/discv5: add missing Timer.Stop calls (#20853)
* miner/worker: add missing timer.Stop call (#20857)
* cmd/geth: fix bad genesis test (#20860)
* eth/filters: add missing Ticker.Stop call (#20862)
* eth/fetcher: add missing timer.Stop calls (#20861)
* event: add missing timer.Stop call in TestFeed (#20868)
* metrics: add missing calls to Ticker.Stop in tests (#20866)
* ethstats: add missing Ticker.Stop call (#20867)
* p2p/discv5, p2p/testing: add missing Timer.Stop calls in tests (#20869)
* core: add missing Timer.Stop call in TestLogReorgs (#20870)
* rpc: add missing timer.Stop calls in websocket tests (#20863)
* crypto/ecies: improve concatKDF (#20836)
This removes a bunch of weird code around the counter overflow check in
concatKDF and makes it actually work for different hash output sizes.
The overflow check worked as follows: concatKDF applies the hash function N
times, where N is roundup(kdLen, hashsize) / hashsize. N should not
overflow 32 bits because that would lead to a repetition in the KDF output.
A couple issues with the overflow check:
- It used the hash.BlockSize, which is wrong because the
block size is about the input of the hash function. Luckily, all standard
hash functions have a block size that's greater than the output size, so
concatKDF didn't crash, it just generated too much key material.
- The check used big.Int to compare against 2^32-1.
- The calculation could still overflow before reaching the check.
The new code in concatKDF doesn't check for overflow. Instead, there is a
new check on ECIESParams which ensures that params.KeyLen is < 512. This
removes any possibility of overflow.
There are a couple of miscellaneous improvements bundled in with this
change:
- The key buffer is pre-allocated instead of appending the hash output
to an initially empty slice.
- The code that uses concatKDF to derive keys is now shared between Encrypt
and Decrypt.
- There was a redundant invocation of IsOnCurve in Decrypt. This is now removed
because elliptic.Unmarshal already checks whether the input is a valid curve
point since Go 1.5.
Co-authored-by: Felix Lange <fjl@twurst.com>
* rpc: metrics for JSON-RPC method calls (#20847)
This adds a couple of metrics for tracking the timing
and frequency of method calls:
- rpc/requests gauge counts all requests
- rpc/success gauge counts requests which return err == nil
- rpc/failure gauge counts requests which return err != nil
- rpc/duration/all timer tracks timing of all requests
- rpc/duration/<method>/<success/failure> tracks per-method timing
* mobile: use bind.NewKeyedTransactor instead of duplicating (#20888)
It's better to reuse the existing code to create a keyed transactor
than to rewrite the logic again.
* internal/ethapi: add CallArgs.ToMessage method (#20854)
ToMessage is used to convert between ethapi.CallArgs and types.Message.
It reduces the length of the DoCall method by about half by abstracting out
the conversion between the CallArgs and the Message. This should improve the
code's maintainability and reusability.
* eth, les: fix flaky tests (#20897)
* les: fix flaky test
* eth: fix flaky test
* cmd/geth: enable metrics for geth import command (#20738)
* cmd/geth: enable metrics for geth import command
* cmd/geth: enable metrics-flags for import command
* core/vm: use a callcontext struct (#20761)
* core/vm: use a callcontext struct
* core/vm: fix tests
* core/vm/runtime: benchmark
* core/vm: make intpool push inlineable, unexpose callcontext
* docs/audits: add discv5 protocol audits from LA and C53 (#20898)
* .github: change gitter reference to discord link in issue template (#20896)
* couple of fixes to docs in clef (#20900)
* p2p/discover: add initial discovery v5 implementation (#20750)This adds an implementation of the current discovery v5 spec.There is full integration with cmd/devp2p and enode.Iterator in thisversion. In theory we could enable the new protocol as a replacement ofdiscovery v4 at any time. In practice, there will likely be a few morechanges to the spec and implementation before this can happen.
* build: upgrade to golangci-lint 1.24.0 (#20901)
* accounts/scwallet: remove unnecessary uses of fmt.Sprintf
* cmd/puppeth: remove unnecessary uses of fmt.Sprintf
* p2p/discv5: remove unnecessary use of fmt.Sprintf
* whisper/mailserver: remove unnecessary uses of fmt.Sprintf
* core: goimports -w tx_pool_test.go
* eth/downloader: goimports -w downloader_test.go
* build: upgrade to golangci-lint 1.24.0
* accounts/abi/bind: Refactored topics (#20851)
* accounts/abi/bind: refactored topics
* accounts/abi/bind: use store function to remove code duplication
* accounts/abi/bind: removed unused type defs
* accounts/abi/bind: error on tuples in topics
* Cosmetic changes to restart travis build
Co-authored-by: Guillaume Ballet <gballet@gmail.com>
* node: allow websocket and HTTP on the same port (#20810)
This change makes it possible to run geth with JSON-RPC over HTTP and
WebSocket on the same TCP port. The default port for WebSocket
is still 8546.
geth --rpc --rpcport 8545 --ws --wsport 8545
This also removes a lot of deprecated API surface from package rpc.
The rpc package is now purely about serving JSON-RPC and no longer
provides a way to start an HTTP server.
* crypto: improve error messages in LoadECDSA (#20718)
This improves error messages when the file is too short or too long.
Also rewrite the test for SaveECDSA because LoadECDSA has its own
test now.
Co-authored-by: Felix Lange <fjl@twurst.com>
* changed date of rpcstack.go since new file (#20904)
* accounts/abi/bind: fixed erroneous filtering of negative ints (#20865)
* accounts/abi/bind: fixed erroneous packing of negative ints
* accounts/abi/bind: added test cases for negative ints in topics
* accounts/abi/bind: fixed genIntType for go 1.12
* accounts/abi: minor nitpick
* cmd: deprecate --testnet, use named networks instead (#20852)
* cmd/utils: make goerli the default testnet
* cmd/geth: explicitly rename testnet to ropsten
* core: explicitly rename testnet to ropsten
* params: explicitly rename testnet to ropsten
* cmd: explicitly rename testnet to ropsten
* miner: explicitly rename testnet to ropsten
* mobile: allow for returning the goerli spec
* tests: explicitly rename testnet to ropsten
* docs: update readme to reflect changes to the default testnet
* mobile: allow for configuring goerli and rinkeby nodes
* cmd/geth: revert --testnet back to ropsten and mark as legacy
* cmd/util: mark --testnet flag as deprecated
* docs: update readme to properly reflect the 3 testnets
* cmd/utils: add an explicit deprecation warning on startup
* cmd/utils: swap goerli and ropsten in usage
* cmd/geth: swap goerli and ropsten in usage
* cmd/geth: if running a known preset, log it for convenience
* docs: improve readme on usage of ropsten's testnet datadir
* cmd/utils: check if legacy `testnet` datadir exists for ropsten
* cmd/geth: check for legacy testnet path in console command
* cmd/geth: use switch statement for complex conditions in main
* cmd/geth: move known preset log statement to the very top
* cmd/utils: create new ropsten configurations in the ropsten datadir
* cmd/utils: makedatadir should check for existing testnet dir
* cmd/geth: add legacy testnet flag to the copy db command
* cmd/geth: add legacy testnet flag to the inspect command
* les, les/lespay/client: add service value statistics and API (#20837)
This PR adds service value measurement statistics to the light client. It
also adds a private API that makes these statistics accessible. A follow-up
PR will add the new server pool which uses these statistics to select
servers with good performance.
This document describes the function of the new components:
https://gist.github.com/zsfelfoldi/3c7ace895234b7b345ab4f71dab102d4
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
* README: update min go version to 1.13 (#20911)
* travis, appveyor, build, Dockerfile: bump Go to 1.14.2 (#20913)
* travis, appveyor, build, Dockerfile: bump Go to 1.14.2
* travis, appveyor: force GO111MODULE=on for every build
* core/rawdb: fix data race between Retrieve and Close (#20919)
* core/rawdb: fixed data race between retrieve and close
closes https://github.com/ethereum/go-ethereum/issues/20420
* core/rawdb: use non-atomic load while holding mutex
* all: simplify and fix database iteration with prefix/start (#20808)
* core/state/snapshot: start fixing disk iterator seek
* ethdb, rawdb, leveldb, memorydb: implement iterators with prefix and start
* les, core/state/snapshot: iterator fixes
* all: remove two iterator methods
* all: rename Iteratee.NewIteratorWith -> NewIterator
* ethdb: fix review concerns
* params: update CHTs for the 1.9.13 release
* params: release Geth v1.9.13
* added some missing files
* post-rebase fixups
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: gary rong <garyrong0905@gmail.com>
Co-authored-by: Alex Willmer <alex@moreati.org.uk>
Co-authored-by: meowsbits <45600330+meowsbits@users.noreply.github.com>
Co-authored-by: Felix Lange <fjl@twurst.com>
Co-authored-by: rene <41963722+renaynay@users.noreply.github.com>
Co-authored-by: Ha ĐANG <dvietha@gmail.com>
Co-authored-by: Hanjiang Yu <42531996+de1acr0ix@users.noreply.github.com>
Co-authored-by: ligi <ligi@ligi.de>
Co-authored-by: Wenbiao Zheng <delweng@gmail.com>
Co-authored-by: Adam Schmideg <adamschmideg@users.noreply.github.com>
Co-authored-by: Jeff Wentworth <jeff@curvegrid.com>
Co-authored-by: Paweł Bylica <chfast@gmail.com>
Co-authored-by: ucwong <ucwong@126.com>
Co-authored-by: Marius van der Wijden <m.vanderwijden@live.de>
Co-authored-by: Luke Champine <luke.champine@gmail.com>
Co-authored-by: Boqin Qin <Bobbqqin@gmail.com>
Co-authored-by: William Morriss <wjmelements@gmail.com>
Co-authored-by: Guillaume Ballet <gballet@gmail.com>
Co-authored-by: Raw Pong Ghmoa <58883403+q9f@users.noreply.github.com>
Co-authored-by: Felföldi Zsolt <zsfelfoldi@gmail.com>
2020-04-19 17:31:47 +00:00
|
|
|
// Stop all the peer-related stuff first.
|
2021-12-14 10:13:17 +00:00
|
|
|
s.sentryCancel()
|
2022-09-18 10:41:01 +00:00
|
|
|
if s.unsubscribeEthstat != nil {
|
|
|
|
s.unsubscribeEthstat()
|
|
|
|
}
|
2022-05-10 02:29:44 +00:00
|
|
|
if s.downloader != nil {
|
|
|
|
s.downloader.Close()
|
2022-02-09 06:22:43 +00:00
|
|
|
}
|
2020-10-10 06:06:54 +00:00
|
|
|
if s.privateAPI != nil {
|
2020-12-16 13:14:31 +00:00
|
|
|
shutdownDone := make(chan bool)
|
|
|
|
go func() {
|
|
|
|
defer close(shutdownDone)
|
|
|
|
s.privateAPI.GracefulStop()
|
|
|
|
}()
|
|
|
|
select {
|
|
|
|
case <-time.After(1 * time.Second): // shutdown deadline
|
|
|
|
s.privateAPI.Stop()
|
|
|
|
case <-shutdownDone:
|
|
|
|
}
|
2020-10-10 06:06:54 +00:00
|
|
|
}
|
2022-06-29 15:39:12 +00:00
|
|
|
libcommon.SafeClose(s.sentriesClient.Hd.QuitPoWMining)
|
2022-07-28 09:57:38 +00:00
|
|
|
_ = s.engine.Close()
|
2023-09-06 03:23:59 +00:00
|
|
|
if s.waitForStageLoopStop != nil {
|
|
|
|
<-s.waitForStageLoopStop
|
|
|
|
}
|
2021-05-20 05:33:55 +00:00
|
|
|
if s.config.Miner.Enabled {
|
|
|
|
<-s.waitForMiningStop
|
|
|
|
}
|
2021-08-09 08:52:42 +00:00
|
|
|
for _, sentryServer := range s.sentryServers {
|
|
|
|
sentryServer.Close()
|
|
|
|
}
|
2023-06-15 06:11:51 +00:00
|
|
|
if s.txPoolDB != nil {
|
|
|
|
s.txPoolDB.Close()
|
2021-09-08 05:31:51 +00:00
|
|
|
}
|
2022-09-18 10:41:01 +00:00
|
|
|
if s.agg != nil {
|
|
|
|
s.agg.Close()
|
|
|
|
}
|
2022-12-29 08:04:07 +00:00
|
|
|
s.chainDB.Close()
|
2023-10-05 02:27:37 +00:00
|
|
|
|
2023-11-02 01:35:13 +00:00
|
|
|
if s.silkwormRPCDaemonService != nil {
|
|
|
|
if err := s.silkwormRPCDaemonService.Stop(); err != nil {
|
|
|
|
s.logger.Error("silkworm.StopRpcDaemon error", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s.silkwormSentryService != nil {
|
|
|
|
if err := s.silkwormSentryService.Stop(); err != nil {
|
|
|
|
s.logger.Error("silkworm.SentryStop error", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s.silkworm != nil {
|
2023-11-30 11:45:02 +00:00
|
|
|
if err := s.silkworm.Close(); err != nil {
|
|
|
|
s.logger.Error("silkworm.Close error", "err", err)
|
|
|
|
}
|
2023-10-05 02:27:37 +00:00
|
|
|
}
|
|
|
|
|
2015-11-17 16:33:25 +00:00
|
|
|
return nil
|
2014-12-14 18:03:24 +00:00
|
|
|
}
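// exampleGracefulStopWithDeadline is an illustrative sketch (assumed helper) of the shutdown
// pattern Stop uses above for the private gRPC API: attempt a graceful stop, but fall back to
// a hard stop if it does not finish within the deadline. The one-second deadline mirrors Stop.
func exampleGracefulStopWithDeadline(graceful, hard func()) {
	shutdownDone := make(chan struct{})
	go func() {
		defer close(shutdownDone)
		graceful() // e.g. (*grpc.Server).GracefulStop
	}()
	select {
	case <-time.After(1 * time.Second): // shutdown deadline
		hard() // e.g. (*grpc.Server).Stop
	case <-shutdownDone:
	}
}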
|
2022-01-20 07:34:50 +00:00
|
|
|
|
|
|
|
func (s *Ethereum) ChainDB() kv.RwDB {
|
|
|
|
return s.chainDB
|
|
|
|
}
|
|
|
|
|
2023-01-13 18:12:18 +00:00
|
|
|
func (s *Ethereum) ChainConfig() *chain.Config {
|
2022-10-05 04:42:38 +00:00
|
|
|
return s.chainConfig
|
|
|
|
}
|
|
|
|
|
2022-01-20 07:34:50 +00:00
|
|
|
func (s *Ethereum) StagedSync() *stagedsync.Sync {
|
|
|
|
return s.stagedSync
|
|
|
|
}
|
|
|
|
|
2022-10-05 04:42:38 +00:00
|
|
|
func (s *Ethereum) Notifications() *shards.Notifications {
|
2022-01-20 07:34:50 +00:00
|
|
|
return s.notifications
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Ethereum) SentryCtx() context.Context {
|
|
|
|
return s.sentryCtx
|
|
|
|
}
|
|
|
|
|
2023-11-02 01:35:13 +00:00
|
|
|
func (s *Ethereum) SentryControlServer() *sentry_multi_client.MultiClient {
|
2022-05-10 05:17:44 +00:00
|
|
|
return s.sentriesClient
|
2022-01-20 07:34:50 +00:00
|
|
|
}
|
2023-05-23 09:30:47 +00:00
|
|
|
func (s *Ethereum) BlockIO() (services.FullBlockReader, *blockio.BlockWriter) {
|
|
|
|
return s.blockReader, s.blockWriter
|
|
|
|
}
|
2022-07-25 04:31:57 +00:00
|
|
|
|
2023-08-30 09:25:02 +00:00
|
|
|
func (s *Ethereum) TxpoolServer() txpool_proto.TxpoolServer {
|
|
|
|
return s.txPoolGrpcServer
|
|
|
|
}
|
|
|
|
|
2022-07-25 04:31:57 +00:00
|
|
|
// RemoveContents is like os.RemoveAll, but preserves dir itself
|
|
|
|
func RemoveContents(dir string) error {
|
|
|
|
d, err := os.Open(dir)
|
|
|
|
if err != nil {
|
2022-07-25 13:58:40 +00:00
|
|
|
if errors.Is(err, fs.ErrNotExist) {
|
2023-08-21 23:24:26 +00:00
|
|
|
// dir does not exist: nothing to remove, just re-create it (can happen on Windows)
|
|
|
|
_ = os.MkdirAll(dir, 0o755)
|
2022-07-25 13:58:40 +00:00
|
|
|
return nil
|
|
|
|
}
|
2022-07-25 04:31:57 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer d.Close()
|
|
|
|
names, err := d.Readdirnames(-1)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, name := range names {
|
|
|
|
err = os.RemoveAll(filepath.Join(dir, name))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
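// exampleCleanTmpDir is an illustrative sketch (assumed helper) of using RemoveContents above:
// handy when a directory must survive (e.g. it is a mount point) but its contents should be
// wiped. The path is a placeholder.
func exampleCleanTmpDir() error {
	tmp := filepath.Join(os.TempDir(), "erigon-example-tmp") // placeholder path
	if err := os.MkdirAll(tmp, 0o755); err != nil {
		return err
	}
	return RemoveContents(tmp) // deletes everything inside tmp but keeps tmp itself
}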
|
2022-12-12 13:25:47 +00:00
|
|
|
|
|
|
|
func checkPortIsFree(addr string) (free bool) {
|
|
|
|
c, err := net.DialTimeout("tcp", addr, 200*time.Millisecond)
|
|
|
|
if err != nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
c.Close()
|
|
|
|
return false
|
|
|
|
}
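// exampleWaitForFreePort is an illustrative sketch (assumed helper) showing how checkPortIsFree
// above could be polled before binding a listener; the retry count and delay are arbitrary.
func exampleWaitForFreePort(addr string) error {
	for i := 0; i < 10; i++ {
		if checkPortIsFree(addr) {
			return nil
		}
		time.Sleep(500 * time.Millisecond) // give the previous owner a chance to release the port
	}
	return fmt.Errorf("port is still busy: %s", addr)
}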
|
2023-08-05 21:33:10 +00:00
|
|
|
|
2023-08-22 11:40:36 +00:00
|
|
|
func readCurrentTotalDifficulty(ctx context.Context, db kv.RwDB, blockReader services.FullBlockReader) (*big.Int, error) {
|
|
|
|
var currentTD *big.Int
|
|
|
|
err := db.View(ctx, func(tx kv.Tx) error {
|
|
|
|
h, err := blockReader.CurrentBlock(tx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if h == nil {
|
|
|
|
currentTD = nil
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
currentTD, err = rawdb.ReadTd(tx, h.Hash(), h.NumberU64())
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
return currentTD, err
|
2023-08-05 21:33:10 +00:00
|
|
|
}
|
2023-10-23 14:33:08 +00:00
|
|
|
|
|
|
|
func (s *Ethereum) Sentinel() rpcsentinel.SentinelClient {
|
|
|
|
return s.sentinel
|
|
|
|
}
|