erigon-pulse/eth/backend.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package eth implements the Ethereum protocol.
package eth
import (
"context"
"errors"
"fmt"
"io/fs"
"math/big"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
lru "github.com/hashicorp/golang-lru/arc/v2"
"github.com/holiman/uint256"
"github.com/ledgerwatch/log/v3"
"golang.org/x/exp/slices"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/chain/networkname"
"github.com/ledgerwatch/erigon-lib/chain/snapcfg"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/diagnostics"
"github.com/ledgerwatch/erigon-lib/direct"
"github.com/ledgerwatch/erigon-lib/downloader"
"github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
"github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc"
proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcache"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon-lib/txpool"
"github.com/ledgerwatch/erigon-lib/txpool/txpooluitl"
types2 "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/persistence"
"github.com/ledgerwatch/erigon/cl/persistence/db_config"
"github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters"
clcore "github.com/ledgerwatch/erigon/cl/phase1/core"
"github.com/ledgerwatch/erigon/cl/phase1/execution_client"
"github.com/ledgerwatch/erigon/cl/sentinel"
"github.com/ledgerwatch/erigon/cl/sentinel/service"
"github.com/ledgerwatch/erigon/cmd/caplin/caplin1"
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli"
"github.com/ledgerwatch/erigon/common/debug"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/clique"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/consensus/merge"
"github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/core/systemcontracts"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/ethconsensusconfig"
"github.com/ledgerwatch/erigon/eth/ethutils"
"github.com/ledgerwatch/erigon/eth/protocols/eth"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/ethdb/privateapi"
"github.com/ledgerwatch/erigon/ethdb/prune"
"github.com/ledgerwatch/erigon/ethstats"
"github.com/ledgerwatch/erigon/node"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/p2p/enode"
"github.com/ledgerwatch/erigon/p2p/sentry"
"github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/polygon/bor"
"github.com/ledgerwatch/erigon/polygon/bor/finality/flags"
"github.com/ledgerwatch/erigon/polygon/bor/valset"
"github.com/ledgerwatch/erigon/polygon/heimdall"
"github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/builder"
"github.com/ledgerwatch/erigon/turbo/engineapi"
"github.com/ledgerwatch/erigon/turbo/engineapi/engine_block_downloader"
"github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers"
"github.com/ledgerwatch/erigon/turbo/execution/eth1"
"github.com/ledgerwatch/erigon/turbo/execution/eth1/eth1_chain_reader.go"
"github.com/ledgerwatch/erigon/turbo/jsonrpc"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/turbo/shards"
"github.com/ledgerwatch/erigon/turbo/silkworm"
"github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
"github.com/ledgerwatch/erigon/turbo/snapshotsync/snap"
stages2 "github.com/ledgerwatch/erigon/turbo/stages"
"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
)
// Config contains the configuration options of the ETH protocol.
// Deprecated: use ethconfig.Config instead.
type Config = ethconfig.Config
// Ethereum implements the Ethereum full node service.
type Ethereum struct {
config *ethconfig.Config
// DB interfaces
chainDB kv.RwDB
privateAPI *grpc.Server
engine consensus.Engine
gasPrice *uint256.Int
etherbase libcommon.Address
networkID uint64
lock sync.RWMutex // Protects the mutable fields (e.g. gas price and etherbase)
chainConfig *chain.Config
apiList []rpc.API
genesisBlock *types.Block
genesisHash libcommon.Hash
eth1ExecutionServer *eth1.EthereumExecutionModule
ethBackendRPC *privateapi.EthBackendServer
engineBackendRPC *engineapi.EngineServer
miningRPC txpool_proto.MiningServer
stateChangesClient txpool.StateChangesClient
miningSealingQuit chan struct{}
pendingBlocks chan *types.Block
minedBlocks chan *types.Block
// downloader fields
sentryCtx context.Context
sentryCancel context.CancelFunc
sentriesClient *sentry_multi_client.MultiClient
sentryServers []*sentry.GrpcServer
stagedSync *stagedsync.Sync
pipelineStagedSync *stagedsync.Sync
syncStages []*stagedsync.Stage
syncUnwindOrder stagedsync.UnwindOrder
syncPruneOrder stagedsync.PruneOrder
downloaderClient proto_downloader.DownloaderClient
notifications *shards.Notifications
unsubscribeEthstat func()
waitForStageLoopStop chan struct{}
waitForMiningStop chan struct{}
txPoolDB kv.RwDB
txPool *txpool.TxPool
newTxs chan types2.Announcements
txPoolFetch *txpool.Fetch
txPoolSend *txpool.Send
txPoolGrpcServer txpool_proto.TxpoolServer
notifyMiningAboutNewTxs chan struct{}
forkValidator *engine_helpers.ForkValidator
downloader *downloader.Downloader
agg *libstate.AggregatorV3
blockSnapshots *freezeblocks.RoSnapshots
blockReader services.FullBlockReader
blockWriter *blockio.BlockWriter
kvRPC *remotedbserver.KvServer
logger log.Logger
sentinel rpcsentinel.SentinelClient
silkworm *silkworm.Silkworm
silkwormRPCDaemonService *silkworm.RpcDaemonService
silkwormSentryService *silkworm.SentryService
}
func splitAddrIntoHostAndPort(addr string) (host string, port int, err error) {
idx := strings.LastIndexByte(addr, ':')
if idx < 0 {
return "", 0, errors.New("invalid address format")
}
host = addr[:idx]
port, err = strconv.Atoi(addr[idx+1:])
return
}
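// Example usage (illustrative): the split is on the last ':', so IPv6
// literals keep their brackets in the host part.
//
//	host, port, err := splitAddrIntoHostAndPort("127.0.0.1:30303")
//	// host == "127.0.0.1", port == 30303, err == nil
//
//	host, port, err = splitAddrIntoHostAndPort("[::1]:30303")
//	// host == "[::1]", port == 30303, err == nil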
const blockBufferSize = 128
// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) {
config.Snapshot.Enabled = config.Sync.UseSnapshots
if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 {
logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
}
dirs := stack.Config().Dirs
tmpdir := dirs.Tmp
if err := RemoveContents(tmpdir); err != nil { // clean it on startup
return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err)
}
// Assemble the Ethereum object
chainKv, err := node.OpenDatabase(ctx, stack.Config(), kv.ChainDB, "", false, logger)
if err != nil {
return nil, err
}
latestBlockBuiltStore := builder.NewLatestBlockBuiltStore()
if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error {
if err = stagedsync.UpdateMetrics(tx); err != nil {
return err
}
config.Prune, err = prune.EnsureNotChanged(tx, config.Prune)
if err != nil {
return err
}
config.HistoryV3, err = kvcfg.HistoryV3.WriteOnce(tx, config.HistoryV3)
if err != nil {
return err
}
isCorrectSync, useSnapshots, err := snap.EnsureNotChanged(tx, config.Snapshot)
if err != nil {
return err
}
// if we are in the incorrect syncmode then we change it to the appropriate one
if !isCorrectSync {
config.Sync.UseSnapshots = useSnapshots
config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(config.Genesis.Config.ChainName) && useSnapshots
}
return nil
}); err != nil {
return nil, err
}
if !config.Sync.UseSnapshots {
if err := downloader.CreateProhibitNewDownloadsFile(dirs.Snap); err != nil {
return nil, err
}
}
ctx, ctxCancel := context.WithCancel(context.Background())
// The kv_remote architecture blocks on stream.Send, which means the current architecture requires an unlimited number of txs to provide good throughput.
backend := &Ethereum{
sentryCtx: ctx,
sentryCancel: ctxCancel,
config: config,
chainDB: chainKv,
networkID: config.NetworkID,
etherbase: config.Miner.Etherbase,
waitForStageLoopStop: make(chan struct{}),
waitForMiningStop: make(chan struct{}),
notifications: &shards.Notifications{
Events: shards.NewEvents(),
Accumulator: shards.NewAccumulator(),
},
logger: logger,
}
var chainConfig *chain.Config
var genesis *types.Block
if err := backend.chainDB.Update(context.Background(), func(tx kv.RwTx) error {
h, err := rawdb.ReadCanonicalHash(tx, 0)
if err != nil {
panic(err)
}
genesisSpec := config.Genesis
if h != (libcommon.Hash{}) { // fallback to db content
genesisSpec = nil
}
var genesisErr error
chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideCancunTime, tmpdir, logger)
if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok {
return genesisErr
}
return nil
}); err != nil {
panic(err)
}
backend.chainConfig = chainConfig
backend.genesisBlock = genesis
backend.genesisHash = genesis.Hash()
logger.Info("Initialised chain configuration", "config", chainConfig, "genesis", genesis.Hash())
snapshotVersion := snapcfg.KnownCfg(chainConfig.ChainName, 0).Version
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, snapshotVersion, config.Snapshot, config.HistoryV3, chainConfig.Bor != nil, logger)
if err != nil {
return nil, err
}
backend.agg, backend.blockSnapshots, backend.blockReader, backend.blockWriter = agg, allSnapshots, blockReader, blockWriter
if config.HistoryV3 {
backend.chainDB, err = temporal.New(backend.chainDB, agg, systemcontracts.SystemContractCodeLookup[config.Genesis.Config.ChainName])
if err != nil {
return nil, err
}
chainKv = backend.chainDB //nolint
}
if err := backend.setUpSnapDownloader(ctx, config.Downloader); err != nil {
return nil, err
}
kvRPC := remotedbserver.NewKvServer(ctx, backend.chainDB, allSnapshots, allBorSnapshots, agg, logger)
backend.notifications.StateChangesConsumer = kvRPC
backend.kvRPC = kvRPC
backend.gasPrice, _ = uint256.FromBig(config.Miner.GasPrice)
if config.SilkwormExecution || config.SilkwormRpcDaemon || config.SilkwormSentry {
backend.silkworm, err = silkworm.New(config.Dirs.DataDir)
if err != nil {
return nil, err
}
}
var sentries []direct.SentryClient
if len(stack.Config().P2P.SentryAddr) > 0 {
for _, addr := range stack.Config().P2P.SentryAddr {
sentryClient, err := sentry_multi_client.GrpcClient(backend.sentryCtx, addr)
if err != nil {
return nil, err
}
sentries = append(sentries, sentryClient)
}
} else if config.SilkwormSentry {
apiPort := 53774
apiAddr := fmt.Sprintf("127.0.0.1:%d", apiPort)
p2pConfig := stack.Config().P2P
collectNodeURLs := func(nodes []*enode.Node) []string {
var urls []string
for _, n := range nodes {
urls = append(urls, n.URLv4())
}
return urls
}
settings := silkworm.SentrySettings{
ClientId: p2pConfig.Name,
ApiPort: apiPort,
Port: p2pConfig.ListenPort(),
Nat: p2pConfig.NATSpec,
NetworkId: config.NetworkID,
NodeKey: crypto.FromECDSA(p2pConfig.PrivateKey),
StaticPeers: collectNodeURLs(p2pConfig.StaticNodes),
Bootnodes: collectNodeURLs(p2pConfig.BootstrapNodes),
NoDiscover: p2pConfig.NoDiscovery,
MaxPeers: p2pConfig.MaxPeers,
}
silkwormSentryService := silkworm.NewSentryService(backend.silkworm, settings)
backend.silkwormSentryService = &silkwormSentryService
sentryClient, err := sentry_multi_client.GrpcClient(backend.sentryCtx, apiAddr)
if err != nil {
return nil, err
}
sentries = append(sentries, sentryClient)
} else {
var readNodeInfo = func() *eth.NodeInfo {
var res *eth.NodeInfo
_ = backend.chainDB.View(context.Background(), func(tx kv.Tx) error {
res = eth.ReadNodeInfo(tx, backend.chainConfig, backend.genesisHash, backend.networkID)
return nil
})
return res
}
discovery := func() enode.Iterator {
d, err := setupDiscovery(backend.config.EthDiscoveryURLs)
if err != nil {
panic(err)
}
return d
}
refCfg := stack.Config().P2P
listenHost, listenPort, err := splitAddrIntoHostAndPort(refCfg.ListenAddr)
if err != nil {
return nil, err
}
var pi int // points to next port to be picked from refCfg.AllowedPorts
for _, protocol := range refCfg.ProtocolVersion {
cfg := refCfg
cfg.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, eth.ProtocolToString[protocol])
// pick port from allowed list
var picked bool
for ; pi < len(refCfg.AllowedPorts) && !picked; pi++ {
pc := int(refCfg.AllowedPorts[pi])
if pc == 0 {
// For ephemeral ports, probing to see if the port is taken does not
// make sense.
picked = true
break
}
if !checkPortIsFree(fmt.Sprintf("%s:%d", listenHost, pc)) {
logger.Warn("bind protocol to port has failed: port is busy", "protocols", fmt.Sprintf("eth/%d", refCfg.ProtocolVersion), "port", pc)
continue
}
if listenPort != pc {
listenPort = pc
}
pi++
picked = true
break
}
if !picked {
return nil, fmt.Errorf("run out of allowed ports for p2p eth protocols %v. Extend allowed port list via --p2p.allowed-ports", cfg.AllowedPorts)
}
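// checkPortIsFree above is defined elsewhere in this package. As a sketch of
// the technique (a dial probe with an assumed shape, not the actual
// implementation), it could look like:
//
//	func checkPortIsFree(addr string) bool {
//		conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
//		if err != nil {
//			return true // nothing answered, the port looks free
//		}
//		conn.Close()
//		return false // something is already listening
//	}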
cfg.ListenAddr = fmt.Sprintf("%s:%d", listenHost, listenPort)
server := sentry.NewGrpcServer(backend.sentryCtx, discovery, readNodeInfo, &cfg, protocol, logger)
backend.sentryServers = append(backend.sentryServers, server)
sentries = append(sentries, direct.NewSentryClientDirect(protocol, server))
}
go func() {
logEvery := time.NewTicker(180 * time.Second)
defer logEvery.Stop()
var logItems []interface{}
for {
select {
case <-backend.sentryCtx.Done():
return
case <-logEvery.C:
logItems = logItems[:0]
peerCountMap := map[uint]int{}
for _, srv := range backend.sentryServers {
counts := srv.SimplePeerCount()
for protocol, count := range counts {
peerCountMap[protocol] += count
}
}
for protocol, count := range peerCountMap {
logItems = append(logItems, eth.ProtocolToString[protocol], strconv.Itoa(count))
}
logger.Info("[p2p] GoodPeers", logItems...)
}
}
}()
}
var currentBlock *types.Block
if err := chainKv.View(context.Background(), func(tx kv.Tx) error {
currentBlock, err = blockReader.CurrentBlock(tx)
return err
}); err != nil {
panic(err)
}
currentBlockNumber := uint64(0)
if currentBlock != nil {
currentBlockNumber = currentBlock.NumberU64()
}
logger.Info("Initialising Ethereum protocol", "network", config.NetworkID)
var consensusConfig interface{}
if chainConfig.Clique != nil {
consensusConfig = &config.Clique
} else if chainConfig.Aura != nil {
consensusConfig = &config.Aura
} else if chainConfig.Bor != nil {
consensusConfig = chainConfig.Bor
} else {
consensusConfig = &config.Ethash
}
var heimdallClient heimdall.HeimdallClient
if chainConfig.Bor != nil {
if !config.WithoutHeimdall {
heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger)
}
flags.Milestone = config.WithHeimdallMilestones
}
backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger)
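// inMemoryExecution validates a candidate chain segment by running the staged
// sync in memory, without writing to the canonical DB; the fork validator
// created below uses it to vet new payloads and fork-choice updates before
// they are committed.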
inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
notifications *shards.Notifications) error {
terseLogger := log.New()
terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
// Needs its own notifications to not update RPC daemon and txpool about pending blocks
stateSync := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient,
dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger)
chainReader := stagedsync.NewChainReaderImpl(chainConfig, txc.Tx, blockReader, logger)
// We start the mining step
if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil {
logger.Warn("Could not validate block", "err", err)
return err
}
progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
if err != nil {
return err
}
if progress < header.Number.Uint64() {
return fmt.Errorf("unsuccessful execution, progress %d < expected %d", progress, header.Number.Uint64())
}
return nil
}
backend.forkValidator = engine_helpers.NewForkValidator(ctx, currentBlockNumber, inMemoryExecution, tmpdir, backend.blockReader)
// limit "new block" broadcasts to at most 10 random peers at time
maxBlockBroadcastPeers := func(header *types.Header) uint { return 10 }
// unlimited "new block" broadcasts to all peers for blocks announced by Bor validators
if borEngine, ok := backend.engine.(*bor.Bor); ok {
defaultValue := maxBlockBroadcastPeers(nil)
maxBlockBroadcastPeers = func(header *types.Header) uint {
isValidator, err := borEngine.IsValidator(header)
if err != nil {
logger.Warn("maxBlockBroadcastPeers: borEngine.IsValidator has failed", "err", err)
return defaultValue
}
if isValidator {
// 0 means send to all
return 0
}
return defaultValue
}
}
backend.sentriesClient, err = sentry_multi_client.NewMultiClient(
chainKv,
stack.Config().NodeName(),
chainConfig,
genesis.Hash(),
genesis.Time(),
backend.engine,
backend.config.NetworkID,
sentries,
config.Sync,
blockReader,
blockBufferSize,
stack.Config().SentryLogPeerInfo,
backend.forkValidator,
maxBlockBroadcastPeers,
logger,
)
if err != nil {
return nil, err
}
config.TxPool.NoGossip = config.DisableTxPoolGossip
var miningRPC txpool_proto.MiningServer
stateDiffClient := direct.NewStateDiffClientDirect(kvRPC)
if config.DeprecatedTxPool.Disable {
backend.txPoolGrpcServer = &txpool.GrpcDisabled{}
} else {
//cacheConfig := kvcache.DefaultCoherentCacheConfig
//cacheConfig.MetricsLabel = "txpool"
backend.newTxs = make(chan types2.Announcements, 1024)
//defer close(newTxs)
backend.txPoolDB, backend.txPool, backend.txPoolFetch, backend.txPoolSend, backend.txPoolGrpcServer, err = txpooluitl.AllComponents(
ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, backend.chainDB, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger,
)
if err != nil {
return nil, err
}
}
backend.notifyMiningAboutNewTxs = make(chan struct{}, 1)
backend.miningSealingQuit = make(chan struct{})
backend.pendingBlocks = make(chan *types.Block, 1)
backend.minedBlocks = make(chan *types.Block, 1)
miner := stagedsync.NewMiningState(&config.Miner)
backend.pendingBlocks = miner.PendingResultCh
var (
snapDb kv.RwDB
recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot]
signatures *lru.ARCCache[libcommon.Hash, libcommon.Address]
)
if bor, ok := backend.engine.(*bor.Bor); ok {
snapDb = bor.DB
recents = bor.Recents
signatures = bor.Signatures
}
// proof-of-work mining
mining := stagedsync.New(
config.Sync,
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader),
stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool, backend.txPoolDB, blockReader),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger)
var ethashApi *ethash.API
if casted, ok := backend.engine.(*ethash.Ethash); ok {
ethashApi = casted.APIs(nil)[1].Service.(*ethash.API)
}
// proof-of-stake mining
assembleBlockPOS := func(param *core.BlockBuilderParameters, interrupt *int32) (*types.BlockWithReceipts, error) {
miningStatePos := stagedsync.NewProposingState(&config.Miner)
miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient
proposingSync := stagedsync.New(
config.Sync,
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPoolDB, param, tmpdir, backend.blockReader),
stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool, backend.txPoolDB, blockReader),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore),
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger)
// We start the mining step
if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil {
return nil, err
}
block := <-miningStatePos.MiningResultPOSCh
return block, nil
}
// Initialize ethbackend
ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader, logger, latestBlockBuiltStore)
// Initialize engine backend
blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, backend.chainDB, backend.chainConfig, backend.notifications.Events, logger)
miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi, logger)
var creds credentials.TransportCredentials
if stack.Config().PrivateApiAddr != "" {
if stack.Config().TLSConnection {
creds, err = grpcutil.TLS(stack.Config().TLSCACert, stack.Config().TLSCertFile, stack.Config().TLSKeyFile)
if err != nil {
return nil, err
}
}
backend.privateAPI, err = privateapi.StartGrpc(
kvRPC,
ethBackendRPC,
backend.txPoolGrpcServer,
miningRPC,
stack.Config().PrivateApiAddr,
stack.Config().PrivateApiRateLimit,
creds,
stack.Config().HealthCheck,
logger)
if err != nil {
return nil, fmt.Errorf("private api: %w", err)
}
}
if currentBlock == nil {
currentBlock = genesis
}
// We start the transaction pool on startup, for a couple of reasons:
// 1) Hive tests require us to do so, and starting it from eth_sendRawTransaction is not viable as we do not have enough data
// to initialize it properly.
// 2) we cannot propose for block 1 regardless.
if !config.DeprecatedTxPool.Disable {
backend.txPoolFetch.ConnectCore()
backend.txPoolFetch.ConnectSentries()
var newTxsBroadcaster *txpool.NewSlotsStreams
if casted, ok := backend.txPoolGrpcServer.(*txpool.GrpcServer); ok {
newTxsBroadcaster = casted.NewSlotsStreams
}
go txpool.MainLoop(backend.sentryCtx,
backend.txPoolDB, backend.txPool, backend.newTxs, backend.txPoolSend, newTxsBroadcaster,
func() {
select {
case backend.notifyMiningAboutNewTxs <- struct{}{}:
default:
}
})
}
go func() {
defer debug.LogPanic()
for {
select {
case b := <-backend.minedBlocks:
// Add mined header and block body before broadcast. This is because the broadcast call
// will trigger the staged sync which will require headers and blocks to be available
// in their respective cache in the download stage. If not found, it would cause a
// liveness issue for the chain.
if err := backend.sentriesClient.Hd.AddMinedHeader(b.Header()); err != nil {
logger.Error("add mined block to header downloader", "err", err)
}
backend.sentriesClient.Bd.AddToPrefetch(b.Header(), b.RawBody())
//p2p
//backend.sentriesClient.BroadcastNewBlock(context.Background(), b, b.Difficulty())
//rpcdaemon
if err := miningRPC.(*privateapi.MiningServer).BroadcastMinedBlock(b); err != nil {
logger.Error("txpool rpc mined block broadcast", "err", err)
}
logger.Trace("BroadcastMinedBlock successful", "number", b.Number(), "GasUsed", b.GasUsed(), "txn count", b.Transactions().Len())
backend.sentriesClient.PropagateNewBlockHashes(ctx, []headerdownload.Announce{
{
Number: b.NumberU64(),
Hash: b.Hash(),
},
})
case b := <-backend.pendingBlocks:
if err := miningRPC.(*privateapi.MiningServer).BroadcastPendingBlock(b); err != nil {
logger.Error("txpool rpc pending block broadcast", "err", err)
}
case <-backend.sentriesClient.Hd.QuitPoWMining:
return
}
}
}()
if err := backend.StartMining(context.Background(), backend.chainDB, stateDiffClient, mining, miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir, logger); err != nil {
return nil, err
}
backend.ethBackendRPC, backend.miningRPC, backend.stateChangesClient = ethBackendRPC, miningRPC, stateDiffClient
backend.syncStages = stages2.NewDefaultStages(backend.sentryCtx, backend.chainDB, snapDb, stack.Config().P2P, config, backend.sentriesClient, backend.notifications, backend.downloaderClient,
blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger)
backend.syncUnwindOrder = stagedsync.DefaultUnwindOrder
backend.syncPruneOrder = stagedsync.DefaultPruneOrder
backend.stagedSync = stagedsync.New(config.Sync, backend.syncStages, backend.syncUnwindOrder, backend.syncPruneOrder, logger)
hook := stages2.NewHook(backend.sentryCtx, backend.chainDB, backend.notifications, backend.stagedSync, backend.blockReader, backend.chainConfig, backend.logger, backend.sentriesClient.UpdateHead)
checkStateRoot := true
pipelineStages := stages2.NewPipelineStages(ctx, chainKv, config, stack.Config().P2P, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot)
backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, chainKv, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3)
executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer)
engineBackendRPC := engineapi.NewEngineServer(
ctx,
logger,
chainConfig,
executionRpc,
backend.sentriesClient.Hd,
engine_block_downloader.NewEngineBlockDownloader(ctx, logger, backend.sentriesClient.Hd, executionRpc,
backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader,
chainKv, chainConfig, tmpdir, config.Sync.BodyDownloadTimeoutSeconds),
false,
config.Miner.EnabledPOS)
backend.engineBackendRPC = engineBackendRPC
var engine execution_client.ExecutionEngine
// Gnosis has too few blocks on its network for phase2 to work. Once we have proper snapshot automation, it can go back to normal.
if config.NetworkID == uint64(clparams.GnosisNetwork) {
// Read the jwt secret
jwtSecret, err := cli.ObtainJWTSecret(&stack.Config().Http, logger)
if err != nil {
return nil, err
}
engine, err = execution_client.NewExecutionClientRPC(ctx, jwtSecret, stack.Config().Http.AuthRpcHTTPListenAddress, stack.Config().Http.AuthRpcPort)
if err != nil {
return nil, err
}
} else {
engine, err = execution_client.NewExecutionClientDirect(ctx, eth1_chain_reader.NewChainReaderEth1(ctx, chainConfig, executionRpc, 1000))
if err != nil {
return nil, err
}
}
// If we choose not to run a consensus layer, run our embedded one.
if config.InternalCL && clparams.EmbeddedSupported(config.NetworkID) {
genesisCfg, networkCfg, beaconCfg := clparams.GetConfigsByNetwork(clparams.NetworkType(config.NetworkID))
if err != nil {
return nil, err
}
state, err := clcore.RetrieveBeaconState(ctx, beaconCfg, genesisCfg,
clparams.GetCheckpointSyncEndpoint(clparams.NetworkType(config.NetworkID)))
if err != nil {
return nil, err
}
forkDigest, err := fork.ComputeForkDigest(beaconCfg, genesisCfg)
if err != nil {
return nil, err
}
rawBeaconBlockChainDb, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconCfg, dirs.CaplinHistory)
historyDB, indiciesDB, err := caplin1.OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, beaconCfg, rawBeaconBlockChainDb, dirs.CaplinIndexing, engine, false)
if err != nil {
return nil, err
}
client, err := service.StartSentinelService(&sentinel.SentinelConfig{
IpAddr: config.LightClientDiscoveryAddr,
Port: int(config.LightClientDiscoveryPort),
TCPPort: uint(config.LightClientDiscoveryTCPPort),
GenesisConfig: genesisCfg,
NetworkConfig: networkCfg,
BeaconConfig: beaconCfg,
TmpDir: tmpdir,
}, rawBeaconBlockChainDb, indiciesDB, &service.ServerConfig{Network: "tcp", Addr: fmt.Sprintf("%s:%d", config.SentinelAddr, config.SentinelPort)}, creds, &cltypes.Status{
ForkDigest: forkDigest,
FinalizedRoot: state.FinalizedCheckpoint().BlockRoot(),
FinalizedEpoch: state.FinalizedCheckpoint().Epoch(),
HeadSlot: state.FinalizedCheckpoint().Epoch() * beaconCfg.SlotsPerEpoch,
HeadRoot: state.FinalizedCheckpoint().BlockRoot(),
}, logger)
if err != nil {
return nil, err
}
backend.sentinel = client
go func() {
eth1Getter := getters.NewExecutionSnapshotReader(ctx, beaconCfg, blockReader, backend.chainDB)
if err := caplin1.RunCaplinPhase1(ctx, client, engine, beaconCfg, genesisCfg, state, nil, dirs, snapshotVersion, config.BeaconRouter, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, config.CaplinConfig.Archive, historyDB, indiciesDB); err != nil {
logger.Error("could not start caplin", "err", err)
}
ctxCancel()
}()
}
return backend, nil
}
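// Wiring sketch (illustrative, assuming an already configured *node.Node and
// *ethconfig.Config; the real node setup lives outside this file):
//
//	backend, err := New(ctx, stack, config, logger)
//	if err != nil {
//		return nil, err
//	}
//	if err := backend.Init(stack, config); err != nil {
//		return nil, err
//	}
//	// Init registers the backend via stack.RegisterLifecycle, so the node
//	// lifecycle then drives it.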
func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config) error {
ethBackendRPC, miningRPC, stateDiffClient := s.ethBackendRPC, s.miningRPC, s.stateChangesClient
blockReader := s.blockReader
ctx := s.sentryCtx
chainKv := s.chainDB
var err error
if config.Genesis.Config.Bor == nil {
s.sentriesClient.Hd.StartPoSDownloader(s.sentryCtx, s.sentriesClient.SendHeaderRequest, s.sentriesClient.Penalize)
}
emptyBadHash := config.BadBlockHash == libcommon.Hash{}
if !emptyBadHash {
var badBlockHeader *types.Header
if err = chainKv.View(context.Background(), func(tx kv.Tx) error {
header, hErr := rawdb.ReadHeaderByHash(tx, config.BadBlockHash)
badBlockHeader = header
return hErr
}); err != nil {
return err
}
if badBlockHeader != nil {
unwindPoint := badBlockHeader.Number.Uint64() - 1
s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, fmt.Errorf("Init unwind")))
}
}
//eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.Miner.GasPrice
}
//eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
if config.Ethstats != "" {
var headCh chan [][]byte
headCh, s.unsubscribeEthstat = s.notifications.Events.AddHeaderSubscription()
if err := ethstats.New(stack, s.sentryServers, chainKv, s.blockReader, s.engine, config.Ethstats, s.networkID, ctx.Done(), headCh); err != nil {
return err
}
}
// start HTTP API
httpRpcCfg := stack.Config().Http
ethRpcClient, txPoolRpcClient, miningRpcClient, stateCache, ff, err := cli.EmbeddedServices(ctx, chainKv, httpRpcCfg.StateCache, blockReader, ethBackendRPC,
s.txPoolGrpcServer, miningRPC, stateDiffClient, s.logger)
if err != nil {
return err
}
s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, s.logger)
if config.SilkwormRpcDaemon && httpRpcCfg.Enabled {
silkwormRPCDaemonService := silkworm.NewRpcDaemonService(s.silkworm, chainKv)
s.silkwormRPCDaemonService = &silkwormRPCDaemonService
} else {
go func() {
if err := cli.StartRpcServer(ctx, &httpRpcCfg, s.apiList, s.logger); err != nil {
s.logger.Error("cli.StartRpcServer error", "err", err)
}
}()
}
if config.Genesis.Config.Bor == nil {
go s.engineBackendRPC.Start(&httpRpcCfg, s.chainDB, s.blockReader, ff, stateCache, s.agg, s.engine, ethRpcClient, txPoolRpcClient, miningRpcClient)
}
// Register the backend on the node
stack.RegisterLifecycle(s)
return nil
}
func (s *Ethereum) APIs() []rpc.API {
return s.apiList
}
func (s *Ethereum) Etherbase() (eb libcommon.Address, err error) {
s.lock.RLock()
etherbase := s.etherbase
s.lock.RUnlock()
if etherbase != (libcommon.Address{}) {
return etherbase, nil
}
return libcommon.Address{}, fmt.Errorf("etherbase must be explicitly specified")
}
// isLocalBlock checks whether the specified block is mined
// by local miner accounts.
//
// We regard two types of accounts as local miner accounts: the etherbase
// and accounts specified via the `txpool.locals` flag.
func (s *Ethereum) isLocalBlock(block *types.Block) bool { //nolint
s.lock.RLock()
etherbase := s.etherbase
s.lock.RUnlock()
return ethutils.IsLocalBlock(s.engine, etherbase, s.config.DeprecatedTxPool.Locals, block.Header())
}
// shouldPreserve checks whether we should preserve the given block
// during the chain reorg depending on whether the author of block
// is a local account.
func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint
// The reason we need to disable the self-reorg preserving for clique
// is it can be probable to introduce a deadlock.
//
// e.g. If there are 7 available signers
//
// r1 A
// r2 B
// r3 C
// r4 D
// r5 A [X] F G
// r6 [X]
//
// In the round5, the inturn signer E is offline, so the worst case
// is A, F and G sign the block of round5 and reject the block of opponents
// and in the round6, the last available signer B is offline, the whole
// network is stuck.
if _, ok := s.engine.(*clique.Clique); ok {
return false
}
return s.isLocalBlock(block)
}
// StartMining starts the miner with the given number of CPU threads. If mining
// is already running, this method adjusts the number of threads allowed to be used
// and updates the minimum price required by the transaction pool.
func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, stateDiffClient *direct.StateDiffClientDirect, mining *stagedsync.Sync, miner stagedsync.MiningState, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string, logger log.Logger) error {
var borcfg *bor.Bor
if b, ok := s.engine.(*bor.Bor); ok {
borcfg = b
b.HeaderProgress(s.sentriesClient.Hd)
} else if br, ok := s.engine.(*merge.Merge); ok {
if b, ok := br.InnerEngine().(*bor.Bor); ok {
borcfg = b
b.HeaderProgress(s.sentriesClient.Hd)
}
}
//if borcfg == nil {
if !miner.MiningConfig.Enabled {
return nil
}
//}
// Configure the local mining address
eb, err := s.Etherbase()
if err != nil {
s.logger.Error("Cannot start mining without etherbase", "err", err)
return fmt.Errorf("etherbase missing: %w", err)
}
if borcfg != nil {
if miner.MiningConfig.Enabled {
if miner.MiningConfig.SigKey == nil {
s.logger.Error("Etherbase account unavailable locally", "err", err)
return fmt.Errorf("signer missing: %w", err)
}
borcfg.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) {
return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey)
})
if !s.config.WithoutHeimdall {
err := stagedsync.FetchSpanZeroForMiningIfNeeded(
ctx,
s.chainDB,
s.blockReader,
borcfg.HeimdallClient,
logger,
)
if err != nil {
return err
}
}
} else {
// For the bor dev network without heimdall we need the authorizer to be set,
// otherwise there is no validator defined in the bor validator set and
// non-mining nodes will reject all blocks.
// This assumes that in this mode we're only running a single validator.
if s.chainConfig.ChainName == networkname.BorDevnetChainName && s.config.WithoutHeimdall {
borcfg.Authorize(eb, func(addr libcommon.Address, _ string, _ []byte) ([]byte, error) {
return nil, &valset.UnauthorizedSignerError{Number: 0, Signer: addr.Bytes()}
})
}
return nil
}
}
var clq *clique.Clique
if c, ok := s.engine.(*clique.Clique); ok {
clq = c
} else if cl, ok := s.engine.(*merge.Merge); ok {
if c, ok := cl.InnerEngine().(*clique.Clique); ok {
clq = c
}
}
if clq != nil {
if miner.MiningConfig.SigKey == nil {
s.logger.Error("Etherbase account unavailable locally", "err", err)
return fmt.Errorf("signer missing: %w", err)
}
clq.Authorize(eb, func(_ libcommon.Address, mimeType string, message []byte) ([]byte, error) {
return crypto.Sign(crypto.Keccak256(message), miner.MiningConfig.SigKey)
})
}
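	// Subscribe to state-change notifications from the execution stage: every
	// newly applied block is a cue to start a fresh mining round.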
streamCtx, streamCancel := context.WithCancel(ctx)
stream, err := stateDiffClient.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: false, WithTransactions: true}, grpc.WaitForReady(true))
if err != nil {
streamCancel()
return err
}
stateChangeCh := make(chan *remote.StateChange)
	go func() {
		for {
			req, err := stream.Recv()
			if err != nil {
				// A gRPC stream does not recover once Recv fails (e.g. after
				// streamCancel), so stop forwarding instead of spinning forever.
				return
			}
			for _, change := range req.ChangeBatch {
				stateChangeCh <- change
			}
		}
	}()
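	// Main mining orchestration loop: decide when a mining round should start
	// (new block, new txs, or the recommit ticker) and launch MiningStep,
	// making sure at most one step runs at a time.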
go func() {
defer debug.LogPanic()
defer close(s.waitForMiningStop)
defer streamCancel()
mineEvery := time.NewTicker(miner.MiningConfig.Recommit)
defer mineEvery.Stop()
s.logger.Info("Starting to mine", "etherbase", eb)
var working bool
var waiting atomic.Bool
hasWork := true // Start mining immediately
errc := make(chan error, 1)
workCtx, workCancel := context.WithCancel(ctx)
defer workCancel()
for {
			// Only reset the timer if some work was done previously, since we
			// rely on `miner.recommit` as a backup trigger.
if hasWork {
mineEvery.Reset(miner.MiningConfig.Recommit)
}
			// Only wait on the channels if we're already mining (working == true)
			// and expecting a result, or if we don't have any work yet (hasWork == false).
if working || !hasWork {
select {
case stateChanges := <-stateChangeCh:
block := stateChanges.BlockHeight
s.logger.Debug("Start mining based on previous block", "block", block)
// TODO - can do mining clean up here as we have previous
// block info in the state channel
hasWork = true
case <-s.notifyMiningAboutNewTxs:
				// Skip mining on new-tx notifications for the Bor consensus engine
hasWork = s.chainConfig.Bor == nil
if hasWork {
s.logger.Debug("Start mining based on txpool notif")
}
case <-mineEvery.C:
if !(working || waiting.Load()) {
s.logger.Debug("Start mining based on miner.recommit", "duration", miner.MiningConfig.Recommit)
}
hasWork = !(working || waiting.Load())
case err := <-errc:
working = false
hasWork = false
if errors.Is(err, libcommon.ErrStopped) {
return
}
if err != nil {
s.logger.Warn("mining", "err", err)
}
case <-quitCh:
return
}
}
if !working && hasWork {
working = true
hasWork = false
mineEvery.Reset(miner.MiningConfig.Recommit)
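			// Run one mining step in the background; the error comes back on
			// errc and a successfully mined block on miner.MiningResultCh.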
go func() {
err := stages2.MiningStep(ctx, db, mining, tmpDir, logger)
waiting.Store(true)
defer waiting.Store(false)
errc <- err
if err != nil {
return
}
for {
select {
case block := <-miner.MiningResultCh:
if block != nil {
s.logger.Debug("Mined block", "block", block.Number())
s.minedBlocks <- block
}
return
case <-workCtx.Done():
errc <- workCtx.Err()
return
}
}
}()
}
}
}()
return nil
}
func (s *Ethereum) IsMining() bool { return s.config.Miner.Enabled }
func (s *Ethereum) ChainKV() kv.RwDB { return s.chainDB }
func (s *Ethereum) NetVersion() (uint64, error) { return s.networkID, nil }
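// NetPeerCount sums the peer counts reported by all connected sentries.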
func (s *Ethereum) NetPeerCount() (uint64, error) {
	var sentryPc uint64
for _, sc := range s.sentriesClient.Sentries() {
ctx := context.Background()
reply, err := sc.PeerCount(ctx, &proto_sentry.PeerCountRequest{})
if err != nil {
s.logger.Warn("sentry", "err", err)
return 0, nil
}
sentryPc += reply.Count
}
	s.logger.Trace("sentry", "peer count", sentryPc)
	return sentryPc, nil
}
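// NodesInfo returns node info from up to limit sentries; a limit of 0, or one
// larger than the number of sentries, means all of them.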
func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) {
if limit == 0 || limit > len(s.sentriesClient.Sentries()) {
limit = len(s.sentriesClient.Sentries())
}
nodes := make([]*prototypes.NodeInfoReply, 0, limit)
for i := 0; i < limit; i++ {
sc := s.sentriesClient.Sentries()[i]
nodeInfo, err := sc.NodeInfo(context.Background(), nil)
if err != nil {
s.logger.Error("sentry nodeInfo", "err", err)
continue
}
nodes = append(nodes, nodeInfo)
}
nodesInfo := &remote.NodesInfoReply{NodesInfo: nodes}
slices.SortFunc(nodesInfo.NodesInfo, remote.NodeInfoReplyCmp)
return nodesInfo, nil
}
// setUpSnapDownloader connects to an external snapshot Downloader or starts an embedded one.
func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downloadercfg.Cfg) error {
var err error
if s.config.Snapshot.NoDownloader {
return nil
}
if s.config.Snapshot.DownloaderAddr != "" {
// connect to external Downloader
s.downloaderClient, err = downloadergrpc.NewClient(ctx, s.config.Snapshot.DownloaderAddr)
} else {
// start embedded Downloader
if uploadFs := s.config.Sync.UploadLocation; len(uploadFs) > 0 {
downloaderCfg.AddTorrentsFromDisk = false
}
discover := true
s.downloader, err = downloader.New(ctx, downloaderCfg, s.config.Dirs, s.logger, log.LvlDebug, discover)
if err != nil {
return err
}
s.downloader.MainLoopInBackground(true)
bittorrentServer, err := downloader.NewGrpcServer(s.downloader)
if err != nil {
return fmt.Errorf("new server: %w", err)
}
s.downloaderClient = direct.NewDownloaderClient(bittorrentServer)
}
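	// Whenever the aggregator freezes new history files, notify subscribers and
	// hand the file names to the downloader so the new snapshots can be seeded.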
s.agg.OnFreeze(func(frozenFileNames []string) {
events := s.notifications.Events
events.OnNewSnapshot()
if s.downloaderClient != nil {
req := &proto_downloader.AddRequest{Items: make([]*proto_downloader.AddItem, 0, len(frozenFileNames))}
for _, fName := range frozenFileNames {
req.Items = append(req.Items, &proto_downloader.AddItem{
Path: filepath.Join("history", fName),
})
}
if _, err := s.downloaderClient.Add(ctx, req); err != nil {
s.logger.Warn("[snapshots] notify downloader", "err", err)
}
}
})
return err
}
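// setUpBlockReader opens the block snapshots (plus Bor snapshots on Polygon
// chains) and wires them into a block reader/writer pair together with the
// history aggregator.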
func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snapshotVersion uint8, snConfig ethconfig.BlocksFreezing, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3, error) {
	allSnapshots := freezeblocks.NewRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
	var allBorSnapshots *freezeblocks.BorRoSnapshots
	if isBor {
		allBorSnapshots = freezeblocks.NewBorRoSnapshots(snConfig, dirs.Snap, snapshotVersion, logger)
	}
	var err error
	if snConfig.NoDownloader {
allSnapshots.ReopenFolder()
if isBor {
allBorSnapshots.ReopenFolder()
}
} else {
allSnapshots.OptimisticalyReopenWithDB(db)
if isBor {
allBorSnapshots.OptimisticalyReopenWithDB(db)
}
}
blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
blockWriter := blockio.NewBlockWriter(histV3)
agg, err := libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger)
if err != nil {
return nil, nil, nil, nil, nil, err
}
if err = agg.OpenFolder(); err != nil {
return nil, nil, nil, nil, nil, err
}
return blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, nil
}
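// Peers aggregates the peer lists of all sentries into a single reply.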
func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) {
var reply remote.PeersReply
for _, sentryClient := range s.sentriesClient.Sentries() {
peers, err := sentryClient.Peers(ctx, &emptypb.Empty{})
if err != nil {
return nil, fmt.Errorf("ethereum backend MultiClient.Peers error: %w", err)
}
reply.Peers = append(reply.Peers, peers.Peers...)
}
return &reply, nil
}
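// DiagnosticsPeersData merges the peer statistics collected by each sentry server.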
func (s *Ethereum) DiagnosticsPeersData() map[string]*diagnostics.PeerStatistics {
	reply := make(map[string]*diagnostics.PeerStatistics)
for _, sentryServer := range s.sentryServers {
peers := sentryServer.DiagnosticsPeersData()
for key, value := range peers {
reply[key] = value
}
}
return reply
}
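// AddPeer asks every sentry to add the given enode as a static peer. A true
// reply only means the node was accepted into the static-peer list; it does
// not guarantee that an RLPx connection has been established.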
func (s *Ethereum) AddPeer(ctx context.Context, req *remote.AddPeerRequest) (*remote.AddPeerReply, error) {
for _, sentryClient := range s.sentriesClient.Sentries() {
_, err := sentryClient.AddPeer(ctx, &proto_sentry.AddPeerRequest{Url: req.Url})
if err != nil {
return nil, fmt.Errorf("ethereum backend MultiClient.AddPeers error: %w", err)
}
}
return &remote.AddPeerReply{Success: true}, nil
}
// Protocols returns all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(s.sentryServers))
for i := range s.sentryServers {
protocols = append(protocols, s.sentryServers[i].Protocols...)
}
return protocols
}
// Start implements node.Lifecycle, starting all internal goroutines needed by the
// Ethereum protocol implementation.
func (s *Ethereum) Start() error {
s.sentriesClient.StartStreamLoops(s.sentryCtx)
	time.Sleep(10 * time.Millisecond) // just to reduce log-ordering confusion
hook := stages2.NewHook(s.sentryCtx, s.chainDB, s.notifications, s.stagedSync, s.blockReader, s.chainConfig, s.logger, s.sentriesClient.UpdateHead)
currentTDProvider := func() *big.Int {
currentTD, err := readCurrentTotalDifficulty(s.sentryCtx, s.chainDB, s.blockReader)
if err != nil {
panic(err)
}
return currentTD
}
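	// On PoS chains the consensus layer drives sync through the execution
	// server; otherwise run the regular staged-sync loop.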
if params.IsChainPoS(s.chainConfig, currentTDProvider) {
s.waitForStageLoopStop = nil // TODO: Ethereum.Stop should wait for execution_server shutdown
go s.eth1ExecutionServer.Start(s.sentryCtx)
} else {
go stages2.StageLoop(s.sentryCtx, s.chainDB, s.stagedSync, s.sentriesClient.Hd, s.waitForStageLoopStop, s.config.Sync.LoopThrottle, s.logger, s.blockReader, hook, s.config.ForcePartialCommit)
}
if s.chainConfig.Bor != nil {
s.engine.(*bor.Bor).Start(s.chainDB)
}
if s.silkwormRPCDaemonService != nil {
if err := s.silkwormRPCDaemonService.Start(); err != nil {
s.logger.Error("silkworm.StartRpcDaemon error", "err", err)
}
}
if s.silkwormSentryService != nil {
if err := s.silkwormSentryService.Start(); err != nil {
s.logger.Error("silkworm.SentryStart error", "err", err)
}
}
return nil
}
// Stop implements node.Service, terminating all internal goroutines used by the
// Ethereum protocol.
func (s *Ethereum) Stop() error {
// Stop all the peer-related stuff first.
s.sentryCancel()
if s.unsubscribeEthstat != nil {
s.unsubscribeEthstat()
}
if s.downloader != nil {
s.downloader.Close()
}
if s.privateAPI != nil {
shutdownDone := make(chan bool)
go func() {
defer close(shutdownDone)
s.privateAPI.GracefulStop()
}()
select {
case <-time.After(1 * time.Second): // shutdown deadline
s.privateAPI.Stop()
case <-shutdownDone:
}
}
libcommon.SafeClose(s.sentriesClient.Hd.QuitPoWMining)
_ = s.engine.Close()
if s.waitForStageLoopStop != nil {
<-s.waitForStageLoopStop
}
if s.config.Miner.Enabled {
<-s.waitForMiningStop
}
for _, sentryServer := range s.sentryServers {
sentryServer.Close()
}
if s.txPoolDB != nil {
s.txPoolDB.Close()
}
if s.agg != nil {
s.agg.Close()
}
s.chainDB.Close()
if s.silkwormRPCDaemonService != nil {
if err := s.silkwormRPCDaemonService.Stop(); err != nil {
s.logger.Error("silkworm.StopRpcDaemon error", "err", err)
}
}
if s.silkwormSentryService != nil {
if err := s.silkwormSentryService.Stop(); err != nil {
s.logger.Error("silkworm.SentryStop error", "err", err)
}
}
if s.silkworm != nil {
if err := s.silkworm.Close(); err != nil {
s.logger.Error("silkworm.Close error", "err", err)
}
}
return nil
}
func (s *Ethereum) ChainDB() kv.RwDB {
return s.chainDB
}
func (s *Ethereum) ChainConfig() *chain.Config {
return s.chainConfig
}
func (s *Ethereum) StagedSync() *stagedsync.Sync {
return s.stagedSync
}
func (s *Ethereum) Notifications() *shards.Notifications {
return s.notifications
}
func (s *Ethereum) SentryCtx() context.Context {
return s.sentryCtx
}
func (s *Ethereum) SentryControlServer() *sentry_multi_client.MultiClient {
return s.sentriesClient
}
func (s *Ethereum) BlockIO() (services.FullBlockReader, *blockio.BlockWriter) {
return s.blockReader, s.blockWriter
}
func (s *Ethereum) TxpoolServer() txpool_proto.TxpoolServer {
return s.txPoolGrpcServer
}
// RemoveContents is like os.RemoveAll, but preserves the directory itself
func RemoveContents(dir string) error {
d, err := os.Open(dir)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
		// dir does not exist (can happen on Windows): nothing to remove, just recreate it
_ = os.MkdirAll(dir, 0o755)
return nil
}
return err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return err
}
for _, name := range names {
err = os.RemoveAll(filepath.Join(dir, name))
if err != nil {
return err
}
}
return nil
}
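// checkPortIsFree reports whether nothing accepts a TCP connection on addr
// within a 200ms dial timeout.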
func checkPortIsFree(addr string) (free bool) {
c, err := net.DialTimeout("tcp", addr, 200*time.Millisecond)
if err != nil {
return true
}
c.Close()
return false
}
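// readCurrentTotalDifficulty returns the total difficulty of the current head
// block, or nil if the chain has no head yet.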
func readCurrentTotalDifficulty(ctx context.Context, db kv.RwDB, blockReader services.FullBlockReader) (*big.Int, error) {
var currentTD *big.Int
err := db.View(ctx, func(tx kv.Tx) error {
h, err := blockReader.CurrentBlock(tx)
if err != nil {
return err
}
if h == nil {
currentTD = nil
return nil
}
currentTD, err = rawdb.ReadTd(tx, h.Hash(), h.NumberU64())
return err
})
return currentTD, err
}
func (s *Ethereum) Sentinel() rpcsentinel.SentinelClient {
return s.sentinel
}