Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git (synced 2024-12-22 03:30:37 +00:00)
[Diagnostics] Simplify logging settings, introduce correct log rotation with lumberjack (#7273)
Co-authored-by: Alexey Sharp <alexeysharp@Alexeys-iMac.local>
parent 6f18ba1458
commit 9690228ede
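
At a glance: the GetLogger / GetLoggerCtx / GetLoggerCmd helpers, which returned a logger and wrote fixed-size "-user.log" and "-error.log" files via log.FileHandler, are replaced by SetupLogger / SetupLoggerCtx / SetupLoggerCmd, which configure the root logger in place and route the on-disk log through a lumberjack writer that handles rotation; the logger parameter is also dropped from several constructors in favour of log.Root(). The sketch below is a condensed, hypothetical standalone version of that wiring, not a copy of the file: the function name setupRotatingFileLog and the /tmp path in main are illustrative, while the API calls and rotation values (100 MB, 3 backups, 28 days) are taken from the turbo/logging hunks further down.

package main

import (
	"path"

	"github.com/ledgerwatch/log/v3"
	"gopkg.in/natefinch/lumberjack.v2"
)

// setupRotatingFileLog mirrors what initSeparatedLogging does after this commit:
// existing console handlers stay untouched, and the file log is written through
// a lumberjack writer so rotation is handled by lumberjack instead of the old
// size-capped user/error FileHandlers.
func setupRotatingFileLog(dirPath, filePrefix string, dirLevel log.Lvl, dirJson bool) {
	logger := log.Root()

	dirFormat := log.TerminalFormatNoColor()
	if dirJson {
		dirFormat = log.JsonFormat()
	}

	rotating := &lumberjack.Logger{
		Filename:   path.Join(dirPath, filePrefix+".log"),
		MaxSize:    100, // megabytes per file before rotation
		MaxBackups: 3,   // rotated files to keep
		MaxAge:     28,  // days to keep rotated files
	}
	fileHandler := log.StreamHandler(rotating, dirFormat)

	// Keep whatever console handler is already installed and add the
	// level-filtered rotating file handler alongside it.
	mux := log.MultiHandler(logger.GetHandler(), log.LvlFilterHandler(dirLevel, fileHandler))
	logger.SetHandler(mux)
}

func main() {
	setupRotatingFileLog("/tmp/erigon-logs", "erigon", log.LvlInfo, false)
	log.Info("logging initialised")
}
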
@@ -51,7 +51,7 @@ func main() {
 	)
 	flag.Parse()
 
-	_ = logging.GetLogger("bootnode")
+	logging.SetupLogger("bootnode")
 
 	natm, err := nat.Parse(*natdesc)
 	if err != nil {
@@ -78,15 +78,13 @@ func StartNode(wg *sync.WaitGroup, args []string) {
 
 // runNode configures, creates and serves an erigon node
 func runNode(ctx *cli.Context) error {
-	logger := log.New()
 
 	// Initializing the node and providing the current git commit there
-	logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
+	log.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
 
 	nodeCfg := node.NewNodConfigUrfave(ctx)
 	ethCfg := node.NewEthConfigUrfave(ctx, nodeCfg)
 
-	ethNode, err := node.New(nodeCfg, ethCfg, logger)
+	ethNode, err := node.New(nodeCfg, ethCfg)
 	if err != nil {
 		log.Error("Devnet startup", "err", err)
 		return err
@@ -25,7 +25,7 @@ import (
 	"github.com/ledgerwatch/erigon/p2p/nat"
 	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/turbo/debug"
-	logging2 "github.com/ledgerwatch/erigon/turbo/logging"
+	"github.com/ledgerwatch/erigon/turbo/logging"
 	"github.com/ledgerwatch/log/v3"
 	"github.com/pelletier/go-toml/v2"
 	"github.com/spf13/cobra"
@@ -56,7 +56,7 @@ var (
 )
 
 func init() {
-	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging2.Flags)
+	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging.Flags)
 
 	withDataDir(rootCmd)
 
@@ -114,7 +114,7 @@ var rootCmd = &cobra.Command{
 		debug.Exit()
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		_ = logging2.GetLoggerCmd("downloader", cmd)
+		logging.SetupLoggerCmd("downloader", cmd)
 		if err := Downloader(cmd.Context()); err != nil {
 			if !errors.Is(err, context.Canceled) {
 				log.Error(err.Error())
@@ -173,7 +173,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
 	}
 
 	// Assemble the Ethereum object
-	chainKv, err := node.OpenDatabase(stack.Config(), logger, kv.ChainDB)
+	chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB)
 	if err != nil {
 		return nil, err
 	}
@@ -257,7 +257,6 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
 		sentryCtx: ctx,
 		sentryCancel: ctxCancel,
 		config: config,
-		log: logger,
 		chainDB: chainKv,
 		networkID: config.NetworkID,
 		etherbase: config.Miner.Etherbase,
@@ -428,7 +427,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
 	} else {
 		consensusConfig = &config.Ethash
 	}
-	backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots, false /* readonly */, backend.chainDB)
+	backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots, false /* readonly */, backend.chainDB)
 	backend.forkValidator = engineapi.NewForkValidator(currentBlockNumber, inMemoryExecution, tmpdir)
 
 	if err != nil {
@@ -52,15 +52,13 @@ func runErigon(cliCtx *cli.Context) error {
 		}
 	}
 
-	logger := logging.GetLoggerCtx("erigon", cliCtx)
+	logging.SetupLoggerCtx("erigon", cliCtx)
 
 	// initializing the node and providing the current git commit there
-	logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
+	log.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
 
 	nodeCfg := node.NewNodConfigUrfave(cliCtx)
 	ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg)
 
-	ethNode, err := backend.NewNode(nodeCfg, ethCfg, logger)
+	ethNode, err := backend.NewNode(nodeCfg, ethCfg, log.Root())
 	if err != nil {
 		log.Error("Erigon startup", "err", err)
 		return err
@@ -18,7 +18,6 @@ import (
 	"github.com/ledgerwatch/erigon/params"
 	erigonapp "github.com/ledgerwatch/erigon/turbo/app"
 	erigoncli "github.com/ledgerwatch/erigon/turbo/cli"
-	"github.com/ledgerwatch/erigon/turbo/logging"
 	"github.com/ledgerwatch/erigon/turbo/node"
 )
 
@@ -51,15 +50,13 @@ func runErigon(cliCtx *cli.Context) error {
 		}
 	}
 
-	logger := logging.GetLoggerCtx("erigon", cliCtx)
 
 	// initializing the node and providing the current git commit there
-	logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
+	log.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
 
 	nodeCfg := node.NewNodConfigUrfave(cliCtx)
 	ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg)
 
-	ethNode, err := node.New(nodeCfg, ethCfg, logger)
+	ethNode, err := node.New(nodeCfg, ethCfg)
 	if err != nil {
 		log.Error("Erigon startup", "err", err)
 		return err
@@ -1375,7 +1375,7 @@ func main() {
 	debug.RaiseFdLimit()
 	flag.Parse()
 
-	_ = logging.GetLogger("hack")
+	logging.SetupLogger("hack")
 
 	if *cpuprofile != "" {
 		f, err := os.Create(*cpuprofile)
@@ -825,7 +825,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error {
 
 	log.Info("StageExec", "progress", execStage.BlockNumber)
 	log.Info("StageTrie", "progress", s.BlockNumber)
-	cfg := stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(db), nil, historyV3, agg)
+	cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, getBlockReader(db), nil /* hd */, historyV3, agg)
 	if unwind > 0 {
 		u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber)
 		if err := stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil {
@@ -1435,7 +1435,6 @@ func overrideStorageMode(db kv.RwDB) error {
 }
 
 func initConsensusEngine(cc *chain2.Config, datadir string, db kv.RwDB) (engine consensus.Engine) {
-	l := log.New()
 	snapshots, _ := allSnapshots(context.Background(), db)
 	config := ethconfig.Defaults
 
|
||||
} else {
|
||||
consensusConfig = &config.Ethash
|
||||
}
|
||||
return ethconsensusconfig.CreateConsensusEngine(cc, l, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallgRPCAddress, HeimdallURL, config.WithoutHeimdall, datadir, snapshots, db.ReadOnly(), db)
|
||||
return ethconsensusconfig.CreateConsensusEngine(cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallgRPCAddress, HeimdallURL, config.WithoutHeimdall, datadir, snapshots, db.ReadOnly(), db)
|
||||
}
|
||||
|
@@ -17,8 +17,8 @@ func main() {
 	rootCtx, rootCancel := common.RootContext()
 	cmd.RunE = func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		logger := logging.GetLoggerCmd("rpcdaemon", cmd)
-		db, borDb, backend, txPool, mining, stateCache, blockReader, ff, agg, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel)
+		logging.SetupLoggerCmd("rpcdaemon", cmd)
+		db, borDb, backend, txPool, mining, stateCache, blockReader, ff, agg, err := cli.RemoteServices(ctx, *cfg, log.Root(), rootCancel)
 		if err != nil {
 			log.Error("Could not connect to DB", "err", err)
 			return nil
@@ -12,7 +12,7 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/utils"
 	"github.com/ledgerwatch/erigon/common/paths"
 	"github.com/ledgerwatch/erigon/turbo/debug"
-	logging2 "github.com/ledgerwatch/erigon/turbo/logging"
+	"github.com/ledgerwatch/erigon/turbo/logging"
 	node2 "github.com/ledgerwatch/erigon/turbo/node"
 )
 
@@ -38,7 +38,7 @@ var (
 )
 
 func init() {
-	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging2.Flags)
+	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging.Flags)
 
 	rootCmd.Flags().StringVar(&sentryAddr, "sentry.api.addr", "localhost:9091", "grpc addresses")
 	rootCmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage)
@@ -98,7 +98,7 @@ var rootCmd = &cobra.Command{
 			return err
 		}
 
-		_ = logging2.GetLoggerCmd("sentry", cmd)
+		logging.SetupLoggerCmd("sentry", cmd)
 		return sentry.Sentry(cmd.Context(), dirs, sentryAddr, discoveryDNS, p2pConfig, protocol, healthCheck)
 	},
 }
@@ -81,8 +81,8 @@ var erigon4Cmd = &cobra.Command{
 	Use: "erigon4",
 	Short: "Experimental command to re-execute blocks from beginning using erigon2 state representation and history/domain",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		logger := logging.GetLoggerCmd("erigon4", cmd)
-		return Erigon4(genesis, chainConfig, logger)
+		logging.SetupLoggerCmd("erigon4", cmd)
+		return Erigon4(genesis, chainConfig, log.Root())
 	},
 }
 
@@ -598,7 +598,6 @@ func (ww *StateWriterV4) CreateContract(address libcommon.Address) error {
 }
 
 func initConsensusEngine(cc *chain2.Config, snapshots *snapshotsync.RoSnapshots) (engine consensus.Engine) {
-	l := log.New()
 	config := ethconfig.Defaults
 
 	var consensusConfig interface{}
@@ -615,5 +614,5 @@ func initConsensusEngine(cc *chain2.Config, snapshots *snapshotsync.RoSnapshots)
 	} else {
 		consensusConfig = &config.Ethash
 	}
-	return ethconsensusconfig.CreateConsensusEngine(cc, l, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, datadirCli, snapshots, true /* readonly */)
+	return ethconsensusconfig.CreateConsensusEngine(cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, datadirCli, snapshots, true /* readonly */)
 }
@@ -31,7 +31,7 @@ import (
 	"github.com/ledgerwatch/erigon/cmd/utils"
 	"github.com/ledgerwatch/erigon/common/paths"
 	"github.com/ledgerwatch/erigon/turbo/debug"
-	logging2 "github.com/ledgerwatch/erigon/turbo/logging"
+	"github.com/ledgerwatch/erigon/turbo/logging"
 )
 
 var (
@@ -57,7 +57,7 @@ var (
 )
 
 func init() {
-	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging2.Flags)
+	utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging.Flags)
 	rootCmd.Flags().StringSliceVar(&sentryAddr, "sentry.api.addr", []string{"localhost:9091"}, "comma separated sentry addresses '<host>:<port>,<host>:<port>'")
 	rootCmd.Flags().StringVar(&privateApiAddr, "private.api.addr", "localhost:9090", "execution service <host>:<port>")
 	rootCmd.Flags().StringVar(&txpoolApiAddr, "txpool.api.addr", "localhost:9094", "txpool service <host>:<port>")
@@ -89,7 +89,7 @@ var rootCmd = &cobra.Command{
 		debug.Exit()
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		_ = logging2.GetLoggerCmd("txpool", cmd)
+		logging.SetupLoggerCmd("txpool", cmd)
 
 		if err := doTxpool(cmd.Context()); err != nil {
 			if !errors.Is(err, context.Canceled) {
@@ -6,8 +6,8 @@ import (
 	"github.com/ledgerwatch/log/v3"
 )
 
-func OpenDatabase(path string, logger log.Logger, inMem bool, readonly bool) kv.RwDB {
-	opts := mdbx.NewMDBX(logger).Label(kv.ConsensusDB)
+func OpenDatabase(path string, inMem bool, readonly bool) kv.RwDB {
+	opts := mdbx.NewMDBX(log.Root()).Label(kv.ConsensusDB)
 	if readonly {
 		opts = opts.Readonly()
 	}
@@ -117,7 +117,6 @@ type Config = ethconfig.Config
 // Ethereum implements the Ethereum full node service.
 type Ethereum struct {
 	config *ethconfig.Config
-	log log.Logger
 
 	// DB interfaces
 	chainDB kv.RwDB
@@ -190,7 +189,7 @@ func splitAddrIntoHostAndPort(addr string) (host string, port int, err error) {
 
 // New creates a new Ethereum object (including the
 // initialisation of the common Ethereum object)
-func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) {
+func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 {
 		log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
 		config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
@@ -203,7 +202,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 	}
 
 	// Assemble the Ethereum object
-	chainKv, err := node.OpenDatabase(stack.Config(), logger, kv.ChainDB)
+	chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB)
 	if err != nil {
 		return nil, err
 	}
@@ -288,7 +287,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 		sentryCtx: ctx,
 		sentryCancel: ctxCancel,
 		config: config,
-		log: logger,
 		chainDB: chainKv,
 		networkID: config.NetworkID,
 		etherbase: config.Miner.Etherbase,
@@ -456,7 +454,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
 	} else {
 		consensusConfig = &config.Ethash
 	}
-	backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots, false /* readonly */, backend.chainDB)
+	backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots, false /* readonly */, backend.chainDB)
 	backend.forkValidator = engineapi.NewForkValidator(currentBlockNumber, inMemoryExecution, tmpdir)
 
 	backend.sentriesClient, err = sentry.NewMultiClient(
@@ -27,7 +27,7 @@ import (
 	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
 )
 
-func CreateConsensusEngine(chainConfig *chain.Config, logger log.Logger, config interface{}, notify []string, noverify bool, HeimdallgRPCAddress string, HeimdallURL string, WithoutHeimdall bool, datadir string, snapshots *snapshotsync.RoSnapshots, readonly bool, chainDb ...kv.RwDB) consensus.Engine {
+func CreateConsensusEngine(chainConfig *chain.Config, config interface{}, notify []string, noverify bool, HeimdallgRPCAddress string, HeimdallURL string, WithoutHeimdall bool, datadir string, snapshots *snapshotsync.RoSnapshots, readonly bool, chainDb ...kv.RwDB) consensus.Engine {
 	var eng consensus.Engine
 
 	switch consensusCfg := config.(type) {
@@ -57,7 +57,7 @@ func CreateConsensusEngine(chainConfig *chain.Config, logger log.Logger, config
 			if consensusCfg.DBPath == "" {
 				consensusCfg.DBPath = filepath.Join(datadir, "clique", "db")
 			}
-			eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly))
+			eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory, readonly))
 		}
 	case *chain.AuRaConfig:
 		if chainConfig.Aura != nil {
@@ -65,7 +65,7 @@ func CreateConsensusEngine(chainConfig *chain.Config, logger log.Logger, config
 				consensusCfg.DBPath = filepath.Join(datadir, "aura")
 			}
 			var err error
-			eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName))
+			eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory, readonly), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName))
 			if err != nil {
 				panic(err)
 			}
@@ -75,7 +75,7 @@ func CreateConsensusEngine(chainConfig *chain.Config, logger log.Logger, config
 			if consensusCfg.DBPath == "" {
 				consensusCfg.DBPath = filepath.Join(datadir, "parlia")
 			}
-			eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly), snapshots, chainDb[0])
+			eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, consensusCfg.InMemory, readonly), snapshots, chainDb[0])
 		}
 	case *chain.BorConfig:
 		// If Matic bor consensus is requested, set it up
@@ -85,7 +85,7 @@ func CreateConsensusEngine(chainConfig *chain.Config, logger log.Logger, config
 			genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, chainConfig.Bor.ValidatorContract, chainConfig.Bor.StateReceiverContract)
 			spanner := span.NewChainSpanner(contract.ValidatorSet(), chainConfig)
 			borDbPath := filepath.Join(datadir, "bor") // bor consensus path: datadir/bor
-			db := db.OpenDatabase(borDbPath, logger, false, readonly)
+			db := db.OpenDatabase(borDbPath, false, readonly)
 
 			var heimdallClient bor.IHeimdallClient
 			if WithoutHeimdall {
go.mod
@@ -259,6 +259,7 @@ require (
 	golang.org/x/text v0.8.0 // indirect
 	golang.org/x/tools v0.7.0 // indirect
 	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	gotest.tools/v3 v3.3.0 // indirect
go.sum
@@ -1290,6 +1290,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -284,7 +284,7 @@ func (n *Node) DataDir() string {
 	return n.config.Dirs.DataDir
 }
 
-func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv.RwDB, error) {
+func OpenDatabase(config *nodecfg.Config, label kv.Label) (kv.RwDB, error) {
 	var name string
 	switch label {
 	case kv.ChainDB:
@@ -309,7 +309,7 @@ func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv
 		roTxLimit = int64(config.Http.DBReadConcurrency)
 	}
 	roTxsLimiter := semaphore.NewWeighted(roTxLimit) // 1 less than max to allow unlocking to happen
-	opts := mdbx.NewMDBX(logger).
+	opts := mdbx.NewMDBX(log.Root()).
 		Path(dbPath).Label(label).
 		DBVerbosity(config.DatabaseVerbosity).RoTxsLimiter(roTxsLimiter)
 	if exclusive {
@@ -28,7 +28,6 @@ import (
 	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/node/nodecfg"
 	"github.com/ledgerwatch/erigon/p2p"
-	"github.com/ledgerwatch/log/v3"
 	"github.com/stretchr/testify/require"
 )
 
@@ -146,7 +145,7 @@ func TestNodeCloseClosesDB(t *testing.T) {
 	stack, _ := New(testNodeConfig(t))
 	defer stack.Close()
 
-	db, err := OpenDatabase(stack.Config(), log.New(), kv.SentryDB)
+	db, err := OpenDatabase(stack.Config(), kv.SentryDB)
 	if err != nil {
 		t.Fatal("can't open DB:", err)
 	}
@@ -177,7 +176,7 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) {
 	var db kv.RwDB
 	stack.RegisterLifecycle(&InstrumentedService{
 		startHook: func() {
-			db, err = OpenDatabase(stack.Config(), log.New(), kv.SentryDB)
+			db, err = OpenDatabase(stack.Config(), kv.SentryDB)
 			if err != nil {
 				t.Fatal("can't open DB:", err)
 			}
@@ -202,7 +201,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) {
 
 	stack.RegisterLifecycle(&InstrumentedService{
 		stopHook: func() {
-			db, err := OpenDatabase(stack.Config(), log.New(), kv.ChainDB)
+			db, err := OpenDatabase(stack.Config(), kv.ChainDB)
 			if err != nil {
 				t.Fatal("can't open DB:", err)
 			}
@@ -52,15 +52,13 @@ func importChain(ctx *cli.Context) error {
 		utils.Fatalf("This command requires an argument.")
 	}
 
-	logger := log.New(ctx)
 
 	nodeCfg := turboNode.NewNodConfigUrfave(ctx)
 	ethCfg := turboNode.NewEthConfigUrfave(ctx, nodeCfg)
 
 	stack := makeConfigNode(nodeCfg)
 	defer stack.Close()
 
-	ethereum, err := eth.New(stack, ethCfg, logger)
+	ethereum, err := eth.New(stack, ethCfg)
 	if err != nil {
 		return err
 	}
@@ -55,7 +55,7 @@ func initGenesis(ctx *cli.Context) error {
 	stack := MakeConfigNodeDefault(ctx)
 	defer stack.Close()
 
-	chaindb, err := node.OpenDatabase(stack.Config(), log.New(ctx), kv.ChainDB)
+	chaindb, err := node.OpenDatabase(stack.Config(), kv.ChainDB)
 	if err != nil {
 		utils.Fatalf("Failed to open database: %v", err)
 	}
@@ -95,7 +95,7 @@ func SetupCobra(cmd *cobra.Command) error {
 	RaiseFdLimit()
 	flags := cmd.Flags()
 
-	_ = logging.GetLoggerCmd("debug", cmd)
+	logging.SetupLoggerCmd("erigon", cmd)
 
 	traceFile, err := flags.GetString(traceFlag.Name)
 	if err != nil {
@@ -168,7 +168,7 @@ func Setup(ctx *cli.Context) error {
 
 	RaiseFdLimit()
 
-	_ = logging.GetLoggerCtx("debug", ctx)
+	logging.SetupLoggerCtx("erigon", ctx)
 
 	if traceFile := ctx.String(traceFlag.Name); traceFile != "" {
 		if err := Handler.StartGoTrace(traceFile); err != nil {
@@ -41,7 +41,7 @@ var (
 	LogDirVerbosityFlag = cli.StringFlag{
 		Name: "log.dir.verbosity",
 		Usage: "Set the log verbosity for logs stored to disk",
-		Value: log.LvlDebug.String(),
+		Value: log.LvlInfo.String(),
 	}
 )
 
@@ -4,14 +4,16 @@ import (
 	"flag"
 	"os"
 	"path"
+	"path/filepath"
 	"strconv"
 
 	"github.com/ledgerwatch/log/v3"
 	"github.com/spf13/cobra"
 	"github.com/urfave/cli/v2"
+	"gopkg.in/natefinch/lumberjack.v2"
 )
 
-func GetLoggerCtx(filePrefix string, ctx *cli.Context) log.Logger {
+func SetupLoggerCtx(filePrefix string, ctx *cli.Context) {
 	var consoleJson = ctx.Bool(LogJsonFlag.Name) || ctx.Bool(LogConsoleJsonFlag.Name)
 	var dirJson = ctx.Bool(LogDirJsonFlag.Name)
 
@@ -26,14 +28,20 @@ func GetLoggerCtx(filePrefix string, ctx *cli.Context) log.Logger {
 
 	dirLevel, dErr := tryGetLogLevel(ctx.String(LogDirVerbosityFlag.Name))
 	if dErr != nil {
-		dirLevel = log.LvlDebug
+		dirLevel = log.LvlInfo
 	}
 
 	dirPath := ctx.String(LogDirPathFlag.Name)
-	return initSeparatedLogging(filePrefix, dirPath, consoleLevel, dirLevel, consoleJson, dirJson)
+	if dirPath == "" {
+		datadir := ctx.String("datadir")
+		if datadir != "" {
+			dirPath = filepath.Join(datadir, "logs")
+		}
+	}
+	initSeparatedLogging(filePrefix, dirPath, consoleLevel, dirLevel, consoleJson, dirJson)
 }
 
-func GetLoggerCmd(filePrefix string, cmd *cobra.Command) log.Logger {
+func SetupLoggerCmd(filePrefix string, cmd *cobra.Command) {
 
 	logJsonVal, ljerr := cmd.Flags().GetBool(LogJsonFlag.Name)
 	if ljerr != nil {
@@ -62,14 +70,20 @@ func GetLoggerCmd(filePrefix string, cmd *cobra.Command) log.Logger {
 
 	dirLevel, dErr := tryGetLogLevel(cmd.Flags().Lookup(LogDirVerbosityFlag.Name).Value.String())
 	if dErr != nil {
-		dirLevel = log.LvlDebug
+		dirLevel = log.LvlInfo
 	}
 
 	dirPath := cmd.Flags().Lookup(LogDirPathFlag.Name).Value.String()
-	return initSeparatedLogging(filePrefix, dirPath, consoleLevel, dirLevel, consoleJson, dirJson)
+	if dirPath == "" {
+		datadir := cmd.Flags().Lookup("datadir").Value.String()
+		if datadir != "" {
+			dirPath = filepath.Join(datadir, "logs")
+		}
+	}
+	initSeparatedLogging(filePrefix, dirPath, consoleLevel, dirLevel, consoleJson, dirJson)
 }
 
-func GetLogger(filePrefix string) log.Logger {
+func SetupLogger(filePrefix string) {
 	var logConsoleVerbosity = flag.String(LogConsoleVerbosityFlag.Name, "", LogConsoleVerbosityFlag.Usage)
 	var logDirVerbosity = flag.String(LogDirVerbosityFlag.Name, "", LogDirVerbosityFlag.Usage)
 	var logDirPath = flag.String(LogDirPathFlag.Name, "", LogDirPathFlag.Usage)
@@ -93,10 +107,10 @@ func GetLogger(filePrefix string) log.Logger {
 
 	dirLevel, dErr := tryGetLogLevel(*logDirVerbosity)
 	if dErr != nil {
-		dirLevel = log.LvlDebug
+		dirLevel = log.LvlInfo
 	}
 
-	return initSeparatedLogging(filePrefix, *logDirPath, consoleLevel, dirLevel, consoleJson, *dirJson)
+	initSeparatedLogging(filePrefix, *logDirPath, consoleLevel, dirLevel, consoleJson, *dirJson)
 }
 
 func initSeparatedLogging(
@@ -105,7 +119,7 @@ func initSeparatedLogging(
 	consoleLevel log.Lvl,
 	dirLevel log.Lvl,
 	consoleJson bool,
-	dirJson bool) log.Logger {
+	dirJson bool) {
 
 	logger := log.Root()
 
@@ -117,36 +131,31 @@ func initSeparatedLogging(
 
 	if len(dirPath) == 0 {
 		logger.Warn("no log dir set, console logging only")
-		return logger
+		return
 	}
 
 	err := os.MkdirAll(dirPath, 0764)
 	if err != nil {
 		logger.Warn("failed to create log dir, console logging only")
-		return logger
+		return
 	}
 
-	dirFormat := log.LogfmtFormat()
+	dirFormat := log.TerminalFormatNoColor()
 	if dirJson {
 		dirFormat = log.JsonFormat()
 	}
 
-	userLog, err := log.FileHandler(path.Join(dirPath, filePrefix+"-user.log"), dirFormat, 1<<27) // 128Mb
-	if err != nil {
-		logger.Warn("failed to open user log, console logging only")
-		return logger
-	}
-	errLog, err := log.FileHandler(path.Join(dirPath, filePrefix+"-error.log"), dirFormat, 1<<27) // 128Mb
-	if err != nil {
-		logger.Warn("failed to open error log, console logging only")
-		return logger
+	lumberjack := &lumberjack.Logger{
+		Filename:   path.Join(dirPath, filePrefix+".log"),
+		MaxSize:    100, // megabytes
+		MaxBackups: 3,
+		MaxAge:     28, //days
 	}
+	userLog := log.StreamHandler(lumberjack, dirFormat)
 
-	mux := log.MultiHandler(logger.GetHandler(), log.LvlFilterHandler(dirLevel, userLog), log.LvlFilterHandler(log.LvlError, errLog))
-	log.Root().SetHandler(mux)
+	mux := log.MultiHandler(logger.GetHandler(), log.LvlFilterHandler(dirLevel, userLog))
+	logger.SetHandler(mux)
 	logger.Info("logging to file system", "log dir", dirPath, "file prefix", filePrefix, "log level", dirLevel, "json", dirJson)
-	return logger
 }
 
 func tryGetLogLevel(s string) (log.Lvl, error) {
@@ -96,7 +96,6 @@ func NewEthConfigUrfave(ctx *cli.Context, nodeConfig *nodecfg.Config) *ethconfig
 func New(
 	nodeConfig *nodecfg.Config,
 	ethConfig *ethconfig.Config,
-	logger log.Logger,
 ) (*ErigonNode, error) {
 	//prepareBuckets(optionalParams.CustomBuckets)
 	node, err := node.New(nodeConfig)
@@ -104,7 +103,7 @@ func New(
 		utils.Fatalf("Failed to create Erigon node: %v", err)
 	}
 
-	ethereum, err := eth.New(node, ethConfig, logger)
+	ethereum, err := eth.New(node, ethConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -572,7 +572,7 @@ func MockWithZeroTTDGnosis(t *testing.T, withPosDownloader bool) *MockSentry {
 			address: {Balance: funds},
 		},
 	}
-	engine := ethconsensusconfig.CreateConsensusEngine(chainConfig, log.New(), chainConfig.Aura, nil, true, "", "", true, "", nil, false /* readonly */, nil)
+	engine := ethconsensusconfig.CreateConsensusEngine(chainConfig, chainConfig.Aura, nil, true, "", "", true, "", nil, false /* readonly */, nil)
 	return MockWithGenesisEngine(t, gspec, engine, withPosDownloader)
 }
 