Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git (synced 2024-12-21 19:20:39 +00:00)
Clean up DEBUG category logs (#2776)
- move many DEBUG logs into TRACE category
This commit is contained in:
parent c913f35c2e
commit 3bc3be4b5f
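The pattern applied throughout the diff below is the same everywhere: chatty per-message diagnostics drop from DEBUG to TRACE, conditions that a remote peer can trigger at will drop from ERROR to WARN, and a few message prefixes are tidied (for example "[engine]" becomes "[aura]"). A minimal sketch of that before/after shape in Go, assuming the package-level Trace/Warn helpers of the log package Erigon uses; the import path and the handlePeerMessage wrapper are illustrative assumptions, not code from this commit:

package main

import (
	"errors"
	"time"

	"github.com/ledgerwatch/log/v3" // assumed import path; the diff only shows calls on a package named "log"
)

// handlePeerMessage is a hypothetical helper, not part of the commit. It shows
// the log-level policy being applied: routine per-message noise goes to TRACE,
// recoverable failures caused by remote input go to WARN instead of ERROR.
func handlePeerMessage(peerID string, decodeErr error) {
	defer func(start time.Time) {
		// Previously logged at DEBUG; now TRACE so it disappears at the default level.
		log.Trace("Handling incoming message finished", "peer", peerID, "runtime", time.Since(start))
	}(time.Now())

	if decodeErr != nil {
		// Previously ERROR; a malformed message from a peer is not an internal failure.
		log.Warn("Handling incoming message", "error", decodeErr)
	}
}

func main() {
	handlePeerMessage("peer-1", errors.New("rlp: decode error"))
}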
@@ -51,7 +51,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat
 }
 txs = append(txs, txn)
 }
-defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
+defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())

 stateBlockNumber, hash, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters)
 if err != nil {
@@ -41,7 +41,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp

 uncles := block.Uncles()
 if index >= hexutil.Uint(len(uncles)) {
-log.Debug("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
+log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
 return nil, nil
 }
 uncle := types.NewBlockWithHeader(uncles[index])
@@ -73,7 +73,7 @@ func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common

 uncles := block.Uncles()
 if index >= hexutil.Uint(len(uncles)) {
-log.Debug("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
+log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
 return nil, nil
 }
 uncle := types.NewBlockWithHeader(uncles[index])
@@ -210,7 +210,7 @@ func RecvUploadHeadersMessage(ctx context.Context,
 if rlp.IsDecodeError(err) {
 log.Debug("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
 } else {
-log.Error("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
+log.Warn("[RecvUploadHeadersMessage] Handling incoming message", "error", err)
 }
 }
 if wg != nil {
@@ -312,7 +312,7 @@ func RecvMessage(
 if rlp.IsDecodeError(err) {
 log.Debug("[RecvMessage] Handling incoming message", "error", err)
 } else {
-log.Error("[RecvMessage] Handling incoming message", "error", err)
+log.Warn("[RecvMessage] Handling incoming message", "error", err)
 }
 }

@@ -519,7 +519,7 @@ func (cs *ControlServerImpl) blockHeaders66(ctx context.Context, in *proto_sentr
 if req != nil {
 if peer := cs.SendHeaderRequest(ctx, req); peer != nil {
 cs.Hd.SentRequest(req, currentTime, 5 /* timeout */)
-log.Debug("Sent request", "height", req.Number)
+log.Trace("Sent request", "height", req.Number)
 }
 }
 cs.Penalize(ctx, penalties)
@@ -594,7 +594,7 @@ func (cs *ControlServerImpl) blockHeaders65(ctx context.Context, in *proto_sentr
 if req != nil {
 if peer := cs.SendHeaderRequest(ctx, req); peer != nil {
 cs.Hd.SentRequest(req, currentTime, 5 /* timeout */)
-log.Debug("Sent request", "height", req.Number)
+log.Trace("Sent request", "height", req.Number)
 }
 }
 cs.Penalize(ctx, penalties)
@@ -673,7 +673,7 @@ func (cs *ControlServerImpl) newBlock65(ctx context.Context, inreq *proto_sentry
 if _, err1 := sentry.PeerMinBlock(ctx, &outreq, &grpc.EmptyCallOption{}); err1 != nil {
 log.Error("Could not send min block for peer", "err", err1)
 }
-log.Debug(fmt.Sprintf("NewBlockMsg{blockNumber: %d} from [%s]", request.Block.NumberU64(), gointerfaces.ConvertH512ToBytes(inreq.PeerId)))
+log.Trace(fmt.Sprintf("NewBlockMsg{blockNumber: %d} from [%s]", request.Block.NumberU64(), gointerfaces.ConvertH512ToBytes(inreq.PeerId)))
 return nil
 }

@@ -267,13 +267,13 @@ func runPeer(
 default:
 }
 if peerPrinted {
-log.Debug("Peer disconnected", "id", peerID, "name", peerInfo.peer.Fullname())
+log.Trace("Peer disconnected", "id", peerID, "name", peerInfo.peer.Fullname())
 }
 }()
 for {
 if !peerPrinted {
 if time.Now().After(printTime) {
-log.Debug("Peer stable", "id", peerID, "name", peerInfo.peer.Fullname())
+log.Trace("Peer stable", "id", peerID, "name", peerInfo.peer.Fullname())
 peerPrinted = true
 }
 }
@@ -469,10 +469,10 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
 Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
 peerID := peer.ID().String()
 if _, ok := ss.GoodPeers.Load(peerID); ok {
-log.Debug(fmt.Sprintf("[%s] Peer already has connection", peerID))
+log.Trace(fmt.Sprintf("[%s] Peer already has connection", peerID))
 return nil
 }
-log.Debug(fmt.Sprintf("[%s] Start with peer", peerID))
+log.Trace(fmt.Sprintf("[%s] Start with peer", peerID))

 peerInfo := &PeerInfo{
 peer: peer,
@@ -488,7 +488,7 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
 if err != nil {
 return fmt.Errorf("handshake to peer %s: %w", peerID, err)
 }
-log.Debug(fmt.Sprintf("[%s] Received status message OK", peerID), "name", peer.Name())
+log.Trace(fmt.Sprintf("[%s] Received status message OK", peerID), "name", peer.Name())

 if err := runPeer(
 ctx,
@@ -499,7 +499,7 @@ func NewSentryServer(ctx context.Context, dialCandidates enode.Iterator, readNod
 ss.send,
 ss.hasSubscribers,
 ); err != nil {
-log.Debug(fmt.Sprintf("[%s] Error while running peer: %v", peerID, err))
+log.Trace(fmt.Sprintf("[%s] Error while running peer: %v", peerID, err))
 }
 return nil
 },
@@ -889,7 +889,7 @@ func (ss *SentryServerImpl) send(msgID proto_sentry.MessageId, peerID string, b
 ch := ss.messageStreams[msgID][i]
 ch <- req
 if len(ch) > MessagesQueueSize/2 {
-log.Debug("[sentry] consuming is slow", "msgID", msgID.String())
+log.Warn("[sentry] consuming is slow", "msgID", msgID.String())
 // evict old messages from channel
 for j := 0; j < MessagesQueueSize/4; j++ {
 select {
@@ -937,7 +937,7 @@ func (ss *SentryServerImpl) addMessagesStream(ids []proto_sentry.MessageId, ch c

 const MessagesQueueSize = 1024 // one such queue per client of .Messages stream
 func (ss *SentryServerImpl) Messages(req *proto_sentry.MessagesRequest, server proto_sentry.Sentry_MessagesServer) error {
-log.Debug("[Messages] new subscriber", "to", req.Ids)
+log.Trace("[Messages] new subscriber", "to", req.Ids)
 ch := make(chan *proto_sentry.InboundMessage, MessagesQueueSize)
 defer close(ch)
 clean := ss.addMessagesStream(req.Ids, ch)
@@ -104,7 +104,7 @@ var rootCmd = &cobra.Command{
 return true, nil
 }
 if time.Since(a.UpdatedAt) > 24*time.Hour {
-log.Debug("Skipped", "k", common.Bytes2Hex(k), "last updated", a.UpdatedAt)
+log.Trace("Skipped", "k", common.Bytes2Hex(k), "last updated", a.UpdatedAt)
 return true, nil
 }
 if a.Left == 0 {
@@ -268,7 +268,7 @@ func (t *Tracker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 return nil
 }
 if time.Since(a.UpdatedAt) > 5*DisconnectInterval {
-log.Debug("Skipped requset", "peer", common.Bytes2Hex(a.PeerID), "last updated", a.UpdatedAt, "now", time.Now())
+log.Trace("Skipped requset", "peer", common.Bytes2Hex(a.PeerID), "last updated", a.UpdatedAt, "now", time.Now())
 return nil
 }
 if a.Left == 0 {
@@ -231,7 +231,7 @@ func (e *EpochManager) zoomToAfter(chain consensus.ChainHeaderReader, er consens
 panic(fmt.Errorf("proof produced by this engine is invalid: %w", err))
 }
 epochSet := list.validators
-log.Debug("[aura] Updating finality checker with new validator set extracted from epoch", "num", lastTransition.BlockNumber)
+log.Trace("[aura] Updating finality checker with new validator set extracted from epoch", "num", lastTransition.BlockNumber)
 e.finalityChecker = NewRollingFinality(epochSet)
 if proof.SignalNumber >= DEBUG_LOG_FROM {
 fmt.Printf("new rolling finality: %d\n", proof.SignalNumber)
@@ -564,7 +564,7 @@ func (c *AuRa) verifyFamily(chain consensus.ChainHeaderReader, e consensus.Epoch
 //nolint
 if step == parentStep ||
 (header.Number.Uint64() >= c.cfg.ValidateStepTransition && step <= parentStep) {
-log.Debug("[engine] Multiple blocks proposed for step", "num", parentStep)
+log.Trace("[aura] Multiple blocks proposed for step", "num", parentStep)
 _ = setNumber
 /*
 self.validators.report_malicious(
@@ -1096,7 +1096,7 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
 // first check to avoid generating signature most of the time
 // (but there's still a race to the `compare_exchange`)
 if !c.step.canPropose.Load() {
-log.Trace("[engine] Aborting seal generation. Can't propose.")
+log.Trace("[aura] Aborting seal generation. Can't propose.")
 return nil
 }
 parentStep, err := headerStep(parent)
@@ -1108,24 +1108,24 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
 // filter messages from old and future steps and different parents
 expectedDiff := calculateScore(parentStep, step, 0)
 if current.Difficulty.Cmp(expectedDiff.ToBig()) != 0 {
-log.Debug(fmt.Sprintf("[engine] Aborting seal generation. The step or empty_steps have changed in the meantime. %d != %d", current.Difficulty, expectedDiff))
+log.Trace(fmt.Sprintf("[aura] Aborting seal generation. The step or empty_steps have changed in the meantime. %d != %d", current.Difficulty, expectedDiff))
 return nil
 }

 if parentStep > step {
-log.Warn(fmt.Sprintf("[engine] Aborting seal generation for invalid step: %d > %d", parentStep, step))
+log.Warn(fmt.Sprintf("[aura] Aborting seal generation for invalid step: %d > %d", parentStep, step))
 return nil
 }

 validators, setNumber, err := c.epochSet(chain, nil, current, nil)
 if err != nil {
-log.Warn("[engine] Unable to generate seal", "err", err)
+log.Warn("[aura] Unable to generate seal", "err", err)
 return nil
 }

 stepProposerAddr, err := stepProposer(validators, current.ParentHash, step, call)
 if err != nil {
-log.Warn("[engine] Unable to get stepProposer", "err", err)
+log.Warn("[aura] Unable to get stepProposer", "err", err)
 return nil
 }
 if stepProposerAddr != current.Coinbase {
@@ -1143,7 +1143,7 @@ func (c *AuRa) GenerateSeal(chain consensus.ChainHeaderReader, current, parent *
 /*
 signature, err := c.sign(current.bareHash())
 if err != nil {
-log.Warn("[engine] generate_seal: FAIL: Accounts secret key unavailable.", "err", err)
+log.Warn("[aura] generate_seal: FAIL: Accounts secret key unavailable.", "err", err)
 return nil
 }
 */
@@ -168,8 +168,8 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 defer func() {
 elapsed := time.Since(start)

-logFn := logger.Debug
-if elapsed > 3*time.Second {
+logFn := logger.Trace
+if elapsed > 5*time.Second {
 logFn = logger.Info
 }
 logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
@@ -283,7 +283,7 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []
 // any other error, like no pending work or stale mining result).
 func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
 if s.currentBlock == nil {
-s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
+s.ethash.config.Log.Warn("Pending work without block", "sealhash", sealhash)
 return false
 }
 // Make sure the work submitted is present
@@ -318,7 +318,7 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash,
 if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
 select {
 case s.results <- solution:
-s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+s.ethash.config.Log.Trace("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
 return true
 default:
 s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
@@ -19,7 +19,7 @@ type Cancel struct {
 }

 func (c *Cancel) CancelFunc() {
-log.Debug("cancel mining task", "callers", debug.Callers(10))
+log.Trace("Cancel mining task", "callers", debug.Callers(10))
 c.cancel()
 }

@@ -83,7 +83,7 @@ func (journal *txJournal) load(add func([]types.Transaction) []error) error {
 loadBatch := func(txs types.Transactions) {
 for _, err := range add(txs) {
 if err != nil {
-log.Debug("Failed to add journaled transaction", "err", err)
+log.Trace("Failed to add journaled transaction", "err", err)
 dropped++
 }
 }
@@ -366,7 +366,7 @@ func (pool *TxPool) loop() {
 pool.mu.RUnlock()

 if pending != prevPending || queued != prevQueued || stales != prevStales {
-log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
+log.Trace("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
 prevPending, prevQueued, prevStales = pending, queued, stales
 }

@@ -381,14 +381,14 @@ func (f *BlockFetcher) loop() {

 count := f.announces[notification.origin] + 1
 if count > hashLimit {
-log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
+log.Trace("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
 //blockAnnounceDOSMeter.Mark(1)
 break
 }
 // If we have a valid block number, check that it's potentially useful
 if notification.number > 0 {
 if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
+log.Trace("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
 //blockAnnounceDropMeter.Mark(1)
 break
 }
@@ -689,14 +689,14 @@ func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.B
 // Ensure the peer isn't DOSing us
 count := f.queues[peer] + 1
 if count > blockLimit {
-log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
+log.Trace("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
 //blockBroadcastDOSMeter.Mark(1)
 f.forgetHash(hash)
 return
 }
 // Discard any past or too distant blocks
 if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
+log.Trace("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
 //blockBroadcastDropMeter.Mark(1)
 f.forgetHash(hash)
 return
@@ -715,7 +715,7 @@ func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.B
 if f.queueChangeHook != nil {
 f.queueChangeHook(hash, true)
 }
-log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
+log.Trace("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
 }
 }

@@ -726,7 +726,7 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
 hash := block.Hash()

 // Run the import on a new thread
-log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
+log.Trace("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
 defer func() {
 f.forgetHash(hash)
 f.forgetBlock(hash)
@@ -735,7 +735,7 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
 // If the parent's unknown, abort insertion
 parent := f.getBlock(block.ParentHash())
 if parent == nil {
-log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
+log.Trace("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
 return
 }
 // Quickly validate the header and propagate the block if it passes
@@ -750,13 +750,13 @@ func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {

 default:
 // Something went very wrong, drop the peer
-log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
+log.Trace("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
 f.dropPeer(peer)
 return
 }
 // Run the actual import and log any issues
 if _, err := f.insertChain(types.Blocks{block}); err != nil {
-log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
+log.Trace("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
 return
 }
 // If import succeeded, broadcast the block
@@ -213,7 +213,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
 p.knownTxs.Add(hash)
 }
 case <-p.term:
-p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
+p.Log().Trace("Dropping transaction propagation", "count", len(hashes))
 }
 }

@@ -248,7 +248,7 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
 p.knownTxs.Add(hash)
 }
 case <-p.term:
-p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
+p.Log().Trace("Dropping transaction announcement", "count", len(hashes))
 }
 }

@@ -314,7 +314,7 @@ func (p *Peer) AsyncSendNewBlockHash(block *types.Block) {
 }
 p.knownBlocks.Add(block.Hash())
 default:
-p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
+p.Log().Trace("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
 }
 }

@@ -342,7 +342,7 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
 }
 p.knownBlocks.Add(block.Hash())
 default:
-p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
+p.Log().Trace("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
 }
 }

@@ -405,7 +405,7 @@ func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
 // RequestOneHeader is a wrapper around the header query functions to fetch a
 // single header. It is used solely by the fetcher.
 func (p *Peer) RequestOneHeader(hash common.Hash) error {
-p.Log().Debug("Fetching single header", "hash", hash)
+p.Log().Trace("Fetching single header", "hash", hash)
 query := GetBlockHeadersPacket{
 Origin: HashOrNumber{Hash: hash},
 Amount: uint64(1),
@@ -424,7 +424,7 @@ func (p *Peer) RequestOneHeader(hash common.Hash) error {
 // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the hash of an origin block.
 func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
-p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
+p.Log().Trace("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
 query := GetBlockHeadersPacket{
 Origin: HashOrNumber{Hash: origin},
 Amount: uint64(amount),
@@ -443,7 +443,7 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
 // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the number of an origin block.
 func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
-p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
+p.Log().Trace("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
 query := GetBlockHeadersPacket{
 Origin: HashOrNumber{Number: origin},
 Amount: uint64(amount),
@@ -474,7 +474,7 @@ func (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int,
 // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
 // specified.
 func (p *Peer) RequestBodies(hashes []common.Hash) error {
-p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
+p.Log().Trace("Fetching batch of block bodies", "count", len(hashes))
 if p.Version() >= ETH66 {
 return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
 RequestId: rand.Uint64(), //nolint:gosec
@@ -487,7 +487,7 @@ func (p *Peer) RequestBodies(hashes []common.Hash) error {
 // RequestNodeData fetches a batch of arbitrary data from a node's known state
 // data, corresponding to the specified hashes.
 func (p *Peer) RequestNodeData(hashes []common.Hash) error {
-p.Log().Debug("Fetching batch of state data", "count", len(hashes))
+p.Log().Trace("Fetching batch of state data", "count", len(hashes))
 if p.Version() >= ETH66 {
 return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{
 RequestId: rand.Uint64(), //nolint:gosec
@@ -499,7 +499,7 @@ func (p *Peer) RequestNodeData(hashes []common.Hash) error {

 // RequestReceipts fetches a batch of transaction receipts from a remote node.
 func (p *Peer) RequestReceipts(hashes []common.Hash) error {
-p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
+p.Log().Trace("Fetching batch of receipts", "count", len(hashes))
 if p.Version() >= ETH66 {
 return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
 RequestId: rand.Uint64(), //nolint:gosec
@@ -511,7 +511,7 @@ func (p *Peer) RequestReceipts(hashes []common.Hash) error {

 // RequestTxs fetches a batch of transactions from a remote node.
 func (p *Peer) RequestTxs(hashes []common.Hash) error {
-p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
+p.Log().Trace("Fetching batch of transactions", "count", len(hashes))
 if p.Version() >= ETH66 {
 return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
 RequestId: rand.Uint64(), //nolint:gosec
@@ -200,7 +200,7 @@ Loop:
 case <-timer.C:
 log.Trace("RequestQueueTime (bodies) ticked")
 case <-cfg.bd.DeliveryNotify:
-log.Debug("bodyLoop woken up by the incoming request")
+log.Trace("bodyLoop woken up by the incoming request")
 }
 d6 += time.Since(start)
 }
@@ -128,7 +128,7 @@ Loop:
 peer = cfg.headerReqSend(ctx, req)
 if peer != nil {
 cfg.hd.SentRequest(req, currentTime, 5 /* timeout */)
-log.Debug("Sent request", "height", req.Number)
+log.Trace("Sent request", "height", req.Number)
 }
 }
 cfg.penalize(ctx, penalties)
@@ -139,7 +139,7 @@ Loop:
 peer = cfg.headerReqSend(ctx, req)
 if peer != nil {
 cfg.hd.SentRequest(req, currentTime, 5 /*timeout */)
-log.Debug("Sent request", "height", req.Number)
+log.Trace("Sent request", "height", req.Number)
 }
 }
 cfg.penalize(ctx, penalties)
@@ -151,7 +151,7 @@ Loop:
 if req != nil {
 peer = cfg.headerReqSend(ctx, req)
 if peer != nil {
-log.Debug("Sent skeleton request", "height", req.Number)
+log.Trace("Sent skeleton request", "height", req.Number)
 }
 }
 // Load headers into the database
@@ -195,7 +195,7 @@ Loop:
 case <-timer.C:
 log.Trace("RequestQueueTime (header) ticked")
 case <-cfg.hd.DeliveryNotify:
-log.Debug("headerLoop woken up by the incoming request")
+log.Trace("headerLoop woken up by the incoming request")
 case <-cfg.hd.SkipCycleHack:
 break Loop
 }
@@ -171,7 +171,7 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64,
 } else {
 changeSetBucket = kv.AccountChangeSet
 }
-log.Debug(fmt.Sprintf("[%s] Incremental state promotion of intermediate hashes", logPrefix), "from", from, "to", to, "csbucket", changeSetBucket)
+log.Trace(fmt.Sprintf("[%s] Incremental state promotion of intermediate hashes", logPrefix), "from", from, "to", to, "csbucket", changeSetBucket)

 startkey := dbutils.EncodeBlockNumber(from + 1)

@@ -319,7 +319,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc
 if err = commitUncle(env, uncle); err != nil {
 log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
 } else {
-log.Debug("Committing new uncle to block", "hash", hash)
+log.Trace("Committing new uncle to block", "hash", hash)
 uncles = append(uncles, uncle)
 }
 }
@@ -251,7 +251,7 @@ func addTransactionsToMiningBlock(current *MiningBlock, chainConfig params.Chain
 default:
 // Strange error, discard the transaction and get the next in line (note, the
 // nonce-too-high clause will prevent us from executing in vain).
-log.Debug("Transaction failed, account skipped", "hash", txn.Hash(), "err", err)
+log.Trace("Transaction failed, account skipped", "hash", txn.Hash(), "err", err)
 txs.Shift()
 }
 }
@@ -115,7 +115,7 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R
 log.Info(fmt.Sprintf("[%s] Preload headers", logPrefix), "block_number", binary.BigEndian.Uint64(k))
 }
 }
-log.Debug(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "amount", len(canonical))
+log.Trace(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "amount", len(canonical))

 jobs := make(chan *senderRecoveryJob, cfg.batchSize)
 out := make(chan *senderRecoveryJob, cfg.batchSize)
@@ -118,7 +118,7 @@ func incrementalTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPoo
 currentHeaderIdx++
 }

-log.Debug(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical))
+log.Trace(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical))
 bodies, err := tx.Cursor(kv.BlockBody)
 if err != nil {
 return err
@@ -213,7 +213,7 @@ func unwindTxPoolUpdate(logPrefix string, from, to uint64, pool *core.TxPool, tx

 copy(canonical[blockNumber-from-1][:], v)
 }
-log.Debug(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical))
+log.Trace(fmt.Sprintf("[%s] Read canonical hashes", logPrefix), "hashes", len(canonical))
 senders := make([][]common.Address, to-from+1)
 sendersC, err := tx.Cursor(kv.Senders)
 if err != nil {
@@ -219,12 +219,12 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
 stage := s.stages[s.currentStage]

 if string(stage.ID) == debug.StopBeforeStage() { // stop process for debugging reasons
-log.Error("STOP_BEFORE_STAGE env flag forced to stop app")
+log.Warn("STOP_BEFORE_STAGE env flag forced to stop app")
 os.Exit(1)
 }

 if stage.Disabled || stage.Forward == nil {
-log.Debug(fmt.Sprintf("%s disabled. %s", stage.ID, stage.DisabledDescription))
+log.Trace(fmt.Sprintf("%s disabled. %s", stage.ID, stage.DisabledDescription))

 s.NextStage()
 continue
@@ -327,7 +327,7 @@ func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, b

 func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error {
 t := time.Now()
-log.Debug("Unwind...", "stage", stage.ID)
+log.Trace("Unwind...", "stage", stage.ID)
 stageState, err := s.StageState(stage.ID, tx, db)
 if err != nil {
 return err
@@ -360,7 +360,7 @@ func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx

 func (s *Sync) pruneStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error {
 t := time.Now()
-log.Debug("Prune...", "stage", stage.ID)
+log.Trace("Prune...", "stage", stage.ID)

 stageState, err := s.StageState(stage.ID, tx, db)
 if err != nil {
@@ -71,7 +71,7 @@ func (s *EthBackendServer) NetPeerCount(_ context.Context, _ *remote.NetPeerCoun
 }

 func (s *EthBackendServer) Subscribe(r *remote.SubscribeRequest, subscribeServer remote.ETHBACKEND_SubscribeServer) error {
-log.Debug("establishing event subscription channel with the RPC daemon")
+log.Trace("Establishing event subscription channel with the RPC daemon ...")
 s.events.AddHeaderSubscription(func(h *types.Header) error {
 select {
 case <-s.ctx.Done():
@@ -166,7 +166,7 @@ func (s *MinedBlockStreams) Broadcast(reply *proto_txpool.OnMinedBlockReply) {
 for id, stream := range s.chans {
 err := stream.Send(reply)
 if err != nil {
-log.Debug("failed send to mined block stream", "err", err)
+log.Trace("failed send to mined block stream", "err", err)
 select {
 case <-stream.Context().Done():
 delete(s.chans, id)
@@ -211,7 +211,7 @@ func (s *PendingBlockStreams) Broadcast(reply *proto_txpool.OnPendingBlockReply)
 for id, stream := range s.chans {
 err := stream.Send(reply)
 if err != nil {
-log.Debug("failed send to mined block stream", "err", err)
+log.Trace("failed send to mined block stream", "err", err)
 select {
 case <-stream.Context().Done():
 delete(s.chans, id)
@@ -256,7 +256,7 @@ func (s *PendingLogsStreams) Broadcast(reply *proto_txpool.OnPendingLogsReply) {
 for id, stream := range s.chans {
 err := stream.Send(reply)
 if err != nil {
-log.Debug("failed send to mined block stream", "err", err)
+log.Trace("failed send to mined block stream", "err", err)
 select {
 case <-stream.Context().Done():
 delete(s.chans, id)
@@ -252,7 +252,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
 if block != nil {
 uncles := block.Uncles()
 if index >= hexutil.Uint(len(uncles)) {
-log.Debug("Requested uncle not found", "number", blockNr, "hash", block.Hash(), "index", index)
+log.Trace("Requested uncle not found", "number", blockNr, "hash", block.Hash(), "index", index)
 return nil, nil
 }
 block = types.NewBlockWithHeader(uncles[index])
@@ -268,7 +268,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b
 if block != nil {
 uncles := block.Uncles()
 if index >= hexutil.Uint(len(uncles)) {
-log.Debug("Requested uncle not found", "number", block.Number(), "hash", blockHash, "index", index)
+log.Trace("Requested uncle not found", "number", block.Number(), "hash", blockHash, "index", index)
 return nil, nil
 }
 block = types.NewBlockWithHeader(uncles[index])
@@ -534,13 +534,13 @@ func (t *dialTask) resolve(d *dialScheduler) bool {
 if t.resolveDelay > maxResolveDelay {
 t.resolveDelay = maxResolveDelay
 }
-d.log.Debug("Resolving node failed", "id", t.dest.ID(), "newdelay", t.resolveDelay)
+d.log.Warn("Resolving node failed", "id", t.dest.ID(), "newdelay", t.resolveDelay)
 return false
 }
 // The node was found.
 t.resolveDelay = initialResolveDelay
 t.dest = resolved
-d.log.Debug("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
+d.log.Trace("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
 return true
 }

@@ -54,7 +54,7 @@ func checkClockDrift() {
 log.Warn(fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift))
 log.Warn("Please enable network time synchronisation in system settings.")
 } else {
-log.Debug("NTP sanity check done", "drift", drift)
+log.Trace("NTP sanity check done", "drift", drift)
 }
 }

@@ -332,7 +332,7 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
 // Also fetch record if the node replied and returned a higher sequence number.
 if last.Seq() < remoteSeq {
 if n, err := tab.net.RequestENR(unwrapNode(last)); err != nil {
-tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
+tab.log.Trace("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
 } else {
 last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
 }
@@ -344,16 +344,16 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
 if rErr == nil {
 // The node responded, move it to the front.
 last.livenessChecks++
-tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
+tab.log.Trace("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
 tab.bumpInBucket(b, last)
 return
 }
 // No reply received, pick a replacement or delete the node if there aren't
 // any replacements.
 if r := tab.replace(b, last); r != nil {
-tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
+tab.log.Trace("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
 } else {
-tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
+tab.log.Trace("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
 }
 }

@@ -552,11 +552,11 @@ func (tab *Table) addIP(b *bucket, ip net.IP) bool {
 return true
 }
 if !tab.ips.Add(ip) {
-tab.log.Debug("IP exceeds table limit", "ip", ip)
+tab.log.Trace("IP exceeds table limit", "ip", ip)
 return false
 }
 if !b.ips.Add(ip) {
-tab.log.Debug("IP exceeds bucket limit", "ip", ip)
+tab.log.Trace("IP exceeds bucket limit", "ip", ip)
 tab.ips.Remove(ip)
 return false
 }
@@ -540,12 +540,12 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) {
 nbytes, from, err := t.conn.ReadFromUDP(buf)
 if netutil.IsTemporaryError(err) {
 // Ignore temporary read errors.
-t.log.Debug("Temporary UDP read error", "err", err)
+t.log.Trace("Temporary UDP read error", "err", err)
 continue
 } else if err != nil {
 // Shut down the loop for permament errors.
 if err != io.EOF {
-t.log.Debug("UDP read error", "err", err)
+t.log.Trace("UDP read error", "err", err)
 }
 return
 }
@@ -561,7 +561,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) {
 func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error {
 rawpacket, fromKey, hash, err := v4wire.Decode(buf)
 if err != nil {
-t.log.Debug("Bad discv4 packet", "addr", from, "err", err)
+t.log.Trace("Bad discv4 packet", "addr", from, "err", err)
 return err
 }
 packet := t.wrapPacket(rawpacket)
@@ -382,7 +382,7 @@ func (t *UDPv5) waitForNodes(c *callV5, distances []uint) ([]*enode.Node, error)
 for _, record := range response.Nodes {
 node, err := t.verifyResponseNode(c, record, distances, seen)
 if err != nil {
-t.log.Debug("Invalid record in "+response.Name(), "id", c.node.ID(), "err", err)
+t.log.Trace("Invalid record in "+response.Name(), "id", c.node.ID(), "err", err)
 continue
 }
 nodes = append(nodes, node)
@@ -622,12 +622,12 @@ func (t *UDPv5) readLoop() {
 nbytes, from, err := t.conn.ReadFromUDP(buf)
 if netutil.IsTemporaryError(err) {
 // Ignore temporary read errors.
-t.log.Debug("Temporary UDP read error", "err", err)
+t.log.Trace("Temporary UDP read error", "err", err)
 continue
 } else if err != nil {
 // Shut down the loop for permament errors.
 if err != io.EOF {
-t.log.Debug("UDP read error", "err", err)
+t.log.Trace("UDP read error", "err", err)
 }
 return
 }
@@ -650,7 +650,7 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error {
 addr := fromAddr.String()
 fromID, fromNode, packet, err := t.codec.Decode(rawpacket, addr)
 if err != nil {
-t.log.Debug("Bad discv5 packet", "id", fromID, "addr", addr, "err", err)
+t.log.Trace("Bad discv5 packet", "id", fromID, "addr", addr, "err", err)
 return err
 }
 if fromNode != nil {
@@ -669,15 +669,15 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error {
 func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr *net.UDPAddr, p v5wire.Packet) bool {
 ac := t.activeCallByNode[fromID]
 if ac == nil || !bytes.Equal(p.RequestID(), ac.reqid) {
-t.log.Debug(fmt.Sprintf("Unsolicited/late %s response", p.Name()), "id", fromID, "addr", fromAddr)
+t.log.Trace(fmt.Sprintf("Unsolicited/late %s response", p.Name()), "id", fromID, "addr", fromAddr)
 return false
 }
 if !fromAddr.IP.Equal(ac.node.IP()) || fromAddr.Port != ac.node.UDP() {
-t.log.Debug(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr)
+t.log.Trace(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr)
 return false
 }
 if p.Kind() != ac.responseType {
-t.log.Debug(fmt.Sprintf("Wrong discv5 response type %s", p.Name()), "id", fromID, "addr", fromAddr)
+t.log.Trace(fmt.Sprintf("Wrong discv5 response type %s", p.Name()), "id", fromID, "addr", fromAddr)
 return false
 }
 t.startResponseTimeout(ac)
@@ -276,7 +276,7 @@ func (it *randomIterator) nextNode() *enode.Node {
 if err == it.ctx.Err() {
 return nil // context canceled.
 }
-it.c.cfg.Logger.Debug("Error in DNS random node sync", "tree", ct.loc.domain, "err", err)
+it.c.cfg.Logger.Trace("Error in DNS random node sync", "tree", ct.loc.domain, "err", err)
 continue
 }
 if n != nil {
@@ -349,7 +349,7 @@ func (it *randomIterator) waitForRootUpdates(trees []*clientTree) bool {
 }

 sleep := nextCheck.Sub(it.c.clock.Now())
-it.c.cfg.Logger.Debug("DNS iterator waiting for root updates", "sleep", sleep, "tree", minTree.loc.domain)
+it.c.cfg.Logger.Trace("DNS iterator waiting for root updates", "sleep", sleep, "tree", minTree.loc.domain)
 timeout := it.c.clock.NewTimer(sleep)
 defer timeout.Stop()
 select {
@@ -281,7 +281,7 @@ func (ln *LocalNode) sign() {
 panic(fmt.Errorf("enode: can't verify local record: %w", err))
 }
 ln.cur.Store(n)
-log.Debug("New local node record", "seq", ln.seq, "id", n.ID(), "ip", n.IP(), "udp", n.UDP(), "tcp", n.TCP())
+log.Trace("New local node record", "seq", ln.seq, "id", n.ID(), "ip", n.IP(), "udp", n.UDP(), "tcp", n.TCP())
 }

 func (ln *LocalNode) bumpSeq() {
@@ -102,11 +102,11 @@ func Map(m Interface, c <-chan struct{}, protocol string, extport, intport int,
 refresh := time.NewTimer(mapTimeout)
 defer func() {
 refresh.Stop()
-log.Debug("Deleting port mapping")
+log.Trace("Deleting port mapping")
 m.DeleteMapping(protocol, extport, intport)
 }()
 if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
-log.Debug("Couldn't add port mapping", "err", err)
+log.Warn("Couldn't add port mapping", "err", err)
 } else {
 log.Info("Mapped network port")
 }
@@ -119,7 +119,7 @@ func Map(m Interface, c <-chan struct{}, protocol string, extport, intport int,
 case <-refresh.C:
 log.Trace("Refreshing port mapping")
 if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
-log.Debug("Couldn't add port mapping", "err", err)
+log.Warn("Couldn't add port mapping", "err", err)
 }
 refresh.Reset(mapTimeout)
 }
@@ -576,7 +576,7 @@ func (srv *Server) setupDiscovery() error {
 return err
 }
 realaddr := conn.LocalAddr().(*net.UDPAddr)
-srv.log.Debug("UDP listener up", "addr", realaddr)
+srv.log.Trace("UDP listener up", "addr", realaddr)
 if srv.NAT != nil {
 if !realaddr.IP.IsLoopback() {
 srv.loopWG.Add(1)
@@ -789,7 +789,7 @@ running:
 // The handshakes are done and it passed all checks.
 p := srv.launchPeer(c)
 peers[c.node.ID()] = p
-srv.log.Debug("Adding p2p peer", "peercount", len(peers), "id", p.ID(), "conn", c.flags, "addr", p.RemoteAddr(), "name", p.Name())
+srv.log.Trace("Adding p2p peer", "peercount", len(peers), "id", p.ID(), "conn", c.flags, "addr", p.RemoteAddr(), "name", p.Name())
 srv.dialsched.peerAdded(c)
 if p.Inbound() {
 inboundCount++
@@ -801,7 +801,7 @@ running:
 // A peer disconnected.
 d := common.PrettyDuration(mclock.Now() - pd.created)
 delete(peers, pd.ID())
-srv.log.Debug("Removing p2p peer", "peercount", len(peers), "id", pd.ID(), "duration", d, "req", pd.requested, "err", pd.err)
+srv.log.Trace("Removing p2p peer", "peercount", len(peers), "id", pd.ID(), "duration", d, "req", pd.requested, "err", pd.err)
 srv.dialsched.peerRemoved(pd.rw)
 if pd.Inbound() {
 inboundCount--
@@ -861,7 +861,7 @@ func (srv *Server) addPeerChecks(peers map[enode.ID]*Peer, inboundCount int, c *
 // inbound connections.
 func (srv *Server) listenLoop() {
 defer debug.LogPanic()
-srv.log.Debug("TCP listener up", "addr", srv.listener.Addr())
+srv.log.Trace("TCP listener up", "addr", srv.listener.Addr())

 // The slots channel limits accepts of new connections.
 tokens := defaultMaxPendingPeers
@@ -895,13 +895,13 @@ func (srv *Server) listenLoop() {
 fd, err = srv.listener.Accept()
 if netutil.IsTemporaryError(err) {
 if time.Since(lastLog) > 1*time.Second {
-srv.log.Debug("Temporary read error", "err", err)
+srv.log.Trace("Temporary read error", "err", err)
 lastLog = time.Now()
 }
 time.Sleep(time.Millisecond * 200)
 continue
 } else if err != nil {
-srv.log.Debug("Read error", "err", err)
+srv.log.Trace("Read error", "err", err)
 slots <- struct{}{}
 return
 }
@@ -910,7 +910,7 @@ func (srv *Server) listenLoop() {

 remoteIP := netutil.AddrIP(fd.RemoteAddr())
 if err := srv.checkInboundConn(fd, remoteIP); err != nil {
-srv.log.Debug("Rejected inbound connnection", "addr", fd.RemoteAddr(), "err", err)
+srv.log.Trace("Rejected inbound connnection", "addr", fd.RemoteAddr(), "err", err)
 fd.Close()
 slots <- struct{}{}
 continue
@@ -76,18 +76,18 @@ func startStop(net *Network, quit chan struct{}, nodeCount int) {
 id := nodes[rand.Intn(len(nodes))]
 log.Info("stopping node", "id", id)
 if err := net.Stop(id); err != nil {
-log.Error("error stopping node", "id", id, "err", err)
+log.Error("Error stopping node", "id", id, "err", err)
 return
 }

 select {
 case <-quit:
-log.Info("Terminating simulation loop")
+log.Info("Terminating simulation loop ...")
 return
 case <-time.After(3 * time.Second):
 }

-log.Debug("starting node", "id", id)
+log.Trace("Starting node ...", "id", id)
 if err := net.Start(id); err != nil {
 log.Error("error starting node", "id", id, "err", err)
 return
@@ -147,7 +147,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) {
 return
 case <-time.After(randWait):
 }
-log.Debug(fmt.Sprintf("node %v shutting down", nodes[i]))
+log.Trace(fmt.Sprintf("Node %v shutting down ...", nodes[i]))
 err := net.Stop(nodes[i])
 if err != nil {
 log.Error("Error stopping node", "node", nodes[i])
@@ -186,7 +186,7 @@ func connectNodesInRing(net *Network, nodeCount int) ([]enode.ID, error) {
 log.Error("Error starting a node!", "err", err)
 return nil, err
 }
-log.Debug(fmt.Sprintf("node %v starting up", id))
+log.Trace(fmt.Sprintf("Node %v starting up", id))
 }
 for i, id := range ids {
 peerID := ids[(i+1)%len(ids)]
@@ -311,7 +311,7 @@ func (net *Network) Connect(oneID, otherID enode.ID) error {
 }

 func (net *Network) connect(oneID, otherID enode.ID) error {
-log.Debug("Connecting nodes with addPeer", "id", oneID, "other", otherID)
+log.Trace("Connecting nodes with addPeer", "id", oneID, "other", otherID)
 conn, err := net.initConn(oneID, otherID)
 if err != nil {
 return err
@@ -683,7 +683,7 @@ func (net *Network) initConn(oneID, otherID enode.ID) (*Conn, error) {
 log.Trace("Nodes not up", "err", err)
 return nil, fmt.Errorf("nodes not up: %v", err)
 }
-log.Debug("Connection initiated", "id", oneID, "other", otherID)
+log.Trace("Connection initiated", "id", oneID, "other", otherID)
 conn.initiated = time.Now()
 return conn, nil
 }
@@ -691,7 +691,7 @@ func (net *Network) initConn(oneID, otherID enode.ID) (*Conn, error) {
 // Shutdown stops all nodes in the network and closes the quit channel
 func (net *Network) Shutdown() {
 for _, node := range net.Nodes {
-log.Debug("Stopping node", "id", node.ID())
+log.Trace("Stopping node", "id", node.ID())
 if err := node.Stop(); err != nil {
 log.Warn("Can't stop node", "id", node.ID(), "err", err)
 }
@@ -118,7 +118,7 @@ OUTER:
 checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
 checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
 connEventCount--
-log.Debug("ev", "count", connEventCount)
+log.Trace("ev", "count", connEventCount)
 if connEventCount == 0 {
 break OUTER
 }
@@ -135,7 +135,7 @@ OUTER:
 if err != nil {
 t.Fatal(err)
 }
-log.Debug("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j))
+log.Trace("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j))

 // verify that the snap element numbers check out
 if len(checkIds) != len(snap.Conns) || len(checkIds) != len(snap.Nodes) {
@@ -214,11 +214,11 @@ OuterTwo:
 if !ev.Conn.Up {
 t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
 }
-log.Debug("conn", "on", ev.Conn.One, "other", ev.Conn.Other)
+log.Trace("conn", "on", ev.Conn.One, "other", ev.Conn.Other)
 checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
 checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
 connEventCount--
-log.Debug("ev", "count", connEventCount)
+log.Trace("ev", "count", connEventCount)
 if connEventCount == 0 {
 break OuterTwo
 }
@@ -760,7 +760,7 @@ func benchmarkMinimalServiceTmp(b *testing.B) {
 defer cancel()
 for nodid, peers := range protoCMap {
 for peerid, peerC := range peers {
-log.Debug("getting ", "node", nodid, "peer", peerid)
+log.Trace("getting ", "node", nodid, "peer", peerid)
 select {
 case <-ctx.Done():
 b.Fatal(ctx.Err())
|
@ -564,13 +564,13 @@ func (c *Client) dispatch(codec ServerCodec) {
|
||||
}
|
||||
|
||||
case err := <-c.readErr:
|
||||
conn.handler.log.Debug("RPC connection read error", "err", err)
|
||||
conn.handler.log.Trace("RPC connection read error", "err", err)
|
||||
conn.close(err, lastOp)
|
||||
reading = false
|
||||
|
||||
// Reconnect:
|
||||
case newcodec := <-c.reconnected:
|
||||
log.Debug("RPC client reconnected", "reading", reading, "conn", newcodec.remoteAddr())
|
||||
log.Trace("RPC client reconnected", "reading", reading, "conn", newcodec.remoteAddr())
|
||||
if reading {
|
||||
// Wait for the previous read loop to exit. This is a rare case which
|
||||
// happens if this loop isn't notified in time after the connection breaks.
|
||||
|
@ -613,7 +613,7 @@ func (l *flakeyListener) Accept() (net.Conn, error) {
|
||||
if err == nil {
|
||||
timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout)))
|
||||
time.AfterFunc(timeout, func() {
|
||||
log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
|
||||
log.Trace(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
|
||||
c.Close()
|
||||
})
|
||||
}
|
||||
|
@ -303,7 +303,7 @@ func (h *handler) handleImmediate(msg *jsonrpcMessage) bool {
|
||||
func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
|
||||
var result subscriptionResult
|
||||
if err := json.Unmarshal(msg.Params, &result); err != nil {
|
||||
h.log.Debug("Dropping invalid subscription message")
|
||||
h.log.Trace("Dropping invalid subscription message")
|
||||
return
|
||||
}
|
||||
if h.clientSubs[result.ID] != nil {
|
||||
@ -315,7 +315,7 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
|
||||
func (h *handler) handleResponse(msg *jsonrpcMessage) {
|
||||
op := h.respWait[string(msg.ID)]
|
||||
if op == nil {
|
||||
h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
|
||||
h.log.Trace("Unsolicited RPC response", "reqid", idForLog{msg.ID})
|
||||
return
|
||||
}
|
||||
delete(h.respWait, string(msg.ID))
|
||||
@ -344,7 +344,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json
|
||||
switch {
|
||||
case msg.isNotification():
|
||||
h.handleCall(ctx, msg, stream)
|
||||
h.log.Debug("Served", "t", time.Since(start), "method", msg.Method, "params", string(msg.Params))
|
||||
h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "params", string(msg.Params))
|
||||
return nil
|
||||
case msg.isCall():
|
||||
resp := h.handleCall(ctx, msg, stream)
|
||||
@ -357,7 +357,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json
|
||||
"err", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
h.log.Debug("Served", "t", time.Since(start), "method", msg.Method, "reqid", idForLog{msg.ID}, "params", string(msg.Params))
|
||||
h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "reqid", idForLog{msg.ID}, "params", string(msg.Params))
|
||||
return resp
|
||||
case msg.hasValidID():
|
||||
return msg.errorResponse(&invalidRequestError{"invalid request"})
|
||||
|
@@ -129,7 +129,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec, stre
 // subscriptions.
 func (s *Server) Stop() {
 if atomic.CompareAndSwapInt32(&s.run, 1, 0) {
-log.Debug("RPC server shutting down")
+log.Info("RPC server shutting down")
 s.codecs.Each(func(c interface{}) bool {
 c.(ServerCodec).close()
 return true
@@ -57,7 +57,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string, compression bool) htt
 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 conn, err := upgrader.Upgrade(w, r, nil)
 if err != nil {
-log.Debug("WebSocket upgrade failed", "err", err)
+log.Warn("WebSocket upgrade failed", "err", err)
 return
 }
 codec := newWebsocketCodec(conn)
@@ -87,7 +87,7 @@ func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool {
 origins.Add("http://" + hostname)
 }
 }
-log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice()))
+log.Trace(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice()))

 f := func(req *http.Request) bool {
 // Skip origin verification if no Origin header is present. The origin check
@@ -520,7 +520,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime uint64) (*HeaderRequest
 defer hd.lock.Unlock()
 var penalties []PenaltyItem
 if hd.anchorQueue.Len() == 0 {
-log.Debug("Empty anchor queue")
+log.Trace("Empty anchor queue")
 return nil, penalties
 }
 for hd.anchorQueue.Len() > 0 {
@@ -559,7 +559,7 @@ func (hd *HeaderDownload) SentRequest(req *HeaderRequest, currentTime, timeout u
 func (hd *HeaderDownload) RequestSkeleton() *HeaderRequest {
 hd.lock.RLock()
 defer hd.lock.RUnlock()
-log.Debug("Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeight, "highestInDb", hd.highestInDb)
+log.Trace("Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeight, "highestInDb", hd.highestInDb)
 if len(hd.anchors) > 16 {
 return nil // Need to be below anchor threshold to produce skeleton request
 }
@@ -841,13 +841,13 @@ func (hi *HeaderInserter) BestHeaderChanged() bool {
 // speeds up visibility of new blocks
 // It remember peerID - then later - if anchors created from segments will abandoned - this peerID gonna get Penalty
 func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, peerID string) (requestMore bool, penalties []PenaltyItem) {
-log.Debug("processSegment", "from", segment.Headers[0].Number.Uint64(), "to", segment.Headers[len(segment.Headers)-1].Number.Uint64())
+log.Trace("processSegment", "from", segment.Headers[0].Number.Uint64(), "to", segment.Headers[len(segment.Headers)-1].Number.Uint64())
 hd.lock.Lock()
 defer hd.lock.Unlock()
 foundAnchor, start := hd.findAnchors(segment)
 foundTip, end := hd.findLink(segment, start) // We ignore penalty because we will check it as part of PoW check
 if end == 0 {
-log.Debug("Duplicate segment")
+log.Trace("Duplicate segment")
 return
 }
 height := segment.Headers[len(segment.Headers)-1].Number.Uint64()
@@ -868,7 +868,7 @@ func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, p
 log.Debug("Connect failed", "error", err)
 return
 }
-log.Debug("Connected", "start", startNum, "end", endNum)
+log.Trace("Connected", "start", startNum, "end", endNum)
 } else {
 // ExtendDown
 var err error
@@ -876,7 +876,7 @@ func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, p
 log.Debug("ExtendDown failed", "error", err)
 return
 }
-log.Debug("Extended Down", "start", startNum, "end", endNum)
+log.Trace("Extended Down", "start", startNum, "end", endNum)
 }
 } else if foundTip {
 if end > 0 {
@@ -885,7 +885,7 @@ func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, p
 log.Debug("ExtendUp failed", "error", err)
 return
 }
-log.Debug("Extended Up", "start", startNum, "end", endNum)
+log.Trace("Extended Up", "start", startNum, "end", endNum)
 }
 } else {
 // NewAnchor
@@ -894,12 +894,12 @@ func (hd *HeaderDownload) ProcessSegment(segment *ChainSegment, newBlock bool, p
 log.Debug("NewAnchor failed", "error", err)
 return
 }
-log.Debug("NewAnchor", "start", startNum, "end", endNum)
+log.Trace("NewAnchor", "start", startNum, "end", endNum)
 }
 //log.Info(hd.anchorState())
-log.Debug("Link queue", "size", hd.linkQueue.Len())
+log.Trace("Link queue", "size", hd.linkQueue.Len())
 if hd.linkQueue.Len() > hd.linkLimit {
-log.Debug("Too many links, cutting down", "count", hd.linkQueue.Len(), "tried to add", end-start, "limit", hd.linkLimit)
+log.Trace("Too many links, cutting down", "count", hd.linkQueue.Len(), "tried to add", end-start, "limit", hd.linkLimit)
 }
 for hd.linkQueue.Len() > hd.linkLimit {
 link := heap.Pop(hd.linkQueue).(*Link)
@@ -172,7 +172,7 @@ func getHashGetter(requireCanonical bool, tx kv.Tx) func(uint64) common.Hash {
 return func(n uint64) common.Hash {
 hash, err := rawdb.ReadCanonicalHash(tx, n)
 if err != nil {
-log.Debug("can't get block hash by number", "number", n, "only-canonical", requireCanonical)
+log.Debug("Can't get block hash by number", "number", n, "only-canonical", requireCanonical)
 }
 return hash
 }