mirror of
https://gitlab.com/pulsechaincom/erigon-pulse.git
synced 2024-12-21 19:20:39 +00:00
Header downloader monitor for diagnostics system (#7590)
## What's this PR about?
- Added states to be sent to the diagnostics system for the header downloader monitor
- Added the code for sending the states through the tunnel
- Added code to update the states in the header_algos.go file

Co-authored-by: alex.sharov <AskAlexSharov@gmail.com>
This commit is contained in:
parent
027d83b556
commit
1fb053f260
@ -9,6 +9,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var BlockBodyDownloadStates *States = NewStates(64 * 1024)
|
var BlockBodyDownloadStates *States = NewStates(64 * 1024)
|
||||||
|
// HeaderDownloadStates buffers the most recent header download state changes
// (up to 64Ki entries) so the diagnostics system can stream them on demand.
var HeaderDownloadStates *States = NewStates(64 * 1024)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
BlockBodyCleared byte = iota
|
BlockBodyCleared byte = iota
|
||||||
@ -22,6 +23,17 @@ const (
|
|||||||
BlockBodyInDb
|
BlockBodyInDb
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Header download states reported to the diagnostics system through
// HeaderDownloadStates. Each value is a stable one-byte code recorded
// against a block height as the header downloader processes it.
const (
	HeaderInvalidated byte = iota // anchor invalidated (e.g. suspected unavailability)
	HeaderRequested               // header requested from a peer
	HeaderSkeletonRequested       // header requested as part of a skeleton request
	HeaderRetryNotReady           // anchor not yet ready for a retry
	HeaderEmpty                   // no anchor available for the request
	HeaderBad                     // header rejected and marked as bad
	HeaderEvicted                 // header link evicted from the queues
	HeaderInserted                // header successfully inserted
)
|
||||||
|
|
||||||
type SnapshotItem struct {
|
type SnapshotItem struct {
|
||||||
id uint64
|
id uint64
|
||||||
state byte
|
state byte
|
||||||
|
35
diagnostics/header_downloader_stats.go
Normal file
35
diagnostics/header_downloader_stats.go
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
package diagnostics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/ledgerwatch/erigon/dataflow"
|
||||||
|
)
|
||||||
|
|
||||||
|
func SetupHeaderDownloadStats() {
|
||||||
|
http.HandleFunc("/debug/metrics/headers_download", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
writeHeaderDownload(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeHeaderDownload(w io.Writer, r *http.Request) {
|
||||||
|
if err := r.ParseForm(); err != nil {
|
||||||
|
fmt.Fprintf(w, "ERROR: parsing arguments: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sinceTickStr := r.Form.Get("sincetick")
|
||||||
|
var tick int64
|
||||||
|
if sinceTickStr != "" {
|
||||||
|
var err error
|
||||||
|
if tick, err = strconv.ParseInt(sinceTickStr, 10, 64); err != nil {
|
||||||
|
fmt.Fprintf(w, "ERROR: parsing sincemilli: %v\n", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "SUCCESS\n")
|
||||||
|
// fmt.Fprintf(w, "%d,%d\n", p2p.ingressTrafficMeter, )
|
||||||
|
dataflow.HeaderDownloadStates.ChangesSince(int(tick), w)
|
||||||
|
}
|
@ -196,6 +196,7 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, error) {
|
|||||||
diagnostics.SetupFlagsAccess(ctx)
|
diagnostics.SetupFlagsAccess(ctx)
|
||||||
diagnostics.SetupVersionAccess()
|
diagnostics.SetupVersionAccess()
|
||||||
diagnostics.SetupBlockBodyDownload()
|
diagnostics.SetupBlockBodyDownload()
|
||||||
|
diagnostics.SetupHeaderDownloadStats()
|
||||||
}
|
}
|
||||||
|
|
||||||
// pprof server
|
// pprof server
|
||||||
|
@ -20,6 +20,7 @@ import (
|
|||||||
"github.com/ledgerwatch/log/v3"
|
"github.com/ledgerwatch/log/v3"
|
||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
|
"github.com/ledgerwatch/erigon/dataflow"
|
||||||
"github.com/ledgerwatch/erigon/turbo/services"
|
"github.com/ledgerwatch/erigon/turbo/services"
|
||||||
|
|
||||||
"github.com/ledgerwatch/erigon/common"
|
"github.com/ledgerwatch/erigon/common"
|
||||||
@ -391,11 +392,15 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequ
|
|||||||
var req *HeaderRequest
|
var req *HeaderRequest
|
||||||
hd.anchorTree.Ascend(func(anchor *Anchor) bool {
|
hd.anchorTree.Ascend(func(anchor *Anchor) bool {
|
||||||
if anchor.nextRetryTime.After(currentTime) {
|
if anchor.nextRetryTime.After(currentTime) {
|
||||||
|
// We are not ready to retry this anchor yet
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(anchor.blockHeight-1, dataflow.HeaderRetryNotReady)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if anchor.timeouts >= 10 {
|
if anchor.timeouts >= 10 {
|
||||||
// Ancestors of this anchor seem to be unavailable, invalidate and move on
|
// Ancestors of this anchor seem to be unavailable, invalidate and move on
|
||||||
hd.invalidateAnchor(anchor, "suspected unavailability")
|
hd.invalidateAnchor(anchor, "suspected unavailability")
|
||||||
|
// Add header invalidate
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(anchor.blockHeight-1, dataflow.HeaderInvalidated)
|
||||||
penalties = append(penalties, PenaltyItem{Penalty: AbandonedAnchorPenalty, PeerID: anchor.peerID})
|
penalties = append(penalties, PenaltyItem{Penalty: AbandonedAnchorPenalty, PeerID: anchor.peerID})
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -407,6 +412,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequ
|
|||||||
Skip: 0,
|
Skip: 0,
|
||||||
Reverse: true,
|
Reverse: true,
|
||||||
}
|
}
|
||||||
|
// Add header requested
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
return req, penalties
|
return req, penalties
|
||||||
@ -415,6 +421,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequ
|
|||||||
func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) {
|
func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) {
|
||||||
anchor := hd.posAnchor
|
anchor := hd.posAnchor
|
||||||
if anchor == nil {
|
if anchor == nil {
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(anchor.blockHeight-1, dataflow.HeaderEmpty)
|
||||||
hd.logger.Debug("[downloader] No PoS anchor")
|
hd.logger.Debug("[downloader] No PoS anchor")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -449,6 +456,7 @@ func (hd *HeaderDownload) UpdateStats(req *HeaderRequest, skeleton bool, peer [6
|
|||||||
defer hd.lock.Unlock()
|
defer hd.lock.Unlock()
|
||||||
if skeleton {
|
if skeleton {
|
||||||
hd.stats.SkeletonRequests++
|
hd.stats.SkeletonRequests++
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(req.Number, dataflow.HeaderSkeletonRequested)
|
||||||
if hd.stats.SkeletonReqMinBlock == 0 || req.Number < hd.stats.SkeletonReqMinBlock {
|
if hd.stats.SkeletonReqMinBlock == 0 || req.Number < hd.stats.SkeletonReqMinBlock {
|
||||||
hd.stats.SkeletonReqMinBlock = req.Number
|
hd.stats.SkeletonReqMinBlock = req.Number
|
||||||
}
|
}
|
||||||
@ -457,6 +465,7 @@ func (hd *HeaderDownload) UpdateStats(req *HeaderRequest, skeleton bool, peer [6
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
hd.stats.Requests++
|
hd.stats.Requests++
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(req.Number, dataflow.HeaderRequested)
|
||||||
// We know that req is reverse request, with Skip == 0, therefore comparing Number with reqMax
|
// We know that req is reverse request, with Skip == 0, therefore comparing Number with reqMax
|
||||||
if req.Number > hd.stats.ReqMaxBlock {
|
if req.Number > hd.stats.ReqMaxBlock {
|
||||||
hd.stats.ReqMaxBlock = req.Number
|
hd.stats.ReqMaxBlock = req.Number
|
||||||
@ -519,6 +528,7 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult
|
|||||||
hd.moveLinkToQueue(link, NoQueue)
|
hd.moveLinkToQueue(link, NoQueue)
|
||||||
delete(hd.links, link.hash)
|
delete(hd.links, link.hash)
|
||||||
hd.removeUpwards(link)
|
hd.removeUpwards(link)
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderBad)
|
||||||
hd.logger.Warn("[downloader] Rejected header marked as bad", "hash", link.hash, "height", link.blockHeight)
|
hd.logger.Warn("[downloader] Rejected header marked as bad", "hash", link.hash, "height", link.blockHeight)
|
||||||
return true, false, 0, lastTime, nil
|
return true, false, 0, lastTime, nil
|
||||||
}
|
}
|
||||||
@ -534,6 +544,7 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult
|
|||||||
hd.moveLinkToQueue(link, NoQueue)
|
hd.moveLinkToQueue(link, NoQueue)
|
||||||
delete(hd.links, link.hash)
|
delete(hd.links, link.hash)
|
||||||
hd.removeUpwards(link)
|
hd.removeUpwards(link)
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderEvicted)
|
||||||
return true, false, 0, lastTime, nil
|
return true, false, 0, lastTime, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -560,6 +571,7 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult
|
|||||||
if td.Cmp(terminalTotalDifficulty) >= 0 {
|
if td.Cmp(terminalTotalDifficulty) >= 0 {
|
||||||
hd.highestInDb = link.blockHeight
|
hd.highestInDb = link.blockHeight
|
||||||
log.Info(POSPandaBanner)
|
log.Info(POSPandaBanner)
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderInserted)
|
||||||
return true, true, 0, lastTime, nil
|
return true, true, 0, lastTime, nil
|
||||||
}
|
}
|
||||||
returnTd = td
|
returnTd = td
|
||||||
@ -584,11 +596,13 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if link.blockHeight == hd.latestMinedBlockNumber {
|
if link.blockHeight == hd.latestMinedBlockNumber {
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderInserted)
|
||||||
return false, true, 0, lastTime, nil
|
return false, true, 0, lastTime, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for hd.persistedLinkQueue.Len() > hd.persistedLinkLimit {
|
for hd.persistedLinkQueue.Len() > hd.persistedLinkLimit {
|
||||||
link := heap.Pop(&hd.persistedLinkQueue).(*Link)
|
link := heap.Pop(&hd.persistedLinkQueue).(*Link)
|
||||||
|
dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderEvicted)
|
||||||
delete(hd.links, link.hash)
|
delete(hd.links, link.hash)
|
||||||
for child := link.fChild; child != nil; child, child.next = child.next, nil {
|
for child := link.fChild; child != nil; child, child.next = child.next, nil {
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user