metrics: separate usage of prometheus counter and gauge interfaces (#8793)

This commit is contained in:
milen 2023-11-24 15:15:12 +00:00 committed by GitHub
parent 748359cf72
commit 230b013096
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 332 additions and 302 deletions

View File

@ -3,22 +3,22 @@ package main
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/ledgerwatch/erigon-lib/metrics"
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"strings" "strings"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/log/v3"
"github.com/pelletier/go-toml" "github.com/pelletier/go-toml"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/cmd/utils"
"github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/params"
erigonapp "github.com/ledgerwatch/erigon/turbo/app" erigonapp "github.com/ledgerwatch/erigon/turbo/app"
erigoncli "github.com/ledgerwatch/erigon/turbo/cli" erigoncli "github.com/ledgerwatch/erigon/turbo/cli"
@ -67,7 +67,7 @@ func runErigon(cliCtx *cli.Context) error {
// initializing the node and providing the current git commit there // initializing the node and providing the current git commit there
logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
erigonInfoGauge := metrics.GetOrCreateCounter(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit)) erigonInfoGauge := metrics.GetOrCreateGauge(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit))
erigonInfoGauge.Set(1) erigonInfoGauge.Set(1)
nodeCfg := node.NewNodConfigUrfave(cliCtx, logger) nodeCfg := node.NewNodConfigUrfave(cliCtx, logger)

View File

@ -6,12 +6,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/generics" "github.com/ledgerwatch/erigon/consensus/bor/finality/generics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
"github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/log/v3"
) )
var ( var (
@ -31,7 +31,7 @@ var (
errEndBlock = errors.New("failed to get end block") errEndBlock = errors.New("failed to get end block")
//Metrics for collecting the rewindLength //Metrics for collecting the rewindLength
rewindLengthMeter = metrics.GetOrCreateCounter("chain_autorewind_length") rewindLengthMeter = metrics.GetOrCreateGauge("chain_autorewind_length")
) )
type borVerifier struct { type borVerifier struct {
@ -157,7 +157,7 @@ func borVerify(ctx context.Context, config *config, start uint64, end uint64, ha
// Stop the miner if the mining process is running and rewind back the chain // Stop the miner if the mining process is running and rewind back the chain
func rewindBack(head uint64, rewindTo uint64) { func rewindBack(head uint64, rewindTo uint64) {
rewindLengthMeter.Set(head - rewindTo) rewindLengthMeter.SetUint64(head - rewindTo)
// Chain cannot be rewinded from this routine // Chain cannot be rewinded from this routine
// hence we are using a shared variable // hence we are using a shared variable

View File

@ -16,13 +16,10 @@ type checkpointService interface {
var ( var (
//Metrics for collecting the whitelisted milestone number //Metrics for collecting the whitelisted milestone number
whitelistedCheckpointNumberMeter = metrics.GetOrCreateCounter("chain_checkpoint_latest", true) whitelistedCheckpointNumberMeter = metrics.GetOrCreateGauge("chain_checkpoint_latest")
//Metrics for collecting the number of invalid chains received //Metrics for collecting the number of invalid chains received
CheckpointChainMeter = metrics.GetOrCreateCounter("chain_checkpoint_isvalidchain") checkpointChainMeter = metrics.GetOrCreateGauge("chain_checkpoint_isvalidchain")
//Metrics for collecting the number of valid peers received
CheckpointPeerMeter = metrics.GetOrCreateCounter("chain_checkpoint_isvalidpeer")
) )
// IsValidChain checks the validity of chain by comparing it // IsValidChain checks the validity of chain by comparing it
@ -34,9 +31,9 @@ func (w *checkpoint) IsValidChain(currentHeader uint64, chain []*types.Header) b
res := w.finality.IsValidChain(currentHeader, chain) res := w.finality.IsValidChain(currentHeader, chain)
if res { if res {
CheckpointChainMeter.Add(1) checkpointChainMeter.Inc()
} else { } else {
CheckpointPeerMeter.Add(-1) checkpointChainMeter.Dec()
} }
return res return res
@ -48,5 +45,5 @@ func (w *checkpoint) Process(block uint64, hash common.Hash) {
w.finality.Process(block, hash) w.finality.Process(block, hash)
whitelistedCheckpointNumberMeter.Set(block) whitelistedCheckpointNumberMeter.SetUint64(block)
} }

View File

@ -1,12 +1,13 @@
package whitelist package whitelist
import ( import (
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/flags" "github.com/ledgerwatch/erigon/consensus/bor/finality/flags"
"github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb" "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
"github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/log/v3"
) )
type milestone struct { type milestone struct {
@ -35,16 +36,16 @@ type milestoneService interface {
var ( var (
//Metrics for collecting the whitelisted milestone number //Metrics for collecting the whitelisted milestone number
whitelistedMilestoneMeter = metrics.GetOrCreateCounter("chain_milestone_latest", true) whitelistedMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_latest")
//Metrics for collecting the future milestone number //Metrics for collecting the future milestone number
FutureMilestoneMeter = metrics.GetOrCreateCounter("chain_milestone_future", true) futureMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_future")
//Metrics for collecting the length of the MilestoneIds map //Metrics for collecting the length of the MilestoneIds map
MilestoneIdsLengthMeter = metrics.GetOrCreateCounter("chain_milestone_idslength", true) milestoneIdsLengthMeter = metrics.GetOrCreateGauge("chain_milestone_idslength")
//Metrics for collecting the number of valid chains received //Metrics for collecting the number of valid chains received
MilestoneChainMeter = metrics.GetOrCreateCounter("chain_milestone_isvalidchain") milestoneChainMeter = metrics.GetOrCreateGauge("chain_milestone_isvalidchain")
) )
// IsValidChain checks the validity of chain by comparing it // IsValidChain checks the validity of chain by comparing it
@ -58,12 +59,12 @@ func (m *milestone) IsValidChain(currentHeader uint64, chain []*types.Header) bo
m.finality.RLock() m.finality.RLock()
defer m.finality.RUnlock() defer m.finality.RUnlock()
var isValid bool = false var isValid = false
defer func() { defer func() {
if isValid { if isValid {
MilestoneChainMeter.Add(1) milestoneChainMeter.Inc()
} else { } else {
MilestoneChainMeter.Add(-1) milestoneChainMeter.Dec()
} }
}() }()
@ -102,12 +103,12 @@ func (m *milestone) Process(block uint64, hash common.Hash) {
} }
} }
whitelistedMilestoneMeter.Set(block) whitelistedMilestoneMeter.SetUint64(block)
m.UnlockSprint(block) m.UnlockSprint(block)
} }
// This function will Lock the mutex at the time of voting // LockMutex This function will Lock the mutex at the time of voting
func (m *milestone) LockMutex(endBlockNum uint64) bool { func (m *milestone) LockMutex(endBlockNum uint64) bool {
m.finality.Lock() m.finality.Lock()
@ -124,7 +125,7 @@ func (m *milestone) LockMutex(endBlockNum uint64) bool {
return true return true
} }
// This function will unlock the mutex locked in LockMutex // UnlockMutex This function will unlock the mutex locked in LockMutex
func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uint64, endBlockHash common.Hash) { func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uint64, endBlockHash common.Hash) {
m.Locked = m.Locked || doLock m.Locked = m.Locked || doLock
@ -141,13 +142,12 @@ func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uin
log.Error("Error in writing lock data of milestone to db", "err", err) log.Error("Error in writing lock data of milestone to db", "err", err)
} }
milestoneIDLength := uint64(len(m.LockedMilestoneIDs)) milestoneIdsLengthMeter.SetInt(len(m.LockedMilestoneIDs))
MilestoneIdsLengthMeter.Set(milestoneIDLength)
m.finality.Unlock() m.finality.Unlock()
} }
// This function will unlock the locked sprint // UnlockSprint This function will unlock the locked sprint
func (m *milestone) UnlockSprint(endBlockNum uint64) { func (m *milestone) UnlockSprint(endBlockNum uint64) {
if endBlockNum < m.LockedMilestoneNumber { if endBlockNum < m.LockedMilestoneNumber {
return return
@ -163,7 +163,7 @@ func (m *milestone) UnlockSprint(endBlockNum uint64) {
} }
} }
// This function will remove the stored milestoneID // RemoveMilestoneID This function will remove the stored milestoneID
func (m *milestone) RemoveMilestoneID(milestoneId string) { func (m *milestone) RemoveMilestoneID(milestoneId string) {
m.finality.Lock() m.finality.Lock()
defer m.finality.Unlock() defer m.finality.Unlock()
@ -182,7 +182,7 @@ func (m *milestone) RemoveMilestoneID(milestoneId string) {
} }
// This will check whether the incoming chain matches the locked sprint hash // IsReorgAllowed This will check whether the incoming chain matches the locked sprint hash
func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber uint64, lockedMilestoneHash common.Hash) bool { func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber uint64, lockedMilestoneHash common.Hash) bool {
if chain[len(chain)-1].Number.Uint64() <= lockedMilestoneNumber { //Can't reorg if the end block of incoming if chain[len(chain)-1].Number.Uint64() <= lockedMilestoneNumber { //Can't reorg if the end block of incoming
return false //chain is less than locked sprint number return false //chain is less than locked sprint number
@ -197,7 +197,7 @@ func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber
return true return true
} }
// This will return the list of milestoneIDs stored. // GetMilestoneIDsList This will return the list of milestoneIDs stored.
func (m *milestone) GetMilestoneIDsList() []string { func (m *milestone) GetMilestoneIDsList() []string {
m.finality.RLock() m.finality.RLock()
defer m.finality.RUnlock() defer m.finality.RUnlock()
@ -276,7 +276,7 @@ func (m *milestone) enqueueFutureMilestone(key uint64, hash common.Hash) {
log.Error("[bor] Error in writing future milestone data to db", "err", err) log.Error("[bor] Error in writing future milestone data to db", "err", err)
} }
FutureMilestoneMeter.Set(key) futureMilestoneMeter.SetUint64(key)
} }
// DequeueFutureMilestone remove the future milestone entry from the list. // DequeueFutureMilestone remove the future milestone entry from the list.

View File

@ -2,8 +2,9 @@ package heimdall
import ( import (
"context" "context"
"github.com/ledgerwatch/erigon-lib/metrics"
"time" "time"
"github.com/ledgerwatch/erigon-lib/metrics"
) )
type ( type (
@ -11,7 +12,7 @@ type (
requestType string requestType string
meter struct { meter struct {
request map[bool]metrics.Counter // map[isSuccessful]metrics.Meter request map[bool]metrics.Gauge
timer metrics.Summary timer metrics.Summary
} }
) )
@ -40,30 +41,30 @@ func getRequestType(ctx context.Context) (requestType, bool) {
var ( var (
requestMeters = map[requestType]meter{ requestMeters = map[requestType]meter{
stateSyncRequest: { stateSyncRequest: {
request: map[bool]metrics.Counter{ request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateCounter("client_requests_statesync_valid"), true: metrics.GetOrCreateGauge("client_requests_statesync_valid"),
false: metrics.GetOrCreateCounter("client_requests_statesync_invalid"), false: metrics.GetOrCreateGauge("client_requests_statesync_invalid"),
}, },
timer: metrics.GetOrCreateSummary("client_requests_statesync_duration"), timer: metrics.GetOrCreateSummary("client_requests_statesync_duration"),
}, },
spanRequest: { spanRequest: {
request: map[bool]metrics.Counter{ request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateCounter("client_requests_span_valid"), true: metrics.GetOrCreateGauge("client_requests_span_valid"),
false: metrics.GetOrCreateCounter("client_requests_span_invalid"), false: metrics.GetOrCreateGauge("client_requests_span_invalid"),
}, },
timer: metrics.GetOrCreateSummary("client_requests_span_duration"), timer: metrics.GetOrCreateSummary("client_requests_span_duration"),
}, },
checkpointRequest: { checkpointRequest: {
request: map[bool]metrics.Counter{ request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateCounter("client_requests_checkpoint_valid"), true: metrics.GetOrCreateGauge("client_requests_checkpoint_valid"),
false: metrics.GetOrCreateCounter("client_requests_checkpoint_invalid"), false: metrics.GetOrCreateGauge("client_requests_checkpoint_invalid"),
}, },
timer: metrics.GetOrCreateSummary("client_requests_checkpoint_duration"), timer: metrics.GetOrCreateSummary("client_requests_checkpoint_duration"),
}, },
checkpointCountRequest: { checkpointCountRequest: {
request: map[bool]metrics.Counter{ request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateCounter("client_requests_checkpointcount_valid"), true: metrics.GetOrCreateGauge("client_requests_checkpointcount_valid"),
false: metrics.GetOrCreateCounter("client_requests_checkpointcount_invalid"), false: metrics.GetOrCreateGauge("client_requests_checkpointcount_invalid"),
}, },
timer: metrics.GetOrCreateSummary("client_requests_checkpointcount_duration"), timer: metrics.GetOrCreateSummary("client_requests_checkpointcount_duration"),
}, },

View File

@ -652,7 +652,9 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, ag
return nil return nil
} }
func (rs *StateV3) DoneCount() uint64 { return execTxsDone.Get() } func (rs *StateV3) DoneCount() uint64 {
return execTxsDone.GetValueUint64()
}
func (rs *StateV3) SizeEstimate() (r uint64) { func (rs *StateV3) SizeEstimate() (r uint64) {
rs.lock.RLock() rs.lock.RLock()

View File

@ -83,11 +83,11 @@ const Unlim int = -1
var ( var (
ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.ChaindataDeprecatedTables can be deleted") ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.ChaindataDeprecatedTables can be deleted")
DbSize = metrics.GetOrCreateCounter(`db_size`) //nolint DbSize = metrics.GetOrCreateGauge(`db_size`) //nolint
TxLimit = metrics.GetOrCreateCounter(`tx_limit`) //nolint TxLimit = metrics.GetOrCreateGauge(`tx_limit`) //nolint
TxSpill = metrics.GetOrCreateCounter(`tx_spill`) //nolint TxSpill = metrics.GetOrCreateGauge(`tx_spill`) //nolint
TxUnspill = metrics.GetOrCreateCounter(`tx_unspill`) //nolint TxUnspill = metrics.GetOrCreateGauge(`tx_unspill`) //nolint
TxDirty = metrics.GetOrCreateCounter(`tx_dirty`) //nolint TxDirty = metrics.GetOrCreateGauge(`tx_dirty`) //nolint
DbCommitPreparation = metrics.GetOrCreateSummary(`db_commit_seconds{phase="preparation"}`) //nolint DbCommitPreparation = metrics.GetOrCreateSummary(`db_commit_seconds{phase="preparation"}`) //nolint
//DbGCWallClock = metrics.GetOrCreateSummary(`db_commit_seconds{phase="gc_wall_clock"}`) //nolint //DbGCWallClock = metrics.GetOrCreateSummary(`db_commit_seconds{phase="gc_wall_clock"}`) //nolint
@ -98,14 +98,14 @@ var (
DbCommitEnding = metrics.GetOrCreateSummary(`db_commit_seconds{phase="ending"}`) //nolint DbCommitEnding = metrics.GetOrCreateSummary(`db_commit_seconds{phase="ending"}`) //nolint
DbCommitTotal = metrics.GetOrCreateSummary(`db_commit_seconds{phase="total"}`) //nolint DbCommitTotal = metrics.GetOrCreateSummary(`db_commit_seconds{phase="total"}`) //nolint
DbPgopsNewly = metrics.GetOrCreateCounter(`db_pgops{phase="newly"}`) //nolint DbPgopsNewly = metrics.GetOrCreateGauge(`db_pgops{phase="newly"}`) //nolint
DbPgopsCow = metrics.GetOrCreateCounter(`db_pgops{phase="cow"}`) //nolint DbPgopsCow = metrics.GetOrCreateGauge(`db_pgops{phase="cow"}`) //nolint
DbPgopsClone = metrics.GetOrCreateCounter(`db_pgops{phase="clone"}`) //nolint DbPgopsClone = metrics.GetOrCreateGauge(`db_pgops{phase="clone"}`) //nolint
DbPgopsSplit = metrics.GetOrCreateCounter(`db_pgops{phase="split"}`) //nolint DbPgopsSplit = metrics.GetOrCreateGauge(`db_pgops{phase="split"}`) //nolint
DbPgopsMerge = metrics.GetOrCreateCounter(`db_pgops{phase="merge"}`) //nolint DbPgopsMerge = metrics.GetOrCreateGauge(`db_pgops{phase="merge"}`) //nolint
DbPgopsSpill = metrics.GetOrCreateCounter(`db_pgops{phase="spill"}`) //nolint DbPgopsSpill = metrics.GetOrCreateGauge(`db_pgops{phase="spill"}`) //nolint
DbPgopsUnspill = metrics.GetOrCreateCounter(`db_pgops{phase="unspill"}`) //nolint DbPgopsUnspill = metrics.GetOrCreateGauge(`db_pgops{phase="unspill"}`) //nolint
DbPgopsWops = metrics.GetOrCreateCounter(`db_pgops{phase="wops"}`) //nolint DbPgopsWops = metrics.GetOrCreateGauge(`db_pgops{phase="wops"}`) //nolint
/* /*
DbPgopsPrefault = metrics.NewCounter(`db_pgops{phase="prefault"}`) //nolint DbPgopsPrefault = metrics.NewCounter(`db_pgops{phase="prefault"}`) //nolint
DbPgopsMinicore = metrics.NewCounter(`db_pgops{phase="minicore"}`) //nolint DbPgopsMinicore = metrics.NewCounter(`db_pgops{phase="minicore"}`) //nolint
@ -139,9 +139,9 @@ var (
//DbGcSelfPnlMergeVolume = metrics.NewCounter(`db_gc_pnl{phase="self_merge_volume"}`) //nolint //DbGcSelfPnlMergeVolume = metrics.NewCounter(`db_gc_pnl{phase="self_merge_volume"}`) //nolint
//DbGcSelfPnlMergeCalls = metrics.NewCounter(`db_gc_pnl{phase="slef_merge_calls"}`) //nolint //DbGcSelfPnlMergeCalls = metrics.NewCounter(`db_gc_pnl{phase="slef_merge_calls"}`) //nolint
GcLeafMetric = metrics.GetOrCreateCounter(`db_gc_leaf`) //nolint GcLeafMetric = metrics.GetOrCreateGauge(`db_gc_leaf`) //nolint
GcOverflowMetric = metrics.GetOrCreateCounter(`db_gc_overflow`) //nolint GcOverflowMetric = metrics.GetOrCreateGauge(`db_gc_overflow`) //nolint
GcPagesMetric = metrics.GetOrCreateCounter(`db_gc_pages`) //nolint GcPagesMetric = metrics.GetOrCreateGauge(`db_gc_pages`) //nolint
) )

View File

@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package kvcache package kvcache
import ( import (
@ -100,10 +101,10 @@ type CacheView interface {
// - changes in Non-Canonical View SHOULD NOT reflect in stateEvict // - changes in Non-Canonical View SHOULD NOT reflect in stateEvict
type Coherent struct { type Coherent struct {
hasher hash.Hash hasher hash.Hash
codeEvictLen metrics.Counter codeEvictLen metrics.Gauge
codeKeys metrics.Counter codeKeys metrics.Gauge
keys metrics.Counter keys metrics.Gauge
evict metrics.Counter evict metrics.Gauge
latestStateView *CoherentRoot latestStateView *CoherentRoot
codeMiss metrics.Counter codeMiss metrics.Counter
timeout metrics.Counter timeout metrics.Counter
@ -187,12 +188,12 @@ func New(cfg CoherentConfig) *Coherent {
miss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="miss",name="%s"}`, cfg.MetricsLabel)), miss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
hits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="hit",name="%s"}`, cfg.MetricsLabel)), hits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
timeout: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_timeout_total{name="%s"}`, cfg.MetricsLabel)), timeout: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_timeout_total{name="%s"}`, cfg.MetricsLabel)),
keys: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_keys_total{name="%s"}`, cfg.MetricsLabel)), keys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_keys_total{name="%s"}`, cfg.MetricsLabel)),
evict: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_list_total{name="%s"}`, cfg.MetricsLabel)), evict: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_list_total{name="%s"}`, cfg.MetricsLabel)),
codeMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="miss",name="%s"}`, cfg.MetricsLabel)), codeMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
codeHits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="hit",name="%s"}`, cfg.MetricsLabel)), codeHits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
codeKeys: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_keys_total{name="%s"}`, cfg.MetricsLabel)), codeKeys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_keys_total{name="%s"}`, cfg.MetricsLabel)),
codeEvictLen: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_list_total{name="%s"}`, cfg.MetricsLabel)), codeEvictLen: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_list_total{name="%s"}`, cfg.MetricsLabel)),
} }
} }
@ -260,10 +261,10 @@ func (c *Coherent) advanceRoot(stateVersionID uint64) (r *CoherentRoot) {
c.latestStateVersionID = stateVersionID c.latestStateVersionID = stateVersionID
c.latestStateView = r c.latestStateView = r
c.keys.Set(uint64(c.latestStateView.cache.Len())) c.keys.SetInt(c.latestStateView.cache.Len())
c.codeKeys.Set(uint64(c.latestStateView.codeCache.Len())) c.codeKeys.SetInt(c.latestStateView.codeCache.Len())
c.evict.Set(uint64(c.stateEvict.Len())) c.evict.SetInt(c.stateEvict.Len())
c.codeEvictLen.Set(uint64(c.codeEvict.Len())) c.codeEvictLen.SetInt(c.codeEvict.Len())
return r return r
} }

View File

@ -34,15 +34,16 @@ import (
"github.com/c2h5oh/datasize" "github.com/c2h5oh/datasize"
"github.com/erigontech/mdbx-go/mdbx" "github.com/erigontech/mdbx-go/mdbx"
stack2 "github.com/go-stack/stack" stack2 "github.com/go-stack/stack"
"github.com/ledgerwatch/log/v3"
"github.com/pbnjay/memory"
"golang.org/x/exp/maps"
"golang.org/x/sync/semaphore"
"github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/iter"
"github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/order"
"github.com/ledgerwatch/log/v3"
"github.com/pbnjay/memory"
"golang.org/x/exp/maps"
"golang.org/x/sync/semaphore"
) )
const NonExistingDBI kv.DBI = 999_999_999 const NonExistingDBI kv.DBI = 999_999_999
@ -630,33 +631,33 @@ func (tx *MdbxTx) CollectMetrics() {
} }
} }
kv.DbSize.Set(info.Geo.Current) kv.DbSize.SetUint64(info.Geo.Current)
kv.DbPgopsNewly.Set(info.PageOps.Newly) kv.DbPgopsNewly.SetUint64(info.PageOps.Newly)
kv.DbPgopsCow.Set(info.PageOps.Cow) kv.DbPgopsCow.SetUint64(info.PageOps.Cow)
kv.DbPgopsClone.Set(info.PageOps.Clone) kv.DbPgopsClone.SetUint64(info.PageOps.Clone)
kv.DbPgopsSplit.Set(info.PageOps.Split) kv.DbPgopsSplit.SetUint64(info.PageOps.Split)
kv.DbPgopsMerge.Set(info.PageOps.Merge) kv.DbPgopsMerge.SetUint64(info.PageOps.Merge)
kv.DbPgopsSpill.Set(info.PageOps.Spill) kv.DbPgopsSpill.SetUint64(info.PageOps.Spill)
kv.DbPgopsUnspill.Set(info.PageOps.Unspill) kv.DbPgopsUnspill.SetUint64(info.PageOps.Unspill)
kv.DbPgopsWops.Set(info.PageOps.Wops) kv.DbPgopsWops.SetUint64(info.PageOps.Wops)
txInfo, err := tx.tx.Info(true) txInfo, err := tx.tx.Info(true)
if err != nil { if err != nil {
return return
} }
kv.TxDirty.Set(txInfo.SpaceDirty) kv.TxDirty.SetUint64(txInfo.SpaceDirty)
kv.TxLimit.Set(tx.db.txSize) kv.TxLimit.SetUint64(tx.db.txSize)
kv.TxSpill.Set(txInfo.Spill) kv.TxSpill.SetUint64(txInfo.Spill)
kv.TxUnspill.Set(txInfo.Unspill) kv.TxUnspill.SetUint64(txInfo.Unspill)
gc, err := tx.BucketStat("gc") gc, err := tx.BucketStat("gc")
if err != nil { if err != nil {
return return
} }
kv.GcLeafMetric.Set(gc.LeafPages) kv.GcLeafMetric.SetUint64(gc.LeafPages)
kv.GcOverflowMetric.Set(gc.OverflowPages) kv.GcOverflowMetric.SetUint64(gc.OverflowPages)
kv.GcPagesMetric.Set((gc.LeafPages + gc.OverflowPages) * tx.db.opts.pageSize / 8) kv.GcPagesMetric.SetUint64((gc.LeafPages + gc.OverflowPages) * tx.db.opts.pageSize / 8)
} }
// ListBuckets - all buckets stored as keys of un-named bucket // ListBuckets - all buckets stored as keys of un-named bucket

View File

@ -0,0 +1,65 @@
package metrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// Counter extends the standard prometheus.Counter with value read-back
// (via ValueGetter) and integer-typed convenience adders, so callers do
// not have to cast to float64 at every call site.
type Counter interface {
	prometheus.Counter
	ValueGetter
	// AddInt adds an int delta to the counter value.
	AddInt(v int)
	// AddUint64 adds a uint64 delta to the counter value.
	AddUint64(v uint64)
}
// counter is the default Counter implementation: a thin wrapper that
// embeds a prometheus.Counter and layers value read-back and integer
// convenience helpers on top of it.
type counter struct {
	prometheus.Counter
}
// GetValue reports the current native float64 value held by this counter.
// It panics when the underlying metric cannot be serialised, which signals
// a programming error rather than a recoverable runtime condition.
func (c *counter) GetValue() float64 {
	m := &dto.Metric{}
	err := c.Write(m)
	if err != nil {
		panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
	}
	return m.GetCounter().GetValue()
}
// GetValueUint64 returns the counter's native float64 value converted to
// uint64, for callers that want an integral reading.
func (c *counter) GetValueUint64() uint64 {
	v := c.GetValue()
	return uint64(v)
}
// AddInt increments the counter's native float64 value by the given int
// delta.
//
// Convenience wrapper over Add for nicer call sites; the conversion is
// exact for magnitudes up to 2^53 (the float64 mantissa width), which
// covers every current usage in the codebase.
//
// Should exact accuracy beyond 2^53 ever be required, a dedicated
// integer-backed counter satisfying the Counter interface can be
// introduced instead.
func (c *counter) AddInt(delta int) {
	c.Add(float64(delta))
}
// AddUint64 increments the counter's native float64 value by the given
// uint64 delta.
//
// Convenience wrapper over Add for nicer call sites; the conversion is
// exact for values up to 2^53 (the float64 mantissa width), which covers
// every current usage in the codebase.
//
// Should exact accuracy beyond 2^53 ever be required, a dedicated
// integer-backed counter satisfying the Counter interface can be
// introduced instead.
func (c *counter) AddUint64(delta uint64) {
	c.Add(float64(delta))
}

View File

@ -0,0 +1,76 @@
package metrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// Gauge extends the standard prometheus.Gauge with value read-back
// (via ValueGetter) and integer-typed convenience setters, so callers do
// not have to cast to float64 at every call site.
type Gauge interface {
	prometheus.Gauge
	ValueGetter
	// SetUint32 sets the gauge from a uint32 value.
	SetUint32(v uint32)
	// SetUint64 sets the gauge from a uint64 value.
	SetUint64(v uint64)
	// SetInt sets the gauge from an int value.
	SetInt(v int)
}
// gauge is the default Gauge implementation: a thin wrapper that embeds a
// prometheus.Gauge and layers value read-back and integer convenience
// setters on top of it.
type gauge struct {
	prometheus.Gauge
}
// GetValue reports the current native float64 value held by this gauge.
// It panics when the underlying metric cannot be serialised, which signals
// a programming error rather than a recoverable runtime condition.
func (g *gauge) GetValue() float64 {
	m := &dto.Metric{}
	err := g.Write(m)
	if err != nil {
		panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
	}
	return m.GetGauge().GetValue()
}
// GetValueUint64 returns the gauge's native float64 value converted to
// uint64, for callers that want an integral reading.
func (g *gauge) GetValueUint64() uint64 {
	v := g.GetValue()
	return uint64(v)
}
// SetUint32 sets the gauge from a uint32 value. Under the hood the value
// is converted to float64, the native type of prometheus gauges; every
// uint32 is exactly representable in a float64, so no precision is lost.
//
// Convenience wrapper over Set for nicer call sites.
func (g *gauge) SetUint32(val uint32) {
	g.Set(float64(val))
}
// SetUint64 sets the gauge from a uint64 value. Under the hood the value
// is converted to float64, the native type of prometheus gauges.
//
// Convenience wrapper over Set for nicer call sites; the conversion is
// exact for values up to 2^53 (the float64 mantissa width), which covers
// every current usage in the codebase.
//
// Should exact accuracy beyond 2^53 ever be required, a dedicated
// uint-backed gauge satisfying the Gauge interface can be introduced
// instead.
func (g *gauge) SetUint64(val uint64) {
	g.Set(float64(val))
}
// SetInt sets the gauge from an int value. Under the hood the value is
// converted to float64, the native type of prometheus gauges.
//
// Convenience wrapper over Set for nicer call sites; the conversion is
// exact for magnitudes up to 2^53 (the float64 mantissa width), which
// covers every current usage in the codebase.
//
// Should exact accuracy beyond 2^53 ever be required, a dedicated
// int-backed gauge satisfying the Gauge interface can be introduced
// instead.
func (g *gauge) SetInt(val int) {
	g.Set(float64(val))
}

View File

@ -5,7 +5,6 @@ import (
"time" "time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
) )
type Histogram interface { type Histogram interface {
@ -22,34 +21,6 @@ type Summary interface {
Histogram Histogram
} }
type Counter interface {
Inc()
Dec()
Add(n int)
Set(n uint64)
Get() uint64
}
type intCounter struct {
prometheus.Gauge
}
func (c intCounter) Add(n int) {
c.Gauge.Add(float64(n))
}
func (c intCounter) Set(n uint64) {
c.Gauge.Set(float64(n))
}
func (c intCounter) Get() uint64 {
var m dto.Metric
if err := c.Gauge.Write(&m); err != nil {
panic(fmt.Errorf("calling intCounter.Get on invalid metric: %w", err))
}
return uint64(m.GetGauge().GetValue())
}
// NewCounter registers and returns new counter with the given name. // NewCounter registers and returns new counter with the given name.
// //
// name must be valid Prometheus-compatible metric with possible labels. // name must be valid Prometheus-compatible metric with possible labels.
@ -61,12 +32,12 @@ func (c intCounter) Get() uint64 {
// //
// The returned counter is safe to use from concurrent goroutines. // The returned counter is safe to use from concurrent goroutines.
func NewCounter(name string) Counter { func NewCounter(name string) Counter {
counter, err := defaultSet.NewGauge(name) c, err := defaultSet.NewCounter(name)
if err != nil { if err != nil {
panic(fmt.Errorf("could not create new counter: %w", err)) panic(fmt.Errorf("could not create new counter: %w", err))
} }
return intCounter{counter} return &counter{c}
} }
// GetOrCreateCounter returns registered counter with the given name // GetOrCreateCounter returns registered counter with the given name
@ -83,17 +54,16 @@ func NewCounter(name string) Counter {
// The returned counter is safe to use from concurrent goroutines. // The returned counter is safe to use from concurrent goroutines.
// //
// Performance tip: prefer NewCounter instead of GetOrCreateCounter. // Performance tip: prefer NewCounter instead of GetOrCreateCounter.
func GetOrCreateCounter(name string, isGauge ...bool) Counter { func GetOrCreateCounter(name string) Counter {
counter, err := defaultSet.GetOrCreateGauge(name) c, err := defaultSet.GetOrCreateCounter(name)
if err != nil { if err != nil {
panic(fmt.Errorf("could not get or create new counter: %w", err)) panic(fmt.Errorf("could not get or create new counter: %w", err))
} }
return intCounter{counter} return &counter{c}
} }
// NewGaugeFunc registers and returns gauge with the given name, which calls f // NewGauge registers and returns gauge with the given name.
// to obtain gauge value.
// //
// name must be valid Prometheus-compatible metric with possible labels. // name must be valid Prometheus-compatible metric with possible labels.
// For instance, // For instance,
@ -102,19 +72,17 @@ func GetOrCreateCounter(name string, isGauge ...bool) Counter {
// - foo{bar="baz"} // - foo{bar="baz"}
// - foo{bar="baz",aaa="b"} // - foo{bar="baz",aaa="b"}
// //
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines. // The returned gauge is safe to use from concurrent goroutines.
func NewGaugeFunc(name string, f func() float64) prometheus.GaugeFunc { func NewGauge(name string) Gauge {
gf, err := defaultSet.NewGaugeFunc(name, f) g, err := defaultSet.NewGauge(name)
if err != nil { if err != nil {
panic(fmt.Errorf("could not create new gauge func: %w", err)) panic(fmt.Errorf("could not create new gauge: %w", err))
} }
return gf return &gauge{g}
} }
// GetOrCreateGaugeFunc returns registered gauge with the given name // GetOrCreateGauge returns registered gauge with the given name
// or creates new gauge if the registry doesn't contain gauge with // or creates new gauge if the registry doesn't contain gauge with
// the given name. // the given name.
// //
@ -128,13 +96,13 @@ func NewGaugeFunc(name string, f func() float64) prometheus.GaugeFunc {
// The returned gauge is safe to use from concurrent goroutines. // The returned gauge is safe to use from concurrent goroutines.
// //
// Performance tip: prefer NewGauge instead of GetOrCreateGauge. // Performance tip: prefer NewGauge instead of GetOrCreateGauge.
func GetOrCreateGaugeFunc(name string, f func() float64) prometheus.GaugeFunc { func GetOrCreateGauge(name string) Gauge {
gf, err := defaultSet.GetOrCreateGaugeFunc(name, f) g, err := defaultSet.GetOrCreateGauge(name)
if err != nil { if err != nil {
panic(fmt.Errorf("could not get or create new gauge func: %w", err)) panic(fmt.Errorf("could not get or create new gauge: %w", err))
} }
return gf return &gauge{g}
} }
type summary struct { type summary struct {

View File

@ -226,8 +226,7 @@ func (s *Set) GetOrCreateCounter(name string, help ...string) (prometheus.Counte
return c, nil return c, nil
} }
// NewGauge registers and returns gauge with the given name in s, which calls f // NewGauge registers and returns gauge with the given name.
// to obtain gauge value.
// //
// name must be valid Prometheus-compatible metric with possible labels. // name must be valid Prometheus-compatible metric with possible labels.
// For instance, // For instance,
@ -308,91 +307,6 @@ func (s *Set) GetOrCreateGauge(name string, help ...string) (prometheus.Gauge, e
return g, nil return g, nil
} }
// NewGaugeFunc registers and returns gauge with the given name in s, which calls f
// to obtain gauge value.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines.
func (s *Set) NewGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
g, err := newGaugeFunc(name, f, help...)
if err != nil {
return nil, err
}
s.registerMetric(name, g)
return g, nil
}
func newGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
if f == nil {
return nil, fmt.Errorf("f cannot be nil")
}
name, labels, err := parseMetric(name)
if err != nil {
return nil, err
}
return prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: name,
Help: strings.Join(help, " "),
ConstLabels: labels,
}, f), nil
}
// GetOrCreateGaugeFunc returns registered gauge with the given name in s
// or creates new gauge if s doesn't contain gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
func (s *Set) GetOrCreateGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
metric, err := newGaugeFunc(name, f, help...)
if err != nil {
return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
}
nmNew := &namedMetric{
name: name,
metric: metric,
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
g, ok := nm.metric.(prometheus.GaugeFunc)
if !ok {
return nil, fmt.Errorf("metric %q isn't a Gauge. It is %T", name, nm.metric)
}
return g, nil
}
const defaultSummaryWindow = 5 * time.Minute const defaultSummaryWindow = 5 * time.Minute
var defaultSummaryQuantiles = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.97: 0.003, 0.99: 0.001} var defaultSummaryQuantiles = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.97: 0.003, 0.99: 0.001}

View File

@ -0,0 +1,6 @@
package metrics
type ValueGetter interface {
GetValue() float64
GetValueUint64() uint64
}

View File

@ -46,22 +46,22 @@ import (
const StepsInBiggestFile = 32 const StepsInBiggestFile = 32
var ( var (
mxCurrentTx = metrics.GetOrCreateCounter("domain_tx_processed") mxCurrentTx = metrics.GetOrCreateGauge("domain_tx_processed")
mxCurrentBlock = metrics.GetOrCreateCounter("domain_block_current") mxCurrentBlock = metrics.GetOrCreateGauge("domain_block_current")
mxRunningMerges = metrics.GetOrCreateCounter("domain_running_merges") mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges")
mxRunningCollations = metrics.GetOrCreateCounter("domain_running_collations") mxRunningCollations = metrics.GetOrCreateGauge("domain_running_collations")
mxCollateTook = metrics.GetOrCreateHistogram("domain_collate_took") mxCollateTook = metrics.GetOrCreateHistogram("domain_collate_took")
mxPruneTook = metrics.GetOrCreateHistogram("domain_prune_took") mxPruneTook = metrics.GetOrCreateHistogram("domain_prune_took")
mxPruneHistTook = metrics.GetOrCreateHistogram("domain_prune_hist_took") mxPruneHistTook = metrics.GetOrCreateHistogram("domain_prune_hist_took")
mxPruningProgress = metrics.GetOrCreateCounter("domain_pruning_progress") mxPruningProgress = metrics.GetOrCreateGauge("domain_pruning_progress")
mxCollationSize = metrics.GetOrCreateCounter("domain_collation_size") mxCollationSize = metrics.GetOrCreateGauge("domain_collation_size")
mxCollationSizeHist = metrics.GetOrCreateCounter("domain_collation_hist_size") mxCollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size")
mxPruneSize = metrics.GetOrCreateCounter("domain_prune_size") mxPruneSize = metrics.GetOrCreateCounter("domain_prune_size")
mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took")
mxStepCurrent = metrics.GetOrCreateCounter("domain_step_current") mxStepCurrent = metrics.GetOrCreateGauge("domain_step_current")
mxStepTook = metrics.GetOrCreateHistogram("domain_step_took") mxStepTook = metrics.GetOrCreateHistogram("domain_step_took")
mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys") mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys")
mxCommitmentRunning = metrics.GetOrCreateCounter("domain_running_commitment") mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment")
mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took")
mxCommitmentWriteTook = metrics.GetOrCreateHistogram("domain_commitment_write_took") mxCommitmentWriteTook = metrics.GetOrCreateHistogram("domain_commitment_write_took")
mxCommitmentUpdates = metrics.GetOrCreateCounter("domain_commitment_updates") mxCommitmentUpdates = metrics.GetOrCreateCounter("domain_commitment_updates")
@ -304,7 +304,7 @@ func (a *Aggregator) SetTx(tx kv.RwTx) {
} }
func (a *Aggregator) SetTxNum(txNum uint64) { func (a *Aggregator) SetTxNum(txNum uint64) {
mxCurrentTx.Set(txNum) mxCurrentTx.SetUint64(txNum)
a.txNum = txNum a.txNum = txNum
a.accounts.SetTxNum(txNum) a.accounts.SetTxNum(txNum)
@ -319,7 +319,7 @@ func (a *Aggregator) SetTxNum(txNum uint64) {
func (a *Aggregator) SetBlockNum(blockNum uint64) { func (a *Aggregator) SetBlockNum(blockNum uint64) {
a.blockNum = blockNum a.blockNum = blockNum
mxCurrentBlock.Set(blockNum) mxCurrentBlock.SetUint64(blockNum)
} }
func (a *Aggregator) SetWorkers(i int) { func (a *Aggregator) SetWorkers(i int) {
@ -445,7 +445,7 @@ func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
mxCollateTook.UpdateDuration(start) mxCollateTook.UpdateDuration(start)
//mxCollationSize.Set(uint64(collation.valuesComp.Count())) //mxCollationSize.Set(uint64(collation.valuesComp.Count()))
mxCollationSizeHist.Set(uint64(collation.historyComp.Count())) mxCollationSizeHist.SetInt(collation.historyComp.Count())
if err != nil { if err != nil {
collation.Close() collation.Close()
@ -854,7 +854,7 @@ func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []b
saveStateAfter = false saveStateAfter = false
} }
mxCommitmentKeys.Add(int(a.commitment.comKeys)) mxCommitmentKeys.AddUint64(a.commitment.comKeys)
mxCommitmentTook.Update(a.commitment.comTook.Seconds()) mxCommitmentTook.Update(a.commitment.comTook.Seconds())
defer func(t time.Time) { mxCommitmentWriteTook.UpdateDuration(t) }(time.Now()) defer func(t time.Time) { mxCommitmentWriteTook.UpdateDuration(t) }(time.Now())
@ -893,7 +893,7 @@ func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []b
return rootHash, nil return rootHash, nil
} }
// Provides channel which receives commitment hash each time aggregation is occured // AggregatedRoots Provides channel which receives commitment hash each time aggregation is occured
func (a *Aggregator) AggregatedRoots() chan [length.Hash]byte { func (a *Aggregator) AggregatedRoots() chan [length.Hash]byte {
return a.stepDoneNotice return a.stepDoneNotice
} }
@ -926,7 +926,7 @@ func (a *Aggregator) FinishTx() (err error) {
return err return err
} }
step := a.txNum / a.aggregationStep step := a.txNum / a.aggregationStep
mxStepCurrent.Set(step) mxStepCurrent.SetUint64(step)
if step == 0 { if step == 0 {
a.notifyAggregated(rootHash) a.notifyAggregated(rootHash)
@ -1292,7 +1292,7 @@ func DecodeAccountBytes(enc []byte) (nonce uint64, balance *uint256.Int, hash []
} }
func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarnation uint64) []byte { func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarnation uint64) []byte {
l := int(1) l := 1
if nonce > 0 { if nonce > 0 {
l += common.BitLenToByteLen(bits.Len64(nonce)) l += common.BitLenToByteLen(bits.Len64(nonce))
} }

View File

@ -32,13 +32,12 @@ import (
"time" "time"
"github.com/RoaringBitmap/roaring/roaring64" "github.com/RoaringBitmap/roaring/roaring64"
"github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/log/v3"
btree2 "github.com/tidwall/btree" btree2 "github.com/tidwall/btree"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/background"
"github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/compress"
"github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv"
@ -713,8 +712,8 @@ func (d *Domain) aggregate(ctx context.Context, step uint64, txFrom, txTo uint64
mxRunningCollations.Dec() mxRunningCollations.Dec()
mxCollateTook.UpdateDuration(start) mxCollateTook.UpdateDuration(start)
mxCollationSize.Set(uint64(collation.valuesComp.Count())) mxCollationSize.SetInt(collation.valuesComp.Count())
mxCollationSizeHist.Set(uint64(collation.historyComp.Count())) mxCollationSizeHist.SetInt(collation.historyComp.Count())
if err != nil { if err != nil {
collation.Close() collation.Close()

View File

@ -68,10 +68,10 @@ var (
writeToDBTimer = metrics.NewSummary(`pool_write_to_db`) writeToDBTimer = metrics.NewSummary(`pool_write_to_db`)
propagateToNewPeerTimer = metrics.NewSummary(`pool_propagate_to_new_peer`) propagateToNewPeerTimer = metrics.NewSummary(`pool_propagate_to_new_peer`)
propagateNewTxsTimer = metrics.NewSummary(`pool_propagate_new_txs`) propagateNewTxsTimer = metrics.NewSummary(`pool_propagate_new_txs`)
writeToDBBytesCounter = metrics.GetOrCreateCounter(`pool_write_to_db_bytes`) writeToDBBytesCounter = metrics.GetOrCreateGauge(`pool_write_to_db_bytes`)
pendingSubCounter = metrics.GetOrCreateCounter(`txpool_pending`) pendingSubCounter = metrics.GetOrCreateGauge(`txpool_pending`)
queuedSubCounter = metrics.GetOrCreateCounter(`txpool_queued`) queuedSubCounter = metrics.GetOrCreateGauge(`txpool_queued`)
basefeeSubCounter = metrics.GetOrCreateCounter(`txpool_basefee`) basefeeSubCounter = metrics.GetOrCreateGauge(`txpool_basefee`)
) )
// Pool is interface for the transaction pool // Pool is interface for the transaction pool
@ -1685,7 +1685,7 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs
p.logger.Error("[txpool] flush is local history", "err", err) p.logger.Error("[txpool] flush is local history", "err", err)
continue continue
} }
writeToDBBytesCounter.Set(written) writeToDBBytesCounter.SetUint64(written)
p.logger.Debug("[txpool] Commit", "written_kb", written/1024, "in", time.Since(t)) p.logger.Debug("[txpool] Commit", "written_kb", written/1024, "in", time.Since(t))
} }
case announcements := <-newTxs: case announcements := <-newTxs:
@ -2115,9 +2115,9 @@ func (p *TxPool) logStats() {
} }
ctx = append(ctx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) ctx = append(ctx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
p.logger.Info("[txpool] stat", ctx...) p.logger.Info("[txpool] stat", ctx...)
pendingSubCounter.Set(uint64(p.pending.Len())) pendingSubCounter.SetInt(p.pending.Len())
basefeeSubCounter.Set(uint64(p.baseFee.Len())) basefeeSubCounter.SetInt(p.baseFee.Len())
queuedSubCounter.Set(uint64(p.queued.Len())) queuedSubCounter.SetInt(p.queued.Len())
} }
// Deprecated need switch to streaming-like // Deprecated need switch to streaming-like

View File

@ -2,18 +2,19 @@ package stagedsync
import ( import (
"fmt" "fmt"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/huandu/xstrings" "github.com/huandu/xstrings"
"github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
) )
var syncMetrics = map[stages.SyncStage]metrics.Counter{} var syncMetrics = map[stages.SyncStage]metrics.Gauge{}
func init() { func init() {
for _, v := range stages.AllStages { for _, v := range stages.AllStages {
syncMetrics[v] = metrics.GetOrCreateCounter( syncMetrics[v] = metrics.GetOrCreateGauge(
fmt.Sprintf( fmt.Sprintf(
`sync{stage="%s"}`, `sync{stage="%s"}`,
xstrings.ToSnakeCase(string(v)), xstrings.ToSnakeCase(string(v)),
@ -30,7 +31,7 @@ func UpdateMetrics(tx kv.Tx) error {
if err != nil { if err != nil {
return err return err
} }
m.Set(progress) m.SetUint64(progress)
} }
return nil return nil
} }

View File

@ -44,9 +44,9 @@ import (
"github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/services"
) )
var execStepsInDB = metrics.NewCounter(`exec_steps_in_db`) //nolint var execStepsInDB = metrics.NewGauge(`exec_steps_in_db`) //nolint
var execRepeats = metrics.NewCounter(`exec_repeats`) //nolint var execRepeats = metrics.NewCounter(`exec_repeats`) //nolint
var execTriggers = metrics.NewCounter(`exec_triggers`) //nolint var execTriggers = metrics.NewCounter(`exec_triggers`) //nolint
func NewProgress(prevOutputBlockNum, commitThreshold uint64, workersCount int, logPrefix string, logger log.Logger) *Progress { func NewProgress(prevOutputBlockNum, commitThreshold uint64, workersCount int, logPrefix string, logger log.Logger) *Progress {
return &Progress{prevTime: time.Now(), prevOutputBlockNum: prevOutputBlockNum, commitThreshold: commitThreshold, workersCount: workersCount, logPrefix: logPrefix, logger: logger} return &Progress{prevTime: time.Now(), prevOutputBlockNum: prevOutputBlockNum, commitThreshold: commitThreshold, workersCount: workersCount, logPrefix: logPrefix, logger: logger}
@ -65,7 +65,7 @@ type Progress struct {
} }
func (p *Progress) Log(rs *state.StateV3, in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, doneCount, inputBlockNum, outputBlockNum, outTxNum, repeatCount uint64, idxStepsAmountInDB float64) { func (p *Progress) Log(rs *state.StateV3, in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, doneCount, inputBlockNum, outputBlockNum, outTxNum, repeatCount uint64, idxStepsAmountInDB float64) {
execStepsInDB.Set(uint64(idxStepsAmountInDB * 100)) execStepsInDB.Set(idxStepsAmountInDB * 100)
var m runtime.MemStats var m runtime.MemStats
dbg.ReadMemStats(&m) dbg.ReadMemStats(&m)
sizeEstimate := rs.SizeEstimate() sizeEstimate := rs.SizeEstimate()
@ -280,10 +280,10 @@ func ExecV3(ctx context.Context,
return err return err
} }
execRepeats.Add(conflicts) execRepeats.AddInt(conflicts)
execTriggers.Add(triggers) execTriggers.AddInt(triggers)
if processedBlockNum > lastBlockNum { if processedBlockNum > lastBlockNum {
outputBlockNum.Set(processedBlockNum) outputBlockNum.SetUint64(processedBlockNum)
lastBlockNum = processedBlockNum lastBlockNum = processedBlockNum
} }
if processedTxNum > 0 { if processedTxNum > 0 {
@ -334,7 +334,7 @@ func ExecV3(ctx context.Context,
case <-logEvery.C: case <-logEvery.C:
stepsInDB := rawdbhelpers.IdxStepsCountV3(tx) stepsInDB := rawdbhelpers.IdxStepsCountV3(tx)
progress.Log(rs, in, rws, rs.DoneCount(), inputBlockNum.Load(), outputBlockNum.Get(), outputTxNum.Load(), execRepeats.Get(), stepsInDB) progress.Log(rs, in, rws, rs.DoneCount(), inputBlockNum.Load(), outputBlockNum.GetValueUint64(), outputTxNum.Load(), execRepeats.GetValueUint64(), stepsInDB)
if agg.HasBackgroundFilesBuild() { if agg.HasBackgroundFilesBuild() {
logger.Info(fmt.Sprintf("[%s] Background files build", logPrefix), "progress", agg.BackgroundProgress()) logger.Info(fmt.Sprintf("[%s] Background files build", logPrefix), "progress", agg.BackgroundProgress())
} }
@ -369,10 +369,10 @@ func ExecV3(ctx context.Context,
return err return err
} }
execRepeats.Add(conflicts) execRepeats.AddInt(conflicts)
execTriggers.Add(triggers) execTriggers.AddInt(triggers)
if processedBlockNum > 0 { if processedBlockNum > 0 {
outputBlockNum.Set(processedBlockNum) outputBlockNum.SetUint64(processedBlockNum)
} }
if processedTxNum > 0 { if processedTxNum > 0 {
outputTxNum.Store(processedTxNum) outputTxNum.Store(processedTxNum)
@ -411,7 +411,7 @@ func ExecV3(ctx context.Context,
} }
t3 = time.Since(tt) t3 = time.Since(tt)
if err = execStage.Update(tx, outputBlockNum.Get()); err != nil { if err = execStage.Update(tx, outputBlockNum.GetValueUint64()); err != nil {
return err return err
} }
@ -449,7 +449,7 @@ func ExecV3(ctx context.Context,
if err = agg.Flush(ctx, tx); err != nil { if err = agg.Flush(ctx, tx); err != nil {
return err return err
} }
if err = execStage.Update(tx, outputBlockNum.Get()); err != nil { if err = execStage.Update(tx, outputBlockNum.GetValueUint64()); err != nil {
return err return err
} }
if err = tx.Commit(); err != nil { if err = tx.Commit(); err != nil {
@ -657,7 +657,7 @@ Loop:
if err := rs.ApplyState(applyTx, txTask, agg); err != nil { if err := rs.ApplyState(applyTx, txTask, agg); err != nil {
return fmt.Errorf("StateV3.Apply: %w", err) return fmt.Errorf("StateV3.Apply: %w", err)
} }
execTriggers.Add(rs.CommitTxNum(txTask.Sender, txTask.TxNum, in)) execTriggers.AddInt(rs.CommitTxNum(txTask.Sender, txTask.TxNum, in))
outputTxNum.Add(1) outputTxNum.Add(1)
if err := rs.ApplyHistory(txTask, agg); err != nil { if err := rs.ApplyHistory(txTask, agg); err != nil {
@ -669,12 +669,12 @@ Loop:
} }
if !parallel { if !parallel {
outputBlockNum.Set(blockNum) outputBlockNum.SetUint64(blockNum)
select { select {
case <-logEvery.C: case <-logEvery.C:
stepsInDB := rawdbhelpers.IdxStepsCountV3(applyTx) stepsInDB := rawdbhelpers.IdxStepsCountV3(applyTx)
progress.Log(rs, in, rws, count, inputBlockNum.Load(), outputBlockNum.Get(), outputTxNum.Load(), execRepeats.Get(), stepsInDB) progress.Log(rs, in, rws, count, inputBlockNum.Load(), outputBlockNum.GetValueUint64(), outputTxNum.Load(), execRepeats.GetValueUint64(), stepsInDB)
if rs.SizeEstimate() < commitThreshold { if rs.SizeEstimate() < commitThreshold {
break break
} }
@ -695,7 +695,7 @@ Loop:
} }
t3 = time.Since(tt) t3 = time.Since(tt)
if err = execStage.Update(applyTx, outputBlockNum.Get()); err != nil { if err = execStage.Update(applyTx, outputBlockNum.GetValueUint64()); err != nil {
return err return err
} }
@ -995,7 +995,7 @@ func reconstituteStep(last bool,
logger.Info(fmt.Sprintf("[%s] State reconstitution", s.LogPrefix()), "overall progress", fmt.Sprintf("%.2f%%", progress), logger.Info(fmt.Sprintf("[%s] State reconstitution", s.LogPrefix()), "overall progress", fmt.Sprintf("%.2f%%", progress),
"step progress", fmt.Sprintf("%.2f%%", stepProgress), "step progress", fmt.Sprintf("%.2f%%", stepProgress),
"tx/s", fmt.Sprintf("%.1f", speedTx), "workCh", fmt.Sprintf("%d/%d", len(workCh), cap(workCh)), "tx/s", fmt.Sprintf("%.1f", speedTx), "workCh", fmt.Sprintf("%d/%d", len(workCh), cap(workCh)),
"repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "queue.len", rs.QueueLen(), "blk", syncMetrics[stages.Execution].Get(), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "queue.len", rs.QueueLen(), "blk", syncMetrics[stages.Execution].GetValueUint64(),
"buffer", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(commitThreshold)), "buffer", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(commitThreshold)),
"alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
if sizeEstimate >= commitThreshold { if sizeEstimate >= commitThreshold {
@ -1106,7 +1106,7 @@ func reconstituteStep(last bool,
inputTxNum++ inputTxNum++
} }
syncMetrics[stages.Execution].Set(bn) syncMetrics[stages.Execution].SetUint64(bn)
} }
return err return err
}(); err != nil { }(); err != nil {

View File

@ -1,10 +1,10 @@
package stagedsync package stagedsync
import ( import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/log/v3"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
) )
@ -51,7 +51,7 @@ func (s *StageState) LogPrefix() string { return s.state.LogPrefix() }
// Update updates the stage state (current block number) in the database. Can be called multiple times during stage execution. // Update updates the stage state (current block number) in the database. Can be called multiple times during stage execution.
func (s *StageState) Update(db kv.Putter, newBlockNum uint64) error { func (s *StageState) Update(db kv.Putter, newBlockNum uint64) error {
if m, ok := syncMetrics[s.ID]; ok { if m, ok := syncMetrics[s.ID]; ok {
m.Set(newBlockNum) m.SetUint64(newBlockNum)
} }
return stages.SaveStageProgress(db, s.ID, newBlockNum) return stages.SaveStageProgress(db, s.ID, newBlockNum)
} }

View File

@ -5,14 +5,11 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/ledgerwatch/erigon-lib/kv/dbutils"
"os" "os"
"runtime" "runtime"
"time" "time"
"github.com/c2h5oh/datasize" "github.com/c2h5oh/datasize"
"github.com/ledgerwatch/erigon-lib/kv/membatch"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/log/v3"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
@ -25,10 +22,12 @@ import (
"github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/etl"
"github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/kv/membatch"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
libstate "github.com/ledgerwatch/erigon-lib/state" libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/changeset"
"github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus"
@ -464,8 +463,8 @@ Loop:
writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to) writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to)
writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to) writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to)
_, is_memory_mutation := tx.(*membatchwithdb.MemoryMutation) _, isMemoryMutation := tx.(*membatchwithdb.MemoryMutation)
if cfg.silkworm != nil && !is_memory_mutation { if cfg.silkworm != nil && !isMemoryMutation {
blockNum, err = cfg.silkworm.ExecuteBlocks(tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces) blockNum, err = cfg.silkworm.ExecuteBlocks(tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces)
} else { } else {
err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger) err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger)
@ -536,7 +535,7 @@ Loop:
logBlock, logTx, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, logTx, lastLogTx, gas, float64(currentStateGas)/float64(gasState), batch, logger) logBlock, logTx, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, logTx, lastLogTx, gas, float64(currentStateGas)/float64(gasState), batch, logger)
gas = 0 gas = 0
tx.CollectMetrics() tx.CollectMetrics()
syncMetrics[stages.Execution].Set(blockNum) syncMetrics[stages.Execution].SetUint64(blockNum)
} }
} }

View File

@ -19,8 +19,9 @@
package p2p package p2p
import ( import (
"github.com/ledgerwatch/erigon-lib/metrics"
"net" "net"
"github.com/ledgerwatch/erigon-lib/metrics"
) )
const ( const (
@ -33,7 +34,7 @@ var (
ingressTrafficMeter = metrics.GetOrCreateCounter(ingressMeterName) ingressTrafficMeter = metrics.GetOrCreateCounter(ingressMeterName)
egressConnectMeter = metrics.GetOrCreateCounter("p2p_dials") egressConnectMeter = metrics.GetOrCreateCounter("p2p_dials")
egressTrafficMeter = metrics.GetOrCreateCounter(egressMeterName) egressTrafficMeter = metrics.GetOrCreateCounter(egressMeterName)
activePeerGauge = metrics.GetOrCreateCounter("p2p_peers", true) activePeerGauge = metrics.GetOrCreateGauge("p2p_peers")
) )
// meteredConn is a wrapper around a net.Conn that meters both the // meteredConn is a wrapper around a net.Conn that meters both the
@ -60,7 +61,7 @@ func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
// and the peer ingress traffic meters along the way. // and the peer ingress traffic meters along the way.
func (c *meteredConn) Read(b []byte) (n int, err error) { func (c *meteredConn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b) n, err = c.Conn.Read(b)
ingressTrafficMeter.Add(n) ingressTrafficMeter.AddInt(n)
return n, err return n, err
} }
@ -68,7 +69,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
// and the peer egress traffic meters along the way. // and the peer egress traffic meters along the way.
func (c *meteredConn) Write(b []byte) (n int, err error) { func (c *meteredConn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b) n, err = c.Conn.Write(b)
egressTrafficMeter.Add(n) egressTrafficMeter.AddInt(n)
return n, err return n, err
} }

View File

@ -26,11 +26,10 @@ import (
"sync" "sync"
"time" "time"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/diagnostics"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/debug"
"github.com/ledgerwatch/erigon/common/mclock" "github.com/ledgerwatch/erigon/common/mclock"
"github.com/ledgerwatch/erigon/event" "github.com/ledgerwatch/erigon/event"
@ -414,8 +413,8 @@ func (p *Peer) handle(msg Msg) error {
if p.metricsEnabled { if p.metricsEnabled {
m := fmt.Sprintf("%s_%s_%d_%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset) m := fmt.Sprintf("%s_%s_%d_%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset)
metrics.GetOrCreateCounter(m).Set(uint64(msg.meterSize)) metrics.GetOrCreateGauge(m).SetUint32(msg.meterSize)
metrics.GetOrCreateCounter(m + "_packets").Set(1) metrics.GetOrCreateGauge(m + "_packets").Set(1)
} }
select { select {
case proto.in <- msg: case proto.in <- msg: