metrics: separate usage of prometheus counter and gauge interfaces (#8793)

milen 2023-11-24 15:15:12 +00:00 committed by GitHub
parent 748359cf72
commit 230b013096
23 changed files with 332 additions and 302 deletions
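In short: the old metrics.Counter in erigon-lib was a catch-all wrapper around prometheus.Gauge exposing Set/Add/Get (plus a trailing isGauge flag on GetOrCreateCounter), so call sites mixed counter and gauge semantics. The diffs below split this into separate Counter and Gauge interfaces that embed the native Prometheus types and add typed convenience methods. A minimal sketch of the call-site migration, using names taken from the diffs that follow:

// Before: one Counter type served both roles, with gauge-style Set underneath.
rewindLengthMeter := metrics.GetOrCreateCounter("chain_autorewind_length")
rewindLengthMeter.Set(head - rewindTo)

// After: values that move up and down become gauges with typed setters...
rewindLengthMeter := metrics.GetOrCreateGauge("chain_autorewind_length")
rewindLengthMeter.SetUint64(head - rewindTo)

// ...while monotonic series stay counters and gain AddInt/AddUint64.
execRepeats := metrics.NewCounter(`exec_repeats`)
execRepeats.AddInt(conflicts)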

View File

@ -3,22 +3,22 @@ package main
import (
"errors"
"fmt"
"github.com/ledgerwatch/erigon-lib/metrics"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/log/v3"
"github.com/pelletier/go-toml"
"github.com/urfave/cli/v2"
"gopkg.in/yaml.v2"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/cmd/utils"
"github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/erigon/params"
erigonapp "github.com/ledgerwatch/erigon/turbo/app"
erigoncli "github.com/ledgerwatch/erigon/turbo/cli"
@ -67,7 +67,7 @@ func runErigon(cliCtx *cli.Context) error {
// initializing the node and providing the current git commit there
logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
erigonInfoGauge := metrics.GetOrCreateCounter(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit))
erigonInfoGauge := metrics.GetOrCreateGauge(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit))
erigonInfoGauge.Set(1)
nodeCfg := node.NewNodConfigUrfave(cliCtx, logger)

View File

@ -6,12 +6,12 @@ import (
"errors"
"fmt"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/generics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/log/v3"
)
var (
@ -31,7 +31,7 @@ var (
errEndBlock = errors.New("failed to get end block")
//Metrics for collecting the rewindLength
rewindLengthMeter = metrics.GetOrCreateCounter("chain_autorewind_length")
rewindLengthMeter = metrics.GetOrCreateGauge("chain_autorewind_length")
)
type borVerifier struct {
@ -157,7 +157,7 @@ func borVerify(ctx context.Context, config *config, start uint64, end uint64, ha
// Stop the miner if the mining process is running and rewind back the chain
func rewindBack(head uint64, rewindTo uint64) {
rewindLengthMeter.Set(head - rewindTo)
rewindLengthMeter.SetUint64(head - rewindTo)
// Chain cannot be rewound from this routine
// hence we are using a shared variable

View File

@ -16,13 +16,10 @@ type checkpointService interface {
var (
//Metrics for collecting the whitelisted checkpoint number
whitelistedCheckpointNumberMeter = metrics.GetOrCreateCounter("chain_checkpoint_latest", true)
whitelistedCheckpointNumberMeter = metrics.GetOrCreateGauge("chain_checkpoint_latest")
//Metrics for collecting the number of invalid chains received
CheckpointChainMeter = metrics.GetOrCreateCounter("chain_checkpoint_isvalidchain")
//Metrics for collecting the number of valid peers received
CheckpointPeerMeter = metrics.GetOrCreateCounter("chain_checkpoint_isvalidpeer")
checkpointChainMeter = metrics.GetOrCreateGauge("chain_checkpoint_isvalidchain")
)
// IsValidChain checks the validity of chain by comparing it
@ -34,9 +31,9 @@ func (w *checkpoint) IsValidChain(currentHeader uint64, chain []*types.Header) b
res := w.finality.IsValidChain(currentHeader, chain)
if res {
CheckpointChainMeter.Add(1)
checkpointChainMeter.Inc()
} else {
CheckpointPeerMeter.Add(-1)
checkpointChainMeter.Dec()
}
return res
@ -48,5 +45,5 @@ func (w *checkpoint) Process(block uint64, hash common.Hash) {
w.finality.Process(block, hash)
whitelistedCheckpointNumberMeter.Set(block)
whitelistedCheckpointNumberMeter.SetUint64(block)
}
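Note the pre-existing asymmetry this hunk also fixes: the old code incremented CheckpointChainMeter on a valid chain but decremented the unrelated CheckpointPeerMeter on an invalid one. Both paths now land on a single gauge, which therefore tracks the net balance of valid minus invalid chains. A hypothetical read-back after three valid and one invalid chain:

g := metrics.GetOrCreateGauge("chain_checkpoint_isvalidchain")
g.Inc(); g.Inc(); g.Inc() // three valid chains
g.Dec()                   // one invalid chain
fmt.Println(g.GetValue()) // prints 2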

View File

@ -1,12 +1,13 @@
package whitelist
import (
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/consensus/bor/finality/flags"
"github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/log/v3"
)
type milestone struct {
@ -35,16 +36,16 @@ type milestoneService interface {
var (
//Metrics for collecting the whitelisted milestone number
whitelistedMilestoneMeter = metrics.GetOrCreateCounter("chain_milestone_latest", true)
whitelistedMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_latest")
//Metrics for collecting the future milestone number
FutureMilestoneMeter = metrics.GetOrCreateCounter("chain_milestone_future", true)
futureMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_future")
//Metrics for collecting the length of the MilestoneIds map
MilestoneIdsLengthMeter = metrics.GetOrCreateCounter("chain_milestone_idslength", true)
milestoneIdsLengthMeter = metrics.GetOrCreateGauge("chain_milestone_idslength")
//Metrics for collecting the number of valid chains received
MilestoneChainMeter = metrics.GetOrCreateCounter("chain_milestone_isvalidchain")
milestoneChainMeter = metrics.GetOrCreateGauge("chain_milestone_isvalidchain")
)
// IsValidChain checks the validity of chain by comparing it
@ -58,12 +59,12 @@ func (m *milestone) IsValidChain(currentHeader uint64, chain []*types.Header) bo
m.finality.RLock()
defer m.finality.RUnlock()
var isValid bool = false
var isValid = false
defer func() {
if isValid {
MilestoneChainMeter.Add(1)
milestoneChainMeter.Inc()
} else {
MilestoneChainMeter.Add(-1)
milestoneChainMeter.Dec()
}
}()
@ -102,12 +103,12 @@ func (m *milestone) Process(block uint64, hash common.Hash) {
}
}
whitelistedMilestoneMeter.Set(block)
whitelistedMilestoneMeter.SetUint64(block)
m.UnlockSprint(block)
}
// This function will Lock the mutex at the time of voting
// LockMutex locks the mutex at the time of voting
func (m *milestone) LockMutex(endBlockNum uint64) bool {
m.finality.Lock()
@ -124,7 +125,7 @@ func (m *milestone) LockMutex(endBlockNum uint64) bool {
return true
}
// This function will unlock the mutex locked in LockMutex
// UnlockMutex unlocks the mutex locked in LockMutex
func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uint64, endBlockHash common.Hash) {
m.Locked = m.Locked || doLock
@ -141,13 +142,12 @@ func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uin
log.Error("Error in writing lock data of milestone to db", "err", err)
}
milestoneIDLength := uint64(len(m.LockedMilestoneIDs))
MilestoneIdsLengthMeter.Set(milestoneIDLength)
milestoneIdsLengthMeter.SetInt(len(m.LockedMilestoneIDs))
m.finality.Unlock()
}
// This function will unlock the locked sprint
// UnlockSprint unlocks the locked sprint
func (m *milestone) UnlockSprint(endBlockNum uint64) {
if endBlockNum < m.LockedMilestoneNumber {
return
@ -163,7 +163,7 @@ func (m *milestone) UnlockSprint(endBlockNum uint64) {
}
}
// This function will remove the stored milestoneID
// RemoveMilestoneID removes the stored milestoneID
func (m *milestone) RemoveMilestoneID(milestoneId string) {
m.finality.Lock()
defer m.finality.Unlock()
@ -182,7 +182,7 @@ func (m *milestone) RemoveMilestoneID(milestoneId string) {
}
// This will check whether the incoming chain matches the locked sprint hash
// IsReorgAllowed checks whether the incoming chain matches the locked sprint hash
func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber uint64, lockedMilestoneHash common.Hash) bool {
if chain[len(chain)-1].Number.Uint64() <= lockedMilestoneNumber { //Can't reorg if the end block of incoming
return false //chain is less than locked sprint number
@ -197,7 +197,7 @@ func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber
return true
}
// This will return the list of milestoneIDs stored.
// GetMilestoneIDsList returns the list of stored milestoneIDs.
func (m *milestone) GetMilestoneIDsList() []string {
m.finality.RLock()
defer m.finality.RUnlock()
@ -276,7 +276,7 @@ func (m *milestone) enqueueFutureMilestone(key uint64, hash common.Hash) {
log.Error("[bor] Error in writing future milestone data to db", "err", err)
}
FutureMilestoneMeter.Set(key)
futureMilestoneMeter.SetUint64(key)
}
// DequeueFutureMilestone removes the future milestone entry from the list.

View File

@ -2,8 +2,9 @@ package heimdall
import (
"context"
"github.com/ledgerwatch/erigon-lib/metrics"
"time"
"github.com/ledgerwatch/erigon-lib/metrics"
)
type (
@ -11,7 +12,7 @@ type (
requestType string
meter struct {
request map[bool]metrics.Counter // map[isSuccessful]metrics.Meter
request map[bool]metrics.Gauge // map[isSuccessful]metrics.Gauge
timer metrics.Summary
}
)
@ -40,30 +41,30 @@ func getRequestType(ctx context.Context) (requestType, bool) {
var (
requestMeters = map[requestType]meter{
stateSyncRequest: {
request: map[bool]metrics.Counter{
true: metrics.GetOrCreateCounter("client_requests_statesync_valid"),
false: metrics.GetOrCreateCounter("client_requests_statesync_invalid"),
request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateGauge("client_requests_statesync_valid"),
false: metrics.GetOrCreateGauge("client_requests_statesync_invalid"),
},
timer: metrics.GetOrCreateSummary("client_requests_statesync_duration"),
},
spanRequest: {
request: map[bool]metrics.Counter{
true: metrics.GetOrCreateCounter("client_requests_span_valid"),
false: metrics.GetOrCreateCounter("client_requests_span_invalid"),
request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateGauge("client_requests_span_valid"),
false: metrics.GetOrCreateGauge("client_requests_span_invalid"),
},
timer: metrics.GetOrCreateSummary("client_requests_span_duration"),
},
checkpointRequest: {
request: map[bool]metrics.Counter{
true: metrics.GetOrCreateCounter("client_requests_checkpoint_valid"),
false: metrics.GetOrCreateCounter("client_requests_checkpoint_invalid"),
request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateGauge("client_requests_checkpoint_valid"),
false: metrics.GetOrCreateGauge("client_requests_checkpoint_invalid"),
},
timer: metrics.GetOrCreateSummary("client_requests_checkpoint_duration"),
},
checkpointCountRequest: {
request: map[bool]metrics.Counter{
true: metrics.GetOrCreateCounter("client_requests_checkpointcount_valid"),
false: metrics.GetOrCreateCounter("client_requests_checkpointcount_invalid"),
request: map[bool]metrics.Gauge{
true: metrics.GetOrCreateGauge("client_requests_checkpointcount_valid"),
false: metrics.GetOrCreateGauge("client_requests_checkpointcount_invalid"),
},
timer: metrics.GetOrCreateSummary("client_requests_checkpointcount_duration"),
},
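The code that updates these meters sits outside this diff; a hypothetical helper sketching how they would be consumed, assuming the summary is fed via UpdateDuration as elsewhere in this commit:

func observeRequest(reqType requestType, start time.Time, err error) {
	m := requestMeters[reqType]
	m.request[err == nil].Inc() // success/failure gauge selected by bool key
	m.timer.UpdateDuration(start)
}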

View File

@ -652,7 +652,9 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, ag
return nil
}
func (rs *StateV3) DoneCount() uint64 { return execTxsDone.Get() }
func (rs *StateV3) DoneCount() uint64 {
return execTxsDone.GetValueUint64()
}
func (rs *StateV3) SizeEstimate() (r uint64) {
rs.lock.RLock()

View File

@ -83,11 +83,11 @@ const Unlim int = -1
var (
ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.ChaindataDeprecatedTables can be deleted")
DbSize = metrics.GetOrCreateCounter(`db_size`) //nolint
TxLimit = metrics.GetOrCreateCounter(`tx_limit`) //nolint
TxSpill = metrics.GetOrCreateCounter(`tx_spill`) //nolint
TxUnspill = metrics.GetOrCreateCounter(`tx_unspill`) //nolint
TxDirty = metrics.GetOrCreateCounter(`tx_dirty`) //nolint
DbSize = metrics.GetOrCreateGauge(`db_size`) //nolint
TxLimit = metrics.GetOrCreateGauge(`tx_limit`) //nolint
TxSpill = metrics.GetOrCreateGauge(`tx_spill`) //nolint
TxUnspill = metrics.GetOrCreateGauge(`tx_unspill`) //nolint
TxDirty = metrics.GetOrCreateGauge(`tx_dirty`) //nolint
DbCommitPreparation = metrics.GetOrCreateSummary(`db_commit_seconds{phase="preparation"}`) //nolint
//DbGCWallClock = metrics.GetOrCreateSummary(`db_commit_seconds{phase="gc_wall_clock"}`) //nolint
@ -98,14 +98,14 @@ var (
DbCommitEnding = metrics.GetOrCreateSummary(`db_commit_seconds{phase="ending"}`) //nolint
DbCommitTotal = metrics.GetOrCreateSummary(`db_commit_seconds{phase="total"}`) //nolint
DbPgopsNewly = metrics.GetOrCreateCounter(`db_pgops{phase="newly"}`) //nolint
DbPgopsCow = metrics.GetOrCreateCounter(`db_pgops{phase="cow"}`) //nolint
DbPgopsClone = metrics.GetOrCreateCounter(`db_pgops{phase="clone"}`) //nolint
DbPgopsSplit = metrics.GetOrCreateCounter(`db_pgops{phase="split"}`) //nolint
DbPgopsMerge = metrics.GetOrCreateCounter(`db_pgops{phase="merge"}`) //nolint
DbPgopsSpill = metrics.GetOrCreateCounter(`db_pgops{phase="spill"}`) //nolint
DbPgopsUnspill = metrics.GetOrCreateCounter(`db_pgops{phase="unspill"}`) //nolint
DbPgopsWops = metrics.GetOrCreateCounter(`db_pgops{phase="wops"}`) //nolint
DbPgopsNewly = metrics.GetOrCreateGauge(`db_pgops{phase="newly"}`) //nolint
DbPgopsCow = metrics.GetOrCreateGauge(`db_pgops{phase="cow"}`) //nolint
DbPgopsClone = metrics.GetOrCreateGauge(`db_pgops{phase="clone"}`) //nolint
DbPgopsSplit = metrics.GetOrCreateGauge(`db_pgops{phase="split"}`) //nolint
DbPgopsMerge = metrics.GetOrCreateGauge(`db_pgops{phase="merge"}`) //nolint
DbPgopsSpill = metrics.GetOrCreateGauge(`db_pgops{phase="spill"}`) //nolint
DbPgopsUnspill = metrics.GetOrCreateGauge(`db_pgops{phase="unspill"}`) //nolint
DbPgopsWops = metrics.GetOrCreateGauge(`db_pgops{phase="wops"}`) //nolint
/*
DbPgopsPrefault = metrics.NewCounter(`db_pgops{phase="prefault"}`) //nolint
DbPgopsMinicore = metrics.NewCounter(`db_pgops{phase="minicore"}`) //nolint
@ -139,9 +139,9 @@ var (
//DbGcSelfPnlMergeVolume = metrics.NewCounter(`db_gc_pnl{phase="self_merge_volume"}`) //nolint
//DbGcSelfPnlMergeCalls = metrics.NewCounter(`db_gc_pnl{phase="slef_merge_calls"}`) //nolint
GcLeafMetric = metrics.GetOrCreateCounter(`db_gc_leaf`) //nolint
GcOverflowMetric = metrics.GetOrCreateCounter(`db_gc_overflow`) //nolint
GcPagesMetric = metrics.GetOrCreateCounter(`db_gc_pages`) //nolint
GcLeafMetric = metrics.GetOrCreateGauge(`db_gc_leaf`) //nolint
GcOverflowMetric = metrics.GetOrCreateGauge(`db_gc_overflow`) //nolint
GcPagesMetric = metrics.GetOrCreateGauge(`db_gc_pages`) //nolint
)

View File

@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kvcache
import (
@ -100,10 +101,10 @@ type CacheView interface {
// - changes in Non-Canonical View SHOULD NOT reflect in stateEvict
type Coherent struct {
hasher hash.Hash
codeEvictLen metrics.Counter
codeKeys metrics.Counter
keys metrics.Counter
evict metrics.Counter
codeEvictLen metrics.Gauge
codeKeys metrics.Gauge
keys metrics.Gauge
evict metrics.Gauge
latestStateView *CoherentRoot
codeMiss metrics.Counter
timeout metrics.Counter
@ -187,12 +188,12 @@ func New(cfg CoherentConfig) *Coherent {
miss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
hits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
timeout: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_timeout_total{name="%s"}`, cfg.MetricsLabel)),
keys: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_keys_total{name="%s"}`, cfg.MetricsLabel)),
evict: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_list_total{name="%s"}`, cfg.MetricsLabel)),
keys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_keys_total{name="%s"}`, cfg.MetricsLabel)),
evict: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_list_total{name="%s"}`, cfg.MetricsLabel)),
codeMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
codeHits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
codeKeys: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_keys_total{name="%s"}`, cfg.MetricsLabel)),
codeEvictLen: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_list_total{name="%s"}`, cfg.MetricsLabel)),
codeKeys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_keys_total{name="%s"}`, cfg.MetricsLabel)),
codeEvictLen: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_list_total{name="%s"}`, cfg.MetricsLabel)),
}
}
@ -260,10 +261,10 @@ func (c *Coherent) advanceRoot(stateVersionID uint64) (r *CoherentRoot) {
c.latestStateVersionID = stateVersionID
c.latestStateView = r
c.keys.Set(uint64(c.latestStateView.cache.Len()))
c.codeKeys.Set(uint64(c.latestStateView.codeCache.Len()))
c.evict.Set(uint64(c.stateEvict.Len()))
c.codeEvictLen.Set(uint64(c.codeEvict.Len()))
c.keys.SetInt(c.latestStateView.cache.Len())
c.codeKeys.SetInt(c.latestStateView.codeCache.Len())
c.evict.SetInt(c.stateEvict.Len())
c.codeEvictLen.SetInt(c.codeEvict.Len())
return r
}

View File

@ -34,15 +34,16 @@ import (
"github.com/c2h5oh/datasize"
"github.com/erigontech/mdbx-go/mdbx"
stack2 "github.com/go-stack/stack"
"github.com/ledgerwatch/log/v3"
"github.com/pbnjay/memory"
"golang.org/x/exp/maps"
"golang.org/x/sync/semaphore"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/iter"
"github.com/ledgerwatch/erigon-lib/kv/order"
"github.com/ledgerwatch/log/v3"
"github.com/pbnjay/memory"
"golang.org/x/exp/maps"
"golang.org/x/sync/semaphore"
)
const NonExistingDBI kv.DBI = 999_999_999
@ -630,33 +631,33 @@ func (tx *MdbxTx) CollectMetrics() {
}
}
kv.DbSize.Set(info.Geo.Current)
kv.DbPgopsNewly.Set(info.PageOps.Newly)
kv.DbPgopsCow.Set(info.PageOps.Cow)
kv.DbPgopsClone.Set(info.PageOps.Clone)
kv.DbPgopsSplit.Set(info.PageOps.Split)
kv.DbPgopsMerge.Set(info.PageOps.Merge)
kv.DbPgopsSpill.Set(info.PageOps.Spill)
kv.DbPgopsUnspill.Set(info.PageOps.Unspill)
kv.DbPgopsWops.Set(info.PageOps.Wops)
kv.DbSize.SetUint64(info.Geo.Current)
kv.DbPgopsNewly.SetUint64(info.PageOps.Newly)
kv.DbPgopsCow.SetUint64(info.PageOps.Cow)
kv.DbPgopsClone.SetUint64(info.PageOps.Clone)
kv.DbPgopsSplit.SetUint64(info.PageOps.Split)
kv.DbPgopsMerge.SetUint64(info.PageOps.Merge)
kv.DbPgopsSpill.SetUint64(info.PageOps.Spill)
kv.DbPgopsUnspill.SetUint64(info.PageOps.Unspill)
kv.DbPgopsWops.SetUint64(info.PageOps.Wops)
txInfo, err := tx.tx.Info(true)
if err != nil {
return
}
kv.TxDirty.Set(txInfo.SpaceDirty)
kv.TxLimit.Set(tx.db.txSize)
kv.TxSpill.Set(txInfo.Spill)
kv.TxUnspill.Set(txInfo.Unspill)
kv.TxDirty.SetUint64(txInfo.SpaceDirty)
kv.TxLimit.SetUint64(tx.db.txSize)
kv.TxSpill.SetUint64(txInfo.Spill)
kv.TxUnspill.SetUint64(txInfo.Unspill)
gc, err := tx.BucketStat("gc")
if err != nil {
return
}
kv.GcLeafMetric.Set(gc.LeafPages)
kv.GcOverflowMetric.Set(gc.OverflowPages)
kv.GcPagesMetric.Set((gc.LeafPages + gc.OverflowPages) * tx.db.opts.pageSize / 8)
kv.GcLeafMetric.SetUint64(gc.LeafPages)
kv.GcOverflowMetric.SetUint64(gc.OverflowPages)
kv.GcPagesMetric.SetUint64((gc.LeafPages + gc.OverflowPages) * tx.db.opts.pageSize / 8)
}
// ListBuckets - all buckets stored as keys of un-named bucket

View File

@ -0,0 +1,65 @@
package metrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
type Counter interface {
prometheus.Counter
ValueGetter
AddInt(v int)
AddUint64(v uint64)
}
type counter struct {
prometheus.Counter
}
// GetValue returns the native float64 value stored by this counter
func (c *counter) GetValue() float64 {
var m dto.Metric
if err := c.Write(&m); err != nil {
panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
}
return m.GetCounter().GetValue()
}
// GetValueUint64 returns the native float64 value stored by this counter cast to
// a uint64 value for convenience
func (c *counter) GetValueUint64() uint64 {
return uint64(c.GetValue())
}
// AddInt adds an int value to the native float64 value stored by this counter.
//
// This is a convenience function for better UX which is safe for int values up
// to 2^53 (mantissa bits).
//
// This is fine for all usages in our codebase, and it is
// unlikely we will hit issues with this.
//
// If, however, a new requirement demands accuracy for more than
// 2^53 we can implement our own simple intCounter that satisfies the Counter
// interface.
func (c *counter) AddInt(v int) {
c.Add(float64(v))
}
// AddUint64 adds a uint64 value to the native float64 value stored by this counter.
//
// This is a convenience function for better UX which is safe for uint64 values up
// to 2^53 (mantissa bits).
//
// This is fine for all usages in our codebase, and it is
// unlikely we will hit issues with this.
//
// If, however, a new requirement demands accuracy for more than
// 2^53 we can implement our own simple uintCounter that satisfies the Counter
// interface.
func (c *counter) AddUint64(v uint64) {
c.Add(float64(v))
}
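A minimal usage sketch of the new Counter interface; "example_requests_total" is a hypothetical metric name, Inc comes from the embedded prometheus.Counter and the rest from the methods above:

c := metrics.GetOrCreateCounter("example_requests_total")
c.Inc()        // native prometheus.Counter method
c.AddInt(42)   // convenience wrapper around Add(float64(42))
c.AddUint64(7) // same, for uint64 call sites
fmt.Println(c.GetValue(), c.GetValueUint64()) // read-back via ValueGetter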

View File

@ -0,0 +1,76 @@
package metrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
type Gauge interface {
prometheus.Gauge
ValueGetter
SetUint32(v uint32)
SetUint64(v uint64)
SetInt(v int)
}
type gauge struct {
prometheus.Gauge
}
// GetValue returns the native float64 value stored by this gauge
func (g *gauge) GetValue() float64 {
var m dto.Metric
if err := g.Write(&m); err != nil {
panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
}
return m.GetGauge().GetValue()
}
// GetValueUint64 returns the native float64 value stored by this gauge cast to
// a uint64 value for convenience
func (g *gauge) GetValueUint64() uint64 {
return uint64(g.GetValue())
}
// SetUint32 sets the gauge using a uint32 value. Note that under the hood this
// is a cast to float64 which is the native type of prometheus gauges.
//
// This is a convenience function for better UX.
func (g *gauge) SetUint32(v uint32) {
g.Set(float64(v))
}
// SetUint64 sets the gauge using a uint64 value. Note that under the hood this
// is a cast to float64 which is the native type of prometheus gauges.
//
// This is a convenience function for better UX which is safe for uint64 values up
// to 2^53 (mantissa bits).
//
// This is fine for all usages in our codebase, and it is
// unlikely we will hit issues with this.
//
// If, however, a new requirement demands accuracy for more than
// 2^53 we can implement our own simple uintGauge that satisfies the Gauge
// interface.
func (g *gauge) SetUint64(v uint64) {
g.Set(float64(v))
}
// SetInt sets the gauge using an int value. Note that under the hood this
// is a cast to float64 which is the native type of prometheus gauges.
//
// This is a convenience function for better UX which is safe for int values up
// to 2^53 (mantissa bits).
//
// This is fine for all usages in our codebase, and it is
// unlikely we will hit issues with this.
//
// If, however, a new requirement demands accuracy for more than
// 2^53 we can implement our own simple intGauge that satisfies the Gauge
// interface.
func (g *gauge) SetInt(v int) {
g.Set(float64(v))
}
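And the Gauge counterpart; "example_queue_depth" is a hypothetical metric name, Set/Inc/Dec come from the embedded prometheus.Gauge:

g := metrics.GetOrCreateGauge("example_queue_depth")
g.SetUint64(18_500_000) // exact for integers below 2^53
g.SetInt(42)            // spares call sites the float64 cast
g.Dec()
fmt.Println(g.GetValue(), g.GetValueUint64())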

View File

@ -5,7 +5,6 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
type Histogram interface {
@ -22,34 +21,6 @@ type Summary interface {
Histogram
}
type Counter interface {
Inc()
Dec()
Add(n int)
Set(n uint64)
Get() uint64
}
type intCounter struct {
prometheus.Gauge
}
func (c intCounter) Add(n int) {
c.Gauge.Add(float64(n))
}
func (c intCounter) Set(n uint64) {
c.Gauge.Set(float64(n))
}
func (c intCounter) Get() uint64 {
var m dto.Metric
if err := c.Gauge.Write(&m); err != nil {
panic(fmt.Errorf("calling intCounter.Get on invalid metric: %w", err))
}
return uint64(m.GetGauge().GetValue())
}
// NewCounter registers and returns new counter with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
@ -61,12 +32,12 @@ func (c intCounter) Get() uint64 {
//
// The returned counter is safe to use from concurrent goroutines.
func NewCounter(name string) Counter {
counter, err := defaultSet.NewGauge(name)
c, err := defaultSet.NewCounter(name)
if err != nil {
panic(fmt.Errorf("could not create new counter: %w", err))
}
return intCounter{counter}
return &counter{c}
}
// GetOrCreateCounter returns registered counter with the given name
@ -83,17 +54,16 @@ func NewCounter(name string) Counter {
// The returned counter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
func GetOrCreateCounter(name string, isGauge ...bool) Counter {
counter, err := defaultSet.GetOrCreateGauge(name)
func GetOrCreateCounter(name string) Counter {
c, err := defaultSet.GetOrCreateCounter(name)
if err != nil {
panic(fmt.Errorf("could not get or create new counter: %w", err))
}
return intCounter{counter}
return &counter{c}
}
// NewGaugeFunc registers and returns gauge with the given name, which calls f
// to obtain gauge value.
// NewGauge registers and returns gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
@ -102,19 +72,17 @@ func GetOrCreateCounter(name string, isGauge ...bool) Counter {
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines.
func NewGaugeFunc(name string, f func() float64) prometheus.GaugeFunc {
gf, err := defaultSet.NewGaugeFunc(name, f)
func NewGauge(name string) Gauge {
g, err := defaultSet.NewGauge(name)
if err != nil {
panic(fmt.Errorf("could not create new gauge func: %w", err))
panic(fmt.Errorf("could not create new gauge: %w", err))
}
return gf
return &gauge{g}
}
// GetOrCreateGaugeFunc returns registered gauge with the given name
// GetOrCreateGauge returns registered gauge with the given name
// or creates new gauge if the registry doesn't contain gauge with
// the given name.
//
@ -128,13 +96,13 @@ func NewGaugeFunc(name string, f func() float64) prometheus.GaugeFunc {
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
func GetOrCreateGaugeFunc(name string, f func() float64) prometheus.GaugeFunc {
gf, err := defaultSet.GetOrCreateGaugeFunc(name, f)
func GetOrCreateGauge(name string) Gauge {
g, err := defaultSet.GetOrCreateGauge(name)
if err != nil {
panic(fmt.Errorf("could not get or create new gauge func: %w", err))
panic(fmt.Errorf("could not get or create new gauge: %w", err))
}
return gf
return &gauge{g}
}
type summary struct {

View File

@ -226,8 +226,7 @@ func (s *Set) GetOrCreateCounter(name string, help ...string) (prometheus.Counte
return c, nil
}
// NewGauge registers and returns gauge with the given name in s, which calls f
// to obtain gauge value.
// NewGauge registers and returns gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
@ -308,91 +307,6 @@ func (s *Set) GetOrCreateGauge(name string, help ...string) (prometheus.Gauge, e
return g, nil
}
// NewGaugeFunc registers and returns gauge with the given name in s, which calls f
// to obtain gauge value.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines.
func (s *Set) NewGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
g, err := newGaugeFunc(name, f, help...)
if err != nil {
return nil, err
}
s.registerMetric(name, g)
return g, nil
}
func newGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
if f == nil {
return nil, fmt.Errorf("f cannot be nil")
}
name, labels, err := parseMetric(name)
if err != nil {
return nil, err
}
return prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: name,
Help: strings.Join(help, " "),
ConstLabels: labels,
}, f), nil
}
// GetOrCreateGaugeFunc returns registered gauge with the given name in s
// or creates new gauge if s doesn't contain gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
func (s *Set) GetOrCreateGaugeFunc(name string, f func() float64, help ...string) (prometheus.GaugeFunc, error) {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
metric, err := newGaugeFunc(name, f, help...)
if err != nil {
return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
}
nmNew := &namedMetric{
name: name,
metric: metric,
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
g, ok := nm.metric.(prometheus.GaugeFunc)
if !ok {
return nil, fmt.Errorf("metric %q isn't a Gauge. It is %T", name, nm.metric)
}
return g, nil
}
const defaultSummaryWindow = 5 * time.Minute
var defaultSummaryQuantiles = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.97: 0.003, 0.99: 0.001}
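With NewGaugeFunc and GetOrCreateGaugeFunc removed above, any caller that sampled state through a callback must now own a Gauge and push values. A hypothetical migration sketch (no such caller appears in this diff; "queue_depth" and queue are stand-ins):

// before: metrics.NewGaugeFunc("queue_depth", func() float64 { return float64(len(queue)) })
queueDepth := metrics.GetOrCreateGauge("queue_depth")
queueDepth.SetInt(len(queue)) // call wherever the sampled state changes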

View File

@ -0,0 +1,6 @@
package metrics
type ValueGetter interface {
GetValue() float64
GetValueUint64() uint64
}
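ValueGetter replaces the old Counter.Get: read-backs now go through GetValue/GetValueUint64 on both interfaces. A sketch mirroring the exec3.go call sites further down (syncMetrics and logger as defined there):

blk := syncMetrics[stages.Execution].GetValueUint64() // gauge value as uint64
logger.Info("progress", "blk", blk)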

View File

@ -46,22 +46,22 @@ import (
const StepsInBiggestFile = 32
var (
mxCurrentTx = metrics.GetOrCreateCounter("domain_tx_processed")
mxCurrentBlock = metrics.GetOrCreateCounter("domain_block_current")
mxRunningMerges = metrics.GetOrCreateCounter("domain_running_merges")
mxRunningCollations = metrics.GetOrCreateCounter("domain_running_collations")
mxCurrentTx = metrics.GetOrCreateGauge("domain_tx_processed")
mxCurrentBlock = metrics.GetOrCreateGauge("domain_block_current")
mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges")
mxRunningCollations = metrics.GetOrCreateGauge("domain_running_collations")
mxCollateTook = metrics.GetOrCreateHistogram("domain_collate_took")
mxPruneTook = metrics.GetOrCreateHistogram("domain_prune_took")
mxPruneHistTook = metrics.GetOrCreateHistogram("domain_prune_hist_took")
mxPruningProgress = metrics.GetOrCreateCounter("domain_pruning_progress")
mxCollationSize = metrics.GetOrCreateCounter("domain_collation_size")
mxCollationSizeHist = metrics.GetOrCreateCounter("domain_collation_hist_size")
mxPruningProgress = metrics.GetOrCreateGauge("domain_pruning_progress")
mxCollationSize = metrics.GetOrCreateGauge("domain_collation_size")
mxCollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size")
mxPruneSize = metrics.GetOrCreateCounter("domain_prune_size")
mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took")
mxStepCurrent = metrics.GetOrCreateCounter("domain_step_current")
mxStepCurrent = metrics.GetOrCreateGauge("domain_step_current")
mxStepTook = metrics.GetOrCreateHistogram("domain_step_took")
mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys")
mxCommitmentRunning = metrics.GetOrCreateCounter("domain_running_commitment")
mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment")
mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took")
mxCommitmentWriteTook = metrics.GetOrCreateHistogram("domain_commitment_write_took")
mxCommitmentUpdates = metrics.GetOrCreateCounter("domain_commitment_updates")
@ -304,7 +304,7 @@ func (a *Aggregator) SetTx(tx kv.RwTx) {
}
func (a *Aggregator) SetTxNum(txNum uint64) {
mxCurrentTx.Set(txNum)
mxCurrentTx.SetUint64(txNum)
a.txNum = txNum
a.accounts.SetTxNum(txNum)
@ -319,7 +319,7 @@ func (a *Aggregator) SetTxNum(txNum uint64) {
func (a *Aggregator) SetBlockNum(blockNum uint64) {
a.blockNum = blockNum
mxCurrentBlock.Set(blockNum)
mxCurrentBlock.SetUint64(blockNum)
}
func (a *Aggregator) SetWorkers(i int) {
@ -445,7 +445,7 @@ func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
mxCollateTook.UpdateDuration(start)
//mxCollationSize.Set(uint64(collation.valuesComp.Count()))
mxCollationSizeHist.Set(uint64(collation.historyComp.Count()))
mxCollationSizeHist.SetInt(collation.historyComp.Count())
if err != nil {
collation.Close()
@ -854,7 +854,7 @@ func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []b
saveStateAfter = false
}
mxCommitmentKeys.Add(int(a.commitment.comKeys))
mxCommitmentKeys.AddUint64(a.commitment.comKeys)
mxCommitmentTook.Update(a.commitment.comTook.Seconds())
defer func(t time.Time) { mxCommitmentWriteTook.UpdateDuration(t) }(time.Now())
@ -893,7 +893,7 @@ func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []b
return rootHash, nil
}
// Provides channel which receives commitment hash each time aggregation is occured
// AggregatedRoots provides a channel which receives the commitment hash each time aggregation occurs
func (a *Aggregator) AggregatedRoots() chan [length.Hash]byte {
return a.stepDoneNotice
}
@ -926,7 +926,7 @@ func (a *Aggregator) FinishTx() (err error) {
return err
}
step := a.txNum / a.aggregationStep
mxStepCurrent.Set(step)
mxStepCurrent.SetUint64(step)
if step == 0 {
a.notifyAggregated(rootHash)
@ -1292,7 +1292,7 @@ func DecodeAccountBytes(enc []byte) (nonce uint64, balance *uint256.Int, hash []
}
func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarnation uint64) []byte {
l := int(1)
l := 1
if nonce > 0 {
l += common.BitLenToByteLen(bits.Len64(nonce))
}

View File

@ -32,13 +32,12 @@ import (
"time"
"github.com/RoaringBitmap/roaring/roaring64"
"github.com/ledgerwatch/erigon-lib/common/background"
"github.com/ledgerwatch/log/v3"
btree2 "github.com/tidwall/btree"
"golang.org/x/sync/errgroup"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/background"
"github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/compress"
"github.com/ledgerwatch/erigon-lib/kv"
@ -713,8 +712,8 @@ func (d *Domain) aggregate(ctx context.Context, step uint64, txFrom, txTo uint64
mxRunningCollations.Dec()
mxCollateTook.UpdateDuration(start)
mxCollationSize.Set(uint64(collation.valuesComp.Count()))
mxCollationSizeHist.Set(uint64(collation.historyComp.Count()))
mxCollationSize.SetInt(collation.valuesComp.Count())
mxCollationSizeHist.SetInt(collation.historyComp.Count())
if err != nil {
collation.Close()

View File

@ -68,10 +68,10 @@ var (
writeToDBTimer = metrics.NewSummary(`pool_write_to_db`)
propagateToNewPeerTimer = metrics.NewSummary(`pool_propagate_to_new_peer`)
propagateNewTxsTimer = metrics.NewSummary(`pool_propagate_new_txs`)
writeToDBBytesCounter = metrics.GetOrCreateCounter(`pool_write_to_db_bytes`)
pendingSubCounter = metrics.GetOrCreateCounter(`txpool_pending`)
queuedSubCounter = metrics.GetOrCreateCounter(`txpool_queued`)
basefeeSubCounter = metrics.GetOrCreateCounter(`txpool_basefee`)
writeToDBBytesCounter = metrics.GetOrCreateGauge(`pool_write_to_db_bytes`)
pendingSubCounter = metrics.GetOrCreateGauge(`txpool_pending`)
queuedSubCounter = metrics.GetOrCreateGauge(`txpool_queued`)
basefeeSubCounter = metrics.GetOrCreateGauge(`txpool_basefee`)
)
// Pool is interface for the transaction pool
@ -1685,7 +1685,7 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs
p.logger.Error("[txpool] flush is local history", "err", err)
continue
}
writeToDBBytesCounter.Set(written)
writeToDBBytesCounter.SetUint64(written)
p.logger.Debug("[txpool] Commit", "written_kb", written/1024, "in", time.Since(t))
}
case announcements := <-newTxs:
@ -2115,9 +2115,9 @@ func (p *TxPool) logStats() {
}
ctx = append(ctx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
p.logger.Info("[txpool] stat", ctx...)
pendingSubCounter.Set(uint64(p.pending.Len()))
basefeeSubCounter.Set(uint64(p.baseFee.Len()))
queuedSubCounter.Set(uint64(p.queued.Len()))
pendingSubCounter.SetInt(p.pending.Len())
basefeeSubCounter.SetInt(p.baseFee.Len())
queuedSubCounter.SetInt(p.queued.Len())
}
// Deprecated need switch to streaming-like

View File

@ -2,18 +2,19 @@ package stagedsync
import (
"fmt"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/huandu/xstrings"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
)
var syncMetrics = map[stages.SyncStage]metrics.Counter{}
var syncMetrics = map[stages.SyncStage]metrics.Gauge{}
func init() {
for _, v := range stages.AllStages {
syncMetrics[v] = metrics.GetOrCreateCounter(
syncMetrics[v] = metrics.GetOrCreateGauge(
fmt.Sprintf(
`sync{stage="%s"}`,
xstrings.ToSnakeCase(string(v)),
@ -30,7 +31,7 @@ func UpdateMetrics(tx kv.Tx) error {
if err != nil {
return err
}
m.Set(progress)
m.SetUint64(progress)
}
return nil
}

View File

@ -44,7 +44,7 @@ import (
"github.com/ledgerwatch/erigon/turbo/services"
)
var execStepsInDB = metrics.NewCounter(`exec_steps_in_db`) //nolint
var execStepsInDB = metrics.NewGauge(`exec_steps_in_db`) //nolint
var execRepeats = metrics.NewCounter(`exec_repeats`) //nolint
var execTriggers = metrics.NewCounter(`exec_triggers`) //nolint
@ -65,7 +65,7 @@ type Progress struct {
}
func (p *Progress) Log(rs *state.StateV3, in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, doneCount, inputBlockNum, outputBlockNum, outTxNum, repeatCount uint64, idxStepsAmountInDB float64) {
execStepsInDB.Set(uint64(idxStepsAmountInDB * 100))
execStepsInDB.Set(idxStepsAmountInDB * 100)
var m runtime.MemStats
dbg.ReadMemStats(&m)
sizeEstimate := rs.SizeEstimate()
@ -280,10 +280,10 @@ func ExecV3(ctx context.Context,
return err
}
execRepeats.Add(conflicts)
execTriggers.Add(triggers)
execRepeats.AddInt(conflicts)
execTriggers.AddInt(triggers)
if processedBlockNum > lastBlockNum {
outputBlockNum.Set(processedBlockNum)
outputBlockNum.SetUint64(processedBlockNum)
lastBlockNum = processedBlockNum
}
if processedTxNum > 0 {
@ -334,7 +334,7 @@ func ExecV3(ctx context.Context,
case <-logEvery.C:
stepsInDB := rawdbhelpers.IdxStepsCountV3(tx)
progress.Log(rs, in, rws, rs.DoneCount(), inputBlockNum.Load(), outputBlockNum.Get(), outputTxNum.Load(), execRepeats.Get(), stepsInDB)
progress.Log(rs, in, rws, rs.DoneCount(), inputBlockNum.Load(), outputBlockNum.GetValueUint64(), outputTxNum.Load(), execRepeats.GetValueUint64(), stepsInDB)
if agg.HasBackgroundFilesBuild() {
logger.Info(fmt.Sprintf("[%s] Background files build", logPrefix), "progress", agg.BackgroundProgress())
}
@ -369,10 +369,10 @@ func ExecV3(ctx context.Context,
return err
}
execRepeats.Add(conflicts)
execTriggers.Add(triggers)
execRepeats.AddInt(conflicts)
execTriggers.AddInt(triggers)
if processedBlockNum > 0 {
outputBlockNum.Set(processedBlockNum)
outputBlockNum.SetUint64(processedBlockNum)
}
if processedTxNum > 0 {
outputTxNum.Store(processedTxNum)
@ -411,7 +411,7 @@ func ExecV3(ctx context.Context,
}
t3 = time.Since(tt)
if err = execStage.Update(tx, outputBlockNum.Get()); err != nil {
if err = execStage.Update(tx, outputBlockNum.GetValueUint64()); err != nil {
return err
}
@ -449,7 +449,7 @@ func ExecV3(ctx context.Context,
if err = agg.Flush(ctx, tx); err != nil {
return err
}
if err = execStage.Update(tx, outputBlockNum.Get()); err != nil {
if err = execStage.Update(tx, outputBlockNum.GetValueUint64()); err != nil {
return err
}
if err = tx.Commit(); err != nil {
@ -657,7 +657,7 @@ Loop:
if err := rs.ApplyState(applyTx, txTask, agg); err != nil {
return fmt.Errorf("StateV3.Apply: %w", err)
}
execTriggers.Add(rs.CommitTxNum(txTask.Sender, txTask.TxNum, in))
execTriggers.AddInt(rs.CommitTxNum(txTask.Sender, txTask.TxNum, in))
outputTxNum.Add(1)
if err := rs.ApplyHistory(txTask, agg); err != nil {
@ -669,12 +669,12 @@ Loop:
}
if !parallel {
outputBlockNum.Set(blockNum)
outputBlockNum.SetUint64(blockNum)
select {
case <-logEvery.C:
stepsInDB := rawdbhelpers.IdxStepsCountV3(applyTx)
progress.Log(rs, in, rws, count, inputBlockNum.Load(), outputBlockNum.Get(), outputTxNum.Load(), execRepeats.Get(), stepsInDB)
progress.Log(rs, in, rws, count, inputBlockNum.Load(), outputBlockNum.GetValueUint64(), outputTxNum.Load(), execRepeats.GetValueUint64(), stepsInDB)
if rs.SizeEstimate() < commitThreshold {
break
}
@ -695,7 +695,7 @@ Loop:
}
t3 = time.Since(tt)
if err = execStage.Update(applyTx, outputBlockNum.Get()); err != nil {
if err = execStage.Update(applyTx, outputBlockNum.GetValueUint64()); err != nil {
return err
}
@ -995,7 +995,7 @@ func reconstituteStep(last bool,
logger.Info(fmt.Sprintf("[%s] State reconstitution", s.LogPrefix()), "overall progress", fmt.Sprintf("%.2f%%", progress),
"step progress", fmt.Sprintf("%.2f%%", stepProgress),
"tx/s", fmt.Sprintf("%.1f", speedTx), "workCh", fmt.Sprintf("%d/%d", len(workCh), cap(workCh)),
"repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "queue.len", rs.QueueLen(), "blk", syncMetrics[stages.Execution].Get(),
"repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "queue.len", rs.QueueLen(), "blk", syncMetrics[stages.Execution].GetValueUint64(),
"buffer", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(commitThreshold)),
"alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
if sizeEstimate >= commitThreshold {
@ -1106,7 +1106,7 @@ func reconstituteStep(last bool,
inputTxNum++
}
syncMetrics[stages.Execution].Set(bn)
syncMetrics[stages.Execution].SetUint64(bn)
}
return err
}(); err != nil {

View File

@ -1,10 +1,10 @@
package stagedsync
import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
)
@ -51,7 +51,7 @@ func (s *StageState) LogPrefix() string { return s.state.LogPrefix() }
// Update updates the stage state (current block number) in the database. Can be called multiple times during stage execution.
func (s *StageState) Update(db kv.Putter, newBlockNum uint64) error {
if m, ok := syncMetrics[s.ID]; ok {
m.Set(newBlockNum)
m.SetUint64(newBlockNum)
}
return stages.SaveStageProgress(db, s.ID, newBlockNum)
}

View File

@ -5,14 +5,11 @@ import (
"encoding/binary"
"errors"
"fmt"
"github.com/ledgerwatch/erigon-lib/kv/dbutils"
"os"
"runtime"
"time"
"github.com/c2h5oh/datasize"
"github.com/ledgerwatch/erigon-lib/kv/membatch"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/log/v3"
"golang.org/x/sync/errgroup"
@ -25,10 +22,12 @@ import (
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/etl"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/kv/membatch"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/common/changeset"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus"
@ -464,8 +463,8 @@ Loop:
writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to)
writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to)
_, is_memory_mutation := tx.(*membatchwithdb.MemoryMutation)
if cfg.silkworm != nil && !is_memory_mutation {
_, isMemoryMutation := tx.(*membatchwithdb.MemoryMutation)
if cfg.silkworm != nil && !isMemoryMutation {
blockNum, err = cfg.silkworm.ExecuteBlocks(tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces)
} else {
err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger)
@ -536,7 +535,7 @@ Loop:
logBlock, logTx, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, logTx, lastLogTx, gas, float64(currentStateGas)/float64(gasState), batch, logger)
gas = 0
tx.CollectMetrics()
syncMetrics[stages.Execution].Set(blockNum)
syncMetrics[stages.Execution].SetUint64(blockNum)
}
}

View File

@ -19,8 +19,9 @@
package p2p
import (
"github.com/ledgerwatch/erigon-lib/metrics"
"net"
"github.com/ledgerwatch/erigon-lib/metrics"
)
const (
@ -33,7 +34,7 @@ var (
ingressTrafficMeter = metrics.GetOrCreateCounter(ingressMeterName)
egressConnectMeter = metrics.GetOrCreateCounter("p2p_dials")
egressTrafficMeter = metrics.GetOrCreateCounter(egressMeterName)
activePeerGauge = metrics.GetOrCreateCounter("p2p_peers", true)
activePeerGauge = metrics.GetOrCreateGauge("p2p_peers")
)
// meteredConn is a wrapper around a net.Conn that meters both the
@ -60,7 +61,7 @@ func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
// and the peer ingress traffic meters along the way.
func (c *meteredConn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b)
ingressTrafficMeter.Add(n)
ingressTrafficMeter.AddInt(n)
return n, err
}
@ -68,7 +69,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
// and the peer egress traffic meters along the way.
func (c *meteredConn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b)
egressTrafficMeter.Add(n)
egressTrafficMeter.AddInt(n)
return n, err
}

View File

@ -26,11 +26,10 @@ import (
"sync"
"time"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/diagnostics"
"github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/common/debug"
"github.com/ledgerwatch/erigon/common/mclock"
"github.com/ledgerwatch/erigon/event"
@ -414,8 +413,8 @@ func (p *Peer) handle(msg Msg) error {
if p.metricsEnabled {
m := fmt.Sprintf("%s_%s_%d_%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset)
metrics.GetOrCreateCounter(m).Set(uint64(msg.meterSize))
metrics.GetOrCreateCounter(m + "_packets").Set(1)
metrics.GetOrCreateGauge(m).SetUint32(msg.meterSize)
metrics.GetOrCreateGauge(m + "_packets").Set(1)
}
select {
case proto.in <- msg: