revert db metrics - not guilty (#1828)

Alex Sharov 2021-04-27 15:32:41 +07:00 committed by GitHub
parent e8161541e2
commit 9cc4a0a97c
8 changed files with 49 additions and 303 deletions

View File

@ -16,7 +16,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": 1,
"iteration": 1618909886787,
"iteration": 1619510072029,
"links": [],
"panels": [
{
@ -657,19 +657,19 @@
"gridPos": {
"h": 4,
"w": 8,
"x": 0,
"x": 16,
"y": 10
},
"hiddenSeries": false,
"id": 160,
"id": 167,
"legend": {
"avg": false,
"current": true,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": true
"values": false
},
"lines": true,
"linewidth": 1,
@ -688,115 +688,31 @@
"steppedLine": false,
"targets": [
{
"expr": "table_scs_leaf{instance=~\"$instance\"}",
"expr": "tx_dirty{instance=~\"$instance\"}",
"interval": "",
"legendFormat": "scs(dup) leaf: {{instance}}",
"legendFormat": "dirty: {{instance}}",
"refId": "A"
},
{
"expr": "table_scs_branch{instance=~\"$instance\"}",
"expr": "tx_spill{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "scs(dup) branche: {{instance}}",
"legendFormat": "spill: {{instance}}",
"refId": "B"
},
{
"expr": "table_scs_overflow{instance=~\"$instance\"}",
"expr": "tx_unspill{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "scs(dup) overflow: {{instance}}",
"legendFormat": "unspill: {{instance}}",
"refId": "C"
},
{
"expr": "table_state_leaf{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "state(dup) leaf: {{instance}}",
"refId": "D"
},
{
"expr": "table_state_branch{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "state(dup) branch: {{instance}}",
"refId": "E"
},
{
"expr": "table_state_overflow{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "state(dup) overflow: {{instance}}",
"refId": "F"
},
{
"expr": "table_log_leaf{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "log leaf: {{instance}}",
"refId": "G"
},
{
"expr": "table_log_branch{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "log branch: {{instance}}",
"refId": "H"
},
{
"expr": "table_log_overflow{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "log overflow: {{instance}}",
"refId": "I"
},
{
"expr": "table_tx_leaf{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "tx leaf: {{instance}}",
"refId": "J"
},
{
"expr": "table_tx_branch{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "tx branch: {{instance}}",
"refId": "K"
},
{
"expr": "table_tx_overflow{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "tx overflow: {{instance}}",
"refId": "L"
},
{
"expr": "table_gc_leaf{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "gc leaf: {{instance}}",
"refId": "M"
},
{
"expr": "table_gc_branch{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "gc branch: {{instance}}",
"refId": "N"
},
{
"expr": "table_gc_overflow{instance=~\"$instance\"}",
"hide": false,
"interval": "",
"legendFormat": "gc overflow: {{instance}}",
"refId": "O"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Tables btree - amount of leaf/branch/overflow pages",
"title": "Tx Size",
"tooltip": {
"shared": true,
"sort": 0,
@ -813,132 +729,7 @@
"yaxes": [
{
"$$hashKey": "object:117",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:118",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 4,
"w": 8,
"x": 8,
"y": 10
},
"hiddenSeries": false,
"id": 161,
"legend": {
"avg": false,
"current": true,
"max": false,
"min": false,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "table_scs_entries{instance=~\"$instance\"} / (table_scs_leaf{instance=~\"$instance\"} + table_scs_branch{instance=~\"$instance\"} + table_scs_overflow{instance=~\"$instance\"})",
"interval": "",
"legendFormat": "scs(dup): {{instance}}",
"refId": "A"
},
{
"expr": "table_state_entries{instance=~\"$instance\"} / (table_state_leaf{instance=~\"$instance\"} + table_state_branch{instance=~\"$instance\"} + table_state_overflow{instance=~\"$instance\"})",
"hide": false,
"interval": "",
"legendFormat": "state(dup): {{instance}}",
"refId": "B"
},
{
"expr": "table_log_entries{instance=~\"$instance\"} / (table_log_leaf{instance=~\"$instance\"} + table_log_branch{instance=~\"$instance\"} + table_log_overflow{instance=~\"$instance\"})",
"hide": false,
"interval": "",
"legendFormat": "log: {{instance}}",
"refId": "C"
},
{
"expr": "table_tx_entries{instance=~\"$instance\"} / (table_tx_leaf{instance=~\"$instance\"} + table_tx_branch{instance=~\"$instance\"} + table_tx_overflow{instance=~\"$instance\"})",
"hide": false,
"interval": "",
"legendFormat": "tx: {{instance}}",
"refId": "D"
},
{
"expr": "table_gc_entries{instance=~\"$instance\"} / (table_gc_leaf{instance=~\"$instance\"} + table_gc_branch{instance=~\"$instance\"} + table_gc_overflow{instance=~\"$instance\"})",
"hide": false,
"interval": "",
"legendFormat": "gc: {{instance}}",
"refId": "E"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Tables density - amount keys per page",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:117",
"format": "short",
"format": "decbytes",
"label": null,
"logBase": 1,
"max": null,
@ -4644,7 +4435,7 @@
{
"allValue": null,
"current": {
"selected": true,
"selected": false,
"text": "0.95",
"value": "0.95"
},
@ -4726,7 +4517,7 @@
]
},
"time": {
"from": "now-3h",
"from": "now-12h",
"to": "now"
},
"timepicker": {
@ -4756,5 +4547,5 @@
"timezone": "",
"title": "TurboGeth Prometheus",
"uid": "FPpjH6Hik",
"version": 71
"version": 72
}

View File

@ -227,6 +227,9 @@ func SpawnExecuteBlocksStage(s *StageState, stateDB ethdb.Database, toBlock uint
default:
case <-logEvery.C:
logBlock, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, batch)
if hasTx, ok := tx.(ethdb.HasTx); ok {
hasTx.Tx().CollectMetrics()
}
}
stageExecutionGauge.Update(int64(blockNum))
}
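
This hunk piggybacks metrics collection on the stage's existing progress-log ticker: once per tick, if the database handle can expose its underlying transaction, that transaction publishes its gauges. Below is a minimal, runnable sketch of the same pattern, with demo types standing in for ethdb.HasTx and ethdb.Tx:

package main

import (
	"fmt"
	"time"
)

// Demo stand-ins for ethdb.Tx and ethdb.HasTx.
type Tx interface {
	CollectMetrics()
}

type HasTx interface {
	Tx() Tx
}

type demoTx struct{}

func (demoTx) CollectMetrics() { fmt.Println("published tx gauges") }

type demoDB struct{}

func (demoDB) Tx() Tx { return demoTx{} }

func main() {
	var db interface{} = demoDB{}
	logEvery := time.NewTicker(100 * time.Millisecond)
	defer logEvery.Stop()

	for i := 0; i < 3; i++ {
		<-logEvery.C
		// Same shape as the diff: collection is optional and runs only
		// when the concrete DB type can surface its transaction.
		if hasTx, ok := db.(HasTx); ok {
			hasTx.Tx().CollectMetrics()
		}
	}
}

Keeping the call on the log tick means the gauges refresh at logging cadence rather than once per block, so the sampling cost stays negligible.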

View File

@ -15,7 +15,10 @@ var (
ErrAttemptToDeleteNonDeprecatedBucket = errors.New("only buckets from dbutils.DeprecatedBuckets can be deleted")
ErrUnknownBucket = errors.New("unknown bucket. add it to dbutils.Buckets")
dbSize = metrics.GetOrRegisterGauge("db/size", metrics.DefaultRegistry) //nolint
txSpill = metrics.GetOrRegisterGauge("tx/spill", metrics.DefaultRegistry) //nolint
txUnspill = metrics.GetOrRegisterGauge("tx/unspill", metrics.DefaultRegistry) //nolint
txDirty = metrics.GetOrRegisterGauge("tx/dirty", metrics.DefaultRegistry) //nolint
)
type Has interface {
@ -142,6 +145,7 @@ type Tx interface {
Comparator(bucket string) dbutils.CmpFunc
CHandle() unsafe.Pointer // Pointer to the underlying C transaction handle (e.g. *C.MDB_txn)
CollectMetrics()
}
type RwTx interface {
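
The new gauges reuse the registration style this file already applies to db/size. The slash-separated registry names (tx/spill, tx/unspill, tx/dirty) evidently reach Prometheus with underscores, since the dashboard above queries tx_dirty, tx_spill and tx_unspill. A standalone sketch of the gauge lifecycle, written against the rcrowley/go-metrics package whose API these calls match (an assumption; the project wraps its own metrics package):

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

// GetOrRegisterGauge is idempotent, so package-level registration is safe.
var txDirty = metrics.GetOrRegisterGauge("tx/dirty", metrics.DefaultRegistry)

func main() {
	// A write transaction would report its dirty-page volume here.
	txDirty.Update(1 << 20)
	fmt.Println("tx/dirty =", txDirty.Value())
}

The registry name keeps the slash; only the exporter appears to rewrite it, so Go code and dashboards refer to the same gauge under two spellings.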

View File

@ -317,44 +317,8 @@ func (db *LmdbKV) DiskSize(_ context.Context) (uint64, error) {
}
func (db *LmdbKV) CollectMetrics() {
/*
fileInfo, _ := os.Stat(path.Join(db.opts.path, "data.mdb"))
dbSize.Update(fileInfo.Size())
if err := db.View(context.Background(), func(tx Tx) error {
stat, _ := tx.(*lmdbTx).BucketStat(dbutils.PlainStorageChangeSetBucket)
tableScsLeaf.Update(int64(stat.LeafPages))
tableScsBranch.Update(int64(stat.BranchPages))
tableScsOverflow.Update(int64(stat.OverflowPages))
tableScsEntries.Update(int64(stat.Entries))
stat, _ = tx.(*lmdbTx).BucketStat(dbutils.PlainStateBucket)
tableStateLeaf.Update(int64(stat.LeafPages))
tableStateBranch.Update(int64(stat.BranchPages))
tableStateOverflow.Update(int64(stat.OverflowPages))
tableStateEntries.Update(int64(stat.Entries))
stat, _ = tx.(*lmdbTx).BucketStat(dbutils.Log)
tableLogLeaf.Update(int64(stat.LeafPages))
tableLogBranch.Update(int64(stat.BranchPages))
tableLogOverflow.Update(int64(stat.OverflowPages))
tableLogEntries.Update(int64(stat.Entries))
stat, _ = tx.(*lmdbTx).BucketStat(dbutils.EthTx)
tableTxLeaf.Update(int64(stat.LeafPages))
tableTxBranch.Update(int64(stat.BranchPages))
tableTxOverflow.Update(int64(stat.OverflowPages))
tableTxEntries.Update(int64(stat.Entries))
stat, _ = tx.(*lmdbTx).BucketStat("gc")
tableGcLeaf.Update(int64(stat.LeafPages))
tableGcBranch.Update(int64(stat.BranchPages))
tableGcOverflow.Update(int64(stat.OverflowPages))
tableGcEntries.Update(int64(stat.Entries))
return nil
}); err != nil {
log.Error("collecting metrics failed", "err", err)
}
*/
fileInfo, _ := os.Stat(path.Join(db.opts.path, "data.mdb"))
dbSize.Update(fileInfo.Size())
}
func (db *LmdbKV) BeginRo(_ context.Context) (txn Tx, err error) {
@ -499,6 +463,8 @@ func (db *LmdbKV) Update(ctx context.Context, f func(tx RwTx) error) (err error)
return nil
}
func (tx *lmdbTx) CollectMetrics() {}
func (tx *lmdbTx) CreateBucket(name string) error {
var flags = tx.db.buckets[name].Flags
var nativeFlags uint
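
After the revert, the LMDB flavor keeps only the coarse db/size gauge, taken from the size of the data file rather than from per-bucket B-tree stats. One caveat worth noting: the code above discards the os.Stat error, so a missing data.mdb would leave fileInfo nil and panic on Size(). A small sketch of the surviving path with the error checked; the directory is a placeholder:

package main

import (
	"fmt"
	"os"
	"path"
)

func main() {
	// Placeholder chaindata directory; data.mdb is LMDB's single data file.
	dir := "/tmp/chaindata"

	fileInfo, err := os.Stat(path.Join(dir, "data.mdb"))
	if err != nil {
		// Guarding the error avoids dereferencing a nil FileInfo when
		// the file does not exist.
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("db size in bytes:", fileInfo.Size())
}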

View File

@ -299,45 +299,8 @@ func (db *MdbxKV) DiskSize(_ context.Context) (uint64, error) {
}
func (db *MdbxKV) CollectMetrics() {
/*
info, _ := db.env.Info()
dbSize.Update(int64(info.Geo.Current))
if err := db.View(context.Background(), func(tx Tx) error {
stat, _ := tx.(*MdbxTx).BucketStat(dbutils.PlainStorageChangeSetBucket)
tableScsLeaf.Update(int64(stat.LeafPages))
tableScsBranch.Update(int64(stat.BranchPages))
tableScsOverflow.Update(int64(stat.OverflowPages))
tableScsEntries.Update(int64(stat.Entries))
stat, _ = tx.(*MdbxTx).BucketStat(dbutils.PlainStateBucket)
tableStateLeaf.Update(int64(stat.LeafPages))
tableStateBranch.Update(int64(stat.BranchPages))
tableStateOverflow.Update(int64(stat.OverflowPages))
tableStateEntries.Update(int64(stat.Entries))
stat, _ = tx.(*MdbxTx).BucketStat(dbutils.Log)
tableLogLeaf.Update(int64(stat.LeafPages))
tableLogBranch.Update(int64(stat.BranchPages))
tableLogOverflow.Update(int64(stat.OverflowPages))
tableLogEntries.Update(int64(stat.Entries))
stat, _ = tx.(*MdbxTx).BucketStat(dbutils.EthTx)
tableTxLeaf.Update(int64(stat.LeafPages))
tableTxBranch.Update(int64(stat.BranchPages))
tableTxOverflow.Update(int64(stat.OverflowPages))
tableTxEntries.Update(int64(stat.Entries))
stat, _ = tx.(*MdbxTx).BucketStat("gc")
tableGcLeaf.Update(int64(stat.LeafPages))
tableGcBranch.Update(int64(stat.BranchPages))
tableGcOverflow.Update(int64(stat.OverflowPages))
tableGcEntries.Update(int64(stat.Entries))
return nil
}); err != nil {
log.Error("collecting metrics failed", "err", err)
}
*/
info, _ := db.env.Info()
dbSize.Update(int64(info.Geo.Current))
}
func (db *MdbxKV) BeginRo(_ context.Context) (txn Tx, err error) {
@ -418,6 +381,17 @@ func (db *MdbxKV) AllBuckets() dbutils.BucketsCfg {
return db.buckets
}
func (tx *MdbxTx) CollectMetrics() {
txInfo, err := tx.tx.Info(true)
if err != nil {
panic(err)
}
txDirty.Update(int64(txInfo.SpaceDirty))
txSpill.Update(int64(txInfo.Spill))
txUnspill.Update(int64(txInfo.Unspill))
}
func (tx *MdbxTx) Comparator(bucket string) dbutils.CmpFunc {
b := tx.db.buckets[bucket]
return chooseComparator2(tx.tx, mdbx.DBI(b.DBI), b)
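
On MDBX, per-transaction accounting replaces the per-table stats: tx.tx.Info(true) returns dirty, spill and unspill figures for the write transaction, and CollectMetrics panics if that call fails. A runnable sketch of the same flow with the error logged instead of panicking, using a stand-in for the mdbx TxInfo binding so it runs without a real MDBX environment:

package main

import "fmt"

// TxInfoLike mirrors the three fields CollectMetrics publishes; it stands in
// for the mdbx.TxInfo struct extended by this commit.
type TxInfoLike struct {
	SpaceDirty, Spill, Unspill uint64
}

// collect abstracts tx.tx.Info(true) behind a callback.
func collect(info func(scanRlt bool) (*TxInfoLike, error)) {
	txInfo, err := info(true) // true: also scan the reader lock table
	if err != nil {
		fmt.Println("tx info failed:", err) // log rather than panic
		return
	}
	fmt.Println("dirty:", txInfo.SpaceDirty)
	fmt.Println("spill:", txInfo.Spill)
	fmt.Println("unspill:", txInfo.Unspill)
}

func main() {
	collect(func(bool) (*TxInfoLike, error) {
		return &TxInfoLike{SpaceDirty: 1 << 20, Spill: 12, Unspill: 3}, nil
	})
}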

View File

@ -252,6 +252,7 @@ func (db *RemoteKV) Update(ctx context.Context, f func(tx RwTx) error) (err erro
}
func (tx *remoteTx) Comparator(bucket string) dbutils.CmpFunc { panic("not implemented yet") }
func (tx *remoteTx) CollectMetrics() {}
func (tx *remoteTx) CHandle() unsafe.Pointer { panic("not implemented yet") }
func (tx *remoteTx) IncrementSequence(bucket string, amount uint64) (uint64, error) {

View File

@ -253,6 +253,10 @@ func (s *sn2TX) Delete(bucket string, k, v []byte) error {
return s.dbTX.(RwTx).Delete(bucket, k, v)
}
func (s *sn2TX) CollectMetrics() {
s.dbTX.CollectMetrics()
}
func (s *sn2TX) getSnapshotTX(bucket string) (Tx, error) {
tx, ok := s.snTX[bucket]
if ok {

View File

@ -421,6 +421,9 @@ type TxInfo struct {
For WRITE transaction: The summarized size of the dirty database
pages generated during this transaction. */
SpaceDirty uint64
Spill uint64
Unspill uint64
}
// scan_rlt The boolean flag controls the scan of the read lock
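
For context on the two new fields: in libmdbx, a write transaction keeps its dirty pages in RAM, and when that set outgrows its budget the engine spills some of them to disk, reading them back (unspilling) if they are touched again. Sustained spill/unspill during large write transactions therefore indicates memory pressure, which is presumably why this commit charts them alongside SpaceDirty. A sketch of doc comments the new fields could carry (the wording is mine, not from libmdbx):

// Spill: how much of this write transaction's dirty set MDBX has written
// out to disk to relieve memory pressure.
Spill uint64
// Unspill: how much previously spilled data has been read back in after
// being touched again.
Unspill uint64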