domain: files generic btree

This commit is contained in:
Alex Sharov 2022-07-18 16:05:04 +07:00 committed by GitHub
parent 5821ae7bbc
commit ebea2863c1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 33 additions and 35 deletions

View File

@ -57,6 +57,13 @@ type filesItem struct {
readerMerge *recsplit.IndexReader // index reader for the background merge thread readerMerge *recsplit.IndexReader // index reader for the background merge thread
} }
// filesItemLess orders filesItem values for the generic B-tree:
// primarily by ascending endTxNum; when two items end at the same
// transaction number, the one covering the larger span (smaller
// startTxNum appears later, i.e. larger startTxNum sorts first).
func filesItemLess(a, b *filesItem) bool {
	if a.endTxNum != b.endTxNum {
		return a.endTxNum < b.endTxNum
	}
	return a.startTxNum > b.startTxNum
}
func (i *filesItem) Less(than btree.Item) bool { func (i *filesItem) Less(than btree.Item) bool {
if i.endTxNum == than.(*filesItem).endTxNum { if i.endTxNum == than.(*filesItem).endTxNum {
return i.startTxNum > than.(*filesItem).startTxNum return i.startTxNum > than.(*filesItem).startTxNum
@ -123,8 +130,8 @@ type Domain struct {
indexTable string // Needs to be table with DupSort indexTable string // Needs to be table with DupSort
tx kv.RwTx tx kv.RwTx
txNum uint64 txNum uint64
files [NumberOfTypes]*btree.BTree // Static files pertaining to this domain, items are of type `filesItem` files [NumberOfTypes]*btree.BTreeG[*filesItem] // Static files pertaining to this domain, items are of type `filesItem`
prefixLen int // Number of bytes in the keys that can be used for prefix iteration prefixLen int // Number of bytes in the keys that can be used for prefix iteration
compressVals bool compressVals bool
stats DomainStats stats DomainStats
} }
@ -160,7 +167,7 @@ func NewDomain(
compressVals: compressVals, compressVals: compressVals,
} }
for fType := FileType(0); fType < NumberOfTypes; fType++ { for fType := FileType(0); fType < NumberOfTypes; fType++ {
d.files[fType] = btree.New(32) d.files[fType] = btree.NewG[*filesItem](32, filesItemLess)
} }
d.scanStateFiles(files) d.scanStateFiles(files)
for fType := FileType(0); fType < NumberOfTypes; fType++ { for fType := FileType(0); fType < NumberOfTypes; fType++ {
@ -212,8 +219,7 @@ func (d *Domain) scanStateFiles(files []fs.DirEntry) {
} }
var item = &filesItem{startTxNum: startTxNum * d.aggregationStep, endTxNum: endTxNum * d.aggregationStep} var item = &filesItem{startTxNum: startTxNum * d.aggregationStep, endTxNum: endTxNum * d.aggregationStep}
var foundI *filesItem var foundI *filesItem
d.files[fType].AscendGreaterOrEqual(&filesItem{startTxNum: endTxNum * d.aggregationStep, endTxNum: endTxNum * d.aggregationStep}, func(i btree.Item) bool { d.files[fType].AscendGreaterOrEqual(&filesItem{startTxNum: endTxNum * d.aggregationStep, endTxNum: endTxNum * d.aggregationStep}, func(it *filesItem) bool {
it := i.(*filesItem)
if it.endTxNum == endTxNum { if it.endTxNum == endTxNum {
foundI = it foundI = it
} }
@ -229,8 +235,7 @@ func (d *Domain) scanStateFiles(files []fs.DirEntry) {
func (d *Domain) openFiles(fType FileType) error { func (d *Domain) openFiles(fType FileType) error {
var err error var err error
var totalKeys uint64 var totalKeys uint64
d.files[fType].Ascend(func(i btree.Item) bool { d.files[fType].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
datPath := filepath.Join(d.dir, fmt.Sprintf("%s-%s.%d-%d.dat", d.filenameBase, fType.String(), item.startTxNum/d.aggregationStep, item.endTxNum/d.aggregationStep)) datPath := filepath.Join(d.dir, fmt.Sprintf("%s-%s.%d-%d.dat", d.filenameBase, fType.String(), item.startTxNum/d.aggregationStep, item.endTxNum/d.aggregationStep))
if item.decompressor, err = compress.NewDecompressor(datPath); err != nil { if item.decompressor, err = compress.NewDecompressor(datPath); err != nil {
return false return false
@ -253,8 +258,7 @@ func (d *Domain) openFiles(fType FileType) error {
} }
func (d *Domain) closeFiles(fType FileType) { func (d *Domain) closeFiles(fType FileType) {
d.files[fType].Ascend(func(i btree.Item) bool { d.files[fType].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.decompressor != nil { if item.decompressor != nil {
item.decompressor.Close() item.decompressor.Close()
} }
@ -473,8 +477,7 @@ func (d *Domain) IteratePrefix(prefix []byte, it func(k, v []byte)) error {
} }
heap.Push(&cp, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), c: keysCursor, endTxNum: txNum}) heap.Push(&cp, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), c: keysCursor, endTxNum: txNum})
} }
d.files[Values].Ascend(func(i btree.Item) bool { d.files[Values].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.index.Empty() { if item.index.Empty() {
return true return true
} }
@ -988,8 +991,7 @@ func (d *Domain) prune(step uint64, txFrom, txTo uint64) error {
func (d *Domain) readFromFiles(fType FileType, filekey []byte) ([]byte, bool) { func (d *Domain) readFromFiles(fType FileType, filekey []byte) ([]byte, bool) {
var val []byte var val []byte
var found bool var found bool
d.files[fType].Descend(func(i btree.Item) bool { d.files[fType].Descend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.index.Empty() { if item.index.Empty() {
return true return true
} }
@ -1021,12 +1023,11 @@ func (d *Domain) historyBeforeTxNum(key []byte, txNum uint64, roTx kv.Tx) ([]byt
var found bool var found bool
var anyItem bool // Whether any filesItem has been looked at in the loop below var anyItem bool // Whether any filesItem has been looked at in the loop below
var topState *filesItem var topState *filesItem
d.files[Values].AscendGreaterOrEqual(&search, func(i btree.Item) bool { d.files[Values].AscendGreaterOrEqual(&search, func(i *filesItem) bool {
topState = i.(*filesItem) topState = i
return false return false
}) })
d.files[EfHistory].AscendGreaterOrEqual(&search, func(i btree.Item) bool { d.files[EfHistory].AscendGreaterOrEqual(&search, func(item *filesItem) bool {
item := i.(*filesItem)
anyItem = true anyItem = true
offset := item.indexReader.Lookup(key) offset := item.indexReader.Lookup(key)
g := item.getter g := item.getter
@ -1053,8 +1054,7 @@ func (d *Domain) historyBeforeTxNum(key []byte, txNum uint64, roTx kv.Tx) ([]byt
if anyItem { if anyItem {
// If there were no changes but there were history files, the value can be obtained from value files // If there were no changes but there were history files, the value can be obtained from value files
var val []byte var val []byte
d.files[Values].DescendLessOrEqual(topState, func(i btree.Item) bool { d.files[Values].DescendLessOrEqual(topState, func(item *filesItem) bool {
item := i.(*filesItem)
if item.index.Empty() { if item.index.Empty() {
return true return true
} }
@ -1118,9 +1118,8 @@ func (d *Domain) historyBeforeTxNum(key []byte, txNum uint64, roTx kv.Tx) ([]byt
var historyItem *filesItem var historyItem *filesItem
search.startTxNum = foundStartTxNum search.startTxNum = foundStartTxNum
search.endTxNum = foundEndTxNum search.endTxNum = foundEndTxNum
if i := d.files[History].Get(&search); i != nil { historyItem, ok := d.files[History].Get(&search)
historyItem = i.(*filesItem) if !ok || historyItem == nil {
} else {
return nil, false, fmt.Errorf("no %s file found for [%x]", d.filenameBase, key) return nil, false, fmt.Errorf("no %s file found for [%x]", d.filenameBase, key)
} }
offset := historyItem.indexReader.Lookup2(txKey[:], key) offset := historyItem.indexReader.Lookup2(txKey[:], key)

View File

@ -36,9 +36,12 @@ func (d *Domain) endTxNumMinimax() uint64 {
var minimax uint64 var minimax uint64
for fType := FileType(0); fType < NumberOfTypes; fType++ { for fType := FileType(0); fType < NumberOfTypes; fType++ {
if d.files[fType].Len() > 0 { if d.files[fType].Len() > 0 {
endTxNum := d.files[fType].Max().(*filesItem).endTxNum max, ok := d.files[fType].Max()
if minimax == 0 || endTxNum < minimax { if ok {
minimax = endTxNum endTxNum := max.endTxNum
if minimax == 0 || endTxNum < minimax {
minimax = endTxNum
}
} }
} }
} }
@ -73,8 +76,7 @@ func (r DomainRanges) any() bool {
// That is why only Values type is inspected // That is why only Values type is inspected
func (d *Domain) findMergeRange(maxEndTxNum, maxSpan uint64) DomainRanges { func (d *Domain) findMergeRange(maxEndTxNum, maxSpan uint64) DomainRanges {
var r DomainRanges var r DomainRanges
d.files[Values].Ascend(func(i btree.Item) bool { d.files[Values].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.endTxNum > maxEndTxNum { if item.endTxNum > maxEndTxNum {
return false return false
} }
@ -91,8 +93,7 @@ func (d *Domain) findMergeRange(maxEndTxNum, maxSpan uint64) DomainRanges {
} }
return true return true
}) })
d.files[History].Ascend(func(i btree.Item) bool { d.files[History].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.endTxNum > maxEndTxNum { if item.endTxNum > maxEndTxNum {
return false return false
} }
@ -164,8 +165,7 @@ func (d *Domain) staticFilesInRange(r DomainRanges) ([][NumberOfTypes]*filesItem
} }
startJ = 0 startJ = 0
j := 0 j := 0
d.files[fType].Ascend(func(i btree.Item) bool { d.files[fType].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
if item.startTxNum < startTxNum { if item.startTxNum < startTxNum {
startJ++ startJ++
return true return true

View File

@ -41,8 +41,7 @@ func (d *Domain) MakeContext() *DomainContext {
for fType := FileType(0); fType < NumberOfTypes; fType++ { for fType := FileType(0); fType < NumberOfTypes; fType++ {
bt := btree.New(32) bt := btree.New(32)
dc.files[fType] = bt dc.files[fType] = bt
d.files[fType].Ascend(func(i btree.Item) bool { d.files[fType].Ascend(func(item *filesItem) bool {
item := i.(*filesItem)
bt.ReplaceOrInsert(&filesItem{ bt.ReplaceOrInsert(&filesItem{
startTxNum: item.startTxNum, startTxNum: item.startTxNum,
endTxNum: item.endTxNum, endTxNum: item.endTxNum,

View File

@ -25,7 +25,6 @@ import (
"fmt" "fmt"
"math" "math"
"runtime" "runtime"
"sort"
"sync" "sync"
"time" "time"
@ -49,6 +48,7 @@ import (
"github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/log/v3"
"go.uber.org/atomic" "go.uber.org/atomic"
"golang.org/x/exp/slices"
) )
var ( var (
@ -2041,7 +2041,7 @@ func (p *PendingPool) EnforceWorstInvariants() {
heap.Init(p.worst) heap.Init(p.worst)
} }
func (p *PendingPool) EnforceBestInvariants() { func (p *PendingPool) EnforceBestInvariants() {
sort.Sort(p.best) slices.SortFunc(p.best.ms, func(i, j *metaTx) bool { return i.better(j, p.best.pendingBaseFee) })
} }
func (p *PendingPool) Best() *metaTx { //nolint func (p *PendingPool) Best() *metaTx { //nolint