e3: option to discard all history writes. useful to test execution performance. (#753)

Author: Alex Sharov, 2022-11-30 16:06:51 +07:00 (committed by GitHub)
Parent: 7e661f184b
Commit: c63b54e470
2 changed files with 18 additions and 6 deletions
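The change threads a discard flag through the history and inverted-index WAL writers: flush becomes a no-op when discard is set, the etl collectors are created and configured only on the buffered path, and close tolerates the resulting nil collectors. Below is a minimal, self-contained sketch of that shape; all names (exampleWAL, newExampleWAL, add, flush, close) are illustrative stand-ins, not erigon-lib API.

package main

import (
	"fmt"
	"strings"
)

// exampleWAL is an illustrative stand-in for the historyWAL and
// invertedIndexWAL writers touched by this commit.
type exampleWAL struct {
	buffered bool
	discard  bool
	vals     *strings.Builder // stands in for an etl collector; nil when unbuffered
}

// newExampleWAL mirrors the newWriter shape in the diff: the collector is
// created, and configured, only inside the buffered branch.
func newExampleWAL(buffered, discard bool) *exampleWAL {
	w := &exampleWAL{buffered: buffered, discard: discard}
	if buffered {
		w.vals = &strings.Builder{}
	}
	return w
}

func (w *exampleWAL) add(v string) {
	if w.discard || w.vals == nil {
		return
	}
	w.vals.WriteString(v)
}

// flush mirrors the early return added in this commit: with discard set,
// nothing is ever written out, so execution cost can be measured without
// the cost of history writes.
func (w *exampleWAL) flush() error {
	if w.discard {
		return nil
	}
	// ... a real writer would load w.vals into the database here ...
	return nil
}

// close mirrors the nil guards added in this commit: an unbuffered writer
// never created a collector, so close must tolerate a nil one.
func (w *exampleWAL) close() {
	if w == nil { // allow nil/double close
		return
	}
	if w.vals != nil {
		w.vals.Reset()
	}
}

func main() {
	w := newExampleWAL(true /*buffered*/, true /*discard*/)
	defer w.close()
	w.add("key=value")
	fmt.Println(w.flush()) // <nil>: everything was discarded
}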

Changed file 1 of 2:

@@ -504,7 +504,9 @@ func (h *historyWAL) close() {
 	if h == nil { // allow dobule-close
 		return
 	}
-	h.historyVals.Close()
+	if h.historyVals != nil {
+		h.historyVals.Close()
+	}
 }
 
 func (h *History) newWriter(tmpdir string, buffered, discard bool) *historyWAL {
@@ -518,8 +520,8 @@ func (h *History) newWriter(tmpdir string, buffered, discard bool) *historyWAL {
 	}
 	if buffered {
 		w.historyVals = etl.NewCollector(h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam))
+		w.historyVals.LogLvl(log.LvlTrace)
 	}
-	w.historyVals.LogLvl(log.LvlTrace)
 
 	val, err := h.tx.GetOne(h.settingsTable, historyValCountKey)
 	if err != nil {
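A side effect of the hunk above: LogLvl is now only called on a collector that actually exists, whereas before, an unbuffered writer would have called it on a nil *etl.Collector. A tiny sketch of the guarded shape, using a hypothetical stand-in type rather than the etl package:

package main

import "fmt"

// collector is a stand-in for *etl.Collector; illustrative only.
type collector struct{ logLvl string }

// LogLvl writes through the receiver, so calling it on a nil *collector panics.
func (c *collector) LogLvl(lvl string) { c.logLvl = lvl }

type writer struct{ vals *collector }

func newWriter(buffered bool) *writer {
	w := &writer{}
	if buffered {
		w.vals = &collector{}
		w.vals.LogLvl("trace") // safe: configured in the branch that creates it
	}
	// an unconditional w.vals.LogLvl("trace") here would panic whenever
	// buffered is false, which is what the hunk above avoids
	return w
}

func main() {
	fmt.Printf("%+v\n", newWriter(false)) // unbuffered path, no panic
}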
@@ -535,6 +537,9 @@ func (h *History) newWriter(tmpdir string, buffered, discard bool) *historyWAL {
 }
 
 func (h *historyWAL) flush(tx kv.RwTx) error {
+	if h.discard {
+		return nil
+	}
 	binary.BigEndian.PutUint64(h.autoIncrementBuf, h.autoIncrement)
 	if err := tx.Put(h.h.settingsTable, historyValCountKey, h.autoIncrementBuf); err != nil {
 		return err

Changed file 2 of 2:

@@ -364,6 +364,9 @@ func loadFunc(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc)
 }
 
 func (ii *invertedIndexWAL) Flush(tx kv.RwTx) error {
+	if ii.discard {
+		return nil
+	}
 	if err := ii.index.Load(tx, ii.ii.indexTable, loadFunc, etl.TransformArgs{}); err != nil {
 		return err
 	}
@@ -378,8 +381,12 @@ func (ii *invertedIndexWAL) close() {
 	if ii == nil {
 		return
 	}
-	ii.index.Close()
-	ii.indexKeys.Close()
+	if ii.index != nil {
+		ii.index.Close()
+	}
+	if ii.indexKeys != nil {
+		ii.indexKeys.Close()
+	}
 }
 
 var WALCollectorRam = etl.BufferOptimalSize / 16
@@ -406,9 +413,9 @@ func (ii *InvertedIndex) newWriter(tmpdir string, buffered, discard bool) *inver
 		// etl collector doesn't fsync: means if have enough ram, all files produced by all collectors will be in ram
 		w.index = etl.NewCollector(ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam))
 		w.indexKeys = etl.NewCollector(ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRam))
+		w.index.LogLvl(log.LvlTrace)
+		w.indexKeys.LogLvl(log.LvlTrace)
 	}
-	w.index.LogLvl(log.LvlTrace)
-	w.indexKeys.LogLvl(log.LvlTrace)
 	return w
 }
 
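The commit message frames discard as a way to test execution performance without the cost of history writes. A hedged sketch of how that comparison could be driven from a Go benchmark; stubWAL and its methods are stand-ins, since the real call sites are not part of this diff.

package walbench // place in a *_test.go file and run with: go test -bench=.

import "testing"

// stubWAL is a stand-in for the historyWAL / invertedIndexWAL writers; only
// the discard behaviour shown in the diff above is modelled here.
type stubWAL struct {
	discard bool
	buf     [][]byte
}

func (w *stubWAL) add(v []byte) {
	if w.discard {
		return // assumed: the real writers also skip buffering when discarding
	}
	w.buf = append(w.buf, v)
}

func (w *stubWAL) flush() {
	if w.discard {
		return // mirrors the early return added to flush/Flush in this commit
	}
	// ... the real writers load their collectors into the database here ...
	w.buf = w.buf[:0]
}

// BenchmarkHistoryWrites runs the same write loop with and without history
// writes, which is the use case the commit message names.
func BenchmarkHistoryWrites(b *testing.B) {
	for _, tc := range []struct {
		name    string
		discard bool
	}{
		{"with-history", false},
		{"discard-history", true},
	} {
		b.Run(tc.name, func(b *testing.B) {
			w := &stubWAL{discard: tc.discard}
			for i := 0; i < b.N; i++ {
				w.add([]byte("value"))
				if i%1024 == 1023 {
					w.flush()
				}
			}
		})
	}
}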