erigon-pulse/etl/dataprovider.go
ledgerwatch c71ac02a0f
[erigon2] Optimisations in etl collector and compressor (#339)
* Optimisations in etl collector and compressor

* Not copy k and v in the collector

* Fix lint

* Optimisations

* Change Load1 back to Load

* Reduce allocations for tests

* preallocate inv

* counting hits and misses

* Try to fix

* Try to fix

* Relaxation 1

* Relaxation 2

* Add arch tables

* Fix

* Update arch tables and use them

* Not to override larger value

* Increase arch table size

* Increase arch table size

* Fixes to arch

* Print

* Off by one

* Print

* Fix

* Remove print

* Perform update of arch in the background

* Build up huffman tree

Co-authored-by: Alexey Sharp <alexeysharp@Alexeys-iMac.local>
2022-02-20 22:14:06 +00:00

176 lines
4.4 KiB
Go

/*
Copyright 2021 Erigon contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etl
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/log/v3"
)
// dataProvider is a source of key/value entries previously collected by the
// etl machinery. Implementations stream entries either from a temp file on
// disk (fileDataProvider) or straight from RAM (memoryDataProvider).
type dataProvider interface {
	// Next returns the next key/value pair. keyBuf/valBuf may be reused as
	// backing storage for the returned slices. io.EOF signals exhaustion.
	Next(keyBuf, valBuf []byte) ([]byte, []byte, error)
	Dispose() uint64 // Safe for repeated call, doesn't return error - means defer-friendly
}
// fileDataProvider reads entries back from a temp file produced by FlushToDisk.
// The buffered reader is created lazily on the first Next call.
type fileDataProvider struct {
	file       *os.File
	reader     io.Reader
	byteReader io.ByteReader // Different interface to the same object as reader
}
// FlushToDisk writes the contents of the buffer to a freshly created temp
// file in tmpdir and returns a dataProvider that can read it back.
// It returns (nil, nil) when the buffer is empty.
// `doFsync` is true only for 'critical' collectors (which should not lose data).
func FlushToDisk(b Buffer, tmpdir string, doFsync, noLogs bool) (dataProvider, error) {
	if b.Len() == 0 {
		return nil, nil
	}
	// if we are going to create files in the system temp dir, we don't need any
	// subfolders.
	if tmpdir != "" {
		if err := os.MkdirAll(tmpdir, 0755); err != nil {
			return nil, err
		}
	}
	bufferFile, err := ioutil.TempFile(tmpdir, "erigon-sortable-buf-")
	if err != nil {
		return nil, err
	}
	if doFsync {
		defer bufferFile.Sync() //nolint:errcheck
	}
	w := bufio.NewWriterSize(bufferFile, BufIOSize)
	defer w.Flush() //nolint:errcheck
	defer func() {
		// NOTE: defers run LIFO, so this executes BEFORE w.Flush and
		// bufferFile.Sync above. That is safe: b.Write below has already
		// copied the buffer's contents into the bufio writer.
		b.Reset()
		if !noLogs {
			var m runtime.MemStats
			runtime.ReadMemStats(&m)
			log.Info(
				"Flushed buffer file",
				"name", bufferFile.Name(),
				"alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
		}
	}()
	if err = b.Write(w); err != nil {
		// Don't leak a partially written temp file on failure; the deferred
		// Flush/Sync on the closed file will error and are deliberately ignored.
		_ = bufferFile.Close()
		_ = os.Remove(bufferFile.Name())
		return nil, fmt.Errorf("error writing entries to disk: %w", err)
	}
	return &fileDataProvider{file: bufferFile, reader: nil}, nil
}
// Next reads the next key/value pair from the backing file, lazily rewinding
// the file and wrapping it in a buffered reader on the first call.
func (p *fileDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
	if p.reader == nil {
		if _, err := p.file.Seek(0, 0); err != nil {
			return nil, nil, err
		}
		buffered := bufio.NewReaderSize(p.file, BufIOSize)
		p.reader = buffered
		p.byteReader = buffered
	}
	return readElementFromDisk(p.reader, p.byteReader, keyBuf, valBuf)
}
// Dispose closes and deletes the backing file. It returns the file's size in
// bytes, or 0 if the size could not be determined. Safe for repeated calls.
func (p *fileDataProvider) Dispose() uint64 {
	var size uint64
	if info, statErr := os.Stat(p.file.Name()); statErr == nil {
		size = uint64(info.Size())
	}
	_ = p.file.Close()
	_ = os.Remove(p.file.Name())
	return size
}
// String describes the provider and its backing file, for logging.
func (p *fileDataProvider) String() string {
	name := p.file.Name()
	return fmt.Sprintf("%T(file: %s)", p, name)
}
// extendAndRead grows buf by n bytes - reusing spare capacity when possible,
// reallocating otherwise - and fills the newly added tail from r.
// It is a no-op for n == 0. On read failure it returns (nil, err).
func extendAndRead(r io.Reader, buf []byte, n int) ([]byte, error) {
	if n == 0 {
		return buf, nil
	}
	if len(buf)+n > cap(buf) {
		// Not enough capacity - reallocate and keep the existing prefix.
		grown := make([]byte, len(buf)+n)
		copy(grown, buf)
		buf = grown
	} else {
		buf = buf[:len(buf)+n]
	}
	if _, err := io.ReadFull(r, buf[len(buf)-n:]); err != nil {
		return nil, err
	}
	return buf, nil
}

// readElementFromDisk decodes one key/value pair from the stream written by
// Buffer.Write: a uvarint length followed by the key bytes, then a uvarint
// length followed by the value bytes. The decoded bytes are APPENDED to
// keyBuf/valBuf (which may be nil); the possibly reallocated slices are
// returned. A clean end of stream surfaces as io.EOF from the first uvarint.
func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ([]byte, []byte, error) {
	n, err := binary.ReadUvarint(br)
	if err != nil {
		return nil, nil, err
	}
	if keyBuf, err = extendAndRead(r, keyBuf, int(n)); err != nil {
		return nil, nil, err
	}
	if n, err = binary.ReadUvarint(br); err != nil {
		return nil, nil, err
	}
	if valBuf, err = extendAndRead(r, valBuf, int(n)); err != nil {
		return nil, nil, err
	}
	return keyBuf, valBuf, nil
}
// memoryDataProvider serves entries directly from an in-RAM Buffer,
// never touching disk.
type memoryDataProvider struct {
	buffer       Buffer
	currentIndex int // index of the next entry to hand out
}
// KeepInRAM wraps the buffer in a dataProvider that iterates it in place,
// as the memory-only counterpart to FlushToDisk.
func KeepInRAM(buffer Buffer) dataProvider {
	return &memoryDataProvider{buffer: buffer, currentIndex: 0}
}
// Next hands out the entry at the current position and advances it,
// returning io.EOF once the buffer is exhausted.
func (p *memoryDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
	if p.currentIndex >= p.buffer.Len() {
		return nil, nil, io.EOF
	}
	k, v := p.buffer.Get(p.currentIndex, keyBuf, valBuf)
	p.currentIndex++
	return k, v, nil
}
// Dispose is a no-op: an in-RAM provider occupies no disk space.
func (p *memoryDataProvider) Dispose() uint64 {
	return 0
}
// String describes the provider and how many entries it holds, for logging.
func (p *memoryDataProvider) String() string {
	count := p.buffer.Len()
	return fmt.Sprintf("%T(buffer.Len: %d)", p, count)
}