/*
   Copyright 2021 Erigon contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package etl

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"
	"os"

	"github.com/ledgerwatch/log/v3"
)

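// dataProvider iterates over the entries of a collector buffer, whether they were
// flushed to a temporary file on disk or kept in RAM. A minimal sketch of how a
// caller might drain a provider (the `buf` value and the error handling below are
// illustrative, not taken from this file):
//
//	provider, err := FlushToDisk("example", buf, "", false, log.LvlInfo)
//	if err != nil || provider == nil {
//		return
//	}
//	defer provider.Dispose()
//	var k, v []byte
//	for {
//		k, v, err = provider.Next(k[:0], v[:0])
//		if err != nil { // io.EOF marks the end of the data
//			break
//		}
//		// use k and v here
//	}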
type dataProvider interface {
	Next(keyBuf, valBuf []byte) ([]byte, []byte, error)
	Dispose() uint64 // Safe to call repeatedly; returns no error, so it is defer-friendly
}

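// fileDataProvider reads entries back from a temporary file created by FlushToDisk.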
type fileDataProvider struct {
	file       *os.File
	reader     io.Reader
	byteReader io.ByteReader // Different interface to the same object as reader
}

// FlushToDisk - `doFsync` is true only for 'critical' collectors (which must not lose data).
func FlushToDisk(logPrefix string, b Buffer, tmpdir string, doFsync bool, lvl log.Lvl) (dataProvider, error) {
	if b.Len() == 0 {
		return nil, nil
	}
	// if we are going to create files in the system temp dir, we don't need any
	// subfolders.
	if tmpdir != "" {
		if err := os.MkdirAll(tmpdir, 0755); err != nil {
			return nil, err
		}
	}

	bufferFile, err := os.CreateTemp(tmpdir, "erigon-sortable-buf-")
	if err != nil {
		return nil, err
	}
	if doFsync {
		defer bufferFile.Sync() //nolint:errcheck
	}

	w := bufio.NewWriterSize(bufferFile, BufIOSize)
	defer w.Flush() //nolint:errcheck

	defer func() {
		b.Reset() // run it after buf.flush and file.sync
		log.Log(lvl, fmt.Sprintf("[%s] Flushed buffer file", logPrefix), "name", bufferFile.Name())
	}()

	if err = b.Write(w); err != nil {
		return nil, fmt.Errorf("error writing entries to disk: %w", err)
	}

	return &fileDataProvider{file: bufferFile, reader: nil}, nil
}

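// Next reads the next key/value pair from the backing file, lazily seeking to the
// start and wrapping the file in a buffered reader on the first call. It returns
// io.EOF once the file is exhausted.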
func (p *fileDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
	if p.reader == nil {
		_, err := p.file.Seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
		r := bufio.NewReaderSize(p.file, BufIOSize)
		p.reader = r
		p.byteReader = r
	}
	return readElementFromDisk(p.reader, p.byteReader, keyBuf, valBuf)
}

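// Dispose closes and removes the temporary file and returns its size in bytes
// (0 if the file could not be stat'ed).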
func (p *fileDataProvider) Dispose() uint64 {
	info, _ := os.Stat(p.file.Name())
	_ = p.file.Close()
	_ = os.Remove(p.file.Name())
	if info == nil {
		return 0
	}
	return uint64(info.Size())
}

func (p *fileDataProvider) String() string {
	return fmt.Sprintf("%T(file: %s)", p, p.file.Name())
}

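// readElementFromDisk reads back one entry as written by FlushToDisk: a uvarint key
// length, the key bytes, a uvarint value length, then the value bytes. The key and
// value are appended to keyBuf and valBuf, and the grown slices are returned.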
func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ([]byte, []byte, error) {
	n, err := binary.ReadUvarint(br)
	if err != nil {
		return nil, nil, err
	}
	if n > 0 {
		// Reallocate the slice or extend it if there is enough capacity
		if len(keyBuf)+int(n) > cap(keyBuf) {
			newKeyBuf := make([]byte, len(keyBuf)+int(n))
			copy(newKeyBuf, keyBuf)
			keyBuf = newKeyBuf
		} else {
			keyBuf = keyBuf[:len(keyBuf)+int(n)]
		}
		if _, err = io.ReadFull(r, keyBuf[len(keyBuf)-int(n):]); err != nil {
			return nil, nil, err
		}
	}
	if n, err = binary.ReadUvarint(br); err != nil {
		return nil, nil, err
	}
	if n > 0 {
		// Reallocate the slice or extend it if there is enough capacity
		if len(valBuf)+int(n) > cap(valBuf) {
			newValBuf := make([]byte, len(valBuf)+int(n))
			copy(newValBuf, valBuf)
			valBuf = newValBuf
		} else {
			valBuf = valBuf[:len(valBuf)+int(n)]
		}
		if _, err = io.ReadFull(r, valBuf[len(valBuf)-int(n):]); err != nil {
			return nil, nil, err
		}
	}
	return keyBuf, valBuf, err
}

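// memoryDataProvider iterates a buffer that is kept entirely in RAM.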
type memoryDataProvider struct {
	buffer       Buffer
	currentIndex int
}

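// KeepInRAM returns a dataProvider that serves entries directly from the in-memory buffer.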
func KeepInRAM(buffer Buffer) dataProvider {
	return &memoryDataProvider{buffer, 0}
}

func (p *memoryDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
	if p.currentIndex >= p.buffer.Len() {
		return nil, nil, io.EOF
	}
	key, value := p.buffer.Get(p.currentIndex, keyBuf, valBuf)
	p.currentIndex++
	return key, value, nil
}

func (p *memoryDataProvider) Dispose() uint64 {
	return 0 /* doesn't take space on disk */
}

func (p *memoryDataProvider) String() string {
	return fmt.Sprintf("%T(buffer.Len: %d)", p, p.buffer.Len())
}