path -> filepath (path package is for urls) (#321)

Alex Sharov authored on 2022-02-12 20:11:30 +07:00, committed by GitHub
parent b041b959ca
commit 6f85066c7e
5 changed files with 17 additions and 19 deletions
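
Why the change matters: path.Join treats its arguments as a slash-separated path (the kind found in URLs) and always joins with "/", while filepath.Join uses the separator of the operating system the binary runs on, which is what file APIs expect, notably on Windows. A minimal sketch of the difference; the directory and file name below are made up and only echo the aggregator's naming scheme:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join is for slash-separated (URL-style) paths:
	// it joins with "/" on every operating system.
	fmt.Println(path.Join("diffdir", "accounts.0-500000.dat")) // diffdir/accounts.0-500000.dat everywhere

	// filepath.Join joins with the host OS separator
	// ("\" on Windows, "/" elsewhere), which is what os.Open and friends expect.
	fmt.Println(filepath.Join("diffdir", "accounts.0-500000.dat")) // diffdir\accounts.0-500000.dat on Windows
}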

View File

@@ -1451,9 +1451,9 @@ func (a *Aggregator) backgroundMerge() {
}
func (a *Aggregator) reduceHistoryFiles(fType FileType, item *byEndBlockItem) error {
-datTmpPath := path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat.tmp", fType.String(), item.startBlock, item.endBlock))
-datPath := path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), item.startBlock, item.endBlock))
-idxPath := path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), item.startBlock, item.endBlock))
+datTmpPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat.tmp", fType.String(), item.startBlock, item.endBlock))
+datPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.dat", fType.String(), item.startBlock, item.endBlock))
+idxPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", fType.String(), item.startBlock, item.endBlock))
comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, datTmpPath, a.diffDir, compress.MinPatternScore, 1)
if err != nil {
return fmt.Errorf("reduceHistoryFiles create compressor %s: %w", datPath, err)
@@ -2600,7 +2600,7 @@ func (a *Aggregator) computeAggregation(treeName string,
item2.getter = item2.decompressor.MakeGetter()
item2.getterMerge = item2.decompressor.MakeGetter()
if withIndex {
-idxPath := path.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", treeName, aggFrom, aggTo))
+idxPath := filepath.Join(a.diffDir, fmt.Sprintf("%s.%d-%d.idx", treeName, aggFrom, aggTo))
if item2.index, err = buildIndex(item2.decompressor, idxPath, a.diffDir, count); err != nil {
return nil, fmt.Errorf("mergeIntoStateFile buildIndex %s [%d-%d]: %w", treeName, aggFrom, aggTo, err)
}
@@ -2611,8 +2611,8 @@ func (a *Aggregator) computeAggregation(treeName string,
}
func createDatAndIndex(treeName string, diffDir string, bt *btree.BTree, blockFrom uint64, blockTo uint64) (*compress.Decompressor, *recsplit.Index, error) {
-datPath := path.Join(diffDir, fmt.Sprintf("%s.%d-%d.dat", treeName, blockFrom, blockTo))
-idxPath := path.Join(diffDir, fmt.Sprintf("%s.%d-%d.idx", treeName, blockFrom, blockTo))
+datPath := filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.dat", treeName, blockFrom, blockTo))
+idxPath := filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.idx", treeName, blockFrom, blockTo))
count, err := btreeToFile(bt, datPath, diffDir, false /* trace */, 1 /* workers */)
if err != nil {
return nil, nil, fmt.Errorf("createDatAndIndex %s build btree: %w", treeName, err)
@@ -2693,7 +2693,7 @@ func (a *Aggregator) mergeIntoStateFile(cp *CursorHeap, prefixLen int,
basename string, startBlock, endBlock uint64, dir string,
valTransform func(val []byte, transValBuf []byte) ([]byte, error),
) (*compress.Decompressor, int, error) {
-datPath := path.Join(dir, fmt.Sprintf("%s.%d-%d.dat", basename, startBlock, endBlock))
+datPath := filepath.Join(dir, fmt.Sprintf("%s.%d-%d.dat", basename, startBlock, endBlock))
comp, err := compress.NewCompressor(context.Background(), AggregatorPrefix, datPath, dir, compress.MinPatternScore, 1)
if err != nil {
return nil, 0, fmt.Errorf("compressor %s: %w", datPath, err)
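
Every hunk in this file follows the same naming scheme: a .dat or .idx file name is built from a tree or file-type name plus a block range and joined onto the diff directory. A hypothetical helper sketching that pattern; datAndIdxPaths and the package name are illustrative and not part of the commit or of erigon-lib:

package pathsketch // illustrative only

import (
	"fmt"
	"path/filepath"
)

// datAndIdxPaths shows the <dir>/<name>.<from>-<to>.dat|.idx convention used
// in the hunks above, with filepath.Join picking the separator of the host OS.
func datAndIdxPaths(diffDir, name string, blockFrom, blockTo uint64) (datPath, idxPath string) {
	datPath = filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.dat", name, blockFrom, blockTo))
	idxPath = filepath.Join(diffDir, fmt.Sprintf("%s.%d-%d.idx", name, blockFrom, blockTo))
	return datPath, idxPath
}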

View File

@@ -22,7 +22,6 @@ import (
"hash/crc32"
"io"
"os"
-"path"
"path/filepath"
"testing"
)
@@ -76,7 +75,7 @@ func checksum(file string) uint32 {
func prepareDict(t *testing.T) *Decompressor {
tmpDir := t.TempDir()
-file := path.Join(tmpDir, "compressed")
+file := filepath.Join(tmpDir, "compressed")
t.Name()
c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2)
if err != nil {

View File

@@ -19,14 +19,14 @@ package compress
import (
"context"
"fmt"
-"path"
+"path/filepath"
"strings"
"testing"
)
func prepareLoremDict(t *testing.T) *Decompressor {
tmpDir := t.TempDir()
-file := path.Join(tmpDir, "compressed")
+file := filepath.Join(tmpDir, "compressed")
t.Name()
c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2)
if err != nil {
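
The two compress test hunks share one setup: each test gets a throwaway directory from t.TempDir() and builds its working file path with filepath.Join. A standalone sketch of that pattern using only the standard library; TestTempFileLayout, the package name, and the file contents are made up for illustration:

package sketch

import (
	"os"
	"path/filepath"
	"testing"
)

// TestTempFileLayout mirrors the setup of prepareDict/prepareLoremDict:
// a per-test temporary directory plus an OS-correct path for the working file.
func TestTempFileLayout(t *testing.T) {
	tmpDir := t.TempDir()                       // removed automatically when the test finishes
	file := filepath.Join(tmpDir, "compressed") // joined with "\" on Windows, "/" elsewhere
	if err := os.WriteFile(file, []byte("payload"), 0o644); err != nil {
		t.Fatal(err)
	}
}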

View File

@@ -20,7 +20,6 @@
package recsplit
import (
-"path"
"testing"
)
@@ -49,7 +48,7 @@ func FuzzRecSplit(f *testing.F) {
t.Skip()
}
tmpDir := t.TempDir()
-indexFile := path.Join(tmpDir, "index")
+indexFile := filepath.Join(tmpDir, "index")
rs, err := NewRecSplit(RecSplitArgs{
KeyCount: count,
Enums: true,

View File

@@ -18,7 +18,7 @@ package recsplit
import (
"fmt"
-"path"
+"path/filepath"
"testing"
)
@@ -29,7 +29,7 @@ func TestRecSplit2(t *testing.T) {
BucketSize: 10,
Salt: 0,
TmpDir: tmpDir,
-IndexFile: path.Join(tmpDir, "index"),
+IndexFile: filepath.Join(tmpDir, "index"),
LeafSize: 8,
StartSeed: []uint64{0x106393c187cae21a, 0x6453cec3f7376937, 0x643e521ddbd2be98, 0x3740c6412f6572cb, 0x717d47562f1ce470, 0x4cd6eb4c63befb7c, 0x9bfd8c5e18c8da73,
0x082f20e10092a9a3, 0x2ada2ce68d21defc, 0xe33cb4f3e7c6466b, 0x3980be458c509c59, 0xc466fd9584828e8c, 0x45f0aabe1a61ede6, 0xf6e7b8b33ad9b98d,
@@ -65,7 +65,7 @@ func TestRecSplitDuplicate(t *testing.T) {
BucketSize: 10,
Salt: 0,
TmpDir: tmpDir,
-IndexFile: path.Join(tmpDir, "index"),
+IndexFile: filepath.Join(tmpDir, "index"),
LeafSize: 8,
StartSeed: []uint64{0x106393c187cae21a, 0x6453cec3f7376937, 0x643e521ddbd2be98, 0x3740c6412f6572cb, 0x717d47562f1ce470, 0x4cd6eb4c63befb7c, 0x9bfd8c5e18c8da73,
0x082f20e10092a9a3, 0x2ada2ce68d21defc, 0xe33cb4f3e7c6466b, 0x3980be458c509c59, 0xc466fd9584828e8c, 0x45f0aabe1a61ede6, 0xf6e7b8b33ad9b98d,
@@ -92,7 +92,7 @@ func TestRecSplitLeafSizeTooLarge(t *testing.T) {
BucketSize: 10,
Salt: 0,
TmpDir: tmpDir,
-IndexFile: path.Join(tmpDir, "index"),
+IndexFile: filepath.Join(tmpDir, "index"),
LeafSize: 64,
StartSeed: []uint64{0x106393c187cae21a, 0x6453cec3f7376937, 0x643e521ddbd2be98, 0x3740c6412f6572cb, 0x717d47562f1ce470, 0x4cd6eb4c63befb7c, 0x9bfd8c5e18c8da73,
0x082f20e10092a9a3, 0x2ada2ce68d21defc, 0xe33cb4f3e7c6466b, 0x3980be458c509c59, 0xc466fd9584828e8c, 0x45f0aabe1a61ede6, 0xf6e7b8b33ad9b98d,
@@ -105,7 +105,7 @@ func TestRecSplitLeafSizeTooLarge(t *testing.T) {
func TestIndexLookup(t *testing.T) {
tmpDir := t.TempDir()
-indexFile := path.Join(tmpDir, "index")
+indexFile := filepath.Join(tmpDir, "index")
rs, err := NewRecSplit(RecSplitArgs{
KeyCount: 100,
BucketSize: 10,
@@ -141,7 +141,7 @@ func TestIndexLookup(t *testing.T) {
func TestTwoLayerIndex(t *testing.T) {
tmpDir := t.TempDir()
-indexFile := path.Join(tmpDir, "index")
+indexFile := filepath.Join(tmpDir, "index")
rs, err := NewRecSplit(RecSplitArgs{
KeyCount: 100,
BucketSize: 10,