2020-05-23 09:19:56 +00:00
|
|
|
package core
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/binary"
|
|
|
|
"fmt"
|
2020-05-31 06:57:47 +00:00
|
|
|
"os"
|
|
|
|
"reflect"
|
|
|
|
"sort"
|
|
|
|
"strconv"
|
|
|
|
"testing"
|
2020-06-06 10:29:11 +00:00
|
|
|
|
|
|
|
"github.com/ledgerwatch/turbo-geth/common"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/common/dbutils"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/crypto"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/ethdb"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/log"
|
2020-05-23 09:19:56 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestIndexGenerator_GenerateIndex_SimpleCase(t *testing.T) {
|
2020-05-31 06:57:47 +00:00
|
|
|
test := func(blocksNum int, csBucket []byte) func(t *testing.T) {
|
|
|
|
return func(t *testing.T) {
|
|
|
|
db := ethdb.NewMemDatabase()
|
2020-06-06 10:29:11 +00:00
|
|
|
defer db.Close()
|
2020-05-31 06:57:47 +00:00
|
|
|
ig := NewIndexGenerator(db, make(chan struct{}))
|
|
|
|
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
|
|
|
csInfo, ok := mapper[string(csBucket)]
|
|
|
|
if !ok {
|
|
|
|
t.Fatal("incorrect cs bucket")
|
|
|
|
}
|
|
|
|
addrs, expecedIndexes := generateTestData(t, db, csBucket, blocksNum)
|
2020-05-23 09:19:56 +00:00
|
|
|
|
2020-05-31 08:12:10 +00:00
|
|
|
ig.ChangeSetBufSize = 16 * 1024
|
2020-05-31 06:57:47 +00:00
|
|
|
err := ig.GenerateIndex(0, csBucket)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-05-23 09:19:56 +00:00
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[0], 0, expecedIndexes[string(addrs[0])][0])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[0], 999, expecedIndexes[string(addrs[0])][0])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[0], 1000, expecedIndexes[string(addrs[0])][1])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[0], 1999, expecedIndexes[string(addrs[0])][1])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[0], 2000, expecedIndexes[string(addrs[0])][2])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[1], 0, expecedIndexes[string(addrs[1])][0])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[1], 2000, expecedIndexes[string(addrs[1])][1])
|
|
|
|
checkIndex(t, db, csInfo.IndexBucket, addrs[2], 0, expecedIndexes[string(addrs[2])][0])
|
|
|
|
|
|
|
|
//check last chunk
|
|
|
|
lastChunkCheck(t, db, csInfo.IndexBucket, addrs[0], expecedIndexes[string(addrs[0])][2])
|
|
|
|
lastChunkCheck(t, db, csInfo.IndexBucket, addrs[1], expecedIndexes[string(addrs[1])][1])
|
|
|
|
lastChunkCheck(t, db, csInfo.IndexBucket, addrs[2], expecedIndexes[string(addrs[2])][0])
|
|
|
|
}
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Run("account hashed state", test(2100, dbutils.AccountChangeSetBucket))
|
|
|
|
t.Run("account plain state", test(2100, dbutils.PlainAccountChangeSetBucket))
|
|
|
|
t.Run("storage hashed state", test(2100, dbutils.StorageChangeSetBucket))
|
|
|
|
t.Run("storage plain state", test(2100, dbutils.PlainStorageChangeSetBucket))
|
2020-05-23 09:19:56 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestIndexGenerator_Truncate verifies that IndexGenerator.Truncate removes
// all index entries with block numbers above the given cut-off and leaves
// earlier chunks intact.
//
// NOTE(review): the subtests share the `expected` map and mutate it in
// place, so they depend on running sequentially and in declaration order,
// truncating from higher block numbers down to lower ones.
func TestIndexGenerator_Truncate(t *testing.T) {
	// Only the storage bucket is exercised here; the commented-out list
	// keeps the other change-set buckets handy for manual runs.
	buckets := [][]byte{dbutils.StorageChangeSetBucket}
	//buckets:=[][]byte{dbutils.AccountChangeSetBucket, dbutils.StorageChangeSetBucket, dbutils.PlainAccountChangeSetBucket, dbutils.PlainStorageChangeSetBucket}
	for i := range buckets {
		csbucket := buckets[i]
		db := ethdb.NewMemDatabase()
		// 2100 blocks yields several index chunks per address (hashes[0]
		// has chunks 0..2; hashes[1] has chunks 0..1; hashes[2] has one).
		hashes, expected := generateTestData(t, db, csbucket, 2100)
		mp := mapper[string(csbucket)]
		indexBucket := mp.IndexBucket
		ig := NewIndexGenerator(db, make(chan struct{}))
		err := ig.GenerateIndex(0, csbucket)
		if err != nil {
			t.Fatal(err)
		}

		// reduceSlice drops every block number greater than timestamtTo,
		// mirroring what Truncate is expected to do to an index chunk.
		reduceSlice := func(arr []uint64, timestamtTo uint64) []uint64 {
			pos := sort.Search(len(arr), func(i int) bool {
				return arr[i] > timestamtTo
			})
			return arr[:pos]
		}

		t.Run("truncate to 2050 "+string(csbucket), func(t *testing.T) {
			// Trim the last expected chunk of each address to blocks <= 2050.
			expected[string(hashes[0])][2] = reduceSlice(expected[string(hashes[0])][2], 2050)
			expected[string(hashes[1])][1] = reduceSlice(expected[string(hashes[1])][1], 2050)
			expected[string(hashes[2])][0] = reduceSlice(expected[string(hashes[2])][0], 2050)

			err = ig.Truncate(2050, csbucket)
			if err != nil {
				t.Fatal(err)
			}

			// Probe both the truncated chunks (at 2030) and untouched
			// earlier chunks (at 1999 / 999).
			checkIndex(t, db, indexBucket, hashes[0], 2030, expected[string(hashes[0])][2])
			checkIndex(t, db, indexBucket, hashes[1], 2030, expected[string(hashes[1])][1])
			checkIndex(t, db, indexBucket, hashes[2], 2030, expected[string(hashes[2])][0])
			checkIndex(t, db, indexBucket, hashes[0], 1999, expected[string(hashes[0])][1])
			checkIndex(t, db, indexBucket, hashes[1], 999, expected[string(hashes[1])][0])
		})

		t.Run("truncate to 2000 "+string(csbucket), func(t *testing.T) {
			expected[string(hashes[0])][2] = reduceSlice(expected[string(hashes[0])][2], 2000)
			expected[string(hashes[1])][1] = reduceSlice(expected[string(hashes[1])][1], 2000)
			expected[string(hashes[2])][0] = reduceSlice(expected[string(hashes[2])][0], 2000)

			err = ig.Truncate(2000, csbucket)
			if err != nil {
				t.Fatal(err)
			}

			checkIndex(t, db, indexBucket, hashes[0], 2000, expected[string(hashes[0])][2])
			checkIndex(t, db, indexBucket, hashes[1], 2000, expected[string(hashes[1])][1])
			// hashes[2] may have no entry at exactly 2000; probe at its
			// last remaining block instead.
			checkIndex(t, db, indexBucket, hashes[2], expected[string(hashes[2])][0][len(expected[string(hashes[2])][0])-1], expected[string(hashes[2])][0])
		})

		t.Run("truncate to 1999 "+string(csbucket), func(t *testing.T) {
			err = ig.Truncate(1999, csbucket)
			if err != nil {
				t.Fatal(err)
			}
			checkIndex(t, db, indexBucket, hashes[0], 1999, expected[string(hashes[0])][1])
			checkIndex(t, db, indexBucket, hashes[1], 1998, expected[string(hashes[1])][0])
			checkIndex(t, db, indexBucket, hashes[2], 1998, expected[string(hashes[2])][0])
			// Chunks entirely above the cut-off must be deleted.
			_, err = db.GetIndexChunk(csbucket, hashes[0], 2000)
			if err != ethdb.ErrKeyNotFound {
				t.Fatal()
			}
			_, err = db.GetIndexChunk(csbucket, hashes[1], 2000)
			if err != ethdb.ErrKeyNotFound {
				t.Fatal()
			}
		})

		t.Run("truncate to 999 "+string(csbucket), func(t *testing.T) {
			expected[string(hashes[1])][0] = reduceSlice(expected[string(hashes[1])][0], 999)
			expected[string(hashes[2])][0] = reduceSlice(expected[string(hashes[2])][0], 999)

			err = ig.Truncate(999, csbucket)
			if err != nil {
				t.Fatal(err)
			}
			checkIndex(t, db, indexBucket, hashes[0], 999, expected[string(hashes[0])][0])
			checkIndex(t, db, indexBucket, hashes[1], 998, expected[string(hashes[1])][0])
			checkIndex(t, db, indexBucket, hashes[2], 999, expected[string(hashes[2])][0])
			_, err = db.GetIndexChunk(csbucket, hashes[0], 1000)
			if err != ethdb.ErrKeyNotFound {
				t.Fatal()
			}
			_, err = db.GetIndexChunk(csbucket, hashes[1], 1000)
			if err != ethdb.ErrKeyNotFound {
				t.Fatal()
			}
		})
		db.Close()
	}
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
func generateTestData(t *testing.T, db ethdb.Database, csBucket []byte, numOfBlocks int) ([][]byte, map[string][][]uint64) { //nolint
|
|
|
|
csInfo, ok := mapper[string(csBucket)]
|
|
|
|
if !ok {
|
|
|
|
t.Fatal("incorrect cs bucket")
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
var isPlain bool
|
|
|
|
if bytes.Equal(dbutils.PlainStorageChangeSetBucket, csBucket) || bytes.Equal(dbutils.PlainAccountChangeSetBucket, csBucket) {
|
|
|
|
isPlain = true
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
addrs, err := generateAddrs(3, isPlain)
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err)
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
if bytes.Equal(dbutils.StorageChangeSetBucket, csBucket) || bytes.Equal(dbutils.PlainStorageChangeSetBucket, csBucket) {
|
|
|
|
keys, innerErr := generateAddrs(3, false)
|
|
|
|
if innerErr != nil {
|
|
|
|
t.Fatal(innerErr)
|
|
|
|
}
|
2020-05-23 09:19:56 +00:00
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
defaultIncarnation := make([]byte, 8)
|
|
|
|
binary.BigEndian.PutUint64(defaultIncarnation, ^uint64(1))
|
|
|
|
for i := range addrs {
|
|
|
|
addrs[i] = append(addrs[i], defaultIncarnation...)
|
|
|
|
addrs[i] = append(addrs[i], keys[i]...)
|
|
|
|
}
|
|
|
|
}
|
2020-05-23 09:19:56 +00:00
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
expected1 := make([][]uint64, 0)
|
|
|
|
expected1 = append(expected1, make([]uint64, 0))
|
|
|
|
expected2 := make([][]uint64, 0)
|
|
|
|
expected2 = append(expected2, make([]uint64, 0))
|
|
|
|
expected3 := make([][]uint64, 0)
|
|
|
|
expected3 = append(expected3, make([]uint64, 0))
|
2020-05-23 09:19:56 +00:00
|
|
|
|
|
|
|
for i := 0; i < numOfBlocks; i++ {
|
2020-05-31 06:57:47 +00:00
|
|
|
cs := csInfo.New()
|
|
|
|
err = cs.Add(addrs[0], []byte(strconv.Itoa(i)))
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err)
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
if len(expected1[len(expected1)-1]) == dbutils.MaxChunkSize {
|
|
|
|
expected1 = append(expected1, make([]uint64, 0))
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
expected1[len(expected1)-1] = append(expected1[len(expected1)-1], uint64(i))
|
2020-05-23 09:19:56 +00:00
|
|
|
|
|
|
|
if i%2 == 0 {
|
2020-05-31 06:57:47 +00:00
|
|
|
err = cs.Add(addrs[1], []byte(strconv.Itoa(i)))
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err)
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
if len(expected2[len(expected2)-1]) == dbutils.MaxChunkSize {
|
|
|
|
expected2 = append(expected2, make([]uint64, 0))
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
expected2[len(expected2)-1] = append(expected2[len(expected2)-1], uint64(i))
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
if i%3 == 0 {
|
2020-05-31 06:57:47 +00:00
|
|
|
err = cs.Add(addrs[2], []byte(strconv.Itoa(i)))
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if len(expected3[len(expected3)-1]) == dbutils.MaxChunkSize {
|
|
|
|
expected3 = append(expected3, make([]uint64, 0))
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
expected3[len(expected3)-1] = append(expected3[len(expected3)-1], uint64(i))
|
|
|
|
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
v, err := csInfo.Encode(cs)
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
err = db.Put(csBucket, dbutils.EncodeTimestamp(uint64(i)), v)
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
return addrs, map[string][][]uint64{
|
|
|
|
string(addrs[0]): expected1,
|
|
|
|
string(addrs[1]): expected2,
|
|
|
|
string(addrs[2]): expected3,
|
|
|
|
}
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
func checkIndex(t *testing.T, db ethdb.Database, bucket, addrHash []byte, chunkBlock uint64, expected []uint64) {
|
2020-05-23 09:19:56 +00:00
|
|
|
t.Helper()
|
2020-05-31 06:57:47 +00:00
|
|
|
b, err := db.GetIndexChunk(bucket, addrHash, chunkBlock)
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err, common.Bytes2Hex(addrHash), chunkBlock)
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
val, _, err := dbutils.HistoryIndexBytes(b).Decode()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(val, expected) {
|
2020-05-31 06:57:47 +00:00
|
|
|
fmt.Println("get", val)
|
|
|
|
fmt.Println("expected", expected)
|
2020-05-23 09:19:56 +00:00
|
|
|
t.Fatal()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-31 06:57:47 +00:00
|
|
|
func lastChunkCheck(t *testing.T, db ethdb.Database, bucket, key []byte, expected []uint64) {
|
2020-05-23 09:19:56 +00:00
|
|
|
t.Helper()
|
2020-05-31 06:57:47 +00:00
|
|
|
v, err := db.Get(bucket, dbutils.CurrentChunkKey(key))
|
2020-05-23 09:19:56 +00:00
|
|
|
if err != nil {
|
2020-05-31 06:57:47 +00:00
|
|
|
t.Fatal(err, dbutils.CurrentChunkKey(key))
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
val, _, err := dbutils.HistoryIndexBytes(v).Decode()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(val, expected) {
|
|
|
|
fmt.Println(val)
|
|
|
|
fmt.Println(expected)
|
|
|
|
t.Fatal()
|
|
|
|
}
|
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
|
|
|
|
func generateAddrs(numOfAddrs int, isPlain bool) ([][]byte, error) {
|
|
|
|
addrs := make([][]byte, numOfAddrs)
|
|
|
|
for i := 0; i < numOfAddrs; i++ {
|
|
|
|
key1, err := crypto.GenerateKey()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
addr := crypto.PubkeyToAddress(key1.PublicKey)
|
|
|
|
if isPlain {
|
|
|
|
addrs[i] = addr.Bytes()
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
hash, err := common.HashData(addr.Bytes())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
addrs[i] = hash.Bytes()
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|
2020-05-31 06:57:47 +00:00
|
|
|
return addrs, nil
|
2020-05-23 09:19:56 +00:00
|
|
|
}
|