package ethdb

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/google/btree"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/log"
	"github.com/ledgerwatch/turbo-geth/metrics"
)

// TxDb - provides a Database interface around ethdb.Tx
// It is not thread-safe!
// TxDb is not usable after a .Commit()/.Rollback() call, but is usable again after a .CommitAndBegin() call
// You can put an unlimited amount of data into this object; calling IdealBatchSize is unnecessary
// Walk and MultiWalk methods still work outside of the Tx object; that will be implemented later
type TxDb struct {
	db      Database
	tx      Tx
	txFlags TxFlags
	cursors map[string]Cursor
	len     uint64
}
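
// A minimal lifecycle sketch (hypothetical usage; assumes `db` implements HasKV,
// `flags` is an appropriate TxFlags value, and "b" is an existing bucket):
//
//	txDb := NewTxDbWithoutTransaction(db, flags)
//	if _, err := txDb.Begin(context.Background(), flags); err != nil {
//		return err
//	}
//	defer txDb.Rollback() // no-op once the transaction was committed
//	if err := txDb.Put("b", key, value); err != nil {
//		return err
//	}
//	if _, err := txDb.Commit(); err != nil { // TxDb is unusable afterwards unless CommitAndBegin is used
//		return err
//	}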

func (m *TxDb) Close() {
	panic("don't call me")
}

// NewTxDbWithoutTransaction creates a TxDb object without opening a transaction.
// Such a TxDb is not usable before a .Begin() call on it.
// It allows injecting a TxDb object into a class hierarchy while opening the write transaction later.
func NewTxDbWithoutTransaction(db Database, flags TxFlags) *TxDb {
	return &TxDb{db: db, txFlags: flags}
}

func (m *TxDb) Begin(ctx context.Context, flags TxFlags) (DbWithPendingMutations, error) {
	batch := m
	if m.tx != nil {
		panic("nested transactions not supported")
	}

	if err := batch.begin(ctx, flags); err != nil {
		return nil, err
	}
	return batch, nil
}
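
// cursor returns a cursor for the given bucket, opening it lazily on the current
// transaction and caching it in m.cursors for reuse until Commit/Rollback clears the cache.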
func (m *TxDb) cursor(bucket string) Cursor {
	c, ok := m.cursors[bucket]
	if !ok {
		c = m.tx.Cursor(bucket)
		m.cursors[bucket] = c
	}
	return c
}
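
// Sequence delegates to the underlying transaction's Sequence, which atomically
// advances the named bucket's sequence counter by `amount`; see ethdb.Tx.Sequence
// for the exact return-value semantics.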
func (m *TxDb) Sequence(bucket string, amount uint64) (res uint64, err error) {
	return m.tx.Sequence(bucket, amount)
}
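
// Put writes a key/value pair through the bucket's cached cursor. The m.len counter
// accumulates the approximate number of bytes written and is what BatchSize()
// reports until the next Commit/Rollback resets it.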
func (m *TxDb) Put(bucket string, key []byte, value []byte) error {
	m.len += uint64(len(key) + len(value))
	return m.cursor(bucket).Put(key, value)
}

func (m *TxDb) Reserve(bucket string, key []byte, i int) ([]byte, error) {
	m.len += uint64(len(key) + i)
	return m.cursor(bucket).Reserve(key, i)
}

func (m *TxDb) Append(bucket string, key []byte, value []byte) error {
	m.len += uint64(len(key) + len(value))
	return m.cursor(bucket).Append(key, value)
}
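
// AppendDup requires the bucket to be configured as dup-sorted: the cursor is
// type-asserted to CursorDupSort, so the call panics for ordinary buckets.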
func (m *TxDb) AppendDup(bucket string, key []byte, value []byte) error {
	m.len += uint64(len(key) + len(value))
	return m.cursor(bucket).(CursorDupSort).AppendDup(key, value)
}

func (m *TxDb) Delete(bucket string, k, v []byte) error {
	m.len += uint64(len(k))
	return m.cursor(bucket).Delete(k, v)
}

func (m *TxDb) NewBatch() DbWithPendingMutations {
	return &mutation{
		db:   m,
		puts: btree.New(32),
	}
}

func (m *TxDb) begin(ctx context.Context, flags TxFlags) error {
	tx, err := m.db.(HasKV).KV().Begin(ctx, flags)
	if err != nil {
		return err
	}
	m.tx = tx
	m.cursors = make(map[string]Cursor, 16)
	return nil
}

func (m *TxDb) KV() KV {
	panic("not allowed to get KV interface because you will lose the transaction, please use .Tx() method")
}

// Last can only be called from the transaction thread
func (m *TxDb) Last(bucket string) ([]byte, []byte, error) {
	return m.cursor(bucket).Last()
}

func (m *TxDb) Get(bucket string, key []byte) ([]byte, error) {
	//if metrics.Enabled {
	//	defer dbGetTimer.UpdateSince(time.Now())
	//}

	_, v, err := m.cursor(bucket).SeekExact(key)
	if err != nil {
		return nil, err
	}
	if v == nil {
		return nil, ErrKeyNotFound
	}
	return v, nil
}

func (m *TxDb) Has(bucket string, key []byte) (bool, error) {
	v, err := m.Get(bucket, key)
	if err != nil {
		return false, err
	}
	return v != nil, nil
}

func (m *TxDb) DiskSize(ctx context.Context) (common.StorageSize, error) {
	if m.db == nil {
		return 0, nil
	}
	sz, err := m.db.(HasStats).DiskSize(ctx)
	if err != nil {
		return 0, err
	}
	return common.StorageSize(sz), nil
}

func (m *TxDb) MultiPut(tuples ...[]byte) (uint64, error) {
	return 0, MultiPut(m.tx, tuples...)
}
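
// MultiPut writes a flat list of (bucket, key, value) triples: tuples[3*i] is the
// bucket name, tuples[3*i+1] the key and tuples[3*i+2] the value; a nil value means
// the key should be deleted. Triples are expected to be sorted and grouped by bucket,
// which allows the end-of-bucket case below to fall back to the cheaper Append path.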
func MultiPut(tx Tx, tuples ...[]byte) error {
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()

	count := 0
	total := float64(len(tuples)) / 3
	for bucketStart := 0; bucketStart < len(tuples); {
		bucketEnd := bucketStart
		for ; bucketEnd < len(tuples) && bytes.Equal(tuples[bucketEnd], tuples[bucketStart]); bucketEnd += 3 {
		}
		bucketName := string(tuples[bucketStart])
		c := tx.Cursor(bucketName)

		// move the cursor to the first element of the batch
		// if it's nil, all keys in the batch are going to be inserted after the end of the bucket
		// (the batch is sorted and has no duplicates here), so we can apply optimisations for this case
		firstKey, _, err := c.Seek(tuples[bucketStart+1])
		if err != nil {
			return err
		}
		isEndOfBucket := firstKey == nil

		l := (bucketEnd - bucketStart) / 3
		for i := 0; i < l; i++ {
			k := tuples[bucketStart+3*i+1]
			v := tuples[bucketStart+3*i+2]
			if isEndOfBucket {
				if v == nil {
					// nothing to delete after the end of the bucket
				} else {
					if err := c.Append(k, v); err != nil {
						return err
					}
				}
			} else {
				if v == nil {
					if err := c.Delete(k, nil); err != nil {
						return err
					}
				} else {
					if err := c.Put(k, v); err != nil {
						return err
					}
				}
			}

			count++

			select {
			default:
			case <-logEvery.C:
				progress := fmt.Sprintf("%.1fM/%.1fM", float64(count)/1_000_000, total/1_000_000)
				log.Info("Write to db", "progress", progress, "current table", bucketName)
			}
		}

		bucketStart = bucketEnd
	}
	return nil
}

func (m *TxDb) BatchSize() int {
	return int(m.len)
}

// IdealBatchSize defines the size of the data batches that should ideally be added in one write.
func (m *TxDb) IdealBatchSize() int {
	panic("only mutation has a preferred batch size, because it is limited by RAM")
}

func (m *TxDb) Walk(bucket string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error {
	m.panicOnEmptyDB()
	c := m.tx.Cursor(bucket) // create a new cursor so that calls to other TxDb methods inside the walker callback will not affect this cursor
	defer c.Close()
	return Walk(c, startkey, fixedbits, walker)
}
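
// Walk iterates the cursor starting at startkey and calls walker for each key/value
// pair while the first fixedbits bits of the key match startkey (Bytesmask turns
// fixedbits into a whole-byte prefix length plus a bitmask for the last, partial byte).
// Iteration stops when walker returns false or an error.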
func Walk(c Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error {
	fixedbytes, mask := Bytesmask(fixedbits)
	k, v, err := c.Seek(startkey)
	if err != nil {
		return err
	}
	for k != nil && len(k) >= fixedbytes && (fixedbits == 0 || bytes.Equal(k[:fixedbytes-1], startkey[:fixedbytes-1]) && (k[fixedbytes-1]&mask) == (startkey[fixedbytes-1]&mask)) {
		goOn, err := walker(k, v)
		if err != nil {
			return err
		}
		if !goOn {
			break
		}
		k, v, err = c.Next()
		if err != nil {
			return err
		}
	}
	return nil
}
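
// A minimal sketch of calling Walk directly (hypothetical usage; assumes `tx` is an
// open ethdb.Tx and "b" is an existing bucket; fixedbits=8 restricts iteration to
// keys whose first byte equals startKey[0]):
//
//	c := tx.Cursor("b")
//	defer c.Close()
//	err := Walk(c, startKey, 8, func(k, v []byte) (bool, error) {
//		fmt.Printf("%x => %x\n", k, v)
//		return true, nil // keep iterating
//	})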

func ForEach(c Cursor, walker func(k, v []byte) (bool, error)) error {
	for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
		if err != nil {
			return err
		}
		ok, err := walker(k, v)
		if err != nil {
			return err
		}
		if !ok {
			return nil
		}
	}
	return nil
}

// MultiWalk is similar to multiple Walk calls folded into one.
func MultiWalk(c Cursor, startkeys [][]byte, fixedbits []int, walker func(int, []byte, []byte) error) error { //nolint
	rangeIdx := 0 // What is the current range we are extracting
	fixedbytes, mask := Bytesmask(fixedbits[rangeIdx])
	startkey := startkeys[rangeIdx]
	k, v, err := c.Seek(startkey)
	if err != nil {
		return err
	}
	for k != nil {
		// Adjust rangeIdx if needed
		if fixedbytes > 0 {
			cmp := int(-1)
			for cmp != 0 {
				cmp = bytes.Compare(k[:fixedbytes-1], startkey[:fixedbytes-1])
				if cmp == 0 {
					k1 := k[fixedbytes-1] & mask
					k2 := startkey[fixedbytes-1] & mask
					if k1 < k2 {
						cmp = -1
					} else if k1 > k2 {
						cmp = 1
					}
				}
				if cmp < 0 {
					k, v, err = c.Seek(startkey)
					if err != nil {
						return err
					}
					if k == nil {
						return nil
					}
				} else if cmp > 0 {
					rangeIdx++
					if rangeIdx == len(startkeys) {
						return nil
					}
					fixedbytes, mask = Bytesmask(fixedbits[rangeIdx])
					startkey = startkeys[rangeIdx]
				}
			}
		}
		if len(v) > 0 {
			if err = walker(rangeIdx, k, v); err != nil {
				return err
			}
		}
		k, v, err = c.Next()
		if err != nil {
			return err
		}
	}
	return nil
}
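
// CommitAndBegin commits the current transaction and immediately opens a fresh one
// with the same flags, so the TxDb stays usable (unlike a plain Commit).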
func (m *TxDb) CommitAndBegin(ctx context.Context) error {
	_, err := m.Commit()
	if err != nil {
		return err
	}

	return m.begin(ctx, m.txFlags)
}

func (m *TxDb) RollbackAndBegin(ctx context.Context) error {
	m.Rollback()
	return m.begin(ctx, m.txFlags)
}

func (m *TxDb) Commit() (uint64, error) {
	if metrics.Enabled {
		defer dbCommitBigBatchTimer.UpdateSince(time.Now())
	}

	if m.tx == nil {
		return 0, fmt.Errorf("second call to .Commit() on the same transaction")
	}
	if err := m.tx.Commit(context.Background()); err != nil {
		return 0, err
	}
	m.tx = nil
	m.cursors = nil
	m.len = 0
	return 0, nil
}

func (m *TxDb) Rollback() {
	if m.tx == nil {
		return
	}
	m.tx.Rollback()
	m.cursors = nil
	m.tx = nil
	m.len = 0
}

func (m *TxDb) Tx() Tx {
	return m.tx
}

func (m *TxDb) Keys() ([][]byte, error) {
	panic("don't use me")
}

func (m *TxDb) panicOnEmptyDB() {
	if m.db == nil {
		panic("Not implemented")
	}
}

// [TURBO-GETH] Freezer support (not implemented yet)
// Ancients returns an error as we don't have a backing chain freezer.
func (m *TxDb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (m *TxDb) TruncateAncients(items uint64) error {
	return errNotSupported
}
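
// The bucket-management helpers below (BucketExists, ClearBuckets, DropBuckets)
// require the underlying transaction to implement the BucketMigrator interface
// and return an error otherwise.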
func (m *TxDb) BucketExists(name string) (bool, error) {
	exists := false
	migrator, ok := m.tx.(BucketMigrator)
	if !ok {
		return false, fmt.Errorf("%T doesn't implement ethdb.BucketMigrator interface", m.tx)
	}
	exists = migrator.ExistsBucket(name)
	return exists, nil
}

func (m *TxDb) ClearBuckets(buckets ...string) error {
	for i := range buckets {
		name := buckets[i]

		migrator, ok := m.tx.(BucketMigrator)
		if !ok {
			return fmt.Errorf("%T doesn't implement ethdb.BucketMigrator interface", m.tx)
		}
		if err := migrator.ClearBucket(name); err != nil {
			return err
		}
	}

	return nil
}

func (m *TxDb) DropBuckets(buckets ...string) error {
	for i := range buckets {
		name := buckets[i]
		log.Info("Dropping bucket", "name", name)
		migrator, ok := m.tx.(BucketMigrator)
		if !ok {
			return fmt.Errorf("%T doesn't implement ethdb.BucketMigrator interface", m.tx)
		}
		if err := migrator.DropBucket(name); err != nil {
			return err
		}
	}
	return nil
}