package ethdb

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/metrics"
)

var (
	dbCommitBigBatchTimer   = metrics.NewRegisteredTimer("db/commit/big_batch", nil)
	dbCommitSmallBatchTimer = metrics.NewRegisteredTimer("db/commit/small_batch", nil)
)

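// mutation buffers puts and deletes in memory on top of a backing Database.
// Reads consult the in-memory overlay first and fall back to the backing
// database; Commit flushes the buffered changes in a single MultiPut.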
type mutation struct {
	puts   *puts // Map buckets to map[key]value
	mu     sync.RWMutex
	db     Database
	tuples MultiPutTuples
}

func (m *mutation) KV() KV {
	if casted, ok := m.db.(HasKV); ok {
		return casted.KV()
	}
	return nil
}

func (m *mutation) getMem(bucket string, key []byte) ([]byte, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.puts.get(bucket, key)
}

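// Get returns a pending value from the in-memory overlay if one exists
// (a pending nil value is reported as ErrKeyNotFound); otherwise it falls
// back to the backing database.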
// Can only be called from the worker thread
func (m *mutation) Get(bucket string, key []byte) ([]byte, error) {
	if value, ok := m.getMem(bucket, key); ok {
		if value == nil {
			return nil, ErrKeyNotFound
		}
		return value, nil
	}
	if m.db != nil {
		return m.db.Get(bucket, key)
	}
	return nil, ErrKeyNotFound
}

func (m *mutation) Last(bucket string) ([]byte, []byte, error) {
	return m.db.Last(bucket)
}

func (m *mutation) GetIndexChunk(bucket string, key []byte, timestamp uint64) ([]byte, error) {
	if m.db != nil {
		return m.db.GetIndexChunk(bucket, key, timestamp)
	}
	return nil, ErrKeyNotFound
}

func (m *mutation) hasMem(bucket string, key []byte) bool {
	m.mu.RLock()
	defer m.mu.RUnlock()
	_, ok := m.puts.get(bucket, key)
	return ok
}

func (m *mutation) Has(bucket string, key []byte) (bool, error) {
	if m.hasMem(bucket, key) {
		return true, nil
	}
	if m.db != nil {
		return m.db.Has(bucket, key)
	}
	return false, nil
}

func (m *mutation) DiskSize(ctx context.Context) (common.StorageSize, error) {
	if m.db == nil {
		return 0, nil
	}
	sz, err := m.db.(HasStats).DiskSize(ctx)
	if err != nil {
		return 0, err
	}
	return common.StorageSize(sz), nil
}

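// Put records the key/value pair in the in-memory overlay; nothing reaches
// the backing database until Commit.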
func (m *mutation) Put(bucket string, key []byte, value []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.puts.set(bucket, key, value)
	return nil
}

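// MultiPut expects tuples as a flat sequence of (bucket, key, value)
// triples and records them in the in-memory overlay.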
func (m *mutation) MultiPut(tuples ...[]byte) (uint64, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	l := len(tuples)
	for i := 0; i < l; i += 3 {
		m.puts.set(string(tuples[i]), tuples[i+1], tuples[i+2])
	}
	return 0, nil
}

func (m *mutation) BatchSize() int {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.puts.Size()
}

// IdealBatchSize defines the size of the data batches that should ideally be added in one write.
func (m *mutation) IdealBatchSize() int {
	return m.db.IdealBatchSize()
}

// WARNING: Merged mem/DB walk is not implemented
func (m *mutation) Walk(bucket string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error {
	m.panicOnEmptyDB()
	return m.db.Walk(bucket, startkey, fixedbits, walker)
}

// WARNING: Merged mem/DB walk is not implemented
func (m *mutation) MultiWalk(bucket string, startkeys [][]byte, fixedbits []int, walker func(int, []byte, []byte) error) error {
	m.panicOnEmptyDB()
	return m.db.MultiWalk(bucket, startkeys, fixedbits, walker)
}

func (m *mutation) Delete(bucket string, key []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.puts.Delete(bucket, key)
	return nil
}

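// Commit flattens the buffered changes into sorted (bucket, key, value)
// tuples, writes them to the backing database in a single MultiPut, and
// resets the in-memory buffers.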
func (m *mutation) Commit() (uint64, error) {
	if metrics.Enabled {
		if m.puts.Size() >= m.db.IdealBatchSize() {
			defer dbCommitBigBatchTimer.UpdateSince(time.Now())
		} else if m.puts.Len() < m.db.IdealBatchSize()/4 {
			defer dbCommitSmallBatchTimer.UpdateSince(time.Now())
		}
	}
	if m.db == nil {
		return 0, nil
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.tuples == nil {
		m.tuples = make(MultiPutTuples, 0, m.puts.Len()*3)
	}
	m.tuples = m.tuples[:0]
	for bucketStr, bt := range m.puts.mp {
		bucketB := []byte(bucketStr)
		for key := range bt {
			value, _ := bt.GetStr(key)
			m.tuples = append(m.tuples, bucketB, []byte(key), value)
		}
		delete(m.puts.mp, bucketStr)
	}
	sort.Sort(m.tuples)

	written, err := m.db.MultiPut(m.tuples...)
	if err != nil {
		return 0, fmt.Errorf("db.MultiPut failed: %w", err)
	}

	m.puts = newPuts()
	m.tuples = nil
	return written, nil
}

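// Rollback discards all buffered changes without writing them to the
// backing database.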
func (m *mutation) Rollback() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.puts = newPuts()
	m.tuples = nil
}

func (m *mutation) Keys() ([][]byte, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	tuples := common.NewTuples(m.puts.Len(), 2, 1)
	for bucketStr, bt := range m.puts.mp {
		bucketB := []byte(bucketStr)
		for key := range bt {
			if err := tuples.Append(bucketB, []byte(key)); err != nil {
				return nil, err
			}
		}
	}
	sort.Sort(tuples)
	return tuples.Values, nil
}

func (m *mutation) Close() {
	m.Rollback()
}

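// NewBatch returns a child mutation layered on top of this one; its writes
// stay buffered until its own Commit flushes them into this mutation.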
func (m *mutation) NewBatch() DbWithPendingMutations {
	mm := &mutation{
		db:   m,
		puts: newPuts(),
	}
	return mm
}

func (m *mutation) Begin() (DbWithPendingMutations, error) {
	return m.db.Begin()
}

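// Example (illustrative sketch): typical batch usage. It assumes `db` is
// some concrete Database from this package and that `bucket`, `key` and
// `value` are placeholders supplied by the caller.
//
//	batch := db.NewBatch()            // returns a DbWithPendingMutations
//	_ = batch.Put(bucket, key, value) // buffered in memory only
//	if _, err := batch.Commit(); err != nil { // flushes buffered writes via MultiPut
//		// handle err; Rollback discards anything still buffered
//	}
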
func (m *mutation) panicOnEmptyDB() {
	if m.db == nil {
		panic("Not implemented")
	}
}

func (m *mutation) MemCopy() Database {
	m.panicOnEmptyDB()
	return m.db
}

// [TURBO-GETH] Freezer support (not implemented yet)
// Ancients returns an error as we don't have a backing chain freezer.
func (m *mutation) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (m *mutation) TruncateAncients(items uint64) error {
	return errNotSupported
}

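// NewRWDecorator wraps db in an RWCounterDecorator that counts every
// database operation it forwards.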
func NewRWDecorator(db Database) *RWCounterDecorator {
	return &RWCounterDecorator{
		db,
		DBCounterStats{},
	}
}

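// RWCounterDecorator passes calls through to the embedded Database while
// atomically incrementing the matching counter in DBCounterStats.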
type RWCounterDecorator struct {
	Database
	DBCounterStats
}

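// DBCounterStats holds per-operation call counters, updated atomically.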
type DBCounterStats struct {
	Put           uint64
	Get           uint64
	GetS          uint64
	GetAsOf       uint64
	Has           uint64
	Walk          uint64
	WalkAsOf      uint64
	MultiWalk     uint64
	MultiWalkAsOf uint64
	Delete        uint64
	MultiPut      uint64
}

func (d *RWCounterDecorator) Put(bucket string, key, value []byte) error {
	atomic.AddUint64(&d.DBCounterStats.Put, 1)
	return d.Database.Put(bucket, key, value)
}

func (d *RWCounterDecorator) Get(bucket string, key []byte) ([]byte, error) {
	atomic.AddUint64(&d.DBCounterStats.Get, 1)
	return d.Database.Get(bucket, key)
}

func (d *RWCounterDecorator) Has(bucket string, key []byte) (bool, error) {
	atomic.AddUint64(&d.DBCounterStats.Has, 1)
	return d.Database.Has(bucket, key)
}

func (d *RWCounterDecorator) Walk(bucket string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error {
	atomic.AddUint64(&d.DBCounterStats.Walk, 1)
	return d.Database.Walk(bucket, startkey, fixedbits, walker)
}

func (d *RWCounterDecorator) MultiWalk(bucket string, startkeys [][]byte, fixedbits []int, walker func(int, []byte, []byte) error) error {
	atomic.AddUint64(&d.DBCounterStats.MultiWalk, 1)
	return d.Database.MultiWalk(bucket, startkeys, fixedbits, walker)
}

func (d *RWCounterDecorator) Delete(bucket string, key []byte) error {
	atomic.AddUint64(&d.DBCounterStats.Delete, 1)
	return d.Database.Delete(bucket, key)
}

func (d *RWCounterDecorator) MultiPut(tuples ...[]byte) (uint64, error) {
	atomic.AddUint64(&d.DBCounterStats.MultiPut, 1)
	return d.Database.MultiPut(tuples...)
}

func (d *RWCounterDecorator) NewBatch() DbWithPendingMutations {
	mm := &mutation{
		db:   d,
		puts: newPuts(),
	}
	return mm
}