package cache

import (
	"context"
	"math"
	"sync"
	"time"

	lru "github.com/hashicorp/golang-lru"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"go.opencensus.io/trace"
)

var (
	// Metrics
	skipSlotCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "skip_slot_cache_hit",
		Help: "The total number of cache hits on the skip slot cache.",
	})
	skipSlotCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "skip_slot_cache_miss",
		Help: "The total number of cache misses on the skip slot cache.",
	})
)

// SkipSlotCache is used to store the cached results of processing skip slots in state.ProcessSlots.
type SkipSlotCache struct {
	cache      *lru.Cache
	lock       sync.RWMutex
	disabled   bool // Allow for programmatic toggling of the cache, useful during initial sync.
	inProgress map[uint64]bool
}

// NewSkipSlotCache initializes the map and underlying cache.
func NewSkipSlotCache() *SkipSlotCache {
	cache, err := lru.New(8)
	if err != nil {
		panic(err)
	}
	return &SkipSlotCache{
		cache:      cache,
		inProgress: make(map[uint64]bool),
	}
}

// Enable the skip slot cache.
func (c *SkipSlotCache) Enable() {
	c.disabled = false
}

// Disable the skip slot cache.
func (c *SkipSlotCache) Disable() {
	c.disabled = true
}

// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
	defer span.End()
	if c.disabled {
		// Return a miss result if cache is not enabled.
		skipSlotCacheMiss.Inc()
		return nil, nil
	}

	delay := minDelay
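	// minDelay, maxDelay, and delayFactor are backoff parameters declared
	// elsewhere in this cache package; the wait below grows geometrically
	// from minDelay up to maxDelay nanoseconds.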

	// Another identical request may be in progress already. Let's wait until
	// any in progress request resolves or our timeout is exceeded.
	inProgress := false
	for {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}

		c.lock.RLock()
		if !c.inProgress[slot] {
			c.lock.RUnlock()
			break
		}
		inProgress = true
		c.lock.RUnlock()

		// This increasing backoff is to decrease the CPU cycles while waiting
		// for the in progress boolean to flip to false.
		time.Sleep(time.Duration(delay) * time.Nanosecond)
		delay *= delayFactor
		delay = math.Min(delay, maxDelay)
	}
	span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))

	item, exists := c.cache.Get(slot)

	if exists && item != nil {
		skipSlotCacheHit.Inc()
		span.AddAttributes(trace.BoolAttribute("hit", true))
		return item.(*stateTrie.BeaconState).Copy(), nil
	}
	skipSlotCacheMiss.Inc()
	span.AddAttributes(trace.BoolAttribute("hit", false))
	return nil, nil
}

// MarkInProgress marks a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
	if c.disabled {
		return nil
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	if c.inProgress[slot] {
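		// ErrAlreadyInProgress is declared elsewhere in this cache package; it
		// tells the caller that another routine already claimed this slot.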
		return ErrAlreadyInProgress
	}
	c.inProgress[slot] = true
	return nil
}

// MarkNotInProgress will release the lock on a given request. This should be
// called after Put.
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
	if c.disabled {
		return nil
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.inProgress, slot)
	return nil
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, slot uint64, state *stateTrie.BeaconState) error {
	if c.disabled {
		return nil
	}

	// Copy state so cached value is not mutated.
	c.cache.Add(slot, state.Copy())

	return nil
}
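
// Illustrative sketch of how a caller might coordinate through this cache:
// check Get first, claim the slot with MarkInProgress, compute, then Put and
// release with MarkNotInProgress. computeStateAtSlot stands in for the
// caller's own state computation and is a hypothetical placeholder, not part
// of this package.
func exampleSkipSlotCacheUsage(
	ctx context.Context,
	c *SkipSlotCache,
	slot uint64,
	computeStateAtSlot func(context.Context, uint64) (*stateTrie.BeaconState, error),
) (*stateTrie.BeaconState, error) {
	// A nil state with a nil error from Get signals a cache miss.
	if cached, err := c.Get(ctx, slot); err != nil {
		return nil, err
	} else if cached != nil {
		return cached, nil
	}

	// Claim the slot; another routine may have claimed it first.
	if err := c.MarkInProgress(slot); err == ErrAlreadyInProgress {
		// Re-read once the other routine finishes; Get waits while the slot
		// is marked in progress.
		return c.Get(ctx, slot)
	} else if err != nil {
		return nil, err
	}
	defer func() {
		// Release the slot so waiting Get calls can proceed.
		_ = c.MarkNotInProgress(slot)
	}()

	st, err := computeStateAtSlot(ctx, slot)
	if err != nil {
		return nil, err
	}
	if err := c.Put(ctx, slot, st); err != nil {
		return nil, err
	}
	return st, nil
}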