Beacon: Added basic operations pool (#8309)

Added operation pools for the beacon chain. Operations are the equivalent of
transactions for Eth2.

Added operation pools for:

* Attester Slashings
* Proposer Slashings
* VoluntaryExits
* BLSExecutionToChange
* Postponed to later: Attestations (or maybe not)
This commit is contained in:
Giulio rebuffo 2023-09-29 23:42:07 +02:00 committed by GitHub
parent 5654ba07c9
commit 72ba18bd36
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 411 additions and 113 deletions

View File

@ -9,6 +9,7 @@ import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/persistence"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cl/pool"
)
type ApiHandler struct {
@ -20,10 +21,11 @@ type ApiHandler struct {
genesisCfg *clparams.GenesisConfig
beaconChainCfg *clparams.BeaconChainConfig
forkchoiceStore forkchoice.ForkChoiceStorage
operationsPool pool.OperationsPool
}
func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB *sql.DB, forkchoiceStore forkchoice.ForkChoiceStorage) *ApiHandler {
return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, blockSource: source, forkchoiceStore: forkchoiceStore}
func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB *sql.DB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool) *ApiHandler {
return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, blockSource: source, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool}
}
func (a *ApiHandler) init() {
@ -54,6 +56,9 @@ func (a *ApiHandler) init() {
r.Post("/binded_blocks", nil)
r.Route("/pool", func(r chi.Router) {
r.Post("/attestations", nil)
r.Get("/voluntary_exits", beaconHandlerWrapper(a.poolVoluntaryExits, false))
r.Get("/attester_slashings", beaconHandlerWrapper(a.poolAttesterSlashings, false))
r.Get("/proposer_slashings", beaconHandlerWrapper(a.poolProposerSlashings, false))
r.Post("/sync_committees", nil)
})
r.Get("/node/syncing", nil)

25
cl/beacon/handler/pool.go Normal file
View File

@ -0,0 +1,25 @@
package handler
import (
"net/http"
"github.com/ledgerwatch/erigon/cl/clparams"
)
// poolVoluntaryExits serves the voluntary exits currently held in the operations pool.
func (a *ApiHandler) poolVoluntaryExits(r *http.Request) (data any, finalized *bool, version *clparams.StateVersion, httpStatus int, err error) {
	return a.operationsPool.VoluntaryExistsPool.Raw(), nil, nil, http.StatusAccepted, nil
}
// poolAttesterSlashings serves the attester slashings currently held in the operations pool.
func (a *ApiHandler) poolAttesterSlashings(r *http.Request) (data any, finalized *bool, version *clparams.StateVersion, httpStatus int, err error) {
	return a.operationsPool.AttesterSlashingsPool.Raw(), nil, nil, http.StatusAccepted, nil
}
// poolProposerSlashings serves the proposer slashings currently held in the operations pool.
func (a *ApiHandler) poolProposerSlashings(r *http.Request) (data any, finalized *bool, version *clparams.StateVersion, httpStatus int, err error) {
	return a.operationsPool.ProposerSlashingsPool.Raw(), nil, nil, http.StatusAccepted, nil
}

View File

@ -101,23 +101,23 @@ func (*VoluntaryExit) EncodingSizeSSZ() int {
}
type SignedVoluntaryExit struct {
VolunaryExit *VoluntaryExit `json:"message"`
Signature libcommon.Bytes96 `json:"signature"`
VoluntaryExit *VoluntaryExit `json:"message"`
Signature libcommon.Bytes96 `json:"signature"`
}
func (e *SignedVoluntaryExit) EncodeSSZ(dst []byte) ([]byte, error) {
return ssz2.MarshalSSZ(dst, e.VolunaryExit, e.Signature[:])
return ssz2.MarshalSSZ(dst, e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) DecodeSSZ(buf []byte, version int) error {
e.VolunaryExit = new(VoluntaryExit)
return ssz2.UnmarshalSSZ(buf, version, e.VolunaryExit, e.Signature[:])
e.VoluntaryExit = new(VoluntaryExit)
return ssz2.UnmarshalSSZ(buf, version, e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) HashSSZ() ([32]byte, error) {
return merkle_tree.HashTreeRoot(e.VolunaryExit, e.Signature[:])
return merkle_tree.HashTreeRoot(e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) EncodingSizeSSZ() int {
return 96 + e.VolunaryExit.EncodingSizeSSZ()
return 96 + e.VoluntaryExit.EncodingSizeSSZ()
}

View File

@ -21,8 +21,8 @@ func TestSignedVoluntaryExit(t *testing.T) {
signature := [96]byte{1, 2, 3}
signedExit := &cltypes.SignedVoluntaryExit{
VolunaryExit: voluntaryExit,
Signature: signature,
VoluntaryExit: voluntaryExit,
Signature: signature,
}
// Encode SignedVoluntaryExit to SSZ
@ -35,8 +35,8 @@ func TestSignedVoluntaryExit(t *testing.T) {
assert.NoError(t, err, "Failed to decode SSZ to SignedVoluntaryExit")
// Compare the original and decoded SignedVoluntaryExit
assert.Equal(t, signedExit.VolunaryExit.Epoch, decodedExit.VolunaryExit.Epoch, "Decoded SignedVoluntaryExit has incorrect epoch")
assert.Equal(t, signedExit.VolunaryExit.ValidatorIndex, decodedExit.VolunaryExit.ValidatorIndex, "Decoded SignedVoluntaryExit has incorrect validator index")
assert.Equal(t, signedExit.VoluntaryExit.Epoch, decodedExit.VoluntaryExit.Epoch, "Decoded SignedVoluntaryExit has incorrect epoch")
assert.Equal(t, signedExit.VoluntaryExit.ValidatorIndex, decodedExit.VoluntaryExit.ValidatorIndex, "Decoded SignedVoluntaryExit has incorrect validator index")
assert.Equal(t, signedExit.Signature, decodedExit.Signature, "Decoded SignedVoluntaryExit has incorrect signature")
}

View File

@ -8,6 +8,7 @@ import (
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cl/pool"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@ -47,7 +48,8 @@ func TestForkChoiceBasic(t *testing.T) {
// Initialize forkchoice store
anchorState := state.New(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(anchorState, anchorStateEncoded, int(clparams.AltairVersion)))
store, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, false)
pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig)
store, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool, false)
require.NoError(t, err)
// first steps
store.OnTick(0)
@ -86,4 +88,13 @@ func TestForkChoiceBasic(t *testing.T) {
require.Equal(t, headRoot, libcommon.HexToHash("0x744cc484f6503462f0f3a5981d956bf4fcb3e57ab8687ed006467e05049ee033"))
// lastly do attestation
require.NoError(t, store.OnAttestation(testAttestation, false))
// Try processing a voluntary exit
err = store.OnVoluntaryExit(&cltypes.SignedVoluntaryExit{
VoluntaryExit: &cltypes.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 0,
},
}, true)
require.NoError(t, err)
require.Equal(t, len(pool.VoluntaryExistsPool.Raw()), 1)
}

View File

@ -9,6 +9,7 @@ import (
state2 "github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/execution_client"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
"github.com/ledgerwatch/erigon/cl/pool"
lru "github.com/hashicorp/golang-lru/v2"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@ -30,6 +31,8 @@ type ForkChoiceStore struct {
unrealizedJustifiedCheckpoint solid.Checkpoint
unrealizedFinalizedCheckpoint solid.Checkpoint
proposerBoostRoot libcommon.Hash
headHash libcommon.Hash
headSlot uint64
// Use go map because this is actually an unordered set
equivocatingIndicies map[uint64]struct{}
forkGraph *fork_graph.ForkGraph
@ -43,6 +46,8 @@ type ForkChoiceStore struct {
engine execution_client.ExecutionEngine
// freezer
recorder freezer.Freezer
// operations pool
operationsPool pool.OperationsPool
}
type LatestMessage struct {
@ -51,7 +56,7 @@ type LatestMessage struct {
}
// NewForkChoiceStore initialize a new store from the given anchor state, either genesis or checkpoint sync state.
func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconState, engine execution_client.ExecutionEngine, recorder freezer.Freezer, enabledPruning bool) (*ForkChoiceStore, error) {
func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconState, engine execution_client.ExecutionEngine, recorder freezer.Freezer, operationsPool pool.OperationsPool, enabledPruning bool) (*ForkChoiceStore, error) {
anchorRoot, err := anchorState.BlockRoot()
if err != nil {
return nil, err
@ -84,6 +89,7 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt
eth2Roots: eth2Roots,
engine: engine,
recorder: recorder,
operationsPool: operationsPool,
}, nil
}

View File

@ -17,9 +17,12 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) {
}
func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
if f.headHash != (libcommon.Hash{}) {
return f.headHash, f.headSlot, nil
}
// Retrieve att
head := f.justifiedCheckpoint.BlockRoot()
blocks := f.getFilteredBlockTree(head)
f.headHash = f.justifiedCheckpoint.BlockRoot()
blocks := f.getFilteredBlockTree(f.headHash)
// See which validators can be used for attestation score
justificationState, err := f.getCheckpointState(f.justifiedCheckpoint)
if err != nil {
@ -29,7 +32,7 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState.validators, justificationState.epoch)
for {
// Filter out current head children.
unfilteredChildren := f.forkGraph.GetChildren(head)
unfilteredChildren := f.forkGraph.GetChildren(f.headHash)
children := []libcommon.Hash{}
for _, child := range unfilteredChildren {
if _, ok := blocks[child]; ok {
@ -38,15 +41,16 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
}
// Stop if we dont have any more children
if len(children) == 0 {
header, hasHeader := f.forkGraph.GetHeader(head)
header, hasHeader := f.forkGraph.GetHeader(f.headHash)
if !hasHeader {
return libcommon.Hash{}, 0, fmt.Errorf("no slot for head is stored")
}
return head, header.Slot, nil
f.headSlot = header.Slot
return f.headHash, f.headSlot, nil
}
// Average case scenario.
if len(children) == 1 {
head = children[0]
f.headHash = children[0]
continue
}
// Sort children by lexigographical order
@ -57,13 +61,13 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
})
// After sorting is done determine best fit.
head = children[0]
f.headHash = children[0]
maxWeight := f.getWeight(children[0], filteredIndicies, justificationState)
for i := 1; i < len(children); i++ {
weight := f.getWeight(children[i], filteredIndicies, justificationState)
// Lexicographical order is king.
if weight >= maxWeight {
head = children[i]
f.headHash = children[i]
maxWeight = weight
}
}

View File

@ -29,7 +29,7 @@ type ForkChoiceStorageReader interface {
type ForkChoiceStorageWriter interface {
OnAttestation(attestation *solid.Attestation, fromBlock bool) error
OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing) error
OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error
OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error
OnTick(time uint64)
}

View File

@ -16,6 +16,7 @@ import (
func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBlock bool) error {
f.mu.Lock()
defer f.mu.Unlock()
f.headHash = libcommon.Hash{}
data := attestation.AttestantionData()
if err := f.validateOnAttestation(attestation, fromBlock); err != nil {
return err

View File

@ -3,48 +3,122 @@ package forkchoice
import (
"fmt"
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/pool"
"github.com/ledgerwatch/erigon/cl/cltypes"
)
func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing) error {
func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error {
if f.operationsPool.AttesterSlashingsPool.Has(pool.ComputeKeyForAttesterSlashing(attesterSlashing)) {
return nil
}
f.mu.Lock()
defer f.mu.Unlock()
// Check if these attestation is even slashable.
// Check if this attestation is even slashable.
attestation1 := attesterSlashing.Attestation_1
attestation2 := attesterSlashing.Attestation_2
if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) {
f.mu.Unlock()
return fmt.Errorf("attestation data is not slashable")
}
// Retrieve justified state
s, _, err := f.forkGraph.GetState(f.justifiedCheckpoint.BlockRoot(), false)
if err != nil {
f.mu.Unlock()
return err
}
if s == nil {
f.mu.Unlock()
return fmt.Errorf("justified checkpoint state not accessible")
}
// Verify validity of slashings
valid, err := state.IsValidIndexedAttestation(s, attestation1)
attestation1PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation1)
if err != nil {
return fmt.Errorf("error calculating indexed attestation 1 validity: %v", err)
f.mu.Unlock()
return err
}
if !valid {
return fmt.Errorf("invalid indexed attestation 1")
attestation2PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation2)
if err != nil {
f.mu.Unlock()
return err
}
domain1, err := s.GetDomain(s.BeaconConfig().DomainBeaconAttester, attestation1.Data.Target().Epoch())
if err != nil {
return fmt.Errorf("unable to get the domain: %v", err)
}
domain2, err := s.GetDomain(s.BeaconConfig().DomainBeaconAttester, attestation2.Data.Target().Epoch())
if err != nil {
return fmt.Errorf("unable to get the domain: %v", err)
}
f.mu.Unlock()
valid, err = state.IsValidIndexedAttestation(s, attestation2)
if err != nil {
return fmt.Errorf("error calculating indexed attestation 2 validity: %v", err)
}
if !valid {
return fmt.Errorf("invalid indexed attestation 2")
if !test {
// Verify validity of slashings (1)
signingRoot, err := fork.ComputeSigningRoot(attestation1.Data, domain1)
if err != nil {
return fmt.Errorf("unable to get signing root: %v", err)
}
valid, err := bls.VerifyAggregate(attestation1.Signature[:], signingRoot[:], attestation1PublicKeys)
if err != nil {
return fmt.Errorf("error while validating signature: %v", err)
}
if !valid {
return fmt.Errorf("invalid aggregate signature")
}
// Verify validity of slashings (2)
signingRoot, err = fork.ComputeSigningRoot(attestation2.Data, domain2)
if err != nil {
return fmt.Errorf("unable to get signing root: %v", err)
}
valid, err = bls.VerifyAggregate(attestation2.Signature[:], signingRoot[:], attestation2PublicKeys)
if err != nil {
return fmt.Errorf("error while validating signature: %v", err)
}
if !valid {
return fmt.Errorf("invalid aggregate signature")
}
}
f.mu.Lock()
defer f.mu.Unlock()
var anySlashed bool
for _, index := range solid.IntersectionOfSortedSets(attestation1.AttestingIndices, attestation2.AttestingIndices) {
f.equivocatingIndicies[index] = struct{}{}
if !anySlashed {
v, err := s.ValidatorForValidatorIndex(int(index))
if err != nil {
return fmt.Errorf("unable to retrieve state: %v", err)
}
if v.IsSlashable(state.Epoch(s)) {
anySlashed = true
}
}
}
if anySlashed {
f.operationsPool.AttesterSlashingsPool.Insert(pool.ComputeKeyForAttesterSlashing(attesterSlashing), attesterSlashing)
}
// add attestation indicies to equivocating indicies.
return nil
}
// getIndexedAttestationPublicKeys collects the BLS public key of every
// attesting validator referenced by att, in index order.
// It returns an error when the attesting indices are empty or not a sorted
// set (same precondition as is_valid_indexed_attestation in the spec), or
// when an index does not resolve to a validator in state b.
func getIndexedAttestationPublicKeys(b *state.CachingBeaconState, att *cltypes.IndexedAttestation) ([][]byte, error) {
	inds := att.AttestingIndices
	if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) {
		// Fixed: the error used to be prefixed "isValidIndexedAttestation:",
		// pointing at the wrong function.
		return nil, fmt.Errorf("getIndexedAttestationPublicKeys: attesting indices are not sorted or are null")
	}
	pks := make([][]byte, 0, inds.Length())
	if err := solid.RangeErr[uint64](inds, func(_ int, v uint64, _ int) error {
		val, err := b.ValidatorForValidatorIndex(int(v))
		if err != nil {
			return err
		}
		pk := val.PublicKey()
		pks = append(pks, pk[:])
		return nil
	}); err != nil {
		return nil, err
	}
	return pks, nil
}

View File

@ -4,6 +4,7 @@ import (
"fmt"
"time"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/cl/cltypes"
@ -15,6 +16,7 @@ import (
func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload, fullValidation bool) error {
f.mu.Lock()
defer f.mu.Unlock()
f.headHash = libcommon.Hash{}
start := time.Now()
blockRoot, err := block.Block.HashSSZ()
if err != nil {
@ -86,6 +88,7 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
if err := statechange.ProcessJustificationBitsAndFinality(lastProcessedState, nil); err != nil {
return err
}
f.operationsPool.NotifyBlock(block.Block)
f.updateUnrealizedCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy())
// Set the changed value pre-simulation
lastProcessedState.SetPreviousJustifiedCheckpoint(previousJustifiedCheckpoint)

View File

@ -0,0 +1,152 @@
package forkchoice
import (
"errors"
"fmt"
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/pool"
)
// NOTE: This file implements non-official handlers for other kinds of operations: it uses the
// forkchoice state to verify external operations and, when valid, pushes them into the operations pool.
// OnVoluntaryExit is a non-official handler for voluntary exit operations.
// It checks the exit against the current head state and, when valid, pushes
// it into the operations pool. When test is true the BLS signature check is
// skipped (used by spec tests).
func (f *ForkChoiceStore) OnVoluntaryExit(signedVoluntaryExit *cltypes.SignedVoluntaryExit, test bool) error {
	voluntaryExit := signedVoluntaryExit.VoluntaryExit
	// Deduplicate: one pending exit per validator index is enough.
	if f.operationsPool.VoluntaryExistsPool.Has(voluntaryExit.ValidatorIndex) {
		return nil
	}
	f.mu.Lock()
	headHash, _, err := f.getHead()
	if err != nil {
		f.mu.Unlock()
		return err
	}
	s, _, err := f.forkGraph.GetState(headHash, false)
	if err != nil {
		f.mu.Unlock()
		return err
	}
	val, err := s.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex))
	if err != nil {
		f.mu.Unlock()
		return err
	}
	// Ignore validators that already initiated an exit.
	if val.ExitEpoch() != f.forkGraph.Config().FarFutureEpoch {
		f.mu.Unlock()
		return nil
	}
	pk := val.PublicKey()
	// Fixed: domain and signing root must be derived while still holding the
	// lock — the previous code read the state s after f.mu.Unlock(), racing
	// with OnBlock/OnAttestation which mutate state under the same mutex.
	domain, err := s.GetDomain(s.BeaconConfig().DomainVoluntaryExit, voluntaryExit.Epoch)
	if err != nil {
		f.mu.Unlock()
		return err
	}
	signingRoot, err := fork.ComputeSigningRoot(voluntaryExit, domain)
	if err != nil {
		f.mu.Unlock()
		return err
	}
	f.mu.Unlock()
	// Signature verification happens outside the lock: it is CPU-heavy and
	// needs only the copied pk/signingRoot.
	if !test {
		valid, err := bls.Verify(signedVoluntaryExit.Signature[:], signingRoot[:], pk[:])
		if err != nil {
			return err
		}
		if !valid {
			return errors.New("ProcessVoluntaryExit: BLS verification failed")
		}
	}
	f.operationsPool.VoluntaryExistsPool.Insert(voluntaryExit.ValidatorIndex, signedVoluntaryExit)
	return nil
}
// OnProposerSlashing is a non-official handler for proposer slashing
// operations. It checks the slashing against the current head state and,
// when valid, pushes it into the operations pool. When test is true the BLS
// signature checks are skipped (used by spec tests).
func (f *ForkChoiceStore) OnProposerSlashing(proposerSlashing *cltypes.ProposerSlashing, test bool) (err error) {
	// Deduplicate before doing any work.
	if f.operationsPool.ProposerSlashingsPool.Has(pool.ComputeKeyForProposerSlashing(proposerSlashing)) {
		return nil
	}
	h1 := proposerSlashing.Header1.Header
	h2 := proposerSlashing.Header2.Header
	// Sanity checks: same slot, same proposer, but distinct headers.
	if h1.Slot != h2.Slot {
		return fmt.Errorf("non-matching slots on proposer slashing: %d != %d", h1.Slot, h2.Slot)
	}
	if h1.ProposerIndex != h2.ProposerIndex {
		return fmt.Errorf("non-matching proposer indices proposer slashing: %d != %d", h1.ProposerIndex, h2.ProposerIndex)
	}
	if *h1 == *h2 {
		// Fixed typo: was "proposee slashing headers are the same".
		return fmt.Errorf("proposer slashing headers are the same")
	}
	// Take lock as we interact with state.
	f.mu.Lock()
	headHash, _, err := f.getHead()
	if err != nil {
		f.mu.Unlock()
		return err
	}
	s, _, err := f.forkGraph.GetState(headHash, false)
	if err != nil {
		f.mu.Unlock()
		return err
	}
	proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex))
	if err != nil {
		f.mu.Unlock()
		return fmt.Errorf("unable to retrieve state: %v", err)
	}
	if !proposer.IsSlashable(state.Epoch(s)) {
		f.mu.Unlock()
		return fmt.Errorf("proposer is not slashable: %v", proposer)
	}
	// Fixed: these two error paths used to return WITHOUT releasing f.mu,
	// leaving the store permanently locked on a GetDomain failure.
	domain1, err := s.GetDomain(s.BeaconConfig().DomainBeaconProposer, state.GetEpochAtSlot(s.BeaconConfig(), h1.Slot))
	if err != nil {
		f.mu.Unlock()
		return fmt.Errorf("unable to get domain: %v", err)
	}
	domain2, err := s.GetDomain(s.BeaconConfig().DomainBeaconProposer, state.GetEpochAtSlot(s.BeaconConfig(), h2.Slot))
	if err != nil {
		f.mu.Unlock()
		return fmt.Errorf("unable to get domain: %v", err)
	}
	pk := proposer.PublicKey()
	f.mu.Unlock()
	// Test mode: insert without verifying signatures.
	if test {
		f.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(proposerSlashing), proposerSlashing)
		return nil
	}
	// Verify both header signatures outside the lock (CPU-heavy BLS work).
	signingRoot, err := fork.ComputeSigningRoot(h1, domain1)
	if err != nil {
		return fmt.Errorf("unable to compute signing root: %v", err)
	}
	valid, err := bls.Verify(proposerSlashing.Header1.Signature[:], signingRoot[:], pk[:])
	if err != nil {
		return fmt.Errorf("unable to verify signature: %v", err)
	}
	if !valid {
		return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", proposerSlashing.Header1.Signature[:], signingRoot[:], pk)
	}
	signingRoot, err = fork.ComputeSigningRoot(h2, domain2)
	if err != nil {
		return fmt.Errorf("unable to compute signing root: %v", err)
	}
	valid, err = bls.Verify(proposerSlashing.Header2.Signature[:], signingRoot[:], pk[:])
	if err != nil {
		return fmt.Errorf("unable to verify signature: %v", err)
	}
	if !valid {
		return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", proposerSlashing.Header2.Signature[:], signingRoot[:], pk)
	}
	f.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(proposerSlashing), proposerSlashing)
	return nil
}

View File

@ -2,6 +2,7 @@ package network
import (
"context"
"fmt"
"sync"
"github.com/ledgerwatch/erigon/cl/freezer"
@ -59,10 +60,33 @@ func (g *GossipManager) SubscribeSignedBeaconBlocks(ctx context.Context) <-chan
g.mu.Unlock()
}()
return out
}
func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l log.Ctx) error {
// operationsContract decodes a gossiped SSZ operation of type T, runs the
// supplied forkchoice verification callback fn on it and, when it verifies,
// re-publishes the message to the network. Peers that send undecodable
// payloads are banned; the log context l records where a failure happened.
func operationsContract[T ssz.EncodableSSZ](ctx context.Context, g *GossipManager, l log.Ctx, data *sentinel.GossipData, version int, name string, fn func(T, bool) error) error {
	var zero T
	decoded := zero.Clone().(T)
	if err := decoded.DecodeSSZ(common.CopyBytes(data.Data), version); err != nil {
		g.sentinel.BanPeer(ctx, data.Peer)
		l["at"] = fmt.Sprintf("decoding %s", name)
		return err
	}
	// test=false: perform full verification on objects received via gossip.
	if err := fn(decoded, false); err != nil {
		l["at"] = fmt.Sprintf("verify %s", name)
		return err
	}
	// Re-publication failures are non-fatal; just log them.
	if _, err := g.sentinel.PublishGossip(ctx, data); err != nil {
		log.Debug("failed publish gossip", "err", err)
	}
	return nil
}
func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l log.Ctx) (err error) {
defer func() {
r := recover()
if r != nil {
err = fmt.Errorf("%v", r)
}
}()
currentEpoch := utils.GetCurrentEpoch(g.genesisConfig.GenesisTime, g.beaconConfig.SecondsPerSlot, g.beaconConfig.SlotsPerEpoch)
version := g.beaconConfig.GetCurrentStateVersion(currentEpoch)
@ -119,43 +143,19 @@ func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l
g.mu.RUnlock()
case sentinel.GossipType_VoluntaryExitGossipType:
object = &cltypes.SignedVoluntaryExit{}
if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
g.sentinel.BanPeer(ctx, data.Peer)
l["at"] = "decode exit"
if err := operationsContract[*cltypes.SignedVoluntaryExit](ctx, g, l, data, int(version), "voluntary exit", g.forkChoice.OnVoluntaryExit); err != nil {
return err
}
case sentinel.GossipType_ProposerSlashingGossipType:
object = &cltypes.ProposerSlashing{}
if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
l["at"] = "decode proposer slash"
g.sentinel.BanPeer(ctx, data.Peer)
if err := operationsContract[*cltypes.ProposerSlashing](ctx, g, l, data, int(version), "proposer slashing", g.forkChoice.OnProposerSlashing); err != nil {
return err
}
case sentinel.GossipType_AttesterSlashingGossipType:
object = &cltypes.AttesterSlashing{}
if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
l["at"] = "decode attester slash"
g.sentinel.BanPeer(ctx, data.Peer)
if err := operationsContract[*cltypes.AttesterSlashing](ctx, g, l, data, int(version), "attester slashing", g.forkChoice.OnAttesterSlashing); err != nil {
return err
}
if err := g.forkChoice.OnAttesterSlashing(object.(*cltypes.AttesterSlashing)); err != nil {
l["at"] = "on attester slash"
return err
}
case sentinel.GossipType_AggregateAndProofGossipType:
object = &cltypes.SignedAggregateAndProof{}
if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
l["at"] = "decoding proof"
g.sentinel.BanPeer(ctx, data.Peer)
return err
}
if err := g.forkChoice.OnAttestation(object.(*cltypes.SignedAggregateAndProof).Message.Aggregate, false); err != nil {
l["at"] = "on attestation"
log.Trace("Could not process attestation", "reason", err)
return err
}
//case sentinel.GossipType_AggregateAndProofGossipType:
// TODO: implement
}
return nil
}

View File

@ -1,32 +1,35 @@
package pool
import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
)
var operationsMultiplier = 10 // Cap the amount of cached element to max_operations_per_block * operations_multiplier
var operationsMultiplier = 20 // Cap the amount of cached element to max_operations_per_block * operations_multiplier
type OperationPool[T any] struct {
pool *lru.Cache[libcommon.Bytes96, T] // Map the Signature to the underlying object
type OperationPool[K comparable, T any] struct {
pool *lru.Cache[K, T] // Map the Signature to the underlying object
}
func NewOperationPool[T any](maxOperationsPerBlock int, matricName string) *OperationPool[T] {
pool, err := lru.New[libcommon.Bytes96, T](matricName, maxOperationsPerBlock*operationsMultiplier)
func NewOperationPool[K comparable, T any](maxOperationsPerBlock int, matricName string) *OperationPool[K, T] {
pool, err := lru.New[K, T](matricName, maxOperationsPerBlock*operationsMultiplier)
if err != nil {
panic(err)
}
return &OperationPool[T]{pool: pool}
return &OperationPool[K, T]{pool: pool}
}
func (o *OperationPool[T]) Insert(signature libcommon.Bytes96, operation T) {
o.pool.Add(signature, operation)
func (o *OperationPool[K, T]) Insert(k K, operation T) {
o.pool.Add(k, operation)
}
func (o *OperationPool[T]) DeleteIfExist(signature libcommon.Bytes96) (removed bool) {
return o.pool.Remove(signature)
func (o *OperationPool[K, T]) DeleteIfExist(k K) (removed bool) {
return o.pool.Remove(k)
}
func (o *OperationPool[T]) Raw() []T {
func (o *OperationPool[K, T]) Has(k K) (hash bool) {
return o.pool.Contains(k)
}
func (o *OperationPool[K, T]) Raw() []T {
return o.pool.Values()
}

View File

@ -5,12 +5,12 @@ import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/crypto/blake2b"
)
// DoubleSignatureKey uses blake2b algorithm to merge two signatures together. blake2 is faster than sha3.
func doubleSignatureKey(one, two libcommon.Bytes96) (out libcommon.Bytes96) {
res := utils.Keccak256(one[:], two[:])
res := blake2b.Sum256(append(one[:], two[:]...))
copy(out[:], res[:])
return
}
@ -25,19 +25,34 @@ func ComputeKeyForAttesterSlashing(slashing *cltypes.AttesterSlashing) libcommon
// OperationsPool is the collection of all gossip-collectable operations.
type OperationsPool struct {
AttestationsPool *OperationPool[*solid.Attestation]
AttesterSlashingsPool *OperationPool[*cltypes.AttesterSlashing]
ProposerSlashingsPool *OperationPool[*cltypes.ProposerSlashing]
BLSToExecutionChangesPool *OperationPool[*cltypes.SignedBLSToExecutionChange]
VoluntaryExistsPool *OperationPool[*cltypes.SignedVoluntaryExit]
AttestationsPool *OperationPool[libcommon.Bytes96, *solid.Attestation]
AttesterSlashingsPool *OperationPool[libcommon.Bytes96, *cltypes.AttesterSlashing]
ProposerSlashingsPool *OperationPool[libcommon.Bytes96, *cltypes.ProposerSlashing]
BLSToExecutionChangesPool *OperationPool[libcommon.Bytes96, *cltypes.SignedBLSToExecutionChange]
VoluntaryExistsPool *OperationPool[uint64, *cltypes.SignedVoluntaryExit]
}
func NewOperationsPool(beaconCfg *clparams.BeaconChainConfig) OperationsPool {
return OperationsPool{
AttestationsPool: NewOperationPool[*solid.Attestation](int(beaconCfg.MaxAttestations), "attestationsPool"),
AttesterSlashingsPool: NewOperationPool[*cltypes.AttesterSlashing](int(beaconCfg.MaxAttestations), "attesterSlashingsPool"),
ProposerSlashingsPool: NewOperationPool[*cltypes.ProposerSlashing](int(beaconCfg.MaxAttestations), "proposerSlashingsPool"),
BLSToExecutionChangesPool: NewOperationPool[*cltypes.SignedBLSToExecutionChange](int(beaconCfg.MaxBlsToExecutionChanges), "blsExecutionChangesPool"),
VoluntaryExistsPool: NewOperationPool[*cltypes.SignedVoluntaryExit](int(beaconCfg.MaxBlsToExecutionChanges), "voluntaryExitsPool"),
AttestationsPool: NewOperationPool[libcommon.Bytes96, *solid.Attestation](int(beaconCfg.MaxAttestations), "attestationsPool"),
AttesterSlashingsPool: NewOperationPool[libcommon.Bytes96, *cltypes.AttesterSlashing](int(beaconCfg.MaxAttestations), "attesterSlashingsPool"),
ProposerSlashingsPool: NewOperationPool[libcommon.Bytes96, *cltypes.ProposerSlashing](int(beaconCfg.MaxAttestations), "proposerSlashingsPool"),
BLSToExecutionChangesPool: NewOperationPool[libcommon.Bytes96, *cltypes.SignedBLSToExecutionChange](int(beaconCfg.MaxBlsToExecutionChanges), "blsExecutionChangesPool"),
VoluntaryExistsPool: NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](int(beaconCfg.MaxBlsToExecutionChanges), "voluntaryExitsPool"),
}
}
// NotifyBlock evicts from the pools every operation that the given block
// already includes, so that it is not kept around or re-served afterwards.
func (o *OperationsPool) NotifyBlock(blk *cltypes.BeaconBlock) {
	body := blk.Body
	// Proposer slashings are keyed via ComputeKeyForProposerSlashing.
	body.ProposerSlashings.Range(func(_ int, slashing *cltypes.ProposerSlashing, _ int) bool {
		o.ProposerSlashingsPool.DeleteIfExist(ComputeKeyForProposerSlashing(slashing))
		return true
	})
	// Attester slashings are keyed via ComputeKeyForAttesterSlashing.
	body.AttesterSlashings.Range(func(_ int, slashing *cltypes.AttesterSlashing, _ int) bool {
		o.AttesterSlashingsPool.DeleteIfExist(ComputeKeyForAttesterSlashing(slashing))
		return true
	})
	// Voluntary exits are keyed directly by validator index.
	body.VoluntaryExits.Range(func(_ int, exit *cltypes.SignedVoluntaryExit, _ int) bool {
		o.VoluntaryExistsPool.DeleteIfExist(exit.VoluntaryExit.ValidatorIndex)
		return true
	})
}

View File

@ -7,8 +7,10 @@ import (
"testing"
"github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cl/pool"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes"
@ -151,7 +153,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err
anchorState, err := spectest.ReadBeaconState(root, c.Version(), "anchor_state.ssz_snappy")
require.NoError(t, err)
forkStore, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, false)
forkStore, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), false)
require.NoError(t, err)
var steps []ForkChoiceStep
@ -168,7 +170,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err
data := &cltypes.AttesterSlashing{}
err := spectest.ReadSsz(root, c.Version(), step.GetAttesterSlashing()+".ssz_snappy", data)
require.NoError(t, err, stepstr)
err = forkStore.OnAttesterSlashing(data)
err = forkStore.OnAttesterSlashing(data, false)
if step.GetValid() {
require.NoError(t, err, stepstr)
} else {

View File

@ -39,16 +39,8 @@ func (I *impl) ProcessProposerSlashing(s abstract.BeaconState, propSlashing *clt
return fmt.Errorf("non-matching proposer indices proposer slashing: %d != %d", h1.ProposerIndex, h2.ProposerIndex)
}
h1Root, err := h1.HashSSZ()
if err != nil {
return fmt.Errorf("unable to hash header1: %v", err)
}
h2Root, err := h2.HashSSZ()
if err != nil {
return fmt.Errorf("unable to hash header2: %v", err)
}
if h1Root == h2Root {
return fmt.Errorf("propose slashing headers are the same: %v == %v", h1Root, h2Root)
if *h1 == *h2 {
return fmt.Errorf("proposee slashing headers are the same")
}
proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex))
@ -198,7 +190,7 @@ func (I *impl) ProcessDeposit(s abstract.BeaconState, deposit *cltypes.Deposit)
// ProcessVoluntaryExit takes a voluntary exit and applies state transition.
func (I *impl) ProcessVoluntaryExit(s abstract.BeaconState, signedVoluntaryExit *cltypes.SignedVoluntaryExit) error {
// Sanity checks so that we know it is good.
voluntaryExit := signedVoluntaryExit.VolunaryExit
voluntaryExit := signedVoluntaryExit.VoluntaryExit
currentEpoch := state.Epoch(s)
validator, err := s.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex))
if err != nil {

View File

@ -19,6 +19,7 @@ import (
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cl/phase1/network"
"github.com/ledgerwatch/erigon/cl/phase1/stages"
"github.com/ledgerwatch/erigon/cl/pool"
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
@ -83,7 +84,9 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi
return err
}
}
forkChoice, err := forkchoice.NewForkChoiceStore(ctx, state, engine, caplinFreezer, true)
pool := pool.NewOperationsPool(beaconConfig)
forkChoice, err := forkchoice.NewForkChoiceStore(ctx, state, engine, caplinFreezer, pool, true)
if err != nil {
logger.Error("Could not create forkchoice", "err", err)
return err
@ -114,7 +117,7 @@ func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engi
}
if cfg.Active {
apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, db, forkChoice)
apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, db, forkChoice, pool)
go beacon.ListenAndServe(apiHandler, &cfg)
log.Info("Beacon API started", "addr", cfg.Address)
}

View File

@ -8,9 +8,11 @@ import (
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cl/pool"
"github.com/ledgerwatch/log/v3"
)
@ -39,7 +41,7 @@ func (r *RegressionTester) Run(name string, fn func(*forkchoice.ForkChoiceStore,
if err != nil {
return err
}
store, err := forkchoice.NewForkChoiceStore(context.Background(), state, nil, nil, true)
store, err := forkchoice.NewForkChoiceStore(context.Background(), state, nil, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), true)
if err != nil {
return err
}

View File

@ -30,9 +30,9 @@ func createSentinel(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockC
gossipTopics := []sentinel.GossipTopic{
sentinel.BeaconBlockSsz,
//sentinel.BeaconAggregateAndProofSsz,
//sentinel.VoluntaryExitSsz,
//sentinel.ProposerSlashingSsz,
//sentinel.AttesterSlashingSsz,
sentinel.VoluntaryExitSsz,
sentinel.ProposerSlashingSsz,
sentinel.AttesterSlashingSsz,
}
// gossipTopics = append(gossipTopics, sentinel.GossipSidecarTopics(chain.MaxBlobsPerBlock)...)