From 4008ea736f570e74663871db88c735c076bf384c Mon Sep 17 00:00:00 2001 From: kasey <489222+kasey@users.noreply.github.com> Date: Wed, 6 Dec 2023 20:36:25 -0600 Subject: [PATCH] Verify roblobs (#13245) * scaffolding for verification package * WIP blob verification methods * lock wrapper for safer forkchoice sharing * more solid cache and verification designs; adding tests * more test coverage, adding missing cache files * clearer func name * remove forkchoice borrower (it's in another PR) * revert temporary interface experiment * lint * nishant feedback * add comments with spec text to all verifications * some comments on public methods * invert confusing verification name * deep source * remove cache from ProposerCache + gaz * more consistently early return on error paths * messed up the test with the wrong config value * terence naming feedback * tests on BeginsAt * lint * deep source... * name errors after failure, not expectation * deep sooource * check len()==0 instead of nil so empty lists work * update test for EIP-7044 --------- Co-authored-by: Kasey Kirkham --- beacon-chain/blockchain/kzg/BUILD.bazel | 1 + beacon-chain/blockchain/kzg/validation.go | 6 + beacon-chain/core/blocks/exit_test.go | 11 +- beacon-chain/core/signing/BUILD.bazel | 1 - beacon-chain/core/signing/signing_root.go | 21 +- beacon-chain/startup/clock.go | 5 + .../sync/rpc_beacon_blocks_by_root_test.go | 4 +- beacon-chain/sync/rpc_send_request_test.go | 6 +- beacon-chain/sync/validate_blob_test.go | 2 +- beacon-chain/verification/BUILD.bazel | 63 +- beacon-chain/verification/blob.go | 296 ++++++++ beacon-chain/verification/blob_test.go | 660 ++++++++++++++++++ beacon-chain/verification/cache.go | 169 +++++ beacon-chain/verification/cache_test.go | 119 ++++ beacon-chain/verification/error.go | 32 + beacon-chain/verification/initializer.go | 108 +++ beacon-chain/verification/result.go | 63 ++ consensus-types/blocks/roblob.go | 5 + encoding/ssz/detect/configfork_test.go | 40 +- network/forks/ordered.go | 15 + runtime/logging/blob.go | 11 +- testing/util/BUILD.bazel | 2 + testing/util/deneb.go | 96 ++- testing/util/deneb_test.go | 16 + time/slots/slottime.go | 6 + time/slots/slottime_test.go | 31 + 26 files changed, 1723 insertions(+), 66 deletions(-) create mode 100644 beacon-chain/verification/blob.go create mode 100644 beacon-chain/verification/blob_test.go create mode 100644 beacon-chain/verification/cache.go create mode 100644 beacon-chain/verification/cache_test.go create mode 100644 beacon-chain/verification/error.go create mode 100644 beacon-chain/verification/initializer.go create mode 100644 beacon-chain/verification/result.go create mode 100644 testing/util/deneb_test.go diff --git a/beacon-chain/blockchain/kzg/BUILD.bazel b/beacon-chain/blockchain/kzg/BUILD.bazel index 216754b51..4f30dc2dc 100644 --- a/beacon-chain/blockchain/kzg/BUILD.bazel +++ b/beacon-chain/blockchain/kzg/BUILD.bazel @@ -10,6 +10,7 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg", visibility = ["//visibility:public"], deps = [ + "//consensus-types/blocks:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "@com_github_crate_crypto_go_kzg_4844//:go_default_library", "@com_github_pkg_errors//:go_default_library", diff --git a/beacon-chain/blockchain/kzg/validation.go b/beacon-chain/blockchain/kzg/validation.go index 6af08988b..f4a46a02c 100644 --- a/beacon-chain/blockchain/kzg/validation.go +++ b/beacon-chain/blockchain/kzg/validation.go @@ -4,6 +4,7 @@ import ( "fmt" GoKZG 
"github.com/crate-crypto/go-kzg-4844" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" ) @@ -31,6 +32,11 @@ func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidec return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs) } +// VerifyROBlobCommitment is a helper that massages the fields of an ROBlob into the types needed to call VerifyBlobKZGProof. +func VerifyROBlobCommitment(sc blocks.ROBlob) error { + return kzgContext.VerifyBlobKZGProof(bytesToBlob(sc.Blob), bytesToCommitment(sc.KzgCommitment), bytesToKZGProof(sc.KzgProof)) +} + func bytesToBlob(blob []byte) (ret GoKZG.Blob) { copy(ret[:], blob) return diff --git a/beacon-chain/core/blocks/exit_test.go b/beacon-chain/core/blocks/exit_test.go index 4274b523f..84592a6a3 100644 --- a/beacon-chain/core/blocks/exit_test.go +++ b/beacon-chain/core/blocks/exit_test.go @@ -17,6 +17,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/testing/assert" "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/prysmaticlabs/prysm/v4/testing/util" + "github.com/prysmaticlabs/prysm/v4/time/slots" ) func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) { @@ -134,6 +135,10 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) { } func TestVerifyExitAndSignature(t *testing.T) { + undo := util.HackDenebMaxuint(t) + defer undo() + denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch) + require.NoError(t, err) tests := []struct { name string setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) @@ -241,11 +246,11 @@ func TestVerifyExitAndSignature(t *testing.T) { fork := ðpb.Fork{ PreviousVersion: params.BeaconConfig().CapellaForkVersion, CurrentVersion: params.BeaconConfig().DenebForkVersion, - Epoch: primitives.Epoch(2), + Epoch: params.BeaconConfig().DenebForkEpoch, } signedExit := ðpb.SignedVoluntaryExit{ Exit: ðpb.VoluntaryExit{ - Epoch: 2, + Epoch: params.BeaconConfig().CapellaForkEpoch, ValidatorIndex: 0, }, } @@ -253,7 +258,7 @@ func TestVerifyExitAndSignature(t *testing.T) { bs, err := state_native.InitializeFromProtoUnsafeDeneb(ðpb.BeaconStateDeneb{ GenesisValidatorsRoot: bs.GenesisValidatorsRoot(), Fork: fork, - Slot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1, + Slot: denebSlot, Validators: bs.Validators(), }) if err != nil { diff --git a/beacon-chain/core/signing/BUILD.bazel b/beacon-chain/core/signing/BUILD.bazel index 95c76a052..8d3b87b71 100644 --- a/beacon-chain/core/signing/BUILD.bazel +++ b/beacon-chain/core/signing/BUILD.bazel @@ -16,7 +16,6 @@ go_library( "//crypto/bls:go_default_library", "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", - "//runtime/version:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_fastssz//:go_default_library", ], diff --git a/beacon-chain/core/signing/signing_root.go b/beacon-chain/core/signing/signing_root.go index 772e689d6..2c1f68a18 100644 --- a/beacon-chain/core/signing/signing_root.go +++ b/beacon-chain/core/signing/signing_root.go @@ -11,7 +11,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/crypto/bls" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" - "github.com/prysmaticlabs/prysm/v4/runtime/version" ) // ForkVersionByteLength length of fork version byte array. 
@@ -57,18 +56,22 @@ const ( // ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key. func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) { - fork := st.Fork() + return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key) +} + +// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState. +// This is particularly helpful for signing values in tests. +func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) { // EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits. // This allows for signed validator exits to be valid forever. - if st.Version() >= version.Deneb && domain == params.BeaconConfig().DomainVoluntaryExit { + if domain == params.BeaconConfig().DomainVoluntaryExit && epoch >= params.BeaconConfig().DenebForkEpoch { fork = &ethpb.Fork{ PreviousVersion: params.BeaconConfig().CapellaForkVersion, CurrentVersion: params.BeaconConfig().CapellaForkVersion, Epoch: params.BeaconConfig().CapellaForkEpoch, } } - - d, err := Domain(fork, epoch, domain, st.GenesisValidatorsRoot()) + d, err := Domain(fork, epoch, domain, vr) if err != nil { return nil, err } @@ -102,8 +105,14 @@ func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) { if err != nil { return [32]byte{}, err } + return ComputeSigningRootForRoot(objRoot, domain) +} + +// ComputeSigningRootForRoot works the same as ComputeSigningRoot, +// except that it gets the root from an argument instead of a callback. +func ComputeSigningRootForRoot(root [32]byte, domain []byte) ([32]byte, error) { container := &ethpb.SigningData{ - ObjectRoot: objRoot[:], + ObjectRoot: root[:], Domain: domain, } return container.HashTreeRoot() diff --git a/beacon-chain/startup/clock.go b/beacon-chain/startup/clock.go index a35b3ff99..d68d0f227 100644 --- a/beacon-chain/startup/clock.go +++ b/beacon-chain/startup/clock.go @@ -41,6 +41,11 @@ func (g *Clock) CurrentSlot() types.Slot { return slots.Duration(g.t, now) } +// SlotStart computes the time the given slot begins. +func (g *Clock) SlotStart(slot types.Slot) time.Time { + return slots.BeginsAt(slot, g.t) +} + +// Now provides a value for time.Now() that can be overridden in tests.
func (g *Clock) Now() time.Time { return g.now() diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go index 18d0cc864..c635c057b 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go @@ -437,8 +437,8 @@ func TestConstructPendingBlobsRequest(t *testing.T) { Signature: bytesutil.PadTo([]byte{}, 96), } blobSidecars := []blocks.ROBlob{ - util.GenerateTestDenebBlobSidecar(t, root, header, 0, bytesutil.PadTo([]byte{}, 48)), - util.GenerateTestDenebBlobSidecar(t, root, header, 2, bytesutil.PadTo([]byte{}, 48)), + util.GenerateTestDenebBlobSidecar(t, root, header, 0, bytesutil.PadTo([]byte{}, 48), make([][]byte, 0)), + util.GenerateTestDenebBlobSidecar(t, root, header, 2, bytesutil.PadTo([]byte{}, 48), make([][]byte, 0)), } vscs, err := verification.BlobSidecarSliceNoop(blobSidecars) require.NoError(t, err) diff --git a/beacon-chain/sync/rpc_send_request_test.go b/beacon-chain/sync/rpc_send_request_test.go index 8d74a077a..703a24932 100644 --- a/beacon-chain/sync/rpc_send_request_test.go +++ b/beacon-chain/sync/rpc_send_request_test.go @@ -482,8 +482,8 @@ func TestBlobValidatorFromRootReq(t *testing.T) { validRoot := bytesutil.PadTo([]byte("valid"), 32) invalidRoot := bytesutil.PadTo([]byte("invalid"), 32) header := &ethpb.SignedBeaconBlockHeader{} - validb := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(validRoot), header, 0, []byte{}) - invalidb := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(invalidRoot), header, 0, []byte{}) + validb := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(validRoot), header, 0, []byte{}, make([][]byte, 0)) + invalidb := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(invalidRoot), header, 0, []byte{}, make([][]byte, 0)) cases := []struct { name string ids []*ethpb.BlobIdentifier @@ -584,7 +584,7 @@ func TestBlobValidatorFromRangeReq(t *testing.T) { header := &ethpb.SignedBeaconBlockHeader{ Header: &ethpb.BeaconBlockHeader{Slot: c.responseSlot}, } - sc := util.GenerateTestDenebBlobSidecar(t, [32]byte{}, header, 0, []byte{}) + sc := util.GenerateTestDenebBlobSidecar(t, [32]byte{}, header, 0, []byte{}, make([][]byte, 0)) err := vf(sc) if c.err != nil { require.ErrorIs(t, err, c.err) diff --git a/beacon-chain/sync/validate_blob_test.go b/beacon-chain/sync/validate_blob_test.go index 09445998b..c90d81718 100644 --- a/beacon-chain/sync/validate_blob_test.go +++ b/beacon-chain/sync/validate_blob_test.go @@ -239,7 +239,7 @@ func TestValidateBlob_AlreadySeenInCache(t *testing.T) { //_, scs := util.GenerateTestDenebBlockWithSidecar(t, r, chainService.CurrentSlot()+1, 1) header, err := signedBb.Header() require.NoError(t, err) - sc := util.GenerateTestDenebBlobSidecar(t, r, header, 0, make([]byte, 48)) + sc := util.GenerateTestDenebBlobSidecar(t, r, header, 0, make([]byte, 48), make([][]byte, 0)) b := sc.BlobSidecar buf := new(bytes.Buffer) diff --git a/beacon-chain/verification/BUILD.bazel b/beacon-chain/verification/BUILD.bazel index 90fb979ed..98f181a93 100644 --- a/beacon-chain/verification/BUILD.bazel +++ b/beacon-chain/verification/BUILD.bazel @@ -1,9 +1,66 @@ -load("@prysm//tools/go:def.bzl", "go_library") +load("@prysm//tools/go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["fake.go"], + srcs = [ + "blob.go", + "cache.go", + "error.go", + "fake.go", + "initializer.go", + "result.go", + ], importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification",
visibility = ["//visibility:public"], - deps = ["//consensus-types/blocks:go_default_library"], + deps = [ + "//beacon-chain/blockchain/kzg:go_default_library", + "//beacon-chain/core/helpers:go_default_library", + "//beacon-chain/core/signing:go_default_library", + "//beacon-chain/core/transition:go_default_library", + "//beacon-chain/forkchoice/types:go_default_library", + "//beacon-chain/startup:go_default_library", + "//beacon-chain/state:go_default_library", + "//cache/lru:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/interfaces:go_default_library", + "//consensus-types/primitives:go_default_library", + "//crypto/bls:go_default_library", + "//encoding/bytesutil:go_default_library", + "//network/forks:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "//runtime/logging:go_default_library", + "//time/slots:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", + "@com_github_pkg_errors//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "blob_test.go", + "cache_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//beacon-chain/core/signing:go_default_library", + "//beacon-chain/db:go_default_library", + "//beacon-chain/forkchoice/types:go_default_library", + "//beacon-chain/startup:go_default_library", + "//beacon-chain/state:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/primitives:go_default_library", + "//crypto/bls:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "//runtime/interop:go_default_library", + "//testing/require:go_default_library", + "//testing/util:go_default_library", + "//time/slots:go_default_library", + "@com_github_pkg_errors//:go_default_library", + ], ) diff --git a/beacon-chain/verification/blob.go b/beacon-chain/verification/blob.go new file mode 100644 index 000000000..535c4bca8 --- /dev/null +++ b/beacon-chain/verification/blob.go @@ -0,0 +1,296 @@ +package verification + +import ( + "context" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/v4/runtime/logging" + "github.com/prysmaticlabs/prysm/v4/time/slots" + log "github.com/sirupsen/logrus" +) + +const ( + RequireBlobIndexInBounds Requirement = iota + RequireSlotNotTooEarly + RequireSlotAboveFinalized + RequireValidProposerSignature + RequireSidecarParentSeen + RequireSidecarParentValid + RequireSidecarParentSlotLower + RequireSidecarDescendsFromFinalized + RequireSidecarInclusionProven + RequireSidecarKzgProofVerified + RequireSidecarProposerExpected +) + +// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip +// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob. 
+var GossipSidecarRequirements = []Requirement{ + RequireBlobIndexInBounds, + RequireSlotNotTooEarly, + RequireSlotAboveFinalized, + RequireValidProposerSignature, + RequireSidecarParentSeen, + RequireSidecarParentValid, + RequireSidecarParentSlotLower, + RequireSidecarDescendsFromFinalized, + RequireSidecarInclusionProven, + RequireSidecarKzgProofVerified, + RequireSidecarProposerExpected, +} + +var ( + ErrBlobInvalid = errors.New("blob failed verification") + // ErrBlobIndexInvalid means RequireBlobIndexInBounds failed. + ErrBlobIndexInvalid = errors.Wrap(ErrBlobInvalid, "incorrect blob sidecar index") + // ErrSlotTooEarly means RequireSlotNotTooEarly failed. + ErrSlotTooEarly = errors.Wrap(ErrBlobInvalid, "slot is too far in the future") + // ErrSlotNotAfterFinalized means RequireSlotAboveFinalized failed. + ErrSlotNotAfterFinalized = errors.Wrap(ErrBlobInvalid, "slot <= finalized checkpoint") + // ErrInvalidProposerSignature means RequireValidProposerSignature failed. + ErrInvalidProposerSignature = errors.Wrap(ErrBlobInvalid, "proposer signature could not be verified") + // ErrSidecarParentNotSeen means RequireSidecarParentSeen failed. + ErrSidecarParentNotSeen = errors.Wrap(ErrBlobInvalid, "parent root has not been seen") + // ErrSidecarParentInvalid means RequireSidecarParentValid failed. + ErrSidecarParentInvalid = errors.Wrap(ErrBlobInvalid, "parent block is not valid") + // ErrSlotNotAfterParent means RequireSidecarParentSlotLower failed. + ErrSlotNotAfterParent = errors.Wrap(ErrBlobInvalid, "slot <= slot") + // ErrSidecarNotFinalizedDescendent means RequireSidecarDescendsFromFinalized failed. + ErrSidecarNotFinalizedDescendent = errors.Wrap(ErrBlobInvalid, "blob parent is not descended from the finalized block") + // ErrSidecarInclusionProofInvalid means RequireSidecarInclusionProven failed. + ErrSidecarInclusionProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar inclusion proof verification failed") + // ErrSidecarKzgProofInvalid means RequireSidecarKzgProofVerified failed. + ErrSidecarKzgProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar kzg commitment proof verification failed") + // ErrSidecarUnexpectedProposer means RequireSidecarProposerExpected failed. + ErrSidecarUnexpectedProposer = errors.Wrap(ErrBlobInvalid, "sidecar was not proposed by the expected proposer_index") +) + +type BlobVerifier struct { + *sharedResources + results *results + blob blocks.ROBlob + parent state.BeaconState + verifyBlobCommitment roblobCommitmentVerifier +} + +type roblobCommitmentVerifier func(blocks.ROBlob) error + +// VerifiedROBlob "upgrades" the wrapped ROBlob to a VerifiedROBlob. +// If any of the verifications run against the blob failed, or some required verifications +// were not run, an error will be returned. +func (bv *BlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) { + if bv.results.allSatisfied() { + return blocks.NewVerifiedROBlob(bv.blob), nil + } + return blocks.VerifiedROBlob{}, bv.results.errors(ErrBlobInvalid) +} + +func (bv *BlobVerifier) recordResult(req Requirement, err *error) { + if err == nil || *err == nil { + bv.results.record(req, nil) + return + } + bv.results.record(req, *err) +} + +// BlobIndexInBounds represents the following spec verification: +// [REJECT] The sidecar's index is consistent with MAX_BLOBS_PER_BLOCK -- i.e. blob_sidecar.index < MAX_BLOBS_PER_BLOCK.
+func (bv *BlobVerifier) BlobIndexInBounds() (err error) { + defer bv.recordResult(RequireBlobIndexInBounds, &err) + if bv.blob.Index >= fieldparams.MaxBlobsPerBlock { + log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar index > MAX_BLOBS_PER_BLOCK") + return ErrBlobIndexInvalid + } + return nil +} + +// SlotNotTooEarly represents the spec verification: +// [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) +// -- i.e. validate that block_header.slot <= current_slot +func (bv *BlobVerifier) SlotNotTooEarly() (err error) { + defer bv.recordResult(RequireSlotNotTooEarly, &err) + if bv.clock.CurrentSlot() == bv.blob.Slot() { + return nil + } + // subtract the max clock disparity from the start slot time + validAfter := bv.clock.SlotStart(bv.blob.Slot()).Add(-1 * params.BeaconNetworkConfig().MaximumGossipClockDisparity) + // If the difference between now and gt is greater than maximum clock disparity, the block is too far in the future. + if bv.clock.Now().Before(validAfter) { + return ErrSlotTooEarly + } + return nil +} + +// SlotAboveFinalized represents the spec verification: +// [IGNORE] The sidecar is from a slot greater than the latest finalized slot +// -- i.e. validate that block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) +func (bv *BlobVerifier) SlotAboveFinalized() (err error) { + defer bv.recordResult(RequireSlotAboveFinalized, &err) + fcp := bv.fc.FinalizedCheckpoint() + fSlot, err := slots.EpochStart(fcp.Epoch) + if err != nil { + return errors.Wrapf(ErrSlotNotAfterFinalized, "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error()) + } + if bv.blob.Slot() <= fSlot { + return ErrSlotNotAfterFinalized + } + return nil +} + +// ValidProposerSignature represents the spec verification: +// [REJECT] The proposer signature of blob_sidecar.signed_block_header, +// is valid with respect to the block_header.proposer_index pubkey. +func (bv *BlobVerifier) ValidProposerSignature(ctx context.Context) (err error) { + defer bv.recordResult(RequireValidProposerSignature, &err) + sd := blobToSignatureData(bv.blob) + // First check if there is a cached verification that can be reused. + seen, err := bv.sc.SignatureVerified(sd) + if seen { + if err != nil { + log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache") + return ErrInvalidProposerSignature + } + return nil + } + + // retrieve the parent state to fallback to full verification + parent, err := bv.parentState(ctx) + if err != nil { + log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("could not replay parent state for blob signature verification") + return ErrInvalidProposerSignature + } + // Full verification, which will subsequently be cached for anything sharing the signature cache. + if err := bv.sc.VerifySignature(sd, parent); err != nil { + log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("signature verification failed") + return ErrInvalidProposerSignature + } + return nil +} + +// SidecarParentSeen represents the spec verification: +// [IGNORE] The sidecar's block's parent (defined by block_header.parent_root) has been seen +// (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
+func (bv *BlobVerifier) SidecarParentSeen(badParent func([32]byte) bool) (err error) { + defer bv.recordResult(RequireSidecarParentSeen, &err) + if bv.fc.HasNode(bv.blob.ParentRoot()) { + return nil + } + if badParent != nil && badParent(bv.blob.ParentRoot()) { + return nil + } + return ErrSidecarParentNotSeen +} + +// SidecarParentValid represents the spec verification: +// [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation. +func (bv *BlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { + defer bv.recordResult(RequireSidecarParentValid, &err) + if badParent != nil && badParent(bv.blob.ParentRoot()) { + return ErrSidecarParentInvalid + } + return nil +} + +// SidecarParentSlotLower represents the spec verification: +// [REJECT] The sidecar is from a higher slot than the sidecar's block's parent (defined by block_header.parent_root). +func (bv *BlobVerifier) SidecarParentSlotLower() (err error) { + defer bv.recordResult(RequireSidecarParentSlotLower, &err) + parentSlot, err := bv.fc.Slot(bv.blob.ParentRoot()) + if err != nil { + return errors.Wrap(ErrSlotNotAfterParent, "parent root not in forkchoice") + } + if parentSlot >= bv.blob.Slot() { + return ErrSlotNotAfterParent + } + return nil +} + +// SidecarDescendsFromFinalized represents the spec verification: +// [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block +// -- i.e. get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root. +func (bv *BlobVerifier) SidecarDescendsFromFinalized() (err error) { + defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err) + if !bv.fc.IsCanonical(bv.blob.ParentRoot()) { + return ErrSidecarNotFinalizedDescendent + } + return nil +} + +// SidecarInclusionProven represents the spec verification: +// [REJECT] The sidecar's inclusion proof is valid as verified by verify_blob_sidecar_inclusion_proof(blob_sidecar). +func (bv *BlobVerifier) SidecarInclusionProven() (err error) { + defer bv.recordResult(RequireSidecarInclusionProven, &err) + if err := blocks.VerifyKZGInclusionProof(bv.blob); err != nil { + log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("sidecar inclusion proof verification failed") + return ErrSidecarInclusionProofInvalid + } + return nil +} + +// SidecarKzgProofVerified represents the spec verification: +// [REJECT] The sidecar's blob is valid as verified by +// verify_blob_kzg_proof(blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof). +func (bv *BlobVerifier) SidecarKzgProofVerified() (err error) { + defer bv.recordResult(RequireSidecarKzgProofVerified, &err) + if err := bv.verifyBlobCommitment(bv.blob); err != nil { + log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("kzg commitment proof verification failed") + return ErrSidecarKzgProofInvalid + } + return nil +} + +// SidecarProposerExpected represents the spec verification: +// [REJECT] The sidecar is proposed by the expected proposer_index for the block's slot +// in the context of the current shuffling (defined by block_header.parent_root/block_header.slot). +// If the proposer_index cannot immediately be verified against the expected shuffling, the sidecar MAY be queued +// for later processing while proposers for the block's branch are calculated -- in such a case do not REJECT, instead IGNORE this message. 
+func (bv *BlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) { + defer bv.recordResult(RequireSidecarProposerExpected, &err) + idx, cached := bv.pc.Proposer(bv.blob.ParentRoot(), bv.blob.Slot()) + if !cached { + pst, err := bv.parentState(ctx) + if err != nil { + log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("state replay to parent_root failed") + return ErrSidecarUnexpectedProposer + } + idx, err = bv.pc.ComputeProposer(ctx, bv.blob.ParentRoot(), bv.blob.Slot(), pst) + if err != nil { + log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("error computing proposer index from parent state") + return ErrSidecarUnexpectedProposer + } + } + if idx != bv.blob.ProposerIndex() { + log.WithError(ErrSidecarUnexpectedProposer). + WithFields(logging.BlobFields(bv.blob)).WithField("expected_proposer", idx). + Debug("unexpected blob proposer") + return ErrSidecarUnexpectedProposer + } + return nil +} + +func (bv *BlobVerifier) parentState(ctx context.Context) (state.BeaconState, error) { + if bv.parent != nil { + return bv.parent, nil + } + st, err := bv.sr.StateByRoot(ctx, bv.blob.ParentRoot()) + if err != nil { + return nil, err + } + bv.parent = st + return bv.parent, nil +} + +func blobToSignatureData(b blocks.ROBlob) SignatureData { + return SignatureData{ + Root: b.BlockRoot(), + Parent: b.ParentRoot(), + Signature: bytesutil.ToBytes96(b.SignedBlockHeader.Signature), + Proposer: b.ProposerIndex(), + Slot: b.Slot(), + } +} diff --git a/beacon-chain/verification/blob_test.go b/beacon-chain/verification/blob_test.go new file mode 100644 index 000000000..fd1c6f1f9 --- /dev/null +++ b/beacon-chain/verification/blob_test.go @@ -0,0 +1,660 @@ +package verification + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v4/testing/require" + "github.com/prysmaticlabs/prysm/v4/testing/util" + "github.com/prysmaticlabs/prysm/v4/time/slots" +) + +func TestBlobIndexInBounds(t *testing.T) { + ini := &Initializer{} + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + // set Index to a value that is out of bounds + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.BlobIndexInBounds()) + require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds)) + require.NoError(t, v.results.result(RequireBlobIndexInBounds)) + + b.Index = fieldparams.MaxBlobsPerBlock + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.ErrorIs(t, v.BlobIndexInBounds(), ErrBlobIndexInvalid) + require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds)) + require.NotNil(t, v.results.result(RequireBlobIndexInBounds)) +} + +func TestSlotNotTooEarly(t *testing.T) { + now := time.Now() + // make genesis 1 slot in the past + genesis := now.Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + // slot 1 should be 12 seconds after genesis + b.SignedBlockHeader.Header.Slot = 1 + + // This clock will give a current slot of 1 on the nose + happyClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now })) + ini := Initializer{shared: &sharedResources{clock: happyClock}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SlotNotTooEarly()) + require.Equal(t, true, v.results.executed(RequireSlotNotTooEarly)) + require.NoError(t, v.results.result(RequireSlotNotTooEarly)) + + // Since we have an early return for slots that are directly equal, give a time that is less than max disparity + // but still in the previous slot. + closeClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now.Add(-1 * params.BeaconNetworkConfig().MaximumGossipClockDisparity / 2) })) + ini = Initializer{shared: &sharedResources{clock: closeClock}} + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SlotNotTooEarly()) + + // This clock will give a current slot of 0, with now coming more than max clock disparity before slot 1 + disparate := now.Add(-2 * params.BeaconNetworkConfig().MaximumGossipClockDisparity) + dispClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return disparate })) + // Set up initializer to use the clock that will set now to a little to far before slot 1 + ini = Initializer{shared: &sharedResources{clock: dispClock}} + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SlotNotTooEarly(), ErrSlotTooEarly) + require.Equal(t, true, v.results.executed(RequireSlotNotTooEarly)) + require.NotNil(t, v.results.result(RequireSlotNotTooEarly)) +} + +func TestSlotAboveFinalized(t *testing.T) { + ini := &Initializer{shared: &sharedResources{}} + cases := []struct { + name string + slot primitives.Slot + finalizedSlot primitives.Slot + err error + }{ + { + name: "finalized epoch < blob epoch", + slot: 32, + }, + { + name: "finalized slot < blob slot (same epoch)", + slot: 31, + }, + { + name: "finalized epoch > blob epoch", + finalizedSlot: 32, + err: ErrSlotNotAfterFinalized, + }, + { + name: "finalized slot == blob slot", + slot: 35, + finalizedSlot: 35, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + finalizedCB := func() *forkchoicetypes.Checkpoint { + return &forkchoicetypes.Checkpoint{ + Epoch: slots.ToEpoch(c.finalizedSlot), + Root: [32]byte{}, + } + } + ini.shared.fc = &mockForkchoicer{FinalizedCheckpointCB: finalizedCB} + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + b.SignedBlockHeader.Header.Slot = c.slot + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ err := v.SlotAboveFinalized() + require.Equal(t, true, v.results.executed(RequireSlotAboveFinalized)) + if c.err == nil { + require.NoError(t, err) + require.NoError(t, v.results.result(RequireSlotAboveFinalized)) + } else { + require.ErrorIs(t, err, c.err) + require.NotNil(t, v.results.result(RequireSlotAboveFinalized)) + } + }) + } +} + +func TestValidProposerSignature_Cached(t *testing.T) { + ctx := context.Background() + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + expectedSd := blobToSignatureData(b) + sc := &mockSignatureCache{ + svcb: func(sig SignatureData) (bool, error) { + if sig != expectedSd { + t.Error("Did not see expected SignatureData") + } + return true, nil + }, + vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) { + t.Error("VerifySignature should not be called if the result is cached") + return nil + }, + } + ini := Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.ValidProposerSignature(ctx)) + require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) + require.NoError(t, v.results.result(RequireValidProposerSignature)) + + // simulate an error in the cache - indicating the previous verification failed + sc.svcb = func(sig SignatureData) (bool, error) { + if sig != expectedSd { + t.Error("Did not see expected SignatureData") + } + return true, errors.New("derp") + } + ini = Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}} + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature) + require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) + require.NotNil(t, v.results.result(RequireValidProposerSignature)) +} + +func TestValidProposerSignature_CacheMiss(t *testing.T) { + ctx := context.Background() + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + expectedSd := blobToSignatureData(b) + sc := &mockSignatureCache{ + svcb: func(sig SignatureData) (bool, error) { + return false, nil + }, + vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) { + if expectedSd != sig { + t.Error("unexpected signature data") + } + return nil + }, + } + ini := Initializer{shared: &sharedResources{sc: sc, sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{})}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.ValidProposerSignature(ctx)) + require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) + require.NoError(t, v.results.result(RequireValidProposerSignature)) + + // simulate state not found + ini = Initializer{shared: &sharedResources{sc: sc, sr: sbrNotFound(t, expectedSd.Parent)}} + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature) + require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) + require.NotNil(t, v.results.result(RequireValidProposerSignature)) + + // simulate successful state lookup, but sig failure + sbr := sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}) + sc = &mockSignatureCache{ + svcb: sc.svcb, + vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) { + if expectedSd != sig { + t.Error("unexpected signature data") + } + return errors.New("signature, not so good!") + }, + } + ini = Initializer{shared: &sharedResources{sc: sc, sr: sbr}} + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + + // make sure all the histories are clean before calling the method + // so we don't get polluted by previous usages + require.Equal(t, false, sbr.calledForRoot[expectedSd.Parent]) + require.Equal(t, false, sc.svCalledForSig[expectedSd]) + require.Equal(t, false, sc.vsCalledForSig[expectedSd]) + + // Here we're mainly checking that all the right interfaces get used in the unhappy path + require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature) + require.Equal(t, true, sbr.calledForRoot[expectedSd.Parent]) + require.Equal(t, true, sc.svCalledForSig[expectedSd]) + require.Equal(t, true, sc.vsCalledForSig[expectedSd]) + require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) + require.NotNil(t, v.results.result(RequireValidProposerSignature)) +} + +func badParentCb(t *testing.T, expected [32]byte, e bool) func([32]byte) bool { + return func(r [32]byte) bool { + if expected != r { + t.Error("badParent callback did not receive expected root") + } + return e + } +} + +func TestSidecarParentSeen(t *testing.T) { + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + + fcHas := &mockForkchoicer{ + HasNodeCB: func(parent [32]byte) bool { + if parent != b.ParentRoot() { + t.Error("forkchoice.HasNode called with unexpected parent root") + } + return true + }, + } + fcLacks := &mockForkchoicer{ + HasNodeCB: func(parent [32]byte) bool { + if parent != b.ParentRoot() { + t.Error("forkchoice.HasNode called with unexpected parent root") + } + return false + }, + } + + t.Run("happy path", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: fcHas}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SidecarParentSeen(nil)) + require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) + require.NoError(t, v.results.result(RequireSidecarParentSeen)) + }) + t.Run("HasNode false, no badParent cb, expected error", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: fcLacks}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarParentSeen(nil), ErrSidecarParentNotSeen) + require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) + require.NotNil(t, v.results.result(RequireSidecarParentSeen)) + }) + + t.Run("HasNode false, badParent true", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: fcLacks}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.NoError(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), true))) + require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) + require.NoError(t, v.results.result(RequireSidecarParentSeen)) + }) + t.Run("HasNode false, badParent false", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: fcLacks}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), false)), ErrSidecarParentNotSeen) + require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) + require.NotNil(t, v.results.result(RequireSidecarParentSeen)) + }) +} + +func TestSidecarParentValid(t *testing.T) { + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) + b := blobs[0] + t.Run("parent valid", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), false))) + require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) + require.NoError(t, v.results.result(RequireSidecarParentValid)) + }) + t.Run("parent not valid", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), true)), ErrSidecarParentInvalid) + require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) + require.NotNil(t, v.results.result(RequireSidecarParentValid)) + }) +} + +func TestSidecarParentSlotLower(t *testing.T) { + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + cases := []struct { + name string + fcSlot primitives.Slot + fcErr error + err error + }{ + { + name: "not in fc", + fcErr: errors.New("not in forkchoice"), + err: ErrSlotNotAfterParent, + }, + { + name: "in fc, slot lower", + fcSlot: b.Slot() - 1, + }, + { + name: "in fc, slot equal", + fcSlot: b.Slot(), + err: ErrSlotNotAfterParent, + }, + { + name: "in fc, slot higher", + fcSlot: b.Slot() + 1, + err: ErrSlotNotAfterParent, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{SlotCB: func(r [32]byte) (primitives.Slot, error) { + if b.ParentRoot() != r { + t.Error("forkchoice.Slot called with unexpected parent root") + } + return c.fcSlot, c.fcErr + }}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + err := v.SidecarParentSlotLower() + require.Equal(t, true, v.results.executed(RequireSidecarParentSlotLower)) + if c.err == nil { + require.NoError(t, err) + require.NoError(t, v.results.result(RequireSidecarParentSlotLower)) + } else { + require.ErrorIs(t, err, c.err) + require.NotNil(t, v.results.result(RequireSidecarParentSlotLower)) + } + }) + } +} + +func TestSidecarDescendsFromFinalized(t *testing.T) { + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + t.Run("not canonical", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{IsCanonicalCB: func(r [32]byte) bool { + if b.ParentRoot() != r { + t.Error("forkchoice.Slot called with unexpected parent root") + } + return false + }}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.ErrorIs(t, v.SidecarDescendsFromFinalized(), ErrSidecarNotFinalizedDescendent) + require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized)) + require.NotNil(t, v.results.result(RequireSidecarDescendsFromFinalized)) + }) + t.Run("not canonical", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{IsCanonicalCB: func(r [32]byte) bool { + if b.ParentRoot() != r { + t.Error("forkchoice.Slot called with unexpected parent root") + } + return true + }}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SidecarDescendsFromFinalized()) + require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized)) + require.NoError(t, v.results.result(RequireSidecarDescendsFromFinalized)) + }) +} + +func TestSidecarInclusionProven(t *testing.T) { + // GenerateTestDenebBlockWithSidecar is supposed to generate valid inclusion proofs + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + + ini := Initializer{} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SidecarInclusionProven()) + require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven)) + require.NoError(t, v.results.result(RequireSidecarInclusionProven)) + + // Invert bits of the first byte of the body root to mess up the proof + byte0 := b.SignedBlockHeader.Header.BodyRoot[0] + b.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255 + v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid) + require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven)) + require.NotNil(t, v.results.result(RequireSidecarInclusionProven)) +} + +func TestSidecarKzgProofVerified(t *testing.T) { + // GenerateTestDenebBlockWithSidecar is supposed to generate valid commitments + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + passes := func(vb blocks.ROBlob) error { + require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb.KzgCommitment)) + return nil + } + v := &BlobVerifier{verifyBlobCommitment: passes, results: newResults(), blob: b} + require.NoError(t, v.SidecarKzgProofVerified()) + require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) + require.NoError(t, v.results.result(RequireSidecarKzgProofVerified)) + + fails := func(vb blocks.ROBlob) error { + require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb.KzgCommitment)) + return errors.New("bad blob") + } + v = &BlobVerifier{verifyBlobCommitment: fails, results: newResults(), blob: b} + require.ErrorIs(t, v.SidecarKzgProofVerified(), ErrSidecarKzgProofInvalid) + require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) + require.NotNil(t, v.results.result(RequireSidecarKzgProofVerified)) +} + +func TestSidecarProposerExpected(t *testing.T) { + ctx := context.Background() + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + t.Run("cached, matches", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex())}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.NoError(t, v.SidecarProposerExpected(ctx)) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NoError(t, v.results.result(RequireSidecarProposerExpected)) + }) + t.Run("cached, does not match", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex() + 1)}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) + }) + t.Run("not cached, state lookup failure", func(t *testing.T) { + ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) + }) + + t.Run("not cached, proposer matches", func(t *testing.T) { + pc := &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, b.ParentRoot(), root) + require.Equal(t, b.Slot(), slot) + return b.ProposerIndex(), nil + }, + } + ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.NoError(t, v.SidecarProposerExpected(ctx)) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NoError(t, v.results.result(RequireSidecarProposerExpected)) + }) + t.Run("not cached, proposer does not match", func(t *testing.T) { + pc := &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, b.ParentRoot(), root) + require.Equal(t, b.Slot(), slot) + return b.ProposerIndex() + 1, nil + }, + } + ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) + }) + t.Run("not cached, ComputeProposer fails", func(t *testing.T) { + pc := &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, b.ParentRoot(), root) + require.Equal(t, b.Slot(), slot) + return 0, errors.New("ComputeProposer failed") + }, + } + ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) + require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) + require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) + }) +} + +func TestRequirementSatisfaction(t *testing.T) { + _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) + b := blobs[0] + ini := Initializer{} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + + _, err := v.VerifiedROBlob() + require.ErrorIs(t, err, ErrBlobInvalid) + me, ok := err.(VerificationMultiError) + require.Equal(t, true, ok) + fails := me.Failures() + // we haven't performed any verification, so all the results should be this type + for _, v := range fails { + require.ErrorIs(t, v, ErrMissingVerification) + } + + // satisfy everything through the backdoor and ensure we get the verified ro blob at the end + for _, r := range GossipSidecarRequirements { + v.results.record(r, nil) + } + require.Equal(t, true, v.results.allSatisfied()) + _, err = v.VerifiedROBlob() + require.NoError(t, err) +} + +type mockForkchoicer struct { + FinalizedCheckpointCB func() *forkchoicetypes.Checkpoint + HasNodeCB func([32]byte) bool + IsCanonicalCB func(root [32]byte) bool + SlotCB func([32]byte) (primitives.Slot, error) +} + +var _ Forkchoicer = &mockForkchoicer{} + +func (m *mockForkchoicer) FinalizedCheckpoint() *forkchoicetypes.Checkpoint { + return m.FinalizedCheckpointCB() +} + +func (m *mockForkchoicer) HasNode(root [32]byte) bool { + return m.HasNodeCB(root) +} + +func (m *mockForkchoicer) IsCanonical(root [32]byte) bool { + return m.IsCanonicalCB(root) +} + +func (m *mockForkchoicer) Slot(root [32]byte) (primitives.Slot, error) { + return m.SlotCB(root) +} + +type mockSignatureCache struct { + svCalledForSig map[SignatureData]bool + svcb func(sig SignatureData) (bool, error) + vsCalledForSig map[SignatureData]bool + vscb func(sig SignatureData, v ValidatorAtIndexer) (err error) +} + +// SignatureVerified implements SignatureCache. +func (m *mockSignatureCache) SignatureVerified(sig SignatureData) (bool, error) { + if m.svCalledForSig == nil { + m.svCalledForSig = make(map[SignatureData]bool) + } + m.svCalledForSig[sig] = true + return m.svcb(sig) +} + +// VerifySignature implements SignatureCache. 
+func (m *mockSignatureCache) VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err error) { + if m.vsCalledForSig == nil { + m.vsCalledForSig = make(map[SignatureData]bool) + } + m.vsCalledForSig[sig] = true + return m.vscb(sig, v) +} + +var _ SignatureCache = &mockSignatureCache{} + +type sbrfunc func(context.Context, [32]byte) (state.BeaconState, error) + +type mockStateByRooter struct { + sbr sbrfunc + calledForRoot map[[32]byte]bool +} + +func (sbr *mockStateByRooter) StateByRoot(ctx context.Context, root [32]byte) (state.BeaconState, error) { + if sbr.calledForRoot == nil { + sbr.calledForRoot = make(map[[32]byte]bool) + } + sbr.calledForRoot[root] = true + return sbr.sbr(ctx, root) +} + +var _ StateByRooter = &mockStateByRooter{} + +func sbrErrorIfCalled(t *testing.T) sbrfunc { + return func(_ context.Context, _ [32]byte) (state.BeaconState, error) { + t.Error("StateByRoot should not have been called") + return nil, nil + } +} + +func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter { + return &mockStateByRooter{sbr: func(_ context.Context, parent [32]byte) (state.BeaconState, error) { + if parent != expectedRoot { + t.Errorf("did not receive expected root in StateByRootCall, want %#x got %#x", expectedRoot, parent) + } + return nil, db.ErrNotFound + }} +} + +func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter { + return &mockStateByRooter{sbr: func(_ context.Context, root [32]byte) (state.BeaconState, error) { + return &validxStateOverride{vals: map[primitives.ValidatorIndex]*ethpb.Validator{ + idx: val, + }}, nil + }} +} + +type validxStateOverride struct { + state.BeaconState + vals map[primitives.ValidatorIndex]*ethpb.Validator +} + +var _ state.BeaconState = &validxStateOverride{} + +func (v *validxStateOverride) ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error) { + val, ok := v.vals[idx] + if !ok { + return nil, fmt.Errorf("validxStateOverride does not know index %d", idx) + } + return val, nil +} + +type mockProposerCache struct { + ComputeProposerCB func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) + ProposerCB func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) +} + +func (p *mockProposerCache) ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + return p.ComputeProposerCB(ctx, root, slot, pst) +} + +func (p *mockProposerCache) Proposer(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return p.ProposerCB(root, slot) +} + +var _ ProposerCache = &mockProposerCache{} + +func pcReturnsIdx(idx primitives.ValidatorIndex) func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return idx, true + } +} + +func pcReturnsNotFound() func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return 0, false + } +} diff --git a/beacon-chain/verification/cache.go b/beacon-chain/verification/cache.go new file mode 100644 index 000000000..4a60914fb --- /dev/null +++ b/beacon-chain/verification/cache.go @@ -0,0 +1,169 @@ +package verification + +import ( + "context" + "fmt" + + lru "github.com/hashicorp/golang-lru" + lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" + log 
"github.com/sirupsen/logrus" + + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/crypto/bls" + "github.com/prysmaticlabs/prysm/v4/network/forks" + ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v4/time/slots" +) + +const ( + DefaultSignatureCacheSize = 256 +) + +// ValidatorAtIndexer defines the method needed to retrieve a validator by its index. +// This interface is satisfied by state.BeaconState, but can also be satisfied by a cache. +type ValidatorAtIndexer interface { + ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error) +} + +// SignatureCache represents a type that can perform signature verification and cache the result so that it +// can be used when the same signature is seen in multiple places, like a SignedBeaconBlockHeader +// found in multiple BlobSidecars. +type SignatureCache interface { + // VerifySignature perform signature verification and caches the result. + VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err error) + // SignatureVerified accesses the result of a previous signature verification. + SignatureVerified(sig SignatureData) (bool, error) +} + +// SignatureData represents the set of parameters that together uniquely identify a signature observed on +// a beacon block. This is used as the key for the signature cache. +type SignatureData struct { + Root [32]byte + Parent [32]byte + Signature [96]byte + Proposer primitives.ValidatorIndex + Slot primitives.Slot +} + +func (d SignatureData) logFields() log.Fields { + return log.Fields{ + "root": fmt.Sprintf("%#x", d.Root), + "parent_root": fmt.Sprintf("%#x", d.Parent), + "signature": fmt.Sprintf("%#x", d.Signature), + "proposer": d.Proposer, + "slot": d.Slot, + } +} + +func newSigCache(vr []byte, size int) *sigCache { + return &sigCache{Cache: lruwrpr.New(size), valRoot: vr} +} + +type sigCache struct { + *lru.Cache + valRoot []byte +} + +// VerifySignature verifies the given signature data against the key obtained via ValidatorAtIndexer. +func (c *sigCache) VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err error) { + defer func() { + if err == nil { + c.Add(sig, true) + } else { + log.WithError(err).WithFields(sig.logFields()).Debug("caching failed signature verification result") + c.Add(sig, false) + } + }() + e := slots.ToEpoch(sig.Slot) + fork, err := forks.Fork(e) + if err != nil { + return err + } + domain, err := signing.Domain(fork, e, params.BeaconConfig().DomainBeaconProposer, c.valRoot) + if err != nil { + return err + } + pv, err := v.ValidatorAtIndex(sig.Proposer) + if err != nil { + return err + } + pb, err := bls.PublicKeyFromBytes(pv.PublicKey) + if err != nil { + return err + } + s, err := bls.SignatureFromBytes(sig.Signature[:]) + if err != nil { + return err + } + sr, err := signing.ComputeSigningRootForRoot(sig.Root, domain) + if err != nil { + return err + } + if !s.Verify(pb, sr[:]) { + return signing.ErrSigFailedToVerify + } + + return nil +} + +// SignatureVerified checks the signature cache for the given key, and returns a boolean value of true +// if it has been seen before, and an error value indicating whether the signature verification succeeded. 
+// i.e. only a result of (true, nil) means a previous signature check passed.
+func (c *sigCache) SignatureVerified(sig SignatureData) (bool, error) {
+	val, seen := c.Get(sig)
+	if !seen {
+		return false, nil
+	}
+	verified, ok := val.(bool)
+	if !ok {
+		log.WithFields(sig.logFields()).Debug("ignoring invalid value found in signature cache")
+		// This shouldn't happen, and if it does, the caller should treat it as a cache miss and run verification
+		// again to correctly populate the cache key.
+		return false, nil
+	}
+	if verified {
+		return true, nil
+	}
+	return true, signing.ErrSigFailedToVerify
+}
+
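For illustration, here is a minimal sketch of how a caller might combine the two cache methods above: consult SignatureVerified first and only fall back to a full BLS verification on a miss. This is an editorial example under assumed names (the helper and its wiring are not part of this patch):

// verifyBlockSig is a hypothetical caller; sc is a SignatureCache and vs is any ValidatorAtIndexer,
// such as a read-only beacon state.
func verifyBlockSig(sc SignatureCache, vs ValidatorAtIndexer, sd SignatureData) error {
	seen, err := sc.SignatureVerified(sd)
	if seen {
		// Only (true, nil) means a previous check passed; a non-nil err replays the cached failure.
		return err
	}
	// Cache miss: perform the full verification, which also records the result for later callers.
	return sc.VerifySignature(sd, vs)
}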
+// ProposerCache represents a type that can compute the proposer for a given slot + parent root,
+// and cache the result so that it can be reused when the same verification needs to be performed
+// across multiple values.
+type ProposerCache interface {
+	ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error)
+	Proposer(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool)
+}
+
+func newPropCache() *propCache {
+	return &propCache{}
+}
+
+type propCache struct {
+}
+
+// ComputeProposer takes the state for the given parent root and slot and computes the proposer index, updating the
+// proposer index cache when successful.
+func (*propCache) ComputeProposer(ctx context.Context, parent [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
+	pst, err := transition.ProcessSlotsUsingNextSlotCache(ctx, pst, parent[:], slot)
+	if err != nil {
+		return 0, err
+	}
+	idx, err := helpers.BeaconProposerIndex(ctx, pst)
+	if err != nil {
+		return 0, err
+	}
+	return idx, nil
+}
+
+// Proposer returns the validator index if it is found in the cache, along with a boolean indicating
+// whether the value was present, similar to accessing an LRU cache or a Go map.
+func (*propCache) Proposer(_ [32]byte, _ primitives.Slot) (primitives.ValidatorIndex, bool) {
+	// TODO: replace with potuz' proposer id cache
+	return 0, false
+}
diff --git a/beacon-chain/verification/cache_test.go b/beacon-chain/verification/cache_test.go
new file mode 100644
index 000000000..2093a2d60
--- /dev/null
+++ b/beacon-chain/verification/cache_test.go
@@ -0,0 +1,119 @@
+package verification
+
+import (
+	"context"
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
+	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/interop"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/testing/util"
+)
+
+func testSignedBlockBlobKeys(t *testing.T, valRoot []byte, slot primitives.Slot, nblobs int) (blocks.ROBlock, []blocks.ROBlob, bls.SecretKey, bls.PublicKey) {
+	sks, pks, err := interop.DeterministicallyGenerateKeys(0, 1)
+	require.NoError(t, err)
+	block, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nblobs, util.WithProposerSigning(0, sks[0], pks[0], valRoot))
+	return block, blobs, sks[0], pks[0]
+}
+
+func TestVerifySignature(t *testing.T) {
+	valRoot := [32]byte{}
+	_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
+	b := blobs[0]
+
+	sc := newSigCache(valRoot[:], 1)
+	cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
+		return &eth.Validator{PublicKey: pk.Marshal()}, nil
+	}
+	mv := &mockValidatorAtIndexer{cb: cb}
+
+	sd := blobToSignatureData(b)
+	require.NoError(t, sc.VerifySignature(sd, mv))
+}
+
+func TestSignatureCacheMissThenHit(t *testing.T) {
+	valRoot := [32]byte{}
+	_, blobs, _, pk := testSignedBlockBlobKeys(t, valRoot[:], 0, 1)
+	b := blobs[0]
+
+	sc := newSigCache(valRoot[:], 1)
+	cb := func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
+		return &eth.Validator{PublicKey: pk.Marshal()}, nil
+	}
+
+	sd := blobToSignatureData(b)
+	cached, err := sc.SignatureVerified(sd)
+	// Should not be cached yet.
+	require.Equal(t, false, cached)
+	require.NoError(t, err)
+
+	mv := &mockValidatorAtIndexer{cb: cb}
+	require.NoError(t, sc.VerifySignature(sd, mv))
+
+	// Now it should be cached.
+	cached, err = sc.SignatureVerified(sd)
+	require.Equal(t, true, cached)
+	require.NoError(t, err)
+
+	// note the changed slot, which will give this blob a different cache key
+	_, blobs, _, _ = testSignedBlockBlobKeys(t, valRoot[:], 1, 1)
+	badSd := blobToSignatureData(blobs[0])
+
+	// new value, should not be cached
+	cached, err = sc.SignatureVerified(badSd)
+	require.Equal(t, false, cached)
+	require.NoError(t, err)
+
+	// note that the first argument is incremented, so it will be a different deterministic key
+	_, pks, err := interop.DeterministicallyGenerateKeys(1, 1)
+	require.NoError(t, err)
+	wrongKey := pks[0]
+	cb = func(idx primitives.ValidatorIndex) (*eth.Validator, error) {
+		return &eth.Validator{PublicKey: wrongKey.Marshal()}, nil
+	}
+	mv = &mockValidatorAtIndexer{cb: cb}
+	require.ErrorIs(t, sc.VerifySignature(badSd, mv), signing.ErrSigFailedToVerify)
+
+	// we should now get the failure error from the cache
+	cached, err = sc.SignatureVerified(badSd)
+	require.Equal(t, true, cached)
+	require.ErrorIs(t, err, signing.ErrSigFailedToVerify)
+}
+
+type mockValidatorAtIndexer struct {
+	cb func(idx primitives.ValidatorIndex) (*eth.Validator, error)
+}
+
+// ValidatorAtIndex implements ValidatorAtIndexer.
+func (m *mockValidatorAtIndexer) ValidatorAtIndex(idx primitives.ValidatorIndex) (*eth.Validator, error) {
+	return m.cb(idx)
+}
+
+var _ ValidatorAtIndexer = &mockValidatorAtIndexer{}
+
+func TestProposerCache(t *testing.T) {
+	ctx := context.Background()
+	// 3 validators because that was the first number that produced a non-zero proposer index by default
+	st, _ := util.DeterministicGenesisStateDeneb(t, 3)
+
+	pc := newPropCache()
+	_, cached := pc.Proposer([32]byte{}, 1)
+	// should not be cached yet
+	require.Equal(t, false, cached)
+
+	// If this test breaks due to changes in the deterministic state gen, just replace '2' with whatever the right index is.
+	expectedIdx := 2
+	idx, err := pc.ComputeProposer(ctx, [32]byte{}, 1, st)
+	require.NoError(t, err)
+	require.Equal(t, primitives.ValidatorIndex(expectedIdx), idx)
+
+	idx, cached = pc.Proposer([32]byte{}, 1)
+	// TODO: update this test when we integrate a proposer id cache
+	require.Equal(t, false, cached)
+	require.Equal(t, primitives.ValidatorIndex(0), idx)
+}
diff --git a/beacon-chain/verification/error.go b/beacon-chain/verification/error.go
new file mode 100644
index 000000000..0c5ce1d0c
--- /dev/null
+++ b/beacon-chain/verification/error.go
@@ -0,0 +1,32 @@
+package verification
+
+import "github.com/pkg/errors"
+
+// ErrMissingVerification indicates that the given verification function was never performed on the value.
+var ErrMissingVerification = errors.New("verification was not performed for requirement")
+
+// VerificationMultiError is a custom error that can be used to access individual verification failures.
+type VerificationMultiError struct {
+	r   *results
+	err error
+}
+
+// Unwrap is used by errors.Is to unwrap errors.
+func (ve VerificationMultiError) Unwrap() error {
+	return ve.err
+}
+
+// Error satisfies the standard error interface.
+func (ve VerificationMultiError) Error() string {
+	return ve.err.Error()
+}
+
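To show how calling code might consume the Failures map exposed just below, here is a short, hypothetical sketch (not part of this patch); it assumes only the standard library errors.As and a logrus-style logger named log:

// logVerificationFailures is a hypothetical helper that unpacks a VerificationMultiError.
func logVerificationFailures(err error) {
	var me VerificationMultiError
	if !errors.As(err, &me) {
		return
	}
	for req, cause := range me.Failures() {
		// Requirements that were never executed surface here as ErrMissingVerification.
		log.WithError(cause).WithField("requirement", req).Debug("verification requirement not satisfied")
	}
}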
+// Failures provides access to a map of Requirement->error values
+// so that calling code can introspect on what went wrong.
+func (ve VerificationMultiError) Failures() map[Requirement]error {
+	return ve.r.failures()
+}
+
+func newVerificationMultiError(r *results, err error) VerificationMultiError {
+	return VerificationMultiError{r: r, err: err}
+}
diff --git a/beacon-chain/verification/initializer.go b/beacon-chain/verification/initializer.go
new file mode 100644
index 000000000..88db4e464
--- /dev/null
+++ b/beacon-chain/verification/initializer.go
@@ -0,0 +1,108 @@
+package verification
+
+import (
+	"context"
+	"sync"
+
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+)
+
+// Database represents the db methods that the verifiers need.
+type Database interface {
+	Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
+}
+
+// Forkchoicer represents the forkchoice methods that the verifiers need.
+// Note that forkchoice is used here in a lock-free fashion, assuming that a version of forkchoice
+// is given that internally handles the details of locking the underlying store.
+type Forkchoicer interface {
+	FinalizedCheckpoint() *forkchoicetypes.Checkpoint
+	HasNode([32]byte) bool
+	IsCanonical(root [32]byte) bool
+	Slot([32]byte) (primitives.Slot, error)
+}
+
+// StateByRooter describes a stategen-ish type that can produce arbitrary states by their root.
+type StateByRooter interface {
+	StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
+}
+
+// sharedResources provides access to resources that are required by different verification types.
+// For example, sidecar verification and block verification share the block signature verification cache.
+type sharedResources struct {
+	clock *startup.Clock
+	fc    Forkchoicer
+	sc    SignatureCache
+	pc    ProposerCache
+	db    Database
+	sr    StateByRooter
+}
+
+// Initializer is used to create different Verifiers.
+// Verifiers require access to stateful data structures, like caches,
+// and it is Initializer's job to provide access to those.
+type Initializer struct {
+	shared *sharedResources
+}
+
+// NewBlobVerifier creates a BlobVerifier for a single blob, with the given set of requirements.
+func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs ...Requirement) *BlobVerifier {
+	return &BlobVerifier{
+		sharedResources:      ini.shared,
+		blob:                 b,
+		results:              newResults(reqs...),
+		verifyBlobCommitment: kzg.VerifyROBlobCommitment,
+	}
+}
+
+// InitializerWaiter provides an Initializer once all dependent resources are ready
+// via the WaitForInitializer method.
+type InitializerWaiter struct {
+	sync.RWMutex
+	ready bool
+	cw    startup.ClockWaiter
+	ini   *Initializer
+}
+
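For orientation, a rough sketch of how a consumer might wire these pieces together, using the constructor documented below. This is an editorial illustration only: the forkchoice, database, stategen, and requirement values are assumed to exist elsewhere in the node, and the nil validators root passed to newSigCache stands in for the real genesis validators root.

// wireBlobVerifier is a hypothetical example, not part of this patch.
func wireBlobVerifier(ctx context.Context, cw startup.ClockWaiter, fc Forkchoicer, db Database, sr StateByRooter, blob blocks.ROBlob, reqs []Requirement) (*BlobVerifier, error) {
	iw := NewInitializerWaiter(cw, fc, newSigCache(nil, DefaultSignatureCacheSize), newPropCache(), db, sr)
	ini, err := iw.WaitForInitializer(ctx) // blocks until the genesis clock is available
	if err != nil {
		return nil, err
	}
	return ini.NewBlobVerifier(blob, reqs...), nil
}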
+// NewInitializerWaiter creates an InitializerWaiter which can be used to obtain an Initializer once async dependencies are ready.
+func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sc SignatureCache, pc ProposerCache, db Database, sr StateByRooter) *InitializerWaiter {
+	shared := &sharedResources{
+		fc: fc,
+		sc: sc,
+		pc: pc,
+		db: db,
+		sr: sr,
+	}
+	return &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
+}
+
+// WaitForInitializer ensures that asynchronous initialization of the shared resources the initializer
+// depends on has completed before the underlying Initializer is accessible by client code.
+func (w *InitializerWaiter) WaitForInitializer(ctx context.Context) (*Initializer, error) {
+	if err := w.waitForReady(ctx); err != nil {
+		return nil, err
+	}
+	return w.ini, nil
+}
+
+func (w *InitializerWaiter) waitForReady(ctx context.Context) error {
+	w.Lock()
+	defer w.Unlock()
+	if w.ready {
+		return nil
+	}
+
+	clock, err := w.cw.WaitForClock(ctx)
+	if err != nil {
+		return err
+	}
+	w.ini.shared.clock = clock
+	w.ready = true
+	return nil
+}
diff --git a/beacon-chain/verification/result.go b/beacon-chain/verification/result.go
new file mode 100644
index 000000000..f4d2ed35a
--- /dev/null
+++ b/beacon-chain/verification/result.go
@@ -0,0 +1,63 @@
+package verification
+
+// Requirement represents a validation check that needs to pass in order for a Verified form of a consensus type to be issued.
+type Requirement int
+
+// results collects positive verification results.
+// This map can be used to test which verifications have been successfully completed in order to
+// decide whether it is safe to issue a "Verified" type variant.
+type results struct {
+	done map[Requirement]error
+	reqs []Requirement
+}
+
+func newResults(reqs ...Requirement) *results {
+	return &results{done: make(map[Requirement]error, len(reqs)), reqs: reqs}
+}
+
+func (r *results) record(req Requirement, err error) {
+	r.done[req] = err
+}
+
+// allSatisfied returns true if there is a nil error result for every Requirement.
+func (r *results) allSatisfied() bool {
+	if len(r.done) != len(r.reqs) {
+		return false
+	}
+	for i := range r.reqs {
+		err, ok := r.done[r.reqs[i]]
+		if !ok || err != nil {
+			return false
+		}
+	}
+	return true
+}
+
+func (r *results) executed(req Requirement) bool {
+	_, ok := r.done[req]
+	return ok
+}
+
+func (r *results) result(req Requirement) error {
+	return r.done[req]
+}
+
+func (r *results) errors(err error) error {
+	return newVerificationMultiError(r, err)
+}
+
+func (r *results) failures() map[Requirement]error {
+	fail := make(map[Requirement]error, len(r.done))
+	for i := range r.reqs {
+		req := r.reqs[i]
+		err, ok := r.done[req]
+		if !ok {
+			fail[req] = ErrMissingVerification
+			continue
+		}
+		if err != nil {
+			fail[req] = err
+		}
+	}
+	return fail
+}
diff --git a/consensus-types/blocks/roblob.go b/consensus-types/blocks/roblob.go
index 2a4f9be33..e087f4c4e 100644
--- a/consensus-types/blocks/roblob.go
+++ b/consensus-types/blocks/roblob.go
@@ -53,6 +53,11 @@ func (b *ROBlob) ParentRoot() [32]byte {
 	return bytesutil.ToBytes32(b.SignedBlockHeader.Header.ParentRoot)
 }
 
+// ParentRootSlice returns the parent root as a byte slice.
+func (b *ROBlob) ParentRootSlice() []byte {
+	return b.SignedBlockHeader.Header.ParentRoot
+}
+
 // BodyRoot returns the body root of the blob sidecar.
func (b *ROBlob) BodyRoot() [32]byte { return bytesutil.ToBytes32(b.SignedBlockHeader.Header.BodyRoot) diff --git a/encoding/ssz/detect/configfork_test.go b/encoding/ssz/detect/configfork_test.go index 1c38bf20e..6f3c4f4dd 100644 --- a/encoding/ssz/detect/configfork_test.go +++ b/encoding/ssz/detect/configfork_test.go @@ -3,7 +3,6 @@ package detect import ( "context" "fmt" - "math" "testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" @@ -48,11 +47,8 @@ func TestSlotFromBlock(t *testing.T) { } func TestByState(t *testing.T) { - undo, err := hackDenebMaxuint() - require.NoError(t, err) - defer func() { - require.NoError(t, undo()) - }() + undo := util.HackDenebMaxuint(t) + defer undo() bc := params.BeaconConfig() altairSlot, err := slots.EpochStart(bc.AltairForkEpoch) require.NoError(t, err) @@ -137,11 +133,8 @@ func stateForVersion(v int) (state.BeaconState, error) { func TestUnmarshalState(t *testing.T) { ctx := context.Background() - undo, err := hackDenebMaxuint() - require.NoError(t, err) - defer func() { - require.NoError(t, undo()) - }() + undo := util.HackDenebMaxuint(t) + defer undo() bc := params.BeaconConfig() altairSlot, err := slots.EpochStart(bc.AltairForkEpoch) require.NoError(t, err) @@ -211,23 +204,9 @@ func TestUnmarshalState(t *testing.T) { } } -func hackDenebMaxuint() (func() error, error) { - // We monkey patch the config to use a smaller value for the next fork epoch (which is always set to maxint). - // Upstream configs use MaxUint64, which leads to a multiplication overflow when converting epoch->slot. - // Unfortunately we have unit tests that assert our config matches the upstream config, so we have to choose between - // breaking conformance, adding a special case to the conformance unit test, or patch it here. - bc := params.MainnetConfig().Copy() - bc.DenebForkEpoch = math.MaxUint32 - undo, err := params.SetActiveWithUndo(bc) - return undo, err -} - func TestUnmarshalBlock(t *testing.T) { - undo, err := hackDenebMaxuint() - require.NoError(t, err) - defer func() { - require.NoError(t, undo()) - }() + undo := util.HackDenebMaxuint(t) + defer undo() genv := bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion) altairv := bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion) bellav := bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion) @@ -339,11 +318,8 @@ func TestUnmarshalBlock(t *testing.T) { } func TestUnmarshalBlindedBlock(t *testing.T) { - undo, err := hackDenebMaxuint() - require.NoError(t, err) - defer func() { - require.NoError(t, undo()) - }() + undo := util.HackDenebMaxuint(t) + defer undo() genv := bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion) altairv := bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion) bellav := bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion) diff --git a/network/forks/ordered.go b/network/forks/ordered.go index 0df5bb782..3fe550356 100644 --- a/network/forks/ordered.go +++ b/network/forks/ordered.go @@ -9,6 +9,7 @@ import ( fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" ) // ForkScheduleEntry is a Version+Epoch tuple for sorted storage in an OrderedSchedule @@ -57,6 +58,20 @@ func (o OrderedSchedule) VersionForName(name string) ([fieldparams.VersionLength return [4]byte{}, errors.Wrapf(ErrVersionNotFound, "no version with name %s", lower) } +func (o OrderedSchedule) 
ForkFromVersion(version [fieldparams.VersionLength]byte) (*ethpb.Fork, error) {
+	for i := range o {
+		e := o[i]
+		if e.Version == version {
+			f := &ethpb.Fork{Epoch: e.Epoch, CurrentVersion: version[:], PreviousVersion: version[:]}
+			if i > 0 {
+				f.PreviousVersion = o[i-1].Version[:]
+			}
+			return f, nil
+		}
+	}
+	return nil, errors.Wrapf(ErrVersionNotFound, "could not determine fork for version %#x", version)
+}
+
 func (o OrderedSchedule) Previous(version [fieldparams.VersionLength]byte) ([fieldparams.VersionLength]byte, error) {
 	for i := len(o) - 1; i >= 0; i-- {
 		if o[i].Version == version {
diff --git a/runtime/logging/blob.go b/runtime/logging/blob.go
index 8a6f51d52..848bf6146 100644
--- a/runtime/logging/blob.go
+++ b/runtime/logging/blob.go
@@ -11,10 +11,11 @@ import (
 // which can be passed to log.WithFields.
 func BlobFields(blob blocks.ROBlob) logrus.Fields {
 	return logrus.Fields{
-		"slot":          blob.Slot(),
-		"proposerIndex": blob.ProposerIndex(),
-		"blockRoot":     fmt.Sprintf("%#x", blob.BlockRoot()),
-		"kzgCommitment": fmt.Sprintf("%#x", blob.KzgCommitment),
-		"index":         blob.Index,
+		"slot":           blob.Slot(),
+		"proposer_index": blob.ProposerIndex(),
+		"block_root":     fmt.Sprintf("%#x", blob.BlockRoot()),
+		"parent_root":    fmt.Sprintf("%#x", blob.ParentRoot()),
+		"kzg_commitment": fmt.Sprintf("%#x", blob.KzgCommitment),
+		"index":          blob.Index,
 	}
 }
diff --git a/testing/util/BUILD.bazel b/testing/util/BUILD.bazel
index d5bd7b140..7e3dfedf0 100644
--- a/testing/util/BUILD.bazel
+++ b/testing/util/BUILD.bazel
@@ -47,6 +47,7 @@ go_library(
        "//crypto/hash:go_default_library",
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
@@ -73,6 +74,7 @@ go_test(
        "bellatrix_state_test.go",
        "block_test.go",
        "capella_block_test.go",
+       "deneb_test.go",
        "deposits_test.go",
        "helpers_test.go",
        "state_test.go",
diff --git a/testing/util/deneb.go b/testing/util/deneb.go
index a436f54a9..9a9d915c2 100644
--- a/testing/util/deneb.go
+++ b/testing/util/deneb.go
@@ -2,22 +2,58 @@ package util
 
 import (
 	"encoding/binary"
+	"math"
 	"math/big"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
 	gethTypes "github.com/ethereum/go-ethereum/core/types"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
 	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/v4/network/forks"
 	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/time/slots"
 )
 
-func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primitives.Slot, nblobs int) (blocks.ROBlock, []blocks.ROBlob) {
-	// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
+type DenebBlockGeneratorOption func(*denebBlockGenerator) + +type denebBlockGenerator struct { + parent [32]byte + slot primitives.Slot + nblobs int + sign bool + sk bls.SecretKey + pk bls.PublicKey + proposer primitives.ValidatorIndex + valRoot []byte +} + +func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, pk bls.PublicKey, valRoot []byte) DenebBlockGeneratorOption { + return func(g *denebBlockGenerator) { + g.sign = true + g.proposer = idx + g.sk = sk + g.pk = pk + g.valRoot = valRoot + } +} + +func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primitives.Slot, nblobs int, opts ...DenebBlockGeneratorOption) (blocks.ROBlock, []blocks.ROBlob) { + g := &denebBlockGenerator{ + parent: parent, + slot: slot, + nblobs: nblobs, + } + for _, o := range opts { + o(g) + } stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength) receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength) logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength) @@ -58,16 +94,37 @@ func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primi } block := NewBeaconBlockDeneb() block.Block.Body.ExecutionPayload = payload - block.Block.Slot = slot - block.Block.ParentRoot = parent[:] - commitments := make([][48]byte, nblobs) - block.Block.Body.BlobKzgCommitments = make([][]byte, nblobs) + block.Block.Slot = g.slot + block.Block.ParentRoot = g.parent[:] + commitments := make([][48]byte, g.nblobs) + block.Block.Body.BlobKzgCommitments = make([][]byte, g.nblobs) for i := range commitments { binary.LittleEndian.PutUint16(commitments[i][0:16], uint16(i)) - binary.LittleEndian.PutUint16(commitments[i][16:32], uint16(slot)) + binary.LittleEndian.PutUint16(commitments[i][16:32], uint16(g.slot)) block.Block.Body.BlobKzgCommitments[i] = commitments[i][:] } + body, err := blocks.NewBeaconBlockBody(block.Block.Body) + require.NoError(t, err) + inclusion := make([][][]byte, len(commitments)) + for i := range commitments { + proof, err := blocks.MerkleProofKZGCommitment(body, i) + require.NoError(t, err) + inclusion[i] = proof + } + if g.sign { + epoch := slots.ToEpoch(block.Block.Slot) + schedule := forks.NewOrderedSchedule(params.BeaconConfig()) + version, err := schedule.VersionForEpoch(epoch) + require.NoError(t, err) + fork, err := schedule.ForkFromVersion(version) + require.NoError(t, err) + domain := params.BeaconConfig().DomainBeaconProposer + sig, err := signing.ComputeDomainAndSignWithoutState(fork, epoch, domain, g.valRoot, block.Block, g.sk) + require.NoError(t, err) + block.Signature = sig + } + root, err := block.Block.HashTreeRoot() require.NoError(t, err) @@ -78,7 +135,7 @@ func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primi sh, err := sbb.Header() require.NoError(t, err) for i, c := range block.Block.Body.BlobKzgCommitments { - sidecars[i] = GenerateTestDenebBlobSidecar(t, root, sh, i, c) + sidecars[i] = GenerateTestDenebBlobSidecar(t, root, sh, i, c, inclusion[i]) } rob, err := blocks.NewROBlock(sbb) @@ -86,7 +143,7 @@ func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primi return rob, sidecars } -func GenerateTestDenebBlobSidecar(t *testing.T, root [32]byte, header *ethpb.SignedBeaconBlockHeader, index int, commitment []byte) blocks.ROBlob { +func GenerateTestDenebBlobSidecar(t *testing.T, root [32]byte, header *ethpb.SignedBeaconBlockHeader, index int, commitment []byte, incProof [][]byte) blocks.ROBlob { blob := make([]byte, 
fieldparams.BlobSize)
 	binary.LittleEndian.PutUint64(blob, uint64(index))
 	pb := &ethpb.BlobSidecar{
@@ -96,7 +153,10 @@ func GenerateTestDenebBlobSidecar(t *testing.T, root [32]byte, header *ethpb.Sig
 		KzgCommitment: commitment,
 		KzgProof:      commitment,
 	}
-	pb.CommitmentInclusionProof = fakeEmptyProof(t, pb)
+	if len(incProof) == 0 {
+		incProof = fakeEmptyProof(t, pb)
+	}
+	pb.CommitmentInclusionProof = incProof
 	r, err := blocks.NewROBlobWithRoot(pb, root)
 	require.NoError(t, err)
 	return r
@@ -127,3 +187,19 @@ func ExtendBlocksPlusBlobs(t *testing.T, blks []blocks.ROBlock, size int) ([]blo
 
 	return blks, blobs
 }
+
+// HackDenebMaxuint is helpful for tests that need to set up cases where the deneb fork has passed.
+// We have unit tests that assert our config matches the upstream config, where the next fork is always
+// set to MaxUint64 until the fork epoch is formally set. This creates an issue for tests that want to
+// work with slots that are defined to be after deneb because converting the max epoch to a slot leads
+// to multiplication overflow.
+// Monkey patching tests with this function is the simplest workaround in these cases.
+func HackDenebMaxuint(t *testing.T) func() {
+	bc := params.MainnetConfig().Copy()
+	bc.DenebForkEpoch = math.MaxUint32
+	undo, err := params.SetActiveWithUndo(bc)
+	require.NoError(t, err)
+	return func() {
+		require.NoError(t, undo())
+	}
+}
diff --git a/testing/util/deneb_test.go b/testing/util/deneb_test.go
new file mode 100644
index 000000000..d00693e1a
--- /dev/null
+++ b/testing/util/deneb_test.go
@@ -0,0 +1,16 @@
+package util
+
+import (
+	"testing"
+
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+)
+
+func TestInclusionProofs(t *testing.T) {
+	_, blobs := GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, fieldparams.MaxBlobsPerBlock)
+	for i := range blobs {
+		require.NoError(t, blocks.VerifyKZGInclusionProof(blobs[i]))
+	}
+}
diff --git a/time/slots/slottime.go b/time/slots/slottime.go
index e99f74450..3ace1b97a 100644
--- a/time/slots/slottime.go
+++ b/time/slots/slottime.go
@@ -163,6 +163,12 @@ func ToTime(genesisTimeSec uint64, slot primitives.Slot) (time.Time, error) {
 	return time.Unix(int64(sTime), 0), nil // lint:ignore uintcast -- A timestamp will not exceed int64 in your lifetime.
 }
 
+// BeginsAt computes the timestamp where the given slot begins, relative to the genesis timestamp.
+func BeginsAt(slot primitives.Slot, genesis time.Time) time.Time {
+	sd := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slot)
+	return genesis.Add(sd)
+}
+
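As a quick illustration of the new helper (an editorial sketch, not part of the patch): with mainnet's 12-second slots, slot 32 begins 32 * 12 = 384 seconds after the genesis timestamp. The caller below is hypothetical and written as in-package code; external callers would use slots.BeginsAt.

// exampleSlotStart is a hypothetical caller of BeginsAt.
func exampleSlotStart(genesis time.Time) time.Time {
	// genesis + 32 slots * SecondsPerSlot (384s on mainnet).
	return BeginsAt(32, genesis)
}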
 // Since computes the number of time slots that have occurred since the given timestamp.
 func Since(time time.Time) primitives.Slot {
 	return CurrentSlot(uint64(time.Unix()))
diff --git a/time/slots/slottime_test.go b/time/slots/slottime_test.go
index 6556c0ed4..cb8271eb1 100644
--- a/time/slots/slottime_test.go
+++ b/time/slots/slottime_test.go
@@ -153,6 +153,37 @@ func TestEpochStartSlot_OK(t *testing.T) {
 	}
 }
+
+func TestBeginsAtOK(t *testing.T) {
+	cases := []struct {
+		name     string
+		genesis  int64
+		slot     primitives.Slot
+		slotTime time.Time
+	}{
+		{
+			name:     "genesis",
+			slotTime: time.Unix(0, 0),
+		},
+		{
+			name:     "slot 1",
+			slot:     1,
+			slotTime: time.Unix(int64(params.BeaconConfig().SecondsPerSlot), 0),
+		},
+		{
+			name:     "slot 32",
+			slot:     32,
+			slotTime: time.Unix(int64(params.BeaconConfig().SecondsPerSlot)*32, 0),
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			genesis := time.Unix(c.genesis, 0)
+			st := BeginsAt(c.slot, genesis)
+			require.Equal(t, c.slotTime, st)
+		})
+	}
+}
+
 func TestEpochEndSlot_OK(t *testing.T) {
 	tests := []struct {
 		epoch primitives.Epoch