[First PR] Removing FastSSZ and use more efficient hashing with gohashtree (#6520)

This commit is contained in:
Giulio rebuffo 2023-01-07 12:25:28 +01:00 committed by GitHub
parent 3c7e6b114e
commit 77d946ba3e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
41 changed files with 1431 additions and 1975 deletions

View File

@ -1,5 +1,4 @@
all:
go run github.com/prysmaticlabs/fastssz/sszgen -path types.go
go run github.com/prysmaticlabs/fastssz/sszgen -path network.go
clean:
rm lightrpc/*.pb.go

View File

@ -1,254 +0,0 @@
package cltypes
import (
"encoding/binary"
"github.com/ledgerwatch/erigon/common"
)
const maxAttestationSize = 2276
var commonAggregationBytes = map[byte]bool{
0x00: true,
0xff: true,
}
func EncodeAttestationsForStorage(attestations []*Attestation) []byte {
if len(attestations) == 0 {
return nil
}
referencedAttestations := []*AttestationData{
nil, // Full diff
}
// Pre-allocate some memory.
encoded := make([]byte, 0, maxAttestationSize*len(attestations)+1)
for _, attestation := range attestations {
// Encode attestation metadata
// Also we need to keep track of aggregation bits size manually.
encoded = append(encoded, encodeAggregationBits(attestation.AggregationBits)...)
// Encode signature
encoded = append(encoded, attestation.Signature[:]...)
// Encode attestation body
var bestEncoding []byte
bestEncodingIndex := 0
// try all non-repeating attestations.
for i, att := range referencedAttestations {
currentEncoding := EncodeAttestationDataForStorage(attestation.Data, att)
// check if we find a better fit.
if len(bestEncoding) == 0 || len(bestEncoding) > len(currentEncoding) {
bestEncodingIndex = i
bestEncoding = currentEncoding
// cannot get lower than 1, so accept it as best.
if len(bestEncoding) == 1 {
break
}
}
}
// If it is not repeated then save it.
if len(bestEncoding) != 1 {
referencedAttestations = append(referencedAttestations, attestation.Data)
}
encoded = append(encoded, byte(bestEncodingIndex))
encoded = append(encoded, bestEncoding...)
// Encode attester index
encoded = append(encoded, encodeNumber(attestation.Data.Index)...)
}
return encoded
}
func DecodeAttestationsForStorage(buf []byte) ([]*Attestation, error) {
if len(buf) == 0 {
return nil, nil
}
referencedAttestations := []*AttestationData{
nil, // Full diff
}
var attestations []*Attestation
// current position is how much we read.
pos := 0
for pos != len(buf) {
n, aggrBits := rebuildAggregationBits(buf[pos:])
pos += n
// Decode aggrefation bits
attestation := &Attestation{
AggregationBits: aggrBits,
}
// Decode signature
copy(attestation.Signature[:], buf[pos:])
pos += 96
// decode attestation body
// 1) read comparison index
comparisonIndex := int(buf[pos])
pos++
n, attestation.Data = DecodeAttestationDataForStorage(buf[pos:], referencedAttestations[comparisonIndex])
// field set is not null, so we need to remember it.
if n != 1 {
referencedAttestations = append(referencedAttestations, attestation.Data)
}
pos += n
// decode attester index
attestation.Data.Index = decodeNumber(buf[pos:])
pos += 3
attestations = append(attestations, attestation)
}
return attestations, nil
}
func encodeNumber(x uint64) []byte {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(x))
return b[1:]
}
func decodeNumber(b []byte) uint64 {
tmp := make([]byte, 4)
copy(tmp[1:], b[:3])
return uint64(binary.BigEndian.Uint32(tmp))
}
// EncodeAttestationsDataForStorage encodes attestation data and compress everything by defaultData.
func EncodeAttestationDataForStorage(data *AttestationData, defaultData *AttestationData) []byte {
fieldSet := byte(0)
var ret []byte
// Encode in slot
if defaultData == nil || data.Slot != defaultData.Slot {
slotBytes := make([]byte, 4)
binary.BigEndian.PutUint32(slotBytes, uint32(data.Slot))
ret = append(ret, slotBytes...)
} else {
fieldSet = 1
}
if defaultData == nil || data.BeaconBlockHash != defaultData.BeaconBlockHash {
ret = append(ret, data.BeaconBlockHash[:]...)
} else {
fieldSet |= 2
}
if defaultData == nil || data.Source.Epoch != defaultData.Source.Epoch {
ret = append(ret, encodeNumber(data.Source.Epoch)...)
} else {
fieldSet |= 4
}
if defaultData == nil || data.Source.Root != defaultData.Source.Root {
ret = append(ret, data.Source.Root[:]...)
} else {
fieldSet |= 8
}
if defaultData == nil || data.Target.Epoch != defaultData.Target.Epoch {
ret = append(ret, encodeNumber(data.Target.Epoch)...)
} else {
fieldSet |= 16
}
if defaultData == nil || data.Target.Root != defaultData.Target.Root {
ret = append(ret, data.Target.Root[:]...)
} else {
fieldSet |= 32
}
return append([]byte{fieldSet}, ret...)
}
// DecodeAttestationDataForStorage decodes attestation data and decompress everything by defaultData.
func DecodeAttestationDataForStorage(buf []byte, defaultData *AttestationData) (n int, data *AttestationData) {
data = &AttestationData{
Target: &Checkpoint{},
Source: &Checkpoint{},
}
if len(buf) == 0 {
return
}
fieldSet := buf[0]
n++
if fieldSet&1 > 0 {
data.Slot = defaultData.Slot
} else {
data.Slot = uint64(binary.BigEndian.Uint32(buf[n:]))
n += 4
}
if fieldSet&2 > 0 {
data.BeaconBlockHash = defaultData.BeaconBlockHash
} else {
data.BeaconBlockHash = common.BytesToHash(buf[n : n+32])
n += 32
}
if fieldSet&4 > 0 {
data.Source.Epoch = defaultData.Source.Epoch
} else {
data.Source.Epoch = decodeNumber(buf[n:])
n += 3
}
if fieldSet&8 > 0 {
data.Source.Root = defaultData.Source.Root
} else {
data.Source.Root = common.BytesToHash(buf[n : n+32])
n += 32
}
if fieldSet&16 > 0 {
data.Target.Epoch = defaultData.Target.Epoch
} else {
data.Target.Epoch = decodeNumber(buf[n:])
n += 3
}
if fieldSet&32 > 0 {
data.Target.Root = defaultData.Target.Root
} else {
data.Target.Root = common.BytesToHash(buf[n : n+32])
n += 32
}
return
}
func encodeAggregationBits(bits []byte) (encoded []byte) {
i := 0
encoded = append(encoded, byte(len(bits)))
for i < len(bits) {
_, isCommon := commonAggregationBytes[bits[i]]
if isCommon {
importantByte := bits[i]
encoded = append(encoded, importantByte)
count := 0
for i < len(bits) && bits[i] == importantByte {
count++
i++
}
encoded = append(encoded, byte(count))
continue
}
encoded = append(encoded, bits[i])
i++
}
return
}
func rebuildAggregationBits(buf []byte) (n int, ret []byte) {
i := 0
bitsLength := int(buf[0])
n = 1
for i < bitsLength {
currByte := buf[n]
_, isCommon := commonAggregationBytes[currByte]
n++
if isCommon {
count := int(buf[n])
n++
for j := 0; j < count; j++ {
ret = append(ret, currByte)
i++
}
continue
}
ret = append(ret, currByte)
i++
}
return
}

602
cl/cltypes/attestations.go Normal file
View File

@ -0,0 +1,602 @@
package cltypes
import (
"encoding/binary"
"fmt"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/common"
ssz "github.com/prysmaticlabs/fastssz"
)
// Attestation is a full signed attestation: the participation bitlist, the
// attested data, and the aggregate BLS signature over it.
type Attestation struct {
	AggregationBits []byte `ssz-max:"2048" ssz:"bitlist"` // SSZ bitlist of committee participation
	Data            *AttestationData
	Signature       [96]byte `ssz-size:"96"` // aggregate BLS signature
}

// maxAttestationSize is a worst-case per-attestation size, used only to
// pre-size the buffer in EncodeAttestationsForStorage.
const maxAttestationSize = 2276

// commonAggregationBytes lists the byte values that encodeAggregationBits
// run-length encodes (all-clear and all-set bytes dominate real bitlists).
var commonAggregationBytes = map[byte]bool{
	0x00: true,
	0xff: true,
}
// EncodeAttestationsForStorage compresses a batch of attestations into a
// compact byte stream for database storage. It run-length encodes the
// aggregation bits and delta-encodes each AttestationData against the best
// previously-seen ("referenced") data, so repeated fields cost a single byte.
// Returns nil for an empty batch; the inverse is DecodeAttestationsForStorage.
func EncodeAttestationsForStorage(attestations []*Attestation) []byte {
	if len(attestations) == 0 {
		return nil
	}
	// Index 0 is a nil sentinel meaning "no reference: full diff is encoded".
	referencedAttestations := []*AttestationData{
		nil, // Full diff
	}
	// Pre-allocate some memory.
	encoded := make([]byte, 0, maxAttestationSize*len(attestations)+1)
	for _, attestation := range attestations {
		// Encode attestation metadata.
		// Also we need to keep track of aggregation bits size manually.
		encoded = append(encoded, encodeAggregationBits(attestation.AggregationBits)...)
		// Encode signature (fixed 96 bytes, no compression).
		encoded = append(encoded, attestation.Signature[:]...)
		// Encode attestation body: try every referenced AttestationData and
		// keep the shortest delta encoding.
		var bestEncoding []byte
		bestEncodingIndex := 0
		// try all non-repeating attestations.
		for i, att := range referencedAttestations {
			currentEncoding := EncodeAttestationDataForStorage(attestation.Data, att)
			// check if we find a better fit.
			if len(bestEncoding) == 0 || len(bestEncoding) > len(currentEncoding) {
				bestEncodingIndex = i
				bestEncoding = currentEncoding
				// cannot get lower than 1 (just the field-set byte), so accept it as best.
				if len(bestEncoding) == 1 {
					break
				}
			}
		}
		// If it is not a full repeat (length 1) then remember it as a future reference.
		if len(bestEncoding) != 1 {
			referencedAttestations = append(referencedAttestations, attestation.Data)
		}
		// NOTE(review): the reference index is stored in one byte; more than
		// 255 distinct referenced AttestationData in a batch would silently
		// truncate — confirm callers bound the batch size.
		encoded = append(encoded, byte(bestEncodingIndex))
		encoded = append(encoded, bestEncoding...)
		// Encode attester (committee) index as 3 big-endian bytes.
		encoded = append(encoded, encodeNumber(attestation.Data.Index)...)
	}
	return encoded
}
// DecodeAttestationsForStorage reverses EncodeAttestationsForStorage,
// rebuilding the attestation list from its compact storage form.
// Returns (nil, nil) for an empty buffer.
// NOTE(review): the decoder trusts the buffer layout (no bounds checks on the
// slice reads below); a truncated or corrupt buffer would panic rather than
// return an error — confirm inputs always come from the paired encoder.
func DecodeAttestationsForStorage(buf []byte) ([]*Attestation, error) {
	if len(buf) == 0 {
		return nil, nil
	}
	// Must mirror the encoder: index 0 is the nil "full diff" sentinel.
	referencedAttestations := []*AttestationData{
		nil, // Full diff
	}
	var attestations []*Attestation
	// current position is how much we read.
	pos := 0
	for pos != len(buf) {
		n, aggrBits := rebuildAggregationBits(buf[pos:])
		pos += n
		// Decode aggregation bits.
		attestation := &Attestation{
			AggregationBits: aggrBits,
		}
		// Decode signature (fixed 96 bytes).
		copy(attestation.Signature[:], buf[pos:])
		pos += 96
		// decode attestation body
		// 1) read comparison index (which referenced AttestationData was used).
		comparisonIndex := int(buf[pos])
		pos++
		n, attestation.Data = DecodeAttestationDataForStorage(buf[pos:], referencedAttestations[comparisonIndex])
		// field set is not null (n != 1 means some fields were literal),
		// so we need to remember it as a future reference, mirroring the encoder.
		if n != 1 {
			referencedAttestations = append(referencedAttestations, attestation.Data)
		}
		pos += n
		// decode attester index (3 big-endian bytes).
		attestation.Data.Index = decodeNumber(buf[pos:])
		pos += 3
		attestations = append(attestations, attestation)
	}
	return attestations, nil
}
// encodeNumber serializes the low 24 bits of x as 3 big-endian bytes.
// Values >= 2^24 are silently truncated; the storage format assumes epochs
// and committee indices fit in 24 bits.
func encodeNumber(x uint64) []byte {
	var scratch [4]byte
	binary.BigEndian.PutUint32(scratch[:], uint32(x))
	return scratch[1:]
}
// decodeNumber reads 3 big-endian bytes from b and returns them as a uint64.
// Inverse of encodeNumber.
func decodeNumber(b []byte) uint64 {
	var scratch [4]byte
	copy(scratch[1:], b[:3])
	return uint64(binary.BigEndian.Uint32(scratch[:]))
}
// EncodeAttestationDataForStorage encodes attestation data, compressing it
// against defaultData: each field equal to defaultData's is skipped and marked
// by a bit in the leading field-set byte; differing fields are appended
// literally. With a nil defaultData every field is encoded in full.
// The result is at minimum 1 byte (the field-set byte, when all fields match).
func EncodeAttestationDataForStorage(data *AttestationData, defaultData *AttestationData) []byte {
	fieldSet := byte(0)
	var ret []byte
	// Bit 0: slot (4 bytes big-endian when literal).
	if defaultData == nil || data.Slot != defaultData.Slot {
		slotBytes := make([]byte, 4)
		binary.BigEndian.PutUint32(slotBytes, uint32(data.Slot))
		ret = append(ret, slotBytes...)
	} else {
		fieldSet |= 1
	}
	// Bit 1: beacon block hash (32 bytes when literal).
	if defaultData == nil || data.BeaconBlockHash != defaultData.BeaconBlockHash {
		ret = append(ret, data.BeaconBlockHash[:]...)
	} else {
		fieldSet |= 2
	}
	// Bit 2: source epoch (3 bytes when literal).
	if defaultData == nil || data.Source.Epoch != defaultData.Source.Epoch {
		ret = append(ret, encodeNumber(data.Source.Epoch)...)
	} else {
		fieldSet |= 4
	}
	// Bit 3: source root (32 bytes when literal).
	if defaultData == nil || data.Source.Root != defaultData.Source.Root {
		ret = append(ret, data.Source.Root[:]...)
	} else {
		fieldSet |= 8
	}
	// Bit 4: target epoch (3 bytes when literal).
	if defaultData == nil || data.Target.Epoch != defaultData.Target.Epoch {
		ret = append(ret, encodeNumber(data.Target.Epoch)...)
	} else {
		fieldSet |= 16
	}
	// Bit 5: target root (32 bytes when literal).
	if defaultData == nil || data.Target.Root != defaultData.Target.Root {
		ret = append(ret, data.Target.Root[:]...)
	} else {
		fieldSet |= 32
	}
	return append([]byte{fieldSet}, ret...)
}
// DecodeAttestationDataForStorage decodes attestation data produced by
// EncodeAttestationDataForStorage, decompressing it against defaultData:
// fields whose bit is set in the leading field-set byte are copied from
// defaultData, the rest are read literally from buf.
// Returns the number of bytes consumed and the rebuilt data.
// NOTE(review): a nonzero field-set byte with a nil defaultData would
// nil-dereference — the caller must pass the same reference the encoder used.
func DecodeAttestationDataForStorage(buf []byte, defaultData *AttestationData) (n int, data *AttestationData) {
	data = &AttestationData{
		Target: &Checkpoint{},
		Source: &Checkpoint{},
	}
	if len(buf) == 0 {
		return
	}
	fieldSet := buf[0]
	n++
	// Bit 0: slot.
	if fieldSet&1 > 0 {
		data.Slot = defaultData.Slot
	} else {
		data.Slot = uint64(binary.BigEndian.Uint32(buf[n:]))
		n += 4
	}
	// Bit 1: beacon block hash.
	if fieldSet&2 > 0 {
		data.BeaconBlockHash = defaultData.BeaconBlockHash
	} else {
		data.BeaconBlockHash = common.BytesToHash(buf[n : n+32])
		n += 32
	}
	// Bit 2: source epoch.
	if fieldSet&4 > 0 {
		data.Source.Epoch = defaultData.Source.Epoch
	} else {
		data.Source.Epoch = decodeNumber(buf[n:])
		n += 3
	}
	// Bit 3: source root.
	if fieldSet&8 > 0 {
		data.Source.Root = defaultData.Source.Root
	} else {
		data.Source.Root = common.BytesToHash(buf[n : n+32])
		n += 32
	}
	// Bit 4: target epoch.
	if fieldSet&16 > 0 {
		data.Target.Epoch = defaultData.Target.Epoch
	} else {
		data.Target.Epoch = decodeNumber(buf[n:])
		n += 3
	}
	// Bit 5: target root.
	if fieldSet&32 > 0 {
		data.Target.Root = defaultData.Target.Root
	} else {
		data.Target.Root = common.BytesToHash(buf[n : n+32])
		n += 32
	}
	return
}
// encodeAggregationBits run-length encodes an aggregation bitlist. The output
// starts with a length byte, then for each "common" byte (0x00/0xff) a
// (value, run-count) pair, and every other byte literally.
// NOTE(review): both byte(len(bits)) and byte(count) wrap at 256 — a bitlist
// of 256+ bytes (possible at the 2048-bit limit plus the SSZ delimiter bit)
// would corrupt the encoding; confirm inputs stay under 256 bytes.
func encodeAggregationBits(bits []byte) (encoded []byte) {
	i := 0
	// Leading byte: decoded length, needed because runs hide the true size.
	encoded = append(encoded, byte(len(bits)))
	for i < len(bits) {
		_, isCommon := commonAggregationBytes[bits[i]]
		if isCommon {
			// Emit the common byte once, followed by how many times it repeats.
			importantByte := bits[i]
			encoded = append(encoded, importantByte)
			count := 0
			for i < len(bits) && bits[i] == importantByte {
				count++
				i++
			}
			encoded = append(encoded, byte(count))
			continue
		}
		// Uncommon bytes are copied verbatim.
		encoded = append(encoded, bits[i])
		i++
	}
	return
}
// rebuildAggregationBits reverses encodeAggregationBits: it reads the leading
// length byte, then expands (common-byte, count) pairs and copies literal
// bytes until the decoded length is reached.
// Returns the number of input bytes consumed and the rebuilt bitlist.
// NOTE(review): no bounds checks on buf — a truncated input panics.
func rebuildAggregationBits(buf []byte) (n int, ret []byte) {
	i := 0 // number of decoded (output) bytes so far
	bitsLength := int(buf[0])
	n = 1
	for i < bitsLength {
		currByte := buf[n]
		_, isCommon := commonAggregationBytes[currByte]
		n++
		if isCommon {
			// Common byte: next input byte is the run length.
			count := int(buf[n])
			n++
			for j := 0; j < count; j++ {
				ret = append(ret, currByte)
				i++
			}
			continue
		}
		// Literal byte.
		ret = append(ret, currByte)
		i++
	}
	return
}
// MarshalSSZ ssz marshals the Attestation object.
func (a *Attestation) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(a)
}

// MarshalSSZTo ssz marshals the Attestation object to a target array.
// SSZ layout: 4-byte offset to the variable AggregationBits, then the fixed
// Data (128 bytes) and Signature (96 bytes), then the bitlist at the offset
// (fixed part totals 228 bytes).
func (a *Attestation) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	offset := int(228)
	// Offset (0) 'AggregationBits'
	dst = ssz.WriteOffset(dst, offset)
	// Field (1) 'Data' — marshal a zero value if unset so the layout stays fixed.
	if a.Data == nil {
		a.Data = new(AttestationData)
	}
	var attData []byte
	attData, err = a.Data.MarshalSSZ()
	if err != nil {
		return
	}
	dst = append(dst, attData...)
	// Field (2) 'Signature'
	dst = append(dst, a.Signature[:]...)
	// Field (0) 'AggregationBits' — variable part, appended last.
	if size := len(a.AggregationBits); size > 2048 {
		err = ssz.ErrBytesLengthFn("--.AggregationBits", size, 2048)
		return
	}
	dst = append(dst, a.AggregationBits...)
	return
}
// UnmarshalSSZ ssz unmarshals the Attestation object.
// Layout mirrors MarshalSSZTo: 4-byte offset, 128-byte Data, 96-byte
// Signature, then the variable-length aggregation bitlist at the offset.
func (a *Attestation) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size < 228 {
		return ssz.ErrSize
	}
	tail := buf
	var o0 uint64
	// Offset (0) 'AggregationBits'
	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
		return ssz.ErrOffset
	}
	if o0 < 228 {
		return ssz.ErrInvalidVariableOffset
	}
	// Field (1) 'Data'
	if a.Data == nil {
		a.Data = new(AttestationData)
	}
	if err = a.Data.UnmarshalSSZ(buf[4:132]); err != nil {
		return err
	}
	// Field (2) 'Signature'
	copy(a.Signature[:], buf[132:228])
	// Field (0) 'AggregationBits'
	{
		buf = tail[o0:]
		if err = ssz.ValidateBitlist(buf, 2048); err != nil {
			return err
		}
		// Truncate before appending: unmarshalling into a reused object must
		// replace the previous bitlist, not concatenate onto it.
		a.AggregationBits = append(a.AggregationBits[:0], buf...)
	}
	return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Attestation object
// (228 fixed bytes plus the variable aggregation bitlist).
func (a *Attestation) SizeSSZ() int {
	return 228 + len(a.AggregationBits)
}

// HashTreeRoot ssz hashes the Attestation object: it merkleizes the three
// field roots (bitlist, data, signature) into a 4-leaf tree.
// Returns an error if Data is nil.
func (a *Attestation) HashTreeRoot() ([32]byte, error) {
	leaves := make([][32]byte, 3)
	var err error
	if a.Data == nil {
		return [32]byte{}, fmt.Errorf("missing attestation data")
	}
	leaves[0], err = merkle_tree.BitlistRootWithLimit(a.AggregationBits, 2048)
	if err != nil {
		return [32]byte{}, err
	}
	leaves[1], err = a.Data.HashTreeRoot()
	if err != nil {
		return [32]byte{}, err
	}
	leaves[2], err = merkle_tree.SignatureRoot(a.Signature)
	if err != nil {
		return [32]byte{}, err
	}
	// 4 = next power of two above the 3 leaves.
	return merkle_tree.ArraysRoot(leaves, 4)
}

// HashTreeRootWith ssz hashes the Attestation object with a hasher,
// feeding the precomputed root into hh for fastssz compatibility.
func (a *Attestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	root, err := a.HashTreeRoot()
	if err != nil {
		return err
	}
	hh.PutBytes(root[:])
	return
}
/*
 * IndexedAttestation is an attestation with explicit validator indices,
 * used to prove that someone misbehaved (e.g. in slashing evidence).
 */
type IndexedAttestation struct {
	AttestingIndices []uint64 `ssz-max:"2048"` // validator indices that attested
	Data             *AttestationData
	Signature        [96]byte `ssz-size:"96"` // aggregate BLS signature
}
// MarshalSSZ ssz marshals the IndexedAttestation object.
func (i *IndexedAttestation) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(i)
}

// MarshalSSZTo ssz marshals the IndexedAttestation object to a target array.
// SSZ layout: 4-byte offset to the variable AttestingIndices, then the fixed
// Data (128 bytes) and Signature (96 bytes), then the uint64 list at the
// offset (fixed part totals 228 bytes).
func (i *IndexedAttestation) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	offset := int(228)
	// Offset (0) 'AttestingIndices'
	dst = ssz.WriteOffset(dst, offset)
	// Field (1) 'Data' — marshal a zero value if unset so the layout stays fixed.
	if i.Data == nil {
		i.Data = new(AttestationData)
	}
	var dataMarshalled []byte
	dataMarshalled, err = i.Data.MarshalSSZ()
	if err != nil {
		return
	}
	dst = append(dst, dataMarshalled...)
	// Field (2) 'Signature'
	dst = append(dst, i.Signature[:]...)
	// Field (0) 'AttestingIndices' — variable part, appended last.
	if size := len(i.AttestingIndices); size > 2048 {
		err = ssz.ErrListTooBigFn("--.AttestingIndices", size, 2048)
		return
	}
	for ii := 0; ii < len(i.AttestingIndices); ii++ {
		dst = ssz.MarshalUint64(dst, i.AttestingIndices[ii])
	}
	return
}
// UnmarshalSSZ ssz unmarshals the IndexedAttestation object.
// Layout mirrors MarshalSSZTo: 4-byte offset, 128-byte Data, 96-byte
// Signature, then the variable uint64 list at the offset.
func (i *IndexedAttestation) UnmarshalSSZ(buf []byte) error {
	size := uint64(len(buf))
	if size < 228 {
		return ssz.ErrSize
	}
	tail := buf
	var o0 uint64
	// Offset (0) 'AttestingIndices'
	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
		return ssz.ErrOffset
	}
	if o0 < 228 {
		return ssz.ErrInvalidVariableOffset
	}
	// Field (1) 'Data'
	if i.Data == nil {
		i.Data = new(AttestationData)
	}
	if err := i.Data.UnmarshalSSZ(buf[4:132]); err != nil {
		return err
	}
	// Field (2) 'Signature'
	copy(i.Signature[:], buf[132:228])
	// Field (0) 'AttestingIndices'
	// (Previously an inner `num, err :=` shadowed the outer err; flattened so
	// every error path returns directly and the success path returns nil.)
	buf = tail[o0:]
	num, err := ssz.DivideInt2(len(buf), 8, 2048)
	if err != nil {
		return err
	}
	i.AttestingIndices = ssz.ExtendUint64(i.AttestingIndices, num)
	for ii := 0; ii < num; ii++ {
		i.AttestingIndices[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8])
	}
	return nil
}
// SizeSSZ returns the ssz encoded size in bytes for the IndexedAttestation
// object (228 fixed bytes plus 8 bytes per attesting index).
func (i *IndexedAttestation) SizeSSZ() int {
	return 228 + len(i.AttestingIndices)*8
}
// HashTreeRoot ssz hashes the IndexedAttestation object: it merkleizes the
// three field roots (indices list, data, signature) into a 4-leaf tree.
// Returns an error if Data is nil (matching Attestation.HashTreeRoot instead
// of panicking on i.Data.HashTreeRoot()).
func (i *IndexedAttestation) HashTreeRoot() ([32]byte, error) {
	leaves := make([][32]byte, 3)
	var err error
	if i.Data == nil {
		return [32]byte{}, fmt.Errorf("missing attestation data")
	}
	leaves[0], err = merkle_tree.Uint64ListRootWithLimit(i.AttestingIndices, 2048)
	if err != nil {
		return [32]byte{}, err
	}
	leaves[1], err = i.Data.HashTreeRoot()
	if err != nil {
		return [32]byte{}, err
	}
	leaves[2], err = merkle_tree.SignatureRoot(i.Signature)
	if err != nil {
		return [32]byte{}, err
	}
	// 4 = next power of two above the 3 leaves.
	return merkle_tree.ArraysRoot(leaves, 4)
}
// HashTreeRootWith ssz hashes the IndexedAttestation object with a hasher,
// feeding the precomputed root into hh for fastssz compatibility.
func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	root, err := i.HashTreeRoot()
	if err != nil {
		return err
	}
	hh.PutBytes(root[:])
	return
}
// AttestationData contains information about an attestation, including the
// source (justified) and target (attested) checkpoints.
type AttestationData struct {
	Slot            uint64
	Index           uint64 // committee index
	BeaconBlockHash common.Hash
	Source          *Checkpoint
	Target          *Checkpoint
}
// MarshalSSZ ssz marshals the AttestationData object.
// Fixed layout: Slot [0:8], Index [8:16], BeaconBlockHash [16:48],
// Source checkpoint [48:88], Target checkpoint [88:128].
func (a *AttestationData) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, a.SizeSSZ())
	ssz_utils.MarshalUint64SSZ(buf, a.Slot)
	ssz_utils.MarshalUint64SSZ(buf[8:], a.Index)
	copy(buf[16:], a.BeaconBlockHash[:])
	source, err := a.Source.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	target, err := a.Target.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	copy(buf[48:], source)
	copy(buf[88:], target)
	return buf, nil
}
// UnmarshalSSZ ssz unmarshals the AttestationData object.
// Expects the exact fixed layout written by MarshalSSZ; allocates the
// checkpoint fields when decoding into a fresh object.
func (a *AttestationData) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != uint64(a.SizeSSZ()) {
		return ssz.ErrSize
	}
	a.Slot = ssz_utils.UnmarshalUint64SSZ(buf)
	a.Index = ssz_utils.UnmarshalUint64SSZ(buf[8:])
	copy(a.BeaconBlockHash[:], buf[16:48])
	if a.Source == nil {
		a.Source = new(Checkpoint)
	}
	if err = a.Source.UnmarshalSSZ(buf[48:88]); err != nil {
		return err
	}
	if a.Target == nil {
		a.Target = new(Checkpoint)
	}
	if err = a.Target.UnmarshalSSZ(buf[88:]); err != nil {
		return err
	}
	return err
}
// SizeSSZ returns the ssz encoded size in bytes for the AttestationData
// object: two uint64s, one hash, and two checkpoints.
// NOTE(review): dereferences a.Source — panics if Source is nil; confirm
// callers always populate it.
func (a *AttestationData) SizeSSZ() int {
	return 2*common.BlockNumberLength + common.HashLength + a.Source.SizeSSZ()*2
}
// HashTreeRoot ssz hashes the AttestationData object by merkleizing the five
// field roots into an 8-leaf tree (next power of two).
func (a *AttestationData) HashTreeRoot() ([32]byte, error) {
	sourceRoot, err := a.Source.HashTreeRoot()
	if err != nil {
		return [32]byte{}, err
	}
	targetRoot, err := a.Target.HashTreeRoot()
	if err != nil {
		return [32]byte{}, err
	}
	return merkle_tree.ArraysRoot([][32]byte{
		merkle_tree.Uint64Root(a.Slot),
		merkle_tree.Uint64Root(a.Index),
		a.BeaconBlockHash,
		sourceRoot,
		targetRoot,
	}, 8)
}

View File

@ -0,0 +1,62 @@
package cltypes_test
import (
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Shared fixture: one AttestationData reused by every test attestation below.
var testAttData = &cltypes.AttestationData{
	Slot:            69,
	Index:           402,
	BeaconBlockHash: common.HexToHash("123"),
	Source:          testCheckpoint,
	Target:          testCheckpoint,
}

// Four identical attestations — repetition exercises the delta-encoding path
// of EncodeAttestationsForStorage.
var attestations = []*cltypes.Attestation{
	{
		AggregationBits: []byte{2},
		Data:            testAttData,
	},
	{
		AggregationBits: []byte{2},
		Data:            testAttData,
	},
	{
		AggregationBits: []byte{2},
		Data:            testAttData,
	},
	{
		AggregationBits: []byte{2},
		Data:            testAttData,
	},
}

// Golden SSZ encoding of attestations[0] (hex).
var expectedAttestationMarshalled = "e4000000450000000000000092010000000000000000000000000000000000000000000000000000000000000000000000000123450000000000000000000000000000000000000000000000000000000000000000000000000000034500000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"
// TestAttestationHashTest pins the HashTreeRoot of the test attestation to a
// golden value.
func TestAttestationHashTest(t *testing.T) {
	hash, err := attestations[0].HashTreeRoot()
	require.NoError(t, err)
	require.Equal(t, common.Bytes2Hex(hash[:]), "c9cf21a5c4273a2b85a84b5eff0e500dbafc8b20ecd21c59a87c610791112ba7")
}

// TestEncodeForStorage checks that the storage encoding is smaller than plain
// SSZ for a repetitive batch and round-trips losslessly.
func TestEncodeForStorage(t *testing.T) {
	enc := cltypes.EncodeAttestationsForStorage(attestations)
	require.Less(t, len(enc), attestations[0].SizeSSZ()*len(attestations))
	decAttestations, err := cltypes.DecodeAttestationsForStorage(enc)
	require.NoError(t, err)
	require.Equal(t, attestations, decAttestations)
}
// TestAttestationMarshalUnmarshal round-trips an attestation through SSZ and
// pins the encoding to a golden value. (Renamed: "Unmarmashal" typo.)
func TestAttestationMarshalUnmarshal(t *testing.T) {
	marshalled, err := attestations[0].MarshalSSZ()
	require.NoError(t, err)
	assert.Equal(t, common.Bytes2Hex(marshalled[:]), expectedAttestationMarshalled)
	testData2 := &cltypes.Attestation{}
	require.NoError(t, testData2.UnmarshalSSZ(marshalled))
	require.Equal(t, testData2, attestations[0])
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/ethdb/cbor"
@ -66,7 +67,7 @@ func (b *BeaconBody) Version() clparams.StateVersion {
// SSZ methods
func (b *BeaconBody) GetUnderlyingSSZ() ObjectSSZ {
func (b *BeaconBody) GetUnderlyingSSZ() ssz_utils.ObjectSSZ {
switch b.version {
case clparams.Phase0Version:
return &BeaconBodyPhase0{
@ -130,7 +131,7 @@ func (b *BeaconBody) HashTreeRoot() ([32]byte, error) {
return b.GetUnderlyingSSZ().HashTreeRoot()
}
func (b *BeaconBlock) GetUnderlyingSSZ() ObjectSSZ {
func (b *BeaconBlock) GetUnderlyingSSZ() ssz_utils.ObjectSSZ {
switch b.Version() {
case clparams.Phase0Version:
return &BeaconBlockPhase0{
@ -182,7 +183,7 @@ func (b *BeaconBlock) HashTreeRoot() ([32]byte, error) {
return b.GetUnderlyingSSZ().HashTreeRoot()
}
func (b *SignedBeaconBlock) GetUnderlyingSSZ() ObjectSSZ {
func (b *SignedBeaconBlock) GetUnderlyingSSZ() ssz_utils.ObjectSSZ {
switch b.Version() {
case clparams.Phase0Version:
return &SignedBeaconBlockPhase0{
@ -299,7 +300,7 @@ func DecodeBeaconBlockForStorage(buf []byte) (block *SignedBeaconBlock, eth1Numb
}, storageObject.Eth1Number, storageObject.Eth1BlockHash, storageObject.Eth2BlockRoot, nil
}
func NewSignedBeaconBlock(obj ObjectSSZ) *SignedBeaconBlock {
func NewSignedBeaconBlock(obj ssz_utils.ObjectSSZ) *SignedBeaconBlock {
switch block := obj.(type) {
case *SignedBeaconBlockPhase0:
return &SignedBeaconBlock{
@ -321,7 +322,7 @@ func NewSignedBeaconBlock(obj ObjectSSZ) *SignedBeaconBlock {
}
}
func NewBeaconBlock(obj ObjectSSZ) *BeaconBlock {
func NewBeaconBlock(obj ssz_utils.ObjectSSZ) *BeaconBlock {
switch block := obj.(type) {
case *BeaconBlockPhase0:
return &BeaconBlock{
@ -353,7 +354,7 @@ func NewBeaconBlock(obj ObjectSSZ) *BeaconBlock {
}
}
func NewBeaconBody(obj ObjectSSZ) *BeaconBody {
func NewBeaconBody(obj ssz_utils.ObjectSSZ) *BeaconBody {
switch body := obj.(type) {
case *BeaconBodyPhase0:
return &BeaconBody{

64
cl/cltypes/checkpoint.go Normal file
View File

@ -0,0 +1,64 @@
package cltypes
import (
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/common"
ssz "github.com/prysmaticlabs/fastssz"
)
// Checkpoint is an (epoch, block root) pair used for FFG justification and
// finalization.
type Checkpoint struct {
	Epoch uint64
	Root  common.Hash
}

// MarshalSSZTo appends the SSZ encoding of the checkpoint to buf:
// 8-byte little-endian epoch followed by the 32-byte root.
func (c *Checkpoint) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	dst = ssz.MarshalUint64(dst, c.Epoch)
	dst = append(dst, c.Root[:]...)
	return
}

// MarshalSSZ ssz marshals the Checkpoint object into a fresh 40-byte buffer.
func (c *Checkpoint) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, 0, c.SizeSSZ())
	return c.MarshalSSZTo(buf)
}
// UnmarshalSSZ decodes a Checkpoint from its fixed 40-byte SSZ encoding.
// Returns ssz.ErrSize if buf is not exactly SizeSSZ() bytes.
func (c *Checkpoint) UnmarshalSSZ(buf []byte) error {
	size := uint64(len(buf))
	if size != uint64(c.SizeSSZ()) {
		return ssz.ErrSize
	}
	// Field (0) 'Epoch'
	c.Epoch = ssz.UnmarshallUint64(buf[0:8])
	// Field (1) 'Root'
	copy(c.Root[:], buf[8:40])
	// (Dropped the dead `var err error` — nothing past the size check can fail.)
	return nil
}
// SizeSSZ returns the fixed SSZ size of a Checkpoint: 8-byte epoch + 32-byte root.
func (c *Checkpoint) SizeSSZ() int {
	return common.BlockNumberLength + common.HashLength
}

// HashTreeRoot merkleizes the checkpoint's two field roots into a 2-leaf tree.
func (c *Checkpoint) HashTreeRoot() ([32]byte, error) {
	leaves := [][32]byte{
		merkle_tree.Uint64Root(c.Epoch),
		c.Root,
	}
	return merkle_tree.ArraysRoot(leaves, 2)
}

// HashTreeRootWith feeds the precomputed root into hh for fastssz compatibility.
func (c *Checkpoint) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	var root common.Hash
	root, err = c.HashTreeRoot()
	if err != nil {
		return
	}
	hh.PutBytes(root[:])
	return
}

View File

@ -0,0 +1,33 @@
package cltypes_test
import (
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Shared fixture checkpoint plus its golden SSZ encoding and tree root.
var testCheckpoint = &cltypes.Checkpoint{
	Epoch: 69,
	Root:  common.HexToHash("0x3"),
}

var expectedTestCheckpointMarshalled = common.Hex2Bytes("45000000000000000000000000000000000000000000000000000000000000000000000000000003")
var expectedTestCheckpointRoot = common.Hex2Bytes("be8567f9fdae831b10720823dbcf0e3680e61d6a2a27d85ca00f6c15a7bbb1ea")
// TestCheckpointMarshalUnmarshal round-trips a checkpoint through SSZ and
// pins the encoding to a golden value. (Renamed: "Unmarmashal" typo.)
func TestCheckpointMarshalUnmarshal(t *testing.T) {
	marshalled, err := testCheckpoint.MarshalSSZ()
	require.NoError(t, err)
	assert.Equal(t, marshalled, expectedTestCheckpointMarshalled)
	checkpoint := &cltypes.Checkpoint{}
	require.NoError(t, checkpoint.UnmarshalSSZ(marshalled))
	require.Equal(t, checkpoint, testCheckpoint)
}
// TestCheckpointHashTreeRoot pins HashTreeRoot to a golden value.
func TestCheckpointHashTreeRoot(t *testing.T) {
	root, err := testCheckpoint.HashTreeRoot()
	require.NoError(t, err)
	assert.Equal(t, root[:], expectedTestCheckpointRoot)
}

View File

@ -30,26 +30,14 @@ func (*LightClientOptimisticUpdate) Clone() communication.Packet {
return &LightClientOptimisticUpdate{}
}
func (*MetadataV1) Clone() communication.Packet {
return &MetadataV1{}
}
func (*MetadataV2) Clone() communication.Packet {
return &MetadataV2{}
func (*Metadata) Clone() communication.Packet {
return &Metadata{}
}
func (*Ping) Clone() communication.Packet {
return &Ping{}
}
func (*Status) Clone() communication.Packet {
return &Status{}
}
func (*SingleRoot) Clone() communication.Packet {
return &SingleRoot{}
}
func (*LightClientBootstrap) Clone() communication.Packet {
return &LightClientBootstrap{}
}

71
cl/cltypes/eth1_data.go Normal file
View File

@ -0,0 +1,71 @@
package cltypes
import (
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/common"
ssz "github.com/prysmaticlabs/fastssz"
)
// Eth1Data is the eth1 vote data carried in a beacon block body.
type Eth1Data struct {
	Root         common.Hash // deposit tree root
	BlockHash    common.Hash // eth1 block hash
	DepositCount uint64
}

// MarshalSSZTo ssz marshals the Eth1Data object to a target array.
// Layout: Root [0:32], DepositCount [32:40], BlockHash [40:72].
func (e *Eth1Data) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	dst = append(dst, e.Root[:]...)
	dst = ssz.MarshalUint64(dst, e.DepositCount)
	dst = append(dst, e.BlockHash[:]...)
	return
}
// MarshalSSZ ssz marshals the Eth1Data object into a fresh 72-byte buffer.
// Capacity comes from SizeSSZ so the size is defined in exactly one place.
func (e *Eth1Data) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, 0, e.SizeSSZ())
	return e.MarshalSSZTo(buf)
}
// UnmarshalSSZ ssz unmarshals the Eth1Data object from its fixed 72-byte
// encoding: Root [0:32], DepositCount [32:40], BlockHash [40:72].
// Returns ssz.ErrSize on any other length.
func (e *Eth1Data) UnmarshalSSZ(buf []byte) error {
	size := uint64(len(buf))
	if size != 72 {
		return ssz.ErrSize
	}
	copy(e.Root[:], buf[0:32])
	e.DepositCount = ssz.UnmarshallUint64(buf[32:40])
	copy(e.BlockHash[:], buf[40:72])
	// (Dropped the dead `var err error` — nothing past the size check can fail.)
	return nil
}
// SizeSSZ returns the ssz encoded size in bytes for the Eth1Data object
// (two 32-byte hashes plus one uint64 = 72).
func (e *Eth1Data) SizeSSZ() int {
	return common.BlockNumberLength + common.HashLength*2
}

// HashTreeRoot merkleizes the three field roots into a 4-leaf tree.
func (e *Eth1Data) HashTreeRoot() ([32]byte, error) {
	leaves := [][32]byte{
		e.Root,
		merkle_tree.Uint64Root(e.DepositCount),
		e.BlockHash,
	}
	return merkle_tree.ArraysRoot(leaves, 4)
}

// HashTreeRootWith feeds the precomputed root into hh for fastssz compatibility.
func (e *Eth1Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	var root common.Hash
	root, err = e.HashTreeRoot()
	if err != nil {
		return
	}
	hh.PutBytes(root[:])
	return
}

View File

@ -0,0 +1,34 @@
package cltypes_test
import (
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Shared fixture Eth1Data plus its golden SSZ encoding and tree root.
var testEth1Data = &cltypes.Eth1Data{
	Root:         common.HexToHash("0x2"),
	BlockHash:    common.HexToHash("0x3"),
	DepositCount: 69,
}

var expectedTestEth1DataMarshalled = common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000245000000000000000000000000000000000000000000000000000000000000000000000000000003")
var expectedTestEth1DataRoot = common.Hex2Bytes("adbafa10f1d6046b59cb720371c5e70ce2c6c3067b0e87985f5cd0899a515886")
// TestEth1DataMarshalUnmarshal round-trips Eth1Data through SSZ and pins the
// encoding to a golden value. (Renamed: "Unmarmashal" typo.)
func TestEth1DataMarshalUnmarshal(t *testing.T) {
	marshalled, err := testEth1Data.MarshalSSZ()
	require.NoError(t, err)
	assert.Equal(t, marshalled, expectedTestEth1DataMarshalled)
	testData2 := &cltypes.Eth1Data{}
	require.NoError(t, testData2.UnmarshalSSZ(marshalled))
	require.Equal(t, testData2, testEth1Data)
}
// TestEth1DataHashTreeRoot pins HashTreeRoot to a golden value.
func TestEth1DataHashTreeRoot(t *testing.T) {
	root, err := testEth1Data.HashTreeRoot()
	require.NoError(t, err)
	assert.Equal(t, root[:], expectedTestEth1DataRoot)
}

View File

@ -1,37 +1,83 @@
package cltypes
type MetadataV1 struct {
import (
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/common"
)
type Metadata struct {
SeqNumber uint64
Attnets uint64
Syncnets *uint64
}
type MetadataV2 struct {
SeqNumber uint64
Attnets uint64
Syncnets uint64
// MarshalSSZ encodes Metadata: SeqNumber [0:8], Attnets [8:16], and — only
// when the optional Syncnets is set (Altair+) — Syncnets [16:24]. The nil
// case returns the 16-byte phase0 encoding.
func (m *Metadata) MarshalSSZ() ([]byte, error) {
	ret := make([]byte, 24)
	ssz_utils.MarshalUint64SSZ(ret, m.SeqNumber)
	ssz_utils.MarshalUint64SSZ(ret[8:], m.Attnets)
	if m.Syncnets == nil {
		return ret[:16], nil
	}
	ssz_utils.MarshalUint64SSZ(ret[16:], *m.Syncnets)
	return ret, nil
}
// ENRForkID contains current fork id for ENR entries.
type ENRForkID struct {
CurrentForkDigest [4]byte `ssz-size:"4" `
NextForkVersion [4]byte `ssz-size:"4" `
NextForkEpoch uint64
// UnmarshalSSZ decodes Metadata; buffers shorter than 24 bytes are treated as
// the phase0 form and leave Syncnets nil.
// NOTE(review): assumes len(buf) >= 16 — a shorter buffer would likely panic
// inside UnmarshalUint64SSZ rather than return an error; confirm callers
// validate the message length first.
func (m *Metadata) UnmarshalSSZ(buf []byte) error {
	m.SeqNumber = ssz_utils.UnmarshalUint64SSZ(buf)
	m.Attnets = ssz_utils.UnmarshalUint64SSZ(buf[8:])
	if len(buf) < 24 {
		return nil
	}
	m.Syncnets = new(uint64)
	*m.Syncnets = ssz_utils.UnmarshalUint64SSZ(buf[16:])
	return nil
}
// ForkData contains current fork id for gossip subscription.
type ForkData struct {
CurrentVersion [4]byte `ssz-size:"4" `
GenesisValidatorsRoot [32]byte `ssz-size:"32" `
// SizeSSZ returns 16 bytes for phase0 metadata, 24 when the optional
// Syncnets field is present.
func (m *Metadata) SizeSSZ() (ret int) {
	ret = common.BlockNumberLength * 2
	if m.Syncnets != nil {
		ret += 8
	}
	return
}
// Ping is a test P2P message, used to test out liveness of our peer/signaling disconnection.
type Ping struct {
Id uint64 `json:"id" `
Id uint64
}
func (p *Ping) MarshalSSZ() ([]byte, error) {
ret := make([]byte, p.SizeSSZ())
ssz_utils.MarshalUint64SSZ(ret, p.Id)
return ret, nil
}
// UnmarshalSSZ decodes the ping id from a little-endian uint64.
// Returns an error on short input instead of panicking (the previous
// code read eight bytes unconditionally from a peer-supplied buffer).
func (p *Ping) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 8 {
		return errors.New("ping: ssz buffer too short")
	}
	p.Id = ssz_utils.UnmarshalUint64SSZ(buf)
	return nil
}
// SizeSSZ returns the fixed encoded size of a Ping: one uint64
// (common.BlockNumberLength is 8 bytes).
func (p *Ping) SizeSSZ() int {
	return common.BlockNumberLength
}
// P2P Message for bootstrap
type SingleRoot struct {
Root [32]byte `ssz-size:"32" `
Root [32]byte
}
// MarshalSSZ returns the 32-byte root as its SSZ encoding.
//
// A fresh buffer is returned: the previous code returned s.Root[:],
// which aliases the struct's backing array, so any caller mutating the
// returned slice would silently corrupt s.Root.
func (s *SingleRoot) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, len(s.Root))
	copy(buf, s.Root[:])
	return buf, nil
}
// UnmarshalSSZ decodes the 32-byte root. Short input now returns an
// error: the previous code's copy silently filled only part of the root
// on truncated buffers, leaving the rest zeroed.
func (s *SingleRoot) UnmarshalSSZ(buf []byte) error {
	if len(buf) < len(s.Root) {
		return errors.New("singleRoot: ssz buffer too short")
	}
	copy(s.Root[:], buf)
	return nil
}
// SizeSSZ returns the fixed encoded size of a SingleRoot: one 32-byte
// hash (common.HashLength).
func (s *SingleRoot) SizeSSZ() int {
	return common.HashLength
}
/*
@ -43,6 +89,23 @@ type LightClientUpdatesByRangeRequest struct {
Count uint64
}
// MarshalSSZ encodes the request as two consecutive little-endian
// uint64s: Period then Count.
func (l *LightClientUpdatesByRangeRequest) MarshalSSZ() ([]byte, error) {
	dst := make([]byte, l.SizeSSZ())
	ssz_utils.MarshalUint64SSZ(dst[:8], l.Period)
	ssz_utils.MarshalUint64SSZ(dst[8:], l.Count)
	return dst, nil
}
// UnmarshalSSZ decodes Period and Count from two little-endian uint64s.
// Returns an error on short input instead of panicking on malformed
// peer-supplied payloads.
func (l *LightClientUpdatesByRangeRequest) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 16 {
		return errors.New("lightClientUpdatesByRangeRequest: ssz buffer too short")
	}
	l.Period = ssz_utils.UnmarshalUint64SSZ(buf)
	l.Count = ssz_utils.UnmarshalUint64SSZ(buf[8:])
	return nil
}
// SizeSSZ returns the fixed encoded size: two uint64 fields (16 bytes).
func (l *LightClientUpdatesByRangeRequest) SizeSSZ() int {
	return 2 * common.BlockNumberLength
}
/*
* BeaconBlocksByRangeRequest is the request for getting a range of blocks.
*/
@ -52,6 +115,25 @@ type BeaconBlocksByRangeRequest struct {
Step uint64 // Deprecated, must be set to 1
}
// MarshalSSZ encodes the request as three consecutive little-endian
// uint64s: StartSlot, Count, Step.
func (b *BeaconBlocksByRangeRequest) MarshalSSZ() ([]byte, error) {
	dst := make([]byte, b.SizeSSZ())
	for i, field := range []uint64{b.StartSlot, b.Count, b.Step} {
		ssz_utils.MarshalUint64SSZ(dst[i*8:], field)
	}
	return dst, nil
}
// UnmarshalSSZ decodes StartSlot, Count and Step from three
// little-endian uint64s. Returns an error on short input instead of
// panicking on malformed peer-supplied payloads.
func (b *BeaconBlocksByRangeRequest) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 24 {
		return errors.New("beaconBlocksByRangeRequest: ssz buffer too short")
	}
	b.StartSlot = ssz_utils.UnmarshalUint64SSZ(buf)
	b.Count = ssz_utils.UnmarshalUint64SSZ(buf[8:])
	b.Step = ssz_utils.UnmarshalUint64SSZ(buf[16:])
	return nil
}
// SizeSSZ returns the fixed encoded size: three uint64 fields (24 bytes).
func (b *BeaconBlocksByRangeRequest) SizeSSZ() int {
	return 3 * common.BlockNumberLength
}
/*
* Status is a P2P Message we exchange when connecting to a new Peer.
* It contains network information about the other peer and if mismatching we drop it.
@ -64,12 +146,25 @@ type Status struct {
HeadSlot uint64
}
/*
* SigningData is the message we want to verify against the sync committee signature.
* Root is the HastTreeRoot() of the beacon block header,
* while the domain is the sync committee identifier.
*/
type SigningData struct {
Root [32]byte `ssz-size:"32"`
Domain []byte `ssz-size:"32"`
// MarshalSSZ encodes the status handshake message: fork digest (4),
// finalized root (32), finalized epoch (8), head root (32), head slot (8)
// — 84 bytes total, offsets tracked as the fields are written.
func (s *Status) MarshalSSZ() ([]byte, error) {
	dst := make([]byte, s.SizeSSZ())
	offset := copy(dst, s.ForkDigest[:])
	offset += copy(dst[offset:], s.FinalizedRoot[:])
	ssz_utils.MarshalUint64SSZ(dst[offset:], s.FinalizedEpoch)
	offset += 8
	offset += copy(dst[offset:], s.HeadRoot[:])
	ssz_utils.MarshalUint64SSZ(dst[offset:], s.HeadSlot)
	return dst, nil
}
// UnmarshalSSZ decodes the 84-byte status handshake message (fork
// digest, finalized root, finalized epoch, head root, head slot).
// Returns an error on short input instead of panicking: the previous
// code indexed fixed offsets into a peer-supplied buffer without a
// length check.
func (s *Status) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 84 {
		return errors.New("status: ssz buffer too short")
	}
	copy(s.ForkDigest[:], buf)
	copy(s.FinalizedRoot[:], buf[4:])
	s.FinalizedEpoch = ssz_utils.UnmarshalUint64SSZ(buf[36:])
	copy(s.HeadRoot[:], buf[44:])
	s.HeadSlot = ssz_utils.UnmarshalUint64SSZ(buf[76:])
	return nil
}
// SizeSSZ returns the fixed encoded size of a Status message:
// 4 + 32 + 8 + 32 + 8 = 84 bytes.
func (s *Status) SizeSSZ() int {
	return 84
}

View File

@ -1,694 +0,0 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 80a83db2a9d3d18fcb401e059e65d2b1ac52ce8fd945adfe8830196f5137e71c
package cltypes
import (
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the MetadataV1 object
func (m *MetadataV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetadataV1 object to a target array
func (m *MetadataV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
dst = ssz.MarshalUint64(dst, m.Attnets)
return
}
// UnmarshalSSZ ssz unmarshals the MetadataV1 object
func (m *MetadataV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
m.Attnets = ssz.UnmarshallUint64(buf[8:16])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetadataV1 object
func (m *MetadataV1) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the MetadataV1 object
func (m *MetadataV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetadataV1 object with a hasher
func (m *MetadataV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
hh.PutUint64(m.Attnets)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the MetadataV2 object
func (m *MetadataV2) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetadataV2 object to a target array
func (m *MetadataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
dst = ssz.MarshalUint64(dst, m.Attnets)
// Field (2) 'Syncnets'
dst = ssz.MarshalUint64(dst, m.Syncnets)
return
}
// UnmarshalSSZ ssz unmarshals the MetadataV2 object
func (m *MetadataV2) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 24 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
m.Attnets = ssz.UnmarshallUint64(buf[8:16])
// Field (2) 'Syncnets'
m.Syncnets = ssz.UnmarshallUint64(buf[16:24])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetadataV2 object
func (m *MetadataV2) SizeSSZ() (size int) {
size = 24
return
}
// HashTreeRoot ssz hashes the MetadataV2 object
func (m *MetadataV2) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetadataV2 object with a hasher
func (m *MetadataV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
hh.PutUint64(m.Attnets)
// Field (2) 'Syncnets'
hh.PutUint64(m.Syncnets)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the ENRForkID object
func (e *ENRForkID) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ENRForkID object to a target array
func (e *ENRForkID) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'CurrentForkDigest'
dst = append(dst, e.CurrentForkDigest[:]...)
// Field (1) 'NextForkVersion'
dst = append(dst, e.NextForkVersion[:]...)
// Field (2) 'NextForkEpoch'
dst = ssz.MarshalUint64(dst, e.NextForkEpoch)
return
}
// UnmarshalSSZ ssz unmarshals the ENRForkID object
func (e *ENRForkID) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'CurrentForkDigest'
copy(e.CurrentForkDigest[:], buf[0:4])
// Field (1) 'NextForkVersion'
copy(e.NextForkVersion[:], buf[4:8])
// Field (2) 'NextForkEpoch'
e.NextForkEpoch = ssz.UnmarshallUint64(buf[8:16])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ENRForkID object
func (e *ENRForkID) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the ENRForkID object
func (e *ENRForkID) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ENRForkID object with a hasher
func (e *ENRForkID) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'CurrentForkDigest'
hh.PutBytes(e.CurrentForkDigest[:])
// Field (1) 'NextForkVersion'
hh.PutBytes(e.NextForkVersion[:])
// Field (2) 'NextForkEpoch'
hh.PutUint64(e.NextForkEpoch)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the ForkData object
func (f *ForkData) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(f)
}
// MarshalSSZTo ssz marshals the ForkData object to a target array
func (f *ForkData) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'CurrentVersion'
dst = append(dst, f.CurrentVersion[:]...)
// Field (1) 'GenesisValidatorsRoot'
dst = append(dst, f.GenesisValidatorsRoot[:]...)
return
}
// UnmarshalSSZ ssz unmarshals the ForkData object
func (f *ForkData) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 36 {
return ssz.ErrSize
}
// Field (0) 'CurrentVersion'
copy(f.CurrentVersion[:], buf[0:4])
// Field (1) 'GenesisValidatorsRoot'
copy(f.GenesisValidatorsRoot[:], buf[4:36])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ForkData object
func (f *ForkData) SizeSSZ() (size int) {
size = 36
return
}
// HashTreeRoot ssz hashes the ForkData object
func (f *ForkData) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(f)
}
// HashTreeRootWith ssz hashes the ForkData object with a hasher
func (f *ForkData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'CurrentVersion'
hh.PutBytes(f.CurrentVersion[:])
// Field (1) 'GenesisValidatorsRoot'
hh.PutBytes(f.GenesisValidatorsRoot[:])
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the Ping object
func (p *Ping) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the Ping object to a target array
func (p *Ping) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Id'
dst = ssz.MarshalUint64(dst, p.Id)
return
}
// UnmarshalSSZ ssz unmarshals the Ping object
func (p *Ping) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 8 {
return ssz.ErrSize
}
// Field (0) 'Id'
p.Id = ssz.UnmarshallUint64(buf[0:8])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Ping object
func (p *Ping) SizeSSZ() (size int) {
size = 8
return
}
// HashTreeRoot ssz hashes the Ping object
func (p *Ping) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the Ping object with a hasher
func (p *Ping) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Id'
hh.PutUint64(p.Id)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the SingleRoot object
func (s *SingleRoot) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the SingleRoot object to a target array
func (s *SingleRoot) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Root'
dst = append(dst, s.Root[:]...)
return
}
// UnmarshalSSZ ssz unmarshals the SingleRoot object
func (s *SingleRoot) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 32 {
return ssz.ErrSize
}
// Field (0) 'Root'
copy(s.Root[:], buf[0:32])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SingleRoot object
func (s *SingleRoot) SizeSSZ() (size int) {
size = 32
return
}
// HashTreeRoot ssz hashes the SingleRoot object
func (s *SingleRoot) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the SingleRoot object with a hasher
func (s *SingleRoot) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Root'
hh.PutBytes(s.Root[:])
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the LightClientUpdatesByRangeRequest object
func (l *LightClientUpdatesByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(l)
}
// MarshalSSZTo ssz marshals the LightClientUpdatesByRangeRequest object to a target array
func (l *LightClientUpdatesByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Period'
dst = ssz.MarshalUint64(dst, l.Period)
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, l.Count)
return
}
// UnmarshalSSZ ssz unmarshals the LightClientUpdatesByRangeRequest object
func (l *LightClientUpdatesByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'Period'
l.Period = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Count'
l.Count = ssz.UnmarshallUint64(buf[8:16])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the LightClientUpdatesByRangeRequest object
func (l *LightClientUpdatesByRangeRequest) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the LightClientUpdatesByRangeRequest object
func (l *LightClientUpdatesByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(l)
}
// HashTreeRootWith ssz hashes the LightClientUpdatesByRangeRequest object with a hasher
func (l *LightClientUpdatesByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Period'
hh.PutUint64(l.Period)
// Field (1) 'Count'
hh.PutUint64(l.Count)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BeaconBlocksByRangeRequest object to a target array
func (b *BeaconBlocksByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, b.StartSlot)
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, b.Count)
// Field (2) 'Step'
dst = ssz.MarshalUint64(dst, b.Step)
return
}
// UnmarshalSSZ ssz unmarshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 24 {
return ssz.ErrSize
}
// Field (0) 'StartSlot'
b.StartSlot = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Count'
b.Count = ssz.UnmarshallUint64(buf[8:16])
// Field (2) 'Step'
b.Step = ssz.UnmarshallUint64(buf[16:24])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) SizeSSZ() (size int) {
size = 24
return
}
// HashTreeRoot ssz hashes the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BeaconBlocksByRangeRequest object with a hasher
func (b *BeaconBlocksByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(b.StartSlot)
// Field (1) 'Count'
hh.PutUint64(b.Count)
// Field (2) 'Step'
hh.PutUint64(b.Step)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the Status object
func (s *Status) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the Status object to a target array
func (s *Status) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'ForkDigest'
dst = append(dst, s.ForkDigest[:]...)
// Field (1) 'FinalizedRoot'
dst = append(dst, s.FinalizedRoot[:]...)
// Field (2) 'FinalizedEpoch'
dst = ssz.MarshalUint64(dst, s.FinalizedEpoch)
// Field (3) 'HeadRoot'
dst = append(dst, s.HeadRoot[:]...)
// Field (4) 'HeadSlot'
dst = ssz.MarshalUint64(dst, s.HeadSlot)
return
}
// UnmarshalSSZ ssz unmarshals the Status object
func (s *Status) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 84 {
return ssz.ErrSize
}
// Field (0) 'ForkDigest'
copy(s.ForkDigest[:], buf[0:4])
// Field (1) 'FinalizedRoot'
copy(s.FinalizedRoot[:], buf[4:36])
// Field (2) 'FinalizedEpoch'
s.FinalizedEpoch = ssz.UnmarshallUint64(buf[36:44])
// Field (3) 'HeadRoot'
copy(s.HeadRoot[:], buf[44:76])
// Field (4) 'HeadSlot'
s.HeadSlot = ssz.UnmarshallUint64(buf[76:84])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Status object
func (s *Status) SizeSSZ() (size int) {
size = 84
return
}
// HashTreeRoot ssz hashes the Status object
func (s *Status) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the Status object with a hasher
func (s *Status) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ForkDigest'
hh.PutBytes(s.ForkDigest[:])
// Field (1) 'FinalizedRoot'
hh.PutBytes(s.FinalizedRoot[:])
// Field (2) 'FinalizedEpoch'
hh.PutUint64(s.FinalizedEpoch)
// Field (3) 'HeadRoot'
hh.PutBytes(s.HeadRoot[:])
// Field (4) 'HeadSlot'
hh.PutUint64(s.HeadSlot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the SigningData object
func (s *SigningData) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the SigningData object to a target array
func (s *SigningData) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Root'
dst = append(dst, s.Root[:]...)
// Field (1) 'Domain'
if size := len(s.Domain); size != 32 {
err = ssz.ErrBytesLengthFn("--.Domain", size, 32)
return
}
dst = append(dst, s.Domain...)
return
}
// UnmarshalSSZ ssz unmarshals the SigningData object
func (s *SigningData) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 64 {
return ssz.ErrSize
}
// Field (0) 'Root'
copy(s.Root[:], buf[0:32])
// Field (1) 'Domain'
if cap(s.Domain) == 0 {
s.Domain = make([]byte, 0, len(buf[32:64]))
}
s.Domain = append(s.Domain, buf[32:64]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SigningData object
func (s *SigningData) SizeSSZ() (size int) {
size = 64
return
}
// HashTreeRoot ssz hashes the SigningData object
func (s *SigningData) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the SigningData object with a hasher
func (s *SigningData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Root'
hh.PutBytes(s.Root[:])
// Field (1) 'Domain'
if size := len(s.Domain); size != 32 {
err = ssz.ErrBytesLengthFn("--.Domain", size, 32)
return
}
hh.PutBytes(s.Domain)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}

View File

@ -0,0 +1,67 @@
package cltypes_test
import (
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
// Fixtures: one populated instance of each p2p network message type,
// used by TestMarshalNetworkTypes for encode/decode round-trips.

// Metadata without the optional Syncnets field (V1-style, 16 bytes).
var testMetadata = &cltypes.Metadata{
	SeqNumber: 99,
	Attnets:   69,
}

var testPing = &cltypes.Ping{
	Id: 420,
}

// HexToHash left-pads short hex input to a full 32-byte hash.
var testSingleRoot = &cltypes.SingleRoot{
	Root: common.HexToHash("96"),
}

var testLcRangeRequest = &cltypes.LightClientUpdatesByRangeRequest{
	Period: 69,
	Count:  666,
}

// Step is left at its zero value (the field is deprecated).
var testBlockRangeRequest = &cltypes.BeaconBlocksByRangeRequest{
	StartSlot: 999,
	Count:     666,
}

var testStatus = &cltypes.Status{
	FinalizedEpoch: 666,
	HeadSlot:       94,
	HeadRoot:       common.HexToHash("a"),
	FinalizedRoot:  common.HexToHash("bbba"),
}
// TestMarshalNetworkTypes round-trips every network message type:
// marshal a populated fixture, check the length against SizeSSZ,
// unmarshal into a fresh value and require equality with the original.
func TestMarshalNetworkTypes(t *testing.T) {
	pairs := []struct {
		in  ssz_utils.EncodableSSZ // populated fixture to encode
		out ssz_utils.EncodableSSZ // empty value to decode into
	}{
		{testMetadata, &cltypes.Metadata{}},
		{testPing, &cltypes.Ping{}},
		{testSingleRoot, &cltypes.SingleRoot{}},
		{testLcRangeRequest, &cltypes.LightClientUpdatesByRangeRequest{}},
		{testBlockRangeRequest, &cltypes.BeaconBlocksByRangeRequest{}},
		{testStatus, &cltypes.Status{}},
	}
	for _, pair := range pairs {
		encoded, err := pair.in.MarshalSSZ()
		require.NoError(t, err)
		require.Equal(t, len(encoded), pair.in.SizeSSZ())
		require.NoError(t, pair.out.UnmarshalSSZ(encoded))
		require.Equal(t, pair.in, pair.out)
	}
}

View File

@ -0,0 +1,36 @@
package ssz_utils
import (
"encoding/binary"
ssz "github.com/ferranbt/fastssz"
)
// ObjectSSZ is the full SSZ contract: fastssz marshalling plus
// hash-tree-root computation. Kept for types still backed by fastssz.
type ObjectSSZ interface {
	ssz.Marshaler
	ssz.Unmarshaler
	HashTreeRoot() ([32]byte, error)
}

// EncodableSSZ is satisfied by hand-written codecs that can both
// encode and decode themselves, without requiring hash-tree-root.
type EncodableSSZ interface {
	Marshaler
	Unmarshaler
}

// Marshaler encodes a value to SSZ and reports its encoded size.
type Marshaler interface {
	MarshalSSZ() ([]byte, error)
	SizeSSZ() int
}

// Unmarshaler decodes a value from an SSZ-encoded buffer.
type Unmarshaler interface {
	UnmarshalSSZ(buf []byte) error
}
// MarshalUint64SSZ writes v into the first eight bytes of dst in
// little-endian byte order, as SSZ requires for uint64 basic types.
// Panics (like binary.LittleEndian.PutUint64) if dst is shorter than 8.
func MarshalUint64SSZ(dst []byte, v uint64) {
	binary.LittleEndian.PutUint64(dst, v)
}
// UnmarshalUint64SSZ reads a little-endian uint64 from the first eight
// bytes of src, the inverse of MarshalUint64SSZ.
// Panics (like binary.LittleEndian.Uint64) if src is shorter than 8.
func UnmarshalUint64SSZ(src []byte) uint64 {
	return binary.LittleEndian.Uint64(src)
}

View File

@ -3,26 +3,9 @@ package cltypes
import (
"bytes"
ssz "github.com/ferranbt/fastssz"
"github.com/ledgerwatch/erigon/cl/utils"
)
// Eth1Data represents the relevant ETH1 Data for block buidling.
type Eth1Data struct {
Root [32]byte `ssz-size:"32"`
DepositCount uint64
BlockHash [32]byte `ssz-size:"32"`
}
// AttestantionData contains information about attestantion, including finalized/attested checkpoints.
type AttestationData struct {
Slot uint64
Index uint64
BeaconBlockHash [32]byte `ssz-size:"32"`
Source *Checkpoint
Target *Checkpoint
}
/*
* BeaconBlockHeader is the message we validate in the lightclient.
* It contains the hash of the block body, and state root data.
@ -43,15 +26,6 @@ type SignedBeaconBlockHeader struct {
Signature [96]byte `ssz-size:"96"`
}
/*
* IndexedAttestation are attestantions sets to prove that someone misbehaved.
*/
type IndexedAttestation struct {
AttestingIndices []uint64 `ssz-max:"2048"`
Data *AttestationData
Signature [96]byte `ssz-size:"96"`
}
// Slashing requires 2 blocks with the same signer as proof
type ProposerSlashing struct {
Header1 *SignedBeaconBlockHeader
@ -66,13 +40,6 @@ type AttesterSlashing struct {
Attestation_2 *IndexedAttestation
}
// Full signed attestation
type Attestation struct {
AggregationBits []byte `ssz-max:"2048" ssz:"bitlist"`
Data *AttestationData
Signature [96]byte `ssz-size:"96"`
}
type DepositData struct {
PubKey [48]byte `ssz-size:"48"`
WithdrawalCredentials []byte `ssz-size:"32"`
@ -356,19 +323,6 @@ type Validator struct {
WithdrawableEpoch uint64
}
type PendingAttestation struct {
AggregationBits []byte `ssz-max:"2048"`
Data *AttestationData
InclusionDelay uint64
ProposerIndex uint64
}
// Checkpoint is used to create the initial store through checkpoint sync.
type Checkpoint struct {
Epoch uint64
Root [32]byte `ssz-size:"32"`
}
/*
* AggregateAndProof contains the index of the aggregator, the attestation
* to be aggregated and the BLS signature of the attestation.
@ -429,10 +383,3 @@ func (b *BeaconStateBellatrix) BlockRoot() ([32]byte, error) {
}
return tempHeader.HashTreeRoot()
}
type ObjectSSZ interface {
ssz.Marshaler
ssz.Unmarshaler
HashTreeRoot() ([32]byte, error)
}

View File

@ -6,194 +6,6 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the Eth1Data object
func (e *Eth1Data) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the Eth1Data object to a target array
func (e *Eth1Data) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Root'
dst = append(dst, e.Root[:]...)
// Field (1) 'DepositCount'
dst = ssz.MarshalUint64(dst, e.DepositCount)
// Field (2) 'BlockHash'
dst = append(dst, e.BlockHash[:]...)
return
}
// UnmarshalSSZ ssz unmarshals the Eth1Data object
func (e *Eth1Data) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 72 {
return ssz.ErrSize
}
// Field (0) 'Root'
copy(e.Root[:], buf[0:32])
// Field (1) 'DepositCount'
e.DepositCount = ssz.UnmarshallUint64(buf[32:40])
// Field (2) 'BlockHash'
copy(e.BlockHash[:], buf[40:72])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Eth1Data object
func (e *Eth1Data) SizeSSZ() (size int) {
size = 72
return
}
// HashTreeRoot ssz hashes the Eth1Data object
func (e *Eth1Data) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the Eth1Data object with a hasher
func (e *Eth1Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Root'
hh.PutBytes(e.Root[:])
// Field (1) 'DepositCount'
hh.PutUint64(e.DepositCount)
// Field (2) 'BlockHash'
hh.PutBytes(e.BlockHash[:])
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the AttestationData object
func (a *AttestationData) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(a)
}
// MarshalSSZTo ssz marshals the AttestationData object to a target array
func (a *AttestationData) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Slot'
dst = ssz.MarshalUint64(dst, a.Slot)
// Field (1) 'Index'
dst = ssz.MarshalUint64(dst, a.Index)
// Field (2) 'BeaconBlockHash'
dst = append(dst, a.BeaconBlockHash[:]...)
// Field (3) 'Source'
if a.Source == nil {
a.Source = new(Checkpoint)
}
if dst, err = a.Source.MarshalSSZTo(dst); err != nil {
return
}
// Field (4) 'Target'
if a.Target == nil {
a.Target = new(Checkpoint)
}
if dst, err = a.Target.MarshalSSZTo(dst); err != nil {
return
}
return
}
// UnmarshalSSZ ssz unmarshals the AttestationData object
func (a *AttestationData) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 128 {
return ssz.ErrSize
}
// Field (0) 'Slot'
a.Slot = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Index'
a.Index = ssz.UnmarshallUint64(buf[8:16])
// Field (2) 'BeaconBlockHash'
copy(a.BeaconBlockHash[:], buf[16:48])
// Field (3) 'Source'
if a.Source == nil {
a.Source = new(Checkpoint)
}
if err = a.Source.UnmarshalSSZ(buf[48:88]); err != nil {
return err
}
// Field (4) 'Target'
if a.Target == nil {
a.Target = new(Checkpoint)
}
if err = a.Target.UnmarshalSSZ(buf[88:128]); err != nil {
return err
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the AttestationData object
func (a *AttestationData) SizeSSZ() (size int) {
size = 128
return
}
// HashTreeRoot ssz hashes the AttestationData object
func (a *AttestationData) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(a)
}
// HashTreeRootWith ssz hashes the AttestationData object with a hasher
func (a *AttestationData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Slot'
hh.PutUint64(a.Slot)
// Field (1) 'Index'
hh.PutUint64(a.Index)
// Field (2) 'BeaconBlockHash'
hh.PutBytes(a.BeaconBlockHash[:])
// Field (3) 'Source'
if err = a.Source.HashTreeRootWith(hh); err != nil {
return
}
// Field (4) 'Target'
if err = a.Target.HashTreeRootWith(hh); err != nil {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the BeaconBlockHeader object
func (b *BeaconBlockHeader) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
@ -361,144 +173,6 @@ func (s *SignedBeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
// MarshalSSZ ssz marshals the IndexedAttestation object
func (i *IndexedAttestation) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(i)
}
// MarshalSSZTo ssz marshals the IndexedAttestation object to a target array
func (i *IndexedAttestation) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(228)
// Offset (0) 'AttestingIndices'
dst = ssz.WriteOffset(dst, offset)
offset += len(i.AttestingIndices) * 8
// Field (1) 'Data'
if i.Data == nil {
i.Data = new(AttestationData)
}
if dst, err = i.Data.MarshalSSZTo(dst); err != nil {
return
}
// Field (2) 'Signature'
dst = append(dst, i.Signature[:]...)
// Field (0) 'AttestingIndices'
if size := len(i.AttestingIndices); size > 2048 {
err = ssz.ErrListTooBigFn("--.AttestingIndices", size, 2048)
return
}
for ii := 0; ii < len(i.AttestingIndices); ii++ {
dst = ssz.MarshalUint64(dst, i.AttestingIndices[ii])
}
return
}
// UnmarshalSSZ ssz unmarshals the IndexedAttestation object
func (i *IndexedAttestation) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 228 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'AttestingIndices'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 < 228 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Data'
if i.Data == nil {
i.Data = new(AttestationData)
}
if err = i.Data.UnmarshalSSZ(buf[4:132]); err != nil {
return err
}
// Field (2) 'Signature'
copy(i.Signature[:], buf[132:228])
// Field (0) 'AttestingIndices'
{
buf = tail[o0:]
num, err := ssz.DivideInt2(len(buf), 8, 2048)
if err != nil {
return err
}
i.AttestingIndices = ssz.ExtendUint64(i.AttestingIndices, num)
for ii := 0; ii < num; ii++ {
i.AttestingIndices[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8])
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the IndexedAttestation object
func (i *IndexedAttestation) SizeSSZ() (size int) {
size = 228
// Field (0) 'AttestingIndices'
size += len(i.AttestingIndices) * 8
return
}
// HashTreeRoot ssz hashes the IndexedAttestation object
func (i *IndexedAttestation) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(i)
}
// HashTreeRootWith ssz hashes the IndexedAttestation object with a hasher
func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'AttestingIndices'
{
if size := len(i.AttestingIndices); size > 2048 {
err = ssz.ErrListTooBigFn("--.AttestingIndices", size, 2048)
return
}
subIndx := hh.Index()
for _, i := range i.AttestingIndices {
hh.AppendUint64(i)
}
hh.FillUpTo32()
numItems := uint64(len(i.AttestingIndices))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
} else {
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
}
}
// Field (1) 'Data'
if err = i.Data.HashTreeRootWith(hh); err != nil {
return
}
// Field (2) 'Signature'
hh.PutBytes(i.Signature[:])
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
return
}
// MarshalSSZ ssz marshals the ProposerSlashing object
func (p *ProposerSlashing) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
@ -719,127 +393,6 @@ func (a *AttesterSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
// MarshalSSZ ssz marshals the Attestation object
func (a *Attestation) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(a)
}
// MarshalSSZTo ssz marshals the Attestation object to a target array
func (a *Attestation) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(228)
// Offset (0) 'AggregationBits'
dst = ssz.WriteOffset(dst, offset)
offset += len(a.AggregationBits)
// Field (1) 'Data'
if a.Data == nil {
a.Data = new(AttestationData)
}
if dst, err = a.Data.MarshalSSZTo(dst); err != nil {
return
}
// Field (2) 'Signature'
dst = append(dst, a.Signature[:]...)
// Field (0) 'AggregationBits'
if size := len(a.AggregationBits); size > 2048 {
err = ssz.ErrBytesLengthFn("--.AggregationBits", size, 2048)
return
}
dst = append(dst, a.AggregationBits...)
return
}
// UnmarshalSSZ ssz unmarshals the Attestation object
func (a *Attestation) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 228 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'AggregationBits'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 < 228 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Data'
if a.Data == nil {
a.Data = new(AttestationData)
}
if err = a.Data.UnmarshalSSZ(buf[4:132]); err != nil {
return err
}
// Field (2) 'Signature'
copy(a.Signature[:], buf[132:228])
// Field (0) 'AggregationBits'
{
buf = tail[o0:]
if err = ssz.ValidateBitlist(buf, 2048); err != nil {
return err
}
if cap(a.AggregationBits) == 0 {
a.AggregationBits = make([]byte, 0, len(buf))
}
a.AggregationBits = append(a.AggregationBits, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the Attestation object
func (a *Attestation) SizeSSZ() (size int) {
size = 228
// Field (0) 'AggregationBits'
size += len(a.AggregationBits)
return
}
// HashTreeRoot ssz hashes the Attestation object
func (a *Attestation) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(a)
}
// HashTreeRootWith ssz hashes the Attestation object with a hasher.
func (a *Attestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'AggregationBits': an empty bitlist is invalid (it must at
	// least carry its length-marker bit).
	if len(a.AggregationBits) == 0 {
		err = ssz.ErrEmptyBitlist
		return
	}
	hh.PutBitlist(a.AggregationBits, 2048)
	// Field (1) 'Data'
	// NOTE(review): a.Data is not nil-checked here, unlike MarshalSSZTo —
	// a nil Data panics; confirm callers always populate it.
	if err = a.Data.HashTreeRootWith(hh); err != nil {
		return
	}
	// Field (2) 'Signature'
	hh.PutBytes(a.Signature[:])
	// Merkleize the field roots collected since indx; use the vectorized
	// implementation when enabled.
	if ssz.EnableVectorizedHTR {
		hh.MerkleizeVectorizedHTR(indx)
	} else {
		hh.Merkleize(indx)
	}
	return
}
// MarshalSSZ ssz marshals the DepositData object
func (d *DepositData) MarshalSSZ() ([]byte, error) {
@ -5292,210 +4845,6 @@ func (v *Validator) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
// MarshalSSZ ssz marshals the PendingAttestation object.
// It delegates to the generic fastssz marshaller.
func (p *PendingAttestation) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the PendingAttestation object to a target array.
// Fixed part (148 bytes): 4-byte bitlist offset, 128-byte AttestationData,
// 8-byte InclusionDelay, 8-byte ProposerIndex; the aggregation bits follow
// as the single variable-length field.
func (p *PendingAttestation) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	offset := int(148)
	// Offset (0) 'AggregationBits': the bits start right after the fixed part.
	dst = ssz.WriteOffset(dst, offset)
	offset += len(p.AggregationBits)
	// Field (1) 'Data': lazily allocate so a nil Data marshals as the zero value.
	if p.Data == nil {
		p.Data = new(AttestationData)
	}
	if dst, err = p.Data.MarshalSSZTo(dst); err != nil {
		return
	}
	// Field (2) 'InclusionDelay'
	dst = ssz.MarshalUint64(dst, p.InclusionDelay)
	// Field (3) 'ProposerIndex'
	dst = ssz.MarshalUint64(dst, p.ProposerIndex)
	// Field (0) 'AggregationBits': byte length capped at 2048.
	if size := len(p.AggregationBits); size > 2048 {
		err = ssz.ErrBytesLengthFn("--.AggregationBits", size, 2048)
		return
	}
	dst = append(dst, p.AggregationBits...)
	return
}
// UnmarshalSSZ ssz unmarshals the PendingAttestation object.
// Layout: [offset:4][Data:128][InclusionDelay:8][ProposerIndex:8][AggregationBits:variable].
func (p *PendingAttestation) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size < 148 {
		return ssz.ErrSize
	}
	tail := buf
	var o0 uint64
	// Offset (0) 'AggregationBits' must point inside the buffer and past
	// the fixed part.
	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
		return ssz.ErrOffset
	}
	if o0 < 148 {
		return ssz.ErrInvalidVariableOffset
	}
	// Field (1) 'Data': lazily allocate the destination.
	if p.Data == nil {
		p.Data = new(AttestationData)
	}
	if err = p.Data.UnmarshalSSZ(buf[4:132]); err != nil {
		return err
	}
	// Field (2) 'InclusionDelay'
	p.InclusionDelay = ssz.UnmarshallUint64(buf[132:140])
	// Field (3) 'ProposerIndex'
	p.ProposerIndex = ssz.UnmarshallUint64(buf[140:148])
	// Field (0) 'AggregationBits'
	{
		buf = tail[o0:]
		// NOTE(review): this only bounds the byte length, whereas
		// Attestation.UnmarshalSSZ runs ssz.ValidateBitlist on the same
		// field — confirm the looser check here is intentional.
		if len(buf) > 2048 {
			return ssz.ErrBytesLength
		}
		if cap(p.AggregationBits) == 0 {
			p.AggregationBits = make([]byte, 0, len(buf))
		}
		// Truncate before appending so that unmarshalling into a reused
		// PendingAttestation replaces the previous bits instead of
		// accumulating them (the original appended to existing content).
		p.AggregationBits = append(p.AggregationBits[:0], buf...)
	}
	return err
}
// SizeSSZ returns the ssz encoded size in bytes for the PendingAttestation object.
func (p *PendingAttestation) SizeSSZ() (size int) {
	// 148 fixed bytes (offset word + AttestationData + two uint64 fields)
	// plus the variable-length aggregation bits.
	return 148 + len(p.AggregationBits)
}
// HashTreeRoot ssz hashes the PendingAttestation object.
// It delegates to the shared default hasher, which calls HashTreeRootWith.
func (p *PendingAttestation) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the PendingAttestation object with a hasher.
func (p *PendingAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'AggregationBits': hashed as a byte list (PutBytes +
	// length mix-in), not via PutBitlist.
	// NOTE(review): Attestation hashes the same field with PutBitlist —
	// confirm the differing treatment is intentional.
	{
		elemIndx := hh.Index()
		byteLen := uint64(len(p.AggregationBits))
		if byteLen > 2048 {
			err = ssz.ErrIncorrectListSize
			return
		}
		hh.PutBytes(p.AggregationBits)
		if ssz.EnableVectorizedHTR {
			hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (2048+31)/32)
		} else {
			hh.MerkleizeWithMixin(elemIndx, byteLen, (2048+31)/32)
		}
	}
	// Field (1) 'Data'
	// NOTE(review): p.Data is not nil-checked here; a nil Data panics.
	if err = p.Data.HashTreeRootWith(hh); err != nil {
		return
	}
	// Field (2) 'InclusionDelay'
	hh.PutUint64(p.InclusionDelay)
	// Field (3) 'ProposerIndex'
	hh.PutUint64(p.ProposerIndex)
	// Merkleize the four field roots collected since indx.
	if ssz.EnableVectorizedHTR {
		hh.MerkleizeVectorizedHTR(indx)
	} else {
		hh.Merkleize(indx)
	}
	return
}
// MarshalSSZ ssz marshals the Checkpoint object.
// It delegates to the generic fastssz marshaller.
func (c *Checkpoint) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(c)
}
// MarshalSSZTo ssz marshals the Checkpoint object to a target array.
// A Checkpoint is fixed-size: the 8-byte epoch followed by the 32-byte root.
func (c *Checkpoint) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	// Field (0) 'Epoch'
	dst = ssz.MarshalUint64(buf, c.Epoch)
	// Field (1) 'Root'
	dst = append(dst, c.Root[:]...)
	return dst, nil
}
// UnmarshalSSZ ssz unmarshals the Checkpoint object.
// The input must be exactly 40 bytes: 8-byte epoch + 32-byte root.
func (c *Checkpoint) UnmarshalSSZ(buf []byte) error {
	if len(buf) != 40 {
		return ssz.ErrSize
	}
	// Field (0) 'Epoch'
	c.Epoch = ssz.UnmarshallUint64(buf[:8])
	// Field (1) 'Root'
	copy(c.Root[:], buf[8:40])
	return nil
}
// SizeSSZ returns the ssz encoded size in bytes for the Checkpoint object.
func (c *Checkpoint) SizeSSZ() (size int) {
	// Epoch (8 bytes) + Root (32 bytes).
	return 40
}
// HashTreeRoot ssz hashes the Checkpoint object.
// It delegates to the shared default hasher, which calls HashTreeRootWith.
func (c *Checkpoint) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(c)
}
// HashTreeRootWith ssz hashes the Checkpoint object with a hasher.
func (c *Checkpoint) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'Epoch'
	hh.PutUint64(c.Epoch)
	// Field (1) 'Root'
	hh.PutBytes(c.Root[:])
	// Merkleize the two field roots collected since indx.
	if ssz.EnableVectorizedHTR {
		hh.MerkleizeVectorizedHTR(indx)
	} else {
		hh.Merkleize(indx)
	}
	return
}
// MarshalSSZ ssz marshals the AggregateAndProof object
func (a *AggregateAndProof) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(a)

View File

@ -14,13 +14,14 @@
package fork
import (
"encoding/binary"
"errors"
"math"
"sort"
"time"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/common"
)
@ -66,15 +67,9 @@ func forkList(schedule map[[4]byte]uint64) (f []fork) {
}
func ComputeForkDigestForVersion(currentVersion [4]byte, genesisValidatorsRoot [32]byte) (digest [4]byte, err error) {
data := cltypes.ForkData{
CurrentVersion: currentVersion,
GenesisValidatorsRoot: genesisValidatorsRoot,
}
var dataRoot [32]byte
dataRoot, err = data.HashTreeRoot()
if err != nil {
return
}
var currentVersion32 common.Hash
copy(currentVersion32[:], currentVersion[:])
dataRoot := utils.Keccak256(currentVersion32[:], genesisValidatorsRoot[:])
// copy first four bytes to output
copy(digest[:], dataRoot[:4])
return
@ -106,12 +101,12 @@ func ComputeForkId(
nextForkVersion = fork.version
}
enrForkID := cltypes.ENRForkID{
CurrentForkDigest: digest,
NextForkVersion: nextForkVersion,
NextForkEpoch: nextForkEpoch,
}
return enrForkID.MarshalSSZ()
enrForkId := make([]byte, 16)
copy(enrForkId, digest[:])
copy(enrForkId[4:], nextForkVersion[:])
binary.BigEndian.PutUint64(enrForkId[8:], nextForkEpoch)
return enrForkId, nil
}
func GetLastFork(
@ -136,26 +131,19 @@ func ComputeDomain(
currentVersion [4]byte,
genesisValidatorsRoot [32]byte,
) ([]byte, error) {
forkDataRoot, err := (&cltypes.ForkData{
CurrentVersion: currentVersion,
GenesisValidatorsRoot: genesisValidatorsRoot,
}).HashTreeRoot()
if err != nil {
return nil, err
}
var currentVersion32 common.Hash
copy(currentVersion32[:], currentVersion[:])
forkDataRoot := utils.Keccak256(currentVersion32[:], genesisValidatorsRoot[:])
return append(domainType, forkDataRoot[:28]...), nil
}
func ComputeSigningRoot(
obj cltypes.ObjectSSZ,
obj ssz_utils.ObjectSSZ,
domain []byte,
) ([32]byte, error) {
objRoot, err := obj.HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
return (&cltypes.SigningData{
Root: objRoot,
Domain: domain,
}).HashTreeRoot()
return utils.Keccak256(objRoot[:], domain), nil
}

View File

@ -1,4 +1,4 @@
package state_encoding
package merkle_tree
import "encoding/binary"

134
cl/merkle_tree/list.go Normal file
View File

@ -0,0 +1,134 @@
package merkle_tree
import (
"math/bits"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/prysmaticlabs/gohashtree"
)
// MerkleizeVector uses our optimized routine to hash a list of 32-byte
// elements. It merkleizes `elements` as the leaves of a tree whose depth
// is derived from `length` (the vector capacity), padding each odd-sized
// layer with the zero-hash of that level.
func MerkleizeVector(elements [][32]byte, length uint64) ([32]byte, error) {
	depth := getDepth(length)
	// Return zerohash at depth
	if len(elements) == 0 {
		return ZeroHashes[depth], nil
	}
	for i := uint8(0); i < depth; i++ {
		layerLen := len(elements)
		oddNodeLength := layerLen%2 == 1
		if oddNodeLength {
			// Pad the layer so every node has a sibling to pair with.
			zerohash := ZeroHashes[i]
			elements = append(elements, zerohash)
		}
		outputLen := len(elements) / 2
		// Hash in place: gohashtree writes the parent hashes into the
		// first half of the slice, avoiding a per-layer allocation.
		// NOTE(review): this mutates (and may grow) the caller's slice —
		// callers must not rely on its contents afterwards.
		if err := gohashtree.Hash(elements, elements); err != nil {
			return [32]byte{}, err
		}
		elements = elements[:outputLen]
	}
	return elements[0], nil
}
// ArraysRootWithLimit merkleizes input against the given chunk limit and
// then mixes in the element count, i.e. the SSZ hash-tree-root of a
// list-of-roots with capacity `limit`.
func ArraysRootWithLimit(input [][32]byte, limit uint64) ([32]byte, error) {
	vectorRoot, err := MerkleizeVector(input, limit)
	if err != nil {
		return [32]byte{}, err
	}
	countRoot := Uint64Root(uint64(len(input)))
	return utils.Keccak256(vectorRoot[:], countRoot[:]), nil
}
// ArraysRoot computes the merkle root of input treated as a fixed-length
// vector of `length` 32-byte leaves; absent leaves are zero. The input is
// copied first, so the caller's slice is never modified.
func ArraysRoot(input [][32]byte, length uint64) ([32]byte, error) {
	leaves := make([][32]byte, length)
	copy(leaves, input)
	return MerkleRootFromLeaves(leaves)
}
// Uint64ListRootWithLimit computes the hash-tree-root of a list of uint64
// values with capacity `limit`: the values are packed into 32-byte chunks,
// merkleized against the limit, and the element count is mixed in.
func Uint64ListRootWithLimit(list []uint64, limit uint64) ([32]byte, error) {
	chunks := PackUint64IntoChunks(list)
	base, err := MerkleizeVector(chunks, limit)
	if err != nil {
		return [32]byte{}, err
	}
	countRoot := Uint64Root(uint64(len(list)))
	return utils.Keccak256(base[:], countRoot[:]), nil
}
// BitlistRootWithLimit computes the HashTreeRoot merkleization of
// participation roots. The trailing length-marker bit is stripped from
// `bits` before chunking, and the length mixed in is the bit count
// recovered from the marker (not the byte count).
func BitlistRootWithLimit(bits []byte, limit uint64) ([32]byte, error) {
	var (
		unpackedRoots []byte
		size          uint64
	)
	// Strip the bitlist terminator and recover the length in bits.
	unpackedRoots, size = parseBitlist(unpackedRoots, bits)
	// Pack the payload into 32-byte chunks; (limit+255)/256 is the chunk
	// capacity for a bitlist of `limit` bits (256 bits per chunk).
	roots := packBits(unpackedRoots)
	base, err := MerkleizeVector(roots, (limit+255)/256)
	if err != nil {
		return [32]byte{}, err
	}
	lengthRoot := Uint64Root(size)
	return utils.Keccak256(base[:], lengthRoot[:]), nil
}
// BitlistRootWithLimitForState computes the HashTreeRoot merkleization of
// participation roots.
func BitlistRootWithLimitForState(bits []byte, limit uint64) ([32]byte, error) {
roots := packBits(bits)
base, err := MerkleizeVector(roots, (limit+31)/32)
if err != nil {
return [32]byte{}, err
}
lengthRoot := Uint64Root(uint64(len(bits)))
return utils.Keccak256(base[:], lengthRoot[:]), nil
}
// packBits packs a byte string into 32-byte chunks, zero-padding the final
// chunk. An empty input yields a nil slice.
func packBits(bytes []byte) [][32]byte {
	numChunks := (len(bytes) + 31) / 32
	if numChunks == 0 {
		return nil
	}
	// Pre-size the result instead of growing it with repeated appends.
	chunks := make([][32]byte, numChunks)
	for i := range chunks {
		// copy stops at the end of the source, so the tail chunk is
		// implicitly zero-padded.
		copy(chunks[i][:], bytes[i*32:])
	}
	return chunks
}
// parseBitlist strips the SSZ bitlist length-marker bit from buf and
// appends the resulting payload to dst. It returns the payload with
// trailing zero bytes trimmed, together with the bitlist's length in bits.
// An empty buf (an invalid bitlist) is treated as a zero-bit list instead
// of panicking on the out-of-range index.
// NOTE(review): assumes the last byte of a non-empty buf is non-zero, as
// required for a well-formed bitlist — a zero last byte yields a garbage
// size rather than an error.
func parseBitlist(dst, buf []byte) ([]byte, uint64) {
	if len(buf) == 0 {
		return dst, 0
	}
	// The most significant set bit of the final byte is the terminator;
	// everything below it is payload.
	msb := uint8(bits.Len8(buf[len(buf)-1])) - 1
	size := uint64(8*(len(buf)-1) + int(msb))

	dst = append(dst, buf...)
	// Clear the terminator bit.
	dst[len(dst)-1] &^= uint8(1 << msb)

	// Trim any trailing zero bytes left behind after clearing the marker.
	newLen := len(dst)
	for i := len(dst) - 1; i >= 0; i-- {
		if dst[i] != 0x00 {
			break
		}
		newLen = i
	}
	return dst[:newLen], size
}

View File

@ -0,0 +1,58 @@
package merkle_tree
import (
"errors"
"fmt"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/prysmaticlabs/gohashtree"
)
// merkleizeTrieLeaves returns intermediate roots of given leaves.
// Every layer must be a power of two in size; each iteration hashes
// adjacent pairs into a freshly allocated half-size layer, so the caller's
// slice is left untouched (unlike MerkleizeVector, which hashes in place).
// Assumes at least one leaf; the empty case is guarded by
// MerkleRootFromLeaves.
func merkleizeTrieLeaves(leaves [][32]byte) ([32]byte, error) {
	for len(leaves) > 1 {
		if !utils.IsPowerOf2(uint64(len(leaves))) {
			return [32]byte{}, fmt.Errorf("hash layer is a non power of 2: %d", len(leaves))
		}
		layer := make([][32]byte, len(leaves)/2)
		if err := gohashtree.Hash(layer, leaves); err != nil {
			return [32]byte{}, err
		}
		leaves = layer
	}
	return leaves[0], nil
}
// MerkleRootFromLeaves computes the merkle root of the given leaves.
// Zero leaves is an error, a single leaf is its own root, and anything
// larger is folded pairwise by merkleizeTrieLeaves.
func MerkleRootFromLeaves(leaves [][32]byte) ([32]byte, error) {
	switch len(leaves) {
	case 0:
		return [32]byte{}, errors.New("zero leaves provided")
	case 1:
		return leaves[0], nil
	default:
		return merkleizeTrieLeaves(leaves)
	}
}
// getDepth returns the depth of a merkle tree with a given number of
// nodes: floor(log2(v)) for v >= 2, and 0 for v of 0 or 1.
func getDepth(v uint64) uint8 {
	var depth uint8
	// Halve until a single node remains; the iteration count is the depth.
	for n := v; n > 1; n >>= 1 {
		depth++
	}
	return depth
}

View File

@ -0,0 +1,50 @@
package merkle_tree_test
import (
"testing"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/cmd/erigon-cl/core/state/state_encoding"
"github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
// TestEmptyArraysRoot checks the root of an empty input padded to the
// state-roots vector length against a known-good value.
func TestEmptyArraysRoot(t *testing.T) {
	expected := common.HexToHash("df6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e")
	root, err := merkle_tree.ArraysRoot([][32]byte{}, state_encoding.StateRootsLength)
	require.NoError(t, err)
	require.Equal(t, expected, common.Hash(root))
}
// TestEmptyArraysWithLengthRoot checks a limited-list root (merkleization
// plus length mix-in) of eight distinct leaves against a known-good value.
func TestEmptyArraysWithLengthRoot(t *testing.T) {
	expected := common.HexToHash("0xf770287da731841c38eb035da016bd2daad53bf0bca607461c0685b0ea54c5f9")
	roots := [][32]byte{
		common.BytesToHash([]byte{1}),
		common.BytesToHash([]byte{2}),
		common.BytesToHash([]byte{3}),
		common.BytesToHash([]byte{4}),
		common.BytesToHash([]byte{5}),
		common.BytesToHash([]byte{6}),
		common.BytesToHash([]byte{7}),
		common.BytesToHash([]byte{8}),
	}
	root, err := merkle_tree.ArraysRootWithLimit(roots, 8192)
	require.NoError(t, err)
	require.Equal(t, expected, common.Hash(root))
}
// TestUint64ListRootWithLimit checks the root of a packed uint64 list
// under a large capacity limit against a known-good value.
func TestUint64ListRootWithLimit(t *testing.T) {
	expected := common.HexToHash("0xfbe583f8fbcc3683d98c12ae969e93aaa5ac472e15422c14759cb7f3ef60673c")
	nums := []uint64{1, 2, 4, 5, 2, 5, 6, 7, 1, 4, 3, 5, 100, 6, 64, 2}
	root, err := merkle_tree.Uint64ListRootWithLimit(nums, 274877906944)
	require.NoError(t, err)
	require.Equal(t, expected, common.Hash(root))
}
// TestParticipationBitsRoot checks the state-style participation-bits root
// (byte-length mix-in, no bitlist terminator) against a known-good value.
func TestParticipationBitsRoot(t *testing.T) {
	expected := common.HexToHash("0x8e6653ba3656afddaf5e6c69c149e63a2e26ff91a2e361b3c40b11f08c039572")
	bits := []byte{1, 2, 4, 5, 2, 5, 6, 7, 1, 4, 3, 5, 100, 6, 64, 2}
	root, err := merkle_tree.BitlistRootWithLimitForState(bits, 1099511627776)
	require.NoError(t, err)
	require.Equal(t, expected, common.Hash(root))
}

View File

@ -0,0 +1,22 @@
package merkle_tree
import (
"encoding/binary"
"github.com/ledgerwatch/erigon/common"
)
// Uint64Root returns the SSZ hash-tree-root of a uint64: the value encoded
// little-endian into the first 8 bytes of an otherwise zero 32-byte hash.
func Uint64Root(val uint64) common.Hash {
	var root common.Hash
	binary.LittleEndian.PutUint64(root[:8], val)
	return root
}
// SignatureRoot computes the hash-tree-root of a 96-byte signature by
// splitting it into three 32-byte chunks and merkleizing them as a 4-leaf
// vector (the fourth leaf is zero padding).
func SignatureRoot(signature [96]byte) (common.Hash, error) {
	return ArraysRoot([][32]byte{
		common.BytesToHash(signature[0:32]),
		common.BytesToHash(signature[32:64]),
		common.BytesToHash(signature[64:]),
	}, 4)
}

View File

@ -1,4 +1,4 @@
package state_encoding
package merkle_tree
// ZeroHashes is a representation of all zerohashes of
// varying depths till h=100.

View File

@ -12,6 +12,7 @@ import (
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication"
@ -126,7 +127,7 @@ func (b *BeaconRpcP2P) SendLightClientUpdatesReqV1(period uint64) (*cltypes.Ligh
return nil, err
}
responsePacket := []cltypes.ObjectSSZ{&cltypes.LightClientUpdate{}}
responsePacket := []ssz_utils.ObjectSSZ{&cltypes.LightClientUpdate{}}
data := common.CopyBytes(buffer.Bytes())
message, err := b.sentinel.SendRequest(b.ctx, &sentinel.RequestData{
@ -146,9 +147,9 @@ func (b *BeaconRpcP2P) SendLightClientUpdatesReqV1(period uint64) (*cltypes.Ligh
return responsePacket[0].(*cltypes.LightClientUpdate), nil
}
func (b *BeaconRpcP2P) sendBlocksRequest(topic string, reqData []byte, count uint64) ([]cltypes.ObjectSSZ, error) {
func (b *BeaconRpcP2P) sendBlocksRequest(topic string, reqData []byte, count uint64) ([]ssz_utils.ObjectSSZ, error) {
// Prepare output slice.
responsePacket := []cltypes.ObjectSSZ{}
responsePacket := []ssz_utils.ObjectSSZ{}
message, err := b.sentinel.SendRequest(b.ctx, &sentinel.RequestData{
Data: reqData,
@ -224,7 +225,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(topic string, reqData []byte, count uin
return nil, err
}
}
var responseChunk cltypes.ObjectSSZ
var responseChunk ssz_utils.ObjectSSZ
switch respForkDigest {
case utils.Bytes4ToUint32(phase0ForkDigest):
@ -250,7 +251,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(topic string, reqData []byte, count uin
}
// SendBeaconBlocksByRangeReq retrieves blocks range from beacon chain.
func (b *BeaconRpcP2P) SendBeaconBlocksByRangeReq(start, count uint64) ([]cltypes.ObjectSSZ, error) {
func (b *BeaconRpcP2P) SendBeaconBlocksByRangeReq(start, count uint64) ([]ssz_utils.ObjectSSZ, error) {
req := &cltypes.BeaconBlocksByRangeRequest{
StartSlot: start,
Count: count,
@ -266,7 +267,7 @@ func (b *BeaconRpcP2P) SendBeaconBlocksByRangeReq(start, count uint64) ([]cltype
}
// SendBeaconBlocksByRootReq retrieves blocks by root from beacon chain.
func (b *BeaconRpcP2P) SendBeaconBlocksByRootReq(roots [][32]byte) ([]cltypes.ObjectSSZ, error) {
func (b *BeaconRpcP2P) SendBeaconBlocksByRootReq(roots [][32]byte) ([]ssz_utils.ObjectSSZ, error) {
var req cltypes.BeaconBlocksByRootRequest = roots
var buffer buffer.Buffer
if err := ssz_snappy.EncodeAndWrite(&buffer, &req); err != nil {

View File

@ -18,6 +18,7 @@ import (
"github.com/golang/snappy"
"github.com/klauspost/compress/zstd"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
ssz "github.com/prysmaticlabs/fastssz"
)
@ -56,9 +57,8 @@ func CompressSnappy(data []byte) []byte {
return snappy.Encode(nil, data)
}
func EncodeSSZSnappy(data ssz.Marshaler) ([]byte, error) {
enc := make([]byte, data.SizeSSZ())
enc, err := data.MarshalSSZTo(enc[:0])
func EncodeSSZSnappy(data ssz_utils.Marshaler) ([]byte, error) {
enc, err := data.MarshalSSZ()
if err != nil {
return nil, err
}

View File

@ -10,13 +10,13 @@ import (
)
func TestSSZSnappy(t *testing.T) {
verySussyMessage := &cltypes.MetadataV1{
verySussyMessage := &cltypes.Metadata{
SeqNumber: 69, // :D
Attnets: 96, // :(
}
sussyEncoded, err := utils.EncodeSSZSnappy(verySussyMessage)
require.NoError(t, err)
sussyDecoded := &cltypes.MetadataV1{}
sussyDecoded := &cltypes.Metadata{}
require.NoError(t, utils.DecodeSSZSnappy(sussyDecoded, sussyEncoded))
require.Equal(t, verySussyMessage.SeqNumber, sussyDecoded.SeqNumber)
require.Equal(t, verySussyMessage.Attnets, sussyDecoded.Attnets)

View File

@ -21,7 +21,6 @@ func TestBeaconBlock(t *testing.T) {
signedBeaconBlockRaw := &cltypes.SignedBeaconBlockBellatrix{}
require.NoError(t, signedBeaconBlockRaw.UnmarshalSSZ(rawdb.SSZTestBeaconBlock))
_, tx := memdb.NewTestTx(t)
signedBeaconBlock := cltypes.NewSignedBeaconBlock(signedBeaconBlockRaw)
require.NoError(t, rawdb.WriteBeaconBlock(tx, signedBeaconBlock))

View File

@ -3,6 +3,7 @@ package state
import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/common"
)
@ -109,7 +110,7 @@ func (b *BeaconState) LatestExecutionPayloadHeader() *cltypes.ExecutionHeader {
}
// GetStateSSZObject allows us to use ssz methods.
func (b *BeaconState) GetStateSSZObject() cltypes.ObjectSSZ {
func (b *BeaconState) GetStateSSZObject() ssz_utils.ObjectSSZ {
switch b.version {
case clparams.BellatrixVersion:
return &cltypes.BeaconStateBellatrix{

View File

@ -1,6 +1,7 @@
package state
import (
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/cmd/erigon-cl/core/state/state_encoding"
"github.com/ledgerwatch/erigon/common"
)
@ -15,7 +16,7 @@ func (b *BeaconState) HashTreeRoot() ([32]byte, error) {
for len(currentLayer) != 32 {
currentLayer = append(currentLayer, [32]byte{})
}
return state_encoding.MerkleRootFromLeaves(currentLayer)
return merkle_tree.MerkleRootFromLeaves(currentLayer)
}
func (b *BeaconState) computeDirtyLeaves() error {
@ -24,7 +25,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(0): GenesisTime
if b.isLeafDirty(GenesisTimeLeafIndex) {
b.updateLeaf(GenesisTimeLeafIndex, state_encoding.Uint64Root(b.genesisTime))
b.updateLeaf(GenesisTimeLeafIndex, merkle_tree.Uint64Root(b.genesisTime))
}
// Field(1): GenesisValidatorsRoot
@ -34,7 +35,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(2): Slot
if b.isLeafDirty(SlotLeafIndex) {
b.updateLeaf(SlotLeafIndex, state_encoding.Uint64Root(b.slot))
b.updateLeaf(SlotLeafIndex, merkle_tree.Uint64Root(b.slot))
}
// Field(3): Fork
@ -57,7 +58,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(5): BlockRoots
if b.isLeafDirty(BlockRootsLeafIndex) {
blockRootsRoot, err := state_encoding.ArraysRoot(b.blockRoots, state_encoding.BlockRootsLength)
blockRootsRoot, err := merkle_tree.ArraysRoot(b.blockRoots, state_encoding.BlockRootsLength)
if err != nil {
return err
}
@ -66,7 +67,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(6): StateRoots
if b.isLeafDirty(StateRootsLeafIndex) {
stateRootsRoot, err := state_encoding.ArraysRoot(b.stateRoots, state_encoding.StateRootsLength)
stateRootsRoot, err := merkle_tree.ArraysRoot(b.stateRoots, state_encoding.StateRootsLength)
if err != nil {
return err
}
@ -75,7 +76,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(7): HistoricalRoots
if b.isLeafDirty(HistoricalRootsLeafIndex) {
historicalRootsRoot, err := state_encoding.ArraysRootWithLimit(b.historicalRoots, state_encoding.HistoricalRootsLength)
historicalRootsRoot, err := merkle_tree.ArraysRootWithLimit(b.historicalRoots, state_encoding.HistoricalRootsLength)
if err != nil {
return err
}
@ -102,7 +103,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(10): Eth1DepositIndex
if b.isLeafDirty(Eth1DepositIndexLeafIndex) {
b.updateLeaf(Eth1DepositIndexLeafIndex, state_encoding.Uint64Root(b.eth1DepositIndex))
b.updateLeaf(Eth1DepositIndexLeafIndex, merkle_tree.Uint64Root(b.eth1DepositIndex))
}
// Field(11): Validators
@ -116,7 +117,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(12): Balances
if b.isLeafDirty(BalancesLeafIndex) {
balancesRoot, err := state_encoding.Uint64ListRootWithLimit(b.balances, state_encoding.ValidatorLimitForBalancesChunks())
balancesRoot, err := merkle_tree.Uint64ListRootWithLimit(b.balances, state_encoding.ValidatorLimitForBalancesChunks())
if err != nil {
return err
}
@ -125,7 +126,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(13): RandaoMixes
if b.isLeafDirty(RandaoMixesLeafIndex) {
randaoRootsRoot, err := state_encoding.ArraysRoot(b.randaoMixes, state_encoding.RandaoMixesLength)
randaoRootsRoot, err := merkle_tree.ArraysRoot(b.randaoMixes, state_encoding.RandaoMixesLength)
if err != nil {
return err
}
@ -142,7 +143,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
// Field(15): PreviousEpochParticipation
if b.isLeafDirty(PreviousEpochParticipationLeafIndex) {
participationRoot, err := state_encoding.ParticipationBitsRoot(b.previousEpochParticipation)
participationRoot, err := merkle_tree.BitlistRootWithLimitForState(b.previousEpochParticipation, state_encoding.ValidatorRegistryLimit)
if err != nil {
return err
}
@ -151,7 +152,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(16): CurrentEpochParticipation
if b.isLeafDirty(CurrentEpochParticipationLeafIndex) {
participationRoot, err := state_encoding.ParticipationBitsRoot(b.currentEpochParticipation)
participationRoot, err := merkle_tree.BitlistRootWithLimitForState(b.currentEpochParticipation, state_encoding.ValidatorRegistryLimit)
if err != nil {
return err
}
@ -194,7 +195,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
// Field(21): Inactivity Scores
if b.isLeafDirty(InactivityScoresLeafIndex) {
scoresRoot, err := state_encoding.Uint64ListRootWithLimit(b.inactivityScores, state_encoding.ValidatorLimitForBalancesChunks())
scoresRoot, err := merkle_tree.Uint64ListRootWithLimit(b.inactivityScores, state_encoding.ValidatorLimitForBalancesChunks())
if err != nil {
return err
}

View File

@ -2,13 +2,9 @@ package state_encoding
import (
"encoding/binary"
"errors"
"fmt"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/common"
"github.com/prysmaticlabs/gohashtree"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
)
const (
@ -24,37 +20,6 @@ const (
// This code is a collection of functions related to encoding and
// hashing state data in the Ethereum 2.0 beacon chain.
// Uint64Root retrieves the root hash of a uint64 value by converting it to a byte array and returning it as a hash.
func Uint64Root(val uint64) common.Hash {
var root common.Hash
binary.LittleEndian.PutUint64(root[:], val)
return root
}
// ArraysRoot calculates the root hash of an array of hashes by first making a copy of the input array, then calculating the Merkle root of the copy using the MerkleRootFromLeaves function.
func ArraysRoot(input [][32]byte, length uint64) ([32]byte, error) {
leaves := make([][32]byte, length)
copy(leaves, input)
res, err := MerkleRootFromLeaves(leaves)
if err != nil {
return [32]byte{}, err
}
return res, nil
}
// ArraysRootWithLimit calculates the root hash of an array of hashes by first vectorizing the input array using the MerkleizeVector function, then calculating the root hash of the vectorized array using the Keccak256 function and the root hash of the length of the input array.
func ArraysRootWithLimit(input [][32]byte, limit uint64) ([32]byte, error) {
base, err := MerkleizeVector(input, limit)
if err != nil {
return [32]byte{}, err
}
lengthRoot := Uint64Root(uint64(len(input)))
return utils.Keccak256(base[:], lengthRoot[:]), nil
}
// Eth1DataVectorRoot calculates the root hash of an array of Eth1Data values by first vectorizing the input array using
// the HashTreeRoot method on each Eth1Data value, then calculating the root hash of the vectorized array using
// the ArraysRootWithLimit function and the Eth1DataVotesRootsLimit constant.
@ -70,24 +35,7 @@ func Eth1DataVectorRoot(votes []*cltypes.Eth1Data) ([32]byte, error) {
}
}
return ArraysRootWithLimit(vectorizedVotesRoot, Eth1DataVotesRootsLimit)
}
// Uint64ListRootWithLimit calculates the root hash of an array of uint64 values by first packing the input array into chunks using the PackUint64IntoChunks function,
// then vectorizing the chunks using the MerkleizeVector function, then calculating the
// root hash of the vectorized array using the Keccak256 function and
// the root hash of the length of the input array.
func Uint64ListRootWithLimit(list []uint64, limit uint64) ([32]byte, error) {
var err error
roots := PackUint64IntoChunks(list)
base, err := MerkleizeVector(roots, limit)
if err != nil {
return [32]byte{}, err
}
lengthRoot := Uint64Root(uint64(len(list)))
return utils.Keccak256(base[:], lengthRoot[:]), nil
return merkle_tree.ArraysRootWithLimit(vectorizedVotesRoot, Eth1DataVotesRootsLimit)
}
func ValidatorsVectorRoot(validators []*cltypes.Validator) ([32]byte, error) {
@ -102,56 +50,7 @@ func ValidatorsVectorRoot(validators []*cltypes.Validator) ([32]byte, error) {
}
}
return ArraysRootWithLimit(vectorizedValidatorsRoot, ValidatorRegistryLimit)
}
func MerkleRootFromLeaves(leaves [][32]byte) ([32]byte, error) {
if len(leaves) == 0 {
return [32]byte{}, errors.New("zero leaves provided")
}
if len(leaves) == 1 {
return leaves[0], nil
}
hashLayer := leaves
return merkleizeTrieLeaves(hashLayer)
}
// getDepth returns the depth of a merkle tree with a given number of nodes.
// The depth is defined as the number of levels in the tree, with the root
// node at level 0 and each child node at a level one greater than its parent.
// If the number of nodes is less than or equal to 1, the depth is 0.
func getDepth(v uint64) uint8 {
// If there are 0 or 1 nodes, the depth is 0.
if v <= 1 {
return 0
}
// Initialize the depth to 0.
depth := uint8(0)
// Divide the number of nodes by 2 until it is less than or equal to 1.
// The number of iterations is the depth of the tree.
for v > 1 {
v >>= 1
depth++
}
return depth
}
// merkleizeTrieLeaves returns intermediate roots of given leaves.
func merkleizeTrieLeaves(leaves [][32]byte) ([32]byte, error) {
for len(leaves) > 1 {
if !utils.IsPowerOf2(uint64(len(leaves))) {
return [32]byte{}, fmt.Errorf("hash layer is a non power of 2: %d", len(leaves))
}
layer := make([][32]byte, len(leaves)/2)
if err := gohashtree.Hash(layer, leaves); err != nil {
return [32]byte{}, err
}
leaves = layer
}
return leaves[0], nil
return merkle_tree.ArraysRootWithLimit(vectorizedValidatorsRoot, ValidatorRegistryLimit)
}
func ValidatorLimitForBalancesChunks() uint64 {
@ -167,33 +66,9 @@ func SlashingsRoot(slashings []uint64) ([32]byte, error) {
binary.LittleEndian.PutUint64(slashBuf, slashings[i])
slashingMarshaling[i] = slashBuf
}
slashingChunks, err := PackSlashings(slashingMarshaling)
slashingChunks, err := merkle_tree.PackSlashings(slashingMarshaling)
if err != nil {
return [32]byte{}, err
}
return ArraysRoot(slashingChunks, uint64(len(slashingChunks)))
}
// MerkleizeVector uses our optimized routine to hash a list of 32-byte
// elements.
func MerkleizeVector(elements [][32]byte, length uint64) ([32]byte, error) {
depth := getDepth(length)
// Return zerohash at depth
if len(elements) == 0 {
return ZeroHashes[depth], nil
}
for i := uint8(0); i < depth; i++ {
layerLen := len(elements)
oddNodeLength := layerLen%2 == 1
if oddNodeLength {
zerohash := ZeroHashes[i]
elements = append(elements, zerohash)
}
outputLen := len(elements) / 2
if err := gohashtree.Hash(elements, elements); err != nil {
return [32]byte{}, err
}
elements = elements[:outputLen]
}
return elements[0], nil
return merkle_tree.ArraysRoot(slashingChunks, uint64(len(slashingChunks)))
}

View File

@ -11,30 +11,6 @@ import (
// The test below match prysm output
func TestEmptyArraysRoot(t *testing.T) {
expected := common.HexToHash("df6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e")
root, err := state_encoding.ArraysRoot([][32]byte{}, state_encoding.StateRootsLength)
require.NoError(t, err)
require.Equal(t, expected, common.Hash(root))
}
func TestEmptyArraysWithLengthRoot(t *testing.T) {
expected := common.HexToHash("0xf770287da731841c38eb035da016bd2daad53bf0bca607461c0685b0ea54c5f9")
roots := [][32]byte{
common.BytesToHash([]byte{1}),
common.BytesToHash([]byte{2}),
common.BytesToHash([]byte{3}),
common.BytesToHash([]byte{4}),
common.BytesToHash([]byte{5}),
common.BytesToHash([]byte{6}),
common.BytesToHash([]byte{7}),
common.BytesToHash([]byte{8}),
}
root, err := state_encoding.ArraysRootWithLimit(roots, 8192)
require.NoError(t, err)
require.Equal(t, expected, common.Hash(root))
}
func TestEth1DataVector(t *testing.T) {
expected := common.HexToHash("0xaa5de3cc36f794bf4e5f1882a0a3b2f6570ed933b2e12901077781e3b09b4d6a")
votes := []*cltypes.Eth1Data{
@ -67,14 +43,6 @@ func TestValidatorsVectorRoot(t *testing.T) {
require.Equal(t, expected, common.Hash(root))
}
func TestUint64ListRootWithLimit(t *testing.T) {
expected := common.HexToHash("0xfbe583f8fbcc3683d98c12ae969e93aaa5ac472e15422c14759cb7f3ef60673c")
nums := []uint64{1, 2, 4, 5, 2, 5, 6, 7, 1, 4, 3, 5, 100, 6, 64, 2}
root, err := state_encoding.Uint64ListRootWithLimit(nums, 274877906944)
require.NoError(t, err)
require.Equal(t, expected, common.Hash(root))
}
func TestSlashingsRoot(t *testing.T) {
expected := common.HexToHash("0xaf328cf63282226acd6da21937c28296ece7a66100089f9f016f9ff47eaf59de")
nums := []uint64{1, 2, 4, 5, 2, 5, 6, 7, 1, 4, 3, 5, 100, 6, 64, 2}
@ -82,11 +50,3 @@ func TestSlashingsRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, expected, common.Hash(root))
}
// TestParticipationBitsRoot checks the participation-bits hash-tree-root of a
// fixed byte pattern against the prysm reference value.
func TestParticipationBitsRoot(t *testing.T) {
	input := []byte{1, 2, 4, 5, 2, 5, 6, 7, 1, 4, 3, 5, 100, 6, 64, 2}
	got, err := state_encoding.ParticipationBitsRoot(input)
	require.NoError(t, err)
	want := common.HexToHash("0x8e6653ba3656afddaf5e6c69c149e63a2e26ff91a2e361b3c40b11f08c039572")
	require.Equal(t, want, common.Hash(got))
}

View File

@ -1,30 +1 @@
package state_encoding
import "github.com/ledgerwatch/erigon/cl/utils"
// ParticipationBitsRoot computes the HashTreeRoot merkleization of
// participation roots: the bits are packed into 32-byte chunks, merkleized
// against the chunk-count limit derived from ValidatorRegistryLimit, and the
// byte length is mixed in per the SSZ list rule.
func ParticipationBitsRoot(bits []byte) ([32]byte, error) {
	chunks, err := packParticipationBits(bits)
	if err != nil {
		return [32]byte{}, err
	}
	vectorRoot, err := MerkleizeVector(chunks, uint64(ValidatorRegistryLimit+31)/32)
	if err != nil {
		return [32]byte{}, err
	}
	length := Uint64Root(uint64(len(bits)))
	return utils.Keccak256(vectorRoot[:], length[:]), nil
}
// packParticipationBits packs a flat slice of participation flags into
// 32-byte chunks, zero-padding the final chunk, as required by SSZ
// merkleization. An empty or nil input yields a nil chunk slice.
// The error return is always nil; it is kept so the signature matches the
// other pack helpers used by the *Root functions.
func packParticipationBits(bits []byte) ([][32]byte, error) {
	if len(bits) == 0 {
		return nil, nil
	}
	// Pre-size the result: the chunk count is known up front, so avoid the
	// repeated growth copies of append in a loop.
	chunks := make([][32]byte, (len(bits)+31)/32)
	for i := range chunks {
		// copy stops at the end of bits, leaving the tail of the last
		// chunk zero-padded.
		copy(chunks[i][:], bits[i*32:])
	}
	return chunks, nil
}

View File

@ -139,11 +139,7 @@ func ComputeSigningRootEpoch(epoch uint64, domain []byte) ([32]byte, error) {
b := make([]byte, 32)
binary.LittleEndian.PutUint64(b, epoch)
hash := utils.Keccak256(b)
sd := &cltypes.SigningData{
Root: hash,
Domain: domain,
}
return sd.HashTreeRoot()
return utils.Keccak256(hash[:], domain), nil
}
func GetBeaconProposerIndex(state *state.BeaconState) (uint64, error) {

View File

@ -4,6 +4,7 @@ import (
"sync"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/rpc"
"github.com/ledgerwatch/erigon/common"
"golang.org/x/net/context"
@ -43,7 +44,7 @@ func NewForwardBeaconDownloader(ctx context.Context, rpc *rpc.BeaconRpcP2P) *For
}
// Start begins the gossip listening process.
func (f *ForwardBeaconDownloader) ReceiveGossip(obj cltypes.ObjectSSZ) {
func (f *ForwardBeaconDownloader) ReceiveGossip(obj ssz_utils.ObjectSSZ) {
signedBlock := obj.(*cltypes.SignedBeaconBlockBellatrix)
if signedBlock.Block.ParentRoot == f.highestBlockRootProcessed {
f.addSegment(cltypes.NewSignedBeaconBlock(obj))

View File

@ -5,11 +5,12 @@ import (
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/log/v3"
)
type GossipReceiver interface {
ReceiveGossip(cltypes.ObjectSSZ)
ReceiveGossip(ssz_utils.ObjectSSZ)
}
type GossipManager struct {
@ -51,7 +52,7 @@ func (g *GossipManager) Loop() {
//If the deserialization fails, an error is logged and the loop continues to the next iteration.
//If the deserialization is successful, the object is set to the deserialized value and the loop continues to the next iteration.
receivers := g.receivers[data.Type]
var object cltypes.ObjectSSZ
var object ssz_utils.ObjectSSZ
switch data.Type {
case sentinel.GossipType_BeaconBlockGossipType:
object = &cltypes.SignedBeaconBlockBellatrix{}

View File

@ -26,6 +26,7 @@ import (
sentinelrpc "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
"github.com/ledgerwatch/erigon/cl/fork"
lcCli "github.com/ledgerwatch/erigon/cmd/sentinel/cli"
"github.com/ledgerwatch/erigon/cmd/sentinel/cli/flags"
@ -61,7 +62,7 @@ func constructBodyFreeRequest(t string) *sentinelrpc.RequestData {
}
}
func constructRequest(t string, reqBody cltypes.ObjectSSZ) (*sentinelrpc.RequestData, error) {
func constructRequest(t string, reqBody ssz_utils.ObjectSSZ) (*sentinelrpc.RequestData, error) {
var buffer buffer.Buffer
if err := ssz_snappy.EncodeAndWrite(&buffer, reqBody); err != nil {
return nil, fmt.Errorf("unable to encode request body: %v", err)

View File

@ -21,11 +21,10 @@ import (
"io"
"github.com/golang/snappy"
"github.com/ledgerwatch/erigon/cl/cltypes"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/ledgerwatch/erigon/cl/cltypes/ssz_utils"
)
func EncodeAndWrite(w io.Writer, val ssz.Marshaler, prefix ...byte) error {
func EncodeAndWrite(w io.Writer, val ssz_utils.Marshaler, prefix ...byte) error {
// create prefix for length of packet
lengthBuf := make([]byte, 10)
vin := binary.PutUvarint(lengthBuf, uint64(val.SizeSSZ()))
@ -39,8 +38,7 @@ func EncodeAndWrite(w io.Writer, val ssz.Marshaler, prefix ...byte) error {
sw := snappy.NewBufferedWriter(wr)
defer sw.Flush()
// Marshall and snap it
xs := make([]byte, 0, val.SizeSSZ())
enc, err := val.MarshalSSZTo(xs)
enc, err := val.MarshalSSZ()
if err != nil {
return err
}
@ -48,7 +46,7 @@ func EncodeAndWrite(w io.Writer, val ssz.Marshaler, prefix ...byte) error {
return err
}
func DecodeAndRead(r io.Reader, val cltypes.ObjectSSZ) error {
func DecodeAndRead(r io.Reader, val ssz_utils.ObjectSSZ) error {
forkDigest := make([]byte, 4)
// TODO(issues/5884): assert the fork digest matches the expectation for
// a specific configuration.
@ -58,7 +56,7 @@ func DecodeAndRead(r io.Reader, val cltypes.ObjectSSZ) error {
return DecodeAndReadNoForkDigest(r, val)
}
func DecodeAndReadNoForkDigest(r io.Reader, val cltypes.ObjectSSZ) error {
func DecodeAndReadNoForkDigest(r io.Reader, val ssz_utils.EncodableSSZ) error {
// Read varint for length of message.
encodedLn, _, err := ReadUvarint(r)
if err != nil {
@ -101,7 +99,7 @@ func ReadUvarint(r io.Reader) (x, n uint64, err error) {
return 0, n, nil
}
func DecodeListSSZ(data []byte, count uint64, list []cltypes.ObjectSSZ) error {
func DecodeListSSZ(data []byte, count uint64, list []ssz_utils.ObjectSSZ) error {
objSize := list[0].SizeSSZ()
r := bytes.NewReader(data)

View File

@ -30,7 +30,7 @@ type ConsensusHandlers struct {
handlers map[protocol.ID]network.StreamHandler
host host.Host
peers *peers.Peers
metadata *cltypes.MetadataV2
metadata *cltypes.Metadata
beaconConfig *clparams.BeaconChainConfig
genesisConfig *clparams.GenesisConfig
ctx context.Context
@ -44,7 +44,7 @@ const (
)
func NewConsensusHandlers(ctx context.Context, db kv.RoDB, host host.Host,
peers *peers.Peers, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.MetadataV2) *ConsensusHandlers {
peers *peers.Peers, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.Metadata) *ConsensusHandlers {
c := &ConsensusHandlers{
peers: peers,
host: host,

View File

@ -35,7 +35,7 @@ func (c *ConsensusHandlers) goodbyeHandler(s network.Stream) {
}
func (c *ConsensusHandlers) metadataV1Handler(s network.Stream) {
ssz_snappy.EncodeAndWrite(s, &cltypes.MetadataV1{
ssz_snappy.EncodeAndWrite(s, &cltypes.Metadata{
SeqNumber: c.metadata.SeqNumber,
Attnets: c.metadata.Attnets,
}, SuccessfulResponsePrefix)

View File

@ -86,6 +86,5 @@ func (h *HandShaker) ValidatePeer(id peer.ID) bool {
if err := ssz_snappy.DecodeAndReadNoForkDigest(bytes.NewReader(response), responseStatus); err != nil {
return false
}
return h.rule(responseStatus, status, h.genesisConfig, h.beaconConfig)
}

View File

@ -48,7 +48,7 @@ type Sentinel struct {
host host.Host
cfg *SentinelConfig
peers *peers.Peers
metadataV2 *cltypes.MetadataV2
metadataV2 *cltypes.Metadata
handshaker *handshake.HandShaker
db kv.RoDB
@ -132,10 +132,10 @@ func (s *Sentinel) createListener() (*discover.UDPv5, error) {
}
// TODO: Set up proper attestation number
s.metadataV2 = &cltypes.MetadataV2{
s.metadataV2 = &cltypes.Metadata{
SeqNumber: localNode.Seq(),
Attnets: 0,
Syncnets: 0,
Syncnets: new(uint64),
}
// Start stream handlers