Block Processing And State Transition Improvements (#10408)

* cleanup

* clean up more

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
This commit is contained in:
Nishant Das 2022-03-23 19:57:48 +08:00 committed by GitHub
parent c8c1d04c07
commit 0b9b635646
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 30 additions and 19 deletions

View File

@ -318,9 +318,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
}
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()

View File

@ -302,11 +302,11 @@ func handlePendingAttestationSlice(val []*ethpb.PendingAttestation, indices []ui
// handleBalanceSlice returns the root of a slice of validator balances.
func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, error) {
if convertAll {
balancesMarshaling := make([][]byte, 0)
for _, b := range val {
balancesMarshaling := make([][]byte, len(val))
for i, b := range val {
balanceBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(balanceBuf, b)
balancesMarshaling = append(balancesMarshaling, balanceBuf)
balancesMarshaling[i] = balanceBuf
}
balancesChunks, err := ssz.PackByChunk(balancesMarshaling)
if err != nil {

View File

@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
)
@ -35,7 +34,7 @@ func ParticipationBitsRoot(bits []byte) ([32]byte, error) {
// it does not have length bytes per chunk.
func packParticipationBits(bytes []byte) ([][32]byte, error) {
numItems := len(bytes)
var chunks [][32]byte
chunks := make([][32]byte, 0, numItems/32)
for i := 0; i < numItems; i += 32 {
j := i + 32
// We create our upper bound index of the chunk, if it is greater than numItems,
@ -45,7 +44,9 @@ func packParticipationBits(bytes []byte) ([][32]byte, error) {
}
// We create chunks from the list of items based on the
// indices determined above.
chunks = append(chunks, bytesutil.ToBytes32(bytes[i:j]))
chunk := [32]byte{}
copy(chunk[:], bytes[i:j])
chunks = append(chunks, chunk)
}
if len(chunks) == 0 {

View File

@ -172,6 +172,8 @@ func recomputeRootFromLayer(idx int, layers [][]*[32]byte, chunks []*[32]byte,
// Using information about the index which changed, idx, we recompute
// only its branch up the tree.
currentIndex := idx
// Allocate only once.
combinedChunks := [64]byte{}
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
@ -181,12 +183,16 @@ func recomputeRootFromLayer(idx int, layers [][]*[32]byte, chunks []*[32]byte,
neighbor = *layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
copy(combinedChunks[:32], root[:])
copy(combinedChunks[32:], neighbor[:])
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
copy(combinedChunks[:32], neighbor[:])
copy(combinedChunks[32:], root[:])
}
parentHash := hasher(combinedChunks[:])
root = parentHash
parentIdx := currentIndex / 2
// Update the cached layers at the parent index.
rootVal := root
@ -217,23 +223,30 @@ func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte
currentIndex := idx
root := item
// Allocate only once.
neighbor := [32]byte{}
combinedChunks := [64]byte{}
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if neighborIdx >= len(layers[i]) {
neighbor = trie.ZeroHashes[i]
} else {
neighbor = *layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
copy(combinedChunks[:32], root[:])
copy(combinedChunks[32:], neighbor[:])
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
copy(combinedChunks[:32], neighbor[:])
copy(combinedChunks[32:], root[:])
}
parentHash := hasher(combinedChunks[:])
root = parentHash
parentIdx := currentIndex / 2
if len(layers[i+1]) == 0 || parentIdx >= len(layers[i+1]) {
newItem := root

View File

@ -76,7 +76,7 @@ func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
return chunks, nil
}
// We flatten the list in order to pack its items into byte chunks correctly.
var orderedItems []byte
orderedItems := make([]byte, 0, len(serializedItems)*len(serializedItems[0]))
for _, item := range serializedItems {
orderedItems = append(orderedItems, item...)
}