Add maligned struct static check ()

* Add maligned static check
* Add file, oops
* lint
Preston Van Loon 2020-04-02 22:09:15 -07:00 committed by GitHub
parent 477b014bd1
commit 7bdd1355b8
12 changed files with 359 additions and 58 deletions
BUILD.bazel
beacon-chain
flags
p2p
powchain
sync/initial-sync
contracts/deposit-contract
nogo_config.json
shared/featureconfig
tools/analyzers/maligned
validator/client
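
For context, a minimal sketch (not part of this commit; the struct and field names below are made up) of the kind of layout problem the maligned check reports. On a 64-bit target, reordering fields around alignment padding shrinks the struct:

package main

import (
	"fmt"
	"unsafe"
)

// unsorted mixes 1-byte and 8-byte fields, so padding is inserted
// after each bool to keep the int64 field 8-byte aligned.
type unsorted struct {
	a bool  // 1 byte + 7 bytes padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes trailing padding
}

// sorted places the wide field first and groups the bools,
// which is the ordering a check like this nudges you toward.
type sorted struct {
	b int64 // 8 bytes
	a bool  // 1 byte
	c bool  // 1 byte + 6 bytes trailing padding
}

func main() {
	fmt.Println(unsafe.Sizeof(unsorted{})) // 24 on amd64
	fmt.Println(unsafe.Sizeof(sorted{}))   // 16 on amd64
}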

BUILD.bazel

@@ -104,6 +104,7 @@ nogo(
 "@org_golang_x_tools//go/analysis/passes/assign:go_tool_library",
 "@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
 "@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
+"//tools/analyzers/maligned:go_tool_library",
 ],
 )

beacon-chain/flags

@@ -13,11 +13,11 @@ type GlobalFlags struct {
 EnableArchivedValidatorSetChanges bool
 EnableArchivedBlocks bool
 EnableArchivedAttestations bool
+UnsafeSync bool
+EnableDiscv5 bool
 MinimumSyncPeers int
 MaxPageSize int
 DeploymentBlock int
-UnsafeSync bool
-EnableDiscv5 bool
 }

 var globalConfig *GlobalFlags

beacon-chain/p2p

@@ -4,6 +4,8 @@ package p2p
 // to initialize the p2p service.
 type Config struct {
 NoDiscovery bool
+EnableUPnP bool
+EnableDiscv5 bool
 StaticPeers []string
 BootstrapNodeAddr []string
 KademliaBootStrapAddr []string
@@ -18,7 +20,5 @@ type Config struct {
 UDPPort uint
 MaxPeers uint
 WhitelistCIDR string
-EnableUPnP bool
-EnableDiscv5 bool
 Encoding string
 }

beacon-chain/powchain

@@ -120,13 +120,17 @@ type RPCClient interface {
 // Validator Registration Contract on the ETH1.0 chain to kick off the beacon
 // chain's validator registration process.
 type Service struct {
+requestingOldLogs bool
+connectedETH1 bool
+isRunning bool
+depositContractAddress common.Address
+processingLock sync.RWMutex
 ctx context.Context
 cancel context.CancelFunc
 client Client
 headerChan chan *gethTypes.Header
 eth1Endpoint string
 httpEndpoint string
-depositContractAddress common.Address
 stateNotifier statefeed.Notifier
 reader Reader
 logger bind.ContractFilterer
@@ -142,12 +146,8 @@ type Service struct {
 beaconDB db.HeadAccessDatabase // Circular dep if using HeadFetcher.
 depositCache *depositcache.DepositCache
 lastReceivedMerkleIndex int64 // Keeps track of the last received index to prevent log spam.
-isRunning bool
 runError error
 preGenesisState *stateTrie.BeaconState
-processingLock sync.RWMutex
-requestingOldLogs bool
-connectedETH1 bool
 }

 // Web3ServiceConfig defines a config struct for web3 service to use through its life cycle.

beacon-chain/sync/initial-sync

@@ -20,13 +20,13 @@ func TestStateMachine_Stringify(t *testing.T) {
 {
 "newly created state machine",
 []*epochState{
-{epoch: 8, state: stateNew,},
-{epoch: 9, state: stateScheduled,},
-{epoch: 10, state: stateDataParsed,},
-{epoch: 11, state: stateSkipped,},
-{epoch: 12, state: stateSkippedExt,},
-{epoch: 13, state: stateComplete,},
-{epoch: 14, state: stateSent,},
+{epoch: 8, state: stateNew},
+{epoch: 9, state: stateScheduled},
+{epoch: 10, state: stateDataParsed},
+{epoch: 11, state: stateSkipped},
+{epoch: 12, state: stateSkippedExt},
+{epoch: 13, state: stateComplete},
+{epoch: 14, state: stateSent},
 },
 "[8:new 9:scheduled 10:dataParsed 11:skipped 12:skippedExt 13:complete 14:sent]",
 },
@@ -85,9 +85,9 @@ func TestStateMachine_trigger(t *testing.T) {
 }
 type args struct {
 name eventID
+returnState stateID
 epoch uint64
 data interface{}
-returnState stateID
 }
 tests := []struct {
 name string
@@ -100,7 +100,7 @@ func TestStateMachine_trigger(t *testing.T) {
 name: "event not found",
 events: []event{},
 epochs: []uint64{},
-args: args{eventSchedule, 12, nil, stateNew},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateNew},
 err: fmt.Errorf("event not found: %v", eventSchedule),
 },
 {
@@ -109,7 +109,7 @@ func TestStateMachine_trigger(t *testing.T) {
 {stateNew, eventSchedule, stateScheduled, false},
 },
 epochs: []uint64{},
-args: args{eventSchedule, 12, nil, stateScheduled},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateScheduled},
 err: fmt.Errorf("state for %v epoch not found", 12),
 },
 {
@@ -118,7 +118,7 @@ func TestStateMachine_trigger(t *testing.T) {
 {stateNew, eventSchedule, stateScheduled, false},
 },
 epochs: []uint64{12, 13},
-args: args{eventSchedule, 12, nil, stateScheduled},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateScheduled},
 err: nil,
 },
 {
@@ -129,7 +129,7 @@ func TestStateMachine_trigger(t *testing.T) {
 {stateSent, eventSchedule, stateComplete, false},
 },
 epochs: []uint64{12, 13},
-args: args{eventSchedule, 12, nil, stateScheduled},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateScheduled},
 err: nil,
 },
 {
@@ -140,7 +140,7 @@ func TestStateMachine_trigger(t *testing.T) {
 {stateSent, eventSchedule, stateComplete, false},
 },
 epochs: []uint64{12, 13},
-args: args{eventSchedule, 12, nil, stateComplete},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateComplete},
 err: nil,
 },
 {
@@ -151,7 +151,7 @@ func TestStateMachine_trigger(t *testing.T) {
 {stateNew, eventSchedule, stateComplete, false},
 },
 epochs: []uint64{12, 13},
-args: args{eventSchedule, 12, nil, stateSent},
+args: args{name: eventSchedule, epoch: 12, data: nil, returnState: stateSent},
 err: nil,
 },
 }

contracts/deposit-contract

@@ -22,8 +22,8 @@ var (
 // through which we can perform actions on the eth1.0 chain.
 type TestAccount struct {
 Addr common.Address
-Contract *DepositContract
 ContractAddr common.Address
+Contract *DepositContract
 Backend *backends.SimulatedBackend
 TxOpts *bind.TransactOpts
 }
@@ -54,7 +54,7 @@ func Setup() (*TestAccount, error) {
 }
 backend.Commit()
-return &TestAccount{addr, contract, contractAddr, backend, txOpts}, nil
+return &TestAccount{addr, contractAddr, contract, backend, txOpts}, nil
 }

 // Amount32Eth returns 32Eth(in wei) in terms of the big.Int type.

nogo_config.json

@@ -51,5 +51,13 @@
     "exclude_files": {
       "external/.*": "Third party code"
     }
+  },
+  "maligned": {
+    "exclude_files": {
+      "external/.*": "Third party code",
+      "rules_go_work-.*": "Third party code",
+      "shared/params/config.go": "This config struct needs to be organized for now",
+      "proto/.*": "Excluding protobuf objects for now"
+    }
   }
 }

shared/featureconfig

@@ -28,32 +28,30 @@ var log = logrus.WithField("prefix", "flags")
 // Flags is a struct to represent which features the client will perform on runtime.
 type Flags struct {
 NoCustomConfig bool // NoCustomConfigFlag determines whether to launch a beacon chain using real parameters or demo parameters.
-CustomGenesisDelay uint64 // CustomGenesisDelay signals how long of a delay to set to start the chain.
-MinimalConfig bool // MinimalConfig as defined in the spec.
-WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
-InitSyncNoVerify bool // InitSyncNoVerify when initial syncing w/o verifying block's contents.
-EnableDynamicCommitteeSubnets bool // Enables dynamic attestation committee subnets via p2p.
-SkipBLSVerify bool // Skips BLS verification across the runtime.
-EnableBackupWebhook bool // EnableBackupWebhook to allow database backups to trigger from monitoring port /db/backup.
-PruneEpochBoundaryStates bool // PruneEpochBoundaryStates prunes the epoch boundary state before last finalized check point.
-EnableSnappyDBCompression bool // EnableSnappyDBCompression in the database.
-KafkaBootstrapServers string // KafkaBootstrapServers to find kafka servers to stream blocks, attestations, etc.
-ProtectProposer bool // ProtectProposer prevents the validator client from signing any proposals that would be considered a slashable offense.
-ProtectAttester bool // ProtectAttester prevents the validator client from signing any attestations that would be considered a slashable offense.
-DisableStrictAttestationPubsubVerification bool // DisableStrictAttestationPubsubVerification will disabling strict signature verification in pubsub.
-DisableUpdateHeadPerAttestation bool // DisableUpdateHeadPerAttestation will disabling update head on per attestation basis.
-EnableByteMempool bool // EnaableByteMempool memory management.
-EnableDomainDataCache bool // EnableDomainDataCache caches validator calls to DomainData per epoch.
-EnableStateGenSigVerify bool // EnableStateGenSigVerify verifies proposer and randao signatures during state gen.
-CheckHeadState bool // CheckHeadState checks the current headstate before retrieving the desired state from the db.
-EnableNoise bool // EnableNoise enables the beacon node to use NOISE instead of SECIO when performing a handshake with another peer.
-DontPruneStateStartUp bool // DontPruneStateStartUp disables pruning state upon beacon node start up.
-NewStateMgmt bool // NewStateMgmt enables the new experimental state mgmt service.
-DisableInitSyncQueue bool // DisableInitSyncQueue disables the new initial sync implementation.
-EnableFieldTrie bool // EnableFieldTrie enables the state from using field specific tries when computing the root.
-EnableBlockHTR bool // EnableBlockHTR enables custom hashing of our beacon blocks.
-NoInitSyncBatchSaveBlocks bool // NoInitSyncBatchSaveBlocks disables batch save blocks mode during initial syncing.
+MinimalConfig bool // MinimalConfig as defined in the spec.
+WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
+InitSyncNoVerify bool // InitSyncNoVerify when initial syncing w/o verifying block's contents.
+EnableDynamicCommitteeSubnets bool // Enables dynamic attestation committee subnets via p2p.
+SkipBLSVerify bool // Skips BLS verification across the runtime.
+EnableBackupWebhook bool // EnableBackupWebhook to allow database backups to trigger from monitoring port /db/backup.
+PruneEpochBoundaryStates bool // PruneEpochBoundaryStates prunes the epoch boundary state before last finalized check point.
+EnableSnappyDBCompression bool // EnableSnappyDBCompression in the database.
+ProtectProposer bool // ProtectProposer prevents the validator client from signing any proposals that would be considered a slashable offense.
+ProtectAttester bool // ProtectAttester prevents the validator client from signing any attestations that would be considered a slashable offense.
+DisableStrictAttestationPubsubVerification bool // DisableStrictAttestationPubsubVerification will disabling strict signature verification in pubsub.
+DisableUpdateHeadPerAttestation bool // DisableUpdateHeadPerAttestation will disabling update head on per attestation basis.
+EnableByteMempool bool // EnaableByteMempool memory management.
+EnableDomainDataCache bool // EnableDomainDataCache caches validator calls to DomainData per epoch.
+EnableStateGenSigVerify bool // EnableStateGenSigVerify verifies proposer and randao signatures during state gen.
+CheckHeadState bool // CheckHeadState checks the current headstate before retrieving the desired state from the db.
+EnableNoise bool // EnableNoise enables the beacon node to use NOISE instead of SECIO when performing a handshake with another peer.
+DontPruneStateStartUp bool // DontPruneStateStartUp disables pruning state upon beacon node start up.
+NewStateMgmt bool // NewStateMgmt enables the new experimental state mgmt service.
+DisableInitSyncQueue bool // DisableInitSyncQueue disables the new initial sync implementation.
+EnableFieldTrie bool // EnableFieldTrie enables the state from using field specific tries when computing the root.
+EnableBlockHTR bool // EnableBlockHTR enables custom hashing of our beacon blocks.
+NoInitSyncBatchSaveBlocks bool // NoInitSyncBatchSaveBlocks disables batch save blocks mode during initial syncing.
 // DisableForkChoice disables using LMD-GHOST fork choice to update
 // the head of the chain based on attestations and instead accepts any valid received block
 // as the chain head. UNSAFE, use with caution.
@@ -67,6 +65,9 @@ type Flags struct {
 EnableEth1DataVoteCache bool // EnableEth1DataVoteCache; see https://github.com/prysmaticlabs/prysm/issues/3106.
 EnableSlasherConnection bool // EnableSlasher enable retrieval of slashing events from a slasher instance.
 EnableBlockTreeCache bool // EnableBlockTreeCache enable fork choice service to maintain latest filtered block tree.
+KafkaBootstrapServers string // KafkaBootstrapServers to find kafka servers to stream blocks, attestations, etc.
+CustomGenesisDelay uint64 // CustomGenesisDelay signals how long of a delay to set to start the chain.
 }

 var featureConfig *Flags

tools/analyzers/maligned/BUILD.bazel

@@ -0,0 +1,31 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_tool_library")
go_tool_library(
name = "go_tool_library",
srcs = [
"analyzer.go",
"maligned.go",
],
importpath = "maligned",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
"@org_golang_x_tools//go/ast/inspector:go_tool_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"analyzer.go",
"maligned.go",
],
importpath = "github.com/prysmaticlabs/prysm/tools/analyzers/maligned",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/ast/inspector:go_default_library",
],
)

tools/analyzers/maligned/analyzer.go

@@ -0,0 +1,39 @@
package maligned
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Doc explaining the tool.
const Doc = "Tool to detect Go structs that would take less memory if their fields were sorted."
// Analyzer runs static analysis.
var Analyzer = &analysis.Analyzer{
Name: "maligned",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.StructType)(nil),
}
inspect.Preorder(nodeFilter, func(node ast.Node) {
if s, ok := node.(*ast.StructType); ok {
if err := malign(node.Pos(), pass.TypesInfo.Types[s].Type.(*types.Struct)); err != nil {
pass.Reportf(node.Pos(), err.Error())
}
}
})
return nil, nil
}
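
This Analyzer is wired into Bazel's nogo by the BUILD.bazel change above. As an aside (an assumption for illustration, not something this commit adds), the same Analyzer value could also be run standalone with the x/tools singlechecker driver:

// Hypothetical standalone driver for the analyzer; not part of this commit.
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/prysmaticlabs/prysm/tools/analyzers/maligned"
)

func main() {
	// singlechecker handles flag parsing and package loading, then runs
	// this single analyzer over the packages named on the command line.
	singlechecker.Main(maligned.Analyzer)
}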

tools/analyzers/maligned/maligned.go

@@ -0,0 +1,221 @@
// Originally from https://github.com/mdempsky/maligned, adapted to work with nogo.
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package maligned
import (
"fmt"
"go/build"
"go/token"
"go/types"
"sort"
"strings"
)
var fset = token.NewFileSet()
func malign(pos token.Pos, str *types.Struct) error {
wordSize := int64(8)
maxAlign := int64(8)
switch build.Default.GOARCH {
case "386", "arm":
wordSize, maxAlign = 4, 4
case "amd64p32":
wordSize = 4
}
s := gcSizes{wordSize, maxAlign}
sz, opt := s.Sizeof(str), optimalSize(str, &s)
if sz != opt {
return fmt.Errorf("maligned struct of size %d could be %d if fields were sorted by type. See: https://bit.ly/malignedstruct\n\nOptimal sort order is\n%s", sz, opt, optimalSort(s, str))
}
return nil
}
func optimalSort(sizes gcSizes, str *types.Struct) string {
nf := str.NumFields()
fields := make([]*types.Var, nf)
alignofs := make([]int64, nf)
sizeofs := make([]int64, nf)
for i := 0; i < nf; i++ {
fields[i] = str.Field(i)
ft := fields[i].Type()
alignofs[i] = sizes.Alignof(ft)
sizeofs[i] = sizes.Sizeof(ft)
}
sort.Sort(&byAlignAndSize{fields, alignofs, sizeofs})
// Reverse fields
for left, right := 0, len(fields)-1; left < right; left, right = left+1, right-1 {
fields[left], fields[right] = fields[right], fields[left]
}
ss := make([]string, nf)
for i, f := range fields {
ss[i] = fmt.Sprintf(" %s %s", f.Name(), f.Type().String())
}
return strings.Join(ss, "\n")
}
func optimalSize(str *types.Struct, sizes *gcSizes) int64 {
nf := str.NumFields()
fields := make([]*types.Var, nf)
alignofs := make([]int64, nf)
sizeofs := make([]int64, nf)
for i := 0; i < nf; i++ {
fields[i] = str.Field(i)
ft := fields[i].Type()
alignofs[i] = sizes.Alignof(ft)
sizeofs[i] = sizes.Sizeof(ft)
}
sort.Sort(&byAlignAndSize{fields, alignofs, sizeofs})
return sizes.Sizeof(types.NewStruct(fields, nil))
}
type byAlignAndSize struct {
fields []*types.Var
alignofs []int64
sizeofs []int64
}
func (s *byAlignAndSize) Len() int { return len(s.fields) }
func (s *byAlignAndSize) Swap(i, j int) {
s.fields[i], s.fields[j] = s.fields[j], s.fields[i]
s.alignofs[i], s.alignofs[j] = s.alignofs[j], s.alignofs[i]
s.sizeofs[i], s.sizeofs[j] = s.sizeofs[j], s.sizeofs[i]
}
func (s *byAlignAndSize) Less(i, j int) bool {
// Place zero sized objects before non-zero sized objects.
if s.sizeofs[i] == 0 && s.sizeofs[j] != 0 {
return true
}
if s.sizeofs[j] == 0 && s.sizeofs[i] != 0 {
return false
}
// Next, place more tightly aligned objects before less tightly aligned objects.
if s.alignofs[i] != s.alignofs[j] {
return s.alignofs[i] > s.alignofs[j]
}
// Lastly, order by size.
if s.sizeofs[i] != s.sizeofs[j] {
return s.sizeofs[i] > s.sizeofs[j]
}
return false
}
// Code below based on go/types.StdSizes.
type gcSizes struct {
WordSize int64
MaxAlign int64
}
func (s *gcSizes) Alignof(T types.Type) int64 {
// NOTE: On amd64, complex64 is 8 byte aligned,
// even though float32 is only 4 byte aligned.
// For arrays and structs, alignment is defined in terms
// of alignment of the elements and fields, respectively.
switch t := T.Underlying().(type) {
case *types.Array:
// spec: "For a variable x of array type: unsafe.Alignof(x)
// is the same as unsafe.Alignof(x[0]), but at least 1."
return s.Alignof(t.Elem())
case *types.Struct:
// spec: "For a variable x of struct type: unsafe.Alignof(x)
// is the largest of the values unsafe.Alignof(x.f) for each
// field f of x, but at least 1."
max := int64(1)
for i, nf := 0, t.NumFields(); i < nf; i++ {
if a := s.Alignof(t.Field(i).Type()); a > max {
max = a
}
}
return max
}
a := s.Sizeof(T) // may be 0
// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
if a < 1 {
return 1
}
if a > s.MaxAlign {
return s.MaxAlign
}
return a
}
var basicSizes = [...]byte{
types.Bool: 1,
types.Int8: 1,
types.Int16: 2,
types.Int32: 4,
types.Int64: 8,
types.Uint8: 1,
types.Uint16: 2,
types.Uint32: 4,
types.Uint64: 8,
types.Float32: 4,
types.Float64: 8,
types.Complex64: 8,
types.Complex128: 16,
}
func (s *gcSizes) Sizeof(T types.Type) int64 {
switch t := T.Underlying().(type) {
case *types.Basic:
k := t.Kind()
if int(k) < len(basicSizes) {
if s := basicSizes[k]; s > 0 {
return int64(s)
}
}
if k == types.String {
return s.WordSize * 2
}
case *types.Array:
n := t.Len()
if n == 0 {
return 0
}
a := s.Alignof(t.Elem())
z := s.Sizeof(t.Elem())
return align(z, a)*(n-1) + z
case *types.Slice:
return s.WordSize * 3
case *types.Struct:
nf := t.NumFields()
if nf == 0 {
return 0
}
var o int64
max := int64(1)
for i := 0; i < nf; i++ {
ft := t.Field(i).Type()
a, sz := s.Alignof(ft), s.Sizeof(ft)
if a > max {
max = a
}
if i == nf-1 && sz == 0 && o != 0 {
sz = 1
}
o = align(o, a) + sz
}
return align(o, max)
case *types.Interface:
return s.WordSize * 2
}
return s.WordSize // catch-all
}
// align returns the smallest y >= x such that y % a == 0.
func align(x, a int64) int64 {
y := x + a - 1
return y - y%a
}
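
As a sanity check on the sizing rules above (an illustration, not part of this commit), the standard library's go/types.StdSizes with the same 64-bit word size and max alignment reproduces the effect of the GlobalFlags reordering earlier in this commit: three bools, three ints, then two more bools costs 40 bytes, while grouping all five bools together costs 32.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	field := func(name string, t types.Type) *types.Var {
		return types.NewField(token.NoPos, nil, name, t, false)
	}
	// Field layout loosely mirroring the old GlobalFlags order: bools split
	// around the int fields, forcing padding before and after the int run.
	mixed := types.NewStruct([]*types.Var{
		field("a", types.Typ[types.Bool]),
		field("b", types.Typ[types.Bool]),
		field("c", types.Typ[types.Bool]),
		field("d", types.Typ[types.Int]),
		field("e", types.Typ[types.Int]),
		field("f", types.Typ[types.Int]),
		field("g", types.Typ[types.Bool]),
		field("h", types.Typ[types.Bool]),
	}, nil)
	// The reordered layout: all bools grouped, then the ints.
	grouped := types.NewStruct([]*types.Var{
		field("a", types.Typ[types.Bool]),
		field("b", types.Typ[types.Bool]),
		field("c", types.Typ[types.Bool]),
		field("g", types.Typ[types.Bool]),
		field("h", types.Typ[types.Bool]),
		field("d", types.Typ[types.Int]),
		field("e", types.Typ[types.Int]),
		field("f", types.Typ[types.Int]),
	}, nil)
	sizes := &types.StdSizes{WordSize: 8, MaxAlign: 8}
	fmt.Println(sizes.Sizeof(mixed), sizes.Sizeof(grouped)) // 40 32
}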

validator/client

@@ -14,22 +14,22 @@ type fakeValidator struct {
 WaitForActivationCalled bool
 WaitForChainStartCalled bool
 WaitForSyncCalled bool
-NextSlotRet <-chan uint64
 NextSlotCalled bool
 CanonicalHeadSlotCalled bool
 UpdateDutiesCalled bool
-UpdateDutiesArg1 uint64
-UpdateDutiesRet error
 RoleAtCalled bool
-RoleAtArg1 uint64
-RolesAtRet []pb.ValidatorRole
 AttestToBlockHeadCalled bool
-AttestToBlockHeadArg1 uint64
 ProposeBlockCalled bool
-ProposeBlockArg1 uint64
 LogValidatorGainsAndLossesCalled bool
 SlotDeadlineCalled bool
+ProposeBlockArg1 uint64
+AttestToBlockHeadArg1 uint64
+RoleAtArg1 uint64
+UpdateDutiesArg1 uint64
+NextSlotRet <-chan uint64
 PublicKey string
+UpdateDutiesRet error
+RolesAtRet []pb.ValidatorRole
 }

 func (fv *fakeValidator) Done() {