diff --git a/beacon-chain/db/kv/blocks_test.go b/beacon-chain/db/kv/blocks_test.go
index 5b6f7dc26..8e850f83c 100644
--- a/beacon-chain/db/kv/blocks_test.go
+++ b/beacon-chain/db/kv/blocks_test.go
@@ -327,7 +327,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) {
 	totalBlocks := make([]*ethpb.SignedBeaconBlock, slots)
 	for i := uint64(0); i < slots; i++ {
 		b := testutil.NewBeaconBlock()
-		b.Block.Slot = uint64(i)
+		b.Block.Slot = i
 		b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
 		totalBlocks[i] = b
 	}
diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go
index 7f5d3f841..ce2c2a202 100644
--- a/beacon-chain/node/node.go
+++ b/beacon-chain/node/node.go
@@ -233,7 +233,7 @@ func (b *BeaconNode) Start() {
 		for i := 10; i > 0; i-- {
 			<-sigc
 			if i > 1 {
-				log.Info("Already shutting down, interrupt more to panic", "times", i-1)
+				log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic")
 			}
 		}
 		panic("Panic closing the beacon node")
diff --git a/beacon-chain/p2p/options.go b/beacon-chain/p2p/options.go
index 34f85464f..3f99c84f9 100644
--- a/beacon-chain/p2p/options.go
+++ b/beacon-chain/p2p/options.go
@@ -4,7 +4,6 @@ import (
 	"crypto/ecdsa"
 	"fmt"
 	"net"
-	"time"
 
 	"github.com/libp2p/go-libp2p"
 	noise "github.com/libp2p/go-libp2p-noise"
@@ -15,15 +14,6 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/version"
 )
 
-const (
-	// Period that we allocate each new peer before we mark them as valid
-	// for trimming.
-	gracePeriod = 2 * time.Minute
-	// Buffer for the number of peers allowed to connect above max peers before the
-	// connection manager begins trimming them.
-	peerBuffer = 5
-)
-
 // buildOptions for the libp2p host.
 func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
 	cfg := s.cfg
diff --git a/beacon-chain/p2p/service.go b/beacon-chain/p2p/service.go
index 98028f03e..c852cc500 100644
--- a/beacon-chain/p2p/service.go
+++ b/beacon-chain/p2p/service.go
@@ -54,6 +54,9 @@ const lookupLimit = 15
 // maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
 const maxBadResponses = 5
 
+// Exclusion list cache config values.
+const cacheNumCounters, cacheMaxCost, cacheBufferItems = 1000, 1000, 64
+
 // Service for managing peer to peer (p2p) networking.
 type Service struct {
 	started bool
@@ -83,9 +86,9 @@ func NewService(cfg *Config) (*Service, error) {
 	ctx, cancel := context.WithCancel(context.Background())
 	_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
 	cache, err := ristretto.NewCache(&ristretto.Config{
-		NumCounters: 1000,
-		MaxCost: 1000,
-		BufferItems: 64,
+		NumCounters: cacheNumCounters,
+		MaxCost: cacheMaxCost,
+		BufferItems: cacheBufferItems,
 	})
 	if err != nil {
 		return nil, err
diff --git a/beacon-chain/p2p/watch_peers.go b/beacon-chain/p2p/watch_peers.go
index e394d66e1..3bb28ef30 100644
--- a/beacon-chain/p2p/watch_peers.go
+++ b/beacon-chain/p2p/watch_peers.go
@@ -7,6 +7,8 @@ import (
 	host "github.com/libp2p/go-libp2p-host"
 )
 
+const timeoutMax = 30 * time.Second
+
 // ensurePeerConnections will attempt to reestablish connection to the peers
 // if there are currently no connections to that peer.
 func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
@@ -26,7 +28,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
 		c := h.Network().ConnsToPeer(peer.ID)
 		if len(c) == 0 {
 			log.WithField("peer", peer.ID).Debug("No connections to peer, reconnecting")
-			ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+			ctx, cancel := context.WithTimeout(ctx, timeoutMax)
 			defer cancel()
 			if err := h.Connect(ctx, *peer); err != nil {
 				log.WithField("peer", peer.ID).WithField("addrs", peer.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go
index 8a50cf032..d85a31ff6 100644
--- a/beacon-chain/rpc/service.go
+++ b/beacon-chain/rpc/service.go
@@ -45,6 +45,8 @@ import (
 	"google.golang.org/grpc/reflection"
 )
 
+const attestationBufferSize = 100
+
 var log logrus.FieldLogger
 
 func init() {
@@ -275,8 +277,8 @@ func (s *Service) Start() {
 		Broadcaster: s.p2p,
 		StateGen: s.stateGen,
 		SyncChecker: s.syncService,
-		ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, 100),
-		CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100),
+		ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, attestationBufferSize),
+		CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, attestationBufferSize),
 	}
 	ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
 	ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
diff --git a/beacon-chain/sync/rpc_goodbye.go b/beacon-chain/sync/rpc_goodbye.go
index 61f94c81b..43483b944 100644
--- a/beacon-chain/sync/rpc_goodbye.go
+++ b/beacon-chain/sync/rpc_goodbye.go
@@ -3,6 +3,7 @@ package sync
 import (
 	"context"
 	"fmt"
+	"time"
 
 	libp2pcore "github.com/libp2p/go-libp2p-core"
 	"github.com/libp2p/go-libp2p-core/helpers"
@@ -24,6 +25,10 @@ var goodByes = map[uint64]string{
 	codeGenericError: "fault/error",
 }
 
+// Add a short delay to allow the stream to flush before resetting it.
+// There is still a chance that the peer won't receive the message.
+const flushDelay = 50 * time.Millisecond
+
 // goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
 func (s *Service) goodbyeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
 	defer func() {
diff --git a/beacon-chain/sync/rpc_status.go b/beacon-chain/sync/rpc_status.go
index 310691017..d0d48d69e 100644
--- a/beacon-chain/sync/rpc_status.go
+++ b/beacon-chain/sync/rpc_status.go
@@ -228,9 +228,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
 		if err := stream.Close(); err != nil { // Close before disconnecting.
 			log.WithError(err).Error("Failed to close stream")
 		}
-		// Add a short delay to allow the stream to flush before closing the connection.
-		// There is still a chance that the peer won't receive the message.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flushDelay)
 		if err := s.p2p.Disconnect(stream.Conn().RemotePeer()); err != nil {
 			log.WithError(err).Error("Failed to disconnect from peer")
 		}
diff --git a/beacon-chain/sync/service.go b/beacon-chain/sync/service.go
index 9686970a2..554d86a83 100644
--- a/beacon-chain/sync/service.go
+++ b/beacon-chain/sync/service.go
@@ -42,6 +42,8 @@ const seenExitSize = 100
 const seenAttesterSlashingSize = 100
 const seenProposerSlashingSize = 100
 
+const syncMetricsInterval = 10 * time.Second
+
 // Config to set up the regular sync service.
 type Config struct {
 	P2P p2p.P2P
@@ -158,7 +160,7 @@ func (s *Service) Start() {
 	s.resyncIfBehind()
 
 	// Update sync metrics.
-	runutil.RunEvery(s.ctx, time.Second*10, s.updateMetrics)
+	runutil.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
 }
 
 // Stop the regular sync service.
diff --git a/endtoend/components/eth1.go b/endtoend/components/eth1.go
index 3b8a796c8..0a19bd4da 100644
--- a/endtoend/components/eth1.go
+++ b/endtoend/components/eth1.go
@@ -25,6 +25,9 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/params"
 )
 
+const timeGapPerTX = 100 * time.Millisecond
+const timeGapPerMiningTX = 250 * time.Millisecond
+
 // StartEth1Node starts an eth1 local dev chain and deploys a deposit contract.
 func StartEth1Node(t *testing.T) (string, int) {
 	binaryPath, found := bazel.FindBinary("cmd/geth", "geth")
@@ -117,7 +120,7 @@ func StartEth1Node(t *testing.T) (string, int) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		time.Sleep(100 * time.Millisecond)
+		time.Sleep(timeGapPerTX)
 	}
 
 	// Advancing the blocks another eth1follow distance to prevent issues reading the chain.
@@ -153,7 +156,7 @@ func mineBlocks(web3 *ethclient.Client, keystore *keystore.Key, blocksToMake uin
 			return err
 		}
 		nonce++
-		time.Sleep(250 * time.Microsecond)
+		time.Sleep(timeGapPerMiningTX)
 		block, err = web3.BlockByNumber(context.Background(), nil)
 		if err != nil {
 			return err
diff --git a/endtoend/components/validator.go b/endtoend/components/validator.go
index 5345c8245..73a1077d6 100644
--- a/endtoend/components/validator.go
+++ b/endtoend/components/validator.go
@@ -25,6 +25,8 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/testutil"
 )
 
+const depositGasLimit = 4000000
+
 // StartValidatorClients starts the configured amount of validators, also sending and mining their validator deposits.
 // Should only be used on initialization.
 func StartValidatorClients(t *testing.T, config *types.E2EConfig, keystorePath string) []int {
@@ -125,7 +127,7 @@ func SendDeposits(web3 *ethclient.Client, keystoreBytes []byte, num int, offset
 	}
 	depositInGwei := big.NewInt(int64(params.BeaconConfig().MaxEffectiveBalance))
 	txOps.Value = depositInGwei.Mul(depositInGwei, big.NewInt(int64(params.BeaconConfig().GweiPerEth)))
-	txOps.GasLimit = 4000000
+	txOps.GasLimit = depositGasLimit
 	nonce, err := web3.PendingNonceAt(context.Background(), txOps.From)
 	if err != nil {
 		return err
diff --git a/endtoend/evaluators/metrics.go b/endtoend/evaluators/metrics.go
index 6858421b3..380cf536c 100644
--- a/endtoend/evaluators/metrics.go
+++ b/endtoend/evaluators/metrics.go
@@ -18,6 +18,8 @@ import (
 	"google.golang.org/grpc"
 )
 
+const maxMemStatsBytes = 100000000 // 100 MB.
+
 // MetricsCheck performs a check on metrics to make sure caches are functioning, and
 // overall health is good. Not checking the first epoch so the sample size isn't too small.
 var MetricsCheck = types.Evaluator{
@@ -43,7 +45,7 @@ var metricLessThanTests = []equalityTest{
 	{
 		name: "memory usage",
 		topic: "go_memstats_alloc_bytes",
-		value: 100000000, // 100 Mb
+		value: maxMemStatsBytes,
 	},
 }
 
diff --git a/shared/event/subscription.go b/shared/event/subscription.go
index 1a77a31ca..903e13892 100644
--- a/shared/event/subscription.go
+++ b/shared/event/subscription.go
@@ -24,6 +24,10 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/mclockutil"
 )
 
+// waitQuotient divides the max backoff time to derive the initial wait time, so that multiple
+// retry attempts can fit within the full backoff period.
+const waitQuotient = 10
+
 // Subscription represents a stream of events. The carrier of the events is typically a
 // channel, but isn't part of the interface.
 //
@@ -96,7 +100,7 @@ func (s *funcSub) Err() <-chan error {
 // based on the error rate, but will never exceed backoffMax.
 func Resubscribe(backoffMax time.Duration, fn ResubscribeFunc) Subscription {
 	s := &resubscribeSub{
-		waitTime: backoffMax / 10,
+		waitTime: backoffMax / waitQuotient,
 		backoffMax: backoffMax,
 		fn: fn,
 		err: make(chan error),
@@ -187,7 +191,7 @@ func (s *resubscribeSub) waitForError(sub Subscription) bool {
 
 func (s *resubscribeSub) backoffWait() bool {
 	if time.Duration(mclockutil.Now()-s.lastTry) > s.backoffMax {
-		s.waitTime = s.backoffMax / 10
+		s.waitTime = s.backoffMax / waitQuotient
 	} else {
 		s.waitTime *= 2
 		if s.waitTime > s.backoffMax {
diff --git a/tools/analyzers/cryptorand/testdata/rand_new.go b/tools/analyzers/cryptorand/testdata/rand_new.go
index aa82713f7..597804146 100644
--- a/tools/analyzers/cryptorand/testdata/rand_new.go
+++ b/tools/analyzers/cryptorand/testdata/rand_new.go
@@ -15,6 +15,6 @@ func UseRandNew() {
 }
 
 func UseWithoutSeed() {
-	assignedIndex := rand.Intn(int(128))
+	assignedIndex := rand.Intn(128)
 	_ = assignedIndex
 }
diff --git a/tools/bootnode/bootnode.go b/tools/bootnode/bootnode.go
index c67c2d8e1..a374e0b1f 100644
--- a/tools/bootnode/bootnode.go
+++ b/tools/bootnode/bootnode.go
@@ -48,7 +48,6 @@ var (
 	logFileName = flag.String("log-file", "", "Specify log filename, relative or absolute")
 	privateKey = flag.String("private", "", "Private key to use for peer ID")
 	discv5port = flag.Int("discv5-port", 4000, "Port to listen for discv5 connections")
-	kademliaPort = flag.Int("kad-port", 4500, "Port to listen for connections to kad DHT")
 	metricsPort = flag.Int("metrics-port", 5000, "Port to listen for connections")
 	externalIP = flag.String("external-ip", "", "External IP for the bootnode")
 	disableKad = flag.Bool("disable-kad", false, "Disables the bootnode from running kademlia dht")
@@ -59,9 +58,6 @@ var (
 	})
 )
 
-const dhtProtocol = "/prysm/0.0.0/dht"
-const defaultIP = "127.0.0.1"
-
 type handler struct {
 	listener *discover.UDPv5
 }
diff --git a/tools/faucet/server.go b/tools/faucet/server.go
index 8d5659b35..cf6369dce 100644
--- a/tools/faucet/server.go
+++ b/tools/faucet/server.go
@@ -30,6 +30,9 @@ var ipCounter = make(map[string]int)
 var fundingLock sync.Mutex
 var pruneDuration = time.Hour * 4
 
+const txGasLimit = 40000
+const fundingAmountWei = "32500000000000000000" // 32.5 ETH in Wei.
+
 type faucetServer struct {
 	r recaptcha.Recaptcha
 	client *ethclient.Client
@@ -40,7 +43,7 @@ type faucetServer struct {
 
 func init() {
 	var ok bool
-	fundingAmount, ok = new(big.Int).SetString("32500000000000000000", 10)
+	fundingAmount, ok = new(big.Int).SetString(fundingAmountWei, 10)
 	if !ok {
 		log.Fatal("could not set funding amount")
 	}
@@ -157,7 +160,7 @@ func (s *faucetServer) fundAndWait(to common.Address) (string, error) {
 		return "", err
 	}
 
-	tx := types.NewTransaction(nonce, to, fundingAmount, 40000, big.NewInt(1*params.GWei), nil /*data*/)
+	tx := types.NewTransaction(nonce, to, fundingAmount, txGasLimit, big.NewInt(1*params.GWei), nil /*data*/)
 	tx, err = types.SignTx(tx, types.NewEIP155Signer(big.NewInt(5)), s.pk)
 	if err != nil {
diff --git a/validator/accounts/v1/status.go b/validator/accounts/v1/status.go
index 0f9ac28f4..c61fc6fb9 100644
--- a/validator/accounts/v1/status.go
+++ b/validator/accounts/v1/status.go
@@ -13,6 +13,9 @@ import (
 	"go.opencensus.io/trace"
 )
 
+// statusTimeout defines a period after which a request to fetch account statuses is cancelled.
+const statusTimeout = 30 * time.Second
+
 // ValidatorStatusMetadata holds all status information about a validator.
 type ValidatorStatusMetadata struct {
 	PublicKey []byte
@@ -39,7 +42,7 @@ func FetchAccountStatuses(
 ) ([]ValidatorStatusMetadata, error) {
 	ctx, span := trace.StartSpan(ctx, "accounts.FetchAccountStatuses")
 	defer span.End()
-	ctx, cancel := context.WithTimeout(ctx, 30*time.Second /* Cancel if running over thirty seconds. */)
+	ctx, cancel := context.WithTimeout(ctx, statusTimeout)
 	defer cancel()
 
 	req := &ethpb.MultipleValidatorStatusRequest{PublicKeys: pubKeys}
diff --git a/validator/main.go b/validator/main.go
index 273b4ebe2..11ab5b30f 100644
--- a/validator/main.go
+++ b/validator/main.go
@@ -34,6 +34,9 @@ import (
 	"google.golang.org/grpc"
 )
 
+// connTimeout defines a period after which the connection to the beacon node is cancelled.
+const connTimeout = 10 * time.Second
+
 var log = logrus.WithField("prefix", "main")
 
 func startNode(ctx *cli.Context) error {
@@ -183,8 +186,7 @@ contract in order to activate the validator client`,
 		if err != nil {
 			return err
 		}
-		ctx, cancel := context.WithTimeout(
-			context.Background(), 10*time.Second /* Cancel if cannot connect to beacon node in 10 seconds. */)
+		ctx, cancel := context.WithTimeout(context.Background(), connTimeout)
 		defer cancel()
 		dialOpts := client.ConstructDialOptions(
 			cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
diff --git a/validator/node/node.go b/validator/node/node.go
index bd0bd45bf..d3c9dbfd0 100644
--- a/validator/node/node.go
+++ b/validator/node/node.go
@@ -160,10 +160,10 @@ func (s *ValidatorClient) Start() {
 		for i := 10; i > 0; i-- {
 			<-sigc
 			if i > 1 {
-				log.Info("Already shutting down, interrupt more to panic.", "times", i-1)
+				log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic.")
 			}
 		}
-		panic("Panic closing the sharding validator")
+		panic("Panic closing the validator client")
 	}()
 
 	// Wait for stop channel to be closed.
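A minimal, standalone Go sketch of the two patterns this patch applies throughout: magic numbers hoisted into named constants, and logrus structured fields (WithField) in place of extra positional log arguments. The package, constant, and field names below are illustrative only, not identifiers introduced by the patch.

```go
package main

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"
)

// A named constant documents the value's intent once and keeps call sites consistent.
// connTimeout here is a hypothetical dial timeout, mirroring the style used in the diff.
const connTimeout = 10 * time.Second

var log = logrus.WithField("prefix", "example")

func main() {
	// WithField records "times" as a structured field. The replaced form,
	// log.Info("msg", "times", n), concatenated the extra arguments into the message text.
	log.WithField("times", 3).Info("Already shutting down, interrupt more to panic")

	// The named constant replaces an inline 10*time.Second literal at the call site.
	ctx, cancel := context.WithTimeout(context.Background(), connTimeout)
	defer cancel()
	_ = ctx
}
```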