Move hard-coded constants to global consts (#6467)

* Move hard-coded constants to global consts
* Fixes
* Merge branch 'master' into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Comments
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge refs/heads/master into remove-hard-constants
* Merge branch 'master' of github.com:prysmaticlabs/prysm into remove-hard-constants
Ivan Martinez 2020-07-03 11:25:32 -04:00 committed by GitHub
parent d1f1628478
commit c69b3f568e
19 changed files with 57 additions and 40 deletions


@@ -327,7 +327,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) {
totalBlocks := make([]*ethpb.SignedBeaconBlock, slots)
for i := uint64(0); i < slots; i++ {
b := testutil.NewBeaconBlock()
-b.Block.Slot = uint64(i)
+b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
totalBlocks[i] = b
}


@@ -233,7 +233,7 @@ func (b *BeaconNode) Start() {
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
log.Info("Already shutting down, interrupt more to panic", "times", i-1)
log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic")
}
}
panic("Panic closing the beacon node")

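The change above is more than cosmetic: logrus's `Info` is variadic over message fragments, so the old call folds "times" and the count into the message text rather than emitting a structured field. A minimal sketch of the difference, assuming github.com/sirupsen/logrus (the logger Prysm aliases as `log`):

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	i := 3
	// Old style: logrus concatenates the extra args into the message
	// string; "times" and 2 are not emitted as a structured field.
	log.Info("Already shutting down, interrupt more to panic", "times", i-1)

	// New style: "times" becomes a proper key=value field, e.g.
	// level=info msg="Already shutting down, interrupt more to panic" times=2
	log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic")
}
```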

@@ -4,7 +4,6 @@ import (
"crypto/ecdsa"
"fmt"
"net"
"time"
"github.com/libp2p/go-libp2p"
noise "github.com/libp2p/go-libp2p-noise"
@@ -15,15 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/version"
)
-const (
-// Period that we allocate each new peer before we mark them as valid
-// for trimming.
-gracePeriod = 2 * time.Minute
-// Buffer for the number of peers allowed to connect above max peers before the
-// connection manager begins trimming them.
-peerBuffer = 5
-)
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
cfg := s.cfg


@@ -54,6 +54,9 @@ const lookupLimit = 15
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
const maxBadResponses = 5
+// Exclusion list cache config values.
+const cacheNumCounters, cacheMaxCost, cacheBufferItems = 1000, 1000, 64
// Service for managing peer to peer (p2p) networking.
type Service struct {
started bool
@@ -83,9 +86,9 @@ func NewService(cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(context.Background())
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
cache, err := ristretto.NewCache(&ristretto.Config{
-NumCounters: 1000,
-MaxCost: 1000,
-BufferItems: 64,
+NumCounters: cacheNumCounters,
+MaxCost: cacheMaxCost,
+BufferItems: cacheBufferItems,
})
if err != nil {
return nil, err

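For reference, the three knobs now named by consts size different parts of the ristretto cache: NumCounters is how many keys the admission policy tracks frequencies for, MaxCost is the total capacity in cost units, and BufferItems sizes the internal access buffers. A self-contained sketch of the same construction plus basic use, assuming the github.com/dgraph-io/ristretto API of this era:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

const cacheNumCounters, cacheMaxCost, cacheBufferItems = 1000, 1000, 64

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: cacheNumCounters, // keys tracked for admission frequency
		MaxCost:     cacheMaxCost,     // total capacity in cost units
		BufferItems: cacheBufferItems, // keys per internal access buffer
	})
	if err != nil {
		panic(err)
	}
	cache.Set("peer-1", "excluded", 1) // cost 1 against MaxCost
	time.Sleep(10 * time.Millisecond)  // Set is buffered/async; let it drain
	if v, ok := cache.Get("peer-1"); ok {
		fmt.Println(v) // excluded
	}
}
```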

@@ -7,6 +7,8 @@ import (
host "github.com/libp2p/go-libp2p-host"
)
+const timeoutMax = 30 * time.Second
// ensurePeerConnections will attempt to reestablish connection to the peers
// if there are currently no connections to that peer.
func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
@@ -26,7 +28,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
c := h.Network().ConnsToPeer(peer.ID)
if len(c) == 0 {
log.WithField("peer", peer.ID).Debug("No connections to peer, reconnecting")
-ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ctx, cancel := context.WithTimeout(ctx, timeoutMax)
defer cancel()
if err := h.Connect(ctx, *peer); err != nil {
log.WithField("peer", peer.ID).WithField("addrs", peer.Addrs).WithError(err).Errorf("Failed to reconnect to peer")

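The reconnect loop bounds every dial with timeoutMax via context.WithTimeout. A generic sketch of the pattern, with a hypothetical connect standing in for h.Connect:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

const timeoutMax = 30 * time.Second

// connect is a hypothetical stand-in for h.Connect: it gives up as soon
// as the context's deadline expires.
func connect(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the dial succeeds quickly
		return nil
	case <-ctx.Done():
		return errors.New("dial aborted: " + ctx.Err().Error())
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), timeoutMax)
	defer cancel() // release the timer even if the dial succeeds early
	fmt.Println(connect(ctx)) // <nil>
}
```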

@@ -45,6 +45,8 @@ import (
"google.golang.org/grpc/reflection"
)
+const attestationBufferSize = 100
var log logrus.FieldLogger
func init() {
@@ -275,8 +277,8 @@ func (s *Service) Start() {
Broadcaster: s.p2p,
StateGen: s.stateGen,
SyncChecker: s.syncService,
-ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, 100),
-CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100),
+ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, attestationBufferSize),
+CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, attestationBufferSize),
}
ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)


@@ -3,6 +3,7 @@ package sync
import (
"context"
"fmt"
"time"
libp2pcore "github.com/libp2p/go-libp2p-core"
"github.com/libp2p/go-libp2p-core/helpers"
@@ -24,6 +25,10 @@ var goodByes = map[uint64]string{
codeGenericError: "fault/error",
}
+// Add a short delay to allow the stream to flush before resetting it.
+// There is still a chance that the peer won't receive the message.
+const flushDelay = 50 * time.Millisecond
// goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
func (s *Service) goodbyeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
defer func() {


@@ -228,9 +228,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
if err := stream.Close(); err != nil { // Close before disconnecting.
log.WithError(err).Error("Failed to close stream")
}
-// Add a short delay to allow the stream to flush before closing the connection.
-// There is still a chance that the peer won't receive the message.
-time.Sleep(50 * time.Millisecond)
+time.Sleep(flushDelay)
if err := s.p2p.Disconnect(stream.Conn().RemotePeer()); err != nil {
log.WithError(err).Error("Failed to disconnect from peer")
}


@@ -42,6 +42,8 @@ const seenExitSize = 100
const seenAttesterSlashingSize = 100
const seenProposerSlashingSize = 100
+const syncMetricsInterval = 10 * time.Second
// Config to set up the regular sync service.
type Config struct {
P2P p2p.P2P
@@ -158,7 +160,7 @@ func (s *Service) Start() {
s.resyncIfBehind()
// Update sync metrics.
-runutil.RunEvery(s.ctx, time.Second*10, s.updateMetrics)
+runutil.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
}
// Stop the regular sync service.

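runutil.RunEvery is Prysm's periodic-task helper; naming the interval makes the metrics cadence visible at the top of the file. A sketch of what such a helper typically looks like (an illustration of the pattern, not Prysm's actual implementation):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const syncMetricsInterval = 10 * time.Second

// runEvery calls fn once per period until the context is cancelled.
func runEvery(ctx context.Context, period time.Duration, fn func()) {
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fn()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	runEvery(ctx, syncMetricsInterval, func() { fmt.Println("updating sync metrics") })
	time.Sleep(25 * time.Second) // let a couple of ticks fire
	cancel()
}
```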

@@ -25,6 +25,9 @@ import (
"github.com/prysmaticlabs/prysm/shared/params"
)
+const timeGapPerTX = 100 * time.Millisecond
+const timeGapPerMiningTX = 250 * time.Millisecond
// StartEth1Node starts an eth1 local dev chain and deploys a deposit contract.
func StartEth1Node(t *testing.T) (string, int) {
binaryPath, found := bazel.FindBinary("cmd/geth", "geth")
@@ -117,7 +120,7 @@ func StartEth1Node(t *testing.T) (string, int) {
if err != nil {
t.Fatal(err)
}
-time.Sleep(100 * time.Millisecond)
+time.Sleep(timeGapPerTX)
}
// Advancing the blocks another eth1follow distance to prevent issues reading the chain.
@@ -153,7 +156,7 @@ func mineBlocks(web3 *ethclient.Client, keystore *keystore.Key, blocksToMake uin
return err
}
nonce++
-time.Sleep(250 * time.Microsecond)
+time.Sleep(timeGapPerMiningTX)
block, err = web3.BlockByNumber(context.Background(), nil)
if err != nil {
return err


@@ -25,6 +25,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil"
)
+const depositGasLimit = 4000000
// StartValidatorClients starts the configured amount of validators, also sending and mining their validator deposits.
// Should only be used on initialization.
func StartValidatorClients(t *testing.T, config *types.E2EConfig, keystorePath string) []int {
@@ -125,7 +127,7 @@ func SendDeposits(web3 *ethclient.Client, keystoreBytes []byte, num int, offset
}
depositInGwei := big.NewInt(int64(params.BeaconConfig().MaxEffectiveBalance))
txOps.Value = depositInGwei.Mul(depositInGwei, big.NewInt(int64(params.BeaconConfig().GweiPerEth)))
-txOps.GasLimit = 4000000
+txOps.GasLimit = depositGasLimit
nonce, err := web3.PendingNonceAt(context.Background(), txOps.From)
if err != nil {
return err


@@ -18,6 +18,8 @@ import (
"google.golang.org/grpc"
)
+const maxMemStatsBytes = 100000000 // 100 MB.
// MetricsCheck performs a check on metrics to make sure caches are functioning, and
// overall health is good. Not checking the first epoch so the sample size isn't too small.
var MetricsCheck = types.Evaluator{
@@ -43,7 +45,7 @@ var metricLessThanTests = []equalityTest{
{
name: "memory usage",
topic: "go_memstats_alloc_bytes",
-value: 100000000, // 100 Mb
+value: maxMemStatsBytes,
},
}

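The evaluator scrapes Prometheus text-format metrics and asserts go_memstats_alloc_bytes stays under the named ceiling. A rough, self-contained sketch of such a less-than check (an assumed shape, not Prysm's actual evaluator code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const maxMemStatsBytes = 100000000 // 100 MB.

// metricBelow scans Prometheus text output for a metric and checks
// that its value is under max.
func metricBelow(metrics, topic string, max float64) (bool, error) {
	for _, line := range strings.Split(metrics, "\n") {
		if strings.HasPrefix(line, topic+" ") {
			v, err := strconv.ParseFloat(strings.Fields(line)[1], 64)
			if err != nil {
				return false, err
			}
			return v < max, nil
		}
	}
	return false, fmt.Errorf("metric %q not found", topic)
}

func main() {
	sample := "go_memstats_alloc_bytes 7.5e+07"
	ok, err := metricBelow(sample, "go_memstats_alloc_bytes", maxMemStatsBytes)
	fmt.Println(ok, err) // true <nil>
}
```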

@@ -24,6 +24,10 @@ import (
"github.com/prysmaticlabs/prysm/shared/mclockutil"
)
+// waitQuotient divides the max backoff time, in order to have N requests based on the full
+// request backoff time.
+const waitQuotient = 10
// Subscription represents a stream of events. The carrier of the events is typically a
// channel, but isn't part of the interface.
//
@@ -96,7 +100,7 @@ func (s *funcSub) Err() <-chan error {
// based on the error rate, but will never exceed backoffMax.
func Resubscribe(backoffMax time.Duration, fn ResubscribeFunc) Subscription {
s := &resubscribeSub{
-waitTime: backoffMax / 10,
+waitTime: backoffMax / waitQuotient,
backoffMax: backoffMax,
fn: fn,
err: make(chan error),
@@ -187,7 +191,7 @@ func (s *resubscribeSub) waitForError(sub Subscription) bool {
func (s *resubscribeSub) backoffWait() bool {
if time.Duration(mclockutil.Now()-s.lastTry) > s.backoffMax {
-s.waitTime = s.backoffMax / 10
+s.waitTime = s.backoffMax / waitQuotient
} else {
s.waitTime *= 2
if s.waitTime > s.backoffMax {

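A worked example of the resubscribe backoff: with a backoffMax of 10s, the wait starts at backoffMax / waitQuotient = 1s and doubles after each failed attempt until it saturates at backoffMax:

```go
package main

import (
	"fmt"
	"time"
)

const waitQuotient = 10

func main() {
	backoffMax := 10 * time.Second
	wait := backoffMax / waitQuotient // initial wait: 1s
	for i := 0; i < 6; i++ {
		fmt.Println(wait) // 1s, 2s, 4s, 8s, 10s, 10s
		wait *= 2
		if wait > backoffMax {
			wait = backoffMax // never exceed the configured maximum
		}
	}
}
```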

@@ -15,6 +15,6 @@ func UseRandNew() {
}
func UseWithoutSeed() {
-assignedIndex := rand.Intn(int(128))
+assignedIndex := rand.Intn(128)
_ = assignedIndex
}


@@ -48,7 +48,6 @@ var (
logFileName = flag.String("log-file", "", "Specify log filename, relative or absolute")
privateKey = flag.String("private", "", "Private key to use for peer ID")
discv5port = flag.Int("discv5-port", 4000, "Port to listen for discv5 connections")
-kademliaPort = flag.Int("kad-port", 4500, "Port to listen for connections to kad DHT")
metricsPort = flag.Int("metrics-port", 5000, "Port to listen for connections")
externalIP = flag.String("external-ip", "", "External IP for the bootnode")
disableKad = flag.Bool("disable-kad", false, "Disables the bootnode from running kademlia dht")
@@ -59,9 +58,6 @@
})
)
-const dhtProtocol = "/prysm/0.0.0/dht"
-const defaultIP = "127.0.0.1"
type handler struct {
listener *discover.UDPv5
}


@@ -30,6 +30,9 @@ var ipCounter = make(map[string]int)
var fundingLock sync.Mutex
var pruneDuration = time.Hour * 4
+const txGasLimit = 40000
+const fundingAmountWei = "32500000000000000000" // 32.5 ETH in Wei.
type faucetServer struct {
r recaptcha.Recaptcha
client *ethclient.Client
@@ -40,7 +43,7 @@ type faucetServer struct {
func init() {
var ok bool
fundingAmount, ok = new(big.Int).SetString("32500000000000000000", 10)
fundingAmount, ok = new(big.Int).SetString(fundingAmountWei, 10)
if !ok {
log.Fatal("could not set funding amount")
}
@@ -157,7 +160,7 @@ func (s *faucetServer) fundAndWait(to common.Address) (string, error) {
return "", err
}
-tx := types.NewTransaction(nonce, to, fundingAmount, 40000, big.NewInt(1*params.GWei), nil /*data*/)
+tx := types.NewTransaction(nonce, to, fundingAmount, txGasLimit, big.NewInt(1*params.GWei), nil /*data*/)
tx, err = types.SignTx(tx, types.NewEIP155Signer(big.NewInt(5)), s.pk)
if err != nil {

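The funding amount stays a decimal string because 32.5 ETH in Wei is 3.25×10^19, which overflows both int64 and uint64; parsing it with big.Int's SetString (base 10) is the only way to hold it exactly:

```go
package main

import (
	"fmt"
	"math/big"
)

const fundingAmountWei = "32500000000000000000" // 32.5 ETH in Wei.

func main() {
	// SetString reports failure via its second return value, which is
	// why the faucet's init() log.Fatals when ok is false.
	amount, ok := new(big.Int).SetString(fundingAmountWei, 10)
	if !ok {
		panic("could not set funding amount")
	}
	fmt.Println(amount) // 32500000000000000000
}
```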

@@ -13,6 +13,9 @@ import (
"go.opencensus.io/trace"
)
+// statusTimeout defines the period after which a request to fetch account statuses is cancelled.
+const statusTimeout = 30 * time.Second
// ValidatorStatusMetadata holds all status information about a validator.
type ValidatorStatusMetadata struct {
PublicKey []byte
@@ -39,7 +42,7 @@ func FetchAccountStatuses(
) ([]ValidatorStatusMetadata, error) {
ctx, span := trace.StartSpan(ctx, "accounts.FetchAccountStatuses")
defer span.End()
-ctx, cancel := context.WithTimeout(ctx, 30*time.Second /* Cancel if running over thirty seconds. */)
+ctx, cancel := context.WithTimeout(ctx, statusTimeout)
defer cancel()
req := &ethpb.MultipleValidatorStatusRequest{PublicKeys: pubKeys}


@@ -34,6 +34,9 @@ import (
"google.golang.org/grpc"
)
+// connTimeout defines the period after which the connection to the beacon node is cancelled.
+const connTimeout = 10 * time.Second
var log = logrus.WithField("prefix", "main")
func startNode(ctx *cli.Context) error {
@@ -183,8 +186,7 @@ contract in order to activate the validator client`,
if err != nil {
return err
}
-ctx, cancel := context.WithTimeout(
-context.Background(), 10*time.Second /* Cancel if cannot connect to beacon node in 10 seconds. */)
+ctx, cancel := context.WithTimeout(context.Background(), connTimeout)
defer cancel()
dialOpts := client.ConstructDialOptions(
cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),


@@ -160,10 +160,10 @@ func (s *ValidatorClient) Start() {
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
log.Info("Already shutting down, interrupt more to panic.", "times", i-1)
log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic.")
}
}
panic("Panic closing the sharding validator")
panic("Panic closing the validator client")
}()
// Wait for stop channel to be closed.
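Both shutdown loops (beacon node and validator client) follow the same pattern: the first interrupt starts a graceful stop, and repeated interrupts during teardown count down to a forced panic, logged as a structured field. A self-contained sketch of the pattern, with stopServices as a hypothetical stand-in for the real teardown:

```go
package main

import (
	"os"
	"os/signal"
	"syscall"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)

	<-sigc // first interrupt: begin graceful shutdown
	log.Info("Shutting down")

	done := make(chan struct{})
	go func() {
		stopServices()
		close(done)
	}()

	// While teardown runs, each further interrupt decrements the
	// countdown; if it reaches zero the process panics instead of
	// hanging forever on a stuck teardown.
	for i := 10; i > 0; i-- {
		select {
		case <-done:
			return // clean shutdown finished
		case <-sigc:
			if i > 1 {
				log.WithField("times", i-1).Info("Already shutting down, interrupt more to panic.")
			}
		}
	}
	panic("Panic closing the validator client")
}

// stopServices is a hypothetical stand-in for stopping services,
// closing the database, and so on.
func stopServices() { time.Sleep(3 * time.Second) }
```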