Enable prealloc linter (#5177)

* Enable prealloc linter

* Set initial slice len to 0
Håvard Anda Estensen authored 2022-08-26 05:04:36 +02:00, committed by GitHub
parent 6371cd7b5b
commit 7c15ed59e4
26 changed files with 37 additions and 39 deletions
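As context for the review, here is a minimal, self-contained sketch (not taken from the diff) of the pattern the prealloc linter reports, the fix applied throughout this commit, and why the second bullet matters, i.e. why the initial length has to be 0 rather than len(files):

```go
package main

import "fmt"

func main() {
	files := []string{"a.seg", "b.seg", "c.seg"}

	// Before: prealloc flags this, because the backing array is grown
	// repeatedly even though the final size is known up front.
	var before []string
	for _, f := range files {
		before = append(before, f)
	}

	// Wrong fix: a non-zero initial length plus append leaves zero values
	// at the front of the slice.
	padded := make([]string, len(files))
	for _, f := range files {
		padded = append(padded, f)
	}
	fmt.Println(len(padded)) // 6: three empty strings, then the data

	// The pattern used in this commit: length 0, capacity len(files),
	// so append fills the reserved space without reallocating.
	after := make([]string, 0, len(files))
	for _, f := range files {
		after = append(after, f)
	}
	fmt.Println(before, after) // [a.seg b.seg c.seg] [a.seg b.seg c.seg]
}
```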

View File

@@ -16,6 +16,7 @@ linters:
- bodyclose
- gosec
# - forcetypeassert
+- prealloc
linters-settings:
gocritic:

View File

@@ -42,7 +42,7 @@ func AllTorrentPaths(dir string) ([]string, error) {
if err != nil {
return nil, err
}
-var res []string
+res := make([]string, 0, len(files))
for _, f := range files {
torrentFilePath := filepath.Join(dir, f)
res = append(res, torrentFilePath)
@@ -55,7 +55,7 @@ func AllTorrentFiles(dir string) ([]string, error) {
if err != nil {
return nil, err
}
-var res []string
+res := make([]string, 0, len(files))
for _, f := range files {
if !snap.IsCorrectFileName(f.Name()) {
continue
@@ -79,7 +79,7 @@ func seedableSegmentFiles(dir string) ([]string, error) {
if err != nil {
return nil, err
}
-var res []string
+res := make([]string, 0, len(files))
for _, f := range files {
if !snap.IsCorrectFileName(f.Name()) {
continue

View File

@@ -131,7 +131,7 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*bor.Valida
v := oldValidatorSet
oldVals := v.Validators
-var changes []*bor.Validator
+changes := make([]*bor.Validator, 0, len(oldVals))
for _, ov := range oldVals {
if f, ok := validatorContains(newVals, ov); ok {
ov.VotingPower = f.VotingPower

View File

@@ -34,7 +34,7 @@ func blockNumbersFromTraces(t *testing.T, b []byte) []int {
if elems, err = v.Array(); err != nil {
t.Fatalf("expected array in the response: %v", err)
}
-var numbers []int
+numbers := make([]int, 0, len(elems))
for _, elem := range elems {
bn := elem.GetInt("blockNumber")
numbers = append(numbers, bn)

View File

@@ -86,7 +86,7 @@ func (api *StarknetImpl) Call(ctx context.Context, request StarknetCallRequest,
return nil, err
}
-var result []string
+result := make([]string, 0, len(response.Result))
for _, v := range response.Result {
s := wrapperspb.String("")
v.UnmarshalTo(s)

View File

@@ -131,7 +131,7 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*bor.Valida
v := oldValidatorSet
oldVals := v.Validators
-var changes []*bor.Validator
+changes := make([]*bor.Validator, 0, len(oldVals))
for _, ov := range oldVals {
if f, ok := validatorContains(newVals, ov); ok {
ov.VotingPower = f.VotingPower

View File

@@ -34,7 +34,7 @@ func blockNumbersFromTraces(t *testing.T, b []byte) []int {
if elems, err = v.Array(); err != nil {
t.Fatalf("expected array in the response: %v", err)
}
-var numbers []int
+numbers := make([]int, 0, len(elems))
for _, elem := range elems {
bn := elem.GetInt("blockNumber")
numbers = append(numbers, bn)

View File

@@ -86,7 +86,7 @@ func (api *StarknetImpl) Call(ctx context.Context, request StarknetCallRequest,
return nil, err
}
-var result []string
+result := make([]string, 0, len(response.Result))
for _, v := range response.Result {
s := wrapperspb.String("")
v.UnmarshalTo(s)

View File

@@ -1604,9 +1604,9 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
return nil
}
// Otherwise resolve absolute paths and return them
-var preloads []string
-for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") {
+files := strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",")
+preloads := make([]string, 0, len(files))
+for _, file := range files {
preloads = append(preloads, strings.TrimSpace(file))
}
return preloads

View File

@@ -417,7 +417,7 @@ func NewAuRa(config *params.AuRaConfig, db kv.RwDB, ourSigningAddress common.Add
if auraParams.StartStep != nil {
initialStep = *auraParams.StartStep
}
-var durations []StepDurationInfo
+durations := make([]StepDurationInfo, 0, 1+len(auraParams.StepDurations))
durInfo := StepDurationInfo{
TransitionStep: 0,
TransitionTimestamp: 0,

View File

@@ -1137,7 +1137,7 @@ func (c *Bor) fetchAndCommitSpan(
}
// get validators bytes
-var validators []MinimalVal
+validators := make([]MinimalVal, 0, len(heimdallSpan.ValidatorSet.Validators))
for _, val := range heimdallSpan.ValidatorSet.Validators {
validators = append(validators, val.MinimalVal())
}
@@ -1147,7 +1147,7 @@ func (c *Bor) fetchAndCommitSpan(
}
// get producers bytes
-var producers []MinimalVal
+producers := make([]MinimalVal, 0, len(heimdallSpan.SelectedProducers))
for _, val := range heimdallSpan.SelectedProducers {
producers = append(producers, val.MinimalVal())
}
@@ -1330,7 +1330,7 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*Validator)
v := oldValidatorSet
oldVals := v.Validators
-var changes []*Validator
+changes := make([]*Validator, 0, len(oldVals))
for _, ov := range oldVals {
if f, ok := validatorContains(newVals, ov); ok {
ov.VotingPower = f.VotingPower

View File

@@ -37,7 +37,7 @@ func ConvertTo32(input []byte) (output [32]byte, err error) {
}
func Convert(input [][32]byte) [][]byte {
-var output [][]byte
+output := make([][]byte, 0, len(input))
for _, in := range input {
newInput := make([]byte, len(in[:]))
copy(newInput, in[:])

View File

@@ -24,7 +24,7 @@ import (
func lexAll(src string) []token {
ch := Lex([]byte(src), false)
-var tokens []token
+var tokens []token // nolint:prealloc
for i := range ch {
tokens = append(tokens, i)
}

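The nolint directive above marks the one case the linter cannot help with: the tokens come from a channel, so the final count is unknown until the channel is drained and there is no sensible capacity to reserve. A standalone sketch of that situation (hypothetical producer, not the erigon lexer):

```go
package main

import "fmt"

// produce sends an unpredictable number of values and closes the channel.
func produce() <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < 7; i++ {
			ch <- i
		}
	}()
	return ch
}

func main() {
	// A channel has no length to preallocate from, so the append-and-grow
	// pattern is the reasonable choice here; hence the nolint:prealloc.
	var values []int // nolint:prealloc
	for v := range produce() {
		values = append(values, v)
	}
	fmt.Println(values)
}
```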
View File

@@ -10,7 +10,7 @@ import (
func verifyAddrs(t *testing.T, s *IntraBlockState, astrings ...string) {
t.Helper()
// convert to common.Address form
-var addresses []common.Address
+addresses := make([]common.Address, 0, len(astrings))
var addressMap = make(map[common.Address]struct{})
for _, astring := range astrings {
address := common.HexToAddress(astring)
@@ -37,7 +37,7 @@ func verifySlots(t *testing.T, s *IntraBlockState, addrString string, slotString
}
var address = common.HexToAddress(addrString)
// convert to common.Hash form
-var slots []common.Hash
+slots := make([]common.Hash, 0, len(slotStrings))
var slotMap = make(map[common.Hash]struct{})
for _, slotString := range slotStrings {
s := common.HexToHash(slotString)

View File

@@ -866,7 +866,7 @@ func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) {
// Protocols returns all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
-var protocols []p2p.Protocol
+protocols := make([]p2p.Protocol, 0, len(s.sentryServers))
for i := range s.sentryServers {
protocols = append(protocols, s.sentryServers[i].Protocol)
}

View File

@@ -66,7 +66,7 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres
// Flatten the address and topic filter clauses into a single bloombits filter
// system. Since the bloombits are not positional, nil topics are permitted,
// which get flattened into a nil byte slice.
-var filters [][][]byte
+filters := make([][][]byte, 0, len(topics))
if len(addresses) > 0 {
filter := make([][]byte, len(addresses))
for i, address := range addresses {

View File

@@ -133,10 +133,9 @@ func AnswerGetBlockHeadersQuery(db kv.Tx, query *GetBlockHeadersPacket, blockRea
func AnswerGetBlockBodiesQuery(db kv.Tx, query GetBlockBodiesPacket) []rlp.RawValue { //nolint:unparam
// Gather blocks until the fetch or network limits is reached
-var (
-bytes int
-bodies []rlp.RawValue
-)
+var bytes int
+bodies := make([]rlp.RawValue, 0, len(query))
for lookups, hash := range query {
if bytes >= softResponseLimit || len(bodies) >= MaxBodiesServe ||
lookups >= 2*MaxBodiesServe {

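In the hunk above, len(query) is only an upper bound, since the loop stops early once softResponseLimit or MaxBodiesServe is hit. That is fine: capacity is just a reservation, and a slice's length only reflects what was actually appended, as this small sketch (unrelated to the erigon code) shows:

```go
package main

import "fmt"

func main() {
	requests := []int{10, 20, 30, 40, 50}
	const limit = 2

	// Reserve space for the worst case; stop early once the limit is reached.
	responses := make([]int, 0, len(requests))
	for _, r := range requests {
		if len(responses) >= limit {
			break
		}
		responses = append(responses, r)
	}
	fmt.Println(len(responses), cap(responses)) // 2 5 -- unused capacity is harmless
}
```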
View File

@@ -1326,7 +1326,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error {
// send all hashes to the Downloader service
preverified := snapcfg.KnownCfg(cfg.chainConfig.ChainName, snInDB).Preverified
-var downloadRequest []snapshotsync.DownloadRequest
+downloadRequest := make([]snapshotsync.DownloadRequest, 0, len(preverified)+len(missingSnapshots))
// build all download requests
// builds preverified snapshots request
for _, p := range preverified {

View File

@@ -365,7 +365,7 @@ func (s *Service) login(conn *connWrapper) error {
// Construct and send the login authentication
// infos := s.server.NodeInfo()
-var protocols []string
+protocols := make([]string, 0, len(s.servers))
for _, srv := range s.servers {
protocols = append(protocols, fmt.Sprintf("%s/%d", srv.Protocol.Name, srv.Protocol.Version))
}
@@ -542,9 +542,7 @@ func (s *Service) assembleBlockStats(block *types.Block, td *big.Int) *blockStat
td = common.Big0
}
// Gather the block infos from the local blockchain
-var (
-txs []txStats
-)
+txs := make([]txStats, 0, len(block.Transactions()))
for _, tx := range block.Transactions() {
txs = append(txs, txStats{tx.Hash()})
}

View File

@@ -313,7 +313,7 @@ func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node {
return nil
}
// Interpret the list as a discovery node array
-var nodes []*enode.Node
+nodes := make([]*enode.Node, 0, len(nodelist))
for _, url := range nodelist {
if url == "" {
continue

View File

@@ -159,7 +159,7 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
-var nodes []*enode.Node
+nodes := make([]*enode.Node, 0, len(&tab.buckets))
for _, b := range &tab.buckets {
for _, n := range b.entries {
nodes = append(nodes, unwrapNode(n))

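A note on len(&tab.buckets) above: the range &tab.buckets loop indicates buckets is a fixed-size array, and len applied to a pointer to an array yields the array's length, so the capacity reserved here is one slot per bucket rather than per node — a rough hint, not an exact count. A minimal sketch of that len behaviour (names are illustrative, not the discover package's):

```go
package main

import "fmt"

func main() {
	// A fixed-size array of buckets, each holding a variable number of entries.
	var buckets [17][]string
	buckets[0] = []string{"n1", "n2", "n3"}

	// len on a pointer to an array is the array length (17), not the entry count.
	nodes := make([]string, 0, len(&buckets))
	for i := range &buckets {
		nodes = append(nodes, buckets[i]...)
	}
	fmt.Println(len(nodes), cap(nodes)) // 3 17
}
```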
View File

@@ -790,7 +790,7 @@ func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr *ne
// collectTableNodes creates a FINDNODE result set for the given distances.
func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*enode.Node {
-var nodes []*enode.Node
+nodes := make([]*enode.Node, 0, len(distances))
var processed = make(map[uint]struct{})
for _, dist := range distances {
// Reject duplicate / invalid distances.

View File

@@ -172,7 +172,7 @@ func TestUDPv5_findnodeHandling(t *testing.T) {
// This request gets all the distance-249 nodes and some more at 248 because
// the bucket at 249 is not full.
test.packetIn(&v5wire.Findnode{ReqID: []byte{5}, Distances: []uint{249, 248}})
-var nodes []*enode.Node
+nodes := make([]*enode.Node, 0, len(nodes249)+len(nodes248[:10]))
nodes = append(nodes, nodes249...)
nodes = append(nodes, nodes248[:10]...)
test.expectNodes([]byte{5}, 5, nodes)

View File

@@ -104,7 +104,7 @@ func (t *Tree) Links() []string {
// Nodes returns all nodes contained in the tree.
func (t *Tree) Nodes() []*enode.Node {
-var nodes []*enode.Node
+nodes := make([]*enode.Node, 0, len(t.entries))
for _, e := range t.entries {
if ee, ok := e.(*enrEntry); ok {
nodes = append(nodes, ee.node)

View File

@@ -490,7 +490,7 @@ type PeerInfo struct {
// Info gathers and returns a collection of metadata known about a peer.
func (p *Peer) Info() *PeerInfo {
// Gather the protocol capabilities
-var caps []string
+caps := make([]string, 0, len(p.Caps()))
for _, cap := range p.Caps() {
caps = append(caps, cap.String())
}

View File

@@ -598,7 +598,7 @@ func (s *RoSnapshots) ReopenFolder() error {
if err != nil {
return err
}
-var list []string
+list := make([]string, 0, len(files))
for _, f := range files {
_, fName := filepath.Split(f.Path)
list = append(list, fName)
@@ -1085,7 +1085,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25
notifier.OnNewSnapshot()
}
-var downloadRequest []DownloadRequest
+downloadRequest := make([]DownloadRequest, 0, len(rangesToMerge))
for i := range rangesToMerge {
downloadRequest = append(downloadRequest, NewDownloadRequest(&rangesToMerge[i], "", ""))
}