diff --git a/BUILD.bazel b/BUILD.bazel
index 2b7289878..77be99443 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -124,6 +124,7 @@ nogo(
         "//tools/analyzers/interfacechecker:go_default_library",
         "//tools/analyzers/ineffassign:go_default_library",
         "//tools/analyzers/properpermissions:go_default_library",
+        "//tools/analyzers/recursivelock:go_default_library",
     ] + select({
         # nogo checks that fail with coverage enabled.
         ":coverage_enabled": [],
diff --git a/beacon-chain/cache/depositcache/deposits_cache.go b/beacon-chain/cache/depositcache/deposits_cache.go
index b8eea7d89..9320b6548 100644
--- a/beacon-chain/cache/depositcache/deposits_cache.go
+++ b/beacon-chain/cache/depositcache/deposits_cache.go
@@ -180,6 +180,10 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*e
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()
 
+	return dc.allDeposits(untilBlk)
+}
+
+func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
 	var deposits []*ethpb.Deposit
 	for _, ctnr := range dc.deposits {
 		if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
@@ -249,7 +253,7 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
 	defer dc.depositsLock.RUnlock()
 
 	if dc.finalizedDeposits == nil {
-		return dc.AllDeposits(ctx, untilBlk)
+		return dc.allDeposits(untilBlk)
 	}
 
 	lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
diff --git a/beacon-chain/state/stategen/epoch_boundary_state_cache.go b/beacon-chain/state/stategen/epoch_boundary_state_cache.go
index c938859fd..e5e37929c 100644
--- a/beacon-chain/state/stategen/epoch_boundary_state_cache.go
+++ b/beacon-chain/state/stategen/epoch_boundary_state_cache.go
@@ -71,6 +71,10 @@ func (e *epochBoundaryState) getByRoot(r [32]byte) (*rootStateInfo, bool, error)
 	e.lock.RLock()
 	defer e.lock.RUnlock()
 
+	return e.getByRootLockFree(r)
+}
+
+func (e *epochBoundaryState) getByRootLockFree(r [32]byte) (*rootStateInfo, bool, error) {
 	obj, exists, err := e.rootStateCache.GetByKey(string(r[:]))
 	if err != nil {
 		return nil, false, err
@@ -106,7 +110,7 @@ func (e *epochBoundaryState) getBySlot(s types.Slot) (*rootStateInfo, bool, erro
 		return nil, false, errNotSlotRootInfo
 	}
 
-	return e.getByRoot(info.root)
+	return e.getByRootLockFree(info.root)
 }
 
 // put adds a state to the epoch boundary state cache. This method also trims the
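The depositcache and stategen changes above share one pattern: the exported method acquires the read lock once, and the original body moves into an unexported helper that assumes the caller already holds the lock. Internal callers that are already under the lock (NonFinalizedDeposits, getBySlot) call the helper directly instead of re-entering RLock through the public method. A minimal sketch of the pattern, using a hypothetical Cache type that is not part of this diff:

```go
package cache

import "sync"

// Cache stands in for DepositCache / epochBoundaryState.
type Cache struct {
	mu    sync.RWMutex
	items []int
}

// All is the exported entry point; it takes the read lock exactly once.
func (c *Cache) All() []int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.all()
}

// all contains the original body and assumes c.mu is already held by the caller.
func (c *Cache) all() []int {
	out := make([]int, len(c.items))
	copy(out, c.items)
	return out
}

// Head also holds the lock for its whole body, so it must use the helper:
// calling All here would acquire the read lock recursively.
func (c *Cache) Head() []int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if len(c.items) == 0 {
		return c.all() // safe: no second RLock
	}
	return c.items[:1]
}
```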
diff --git a/tools/analyzers/recursivelock/analyzer.go b/tools/analyzers/recursivelock/analyzer.go
index dbbf9d0d8..87018bbd3 100644
--- a/tools/analyzers/recursivelock/analyzer.go
+++ b/tools/analyzers/recursivelock/analyzer.go
@@ -5,7 +5,6 @@ package recursivelock
 import (
 	"errors"
 	"fmt"
-	"go/ast"
 	"go/token"
 	"go/types"
 
@@ -39,10 +38,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
 		(*ast.FuncDecl)(nil),
 		(*ast.FuncLit)(nil),
 		(*ast.File)(nil),
+		(*ast.IfStmt)(nil),
 		(*ast.ReturnStmt)(nil),
 	}
 
-	var keepTrackOf tracker
+	keepTrackOf := &tracker{}
 	inspect.Preorder(nodeFilter, func(node ast.Node) {
 		if keepTrackOf.funcLitEnd.IsValid() && node.Pos() <= keepTrackOf.funcLitEnd {
 			return
@@ -57,78 +57,90 @@ func run(pass *analysis.Pass) (interface{}, error) {
 			keepTrackOf.retEnd = token.NoPos
 			keepTrackOf.incFRU()
 		}
+		keepTrackOf = stmtSelector(node, pass, keepTrackOf, inspect)
+	})
+	return nil, nil
+}
 
-		switch stmt := node.(type) {
-		case *ast.CallExpr:
-			call := getCallInfo(pass.TypesInfo, stmt)
-			if call == nil {
-				break
-			}
-			name := call.name
-			selMap := mapSelTypes(stmt, pass)
-			if selMap == nil {
-				break
-			}
-			if keepTrackOf.rLockSelector != nil {
-				if keepTrackOf.foundRLock > 0 {
-					if keepTrackOf.rLockSelector.isEqual(selMap, 0) {
+func stmtSelector(node ast.Node, pass *analysis.Pass, keepTrackOf *tracker, inspect *inspector.Inspector) *tracker {
+	switch stmt := node.(type) {
+	case *ast.CallExpr:
+		call := getCallInfo(pass.TypesInfo, stmt)
+		if call == nil {
+			break
+		}
+		name := call.name
+		selMap := mapSelTypes(stmt, pass)
+		if selMap == nil {
+			break
+		}
+		if keepTrackOf.rLockSelector != nil {
+			if keepTrackOf.foundRLock > 0 {
+				if keepTrackOf.rLockSelector.isEqual(selMap, 0) {
+					pass.Reportf(
+						node.Pos(),
+						fmt.Sprintf(
+							"%v",
+							errNestedRLock,
+						),
+					)
+				} else {
+					if stack := hasNestedRLock(keepTrackOf.rLockSelector, selMap, call, inspect, pass, make(map[string]bool)); stack != "" {
 						pass.Reportf(
 							node.Pos(),
 							fmt.Sprintf(
-								"%v",
+								"%v\n%v",
 								errNestedRLock,
+								stack,
 							),
 						)
-					} else {
-						if stack := hasNestedRLock(keepTrackOf.rLockSelector, selMap, call, inspect, pass, make(map[string]bool)); stack != "" {
-							pass.Reportf(
-								node.Pos(),
-								fmt.Sprintf(
-									"%v\n%v",
-									errNestedRLock,
-									stack,
-								),
-							)
-						}
 					}
 				}
-				if name == "RUnlock" && keepTrackOf.rLockSelector.isEqual(selMap, 1) {
-					keepTrackOf.deincFRU()
-				}
-			} else if name == "RLock" && keepTrackOf.foundRLock == 0 {
-				keepTrackOf.rLockSelector = selMap
-				keepTrackOf.incFRU()
 			}
-
-		case *ast.File:
-			keepTrackOf = tracker{}
-
-		case *ast.FuncDecl:
-			keepTrackOf = tracker{}
-			keepTrackOf.funcEnd = stmt.End()
-
-		case *ast.FuncLit:
-			if keepTrackOf.funcLitEnd == token.NoPos {
-				keepTrackOf.funcLitEnd = stmt.End()
-			}
-
-		case *ast.DeferStmt:
-			call := getCallInfo(pass.TypesInfo, stmt.Call)
-			if keepTrackOf.deferEnd == token.NoPos {
-				keepTrackOf.deferEnd = stmt.End()
-			}
-			if call != nil && call.name == "RUnlock" {
-				keepTrackOf.deferredRUnlock = true
-			}
-
-		case *ast.ReturnStmt:
-			if keepTrackOf.deferredRUnlock && keepTrackOf.retEnd == token.NoPos {
+			if name == "RUnlock" && keepTrackOf.rLockSelector.isEqual(selMap, 1) {
 				keepTrackOf.deincFRU()
-				keepTrackOf.retEnd = stmt.End()
 			}
+		} else if name == "RLock" && keepTrackOf.foundRLock == 0 {
+			keepTrackOf.rLockSelector = selMap
+			keepTrackOf.incFRU()
 		}
-	})
-	return nil, nil
+
+	case *ast.File:
+		keepTrackOf = &tracker{}
+
+	case *ast.FuncDecl:
+		keepTrackOf = &tracker{}
+		keepTrackOf.funcEnd = stmt.End()
+
+	case *ast.FuncLit:
+		if keepTrackOf.funcLitEnd == token.NoPos {
+			keepTrackOf.funcLitEnd = stmt.End()
+		}
+	case *ast.IfStmt:
+		stmts := stmt.Body.List
+		for i := 0; i < len(stmts); i++ {
+			keepTrackOf = stmtSelector(stmts[i], pass, keepTrackOf, inspect)
+		}
+		keepTrackOf = stmtSelector(stmt.Else, pass, keepTrackOf, inspect)
+	case *ast.DeferStmt:
+		call := getCallInfo(pass.TypesInfo, stmt.Call)
+		if keepTrackOf.deferEnd == token.NoPos {
+			keepTrackOf.deferEnd = stmt.End()
+		}
+		if call != nil && call.name == "RUnlock" {
+			keepTrackOf.deferredRUnlock = true
+		}
+
+	case *ast.ReturnStmt:
+		for i := 0; i < len(stmt.Results); i++ {
+			keepTrackOf = stmtSelector(stmt.Results[i], pass, keepTrackOf, inspect)
+		}
+		if keepTrackOf.deferredRUnlock && keepTrackOf.retEnd == token.NoPos {
+			keepTrackOf.deincFRU()
+			keepTrackOf.retEnd = stmt.End()
+		}
+	}
+	return keepTrackOf
}
 
 type tracker struct {
diff --git a/tools/analyzers/recursivelock/testdata/nonrlocks.go b/tools/analyzers/recursivelock/testdata/nonrlocks.go
index e146592f7..9897ad138 100644
--- a/tools/analyzers/recursivelock/testdata/nonrlocks.go
+++ b/tools/analyzers/recursivelock/testdata/nonrlocks.go
@@ -1,10 +1,10 @@
 // These are all non recursive rlocks. Testing to make sure there are no false positives
 package testdata
 
-func (resource *ProtectResource) NonNestedRLockWithDefer() string {
+func (resource *ProtectResource) NestedRLockWithDefer() string {
 	resource.RLock()
 	defer resource.RUnlock()
-	return resource.GetResource() // this is not a nested rlock because runlock is deferred
+	return resource.GetResource() // want `found recursive read lock call`
 }
 
 func (resource *NestedProtectResource) NonNestedRLockDifferentRLocks() {
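The testdata flip at the end is the behavioral point of the analyzer change: a deferred RUnlock runs only after the return expression has been evaluated, so a call in that expression that takes the same read lock is still a recursive acquisition, and the new ReturnStmt handling (which now walks stmt.Results before decrementing the counter) reports it. With sync.RWMutex this is a real deadlock risk, not a style nit: if a writer blocks in Lock between the two read acquisitions, the second RLock waits behind the writer forever. A sketch of the hazard, with hypothetical names that are not from the testdata:

```go
package main

import "sync"

type resource struct {
	mu  sync.RWMutex
	val string
}

// get takes the read lock itself, like GetResource in the testdata.
func (r *resource) get() string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.val
}

// Value is the flagged shape: the deferred RUnlock fires only after
// r.get() has executed, so the read lock is acquired recursively.
// A goroutine calling r.mu.Lock() between the two RLocks deadlocks
// this method, because the second RLock queues behind the writer.
func (r *resource) Value() string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.get()
}

func main() {
	r := &resource{val: "x"}
	_ = r.Value() // fine single-threaded; fatal under writer contention
}
```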
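Besides the nogo registration in BUILD.bazel, an analyzer built on golang.org/x/tools/go/analysis can also be exercised standalone while developing. Assuming the package exports an Analyzer variable (which nogo requires) and assuming this module path, a minimal driver would look like the following sketch:

```go
// Hypothetical standalone driver; in this repo the analyzer normally runs
// through Bazel's nogo, as registered in BUILD.bazel above.
package main

import (
	"github.com/prysmaticlabs/prysm/tools/analyzers/recursivelock"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker handles flag parsing and package loading for one analyzer.
	singlechecker.Main(recursivelock.Analyzer)
}
```

Invoked with a package pattern, e.g. `go run . ./...`, it prints each `found recursive read lock call` diagnostic with the call-stack detail added by the hasNestedRLock branch.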