swarm/storage: Fix race in TestLDBStoreCollectGarbage. Disable testLDBStoreRemoveThenCollectGarbage (#18512)

Authored by Ferenc Szabo on 2019-01-24 12:34:12 +01:00; committed by Anton Evangelatov
parent 6f45fa66d8
commit 3591fc603f


@@ -26,7 +26,6 @@ import (
 	"strconv"
 	"strings"
 	"testing"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	ch "github.com/ethereum/go-ethereum/swarm/chunk"
@@ -388,11 +387,11 @@ func testLDBStoreCollectGarbage(t *testing.T) {
 			t.Fatal(err.Error())
 		}
 		allChunks = append(allChunks, chunks...)
+		ldb.lock.RLock()
 		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
+		ldb.lock.RUnlock()
 
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-		defer cancel()
-		waitGc(ctx, ldb)
+		waitGc(ldb)
 	}
 
 	// attempt gets on all put chunks
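
The race fixed above is an unsynchronized read: the test's log.Debug call reads ldb.entryCnt and ldb.accessCnt while the garbage-collection goroutine mutates them, so `go test -race` reports a data race; bracketing the read with ldb.lock.RLock()/RUnlock() resolves it. Below is a minimal, self-contained sketch of the same pattern; the store type and helpers are stand-ins that only mirror the field names in the diff, not the real LDBStore:

package main

import (
	"fmt"
	"sync"
)

// store is a stand-in for LDBStore: two counters mutated by a
// background goroutine (Put/GC paths) and read by the test for logging.
type store struct {
	lock      sync.RWMutex
	entryCnt  uint64
	accessCnt uint64
}

// put mutates the counters under the write lock, as the mutating paths do.
func (s *store) put() {
	s.lock.Lock()
	s.entryCnt++
	s.accessCnt++
	s.lock.Unlock()
}

// snapshot reads both counters under the read lock -- the same
// RLock/RUnlock bracket the commit adds around log.Debug.
func (s *store) snapshot() (entries, accesses uint64) {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.entryCnt, s.accessCnt
}

func main() {
	s := &store{}
	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			s.put()
		}
		close(done)
	}()
	e, a := s.snapshot() // race-free even under the race detector
	fmt.Println("entrycnt", e, "accesscnt", a)
	<-done
}
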
@@ -466,6 +465,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 }
 
 func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
+	t.Skip("flaky with -race flag")
 
 	params := strings.Split(t.Name(), "/")
 	capacity, err := strconv.Atoi(params[2])
@@ -496,9 +496,7 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 		}
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	waitGc(ctx, ldb)
+	waitGc(ldb)
 
 	// delete all chunks
 	// (only count the ones actually deleted, the rest will have been gc'd)
@@ -537,14 +535,14 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 		remaining -= putCount
 		for putCount > 0 {
 			ldb.Put(context.TODO(), chunks[puts])
+			ldb.lock.RLock()
 			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
+			ldb.lock.RUnlock()
 			puts++
 			putCount--
 		}
 
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-		defer cancel()
-		waitGc(ctx, ldb)
+		waitGc(ldb)
 	}
 
 	// expect first surplus chunks to be missing, because they have the smallest access value
@@ -597,9 +595,7 @@ func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
 	}
 
 	// wait for garbage collection to kick in on the responsible actor
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	waitGc(ctx, ldb)
+	waitGc(ldb)
 
 	var missing int
 	for i, ch := range chunks[2 : capacity/2] {
@@ -788,7 +784,10 @@ func TestCleanIndex(t *testing.T) {
 	}
 }
 
-func waitGc(ctx context.Context, ldb *LDBStore) {
+// Note: waitGc does not guarantee that we wait 1 GC round; it only
+// guarantees that if the GC is running we wait for that run to finish
+// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
+func waitGc(ldb *LDBStore) {
 	<-ldb.gc.runC
 	ldb.gc.runC <- struct{}{}
 }
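
For context on the caveat in the new comment: waitGc works because gc.runC behaves as a single-token channel, with the collector holding the token for the duration of a round. Receiving the token and immediately putting it back therefore blocks exactly until any in-flight round returns it, but never forces a fresh round to run. A sketch of that handshake under those assumed semantics follows; gcState, round, and wait are hypothetical stand-ins, not the real gc struct:

package main

import (
	"fmt"
	"time"
)

// gcState is a hypothetical stand-in for the gc struct: runC is a
// buffered channel of capacity 1 whose token means "no round running".
type gcState struct {
	runC chan struct{}
}

func newGC() *gcState {
	g := &gcState{runC: make(chan struct{}, 1)}
	g.runC <- struct{}{} // token present: collector idle
	return g
}

// round simulates one GC run: take the token, work, give it back.
func (g *gcState) round() {
	<-g.runC
	time.Sleep(50 * time.Millisecond) // stand-in for collecting garbage
	g.runC <- struct{}{}
}

// wait is the same <-runC / runC<- pair as waitGc: it blocks until an
// in-flight round returns the token, then restores it. It does NOT
// start or wait for a fresh round -- hence the caveat in the comment.
func (g *gcState) wait() {
	<-g.runC
	g.runC <- struct{}{}
}

func main() {
	g := newGC()
	go g.round()
	time.Sleep(10 * time.Millisecond) // let the round grab the token
	g.wait()                          // returns only after the round finishes
	fmt.Println("any in-flight GC round has finished")
}
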