Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-25 04:47:18 +00:00)
918129cf36
* refactor initialization to blocking startup method
* require genesisSetter in blockchain, fix tests
* work-around gazelle weirdness
* fix dep gazelle ignores
* only call SetGenesis once
* fix typo
* validator test setup and fix to return right error
* move waitForChainStart to Start
* wire up sync Service.genesisWaiter
* fix p2p genesisWaiter plumbing
* remove extra clock type, integrate into genesis and rename
* use time.Now when no Nower is specified
* remove unused ClockSetter
* simplify rpc context checking
* fix typo
* use clock everywhere in sync; [32]byte val root
* don't use DeepEqual to compare [32]byte and []byte
* don't use clock in init sync, not wired up yet
* use clock waiter in blockchain as well
* use cancelable contexts in tests with goroutines
* missed a reference to WithClockSetter
* Update beacon-chain/startup/genesis.go (Co-authored-by: Radosław Kapka <rkapka@wp.pl>)
* Update beacon-chain/blockchain/service_test.go (Co-authored-by: Radosław Kapka <rkapka@wp.pl>)
* more clear docs
* doc for NewClock
* move clock typedef to more logical file name
* adding documentation
* gaz
* fixes for capella
* reducing test raciness
* fix races in committee cache tests
* lint
* add tests on Duration slot math helper
* startup package test coverage
* fix bad merge
* set non-zero genesis time in tests that call Start
* happy deepsource, happy me-epsource
* replace Synced event with channel
* remove unused error
* remove accidental wip commit
* gaz!
* remove unused event constants
* remove sync statefeed subscription to fix deadlock
* remove state notifier
* fix build

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
241 lines
7.3 KiB
Go
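Per the commit message above (removing ClockSetter, replacing the Synced event with a channel), the sync service tests in this file now build a startup clock directly from the mock chain. A minimal sketch of that wiring, copied from the test setup below with the unrelated config fields elided:

	// Sketch only: mirrors how TestSendGoodbye_SendsMessage constructs the
	// service clock from the mock chain's genesis time and validators root.
	chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
	r := &Service{
		cfg: &config{
			chain: chain,
			clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
		},
	}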
package sync

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/protocol"
	mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
	db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
	p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
	p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.MainnetConfig().Copy()
	cfg.SecondsPerSlot = 1
	params.OverrideBeaconConfig(cfg)

	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	// Set up a head state in the database with data we expect.
	d := db.SetupDB(t)
	r := &Service{
		cfg: &config{
			beaconDB: d,
			p2p:      p1,
		},
		rateLimiter: newRateLimiter(p1),
	}

	// Setup streams
	pcl := protocol.ID("/testing")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		expectResetStream(t, stream)
	})
	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	failureCode := p2ptypes.GoodbyeCodeClientShutdown

	assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1))

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
	if len(conns) > 0 {
		t.Error("Peer is still not disconnected despite sending a goodbye message")
	}
}

func TestGoodByeRPCHandler_BackOffPeer(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p3 := p2ptest.NewTestP2P(t)

	p1.Connect(p2)
	p1.Connect(p3)
	assert.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	// Set up a head state in the database with data we expect.
	d := db.SetupDB(t)
	r := &Service{
		cfg: &config{
			beaconDB: d,
			p2p:      p1,
		},
		rateLimiter: newRateLimiter(p1),
	}

	// Setup streams
	pcl := protocol.ID("/testing")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		expectResetStream(t, stream)
	})
	stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
	require.NoError(t, err)
	failureCode := p2ptypes.GoodbyeCodeClientShutdown

	assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1))

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
	if len(conns) > 0 {
		t.Error("Peer is still not disconnected despite sending a goodbye message")
	}
	valTime, err := p1.Peers().NextValidTime(p2.BHost.ID())
	require.NoError(t, err)
	expectedTime := time.Now().Add(backOffTime[failureCode])
	diff := expectedTime.Sub(valTime)
	// Add a little bit of allowance
	require.Equal(t, true, diff.Seconds() <= 1)

	wg.Add(1)
	p3.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		expectResetStream(t, stream)
	})

	stream2, err := p1.BHost.NewStream(context.Background(), p3.BHost.ID(), pcl)
	require.NoError(t, err)
	failureCode = p2ptypes.GoodbyeCodeBanned

	assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream2))

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	conns = p1.BHost.Network().ConnsToPeer(p3.BHost.ID())
	if len(conns) > 0 {
		t.Error("Peer is still not disconnected despite sending a goodbye message")
	}
	valTime, err = p1.Peers().NextValidTime(p3.BHost.ID())
	require.NoError(t, err)
	expectedTime = time.Now().Add(backOffTime[failureCode])
	diff = expectedTime.Sub(valTime)
	// Add a little bit of allowance
	require.Equal(t, true, diff.Seconds() <= 1)
}

func TestSendGoodbye_SendsMessage(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	// Set up a head state in the database with data we expect.
	d := db.SetupDB(t)
	chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}
	r := &Service{
		cfg: &config{
			beaconDB: d,
			p2p:      p1,
			chain:    chain,
			clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
		},
		rateLimiter: newRateLimiter(p1),
	}
	failureCode := p2ptypes.GoodbyeCodeClientShutdown

	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/goodbye/1/ssz_snappy")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := new(primitives.SSZUint64)
		assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out))
		assert.Equal(t, failureCode, *out)
		assert.NoError(t, stream.Close())
	})

	err := r.sendGoodByeMessage(context.Background(), failureCode, p2.BHost.ID())
	assert.NoError(t, err)

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}

	conns := p1.BHost.Network().ConnsToPeer(p1.BHost.ID())
	if len(conns) > 0 {
		t.Error("Peer is still not disconnected despite sending a goodbye message")
	}
}

func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

	// Set up a head state in the database with data we expect.
	d := db.SetupDB(t)
	chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
	r := &Service{
		cfg: &config{
			beaconDB: d,
			p2p:      p1,
			chain:    chain,
			clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
		},
		rateLimiter: newRateLimiter(p1),
	}
	failureCode := p2ptypes.GoodbyeCodeClientShutdown

	// Setup streams
	pcl := protocol.ID("/eth2/beacon_chain/req/goodbye/1/ssz_snappy")
	topic := string(pcl)
	r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, time.Second, false)
	var wg sync.WaitGroup
	wg.Add(1)
	p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
		defer wg.Done()
		out := new(primitives.SSZUint64)
		assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out))
		assert.Equal(t, failureCode, *out)
		assert.NoError(t, stream.Close())
	})

	assert.NoError(t, r.sendGoodByeAndDisconnect(context.Background(), failureCode, p2.BHost.ID()))
	conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
	if len(conns) > 0 {
		t.Error("Peer is still not disconnected despite sending a goodbye message")
	}

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}