package p2p

import (
	"context"
	"crypto/ecdsa"
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/multiformats/go-multiaddr"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
)
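// mockListener is a test stub for the discv5 listener: Close is a no-op and
// every other method panics if it is called.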
type mockListener struct{}

func (m *mockListener) Self() *discv5.Node {
	panic("implement me")
}

func (m *mockListener) Close() {
	// no-op
}

func (m *mockListener) Lookup(discv5.NodeID) []*discv5.Node {
	panic("implement me")
}

func (m *mockListener) ReadRandomNodes([]*discv5.Node) int {
	panic("implement me")
}

func (m *mockListener) SetFallbackNodes([]*discv5.Node) error {
	panic("implement me")
}

func (m *mockListener) Resolve(discv5.NodeID) *discv5.Node {
	panic("implement me")
}

func (m *mockListener) RegisterTopic(discv5.Topic, <-chan struct{}) {
	panic("implement me")
}

func (m *mockListener) SearchTopic(discv5.Topic, <-chan time.Duration, chan<- *discv5.Node, chan<- bool) {
	panic("implement me")
}
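// createPeer builds a libp2p host on the given port, sets both the TCP and UDP
// ports in the supplied config to that port, and starts a discv5 listener for it.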
func createPeer(t *testing.T, cfg *Config, port int) (Listener, host.Host) {
	h, pkey, ipAddr := createHost(t, port)
	cfg.UDPPort = uint(port)
	cfg.Port = uint(port)
	listener, err := startDiscoveryV5(ipAddr, pkey, cfg)
	if err != nil {
		t.Errorf("Could not start discovery for node: %v", err)
	}
	return listener, h
}
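// createHost creates a libp2p host listening on 127.0.0.1 at the given TCP port
// and returns the host along with its private key and IP address.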
func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
	ipAddr, pkey := createAddrAndPrivKey(t)
	ipAddr = net.ParseIP("127.0.0.1")
	listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
	if err != nil {
		t.Fatalf("Failed to p2p listen: %v", err)
	}
	h, err := libp2p.New(context.Background(), []libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen)}...)
	if err != nil {
		t.Fatal(err)
	}
	return h, pkey, ipAddr
}

func TestService_Stop_SetsStartedToFalse(t *testing.T) {
	s, _ := NewService(nil)
	s.started = true
	s.dv5Listener = &mockListener{}
	_ = s.Stop()

	if s.started != false {
		t.Error("Expected Service.started to be false, got true")
	}
}

func TestService_Start_OnlyStartsOnce(t *testing.T) {
	hook := logTest.NewGlobal()

	cfg := &Config{
		Port:    2000,
		UDPPort: 2000,
	}
	s, _ := NewService(cfg)
	s.dv5Listener = &mockListener{}
	defer s.Stop()
	s.Start()
	if s.started != true {
		t.Error("Expected service to be started")
	}
	s.Start()
	testutil.AssertLogsContain(t, hook, "Attempted to start p2p service when it was already started")
}

func TestService_Status_NotRunning(t *testing.T) {
	s := &Service{started: false}
	s.dv5Listener = &mockListener{}
	if s.Status().Error() != "not running" {
		t.Errorf("Status returned wrong error, got %v", s.Status())
	}
}
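// TestListenForNewNodes starts a discv5 bootnode, five peers that bootstrap from it,
// and a service pointed at the same bootnode, then verifies the service connects to
// all five peers.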
func TestListenForNewNodes(t *testing.T) {
	// setup bootnode
	port := 2000
	_, pkey := createAddrAndPrivKey(t)
	ipAddr := net.ParseIP("127.0.0.1")
	bootListener := createListener(ipAddr, port, pkey)
	defer bootListener.Close()

	bootNode := bootListener.Self()

	cfg := &Config{
		BootstrapNodeAddr: bootNode.String(),
	}
	var listeners []*discv5.Network
	var hosts []host.Host
	// setup other nodes
	for i := 1; i <= 5; i++ {
		listener, h := createPeer(t, cfg, port+i)
		listeners = append(listeners, listener.(*discv5.Network))
		hosts = append(hosts, h)
	}

	// close peers upon exit of test
	defer func() {
		for _, h := range hosts {
			_ = h.Close()
		}
	}()

	cfg.Port = 4000
	cfg.UDPPort = 4000

	s, err := NewService(cfg)
	if err != nil {
		t.Fatal(err)
	}

	s.Start()
	defer s.Stop()
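	// give discovery time to find the peers and the service time to dial them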
	time.Sleep(2 * time.Second)
	peers := s.host.Network().Peers()
	if len(peers) != 5 {
		t.Errorf("Not all peers added to peerstore, wanted %d but got %d", 5, len(peers))
	}
	// close down all peers
	for _, listener := range listeners {
		listener.Close()
	}
}
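// TestPeer_Disconnect connects two hosts directly and checks that Service.Disconnect
// tears the connection down again.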
func TestPeer_Disconnect(t *testing.T) {
	h1, _, _ := createHost(t, 5000)
	defer h1.Close()

	s := &Service{
		host: h1,
	}

	h2, _, ipaddr := createHost(t, 5001)
	defer h2.Close()

	h2Addr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ipaddr, 5001, h2.ID()))
	if err != nil {
		t.Fatal(err)
	}
	addrInfo, err := peer.AddrInfoFromP2pAddr(h2Addr)
	if err != nil {
		t.Fatal(err)
	}
	if err := s.host.Connect(context.Background(), *addrInfo); err != nil {
		t.Fatal(err)
	}
	if len(s.host.Network().Peers()) != 1 {
		t.Fatalf("Number of peers is %d when it was supposed to be %d", len(s.host.Network().Peers()), 1)
	}
	if len(s.host.Network().Conns()) != 1 {
		t.Fatalf("Number of connections is %d when it was supposed to be %d", len(s.host.Network().Conns()), 1)
	}
	if err := s.Disconnect(h2.ID()); err != nil {
		t.Fatal(err)
	}
	if len(s.host.Network().Conns()) != 0 {
		t.Fatalf("Number of connections is %d when it was supposed to be %d", len(s.host.Network().Conns()), 0)
	}
}