Commit feb5e495 authored by Juan Batiz-Benet

use ZeroLocalTCPAddress for network tests

This commit makes all network tests use ZeroLocalTCPAddress
(/ip4/127.0.0.1/tcp/0) as the initial peer address, and then
relies on net.ListenAddresses() to recover the actual,
kernel-assigned listen addresses.

This should get rid of the TCP address clash problems.
parent 8fb5cf95
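
The pattern behind this change can be sketched with the standard library alone (an illustrative sketch, not code from this commit): bind to port 0 so the kernel chooses a free port, then read the real address back from the listener.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Port 0 asks the kernel to assign any unused port at bind time.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// The listener's address carries the kernel-assigned port, so
	// concurrent tests never race for the same hard-coded port.
	fmt.Println("listening on", ln.Addr()) // e.g. 127.0.0.1:54321
}
```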
@@ -51,7 +51,6 @@ func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p
 	p1 = tu.RandPeerNetParamsOrFatal(t)
 	p2 = tu.RandPeerNetParamsOrFatal(t)
-	laddr := p1.Addr
 	key1 := p1.PrivKey
 	key2 := p2.PrivKey
@@ -59,10 +58,11 @@ func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p
 		key1 = nil
 		key2 = nil
 	}
-	l1, err := Listen(ctx, laddr, p1.ID, key1)
+	l1, err := Listen(ctx, p1.Addr, p1.ID, key1)
 	if err != nil {
 		t.Fatal(err)
 	}
+	p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.
 	d2 := &Dialer{
 		LocalPeer: p2.ID,
@@ -110,6 +110,7 @@ func testDialer(t *testing.T, secure bool) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.
 	d2 := &Dialer{
 		LocalPeer: p2.ID,
......
@@ -17,25 +17,24 @@ import (
 type listener struct {
 	manet.Listener

-	maddr ma.Multiaddr // Local multiaddr to listen on
-	local peer.ID      // LocalPeer is the identity of the local Peer
-	privk ic.PrivKey   // private key to use to initialize secure conns
+	local peer.ID    // LocalPeer is the identity of the local Peer
+	privk ic.PrivKey // private key to use to initialize secure conns

 	cg ctxgroup.ContextGroup
 }

 func (l *listener) teardown() error {
-	defer log.Debugf("listener closed: %s %s", l.local, l.maddr)
+	defer log.Debugf("listener closed: %s %s", l.local, l.Multiaddr())
 	return l.Listener.Close()
 }

 func (l *listener) Close() error {
-	log.Debugf("listener closing: %s %s", l.local, l.maddr)
+	log.Debugf("listener closing: %s %s", l.local, l.Multiaddr())
 	return l.cg.Close()
 }

 func (l *listener) String() string {
-	return fmt.Sprintf("<Listener %s %s>", l.local, l.maddr)
+	return fmt.Sprintf("<Listener %s %s>", l.local, l.Multiaddr())
 }

 // Accept waits for and returns the next connection to the listener.
@@ -73,8 +72,14 @@ func (l *listener) Addr() net.Addr {
 }

 // Multiaddr is the identity of the local Peer.
+// If there is an error converting from net.Addr to ma.Multiaddr,
+// the return value will be nil.
 func (l *listener) Multiaddr() ma.Multiaddr {
-	return l.maddr
+	maddr, err := manet.FromNetAddr(l.Addr())
+	if err != nil {
+		return nil // error
+	}
+	return maddr
 }

 // LocalPeer is the identity of the local Peer.
@@ -102,7 +107,6 @@ func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey
 	l := &listener{
 		Listener: ml,
-		maddr:    addr,
 		local:    local,
 		privk:    sk,
 		cg:       ctxgroup.WithContext(ctx),
......
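
The listener change above drops the cached maddr field and derives the multiaddr from the live socket instead, so callers always see the kernel-assigned port. A minimal sketch of the same flow using the multiaddr packages (modern import paths assumed in place of the vendored ones above):

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
)

func main() {
	// The "zero" address: the kernel picks a free port at bind time.
	zero := ma.StringCast("/ip4/127.0.0.1/tcp/0")

	l, err := manet.Listen(zero)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Multiaddr() reflects the bound socket, not the requested address.
	fmt.Println(l.Multiaddr()) // e.g. /ip4/127.0.0.1/tcp/53420
}
```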
@@ -15,13 +15,14 @@ func TestSimultOpen(t *testing.T) {
 	// t.Skip("skipping for another test")

 	ctx := context.Background()
-	swarms, peers := makeSwarms(ctx, t, 2)
+	swarms := makeSwarms(ctx, t, 2)

 	// connect everyone
 	{
 		var wg sync.WaitGroup
 		connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
 			// copy for other peer
+			log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
 			s.peers.AddAddress(dst, addr)
 			if _, err := s.Dial(ctx, dst); err != nil {
 				t.Fatal("error swarm dialing to peer", err)
@@ -31,8 +32,8 @@ func TestSimultOpen(t *testing.T) {
 		log.Info("Connecting swarms simultaneously.")
 		wg.Add(2)
-		go connect(swarms[0], swarms[1].local, peers[1].Addr)
-		go connect(swarms[1], swarms[0].local, peers[0].Addr)
+		go connect(swarms[0], swarms[1].local, swarms[1].ListenAddresses()[0])
+		go connect(swarms[1], swarms[0].local, swarms[0].ListenAddresses()[0])

 		wg.Wait()
 	}
......
@@ -47,20 +47,17 @@ func EchoStreamHandler(stream inet.Stream) {
 	}()
 }

-func makeSwarms(ctx context.Context, t *testing.T, num int) ([]*Swarm, []testutil.PeerNetParams) {
+func makeSwarms(ctx context.Context, t *testing.T, num int) []*Swarm {
 	swarms := make([]*Swarm, 0, num)
-	peersnp := make([]testutil.PeerNetParams, 0, num)

 	for i := 0; i < num; i++ {
 		localnp := testutil.RandPeerNetParamsOrFatal(t)
-		peersnp = append(peersnp, localnp)

 		peerstore := peer.NewPeerstore()
-		peerstore.AddAddress(localnp.ID, localnp.Addr)
 		peerstore.AddPubKey(localnp.ID, localnp.PubKey)
 		peerstore.AddPrivKey(localnp.ID, localnp.PrivKey)

-		addrs := peerstore.Addresses(localnp.ID)
+		addrs := []ma.Multiaddr{localnp.Addr}
 		swarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore)
 		if err != nil {
 			t.Fatal(err)
@@ -70,10 +67,10 @@ func makeSwarms(ctx context.Context, t *testing.T, num int) ([]*Swarm, []testuti
 		swarms = append(swarms, swarm)
 	}

-	return swarms, peersnp
+	return swarms
 }

-func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm, peersnp []testutil.PeerNetParams) {
+func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {

 	var wg sync.WaitGroup
 	connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
@@ -86,11 +83,11 @@ func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm, peersnp [
 	}

 	log.Info("Connecting swarms simultaneously.")
-	for _, s := range swarms {
-		for _, p := range peersnp {
-			if p.ID != s.local { // don't connect to self.
+	for _, s1 := range swarms {
+		for _, s2 := range swarms {
+			if s2.local != s1.local { // don't connect to self.
 				wg.Add(1)
-				connect(s, p.ID, p.Addr)
+				connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) // try the first.
 			}
 		}
 	}
@@ -105,10 +102,10 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
 	// t.Skip("skipping for another test")

 	ctx := context.Background()
-	swarms, peersnp := makeSwarms(ctx, t, SwarmNum)
+	swarms := makeSwarms(ctx, t, SwarmNum)

 	// connect everyone
-	connectSwarms(t, ctx, swarms, peersnp)
+	connectSwarms(t, ctx, swarms)

 	// ping/pong
 	for _, s1 := range swarms {
@@ -118,7 +115,7 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
 		_, cancel := context.WithCancel(ctx)
 		got := map[peer.ID]int{}
-		errChan := make(chan error, MsgNum*len(peersnp))
+		errChan := make(chan error, MsgNum*len(swarms))
 		streamChan := make(chan *Stream, MsgNum)

 		// send out "ping" x MsgNum to every peer
@@ -150,13 +147,13 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
 				streamChan <- stream
 			}

-			for _, p := range peersnp {
-				if p.ID == s1.local {
+			for _, s2 := range swarms {
+				if s2.local == s1.local {
 					continue // dont send to self...
 				}

 				wg.Add(1)
-				go send(p.ID)
+				go send(s2.local)
 			}
 			wg.Wait()
 		}()
@@ -165,7 +162,7 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
 		go func() {
 			defer close(errChan)
 			count := 0
-			countShouldBe := MsgNum * (len(peersnp) - 1)
+			countShouldBe := MsgNum * (len(swarms) - 1)
 			for stream := range streamChan { // one per peer
 				defer stream.Close()
@@ -209,8 +206,8 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
 		}

 		log.Debugf("%s got pongs", s1.local)
-		if (len(peersnp) - 1) != len(got) {
-			t.Errorf("got (%d) less messages than sent (%d).", len(got), len(peersnp))
+		if (len(swarms) - 1) != len(got) {
+			t.Errorf("got (%d) less messages than sent (%d).", len(got), len(swarms))
 		}

 		for p, n := range got {
@@ -241,14 +238,14 @@ func TestConnHandler(t *testing.T) {
 	// t.Skip("skipping for another test")

 	ctx := context.Background()
-	swarms, peersnp := makeSwarms(ctx, t, 5)
+	swarms := makeSwarms(ctx, t, 5)

 	gotconn := make(chan struct{}, 10)
 	swarms[0].SetConnHandler(func(conn *Conn) {
 		gotconn <- struct{}{}
 	})

-	connectSwarms(t, ctx, swarms, peersnp)
+	connectSwarms(t, ctx, swarms)

 	<-time.After(time.Millisecond)
 	// should've gotten 5 by now.
......
@@ -10,18 +10,19 @@ import (
 	tu "github.com/jbenet/go-ipfs/util/testutil"

 	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
+	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 )

 func GenSwarmNetwork(t *testing.T, ctx context.Context) *swarm.Network {
 	p := tu.RandPeerNetParamsOrFatal(t)
 	ps := peer.NewPeerstore()
-	ps.AddAddress(p.ID, p.Addr)
 	ps.AddPubKey(p.ID, p.PubKey)
 	ps.AddPrivKey(p.ID, p.PrivKey)
-	n, err := swarm.NewNetwork(ctx, ps.Addresses(p.ID), p.ID, ps)
+	n, err := swarm.NewNetwork(ctx, []ma.Multiaddr{p.Addr}, p.ID, ps)
 	if err != nil {
 		t.Fatal(err)
 	}
+	ps.AddAddresses(p.ID, n.ListenAddresses())
 	return n
 }
......
@@ -16,6 +16,19 @@ import (
 	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 )

+// ZeroLocalTCPAddress is the "zero" tcp local multiaddr. This means:
+//   /ip4/127.0.0.1/tcp/0
+var ZeroLocalTCPAddress ma.Multiaddr
+
+func init() {
+	// initialize ZeroLocalTCPAddress
+	maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+	if err != nil {
+		panic(err)
+	}
+	ZeroLocalTCPAddress = maddr
+}
+
 func RandKeyPair(bits int) (ci.PrivKey, ci.PubKey, error) {
 	return ci.GenerateKeyPairWithReader(ci.RSA, bits, u.NewTimeSeededRand())
 }
@@ -48,6 +61,11 @@ func RandPeerIDFatal(t testing.TB) peer.ID {
 // RandLocalTCPAddress returns a random multiaddr. it suppresses errors
 // for nice composability-- do check the address isn't nil.
+//
+// Note: for real network tests, use ZeroLocalTCPAddress so the kernel
+// assigns an unused TCP port. otherwise you may get clashes. This
+// function remains here so that p2p/net/mock (which does not touch the
+// real network) can assign different addresses to peers.
 func RandLocalTCPAddress() ma.Multiaddr {
 	// chances are it will work out, but it **might** fail if the port is in use
@@ -123,7 +141,7 @@ func RandPeerNetParamsOrFatal(t *testing.T) PeerNetParams {
 func RandPeerNetParams() (*PeerNetParams, error) {
 	var p PeerNetParams
 	var err error
-	p.Addr = RandLocalTCPAddress()
+	p.Addr = ZeroLocalTCPAddress
 	p.PrivKey, p.PubKey, err = RandKeyPair(512)
 	if err != nil {
 		return nil, err
......
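
To see why the zero address removes the clashes, here is a small test sketch (hypothetical test, stdlib only, not part of the commit): two listeners both request port 0, and the kernel hands them distinct ports.

```go
package example

import (
	"net"
	"testing"
)

// TestNoPortClash demonstrates the property this commit relies on:
// listeners bound to port 0 receive distinct kernel-assigned ports.
func TestNoPortClash(t *testing.T) {
	l1, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer l1.Close()

	l2, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer l2.Close()

	if l1.Addr().String() == l2.Addr().String() {
		t.Fatalf("kernel assigned the same address twice: %s", l1.Addr())
	}
}
```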