Unverified Commit 43fb6cab authored by Jakub Sztandera's avatar Jakub Sztandera Committed by GitHub

Merge pull request #127 from libp2p/fix/test-improvements

misc test improvements
parents 000c4a39 6ec48e92
......@@ -113,6 +113,8 @@ func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
log.Debugf("Bootstrapping DHTs...")
// tried async. sequential fares much better. compare:
......@@ -129,7 +131,6 @@ func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {
dht := dhts[(start+i)%len(dhts)]
dht.runBootstrap(ctx, cfg)
}
cancel()
}
func TestValueGetSet(t *testing.T) {
......@@ -188,8 +189,12 @@ func TestValueGetSet(t *testing.T) {
}
func TestInvalidMessageSenderTracking(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dht := setupDHT(ctx, t, false)
defer dht.Close()
foo := peer.ID("asdasd")
_, err := dht.messageSenderForPeer(foo)
if err == nil {
......@@ -197,15 +202,18 @@ func TestInvalidMessageSenderTracking(t *testing.T) {
}
dht.smlk.Lock()
defer dht.smlk.Unlock()
if len(dht.strmap) > 0 {
mscnt := len(dht.strmap)
dht.smlk.Unlock()
if mscnt > 0 {
t.Fatal("should have no message senders in map")
}
}
func TestProvides(t *testing.T) {
// t.Skip("skipping test to debug another")
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
......@@ -254,7 +262,8 @@ func TestProvides(t *testing.T) {
func TestLocalProvides(t *testing.T) {
// t.Skip("skipping test to debug another")
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
......@@ -340,7 +349,8 @@ func TestBootstrap(t *testing.T) {
t.SkipNow()
}
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDHTs := 30
_, _, dhts := setupDHTS(ctx, nDHTs, t)
......@@ -393,7 +403,8 @@ func TestPeriodicBootstrap(t *testing.T) {
t.SkipNow()
}
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDHTs := 30
_, _, dhts := setupDHTS(ctx, nDHTs, t)
......@@ -404,20 +415,7 @@ func TestPeriodicBootstrap(t *testing.T) {
}
}()
// signal amplifier
amplify := func(signal chan time.Time, other []chan time.Time) {
for t := range signal {
for _, s := range other {
s <- t
}
}
for _, s := range other {
close(s)
}
}
signal := make(chan time.Time)
allSignals := []chan time.Time{}
signals := []chan time.Time{}
var cfg BootstrapConfig
cfg = DefaultBootstrapConfig
......@@ -426,10 +424,13 @@ func TestPeriodicBootstrap(t *testing.T) {
// kick off periodic bootstrappers with instrumented signals.
for _, dht := range dhts {
s := make(chan time.Time)
allSignals = append(allSignals, s)
dht.BootstrapOnSignal(cfg, s)
signals = append(signals, s)
proc, err := dht.BootstrapOnSignal(cfg, s)
if err != nil {
t.Fatal(err)
}
defer proc.Close()
}
go amplify(signal, allSignals)
t.Logf("dhts are not connected. %d", nDHTs)
for _, dht := range dhts {
......@@ -456,7 +457,10 @@ func TestPeriodicBootstrap(t *testing.T) {
}
t.Logf("bootstrapping them so they find each other. %d", nDHTs)
signal <- time.Now()
now := time.Now()
for _, signal := range signals {
go func(s chan time.Time) { s <- now }(signal)
}
// this is async, and we dont know when it's finished with one cycle, so keep checking
// until the routing tables look better, or some long timeout for the failure case.
......@@ -470,7 +474,8 @@ func TestPeriodicBootstrap(t *testing.T) {
func TestProvidesMany(t *testing.T) {
t.Skip("this test doesn't work")
// t.Skip("skipping test to debug another")
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDHTs := 40
_, _, dhts := setupDHTS(ctx, nDHTs, t)
......@@ -571,7 +576,8 @@ func TestProvidesAsync(t *testing.T) {
t.SkipNow()
}
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
defer func() {
......@@ -652,7 +658,8 @@ func TestFindPeer(t *testing.T) {
t.SkipNow()
}
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, peers, dhts := setupDHTS(ctx, 4, t)
defer func() {
......@@ -689,7 +696,8 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
t.SkipNow()
}
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, peers, dhts := setupDHTS(ctx, 4, t)
defer func() {
......@@ -777,7 +785,7 @@ func TestConnectCollision(t *testing.T) {
for rtime := 0; rtime < runTimes; rtime++ {
log.Info("Running Time: ", rtime)
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
dhtA := setupDHT(ctx, t, false)
dhtB := setupDHT(ctx, t, false)
......@@ -824,6 +832,7 @@ func TestConnectCollision(t *testing.T) {
dhtB.Close()
dhtA.host.Close()
dhtB.host.Close()
cancel()
}
}
......
......@@ -2,7 +2,11 @@ package dht
import (
"context"
"fmt"
"testing"
"time"
tu "github.com/libp2p/go-testutil"
)
func TestNotifieeMultipleConn(t *testing.T) {
......@@ -40,13 +44,16 @@ func TestNotifieeMultipleConn(t *testing.T) {
conn.Close()
}
if checkRoutingTable(d1, d2) {
t.Fatal("routes")
}
tu.WaitFor(ctx, func() error {
if checkRoutingTable(d1, d2) {
return fmt.Errorf("should not have routes")
}
return nil
})
}
func TestNotifieeFuzz(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
d1 := setupDHT(ctx, t, false)
......@@ -58,9 +65,12 @@ func TestNotifieeFuzz(t *testing.T) {
conn.Close()
}
}
if checkRoutingTable(d1, d2) {
t.Fatal("should not have routes")
}
tu.WaitFor(ctx, func() error {
if checkRoutingTable(d1, d2) {
return fmt.Errorf("should not have routes")
}
return nil
})
connect(t, ctx, d1, d2)
}
......
......@@ -18,7 +18,9 @@ import (
)
func TestProviderManager(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mid := peer.ID("testing")
p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
a := cid.NewCidV0(u.Hash([]byte("test")))
......@@ -35,7 +37,9 @@ func TestProvidersDatastore(t *testing.T) {
lruCacheSize = 10
defer func() { lruCacheSize = old }()
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mid := peer.ID("testing")
p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
defer p.proc.Close()
......@@ -112,7 +116,9 @@ func TestProvidesExpire(t *testing.T) {
defaultCleanupInterval = cleanup
}()
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mid := peer.ID("testing")
p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
......@@ -217,7 +223,8 @@ func TestUponCacheMissProvidersAreReadFromDatastore(t *testing.T) {
old := lruCacheSize
lruCacheSize = 1
defer func() { lruCacheSize = old }()
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
p1, p2 := peer.ID("a"), peer.ID("b")
c1 := cid.NewCidV1(cid.DagCBOR, u.Hash([]byte("1")))
......
Markdown is supported
0% — Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment