query_test.go 3.07 KB
Newer Older
Aarsh Shah's avatar
Aarsh Shah committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
package dht

import (
	"context"
	"fmt"
	"testing"
	"time"

	tu "github.com/libp2p/go-libp2p-testing/etc"

	"github.com/stretchr/testify/require"
)

// TODO Debug test failures due to timing issue on windows
// Tests are timing dependent as can be seen in the 2 seconds timed context that we use in "tu.WaitFor".
// While the tests work fine on OSX and complete in under a second,
// they repeatedly fail to complete in the stipulated time on Windows.
// However, increasing the timeout makes them pass on Windows.

// TestRTEvictionOnFailedQuery verifies that routing-table membership is
// decoupled from connectivity: peers stay in the RT after disconnects and
// even after their hosts close, and are only evicted once a query to them
// actually fails.
func TestRTEvictionOnFailedQuery(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()

	d1 := setupDHT(ctx, t, false)
	d2 := setupDHT(ctx, t, false)

	// Connect and immediately drop the connections several times so the
	// peers learn about each other without keeping a live connection.
	for i := 0; i < 10; i++ {
		connect(t, ctx, d1, d2)
		for _, conn := range d1.host.Network().ConnsToPeer(d2.self) {
			conn.Close()
		}
	}

	// peers should be in the RT because of fixLowPeers
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))

	// close both hosts so query fails
	require.NoError(t, d1.host.Close())
	require.NoError(t, d2.host.Close())
	// peers will still be in the RT because we have decoupled membership from connectivity
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))

	// failed queries should remove the peers from the RT
	_, err := d1.GetClosestPeers(ctx, "test")
	require.NoError(t, err)

	_, err = d2.GetClosestPeers(ctx, "test")
	require.NoError(t, err)

	require.NoError(t, tu.WaitFor(ctx, func() error {
		if checkRoutingTable(d1, d2) {
			return fmt.Errorf("should not have routes")
		}
		return nil
	}))
}

// TestRTAdditionOnSuccessfulQuery verifies that peers discovered during a
// successful query are added to the routing table: d1 and d3 start out
// knowing only d2, and learn about each other once d3 runs a query.
func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()

	d1 := setupDHT(ctx, t, false)
	d2 := setupDHT(ctx, t, false)
	d3 := setupDHT(ctx, t, false)

	// Build the chain d1 <-> d2 <-> d3; d1 and d3 are not directly connected.
	connect(t, ctx, d1, d2)
	connect(t, ctx, d2, d3)
	// validate RT states

	// d1 has d2
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
	// d2 has d3
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d2, d3) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))

	// however, d1 does not know about d3
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if checkRoutingTable(d1, d3) {
			return fmt.Errorf("should not have routes")
		}
		return nil
	}))

	// but when d3 queries d2, d1 and d3 discover each other
	_, err := d3.GetClosestPeers(ctx, "something")
	require.NoError(t, err)
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d3) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
}

// checkRoutingTable reports whether a and b each have the other in their
// routing table. Routing-table updates arrive via notifications, which may
// lag under load, so callers poll this (e.g. via tu.WaitFor) rather than
// asserting it once.
func checkRoutingTable(a, b *IpfsDHT) bool {
	aKnowsB := a.routingTable.Find(b.self) != ""
	bKnowsA := b.routingTable.Find(a.self) != ""
	return aKnowsB && bKnowsA
}