package dht

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"
	"github.com/libp2p/go-libp2p-core/protocol"
	"github.com/libp2p/go-libp2p-core/routing"

	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	record "github.com/libp2p/go-libp2p-record"
	swarmt "github.com/libp2p/go-libp2p-swarm/testing"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"

	ggio "github.com/gogo/protobuf/io"
	u "github.com/ipfs/go-ipfs-util"
)

// Test that one hung request to a peer doesn't prevent another request
// using that same peer from obeying its context.
func TestHungRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}
	for _, proto := range d.serverProtocols {
		// Hang on every request.
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Reset() //nolint
			<-ctx.Done()
		})
	}

	// Wait a bit for a peer to show up in our routing table.
	for i := 0; i < 100 && d.routingTable.Size() == 0; i++ {
		time.Sleep(10 * time.Millisecond)
	}
	if d.routingTable.Size() == 0 {
		t.Fatal("failed to fill routing table")
	}

	ctx1, cancel1 := context.WithTimeout(ctx, 1*time.Second)
	defer cancel1()

	done := make(chan error, 1)
	go func() {
		_, err := d.GetClosestPeers(ctx1, testCaseCids[0].KeyString())
		done <- err
	}()

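	// Give GetClosestPeers a moment to get going before issuing the second request.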
	time.Sleep(100 * time.Millisecond)
	ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel2()
	err = d.Provide(ctx2, testCaseCids[0], true)
	if err != context.DeadlineExceeded {
		t.Errorf("expected to fail with deadline exceeded, got: %s", err)
	}
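	// GetClosestPeers is running against the 1s ctx1 and should still be in flight
	// here; check that non-blockingly, then wait for it to fail with its own deadline.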
	select {
	case err = <-done:
		t.Error("GetClosestPeers should not have returned yet", err)
	default:
		err = <-done
		if err != context.DeadlineExceeded {
			t.Errorf("expected the deadline to be exceeded, got %s", err)
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	host1 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))
	host2 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))

	d, err := New(ctx, host1, testPrefix, DisableAutoRefresh(), Mode(ModeServer))
	if err != nil {
		t.Fatal(err)
	}

	// Fail every request: wait a bit, then close the stream without replying.
	for _, proto := range d.serverProtocols {
		host2.SetStreamHandler(proto, func(s network.Stream) {
			time.Sleep(400 * time.Millisecond)
			s.Close()
		})
	}

	host1.Peerstore().AddAddrs(host2.ID(), host2.Addrs(), peerstore.ConnectedAddrTTL)
	_, err = host1.Network().DialPeer(ctx, host2.ID())
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(1 * time.Second)

	// This one should time out
	ctx1, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	if _, err := d.GetValue(ctx1, "test"); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

		if err != context.DeadlineExceeded {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	for _, proto := range d.serverProtocols {
		// Reply to every message with an empty response (no record, no closer peers).
		host2.SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				// user gave up
				return
			}

			resp := &pb.Message{
				Type: pmes.Type,
			}
			_ = pbw.WriteMsg(resp)
		})
	}

	// This one should fail with NotFound.
	// Long context timeout to ensure we don't end too early.
	// The DHT should be exhausting its query and returning not found.
	// (It was 3 seconds before, which should be _plenty_ of time, but maybe
	// Travis machines really have a hard time...)
	ctx2, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	_, err = d.GetValue(ctx2, "test")
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
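	// host2 opens a stream directly to host1 and sends a GET_VALUE request for a
	// key host1 doesn't have; the response should carry no record and no providers.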
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		rec := record.MakePutRecord(str, []byte("blah"))
		req := pb.Message{
			Type:   typ,
			Key:    []byte(str),
			Record: rec,
		}

		s, err := host2.NewStream(context.Background(), host1.ID(), d.protocols...)
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have a value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := ggio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					// this isn't an error, it just means the stream has died.
					return
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					resp := &pb.Message{Type: pmes.Type}

					ps := []peer.AddrInfo{}
					for i := 0; i < 7; i++ {
						p := hosts[rand.Intn(len(hosts))].ID()
						pi := host.Peerstore().PeerInfo(p)
						ps = append(ps, pi)
					}

					resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
					if err := pbw.WriteMsg(resp); err != nil {
						return
					}
				default:
					panic("Shouldn't receive this.")
				}
			})
		}
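		// Record in every other host's peerstore that this host speaks the DHT server protocols.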
		for _, peer := range hosts {
			if host == peer {
				continue
			}
			_ = peer.Peerstore().AddProtocols(host.ID(), protocol.ConvertToStrings(d.serverProtocols)...)
		}
	}

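	// Seed the DHT's routing table with all of the mock hosts.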
	for _, p := range hosts {
		d.peerFound(ctx, p.ID(), true)
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, "hello")
	logger.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			if d.routingTable.Size() == 0 {
				// make sure we didn't just disconnect
				t.Fatal("expected peers in the routing table")
			}
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// If fewer than K nodes are in the entire network, it should fail when we make
// a GET RPC and nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

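	// Seed the routing table with a few of the mock hosts.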
	for i := 1; i < 5; i++ {
		d.peerFound(ctx, hosts[i].ID(), true)
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := ggio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					panic(err)
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					pi := host.Peerstore().PeerInfo(hosts[1].ID())
					resp := &pb.Message{
						Type:        pmes.Type,
						CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
					}

					if err := pbw.WriteMsg(resp); err != nil {
						panic(err)
					}
				default:
					panic("Shouldn't receive this.")
				}

			})
		}
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, "hello"); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// Test multiple queries against a node that closes its stream after every query.
func TestMultipleQueries(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

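	// hosts[1] is the only peer the DHT will know about.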
	d.peerFound(ctx, hosts[1].ID(), true)

	for _, proto := range d.serverProtocols {
		// It would be nice to be able to just get a value and succeed but then
		// we'd need to deal with selectors and validators...
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := hosts[1].Peerstore().PeerInfo(hosts[0].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	for i := 0; i < 10; i++ {
		if _, err := d.GetValue(ctx, "hello"); err != nil {
			switch err {
			case routing.ErrNotFound:
				// Success!
				continue
			case u.ErrTimeout:
				t.Fatal("Should not have gotten timeout!")
			default:
				t.Fatalf("Got unexpected error: %s", err)
			}
		}
		t.Fatal("Expected to receive an error.")
	}
}