package dht

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"
	"github.com/libp2p/go-libp2p-core/protocol"
	"github.com/libp2p/go-libp2p-core/routing"

	record "github.com/libp2p/go-libp2p-record"
	swarmt "github.com/libp2p/go-libp2p-swarm/testing"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/libp2p/go-msgio/protoio"

	pb "github.com/libp2p/go-libp2p-kad-dht/pb"

	u "github.com/ipfs/go-ipfs-util"
)

// Test that one hung request to a peer doesn't prevent another request
// using that same peer from obeying its context.
func TestHungRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

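	// Two mocknet hosts: hosts[0] runs the DHT under test, hosts[1] is the
	// remote peer whose stream handlers we control below.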
	mn, err := mocknet.FullMeshLinked(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}
	for _, proto := range d.serverProtocols {
		// Hang on every request.
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Reset() //nolint
			<-ctx.Done()
		})
	}

	err = mn.ConnectAllButSelf()
	if err != nil {
		t.Fatal("failed to connect peers", err)
	}

	// Wait a bit for a peer to show up in our routing table.
	for i := 0; i < 100 && d.routingTable.Size() == 0; i++ {
		time.Sleep(10 * time.Millisecond)
	}
	if d.routingTable.Size() == 0 {
		t.Fatal("failed to fill routing table")
	}

	ctx1, cancel1 := context.WithTimeout(ctx, 1*time.Second)
	defer cancel1()

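	// Kick off a query that will hang on hosts[1]; run it in a goroutine so
	// a second request can be issued while it is still in flight.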
	done := make(chan error, 1)
	go func() {
		_, err := d.GetClosestPeers(ctx1, testCaseCids[0].KeyString())
		done <- err
	}()

	time.Sleep(100 * time.Millisecond)
	ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel2()
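	// A second request through the same hung peer must still obey its own,
	// shorter deadline.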
	err = d.Provide(ctx2, testCaseCids[0], true)
	if err != context.DeadlineExceeded {
		t.Errorf("expected to fail with deadline exceeded, got: %s", err)
	}
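	// The hung GetClosestPeers call should still be in flight; block until it
	// fails with ctx1's deadline.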
	select {
	case err = <-done:
		t.Error("GetClosestPeers should not have returned yet", err)
	default:
		err = <-done
		if err != context.DeadlineExceeded {
			t.Errorf("expected the deadline to be exceeded, got %s", err)
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	host1 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))
	host2 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))

	d, err := New(ctx, host1, testPrefix, DisableAutoRefresh(), Mode(ModeServer))
	if err != nil {
		t.Fatal(err)
	}

	// Fail every request: stall for 400ms, then close the stream without replying.
	for _, proto := range d.serverProtocols {
		host2.SetStreamHandler(proto, func(s network.Stream) {
			time.Sleep(400 * time.Millisecond)
			s.Close()
		})
	}

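	// Introduce host2 to host1 and connect them, so the DHT has exactly one
	// (misbehaving) peer to query.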
	host1.Peerstore().AddAddrs(host2.ID(), host2.Addrs(), peerstore.ConnectedAddrTTL)
	_, err = host1.Network().DialPeer(ctx, host2.ID())
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(1 * time.Second)

	// This one should time out
	ctx1, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	if _, err := d.GetValue(ctx1, "test"); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

		if err != context.DeadlineExceeded {
			t.Fatal("got a different error than expected:", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	for _, proto := range d.serverProtocols {
		// Reply to every request with an empty response of the same type.
		host2.SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := protoio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				// user gave up
				return
			}

			resp := &pb.Message{
				Type: pmes.Type,
			}
			_ = pbw.WriteMsg(resp)
		})
	}

	// This one should fail with NotFound.
	// long context timeout to ensure we don't end too early.
	// the dht should be exhausting its query and returning not found.
	// (was 3 seconds before which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	_, err = d.GetValue(ctx2, "test")
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		rec := record.MakePutRecord(str, []byte("blah"))
		req := pb.Message{
			Type:   typ,
			Key:    []byte(str),
			Record: rec,
		}

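		// Dial the DHT host directly and speak the wire protocol ourselves.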
		s, err := host2.NewStream(context.Background(), host1.ID(), d.protocols...)
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax)
		pbw := protoio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have a value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := protoio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					// this isn't an error, it just means the stream has died.
					return
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					resp := &pb.Message{Type: pmes.Type}

					ps := []peer.AddrInfo{}
					for i := 0; i < 7; i++ {
						p := hosts[rand.Intn(len(hosts))].ID()
						pi := host.Peerstore().PeerInfo(p)
						ps = append(ps, pi)
					}

					resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
					if err := pbw.WriteMsg(resp); err != nil {
						return
					}
				default:
					panic("Shouldn't receive this.")
				}
			})
		}
		for _, peer := range hosts {
			if host == peer {
				continue
			}
			_ = peer.Peerstore().AddProtocols(host.ID(), protocol.ConvertToStrings(d.serverProtocols)...)
		}
	}

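	// Seed the DHT's routing table with every host on the mocknet.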
	for _, p := range hosts {
		d.peerFound(ctx, p.ID(), true)
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, "hello")
	logger.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			if d.routingTable.Size() == 0 {
				// make sure we didn't just disconnect
				t.Fatal("expected peers in the routing table")
			}
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// If fewer than K nodes are in the entire network, a GET RPC should fail
// when nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

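	// Seed the routing table with only four peers, fewer than the bucket
	// size K, so the query can run out of peers to ask.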
	for i := 1; i < 5; i++ {
		d.peerFound(ctx, hosts[i].ID(), true)
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := protoio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					panic(err)
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					pi := host.Peerstore().PeerInfo(hosts[1].ID())
					resp := &pb.Message{
						Type:        pmes.Type,
						CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
					}

					if err := pbw.WriteMsg(resp); err != nil {
						panic(err)
					}
				default:
					panic("Shouldn't receive this.")
				}

			})
		}
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, "hello"); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// Test multiple queries against a node that closes its stream after every query.
func TestMultipleQueries(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

	d.peerFound(ctx, hosts[1].ID(), true)

	for _, proto := range d.serverProtocols {
		// It would be nice to be able to just get a value and succeed but then
		// we'd need to deal with selectors and validators...
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := protoio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := protoio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := hosts[1].Peerstore().PeerInfo(hosts[0].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
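	// Each query should exhaust the (stream-closing) network and come back
	// with routing.ErrNotFound.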
	for i := 0; i < 10; i++ {
		if _, err := d.GetValue(ctx, "hello"); err != nil {
			switch err {
			case routing.ErrNotFound:
				// Success!
				continue
			case u.ErrTimeout:
				t.Fatal("Should not have gotten timeout!")
			default:
				t.Fatalf("Got unexpected error: %s", err)
			}
		}
		t.Fatal("Expected to receive an error.")
	}
}