package dht

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"
	"github.com/libp2p/go-libp2p-core/protocol"
	"github.com/libp2p/go-libp2p-core/routing"

	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	record "github.com/libp2p/go-libp2p-record"
	swarmt "github.com/libp2p/go-libp2p-swarm/testing"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"

	ggio "github.com/gogo/protobuf/io"
	u "github.com/ipfs/go-ipfs-util"
)

// Test that one hung request to a peer doesn't prevent another request
// using that same peer from obeying its context.
func TestHungRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}
	for _, proto := range d.serverProtocols {
		// Hang on every request.
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Reset() //nolint
			<-ctx.Done()
		})
	}

	// Wait up to one second for a peer to show up in our routing table.
	for i := 0; i < 100 && d.routingTable.Size() == 0; i++ {
		time.Sleep(10 * time.Millisecond)
	}

	ctx1, cancel1 := context.WithTimeout(ctx, 1*time.Second)
	defer cancel1()

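	// Fire off a lookup in the background; it will block on hosts[1]'s hung stream
	// handler until its one-second deadline expires.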
	done := make(chan error, 1)
	go func() {
		_, err := d.GetClosestPeers(ctx1, testCaseCids[0].KeyString())
		done <- err
	}()

	time.Sleep(100 * time.Millisecond)
	ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel2()
	err = d.Provide(ctx2, testCaseCids[0], true)
	if err != context.DeadlineExceeded {
		t.Errorf("expected to fail with deadline exceeded, got: %s", ctx2.Err())
	}
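	// The backgrounded GetClosestPeers (one-second deadline) should still be running;
	// check that it hasn't returned early, then wait for it to fail with DeadlineExceeded.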
	select {
	case err = <-done:
		t.Error("GetClosestPeers should not have returned yet", err)
	default:
		err = <-done
		if err != context.DeadlineExceeded {
			t.Errorf("expected the deadline to be exceeded, got %s", err)
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	host1 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))
	host2 := bhost.New(swarmt.GenSwarm(t, ctx, swarmt.OptDisableReuseport))

	d, err := New(ctx, host1, testPrefix, DisableAutoRefresh(), Mode(ModeServer))
	if err != nil {
		t.Fatal(err)
	}

	// Hang for a while and then close the stream without replying, so requests fail.
	for _, proto := range d.serverProtocols {
		host2.SetStreamHandler(proto, func(s network.Stream) {
			time.Sleep(400 * time.Millisecond)
			s.Close()
		})
	}

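	// Connect host1 to host2 directly and give the DHT a moment to notice the new peer.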
	host1.Peerstore().AddAddrs(host2.ID(), host2.Addrs(), peerstore.ConnectedAddrTTL)
	_, err = host1.Network().DialPeer(ctx, host2.ID())
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(1 * time.Second)

	// This one should time out
	ctx1, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	if _, err := d.GetValue(ctx1, "test"); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

		if err != context.DeadlineExceeded {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	for _, proto := range d.serverProtocols {
		// Reply to every message, but with an empty response.
		host2.SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				// user gave up
				return
			}

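			// Send back only the message type: no record and no closer peers, so the
			// lookup cannot make progress.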
			resp := &pb.Message{
				Type: pmes.Type,
			}
			_ = pbw.WriteMsg(resp)
		})
	}

	// This one should fail with NotFound.
	// Use a long context timeout to ensure we don't end too early:
	// the DHT should exhaust its query and return not found.
	// (This was 3 seconds before, which should be _plenty_ of time, but maybe
	// the travis machines really have a hard time...)
	ctx2, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	_, err = d.GetValue(ctx2, "test")
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		rec := record.MakePutRecord(str, []byte("blah"))
		req := pb.Message{
			Type:   typ,
			Key:    []byte(str),
			Record: rec,
		}

		s, err := host2.NewStream(context.Background(), host1.ID(), d.protocols...)
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

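		// host1 has no value for this key, so its response should carry neither a record
		// nor provider peers.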
		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}

	if d.routingTable.Size() == 0 {
		// make sure we didn't just disconnect
		t.Fatal("expected peers in the routing table")
	}
}

func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := ggio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					// this isn't an error, it just means the stream has died.
					return
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					resp := &pb.Message{Type: pmes.Type}

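					// Offer 7 random hosts as closer peers, never the value itself, so the
					// query keeps walking until it exhausts the network.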
					ps := []peer.AddrInfo{}
					for i := 0; i < 7; i++ {
						p := hosts[rand.Intn(len(hosts))].ID()
						pi := host.Peerstore().PeerInfo(p)
						ps = append(ps, pi)
					}

					resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
					if err := pbw.WriteMsg(resp); err != nil {
						panic(err)
					}
				default:
					panic("Shouldn't receive this.")
				}
			})
		}
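		// Record in every other host's peerstore that this host speaks the DHT server
		// protocols, so it is treated as a valid DHT server during queries.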
		for _, peer := range hosts {
			if host == peer {
				continue
			}
			_ = peer.Peerstore().AddProtocols(host.ID(), protocol.ConvertToStrings(d.serverProtocols)...)
		}
	}

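	// Seed the DHT's routing table with all of the mock hosts.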
	for _, p := range hosts {
		d.peerFound(ctx, p.ID(), true)
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, "hello")
	logger.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			if d.routingTable.Size() == 0 {
				// make sure we didn't just disconnect
				t.Fatal("expected peers in the routing table")
			}
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// If fewer than K nodes are in the entire network, a GET RPC should fail when
// nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

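	// Seed the routing table with only 4 peers, fewer than K, so the query runs out of
	// peers to ask.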
	for i := 1; i < 5; i++ {
		d.peerFound(ctx, hosts[i].ID(), true)
	}

	// Reply to every message with hosts[1] as the only closer peer.
	for _, host := range hosts {
		host := host // shadow loop var
		for _, proto := range d.serverProtocols {
			host.SetStreamHandler(proto, func(s network.Stream) {
				defer s.Close()

				pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
				pbw := ggio.NewDelimitedWriter(s)

				pmes := new(pb.Message)
				if err := pbr.ReadMsg(pmes); err != nil {
					panic(err)
				}

				switch pmes.GetType() {
				case pb.Message_GET_VALUE:
					pi := host.Peerstore().PeerInfo(hosts[1].ID())
					resp := &pb.Message{
						Type:        pmes.Type,
						CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
					}

					if err := pbw.WriteMsg(resp); err != nil {
						panic(err)
					}
				default:
					panic("Shouldn't receive this.")
				}

			})
		}
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, "hello"); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// Test multiple queries against a node that closes its stream after every query.
func TestMultipleQueries(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	os := []Option{testPrefix, DisableAutoRefresh(), Mode(ModeServer)}
	d, err := New(ctx, hosts[0], os...)
	if err != nil {
		t.Fatal(err)
	}

	d.peerFound(ctx, hosts[1].ID(), true)

	for _, proto := range d.serverProtocols {
		// It would be nice to be able to just get a value and succeed but then
		// we'd need to deal with selectors and validators...
		hosts[1].SetStreamHandler(proto, func(s network.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, network.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := hosts[1].Peerstore().PeerInfo(hosts[0].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.AddrInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
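	// hosts[1] returns only hosts[0] as a closer peer and closes its stream after each
	// exchange, so every query should terminate with routing.ErrNotFound.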
	for i := 0; i < 10; i++ {
		if _, err := d.GetValue(ctx, "hello"); err != nil {
			switch err {
			case routing.ErrNotFound:
				// Success!
				continue
			case u.ErrTimeout:
				t.Fatal("Should not have gotten timeout!")
			default:
				t.Fatalf("Got unexpected error: %s", err)
			}
		}
		t.Fatal("Expected to receive an error.")
	}
}