package graphsync

import (
	"bytes"
	"context"
	"errors"
	"io"
	"io/ioutil"
	"math"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	ipldfree "github.com/ipld/go-ipld-prime/impl/free"

	cidlink "github.com/ipld/go-ipld-prime/linking/cid"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-datastore"
	dss "github.com/ipfs/go-datastore/sync"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipldformat "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"

	"github.com/ipfs/go-graphsync"

	"github.com/ipfs/go-graphsync/ipldbridge"
	gsmsg "github.com/ipfs/go-graphsync/message"
	gsnet "github.com/ipfs/go-graphsync/network"
	"github.com/ipfs/go-graphsync/testbridge"
	"github.com/ipfs/go-graphsync/testutil"
	ipld "github.com/ipld/go-ipld-prime"
	ipldselector "github.com/ipld/go-ipld-prime/traversal/selector"
	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func TestMakeRequestToNetwork(t *testing.T) {
	// create network
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
55
	td := newGsTestData(ctx, t)
56
	r := &receiver{
57
		messageReceived: make(chan receivedMessage),
58
	}
59 60
	td.gsnet2.SetDelegate(r)
	graphSync := td.GraphSyncHost1()
61

62
	blockChainLength := 100
63
	blockChain := testutil.SetupBlockChain(ctx, t, td.loader1, td.storer1, 100, blockChainLength)
64

65 66
	requestCtx, requestCancel := context.WithCancel(ctx)
	defer requestCancel()
67
	graphSync.Request(requestCtx, td.host2.ID(), blockChain.TipLink, blockChain.Selector(), td.extension)
68

69
	var message receivedMessage
70 71 72
	select {
	case <-ctx.Done():
		t.Fatal("did not receive message sent")
73
	case message = <-r.messageReceived:
74 75
	}

76
	sender := message.sender
77
	if sender != td.host1.ID() {
78 79 80
		t.Fatal("received message from wrong node")
	}

81
	received := message.message
82 83 84 85 86
	receivedRequests := received.Requests()
	if len(receivedRequests) != 1 {
		t.Fatal("Did not add request to received message")
	}
	receivedRequest := receivedRequests[0]
87
	receivedSpec := receivedRequest.Selector()
88
	if !reflect.DeepEqual(blockChain.Selector(), receivedSpec) {
89 90
		t.Fatal("did not transmit selector spec correctly")
	}
91
	_, err := ipldbridge.ParseSelector(receivedSpec)
92 93 94
	if err != nil {
		t.Fatal("did not receive parsible selector on other side")
	}
95

96 97
	returnedData, found := receivedRequest.Extension(td.extensionName)
	if !found || !reflect.DeepEqual(td.extensionData, returnedData) {
98 99
		t.Fatal("Failed to encode extension")
	}
100
}
101 102 103 104 105 106

func TestSendResponseToIncomingRequest(t *testing.T) {
	// create network
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()
107
	td := newGsTestData(ctx, t)
108 109 110
	r := &receiver{
		messageReceived: make(chan receivedMessage),
	}
111
	td.gsnet1.SetDelegate(r)
112 113

	var receivedRequestData []byte
114
	// initialize graphsync on second node to response to requests
115 116
	gsnet := td.GraphSyncHost2()
	err := gsnet.RegisterRequestReceivedHook(
117
		func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.RequestReceivedHookActions) {
118
			var has bool
119
			receivedRequestData, has = requestData.Extension(td.extensionName)
120 121 122
			if !has {
				t.Fatal("did not have expected extension")
			}
123
			hookActions.SendExtensionData(td.extensionResponse)
124 125 126 127 128
		},
	)
	if err != nil {
		t.Fatal("error registering extension")
	}
129

130
	blockChainLength := 100
131
	blockChain := testutil.SetupBlockChain(ctx, t, td.loader2, td.storer2, 100, blockChainLength)
132

133
	requestID := graphsync.RequestID(rand.Int31())
134 135

	message := gsmsg.New()
136
	message.AddRequest(gsmsg.NewRequest(requestID, blockChain.TipLink.(cidlink.Link).Cid, blockChain.Selector(), graphsync.Priority(math.MaxInt32), td.extension))
137
	// send request across network
138
	td.gsnet1.SendMessage(ctx, td.host2.ID(), message)
139 140 141
	// read the values sent back to requestor
	var received gsmsg.GraphSyncMessage
	var receivedBlocks []blocks.Block
142
	var receivedExtensions [][]byte
143 144 145 146 147 148 149
readAllMessages:
	for {
		select {
		case <-ctx.Done():
			t.Fatal("did not receive complete response")
		case message := <-r.messageReceived:
			sender := message.sender
150
			if sender != td.host2.ID() {
151 152 153 154 155 156
				t.Fatal("received message from wrong node")
			}

			received = message.message
			receivedBlocks = append(receivedBlocks, received.Blocks()...)
			receivedResponses := received.Responses()
157
			receivedExtension, found := receivedResponses[0].Extension(td.extensionName)
158 159 160
			if found {
				receivedExtensions = append(receivedExtensions, receivedExtension)
			}
161 162 163 164 165 166
			if len(receivedResponses) != 1 {
				t.Fatal("Did not receive response")
			}
			if receivedResponses[0].RequestID() != requestID {
				t.Fatal("Sent response for incorrect request id")
			}
167
			if receivedResponses[0].Status() != graphsync.PartialResponse {
168 169 170 171 172
				break readAllMessages
			}
		}
	}

173
	if len(receivedBlocks) != blockChainLength {
174 175
		t.Fatal("Send incorrect number of blocks or there were duplicate blocks")
	}
176

177
	if !reflect.DeepEqual(td.extensionData, receivedRequestData) {
178 179 180 181 182 183 184
		t.Fatal("did not receive correct request extension data")
	}

	if len(receivedExtensions) != 1 {
		t.Fatal("should have sent extension responses but didn't")
	}

185
	if !reflect.DeepEqual(receivedExtensions[0], td.extensionResponseData) {
186 187
		t.Fatal("did not return correct extension data")
	}
188
}
189 190 191 192 193 194

func TestGraphsyncRoundTrip(t *testing.T) {
	// create network
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()
195
	td := newGsTestData(ctx, t)
196

197
	// initialize graphsync on first node to make requests
198
	requestor := td.GraphSyncHost1()
199 200

	// setup receiving peer to just record message coming in
201
	blockChainLength := 100
202
	blockChain := testutil.SetupBlockChain(ctx, t, td.loader2, td.storer2, 100, blockChainLength)
203 204

	// initialize graphsync on second node to response to requests
205
	responder := td.GraphSyncHost2()
206 207 208 209

	var receivedResponseData []byte
	var receivedRequestData []byte

210
	err := requestor.RegisterResponseReceivedHook(
211
		func(p peer.ID, responseData graphsync.ResponseData) error {
212
			data, has := responseData.Extension(td.extensionName)
213 214 215 216 217 218 219 220 221 222 223
			if has {
				receivedResponseData = data
			}
			return nil
		})
	if err != nil {
		t.Fatal("Error setting up extension")
	}

	err = responder.RegisterRequestReceivedHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.RequestReceivedHookActions) {
		var has bool
224
		receivedRequestData, has = requestData.Extension(td.extensionName)
225 226 227
		if !has {
			hookActions.TerminateWithError(errors.New("Missing extension"))
		} else {
228
			hookActions.SendExtensionData(td.extensionResponse)
229 230 231 232 233 234
		}
	})

	if err != nil {
		t.Fatal("Error setting up extension")
	}
235

236
	progressChan, errChan := requestor.Request(ctx, td.host2.ID(), blockChain.TipLink, blockChain.Selector(), td.extension)
237 238 239 240

	responses := testutil.CollectResponses(ctx, t, progressChan)
	errs := testutil.CollectErrors(ctx, t, errChan)

241
	if len(responses) != blockChainLength*2 {
242 243
		t.Fatal("did not traverse all nodes")
	}
244 245
	if len(errs) != 0 {
		t.Fatal("errors during traverse")
246
	}
247
	if len(td.blockStore1) != blockChainLength {
248 249
		t.Fatal("did not store all blocks")
	}
250 251 252 253 254 255 256 257 258 259 260 261 262 263

	expectedPath := ""
	for i, response := range responses {
		if response.Path.String() != expectedPath {
			t.Fatal("incorrect path")
		}
		if i%2 == 0 {
			if expectedPath == "" {
				expectedPath = "Parents"
			} else {
				expectedPath = expectedPath + "/Parents"
			}
		} else {
			expectedPath = expectedPath + "/0"
264 265
		}
	}
266 267

	// verify extension roundtrip
268
	if !reflect.DeepEqual(receivedRequestData, td.extensionData) {
269 270 271
		t.Fatal("did not receive correct extension request data")
	}

272
	if !reflect.DeepEqual(receivedResponseData, td.extensionResponseData) {
273 274
		t.Fatal("did not receive correct extension response data")
	}
275
}
276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292

// TestRoundTripLargeBlocksSlowNetwork test verifies graphsync continues to work
// under a specific of adverse conditions:
// -- large blocks being returned by a query
// -- slow network connection
// It verifies that Graphsync will properly break up network message packets
// so they can still be decoded on the client side, instead of building up a huge
// backlog of blocks and then sending them in one giant network packet that can't
// be decoded on the client side
func TestRoundTripLargeBlocksSlowNetwork(t *testing.T) {
	// create network
	if testing.Short() {
		t.Skip()
	}
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
293 294 295 296 297
	td := newGsTestData(ctx, t)
	td.mn.SetLinkDefaults(mocknet.LinkOptions{Latency: 100 * time.Millisecond, Bandwidth: 3000000})

	// initialize graphsync on first node to make requests
	requestor := td.GraphSyncHost1()
298

299 300
	// setup receiving peer to just record message coming in
	blockChainLength := 40
301
	blockChain := testutil.SetupBlockChain(ctx, t, td.loader1, td.storer2, 200000, blockChainLength)
302 303 304 305

	// initialize graphsync on second node to response to requests
	td.GraphSyncHost2()

306
	progressChan, errChan := requestor.Request(ctx, td.host2.ID(), blockChain.TipLink, blockChain.Selector())
307 308 309 310 311 312 313 314 315 316 317 318

	responses := testutil.CollectResponses(ctx, t, progressChan)
	errs := testutil.CollectErrors(ctx, t, errChan)

	if len(responses) != blockChainLength*2 {
		t.Fatal("did not traverse all nodes")
	}
	if len(errs) != 0 {
		t.Fatal("errors during traverse")
	}
}

319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427
// What this test does:
// - Construct a blockstore + dag service
// - Import a file to UnixFS v1
// - setup a graphsync request from one node to the other
// for the file
// - Load the file from the new block store on the other node
// using the
// existing UnixFS v1 file reader
// - Verify the bytes match the original
func TestUnixFSFetch(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}

	const unixfsChunkSize uint64 = 1 << 10
	const unixfsLinksPerLevel = 1024

	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	makeLoader := func(bs bstore.Blockstore) ipld.Loader {
		return func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) {
			c, ok := lnk.(cidlink.Link)
			if !ok {
				return nil, errors.New("Incorrect Link Type")
			}
			// read block from one store
			block, err := bs.Get(c.Cid)
			if err != nil {
				return nil, err
			}
			return bytes.NewReader(block.RawData()), nil
		}
	}

	makeStorer := func(bs bstore.Blockstore) ipld.Storer {
		return func(lnkCtx ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) {
			var buf bytes.Buffer
			var committer ipld.StoreCommitter = func(lnk ipld.Link) error {
				c, ok := lnk.(cidlink.Link)
				if !ok {
					return errors.New("Incorrect Link Type")
				}
				block, err := blocks.NewBlockWithCid(buf.Bytes(), c.Cid)
				if err != nil {
					return err
				}
				return bs.Put(block)
			}
			return &buf, committer, nil
		}
	}
	// make a blockstore and dag service
	bs1 := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore()))

	// make a second blockstore
	bs2 := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore()))
	dagService2 := merkledag.NewDAGService(blockservice.New(bs2, offline.Exchange(bs2)))

	// read in a fixture file
	path, err := filepath.Abs(filepath.Join("fixtures", "lorem.txt"))
	if err != nil {
		t.Fatal("unable to create path for fixture file")
	}

	f, err := os.Open(path)
	if err != nil {
		t.Fatal("unable to open fixture file")
	}
	var buf bytes.Buffer
	tr := io.TeeReader(f, &buf)
	file := files.NewReaderFile(tr)

	// import to UnixFS
	bufferedDS := ipldformat.NewBufferedDAG(ctx, dagService2)

	params := ihelper.DagBuilderParams{
		Maxlinks:   unixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(unixfsChunkSize)))
	if err != nil {
		t.Fatal("unable to setup dag builder")
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		t.Fatal("unable to create unix fs node")
	}
	err = bufferedDS.Commit()
	if err != nil {
		t.Fatal("unable to commit unix fs node")
	}

	// save the original files bytes
	origBytes := buf.Bytes()

	// setup an IPLD loader/storer for blockstore 1
	loader1 := makeLoader(bs1)
	storer1 := makeStorer(bs1)

	// setup an IPLD loader/storer for blockstore 2
	loader2 := makeLoader(bs2)
	storer2 := makeStorer(bs2)

	td := newGsTestData(ctx, t)
428 429
	requestor := New(ctx, td.gsnet1, loader1, storer1)
	responder := New(ctx, td.gsnet2, loader2, storer2)
430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494
	extensionName := graphsync.ExtensionName("Free for all")
	responder.RegisterRequestReceivedHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.RequestReceivedHookActions) {
		hookActions.ValidateRequest()
		hookActions.SendExtensionData(graphsync.ExtensionData{
			Name: extensionName,
			Data: nil,
		})
	})
	// make a go-ipld-prime link for the root UnixFS node
	clink := cidlink.Link{Cid: nd.Cid()}

	// create a selector for the whole UnixFS dag
	ssb := builder.NewSelectorSpecBuilder(ipldfree.NodeBuilder())

	allSelector := ssb.ExploreRecursive(ipldselector.RecursionLimitNone(),
		ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()

	// execute the traversal
	progressChan, errChan := requestor.Request(ctx, td.host2.ID(), clink, allSelector,
		graphsync.ExtensionData{
			Name: extensionName,
			Data: nil,
		})

	_ = testutil.CollectResponses(ctx, t, progressChan)
	responseErrors := testutil.CollectErrors(ctx, t, errChan)

	// verify traversal was successful
	if len(responseErrors) != 0 {
		t.Fatal("Response should be successful but wasn't")
	}

	// setup a DagService for the second block store
	dagService1 := merkledag.NewDAGService(blockservice.New(bs1, offline.Exchange(bs1)))

	// load the root of the UnixFS DAG from the new blockstore
	otherNode, err := dagService1.Get(ctx, nd.Cid())
	if err != nil {
		t.Fatal("should have been able to read received root node but didn't")
	}

	// Setup a UnixFS file reader
	n, err := unixfile.NewUnixfsFile(ctx, dagService1, otherNode)
	if err != nil {
		t.Fatal("should have been able to setup UnixFS file but wasn't")
	}

	fn, ok := n.(files.File)
	if !ok {
		t.Fatal("file should be a regular file, but wasn't")
	}

	// Read the bytes for the UnixFS File
	finalBytes, err := ioutil.ReadAll(fn)
	if err != nil {
		t.Fatal("should have been able to read all of unix FS file but wasn't")
	}

	// verify original bytes match final bytes!
	if !reflect.DeepEqual(origBytes, finalBytes) {
		t.Fatal("should have gotten same bytes written as read but didn't")
	}

}

// gsTestData bundles the shared two-peer test fixture: a mocknet with two
// hosts, a GraphSyncNetwork per host, an in-memory mock block store (with
// matching loader/storer) per host, and extension request/response fixtures
// used to exercise the extension hooks.
type gsTestData struct {
	mn                       mocknet.Mocknet
	ctx                      context.Context
	host1                    host.Host
	host2                    host.Host
	gsnet1                   gsnet.GraphSyncNetwork
	gsnet2                   gsnet.GraphSyncNetwork
	blockStore1, blockStore2 map[ipld.Link][]byte
	loader1, loader2         ipld.Loader
	storer1, storer2         ipld.Storer
	// extension attached to outgoing requests (name + random payload)
	extensionData            []byte
	extensionName            graphsync.ExtensionName
	extension                graphsync.ExtensionData
	// extension responders send back (same name, separate random payload)
	extensionResponseData    []byte
	extensionResponse        graphsync.ExtensionData
}

func newGsTestData(ctx context.Context, t *testing.T) *gsTestData {
	td := &gsTestData{ctx: ctx}
	td.mn = mocknet.New(ctx)
	var err error
516
	// setup network
517
	td.host1, err = td.mn.GenPeer()
518 519 520
	if err != nil {
		t.Fatal("error generating host")
	}
521
	td.host2, err = td.mn.GenPeer()
522 523 524
	if err != nil {
		t.Fatal("error generating host")
	}
525
	err = td.mn.LinkAll()
526 527 528 529
	if err != nil {
		t.Fatal("error linking hosts")
	}

530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
	td.gsnet1 = gsnet.NewFromLibp2pHost(td.host1)
	td.gsnet2 = gsnet.NewFromLibp2pHost(td.host2)
	td.blockStore1 = make(map[ipld.Link][]byte)
	td.loader1, td.storer1 = testbridge.NewMockStore(td.blockStore1)
	td.blockStore2 = make(map[ipld.Link][]byte)
	td.loader2, td.storer2 = testbridge.NewMockStore(td.blockStore2)
	// setup extension handlers
	td.extensionData = testutil.RandomBytes(100)
	td.extensionName = graphsync.ExtensionName("AppleSauce/McGee")
	td.extension = graphsync.ExtensionData{
		Name: td.extensionName,
		Data: td.extensionData,
	}
	td.extensionResponseData = testutil.RandomBytes(100)
	td.extensionResponse = graphsync.ExtensionData{
		Name: td.extensionName,
		Data: td.extensionResponseData,
	}
548

549 550
	return td
}
551

552
func (td *gsTestData) GraphSyncHost1() graphsync.GraphExchange {
553
	return New(td.ctx, td.gsnet1, td.loader1, td.storer1)
554
}
555

556
func (td *gsTestData) GraphSyncHost2() graphsync.GraphExchange {
557

558
	return New(td.ctx, td.gsnet2, td.loader2, td.storer2)
559
}
560

// receivedMessage pairs an incoming GraphSync message with the peer that
// sent it, as captured by the test receiver below.
type receivedMessage struct {
	message gsmsg.GraphSyncMessage
	sender  peer.ID
}

// receiver records messages arriving from a GraphSyncNetwork; tests install
// it as the network delegate (via SetDelegate) to observe raw traffic.
type receiver struct {
	// messageReceived delivers each incoming message to the test goroutine.
	messageReceived chan receivedMessage
}

571 572 573 574
func (r *receiver) ReceiveMessage(
	ctx context.Context,
	sender peer.ID,
	incoming gsmsg.GraphSyncMessage) {
575

576 577 578 579 580
	select {
	case <-ctx.Done():
	case r.messageReceived <- receivedMessage{incoming, sender}:
	}
}
581

// ReceiveError satisfies the network receiver interface; network errors are
// deliberately ignored in these tests.
func (r *receiver) ReceiveError(err error) {
}

// Connected satisfies the network receiver interface; connection events are
// not tracked by these tests.
func (r *receiver) Connected(p peer.ID) {
}

// Disconnected satisfies the network receiver interface; disconnection
// events are not tracked by these tests.
func (r *receiver) Disconnected(p peer.ID) {
}