// Package bitswap implements the IPFS exchange interface with the BitSwap
// bilateral exchange protocol.
package bitswap

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter"

	decision "github.com/ipfs/go-bitswap/decision"
	bsgetter "github.com/ipfs/go-bitswap/getter"
	bsmsg "github.com/ipfs/go-bitswap/message"
	bsmq "github.com/ipfs/go-bitswap/messagequeue"
	bsnet "github.com/ipfs/go-bitswap/network"
	notifications "github.com/ipfs/go-bitswap/notifications"
	bspm "github.com/ipfs/go-bitswap/peermanager"
	bspqm "github.com/ipfs/go-bitswap/providerquerymanager"
	bssession "github.com/ipfs/go-bitswap/session"
	bssm "github.com/ipfs/go-bitswap/sessionmanager"
	bsspm "github.com/ipfs/go-bitswap/sessionpeermanager"
	bswm "github.com/ipfs/go-bitswap/wantmanager"
	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	delay "github.com/ipfs/go-ipfs-delay"
	exchange "github.com/ipfs/go-ipfs-exchange-interface"
	flags "github.com/ipfs/go-ipfs-flags"
	logging "github.com/ipfs/go-log"
	metrics "github.com/ipfs/go-metrics-interface"
	process "github.com/jbenet/goprocess"
	procctx "github.com/jbenet/goprocess/context"
	peer "github.com/libp2p/go-libp2p-peer"
)

var log = logging.Logger("bitswap")

var _ exchange.SessionExchange = (*Bitswap)(nil)

const (
	// maxProvidersPerRequest specifies the maximum number of providers desired
	// from the network. This value is specified because the network streams
	// results.
	// TODO: if a 'non-nice' strategy is implemented, consider increasing this value
	maxProvidersPerRequest = 3
	findProviderDelay      = 1 * time.Second
	providerRequestTimeout = time.Second * 10
	provideTimeout         = time.Second * 15
	sizeBatchRequestChan   = 32
)

var (
	HasBlockBufferSize    = 256
	provideKeysBufferSize = 2048
	provideWorkerMax      = 512

	// the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size
	metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22}
)

func init() {
	if flags.LowMemMode {
		HasBlockBufferSize = 64
		provideKeysBufferSize = 512
		provideWorkerMax = 16
	}
}

var rebroadcastDelay = delay.Fixed(time.Minute)

// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate. It runs until the context is cancelled.
func New(parent context.Context, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the ipfs daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
	ctx, cancelFunc := context.WithCancel(parent)
	ctx = metrics.CtxSubScope(ctx, "bitswap")
	dupHist := metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate"+
		" data blocks received").Histogram(metricsBuckets)
	allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+
		" data blocks received").Histogram(metricsBuckets)

	sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+
		" this bitswap").Histogram(metricsBuckets)

	notif := notifications.New()
	px := process.WithTeardown(func() error {
		notif.Shutdown()
		return nil
	})

	peerQueueFactory := func(p peer.ID) bspm.PeerQueue {
		return bsmq.New(p, network)
	}

	wm := bswm.New(ctx)
	pqm := bspqm.New(ctx, network)

	sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) bssm.Session {
		return bssession.New(ctx, id, wm, pm, srs)
	}
	sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager {
		return bsspm.New(ctx, id, network.ConnectionManager(), pqm)
	}
	sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter {
		return bssrs.New(ctx)
	}

	bs := &Bitswap{
		blockstore:    bstore,
		notifications: notif,
		engine:        decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
		network:       network,
		findKeys:      make(chan *blockRequest, sizeBatchRequestChan),
		process:       px,
		newBlocks:     make(chan cid.Cid, HasBlockBufferSize),
		provideKeys:   make(chan cid.Cid, provideKeysBufferSize),
		wm:            wm,
		pqm:           pqm,
		pm:            bspm.New(ctx, peerQueueFactory),
		sm:            bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory),
		counters:      new(counters),
		dupMetric:     dupHist,
		allMetric:     allHist,
		sentHistogram: sentHistogram,
	}

	bs.wm.SetDelegate(bs.pm)
	bs.pm.Startup()
	bs.wm.Startup()
	bs.pqm.Startup()
	network.SetDelegate(bs)

	// Start up bitswap's async worker routines
	bs.startWorkers(px, ctx)

	// bind the context and process.
	// do it over here to avoid closing before all setup is done.
	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
	}()
	procctx.CloseAfterContext(px, ctx) // parent cancelled first

	return bs
}
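
// The sketch below is illustrative only and is not part of the original
// bitswap API: it assumes the caller already has a bsnet.BitSwapNetwork
// (for example, one built from a libp2p host plus content routing) and a
// thread-safe blockstore, and shows how New wires them together. Cancelling
// the parent context or calling Close shuts the instance down.
func newExchangeSketch(parent context.Context, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore) (exchange.Interface, func() error) {
	ctx, cancel := context.WithCancel(parent)
	exch := New(ctx, network, bstore)
	shutdown := func() error {
		cancel()
		return exch.Close()
	}
	return exch, shutdown
}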

// Bitswap instances implement the bitswap protocol.
type Bitswap struct {
	// the peermanager manages sending messages to peers in a way that
	// won't block bitswap operation
	pm *bspm.PeerManager

	// the wantlist tracks global wants for bitswap
	wm *bswm.WantManager

	// the provider query manager manages requests to find providers
	pqm *bspqm.ProviderQueryManager

	// the engine is the bit of logic that decides who to send which blocks to
	engine *decision.Engine

	// network delivers messages on behalf of the session
	network bsnet.BitSwapNetwork

	// blockstore is the local database
	// NB: ensure threadsafety
	blockstore blockstore.Blockstore

	// notifications engine for receiving new blocks and routing them to the
	// appropriate user requests
	notifications notifications.PubSub

	// findKeys sends keys to a worker to find and connect to providers for them
	findKeys chan *blockRequest
	// newBlocks is a channel for newly added blocks to be provided to the
	// network. Blocks pushed down this channel get buffered and fed to the
	// provideKeys channel later on to avoid too much network activity
	newBlocks chan cid.Cid
	// provideKeys directly feeds provide workers
	provideKeys chan cid.Cid

	process process.Process

	// Counters for various statistics
	counterLk sync.Mutex
	counters  *counters

	// Metrics interface metrics
	dupMetric     metrics.Histogram
	allMetric     metrics.Histogram
	sentHistogram metrics.Histogram

	// the sessionmanager manages tracking sessions
	sm *bssm.SessionManager
}

type counters struct {
	blocksRecvd    uint64
	dupBlocksRecvd uint64
	dupDataRecvd   uint64
	blocksSent     uint64
	dataSent       uint64
	dataRecvd      uint64
	messagesRecvd  uint64
}

type blockRequest struct {
	Cid cid.Cid
	Ctx context.Context
}

// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) {
	return bsgetter.SyncGetBlock(parent, k, bs.GetBlocks)
}
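
// getBlockSketch is a hypothetical helper, not part of the original file,
// showing a single-block fetch bounded by a deadline; GetBlock blocks until
// the block arrives or the context is done. The 10-second timeout is an
// assumed value, not something the package prescribes.
func getBlockSketch(parent context.Context, bs *Bitswap, k cid.Cid) (blocks.Block, error) {
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel()
	return bs.GetBlock(ctx, k)
}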

// WantlistForPeer returns the list of blocks the given peer has asked us for,
// as recorded by the decision engine.
func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid {
	var out []cid.Cid
	for _, e := range bs.engine.WantlistForPeer(p) {
		out = append(out, e.Cid)
	}
	return out
}

// LedgerForPeer returns the decision engine's accounting of data exchanged
// with the given peer.
func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt {
	return bs.engine.LedgerForPeer(p)
}

// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e. not one
// that lasts throughout the lifetime of the server)
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) {
	if len(keys) == 0 {
		out := make(chan blocks.Block)
		close(out)
		return out, nil
	}

	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
	}

	mses := bs.sm.GetNextSessionID()

	bs.wm.WantBlocks(ctx, keys, nil, mses)

	remaining := cid.NewSet()
	for _, k := range keys {
		remaining.Add(k)
	}

	out := make(chan blocks.Block)
	go func() {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		defer close(out)
		defer func() {
			// can't just defer this call on its own, arguments are resolved *when* the defer is created
			bs.CancelWants(remaining.Keys(), mses)
		}()
		findProvsDelay := time.NewTimer(findProviderDelay)
		defer findProvsDelay.Stop()

		findProvsDelayCh := findProvsDelay.C
		req := &blockRequest{
			Cid: keys[0],
			Ctx: ctx,
		}

		var findProvsReqCh chan<- *blockRequest

		for {
			select {
			case <-findProvsDelayCh:
				// NB: Optimization. Assumes that providers of key[0] are likely to
				// be able to provide for all keys. This currently holds true in most
				// every situation. Later, this assumption may not hold as true.
				findProvsReqCh = bs.findKeys
				findProvsDelayCh = nil
			case findProvsReqCh <- req:
				findProvsReqCh = nil
			case blk, ok := <-promise:
				if !ok {
					return
				}

				// No need to find providers now.
				findProvsDelay.Stop()
				findProvsDelayCh = nil
				findProvsReqCh = nil

				bs.CancelWants([]cid.Cid{blk.Cid()}, mses)
				remaining.Remove(blk.Cid())
				select {
				case out <- blk:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return out, nil
}
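
// getBlocksSketch is a hypothetical helper, not part of the original file,
// showing how a caller might drain the channel returned by GetBlocks. The
// request stays open until the context expires, so the assumed 30-second
// deadline here bounds both the search and the subscription.
func getBlocksSketch(parent context.Context, bs *Bitswap, keys []cid.Cid) ([]blocks.Block, error) {
	ctx, cancel := context.WithTimeout(parent, 30*time.Second)
	defer cancel()

	ch, err := bs.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}

	var received []blocks.Block
	for blk := range ch { // the channel closes once all keys arrive or the context ends
		received = append(received, blk)
	}
	return received, nil
}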

// CancelWants removes a given key from the wantlist.
func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) {
	if len(cids) == 0 {
		return
	}
	bs.wm.CancelWants(context.Background(), cids, nil, ses)
}

// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func (bs *Bitswap) HasBlock(blk blocks.Block) error {
	return bs.receiveBlockFrom(blk, "")
}
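
// hasBlockSketch is a hypothetical helper, not part of the original file,
// showing the usual flow for locally created data: wrap the bytes as a
// block and hand it to HasBlock, which stores it, publishes it to waiting
// requests, and queues its CID to be provided to the network.
func hasBlockSketch(bs *Bitswap, data []byte) (cid.Cid, error) {
	blk := blocks.NewBlock(data) // hashes the data and wraps it as a basic block
	if err := bs.HasBlock(blk); err != nil {
		return cid.Undef, err
	}
	return blk.Cid(), nil
}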

// TODO: Some of this stuff really only needs to be done when adding a block
// from the user, not when receiving it from the network.
// In case you run `git blame` on this comment, I'll save you some time: ask
// @whyrusleeping, I don't know the answers you seek.
func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error {
	select {
	case <-bs.process.Closing():
		return errors.New("bitswap is closed")
	default:
	}

	err := bs.blockstore.Put(blk)
	if err != nil {
		log.Errorf("Error writing block to datastore: %s", err)
		return err
	}

	// NOTE: There exists the possibility for a race condition here. If a user
	// creates a node, then adds it to the dagservice while another goroutine
	// is waiting on a GetBlock for that object, they will receive a reference
	// to the same node. We should address this soon, but I'm not going to do
	// it now as it requires more thought and isn't causing immediate problems.
	bs.notifications.Publish(blk)

	bs.sm.ReceiveBlockFrom(from, blk)

	bs.engine.AddBlock(blk)

	select {
	case bs.newBlocks <- blk.Cid():
		// send block off to be reprovided
	case <-bs.process.Closing():
		return bs.process.Close()
	}
	return nil
}

// ReceiveMessage is called by the network interface when a new message
// arrives from a peer.
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) {
	atomic.AddUint64(&bs.counters.messagesRecvd, 1)

	// This call records changes to wantlists, blocks received,
	// and number of bytes transferred.
	bs.engine.MessageReceived(p, incoming)
	// TODO: this is bad, and could be easily abused.
	// Should only track *useful* messages in ledger

	iblocks := incoming.Blocks()

	if len(iblocks) == 0 {
		return
	}

	wg := sync.WaitGroup{}
	for _, block := range iblocks {

		wg.Add(1)
		go func(b blocks.Block) { // TODO: this probably doesn't need to be a goroutine...
			defer wg.Done()

			bs.updateReceiveCounters(b)
			bs.sm.UpdateReceiveCounters(b)
			log.Debugf("got block %s from %s", b, p)

			// skip received blocks that are not in the wantlist
			if !bs.wm.IsWanted(b.Cid()) {
				return
			}

			if err := bs.receiveBlockFrom(b, p); err != nil {
				log.Warningf("ReceiveMessage recvBlockFrom error: %s", err)
			}
			log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid())
		}(block)
	}
	wg.Wait()
}

// ErrAlreadyHaveBlock is returned when bitswap is handed a block it already has.
var ErrAlreadyHaveBlock = errors.New("already have block")

// updateReceiveCounters records metrics and counters for a received block,
// noting whether it duplicates one already in the blockstore.
func (bs *Bitswap) updateReceiveCounters(b blocks.Block) {
	blkLen := len(b.RawData())
	has, err := bs.blockstore.Has(b.Cid())
	if err != nil {
		log.Infof("blockstore.Has error: %s", err)
		return
	}

	bs.allMetric.Observe(float64(blkLen))
	if has {
		bs.dupMetric.Observe(float64(blkLen))
	}

	bs.counterLk.Lock()
	defer bs.counterLk.Unlock()
	c := bs.counters

	c.blocksRecvd++
	c.dataRecvd += uint64(len(b.RawData()))
	if has {
		c.dupBlocksRecvd++
		c.dupDataRecvd += uint64(blkLen)
	}
}

// PeerConnected is called by the network interface when a peer connects,
// warning bitswap about the new connection.
func (bs *Bitswap) PeerConnected(p peer.ID) {
	initialWants := bs.wm.CurrentBroadcastWants()
	bs.pm.Connected(p, initialWants)
	bs.engine.PeerConnected(p)
}

// PeerDisconnected is called by the network interface when a peer disconnects,
// warning bitswap about the lost connection.
func (bs *Bitswap) PeerDisconnected(p peer.ID) {
	bs.pm.Disconnected(p)
	bs.engine.PeerDisconnected(p)
}

// ReceiveError is called by the network interface when an error occurs while
// receiving a message.
func (bs *Bitswap) ReceiveError(err error) {
	log.Infof("Bitswap ReceiveError: %s", err)
	// TODO log the network error
	// TODO bubble the network error up to the parent context/error logger
}

// Close shuts down the bitswap service and its workers.
func (bs *Bitswap) Close() error {
	return bs.process.Close()
}

// GetWantlist returns the current local wantlist.
func (bs *Bitswap) GetWantlist() []cid.Cid {
	entries := bs.wm.CurrentWants()
	out := make([]cid.Cid, 0, len(entries))
	for _, e := range entries {
		out = append(out, e.Cid)
	}
	return out
}

// IsOnline satisfies the exchange interface; a bitswap instance is always
// considered online.
func (bs *Bitswap) IsOnline() bool {
	return true
}

// NewSession generates a new bitswap session. Use a session, rather than the
// global bitswap, when requesting a series of related blocks; the session
// tries to reuse the same peers for all of them.
func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher {
	return bs.sm.NewSession(ctx)
}
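
// newSessionSketch is a hypothetical helper, not part of the original file,
// showing how related fetches share a session: the returned exchange.Fetcher
// reuses peers that answered earlier requests instead of broadcasting every
// want to all connected peers.
func newSessionSketch(ctx context.Context, bs *Bitswap, keys []cid.Cid) (<-chan blocks.Block, error) {
	session := bs.NewSession(ctx)
	return session.GetBlocks(ctx, keys) // same semantics as Bitswap.GetBlocks, scoped to the session
}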