workers.go 5.13 KB
Newer Older
1 2 3
package bitswap

import (
4
	"context"
dirkmc's avatar
dirkmc committed
5
	"fmt"
6

7
	engine "github.com/ipfs/go-bitswap/internal/decision"
dirkmc's avatar
dirkmc committed
8
	pb "github.com/ipfs/go-bitswap/message/pb"
Jeromy's avatar
Jeromy committed
9 10 11
	cid "github.com/ipfs/go-cid"
	process "github.com/jbenet/goprocess"
	procctx "github.com/jbenet/goprocess/context"
12
	"go.uber.org/zap"
13 14
)

15 16
// TaskWorkerCount is the total number of simultaneous threads sending
// outgoing messages.
// NOTE(review): package-level and mutable; presumably intended to be set
// before Bitswap is started — changing it afterwards has no effect on
// already-spawned workers.
var TaskWorkerCount = 8
18

19
func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) {
20

Jeromy's avatar
Jeromy committed
21 22
	// Start up workers to handle requests from other nodes for the data on this node
	for i := 0; i < TaskWorkerCount; i++ {
23
		i := i
Jeromy's avatar
Jeromy committed
24
		px.Go(func(px process.Process) {
25
			bs.taskWorker(ctx, i)
Jeromy's avatar
Jeromy committed
26 27
		})
	}
28

29
	if bs.provideEnabled {
30 31 32 33 34 35 36 37 38 39
		// Start up a worker to manage sending out provides messages
		px.Go(func(px process.Process) {
			bs.provideCollector(ctx)
		})

		// Spawn up multiple workers to handle incoming blocks
		// consider increasing number if providing blocks bottlenecks
		// file transfers
		px.Go(bs.provideWorker)
	}
40 41
}

42
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
Jeromy's avatar
Jeromy committed
43
	defer log.Debug("bitswap task worker shutting down...")
44
	log := log.With("ID", id)
45
	for {
46
		log.Debug("Bitswap.TaskWorker.Loop")
47 48 49 50 51 52 53
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
dirkmc's avatar
dirkmc committed
54 55

				// TODO: Only record message as sent if there was no error?
56 57 58
				// Ideally, yes. But we'd need some way to trigger a retry and/or drop
				// the peer.
				bs.engine.MessageSent(envelope.Peer, envelope.Message)
59
				bs.sendBlocks(ctx, envelope)
60 61 62 63 64 65 66 67 68
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

69
func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) {
Steven Allen's avatar
Steven Allen committed
70
	if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil {
71 72
		return
	}
dirkmc's avatar
dirkmc committed
73

Steven Allen's avatar
Steven Allen committed
74 75
	self := bs.network.Self()

dirkmc's avatar
dirkmc committed
76 77 78 79
	for _, blockPresence := range env.Message.BlockPresences() {
		c := blockPresence.Cid
		switch blockPresence.Type {
		case pb.Message_Have:
Steven Allen's avatar
Steven Allen committed
80
			log.Debugw("sent message",
81 82
				"type", "HAVE",
				"cid", c,
Steven Allen's avatar
Steven Allen committed
83 84
				"local", self,
				"to", env.Peer,
85
			)
dirkmc's avatar
dirkmc committed
86
		case pb.Message_DontHave:
Steven Allen's avatar
Steven Allen committed
87
			log.Debugw("sent message",
88 89
				"type", "DONT_HAVE",
				"cid", c,
Steven Allen's avatar
Steven Allen committed
90 91
				"local", self,
				"to", env.Peer,
92
			)
dirkmc's avatar
dirkmc committed
93 94 95 96 97
		default:
			panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type))
		}

	}
98
	for _, block := range env.Message.Blocks() {
Steven Allen's avatar
Steven Allen committed
99
		log.Debugw("sent message",
100 101
			"type", "BLOCK",
			"cid", block.Cid(),
Steven Allen's avatar
Steven Allen committed
102 103
			"local", self,
			"to", env.Peer,
104
		)
105
	}
106
}
107

108 109 110 111 112 113
func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) {
	// Blocks need to be sent synchronously to maintain proper backpressure
	// throughout the network stack
	defer env.Sent()

	err := bs.network.SendMessage(ctx, env.Peer, env.Message)
114
	if err != nil {
115 116 117 118 119 120 121
		log.Debugw("failed to send blocks message",
			"peer", env.Peer,
			"error", err,
		)
		return
	}

Steven Allen's avatar
Steven Allen committed
122 123
	bs.logOutgoingBlocks(env)

124 125 126 127
	dataSent := 0
	blocks := env.Message.Blocks()
	for _, b := range blocks {
		dataSent += len(b.RawData())
128
	}
129 130 131 132 133 134
	bs.counterLk.Lock()
	bs.counters.blocksSent += uint64(len(blocks))
	bs.counters.dataSent += uint64(dataSent)
	bs.counterLk.Unlock()
	bs.sentHistogram.Observe(float64(env.Message.Size()))
	log.Debugw("sent message", "peer", env.Peer)
135 136
}

137
// provideWorker reads keys from bs.provideKeys and announces each one to the
// network via bs.network.Provide, running at most provideWorkerMax provides
// concurrently. It returns when the process closes or provideKeys is closed.
func (bs *Bitswap) provideWorker(px process.Process) {
	// FIXME: OnClosingContext returns a _custom_ context type.
	// Unfortunately, deriving a new cancelable context from this custom
	// type fires off a goroutine. To work around this, we create a single
	// cancelable context up-front and derive all sub-contexts from that.
	//
	// See: https://github.com/ipfs/go-ipfs/issues/5810
	ctx := procctx.OnClosingContext(px)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Semaphore: each in-flight provide holds one token.
	limit := make(chan struct{}, provideWorkerMax)

	limitedGoProvide := func(k cid.Cid, wid int) {
		defer func() {
			// replace token when done
			<-limit
		}()

		log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k)
		defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k)

		// Bound each provide call; derives from the single cancelable ctx above.
		ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
		defer cancel()

		// Provide failures are logged and dropped — providing is best-effort.
		if err := bs.network.Provide(ctx, k); err != nil {
			log.Warn(err)
		}
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	// NOTE(review): wid starts at 2 — presumably ID 1 is reserved for this
	// spawner loop itself; confirm against the project's logging conventions.
	for wid := 2; ; wid++ {
		log.Debug("Bitswap.ProvideWorker.Loop")

		select {
		case <-px.Closing():
			return
		case k, ok := <-bs.provideKeys:
			if !ok {
				log.Debug("provideKeys channel closed")
				return
			}
			// Acquire a token (or bail if the process is closing) before
			// spawning the provide goroutine.
			select {
			case <-px.Closing():
				return
			case limit <- struct{}{}:
				go limitedGoProvide(k, wid)
			}
		}
	}
}

190 191
// provideCollector shuttles newly-added block keys from bs.newBlocks to
// bs.provideKeys, buffering in memory so a slow provideWorker never blocks
// the block-receiving path. It closes provideKeys on exit so the worker
// terminates too.
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	// toProvide queues keys waiting to be sent; nextKey is the key currently
	// offered on keysOut. keysOut is nil (disabled select case) whenever
	// there is nothing to send — the classic nil-channel pattern.
	var toProvide []cid.Cid
	var nextKey cid.Cid
	var keysOut chan cid.Cid

	for {
		select {
		case blkey, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}

			if keysOut == nil {
				// Nothing in flight: offer this key and enable the send case.
				nextKey = blkey
				keysOut = bs.provideKeys
			} else {
				// A send is already pending; queue behind it.
				toProvide = append(toProvide, blkey)
			}
		case keysOut <- nextKey:
			// nextKey was delivered; promote the next queued key, or
			// disable the send case by nil-ing keysOut when the queue is empty.
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}