// Package decision implements the decision engine for the bitswap service.
package decision

import (
	"sync"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
	wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog"
)

// TODO consider taking responsibility for other types of requests. For
// example, there could be a |cancelQueue| for all of the cancellation
// messages that need to go out. There could also be a |wantlistQueue| for
// the local peer's wantlists. Alternatively, these could all be bundled
// into a single, intelligent global queue that efficiently
// batches/combines and takes all of these into consideration.
//
// Right now, messages go onto the network for four reasons:
// 1. an initial `sendwantlist` message to a provider of the first key in a request
// 2. a periodic full sweep of `sendwantlist` messages to all providers
// 3. upon receipt of blocks, a `cancel` message to all peers
// 4. draining the priority queue of `blockrequests` from peers
//
// Presently, only `blockrequests` are handled by the decision engine.
// However, there is an opportunity to give it more responsibility! If the
// decision engine is given responsibility for all of the others, it can
// intelligently decide how to combine requests efficiently.
//
// Some examples of what would be possible:
//
// * when sending out the wantlists, include `cancel` requests
// * when handling `blockrequests`, include `sendwantlist` and `cancel` as appropriate
// * when handling `cancel`, if we recently received a wanted block from a
// 	 peer, include a partial wantlist that contains a few other high priority
//   blocks
//
// In a sense, if we treat the decision engine as a black box, it could do
// whatever it sees fit to produce desired outcomes (get wanted keys
// quickly, maintain good relationships with peers, etc).

var log = eventlog.Logger("engine")

const (
	// outboxChanBuffer must be 0 to prevent stale messages from being sent
	outboxChanBuffer = 0
)

// Envelope contains a message for a Peer
type Envelope struct {
	// Peer is the intended recipient
	Peer peer.ID
	// Message is the payload
	Message bsmsg.BitSwapMessage

	// A callback to notify the decision queue that the task is complete
	Sent func()
}

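// Engine decides which blocks to send to which peers. It keeps a ledger per
// partner peer, queues incoming block requests by priority, and hands
// ready-to-send messages to the caller through Outbox.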
type Engine struct {
	// peerRequestQueue is a priority queue of requests received from peers.
	// Requests are popped from the queue, packaged up, and placed in the
	// outbox.
	peerRequestQueue peerRequestQueue

	// FIXME it's a bit odd for the client and the worker to both share memory
	// (both modify the peerRequestQueue) and also to communicate over the
	// workSignal channel. consider sending requests over the channel and
	// allowing the worker to have exclusive access to the peerRequestQueue. In
	// that case, no lock would be required.
	workSignal chan struct{}

	// outbox contains outgoing messages to peers. This is owned by the
	// taskWorker goroutine
	outbox chan (<-chan *Envelope)

	bs bstore.Blockstore

	lock sync.RWMutex // protects the fields immediately below
	// ledgerMap lists Ledgers by their Partner key.
	ledgerMap map[peer.ID]*ledger
}

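// NewEngine constructs an Engine backed by the given blockstore and starts
// its task worker; the worker runs until ctx is cancelled.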
func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
	e := &Engine{
		ledgerMap:        make(map[peer.ID]*ledger),
		bs:               bs,
		peerRequestQueue: newPRQ(),
		outbox:           make(chan (<-chan *Envelope), outboxChanBuffer),
		workSignal:       make(chan struct{}),
	}
	go e.taskWorker(ctx)
	return e
}

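// taskWorker repeatedly offers a fresh one-time-use channel on the outbox,
// then fills it with the next prepared Envelope. It exits (and closes the
// outbox) when ctx is cancelled.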
func (e *Engine) taskWorker(ctx context.Context) {
	defer close(e.outbox) // because taskWorker uses the channel exclusively
	for {
		oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking
		select {
		case <-ctx.Done():
			return
		case e.outbox <- oneTimeUse:
		}
		// receiver is ready for an outgoing envelope. let's prepare one. first,
		// we must acquire a task from the PQ...
		envelope, err := e.nextEnvelope(ctx)
		if err != nil {
			close(oneTimeUse)
			return // ctx cancelled
		}
		oneTimeUse <- envelope // buffered. won't block
		close(oneTimeUse)
	}
}

// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.Pop()
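		// if the queue is empty, wait for new work to be signalled (or for
		// the context to be cancelled)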
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.Pop()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...

		block, err := e.bs.Get(nextTask.Entry.Key)
		if err != nil {
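			// the block is not in the blockstore; skip this task and move on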
			continue
		}

		m := bsmsg.New() // TODO: maybe add keys from our wantlist?
		m.AddBlock(block)
		return &Envelope{
			Peer:    nextTask.Target,
			Message: m,
			Sent:    nextTask.Done,
		}, nil
	}
}

// Outbox returns a channel of one-time use Envelope channels.
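//
// A consumer might drain the outbox roughly like this (illustrative sketch,
// not the actual bitswap worker):
//
//	for nextEnvelope := range e.Outbox() {
//		select {
//		case <-ctx.Done():
//			return
//		case envelope, ok := <-nextEnvelope:
//			if !ok {
//				continue // engine had no envelope ready before shutdown
//			}
//			// send envelope.Message to envelope.Peer, then acknowledge:
//			envelope.Sent()
//		}
//	}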
func (e *Engine) Outbox() <-chan (<-chan *Envelope) {
	return e.outbox
}

// Peers returns a slice of peers with whom the local node has active sessions.
func (e *Engine) Peers() []peer.ID {
	e.lock.RLock()
	defer e.lock.RUnlock()

	response := make([]peer.ID, 0)
	for _, ledger := range e.ledgerMap {
		response = append(response, ledger.Partner)
	}
	return response
}

// MessageReceived performs book-keeping. Returns error if passed invalid
// arguments.
func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 {
		log.Debug("received empty message from", p)
	}

	newWorkExists := false
	defer func() {
		if newWorkExists {
			e.signalNewWork()
		}
	}()

	l := e.findOrCreate(p)
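	// a full wantlist replaces whatever we previously knew about this peer's wants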
	if m.Full() {
		l.wantList = wl.New()
	}

	for _, entry := range m.Wantlist() {
		if entry.Cancel {
			log.Debug("cancel", entry.Key)
			l.CancelWant(entry.Key)
			e.peerRequestQueue.Remove(entry.Key, p)
		} else {
			log.Debug("wants", entry.Key, entry.Priority)
			l.Wants(entry.Key, entry.Priority)
			if exists, err := e.bs.Has(entry.Key); err == nil && exists {
				e.peerRequestQueue.Push(entry.Entry, p)
				newWorkExists = true
			}
		}
	}

	for _, block := range m.Blocks() {
		log.Debug("got block %s %d bytes", block.Key(), len(block.Data))
		l.ReceivedBytes(len(block.Data))
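		// a newly received block may satisfy other partners' wants; queue a
		// task for every ledger whose wantlist contains it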
		for _, l := range e.ledgerMap {
			if entry, ok := l.WantListContains(block.Key()); ok {
				e.peerRequestQueue.Push(entry, l.Partner)
				newWorkExists = true
			}
		}
	}
	return nil
}

// TODO add contents of m.WantList() to my local wantlist? NB: could introduce
// race conditions where I send a message, but MessageSent gets handled after
// MessageReceived. The information in the local wantlist could become
// inconsistent. Would need to ensure that Sends and acknowledgement of the
// send happen atomically

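// MessageSent performs book-keeping for a message we have sent to the given
// peer: sent bytes are credited to the peer's ledger, and entries satisfied
// by the sent blocks are dropped from the wantlist and the request queue.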
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	l := e.findOrCreate(p)
	for _, block := range m.Blocks() {
		l.SentBytes(len(block.Data))
		l.wantList.Remove(block.Key())
		e.peerRequestQueue.Remove(block.Key(), p)
	}

	return nil
}

func (e *Engine) PeerDisconnected(p peer.ID) {
	// TODO: release ledger
}

func (e *Engine) numBytesSentTo(p peer.ID) uint64 {
	// NB not threadsafe
	return e.findOrCreate(p).Accounting.BytesSent
}

func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 {
	// NB not threadsafe
	return e.findOrCreate(p).Accounting.BytesRecv
}

// findOrCreate lazily instantiates a ledger for the given peer.
func (e *Engine) findOrCreate(p peer.ID) *ledger {
	l, ok := e.ledgerMap[p]
	if !ok {
		l = newLedger(p)
		e.ledgerMap[p] = l
	}
	return l
}

func (e *Engine) signalNewWork() {
	// Signal task generation to restart (if stopped!)
	select {
	case e.workSignal <- struct{}{}:
	default:
	}
}