dht.go 17 KB
Newer Older
Juan Batiz-Benet's avatar
Juan Batiz-Benet committed
1 2
package dht

3
import (
4
	"bytes"
5 6 7
	"errors"
	"sync"
	"time"
8

9
	peer "github.com/jbenet/go-ipfs/peer"
Jeromy's avatar
Jeromy committed
10
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
11 12
	swarm "github.com/jbenet/go-ipfs/swarm"
	u "github.com/jbenet/go-ipfs/util"
13 14

	ma "github.com/jbenet/go-multiaddr"
Jeromy's avatar
Jeromy committed
15 16 17

	ds "github.com/jbenet/datastore.go"

18
	"code.google.com/p/goprotobuf/proto"
19 20
)

Juan Batiz-Benet's avatar
Juan Batiz-Benet committed
21 22 23 24 25
// TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js

// IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	// Array of routing tables for differently distanced nodes
	// NOTE: (currently, only a single table is used)
	routingTables []*kb.RoutingTable

	// network is the swarm used to send and receive all DHT messages
	network swarm.Network

	// Local peer (yourself)
	self *peer.Peer

	// Local data
	datastore ds.Datastore

	// Map keys to peers that can provide their value
	providers    map[u.Key][]*providerInfo
	providerLock sync.RWMutex // guards the providers map

	// Signal to shutdown dht
	shutdown chan struct{}

	// When this peer started up
	birth time.Time

	//lock to make diagnostics work better
	diaglock sync.Mutex

	// listener is a server to register to listen for responses to messages
	listener *mesListener
}

Jeromy's avatar
Jeromy committed
55
// NewDHT creates a new DHT object with the given peer as the 'local' host
Jeromy's avatar
Jeromy committed
56
func NewDHT(p *peer.Peer, net swarm.Network) *IpfsDHT {
57
	dht := new(IpfsDHT)
Jeromy's avatar
Jeromy committed
58
	dht.network = net
59 60
	dht.datastore = ds.NewMapDatastore()
	dht.self = p
61
	dht.providers = make(map[u.Key][]*providerInfo)
Jeromy's avatar
Jeromy committed
62
	dht.shutdown = make(chan struct{})
63

64 65 66 67 68
	dht.routingTables = make([]*kb.RoutingTable, 3)
	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*30)
	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*100)
	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour)
	dht.listener = newMesListener()
69
	dht.birth = time.Now()
Jeromy's avatar
Jeromy committed
70
	return dht
71 72
}

73
// Start up background goroutines needed by the DHT
74 75 76 77
func (dht *IpfsDHT) Start() {
	go dht.handleMessages()
}

78
// Connect to a new peer at the given address, ping and add to the routing table
79
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
80
	maddrstr, _ := addr.String()
81
	u.DOut("Connect to new peer: %s", maddrstr)
82
	npeer, err := dht.network.ConnectNew(addr)
83
	if err != nil {
84
		return nil, err
85 86
	}

Jeromy's avatar
Jeromy committed
87 88
	// Ping new peer to register in their routing table
	// NOTE: this should be done better...
89
	err = dht.Ping(npeer, time.Second*2)
Jeromy's avatar
Jeromy committed
90
	if err != nil {
Jeromy's avatar
Jeromy committed
91
		return nil, errors.New("failed to ping newly connected peer")
Jeromy's avatar
Jeromy committed
92 93
	}

94 95
	dht.Update(npeer)

96
	return npeer, nil
Jeromy's avatar
Jeromy committed
97 98
}

99 100
// Read in all messages from swarm and handle them appropriately
// NOTE: this function is just a quick sketch
101
func (dht *IpfsDHT) handleMessages() {
102
	u.DOut("Begin message handling routine")
103 104

	checkTimeouts := time.NewTicker(time.Minute * 5)
105
	ch := dht.network.GetChan()
106 107
	for {
		select {
108
		case mes, ok := <-ch.Incoming:
109 110 111 112
			if !ok {
				u.DOut("handleMessages closing, bad recv on incoming")
				return
			}
113
			pmes := new(PBDHTMessage)
114 115 116 117 118 119
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("Failed to decode protobuf message: %s", err)
				continue
			}

120
			dht.Update(mes.Peer)
121

122
			// Note: not sure if this is the correct place for this
123
			if pmes.GetResponse() {
124
				dht.listener.Respond(pmes.GetId(), mes)
125
				continue
126
			}
127 128
			//

Jeromy's avatar
Jeromy committed
129 130
			u.DOut("[peer: %s]\nGot message type: '%s' [id = %x, from = %s]",
				dht.self.ID.Pretty(),
131
				PBDHTMessage_MessageType_name[int32(pmes.GetType())],
132
				pmes.GetId(), mes.Peer.ID.Pretty())
133
			switch pmes.GetType() {
134
			case PBDHTMessage_GET_VALUE:
135
				dht.handleGetValue(mes.Peer, pmes)
136
			case PBDHTMessage_PUT_VALUE:
Jeromy's avatar
Jeromy committed
137
				dht.handlePutValue(mes.Peer, pmes)
138
			case PBDHTMessage_FIND_NODE:
Jeromy's avatar
Jeromy committed
139
				dht.handleFindPeer(mes.Peer, pmes)
140
			case PBDHTMessage_ADD_PROVIDER:
141
				dht.handleAddProvider(mes.Peer, pmes)
142
			case PBDHTMessage_GET_PROVIDERS:
143
				dht.handleGetProviders(mes.Peer, pmes)
144
			case PBDHTMessage_PING:
145
				dht.handlePing(mes.Peer, pmes)
146
			case PBDHTMessage_DIAGNOSTIC:
147
				dht.handleDiagnostic(mes.Peer, pmes)
148 149
			}

150 151
		case err := <-ch.Errors:
			u.PErr("dht err: %s", err)
152
		case <-dht.shutdown:
153
			checkTimeouts.Stop()
154
			return
155
		case <-checkTimeouts.C:
156 157 158 159 160 161 162 163 164 165 166 167 168
			// Time to collect some garbage!
			dht.cleanExpiredProviders()
		}
	}
}

func (dht *IpfsDHT) cleanExpiredProviders() {
	dht.providerLock.Lock()
	for k, parr := range dht.providers {
		var cleaned []*providerInfo
		for _, v := range parr {
			if time.Since(v.Creation) < time.Hour {
				cleaned = append(cleaned, v)
169
			}
170
		}
171
		dht.providers[k] = cleaned
172
	}
173 174 175
	dht.providerLock.Unlock()
}

Jeromy's avatar
Jeromy committed
176
func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) error {
177
	pmes := Message{
178 179
		Type:  PBDHTMessage_PUT_VALUE,
		Key:   key,
180
		Value: value,
181
		ID:    GenerateMessageID(),
182 183 184
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
185
	dht.network.Send(mes)
186 187 188
	return nil
}

189
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
Jeromy's avatar
Jeromy committed
190
	u.DOut("handleGetValue for key: %s", pmes.GetKey())
Jeromy's avatar
Jeromy committed
191
	dskey := ds.NewKey(pmes.GetKey())
192
	resp := &Message{
Jeromy's avatar
Jeromy committed
193
		Response: true,
194
		ID:       pmes.GetId(),
Jeromy's avatar
Jeromy committed
195 196 197
		Key:      pmes.GetKey(),
	}
	iVal, err := dht.datastore.Get(dskey)
Jeromy's avatar
Jeromy committed
198
	if err == nil {
Jeromy's avatar
Jeromy committed
199
		u.DOut("handleGetValue success!")
Jeromy's avatar
Jeromy committed
200 201
		resp.Success = true
		resp.Value = iVal.([]byte)
Jeromy's avatar
Jeromy committed
202
	} else if err == ds.ErrNotFound {
Jeromy's avatar
Jeromy committed
203 204 205
		// Check if we know any providers for the requested value
		provs, ok := dht.providers[u.Key(pmes.GetKey())]
		if ok && len(provs) > 0 {
Jeromy's avatar
Jeromy committed
206
			u.DOut("handleGetValue returning %d provider[s]", len(provs))
Jeromy's avatar
Jeromy committed
207 208 209 210 211 212
			for _, prov := range provs {
				resp.Peers = append(resp.Peers, prov.Value)
			}
			resp.Success = true
		} else {
			// No providers?
213 214
			// Find closest peer on given cluster to desired key and reply with that info

215 216 217 218 219 220 221
			level := 0
			if len(pmes.GetValue()) < 1 {
				// TODO: maybe return an error? Defaulting isnt a good idea IMO
				u.PErr("handleGetValue: no routing level specified, assuming 0")
			} else {
				level = int(pmes.GetValue()[0]) // Using value field to specify cluster level
			}
Jeromy's avatar
Jeromy committed
222
			u.DOut("handleGetValue searching level %d clusters", level)
223

224
			closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
225

Jeromy's avatar
Jeromy committed
226 227 228 229 230
			if closer.ID.Equal(dht.self.ID) {
				u.DOut("Attempted to return self! this shouldnt happen...")
				resp.Peers = nil
				goto out
			}
231 232 233
			// If this peer is closer than the one from the table, return nil
			if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
				resp.Peers = nil
Jeromy's avatar
Jeromy committed
234
				u.DOut("handleGetValue could not find a closer node than myself.")
235
			} else {
Jeromy's avatar
Jeromy committed
236
				u.DOut("handleGetValue returning a closer peer: '%s'", closer.ID.Pretty())
237 238
				resp.Peers = []*peer.Peer{closer}
			}
239
		}
Jeromy's avatar
Jeromy committed
240
	} else {
241
		//temp: what other errors can a datastore return?
Jeromy's avatar
Jeromy committed
242
		panic(err)
243
	}
244

Jeromy's avatar
Jeromy committed
245
out:
246
	mes := swarm.NewMessage(p, resp.ToProtobuf())
247
	dht.network.Send(mes)
248 249
}

Jeromy's avatar
Jeromy committed
250
// Store a value in this peer local storage
func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *PBDHTMessage) {
	dskey := ds.NewKey(pmes.GetKey())
	err := dht.datastore.Put(dskey, pmes.GetValue())
	if err != nil {
		// For now, just panic, handle this better later maybe
		// NOTE(review): a panic here takes down the whole message loop on a
		// datastore failure — consider logging and dropping instead.
		panic(err)
	}
}

260
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
261
	resp := Message{
262
		Type:     pmes.GetType(),
263
		Response: true,
264
		ID:       pmes.GetId(),
265
	}
266

267
	dht.network.Send(swarm.NewMessage(p, resp.ToProtobuf()))
268 269
}

270
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
271
	resp := Message{
272
		Type:     pmes.GetType(),
273
		ID:       pmes.GetId(),
274 275 276 277 278 279 280 281
		Response: true,
	}
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.network.Send(mes)
	}()
	level := pmes.GetValue()[0]
	u.DOut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
282
	closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
Jeromy's avatar
Jeromy committed
283
	if closest == nil {
284
		u.PErr("handleFindPeer: could not find anything.")
285
		return
Jeromy's avatar
Jeromy committed
286 287 288
	}

	if len(closest.Addresses) == 0 {
289
		u.PErr("handleFindPeer: no addresses for connected peer...")
290
		return
Jeromy's avatar
Jeromy committed
291 292
	}

293 294 295
	// If the found peer further away than this peer...
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
Jeromy's avatar
Jeromy committed
296 297
	}

298 299 300 301 302 303
	u.DOut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}

func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
304
	resp := Message{
305 306
		Type:     PBDHTMessage_GET_PROVIDERS,
		Key:      pmes.GetKey(),
307
		ID:       pmes.GetId(),
308
		Response: true,
Jeromy's avatar
Jeromy committed
309 310
	}

311
	dht.providerLock.RLock()
312
	providers := dht.providers[u.Key(pmes.GetKey())]
313
	dht.providerLock.RUnlock()
314
	if providers == nil || len(providers) == 0 {
Jeromy's avatar
Jeromy committed
315 316 317 318 319
		level := 0
		if len(pmes.GetValue()) > 0 {
			level = int(pmes.GetValue()[0])
		}

320
		closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
Jeromy's avatar
Jeromy committed
321 322 323 324 325
		if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
			resp.Peers = nil
		} else {
			resp.Peers = []*peer.Peer{closer}
		}
326 327 328
	} else {
		for _, prov := range providers {
			resp.Peers = append(resp.Peers, prov.Value)
329
		}
330
		resp.Success = true
331 332 333
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
334
	dht.network.Send(mes)
335 336
}

337 338
// providerInfo records a peer known to provide some key, along with when the
// record was created (used for expiry by cleanExpiredProviders).
type providerInfo struct {
	Creation time.Time  // when this record was added
	Value    *peer.Peer // the providing peer
}

342
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) {
343 344
	//TODO: need to implement TTLs on providers
	key := u.Key(pmes.GetKey())
345
	dht.addProviderEntry(key, p)
346 347
}

348
// Halt stops all communications from this peer and shut down
func (dht *IpfsDHT) Halt() {
	// NOTE(review): this send blocks until handleMessages receives it; if the
	// handler loop already exited (bad recv on incoming), Halt will hang.
	// Consider close(dht.shutdown) instead — TODO confirm callers.
	dht.shutdown <- struct{}{}
	dht.network.Close()
}
353

354 355 356 357
func (dht *IpfsDHT) addProviderEntry(key u.Key, p *peer.Peer) {
	u.DOut("Adding %s as provider for '%s'", p.Key().Pretty(), key)
	dht.providerLock.Lock()
	provs := dht.providers[key]
358
	dht.providers[key] = append(provs, &providerInfo{time.Now(), p})
359 360
	dht.providerLock.Unlock()
}
361

Jeromy's avatar
Jeromy committed
362
// NOTE: not yet finished, low priority
// handleDiagnostic forwards the diagnostic request to our 10 nearest peers,
// collects their replies (up to a timeout), appends our own diagnostic info,
// and sends the aggregate back to the requester.
func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) {
	seq := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)
	// Expect one reply per forwarded request, within 30s.
	listenChan := dht.listener.Listen(pmes.GetId(), len(seq), time.Second*30)

	for _, ps := range seq {
		// NOTE(review): pmes is passed directly rather than re-marshalled via
		// ToProtobuf as elsewhere — confirm swarm.NewMessage accepts this.
		mes := swarm.NewMessage(ps, pmes)
		dht.network.Send(mes)
	}

	buf := new(bytes.Buffer)
	di := dht.getDiagInfo()
	buf.Write(di.Marshal())

	// NOTE: this shouldnt be a hardcoded value
	after := time.After(time.Second * 20)
	count := len(seq)
	for count > 0 {
		select {
		case <-after:
			//Timeout, return what we have
			goto out
		case reqResp := <-listenChan:
			pmesOut := new(PBDHTMessage)
			err := proto.Unmarshal(reqResp.Data, pmesOut)
			if err != nil {
				// It broke? eh, whatever, keep going
				continue
			}
			// Append the raw reply bytes to the aggregate buffer.
			buf.Write(reqResp.Data)
			count--
		}
	}

out:
	resp := Message{
		Type:     PBDHTMessage_DIAGNOSTIC,
		ID:       pmes.GetId(),
		Value:    buf.Bytes(),
		Response: true,
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Send(mes)
}
407

408 409 410
// getValueOrPeers issues a GET_VALUE RPC to p and interprets the reply:
// on success it returns either the value itself or a value fetched from the
// returned provider list; on failure it returns closer peers to continue the
// search with.
func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) {
	pmes, err := dht.getValueSingle(p, key, timeout, level)
	if err != nil {
		return nil, nil, err
	}

	if pmes.GetSuccess() {
		if pmes.Value == nil { // We were given provider[s]
			val, err := dht.getFromPeerList(key, timeout, pmes.GetPeers(), level)
			if err != nil {
				return nil, nil, err
			}
			return val, nil, nil
		}

		// Success! We were given the value
		return pmes.GetValue(), nil, nil
	}

	// We were given a closer node
	var peers []*peer.Peer
	for _, pb := range pmes.GetPeers() {
		// Skip ourselves if present in the returned list
		if peer.ID(pb.GetId()).Equal(dht.self.ID) {
			continue
		}
		addr, err := ma.NewMultiaddr(pb.GetAddr())
		if err != nil {
			u.PErr(err.Error())
			continue
		}

		// Open (or reuse) a connection to the suggested peer; skip on failure.
		np, err := dht.network.GetConnection(peer.ID(pb.GetId()), addr)
		if err != nil {
			u.PErr(err.Error())
			continue
		}

		peers = append(peers, np)
	}
	return nil, peers, nil
}

450 451
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
452
	pmes := Message{
453 454 455
		Type:  PBDHTMessage_GET_VALUE,
		Key:   string(key),
		Value: []byte{byte(level)},
456
		ID:    GenerateMessageID(),
Jeromy's avatar
Jeromy committed
457
	}
458
	responseChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
Jeromy's avatar
Jeromy committed
459 460

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
461
	t := time.Now()
462
	dht.network.Send(mes)
Jeromy's avatar
Jeromy committed
463 464 465 466 467

	// Wait for either the response or a timeout
	timeup := time.After(timeout)
	select {
	case <-timeup:
468
		dht.listener.Unlisten(pmes.ID)
Jeromy's avatar
Jeromy committed
469
		return nil, u.ErrTimeout
470
	case resp, ok := <-responseChan:
Jeromy's avatar
Jeromy committed
471 472 473 474
		if !ok {
			u.PErr("response channel closed before timeout, please investigate.")
			return nil, u.ErrTimeout
		}
475 476
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
477 478
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
Jeromy's avatar
Jeromy committed
479 480 481
		if err != nil {
			return nil, err
		}
482
		return pmesOut, nil
Jeromy's avatar
Jeromy committed
483 484 485
	}
}

486 487 488 489 490 491 492 493 494 495
// TODO: Im not certain on this implementation, we get a list of peers/providers
// from someone what do we do with it? Connect to each of them? randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection and request the value from it?
// getFromPeerList tries each listed provider in order, connecting if needed,
// and returns the first successfully fetched value for key.
func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
	peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
	for _, pinfo := range peerlist {
		// Reuse an existing routing-table entry if we already know this peer.
		p, _ := dht.Find(peer.ID(pinfo.GetId()))
		if p == nil {
			maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
			if err != nil {
				u.PErr("getValue error: %s", err)
				continue
			}

			p, err = dht.network.GetConnection(peer.ID(pinfo.GetId()), maddr)
			if err != nil {
				u.PErr("getValue error: %s", err)
				continue
			}
		}
		pmes, err := dht.getValueSingle(p, key, timeout, level)
		if err != nil {
			u.DErr("getFromPeers error: %s", err)
			continue
		}
		// Remember this peer as a provider regardless of the fetch outcome.
		dht.addProviderEntry(key, p)

		// Make sure it was a successful get
		if pmes.GetSuccess() && pmes.Value != nil {
			return pmes.GetValue(), nil
		}
	}
	return nil, u.ErrNotFound
}

522
func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
523
	v, err := dht.datastore.Get(ds.NewKey(string(key)))
524 525 526 527 528 529
	if err != nil {
		return nil, err
	}
	return v.([]byte), nil
}

530
func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
531 532
	return dht.datastore.Put(ds.NewKey(string(key)), value)
}
533

534
// Update refreshes p in every routing table. If inserting p evicts another
// peer from a table, the network connection to the evicted peer is dropped —
// but only when no other table still references it.
func (dht *IpfsDHT) Update(p *peer.Peer) {
	for _, route := range dht.routingTables {
		removed := route.Update(p)
		// Only drop the connection if no tables refer to this peer
		if removed != nil {
			found := false
			for _, r := range dht.routingTables {
				if r.Find(removed.ID) != nil {
					found = true
					break
				}
			}
			if !found {
				dht.network.Drop(removed)
			}
		}
	}
}
Jeromy's avatar
Jeromy committed
553

554
// Find looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
Jeromy's avatar
Jeromy committed
555
func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
556
	for _, table := range dht.routingTables {
Jeromy's avatar
Jeromy committed
557 558 559 560 561 562 563
		p := table.Find(id)
		if p != nil {
			return p, table
		}
	}
	return nil, nil
}
564 565

func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) {
566
	pmes := Message{
567 568
		Type:  PBDHTMessage_FIND_NODE,
		Key:   string(id),
569
		ID:    GenerateMessageID(),
570 571 572 573
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
574
	listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
575
	t := time.Now()
576 577 578 579
	dht.network.Send(mes)
	after := time.After(timeout)
	select {
	case <-after:
580
		dht.listener.Unlisten(pmes.ID)
581 582
		return nil, u.ErrTimeout
	case resp := <-listenChan:
583 584
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
585 586
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
587 588 589 590
		if err != nil {
			return nil, err
		}

591
		return pmesOut, nil
592 593
	}
}
594

595 596
func (dht *IpfsDHT) printTables() {
	for _, route := range dht.routingTables {
597 598 599
		route.Print()
	}
}
Jeromy's avatar
Jeromy committed
600 601

func (dht *IpfsDHT) findProvidersSingle(p *peer.Peer, key u.Key, level int, timeout time.Duration) (*PBDHTMessage, error) {
602
	pmes := Message{
Jeromy's avatar
Jeromy committed
603 604
		Type:  PBDHTMessage_GET_PROVIDERS,
		Key:   string(key),
605
		ID:    GenerateMessageID(),
Jeromy's avatar
Jeromy committed
606 607 608 609 610
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())

611
	listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
Jeromy's avatar
Jeromy committed
612 613 614 615
	dht.network.Send(mes)
	after := time.After(timeout)
	select {
	case <-after:
616
		dht.listener.Unlisten(pmes.ID)
Jeromy's avatar
Jeromy committed
617 618 619
		return nil, u.ErrTimeout
	case resp := <-listenChan:
		u.DOut("FindProviders: got response.")
620 621
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
Jeromy's avatar
Jeromy committed
622 623 624 625
		if err != nil {
			return nil, err
		}

626
		return pmesOut, nil
Jeromy's avatar
Jeromy committed
627 628 629 630
	}
}

// addPeerList registers each peer in the given protobuf list as a provider
// for key, connecting to any we are not already connected to, and returns
// the peers successfully added.
func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer {
	var provArr []*peer.Peer
	for _, prov := range peers {
		// Don't add ourselves to the list
		if peer.ID(prov.GetId()).Equal(dht.self.ID) {
			continue
		}
		// Don't add someone who is already on the list
		p := dht.network.Find(u.Key(prov.GetId()))
		if p == nil {
			u.DOut("given provider %s was not in our network already.", peer.ID(prov.GetId()).Pretty())
			maddr, err := ma.NewMultiaddr(prov.GetAddr())
			if err != nil {
				u.PErr("error connecting to new peer: %s", err)
				continue
			}
			p, err = dht.network.GetConnection(peer.ID(prov.GetId()), maddr)
			if err != nil {
				u.PErr("error connecting to new peer: %s", err)
				continue
			}
		}
		dht.addProviderEntry(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}