dht.go 15.8 KB
Newer Older
Juan Batiz-Benet's avatar
Juan Batiz-Benet committed
1 2
package dht

3
import (
4
	"bytes"
5 6 7
	"errors"
	"sync"
	"time"
8

9
	peer "github.com/jbenet/go-ipfs/peer"
Jeromy's avatar
Jeromy committed
10
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
11 12
	swarm "github.com/jbenet/go-ipfs/swarm"
	u "github.com/jbenet/go-ipfs/util"
13 14

	ma "github.com/jbenet/go-multiaddr"
Jeromy's avatar
Jeromy committed
15 16 17

	ds "github.com/jbenet/datastore.go"

18
	"code.google.com/p/goprotobuf/proto"
19 20
)

Juan Batiz-Benet's avatar
Juan Batiz-Benet committed
21 22 23 24 25
// TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js

// IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	// Array of routing tables for differently distanced nodes
	// NOTE: (currently, only a single table is used)
	routes []*kb.RoutingTable

	// network is the swarm used to send and receive messages to other peers
	network swarm.Network

	// Local peer (yourself)
	self *peer.Peer

	// Local data
	datastore ds.Datastore

	// Map keys to peers that can provide their value
	// (guarded by providerLock; entries expire via cleanExpiredProviders)
	providers    map[u.Key][]*providerInfo
	providerLock sync.RWMutex

	// map of channels waiting for reply messages
	// (guarded by listenLock; entries expire via cleanExpiredListeners)
	listeners  map[uint64]*listenInfo
	listenLock sync.RWMutex

	// Signal to shutdown dht (received by the handleMessages loop)
	shutdown chan struct{}

	// When this peer started up
	birth time.Time

	//lock to make diagnostics work better
	diaglock sync.Mutex
}

56
// The listen info struct holds information about a message that is being waited for
type listenInfo struct {
	// Responses matching the listen ID will be sent through resp
	// (closed by Unlisten when the listener is removed)
	resp chan *swarm.Message

	// count is the number of responses to listen for
	count int

	// eol is the time at which this listener will expire
	eol time.Time
}

Jeromy's avatar
Jeromy committed
68
// NewDHT creates a new DHT object with the given peer as the 'local' host
Jeromy's avatar
Jeromy committed
69
func NewDHT(p *peer.Peer, net swarm.Network) *IpfsDHT {
70
	dht := new(IpfsDHT)
Jeromy's avatar
Jeromy committed
71
	dht.network = net
72 73
	dht.datastore = ds.NewMapDatastore()
	dht.self = p
74
	dht.listeners = make(map[uint64]*listenInfo)
75
	dht.providers = make(map[u.Key][]*providerInfo)
Jeromy's avatar
Jeromy committed
76
	dht.shutdown = make(chan struct{})
77 78 79 80 81 82

	dht.routes = make([]*kb.RoutingTable, 3)
	dht.routes[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*30)
	dht.routes[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*100)
	dht.routes[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour)

83
	dht.birth = time.Now()
Jeromy's avatar
Jeromy committed
84
	return dht
85 86
}

87
// Start up background goroutines needed by the DHT
// (the message-handling loop runs until Halt is called).
func (dht *IpfsDHT) Start() {
	go dht.handleMessages()
}

92
// Connect to a new peer at the given address, ping and add to the routing table
93
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
94
	maddrstr, _ := addr.String()
95
	u.DOut("Connect to new peer: %s", maddrstr)
96
	npeer, err := dht.network.Connect(addr)
97
	if err != nil {
98
		return nil, err
99 100
	}

Jeromy's avatar
Jeromy committed
101 102
	// Ping new peer to register in their routing table
	// NOTE: this should be done better...
103
	err = dht.Ping(npeer, time.Second*2)
Jeromy's avatar
Jeromy committed
104
	if err != nil {
Jeromy's avatar
Jeromy committed
105
		return nil, errors.New("failed to ping newly connected peer")
Jeromy's avatar
Jeromy committed
106 107
	}

108 109
	dht.Update(npeer)

110
	return npeer, nil
Jeromy's avatar
Jeromy committed
111 112
}

113 114
// Read in all messages from swarm and handle them appropriately
// NOTE: this function is just a quick sketch
115
func (dht *IpfsDHT) handleMessages() {
116
	u.DOut("Begin message handling routine")
117 118

	checkTimeouts := time.NewTicker(time.Minute * 5)
119
	ch := dht.network.GetChan()
120 121
	for {
		select {
122
		case mes, ok := <-ch.Incoming:
123 124 125 126
			if !ok {
				u.DOut("handleMessages closing, bad recv on incoming")
				return
			}
127
			pmes := new(PBDHTMessage)
128 129 130 131 132 133
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("Failed to decode protobuf message: %s", err)
				continue
			}

134
			dht.Update(mes.Peer)
135

136
			// Note: not sure if this is the correct place for this
137 138
			if pmes.GetResponse() {
				dht.listenLock.RLock()
139
				list, ok := dht.listeners[pmes.GetId()]
140 141 142 143 144
				dht.listenLock.RUnlock()
				if time.Now().After(list.eol) {
					dht.Unlisten(pmes.GetId())
					ok = false
				}
145 146 147
				if list.count > 1 {
					list.count--
				}
148
				if ok {
149
					list.resp <- mes
150 151 152
					if list.count == 1 {
						dht.Unlisten(pmes.GetId())
					}
153 154
				} else {
					u.DOut("Received response with nobody listening...")
155 156 157
				}

				continue
158
			}
159 160
			//

161 162
			u.DOut("[peer: %s]", dht.self.ID.Pretty())
			u.DOut("Got message type: '%s' [id = %x, from = %s]",
163
				PBDHTMessage_MessageType_name[int32(pmes.GetType())],
164
				pmes.GetId(), mes.Peer.ID.Pretty())
165
			switch pmes.GetType() {
166
			case PBDHTMessage_GET_VALUE:
167
				dht.handleGetValue(mes.Peer, pmes)
168
			case PBDHTMessage_PUT_VALUE:
Jeromy's avatar
Jeromy committed
169
				dht.handlePutValue(mes.Peer, pmes)
170
			case PBDHTMessage_FIND_NODE:
Jeromy's avatar
Jeromy committed
171
				dht.handleFindPeer(mes.Peer, pmes)
172
			case PBDHTMessage_ADD_PROVIDER:
173
				dht.handleAddProvider(mes.Peer, pmes)
174
			case PBDHTMessage_GET_PROVIDERS:
175
				dht.handleGetProviders(mes.Peer, pmes)
176
			case PBDHTMessage_PING:
177
				dht.handlePing(mes.Peer, pmes)
178
			case PBDHTMessage_DIAGNOSTIC:
179
				dht.handleDiagnostic(mes.Peer, pmes)
180 181
			}

182 183
		case err := <-ch.Errors:
			u.PErr("dht err: %s", err)
184
		case <-dht.shutdown:
185
			checkTimeouts.Stop()
186
			return
187
		case <-checkTimeouts.C:
188 189 190 191 192 193 194 195 196 197 198 199 200 201
			// Time to collect some garbage!
			dht.cleanExpiredProviders()
			dht.cleanExpiredListeners()
		}
	}
}

func (dht *IpfsDHT) cleanExpiredProviders() {
	dht.providerLock.Lock()
	for k, parr := range dht.providers {
		var cleaned []*providerInfo
		for _, v := range parr {
			if time.Since(v.Creation) < time.Hour {
				cleaned = append(cleaned, v)
202
			}
203
		}
204
		dht.providers[k] = cleaned
205
	}
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221
	dht.providerLock.Unlock()
}

func (dht *IpfsDHT) cleanExpiredListeners() {
	dht.listenLock.Lock()
	var remove []uint64
	now := time.Now()
	for k, v := range dht.listeners {
		if now.After(v.eol) {
			remove = append(remove, k)
		}
	}
	for _, k := range remove {
		delete(dht.listeners, k)
	}
	dht.listenLock.Unlock()
Juan Batiz-Benet's avatar
Juan Batiz-Benet committed
222
}
223

Jeromy's avatar
Jeromy committed
224
func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) error {
225 226 227
	pmes := DHTMessage{
		Type:  PBDHTMessage_PUT_VALUE,
		Key:   key,
228
		Value: value,
229
		Id:    GenerateMessageID(),
230 231 232
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
233
	dht.network.Send(mes)
234 235 236
	return nil
}

237
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
Jeromy's avatar
Jeromy committed
238
	dskey := ds.NewKey(pmes.GetKey())
Jeromy's avatar
Jeromy committed
239 240 241 242 243 244
	resp := &DHTMessage{
		Response: true,
		Id:       pmes.GetId(),
		Key:      pmes.GetKey(),
	}
	iVal, err := dht.datastore.Get(dskey)
Jeromy's avatar
Jeromy committed
245
	if err == nil {
Jeromy's avatar
Jeromy committed
246 247
		resp.Success = true
		resp.Value = iVal.([]byte)
Jeromy's avatar
Jeromy committed
248
	} else if err == ds.ErrNotFound {
Jeromy's avatar
Jeromy committed
249 250 251 252 253 254 255 256 257
		// Check if we know any providers for the requested value
		provs, ok := dht.providers[u.Key(pmes.GetKey())]
		if ok && len(provs) > 0 {
			for _, prov := range provs {
				resp.Peers = append(resp.Peers, prov.Value)
			}
			resp.Success = true
		} else {
			// No providers?
258 259
			// Find closest peer on given cluster to desired key and reply with that info

260 261 262 263 264 265 266
			level := 0
			if len(pmes.GetValue()) < 1 {
				// TODO: maybe return an error? Defaulting isnt a good idea IMO
				u.PErr("handleGetValue: no routing level specified, assuming 0")
			} else {
				level = int(pmes.GetValue()[0]) // Using value field to specify cluster level
			}
267 268 269 270 271 272 273 274 275

			closer := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))

			// If this peer is closer than the one from the table, return nil
			if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
				resp.Peers = nil
			} else {
				resp.Peers = []*peer.Peer{closer}
			}
276
		}
Jeromy's avatar
Jeromy committed
277
	} else {
278
		//temp: what other errors can a datastore return?
Jeromy's avatar
Jeromy committed
279
		panic(err)
280
	}
281 282

	mes := swarm.NewMessage(p, resp.ToProtobuf())
283
	dht.network.Send(mes)
284 285
}

Jeromy's avatar
Jeromy committed
286
// Store a value in this peer local storage
287
func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *PBDHTMessage) {
Jeromy's avatar
Jeromy committed
288 289 290 291 292 293
	dskey := ds.NewKey(pmes.GetKey())
	err := dht.datastore.Put(dskey, pmes.GetValue())
	if err != nil {
		// For now, just panic, handle this better later maybe
		panic(err)
	}
294 295
}

296 297 298
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
	resp := DHTMessage{
		Type:     pmes.GetType(),
299
		Response: true,
300
		Id:       pmes.GetId(),
301
	}
302

303
	dht.network.Send(swarm.NewMessage(p, resp.ToProtobuf()))
304 305
}

306
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
307 308 309 310 311 312 313 314 315 316 317 318
	resp := DHTMessage{
		Type:     pmes.GetType(),
		Id:       pmes.GetId(),
		Response: true,
	}
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.network.Send(mes)
	}()
	level := pmes.GetValue()[0]
	u.DOut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
Jeromy's avatar
Jeromy committed
319
	if closest == nil {
320
		u.PErr("handleFindPeer: could not find anything.")
321
		return
Jeromy's avatar
Jeromy committed
322 323 324
	}

	if len(closest.Addresses) == 0 {
325
		u.PErr("handleFindPeer: no addresses for connected peer...")
326
		return
Jeromy's avatar
Jeromy committed
327 328
	}

329 330 331
	// If the found peer further away than this peer...
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
Jeromy's avatar
Jeromy committed
332 333
	}

334 335 336 337 338 339
	u.DOut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}

func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
340
	resp := DHTMessage{
341 342
		Type:     PBDHTMessage_GET_PROVIDERS,
		Key:      pmes.GetKey(),
343
		Id:       pmes.GetId(),
344
		Response: true,
Jeromy's avatar
Jeromy committed
345 346
	}

347
	dht.providerLock.RLock()
348
	providers := dht.providers[u.Key(pmes.GetKey())]
349
	dht.providerLock.RUnlock()
350
	if providers == nil || len(providers) == 0 {
351 352 353 354 355 356
		// TODO: work on tiering this
		closer := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
		resp.Peers = []*peer.Peer{closer}
	} else {
		for _, prov := range providers {
			resp.Peers = append(resp.Peers, prov.Value)
357
		}
358
		resp.Success = true
359 360 361
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
362
	dht.network.Send(mes)
363 364
}

365 366
// providerInfo records a peer known to provide some value, together with the
// time the record was created (records older than an hour are dropped by
// cleanExpiredProviders).
type providerInfo struct {
	Creation time.Time
	Value    *peer.Peer
}

370
// handleAddProvider records the sending peer as a provider for the message's key.
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) {
	//TODO: need to implement TTLs on providers
	key := u.Key(pmes.GetKey())
	dht.addProviderEntry(key, p)
}

376 377
// Register a handler for a specific message ID, used for getting replies
// to certain messages (i.e. response to a GET_VALUE message)
378
func (dht *IpfsDHT) ListenFor(mesid uint64, count int, timeout time.Duration) <-chan *swarm.Message {
379
	lchan := make(chan *swarm.Message)
380
	dht.listenLock.Lock()
381
	dht.listeners[mesid] = &listenInfo{lchan, count, time.Now().Add(timeout)}
382 383 384
	dht.listenLock.Unlock()
	return lchan
}
385

386
// Unregister the given message id from the listener map
Jeromy's avatar
Jeromy committed
387 388
func (dht *IpfsDHT) Unlisten(mesid uint64) {
	dht.listenLock.Lock()
389
	list, ok := dht.listeners[mesid]
Jeromy's avatar
Jeromy committed
390 391 392 393
	if ok {
		delete(dht.listeners, mesid)
	}
	dht.listenLock.Unlock()
394 395 396
	close(list.resp)
}

Jeromy's avatar
Jeromy committed
397
// Check whether or not the dht is currently listening for mesid
398 399
func (dht *IpfsDHT) IsListening(mesid uint64) bool {
	dht.listenLock.RLock()
400
	li, ok := dht.listeners[mesid]
401
	dht.listenLock.RUnlock()
402 403 404 405 406 407
	if time.Now().After(li.eol) {
		dht.listenLock.Lock()
		delete(dht.listeners, mesid)
		dht.listenLock.Unlock()
		return false
	}
408
	return ok
Jeromy's avatar
Jeromy committed
409 410
}

Jeromy's avatar
Jeromy committed
411
// Stop all communications from this peer and shut down
// (the send unblocks the handleMessages loop's shutdown case).
func (dht *IpfsDHT) Halt() {
	dht.shutdown <- struct{}{}
	dht.network.Close()
}
416

417 418 419 420
func (dht *IpfsDHT) addProviderEntry(key u.Key, p *peer.Peer) {
	u.DOut("Adding %s as provider for '%s'", p.Key().Pretty(), key)
	dht.providerLock.Lock()
	provs := dht.providers[key]
421
	dht.providers[key] = append(provs, &providerInfo{time.Now(), p})
422 423
	dht.providerLock.Unlock()
}
424

Jeromy's avatar
Jeromy committed
425
// NOTE: not yet finished, low priority
// handleDiagnostic fans the diagnostic request out to up to 10 nearby peers,
// collects their replies (or times out), and sends the concatenated data
// back to the requester. The diaglock/IsListening check drops requests whose
// id we are already waiting on, to avoid forwarding the same diagnostic twice.
func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) {
	dht.diaglock.Lock()
	if dht.IsListening(pmes.GetId()) {
		//TODO: ehhh..........
		dht.diaglock.Unlock()
		return
	}
	dht.diaglock.Unlock()

	// Forward the request to the 10 nearest peers and listen for their replies.
	seq := dht.routes[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)
	listenChan := dht.ListenFor(pmes.GetId(), len(seq), time.Second*30)

	for _, ps := range seq {
		mes := swarm.NewMessage(ps, pmes)
		dht.network.Send(mes)
	}

	// Start the response buffer with our own diagnostic info.
	buf := new(bytes.Buffer)
	di := dht.getDiagInfo()
	buf.Write(di.Marshal())

	// NOTE: this shouldnt be a hardcoded value
	after := time.After(time.Second * 20)
	count := len(seq)
	for count > 0 {
		select {
		case <-after:
			//Timeout, return what we have
			goto out
		case req_resp := <-listenChan:
			// Unmarshal only to validate the reply; the raw bytes are appended.
			pmes_out := new(PBDHTMessage)
			err := proto.Unmarshal(req_resp.Data, pmes_out)
			if err != nil {
				// It broke? eh, whatever, keep going
				continue
			}
			buf.Write(req_resp.Data)
			count--
		}
	}

out:
	// Reply to the original requester with everything gathered so far.
	resp := DHTMessage{
		Type:     PBDHTMessage_DIAGNOSTIC,
		Id:       pmes.GetId(),
		Value:    buf.Bytes(),
		Response: true,
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Send(mes)
}
478

479 480
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
Jeromy's avatar
Jeromy committed
481
	pmes := DHTMessage{
482 483 484 485
		Type:  PBDHTMessage_GET_VALUE,
		Key:   string(key),
		Value: []byte{byte(level)},
		Id:    GenerateMessageID(),
Jeromy's avatar
Jeromy committed
486 487 488 489
	}
	response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
490
	t := time.Now()
491
	dht.network.Send(mes)
Jeromy's avatar
Jeromy committed
492 493 494 495 496 497 498 499 500 501 502 503

	// Wait for either the response or a timeout
	timeup := time.After(timeout)
	select {
	case <-timeup:
		dht.Unlisten(pmes.Id)
		return nil, u.ErrTimeout
	case resp, ok := <-response_chan:
		if !ok {
			u.PErr("response channel closed before timeout, please investigate.")
			return nil, u.ErrTimeout
		}
504 505
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
Jeromy's avatar
Jeromy committed
506 507 508 509 510
		pmes_out := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmes_out)
		if err != nil {
			return nil, err
		}
511
		return pmes_out, nil
Jeromy's avatar
Jeromy committed
512 513 514
	}
}

515 516 517 518 519 520 521 522 523 524
// TODO: Im not certain on this implementation, we get a list of peers/providers
// from someone what do we do with it? Connect to each of them? randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection and request the value from it?
func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
	peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
	for _, pinfo := range peerlist {
		p, _ := dht.Find(peer.ID(pinfo.GetId()))
		if p == nil {
			maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
Jeromy's avatar
Jeromy committed
525 526 527 528
			if err != nil {
				u.PErr("getValue error: %s", err)
				continue
			}
529 530

			p, err = dht.network.Connect(maddr)
Jeromy's avatar
Jeromy committed
531 532 533 534 535
			if err != nil {
				u.PErr("getValue error: %s", err)
				continue
			}
		}
536
		pmes, err := dht.getValueSingle(p, key, timeout, level)
Jeromy's avatar
Jeromy committed
537
		if err != nil {
538
			u.DErr("getFromPeers error: %s", err)
Jeromy's avatar
Jeromy committed
539 540
			continue
		}
541
		dht.addProviderEntry(key, p)
Jeromy's avatar
Jeromy committed
542

543 544 545 546
		// Make sure it was a successful get
		if pmes.GetSuccess() && pmes.Value != nil {
			return pmes.GetValue(), nil
		}
Jeromy's avatar
Jeromy committed
547 548 549 550
	}
	return nil, u.ErrNotFound
}

551
func (dht *IpfsDHT) GetLocal(key u.Key) ([]byte, error) {
552
	v, err := dht.datastore.Get(ds.NewKey(string(key)))
553 554 555 556 557 558 559 560 561
	if err != nil {
		return nil, err
	}
	return v.([]byte), nil
}

// PutLocal stores value under key in the local datastore only (no network traffic).
func (dht *IpfsDHT) PutLocal(key u.Key, value []byte) error {
	return dht.datastore.Put(ds.NewKey(string(key)), value)
}
562 563

func (dht *IpfsDHT) Update(p *peer.Peer) {
564 565 566 567 568 569 570 571 572 573 574 575 576 577 578
	for _, route := range dht.routes {
		removed := route.Update(p)
		// Only drop the connection if no tables refer to this peer
		if removed != nil {
			found := false
			for _, r := range dht.routes {
				if r.Find(removed.ID) != nil {
					found = true
					break
				}
			}
			if !found {
				dht.network.Drop(removed)
			}
		}
579 580
	}
}
Jeromy's avatar
Jeromy committed
581 582 583 584 585 586 587 588 589 590 591

// Look for a peer with a given ID connected to this dht
func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
	for _, table := range dht.routes {
		if p := table.Find(id); p != nil {
			return p, table
		}
	}
	return nil, nil
}
592 593 594 595 596 597 598 599 600 601 602

func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) {
	pmes := DHTMessage{
		Type:  PBDHTMessage_FIND_NODE,
		Key:   string(id),
		Id:    GenerateMessageID(),
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
	listenChan := dht.ListenFor(pmes.Id, 1, time.Minute)
603
	t := time.Now()
604 605 606 607 608 609 610
	dht.network.Send(mes)
	after := time.After(timeout)
	select {
	case <-after:
		dht.Unlisten(pmes.Id)
		return nil, u.ErrTimeout
	case resp := <-listenChan:
611 612
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
613 614 615 616 617 618 619 620 621
		pmes_out := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmes_out)
		if err != nil {
			return nil, err
		}

		return pmes_out, nil
	}
}
622 623 624 625 626 627

// PrintTables dumps each routing table in turn.
func (dht *IpfsDHT) PrintTables() {
	for _, rt := range dht.routes {
		rt.Print()
	}
}