Commit aab538ff authored by Juan Batiz-Benet's avatar Juan Batiz-Benet Committed by Brian Tiger Chow

Fixed connections all over.

parent 164f56cd
......@@ -46,7 +46,7 @@ func peersToPBPeers(peers []*peer.Peer) []*Message_Peer {
func (m *Message) GetClusterLevel() int {
level := m.GetClusterLevelRaw() - 1
if level < 0 {
u.PErr("handleGetValue: no routing level specified, assuming 0\n")
u.PErr("GetClusterLevel: no routing level specified, assuming 0\n")
level = 0
}
return int(level)
......
......@@ -125,7 +125,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) (msg.
dht.Update(mPeer)
// Print out diagnostic
u.DOut("[peer: %s]\nGot message type: '%s' [from = %s]\n",
u.DOut("[peer: %s] Got message type: '%s' [from = %s]\n",
dht.self.ID.Pretty(),
Message_MessageType_name[int32(pmes.GetType())], mPeer.ID.Pretty())
......@@ -141,6 +141,11 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) (msg.
return nil, err
}
// if nil response, return it before serializing
if rpmes == nil {
return nil, nil
}
// serialize response msg
rmes, err := msg.FromObject(mPeer, rpmes)
if err != nil {
......@@ -161,6 +166,11 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message
start := time.Now()
// Print out diagnostic
u.DOut("[peer: %s] Sent message type: '%s' [to = %s]\n",
dht.self.ID.Pretty(),
Message_MessageType_name[int32(pmes.GetType())], p.ID.Pretty())
rmes, err := dht.sender.SendRequest(ctx, mes)
if err != nil {
return nil, err
......@@ -209,10 +219,10 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
return nil, nil, err
}
u.POut("pmes.GetValue() %v\n", pmes.GetValue())
u.DOut("pmes.GetValue() %v\n", pmes.GetValue())
if value := pmes.GetValue(); value != nil {
// Success! We were given the value
u.POut("getValueOrPeers: got value\n")
u.DOut("getValueOrPeers: got value\n")
return value, nil, nil
}
......@@ -222,7 +232,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
if err != nil {
return nil, nil, err
}
u.POut("getValueOrPeers: get from providers\n")
u.DOut("getValueOrPeers: get from providers\n")
return val, nil, nil
}
......@@ -250,11 +260,11 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
}
if len(peers) > 0 {
u.POut("getValueOrPeers: peers\n")
u.DOut("getValueOrPeers: peers\n")
return nil, peers, nil
}
u.POut("getValueOrPeers: u.ErrNotFound\n")
u.DOut("getValueOrPeers: u.ErrNotFound\n")
return nil, nil, u.ErrNotFound
}
......
package dht
// import (
// "testing"
//
// context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
//
// ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
// ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
//
// ci "github.com/jbenet/go-ipfs/crypto"
// spipe "github.com/jbenet/go-ipfs/crypto/spipe"
// inet "github.com/jbenet/go-ipfs/net"
// mux "github.com/jbenet/go-ipfs/net/mux"
// netservice "github.com/jbenet/go-ipfs/net/service"
// peer "github.com/jbenet/go-ipfs/peer"
// u "github.com/jbenet/go-ipfs/util"
//
// "bytes"
// "fmt"
// "time"
// )
//
// func setupDHT(t *testing.T, p *peer.Peer) *IpfsDHT {
// ctx := context.TODO()
//
// peerstore := peer.NewPeerstore()
//
// ctx, _ = context.WithCancel(ctx)
// dhts := netservice.NewService(nil) // nil handler for now, need to patch it
// if err := dhts.Start(ctx); err != nil {
// t.Fatal(err)
// }
//
// net, err := inet.NewIpfsNetwork(context.TODO(), p, &mux.ProtocolMap{
// mux.ProtocolID_Routing: dhts,
// })
// if err != nil {
// t.Fatal(err)
// }
//
// d := NewDHT(p, peerstore, net, dhts, ds.NewMapDatastore())
// dhts.Handler = d
// return d
// }
//
// func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) {
// var addrs []*ma.Multiaddr
// for i := 0; i < 4; i++ {
// a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
// if err != nil {
// t.Fatal(err)
// }
// addrs = append(addrs, a)
// }
//
// var peers []*peer.Peer
// for i := 0; i < 4; i++ {
// p := new(peer.Peer)
// p.AddAddress(addrs[i])
// sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
// if err != nil {
// panic(err)
// }
// p.PubKey = pk
// p.PrivKey = sk
// id, err := spipe.IDFromPubKey(pk)
// if err != nil {
// panic(err)
// }
// p.ID = id
// peers = append(peers, p)
// }
//
// var dhts []*IpfsDHT
// for i := 0; i < 4; i++ {
// dhts[i] = setupDHT(t, peers[i])
// }
//
// return addrs, peers, dhts
// }
//
// func makePeer(addr *ma.Multiaddr) *peer.Peer {
// p := new(peer.Peer)
// p.AddAddress(addr)
// sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
// if err != nil {
// panic(err)
// }
// p.PrivKey = sk
// p.PubKey = pk
// id, err := spipe.IDFromPubKey(pk)
// if err != nil {
// panic(err)
// }
//
// p.ID = id
// return p
// }
//
// func TestPing(t *testing.T) {
// u.Debug = true
// addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
// if err != nil {
// t.Fatal(err)
// }
// addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5678")
// if err != nil {
// t.Fatal(err)
// }
//
// peerA := makePeer(addrA)
// peerB := makePeer(addrB)
//
// dhtA := setupDHT(t, peerA)
// dhtB := setupDHT(t, peerB)
//
// defer dhtA.Halt()
// defer dhtB.Halt()
//
// _, err = dhtA.Connect(addrB)
// if err != nil {
// t.Fatal(err)
// }
//
// //Test that we can ping the node
// err = dhtA.Ping(peerB, time.Second*2)
// if err != nil {
// t.Fatal(err)
// }
// }
//
// func TestValueGetSet(t *testing.T) {
// u.Debug = false
// addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1235")
// if err != nil {
// t.Fatal(err)
// }
// addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5679")
// if err != nil {
// t.Fatal(err)
// }
//
// peerA := makePeer(addrA)
// peerB := makePeer(addrB)
//
// dhtA := setupDHT(t, peerA)
// dhtB := setupDHT(t, peerB)
//
// defer dhtA.Halt()
// defer dhtB.Halt()
//
// _, err = dhtA.Connect(addrB)
// if err != nil {
// t.Fatal(err)
// }
//
// dhtA.PutValue("hello", []byte("world"))
//
// val, err := dhtA.GetValue("hello", time.Second*2)
// if err != nil {
// t.Fatal(err)
// }
//
// if string(val) != "world" {
// t.Fatalf("Expected 'world' got '%s'", string(val))
// }
//
// }
//
import (
"testing"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
ci "github.com/jbenet/go-ipfs/crypto"
spipe "github.com/jbenet/go-ipfs/crypto/spipe"
inet "github.com/jbenet/go-ipfs/net"
mux "github.com/jbenet/go-ipfs/net/mux"
netservice "github.com/jbenet/go-ipfs/net/service"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
"fmt"
"time"
)
// setupDHT constructs a single DHT node for the given peer, wiring it to a
// fresh network service and an in-memory datastore. Any startup failure
// fails the test immediately.
func setupDHT(t *testing.T, p *peer.Peer) *IpfsDHT {
	ctx, _ := context.WithCancel(context.TODO())

	// Service starts with a nil handler; the DHT is patched in below once
	// it has been constructed.
	svc := netservice.NewService(nil)
	if err := svc.Start(ctx); err != nil {
		t.Fatal(err)
	}

	network, err := inet.NewIpfsNetwork(ctx, p, &mux.ProtocolMap{
		mux.ProtocolID_Routing: svc,
	})
	if err != nil {
		t.Fatal(err)
	}

	dht := NewDHT(p, peer.NewPeerstore(), network, svc, ds.NewMapDatastore())
	svc.Handler = dht
	return dht
}
// setupDHTS creates n DHT nodes listening on sequential localhost TCP ports
// (5000, 5001, ...) and returns their addresses, peers, and DHT instances.
//
// FIXES: the original declared `var dhts []*IpfsDHT` and then assigned
// `dhts[i] = ...`, which panics (index out of range on a zero-length slice);
// it also ignored the n parameter and hardcoded 4 in every loop.
func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) {
	addrs := make([]*ma.Multiaddr, 0, n)
	for i := 0; i < n; i++ {
		a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
		if err != nil {
			t.Fatal(err)
		}
		addrs = append(addrs, a)
	}

	peers := make([]*peer.Peer, 0, n)
	for i := 0; i < n; i++ {
		peers = append(peers, makePeer(addrs[i]))
	}

	// make with length n so index assignment is valid.
	dhts := make([]*IpfsDHT, n)
	for i := 0; i < n; i++ {
		dhts[i] = setupDHT(t, peers[i])
	}

	return addrs, peers, dhts
}
// makePeer builds a peer with the given multiaddr and a freshly generated
// 512-bit RSA identity. Key generation or ID derivation failure panics,
// since these helpers run only in tests.
func makePeer(addr *ma.Multiaddr) *peer.Peer {
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
	if err != nil {
		panic(err)
	}
	id, err := spipe.IDFromPubKey(pk)
	if err != nil {
		panic(err)
	}

	p := new(peer.Peer)
	p.AddAddress(addr)
	p.PrivKey = sk
	p.PubKey = pk
	p.ID = id
	return p
}
// TestPing brings up two DHT nodes, connects A to B, and verifies a ping
// round trip completes within the timeout.
func TestPing(t *testing.T) {
	u.Debug = true

	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
	if err != nil {
		t.Fatal(err)
	}
	addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5678")
	if err != nil {
		t.Fatal(err)
	}

	peerA, peerB := makePeer(addrA), makePeer(addrB)

	dhtA := setupDHT(t, peerA)
	defer dhtA.Halt()
	dhtB := setupDHT(t, peerB)
	defer dhtB.Halt()

	if _, err = dhtA.Connect(peerB); err != nil {
		t.Fatal(err)
	}

	// Test that we can ping the node.
	if err = dhtA.Ping(peerB, 2*time.Second); err != nil {
		t.Fatal(err)
	}
}
// TestValueGetSet stores a key/value pair through one DHT node and reads it
// back, checking the round-tripped bytes match.
//
// FIX: the error returned by PutValue (signature: PutValue(key, value) error)
// was silently discarded; a failed put previously surfaced only as a
// misleading GetValue failure.
func TestValueGetSet(t *testing.T) {
	u.Debug = false
	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1235")
	if err != nil {
		t.Fatal(err)
	}
	addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5679")
	if err != nil {
		t.Fatal(err)
	}

	peerA := makePeer(addrA)
	peerB := makePeer(addrB)

	dhtA := setupDHT(t, peerA)
	dhtB := setupDHT(t, peerB)
	defer dhtA.Halt()
	defer dhtB.Halt()

	_, err = dhtA.Connect(peerB)
	if err != nil {
		t.Fatal(err)
	}

	if err := dhtA.PutValue("hello", []byte("world")); err != nil {
		t.Fatal(err)
	}

	val, err := dhtA.GetValue("hello", time.Second*2)
	if err != nil {
		t.Fatal(err)
	}
	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}
// func TestProvides(t *testing.T) {
// u.Debug = false
//
// addrs, _, dhts := setupDHTS(4, t)
// _, peers, dhts := setupDHTS(4, t)
// defer func() {
// for i := 0; i < 4; i++ {
// dhts[i].Halt()
// }
// }()
//
// _, err := dhts[0].Connect(addrs[1])
// _, err := dhts[0].Connect(peers[1])
// if err != nil {
// t.Fatal(err)
// }
//
// _, err = dhts[1].Connect(addrs[2])
// _, err = dhts[1].Connect(peers[2])
// if err != nil {
// t.Fatal(err)
// }
//
// _, err = dhts[1].Connect(addrs[3])
// _, err = dhts[1].Connect(peers[3])
// if err != nil {
// t.Fatal(err)
// }
......
......@@ -97,6 +97,7 @@ func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error
defer dht.dslock.Unlock()
dskey := ds.NewKey(pmes.GetKey())
err := dht.datastore.Put(dskey, pmes.GetValue())
u.DOut("[%s] handlePutValue %v %v", dht.self.ID.Pretty(), dskey, pmes.GetValue())
return nil, err
}
......
......@@ -163,7 +163,7 @@ func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) {
r.peersSeen[next.Key()] = next
r.Unlock()
u.POut("adding peer to query: %v\n", next.ID.Pretty())
u.DOut("adding peer to query: %v\n", next.ID.Pretty())
// do this after unlocking to prevent possible deadlocks.
r.peersRemaining.Increment(1)
......@@ -187,14 +187,14 @@ func (r *dhtQueryRunner) spawnWorkers() {
if !more {
return // channel closed.
}
u.POut("spawning worker for: %v\n", p.ID.Pretty())
u.DOut("spawning worker for: %v\n", p.ID.Pretty())
go r.queryPeer(p)
}
}
}
func (r *dhtQueryRunner) queryPeer(p *peer.Peer) {
u.POut("spawned worker for: %v\n", p.ID.Pretty())
u.DOut("spawned worker for: %v\n", p.ID.Pretty())
// make sure we rate limit concurrency.
select {
......@@ -204,33 +204,33 @@ func (r *dhtQueryRunner) queryPeer(p *peer.Peer) {
return
}
u.POut("running worker for: %v\n", p.ID.Pretty())
u.DOut("running worker for: %v\n", p.ID.Pretty())
// finally, run the query against this peer
res, err := r.query.qfunc(r.ctx, p)
if err != nil {
u.POut("ERROR worker for: %v %v\n", p.ID.Pretty(), err)
u.DOut("ERROR worker for: %v %v\n", p.ID.Pretty(), err)
r.Lock()
r.errs = append(r.errs, err)
r.Unlock()
} else if res.success {
u.POut("SUCCESS worker for: %v\n", p.ID.Pretty(), res)
u.DOut("SUCCESS worker for: %v\n", p.ID.Pretty(), res)
r.Lock()
r.result = res
r.Unlock()
r.cancel() // signal to everyone that we're done.
} else if res.closerPeers != nil {
u.POut("PEERS CLOSER -- worker for: %v\n", p.ID.Pretty())
u.DOut("PEERS CLOSER -- worker for: %v\n", p.ID.Pretty())
for _, next := range res.closerPeers {
r.addPeerToQuery(next, p)
}
}
// signal we're done proccessing peer p
u.POut("completing worker for: %v\n", p.ID.Pretty())
u.DOut("completing worker for: %v\n", p.ID.Pretty())
r.peersRemaining.Decrement(1)
r.rateLimit <- struct{}{}
}
......@@ -30,6 +30,7 @@ func (dht *IpfsDHT) PutValue(key u.Key, value []byte) error {
}
query := newQuery(key, func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) {
u.DOut("[%s] PutValue qry part %v\n", dht.self.ID.Pretty(), p.ID.Pretty())
err := dht.putValueToNetwork(ctx, p, string(key), value)
if err != nil {
return nil, err
......@@ -38,6 +39,7 @@ func (dht *IpfsDHT) PutValue(key u.Key, value []byte) error {
})
_, err := query.Run(ctx, peers)
u.DOut("[%s] PutValue %v %v\n", dht.self.ID.Pretty(), key, value)
return err
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment