diff --git a/routing/dht/Message.go b/routing/dht/Message.go
index ed7dc2a21deb474e18ed3d769f94e39ad4a36ae7..1be9a3b801e5d929674ada3001b5ec2592e89115 100644
--- a/routing/dht/Message.go
+++ b/routing/dht/Message.go
@@ -46,7 +46,7 @@ func peersToPBPeers(peers []*peer.Peer) []*Message_Peer {
 func (m *Message) GetClusterLevel() int {
 	level := m.GetClusterLevelRaw() - 1
 	if level < 0 {
-		u.PErr("GetClusterLevel: no routing level specified, assuming 0\n")
+		u.DErr("GetClusterLevel: no routing level specified, assuming 0\n")
 		level = 0
 	}
 	return int(level)
diff --git a/routing/dht/dht.go b/routing/dht/dht.go
index ec22da9b0c72c051ea1b549477c85f2d5c1f0cc2..148168d012da7cd2e768918968fbe8e2468fb67a 100644
--- a/routing/dht/dht.go
+++ b/routing/dht/dht.go
@@ -215,6 +215,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p *peer.Peer, key string) e
 		return err
 	}
 
+	u.DOut("[%s] putProvider: %s for %s\n", dht.self.ID.Pretty(), p.ID.Pretty(), key)
 	if *rpmes.Key != *pmes.Key {
 		return errors.New("provider not added correctly")
 	}
@@ -393,6 +394,8 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer
 			continue
 		}
 
+		u.DOut("[%s] adding provider: %s for %s\n", dht.self.ID.Pretty(), p, key)
+
 		// Dont add outselves to the list
 		if p.ID.Equal(dht.self.ID) {
 			continue
@@ -464,7 +467,7 @@ func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (*peer.Peer, error) {
 		}
 
 		// create new Peer
-		p := &peer.Peer{ID: id}
+		p = &peer.Peer{ID: id}
 		p.AddAddress(maddr)
 		dht.peerstore.Put(p)
 	}
diff --git a/routing/dht/dht_test.go b/routing/dht/dht_test.go
index 94e9ee6d3536f7d441bb69c478d73524beaa0d91..e3f056ce2bac747c9f985dfa3e9f423c72a94f34 100644
--- a/routing/dht/dht_test.go
+++ b/routing/dht/dht_test.go
@@ -86,7 +86,9 @@ func makePeer(addr *ma.Multiaddr) *peer.Peer {
 }
 
 func TestPing(t *testing.T) {
-	u.Debug = true
+	// t.Skip("skipping test to debug another")
+
+	u.Debug = false
 	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
 	if err != nil {
 		t.Fatal(err)
@@ -104,6 +106,8 @@ func TestPing(t *testing.T) {
 
 	defer dhtA.Halt()
 	defer dhtB.Halt()
+	defer dhtA.network.Close()
+	defer dhtB.network.Close()
 
 	_, err = dhtA.Connect(peerB)
 	if err != nil {
@@ -118,7 +122,9 @@ func TestPing(t *testing.T) {
 }
 
 func TestValueGetSet(t *testing.T) {
-	u.Debug = true
+	// t.Skip("skipping test to debug another")
+
+	u.Debug = false
 	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1235")
 	if err != nil {
 		t.Fatal(err)
@@ -136,6 +142,8 @@ func TestValueGetSet(t *testing.T) {
 
 	defer dhtA.Halt()
 	defer dhtB.Halt()
+	defer dhtA.network.Close()
+	defer dhtB.network.Close()
 
 	_, err = dhtA.Connect(peerB)
 	if err != nil {
@@ -155,140 +163,149 @@ func TestValueGetSet(t *testing.T) {
 
 }
 
-// func TestProvides(t *testing.T) {
-// 	u.Debug = false
-//
-// 	_, peers, dhts := setupDHTS(4, t)
-// 	defer func() {
-// 		for i := 0; i < 4; i++ {
-// 			dhts[i].Halt()
-// 		}
-// 	}()
-//
-// 	_, err := dhts[0].Connect(peers[1])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(peers[2])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(peers[3])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	bits, err := dhts[3].getLocal(u.Key("hello"))
-// 	if err != nil && bytes.Equal(bits, []byte("world")) {
-// 		t.Fatal(err)
-// 	}
-//
-// 	err = dhts[3].Provide(u.Key("hello"))
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	time.Sleep(time.Millisecond * 60)
-//
-// 	provs, err := dhts[0].FindProviders(u.Key("hello"), time.Second)
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	if len(provs) != 1 {
-// 		t.Fatal("Didnt get back providers")
-// 	}
-// }
-//
-// func TestLayeredGet(t *testing.T) {
-// 	u.Debug = false
-// 	addrs, _, dhts := setupDHTS(4, t)
-// 	defer func() {
-// 		for i := 0; i < 4; i++ {
-// 			dhts[i].Halt()
-// 		}
-// 	}()
-//
-// 	_, err := dhts[0].Connect(addrs[1])
-// 	if err != nil {
-// 		t.Fatalf("Failed to connect: %s", err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(addrs[2])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(addrs[3])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	err = dhts[3].Provide(u.Key("hello"))
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	time.Sleep(time.Millisecond * 60)
-//
-// 	val, err := dhts[0].GetValue(u.Key("hello"), time.Second)
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	if string(val) != "world" {
-// 		t.Fatal("Got incorrect value.")
-// 	}
-//
-// }
-//
-// func TestFindPeer(t *testing.T) {
-// 	u.Debug = false
-//
-// 	addrs, peers, dhts := setupDHTS(4, t)
-// 	go func() {
-// 		for i := 0; i < 4; i++ {
-// 			dhts[i].Halt()
-// 		}
-// 	}()
-//
-// 	_, err := dhts[0].Connect(addrs[1])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(addrs[2])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	_, err = dhts[1].Connect(addrs[3])
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	p, err := dhts[0].FindPeer(peers[2].ID, time.Second)
-// 	if err != nil {
-// 		t.Fatal(err)
-// 	}
-//
-// 	if p == nil {
-// 		t.Fatal("Failed to find peer.")
-// 	}
-//
-// 	if !p.ID.Equal(peers[2].ID) {
-// 		t.Fatal("Didnt find expected peer.")
-// 	}
-// }
+func TestProvides(t *testing.T) {
+	// t.Skip("skipping test to debug another")
+
+	u.Debug = false
+
+	_, peers, dhts := setupDHTS(4, t)
+	defer func() {
+		for i := 0; i < 4; i++ {
+			dhts[i].Halt()
+			defer dhts[i].network.Close()
+		}
+	}()
+
+	_, err := dhts[0].Connect(peers[1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dhts[1].Connect(peers[2])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dhts[1].Connect(peers[3])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bits, err := dhts[3].getLocal(u.Key("hello"))
+	if err != nil || !bytes.Equal(bits, []byte("world")) {
+		t.Fatal(err)
+	}
+
+	err = dhts[3].Provide(u.Key("hello"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	time.Sleep(time.Millisecond * 60)
+
+	provs, err := dhts[0].FindProviders(u.Key("hello"), time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(provs) != 1 {
+		t.Fatal("Didnt get back providers")
+	}
+}
+
+func TestLayeredGet(t *testing.T) {
+	// t.Skip("skipping test to debug another")
+
+	u.Debug = false
+	_, peers, dhts := setupDHTS(4, t)
+	defer func() {
+		for i := 0; i < 4; i++ {
+			dhts[i].Halt()
+			defer dhts[i].network.Close()
+		}
+	}()
+
+	_, err := dhts[0].Connect(peers[1])
+	if err != nil {
+		t.Fatalf("Failed to connect: %s", err)
+	}
+
+	_, err = dhts[1].Connect(peers[2])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dhts[1].Connect(peers[3])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dhts[3].Provide(u.Key("hello"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	time.Sleep(time.Millisecond * 60)
+
+	val, err := dhts[0].GetValue(u.Key("hello"), time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(val) != "world" {
+		t.Fatal("Got incorrect value.")
+	}
+
+}
+
+func TestFindPeer(t *testing.T) {
+	// t.Skip("skipping test to debug another")
+
+	u.Debug = false
+
+	_, peers, dhts := setupDHTS(4, t)
+	defer func() {
+		for i := 0; i < 4; i++ {
+			dhts[i].Halt()
+			dhts[i].network.Close()
+		}
+	}()
+
+	_, err := dhts[0].Connect(peers[1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dhts[1].Connect(peers[2])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = dhts[1].Connect(peers[3])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p, err := dhts[0].FindPeer(peers[2].ID, time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if p == nil {
+		t.Fatal("Failed to find peer.")
+	}
+
+	if !p.ID.Equal(peers[2].ID) {
+		t.Fatal("Didnt find expected peer.")
+	}
+}
diff --git a/routing/dht/ext_test.go b/routing/dht/ext_test.go
index 47eb6429a29153b763e5f9e02b945ab5703c107f..26fbfea35cb374394244f5caffcb6e78229e6d4c 100644
--- a/routing/dht/ext_test.go
+++ b/routing/dht/ext_test.go
@@ -92,6 +92,8 @@ func (f *fauxNet) SendMessage(msg.NetMessage) error {
 func (f *fauxNet) Close() error { return nil }
 
 func TestGetFailures(t *testing.T) {
+	// t.Skip("skipping test because it makes a lot of output")
+
 	ctx := context.Background()
 	fn := &fauxNet{}
 	fs := &fauxSender{}
@@ -189,6 +191,8 @@ func _randPeer() *peer.Peer {
 }
 
 func TestNotFound(t *testing.T) {
+	// t.Skip("skipping test because it makes a lot of output")
+
 	fn := &fauxNet{}
 	fs := &fauxSender{}
 
@@ -233,7 +237,7 @@ func TestNotFound(t *testing.T) {
 	})
 
 	v, err := d.GetValue(u.Key("hello"), time.Second*5)
-	u.POut("get value got %v\n", v)
+	u.DOut("get value got %v\n", v)
 	if err != nil {
 		switch err {
 		case u.ErrNotFound:
@@ -251,6 +255,8 @@ func TestNotFound(t *testing.T) {
 // If less than K nodes are in the entire network, it should fail when we make
 // a GET rpc and nobody has the value
 func TestLessThanKResponses(t *testing.T) {
+	// t.Skip("skipping test because it makes a lot of output")
+
 	u.Debug = false
 	fn := &fauxNet{}
 	fs := &fauxSender{}
diff --git a/routing/dht/handlers.go b/routing/dht/handlers.go
index 5320cc10aa41658ccf995b4348210a2404fc64a2..fe22121bb2896c6f2cd3555868cdfa8852194784 100644
--- a/routing/dht/handlers.go
+++ b/routing/dht/handlers.go
@@ -176,7 +176,7 @@ func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, er
 		dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
 
 	dht.providers.AddProvider(key, p)
-	return nil, nil
+	return pmes, nil // send back same msg as confirmation.
 }
 
 // Halt stops all communications from this peer and shut down