package merkledag_test

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
	"testing"

	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	blockservice "github.com/ipfs/go-ipfs/blockservice"
	bserv "github.com/ipfs/go-ipfs/blockservice"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	imp "github.com/ipfs/go-ipfs/importer"
	chunk "github.com/ipfs/go-ipfs/importer/chunk"
	. "github.com/ipfs/go-ipfs/merkledag"
	"github.com/ipfs/go-ipfs/pin"
	uio "github.com/ipfs/go-ipfs/unixfs/io"
	u "github.com/ipfs/go-ipfs/util"
)

type dagservAndPinner struct {
	ds DAGService
	mp pin.ManualPinner
}

// getDagservAndPinner builds an offline DAGService backed by an in-memory
// datastore, along with a manual pinner for it.
func getDagservAndPinner(t *testing.T) dagservAndPinner {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv, err := bserv.New(bs, offline.Exchange(bs))
	if err != nil {
		t.Fatal(err)
	}
	dserv := NewDAGService(blockserv)
	mpin := pin.NewPinner(db, dserv).GetManual()
	return dagservAndPinner{
		ds: dserv,
		mp: mpin,
	}
}

func TestNode(t *testing.T) {
	n1 := &Node{Data: []byte("beep")}
	n2 := &Node{Data: []byte("boop")}
	n3 := &Node{Data: []byte("beep boop")}
	if err := n3.AddNodeLink("beep-link", n1); err != nil {
		t.Error(err)
	}
	if err := n3.AddNodeLink("boop-link", n2); err != nil {
		t.Error(err)
	}

	printn := func(name string, n *Node) {
		fmt.Println(">", name)
		fmt.Println("data:", string(n.Data))

		fmt.Println("links:")
		for _, l := range n.Links {
			fmt.Println("-", l.Name, l.Size, l.Hash)
		}

		e, err := n.Encoded(false)
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("encoded:", e)
		}

		h, err := n.Multihash()
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("hash:", h)
		}

		k, err := n.Key()
		if err != nil {
			t.Error(err)
		} else if k != u.Key(h) {
			t.Error("Key is not equivalent to multihash")
		} else {
			fmt.Println("key: ", k)
		}

		SubtestNodeStat(t, n)
	}

	printn("beep", n1)
	printn("boop", n2)
	printn("beep boop", n3)
}

func SubtestNodeStat(t *testing.T, n *Node) {
	enc, err := n.Encoded(true)
	if err != nil {
		t.Error("n.Encoded(true) failed")
		return
	}

	cumSize, err := n.Size()
	if err != nil {
		t.Error("n.Size() failed")
		return
	}

	expected := NodeStat{
		NumLinks:       len(n.Links),
		BlockSize:      len(enc),
		LinksSize:      len(enc) - len(n.Data), // includes framing.
		DataSize:       len(n.Data),
		CumulativeSize: int(cumSize),
	}

	actual, err := n.Stat()
	if err != nil {
		t.Error("n.Stat() failed")
		return
	}

	if expected != *actual {
		t.Errorf("n.Stat incorrect.\nexpect: %s\nactual: %s", expected, actual)
	} else {
		fmt.Printf("n.Stat correct: %s\n", actual)
	}
}

// devZero is an io.Reader that yields an endless stream of zero bytes,
// used to exercise fetching a DAG made of duplicate blocks.
type devZero struct{}

func (_ devZero) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func TestBatchFetch(t *testing.T) {
	read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
	runBatchFetchTest(t, read)
}

func TestBatchFetchDupBlock(t *testing.T) {
	read := io.LimitReader(devZero{}, 1024*32)
	runBatchFetchTest(t, read)
}

// runBatchFetchTest imports the reader's data into the first DAGService,
// then fetches it concurrently from the remaining mock services and
// verifies the bytes read back match the original.
func runBatchFetchTest(t *testing.T, read io.Reader) {
	var dagservs []DAGService
	for _, bsi := range blockservice.Mocks(t, 5) {
		dagservs = append(dagservs, NewDAGService(bsi))
	}

	spl := &chunk.SizeSplitter{512}

	root, err := imp.BuildDagFromReader(read, dagservs[0], nil, spl)
	if err != nil {
		t.Fatal(err)
	}

	t.Log("finished setup.")

	dagr, err := uio.NewDagReader(context.TODO(), root, dagservs[0])
	if err != nil {
		t.Fatal(err)
	}

	expected, err := ioutil.ReadAll(dagr)
	if err != nil {
		t.Fatal(err)
	}

	err = dagservs[0].AddRecursive(root)
	if err != nil {
		t.Fatal(err)
	}

	t.Log("Added file to first node.")

	k, err := root.Key()
	if err != nil {
		t.Fatal(err)
	}

	wg := sync.WaitGroup{}
	for i := 1; i < len(dagservs); i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			first, err := dagservs[i].Get(context.Background(), k)
			if err != nil {
				t.Fatal(err)
			}
			fmt.Println("Got first node back.")

			read, err := uio.NewDagReader(context.TODO(), first, dagservs[i])
			if err != nil {
				t.Fatal(err)
			}
			datagot, err := ioutil.ReadAll(read)
			if err != nil {
				t.Fatal(err)
			}

			if !bytes.Equal(datagot, expected) {
				t.Fatal("Got bad data back!")
			}
		}(i)
	}

	wg.Wait()
}