Commit 331b83e3 authored by Hector Sanjuan

Feat: remove circular dependencies in merkledag package tests

This avoids using unixfs package and importer packages in
merkledag, which removes circular dependencies, making it hard
to extract this module.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
parent 87341562
...@@ -16,14 +16,11 @@ import ( ...@@ -16,14 +16,11 @@ import (
bserv "github.com/ipfs/go-ipfs/blockservice" bserv "github.com/ipfs/go-ipfs/blockservice"
bstest "github.com/ipfs/go-ipfs/blockservice/test" bstest "github.com/ipfs/go-ipfs/blockservice/test"
offline "github.com/ipfs/go-ipfs/exchange/offline" offline "github.com/ipfs/go-ipfs/exchange/offline"
imp "github.com/ipfs/go-ipfs/importer"
. "github.com/ipfs/go-ipfs/merkledag" . "github.com/ipfs/go-ipfs/merkledag"
mdpb "github.com/ipfs/go-ipfs/merkledag/pb" mdpb "github.com/ipfs/go-ipfs/merkledag/pb"
dstest "github.com/ipfs/go-ipfs/merkledag/test" dstest "github.com/ipfs/go-ipfs/merkledag/test"
uio "github.com/ipfs/go-ipfs/unixfs/io"
u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
chunker "gx/ipfs/QmWo8jYc19ppG7YoTsrr2kEtLRbARTJho5oNXFTR6B7Peq/go-ipfs-chunker"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format" ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
...@@ -129,6 +126,61 @@ func TestBatchFetchDupBlock(t *testing.T) { ...@@ -129,6 +126,61 @@ func TestBatchFetchDupBlock(t *testing.T) {
runBatchFetchTest(t, read) runBatchFetchTest(t, read)
} }
// makeTestDAG creates a simple DAG from the data in a reader.
// First, a node is created from each 512 bytes of data from the reader
// (like the Size chunker would do). Then all nodes are added as children
// to a root node, which is returned. Any failure aborts the test via t.Fatal.
func makeTestDAG(t *testing.T, read io.Reader, ds ipld.DAGService) ipld.Node {
	p := make([]byte, 512)
	nodes := []*ProtoNode{}

	for {
		// io.ReadFull fills the whole chunk (plain Read may return short
		// counts mid-stream, which would leave stale bytes in the node).
		n, err := io.ReadFull(read, p)
		if err == io.EOF {
			break
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			t.Fatal(err)
		}
		// Copy the chunk before building the node: NodeWithData keeps a
		// reference to the slice it is given, so reusing p directly would
		// make every node alias the same, repeatedly overwritten buffer.
		data := make([]byte, n)
		copy(data, p[:n])
		nodes = append(nodes, NodeWithData(data))
		if err == io.ErrUnexpectedEOF {
			// Short final chunk: the reader is exhausted.
			break
		}
	}

	ctx := context.Background()
	// Add a root referencing all created nodes.
	root := NodeWithData(nil)
	for _, n := range nodes {
		if err := root.AddNodeLink(n.Cid().String(), n); err != nil {
			t.Fatal(err)
		}
		if err := ds.Add(ctx, n); err != nil {
			t.Fatal(err)
		}
	}
	if err := ds.Add(ctx, root); err != nil {
		t.Fatal(err)
	}
	return root
}
// makeTestDAGReader takes the root node as returned by makeTestDAG and
// provides a reader that reads all the RawData from that node and its
// children, in link order. Any fetch failure aborts the test via t.Fatal.
func makeTestDAGReader(t *testing.T, root ipld.Node, ds ipld.DAGService) io.Reader {
	ctx := context.Background()
	var out bytes.Buffer
	out.Write(root.RawData())
	for _, link := range root.Links() {
		child, err := ds.Get(ctx, link.Cid)
		if err != nil {
			t.Fatal(err)
		}
		if _, err := out.Write(child.RawData()); err != nil {
			t.Fatal(err)
		}
	}
	return &out
}
func runBatchFetchTest(t *testing.T, read io.Reader) { func runBatchFetchTest(t *testing.T, read io.Reader) {
ctx := context.Background() ctx := context.Background()
var dagservs []ipld.DAGService var dagservs []ipld.DAGService
...@@ -136,19 +188,11 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { ...@@ -136,19 +188,11 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
dagservs = append(dagservs, NewDAGService(bsi)) dagservs = append(dagservs, NewDAGService(bsi))
} }
spl := chunker.NewSizeSplitter(read, 512) root := makeTestDAG(t, read, dagservs[0])
root, err := imp.BuildDagFromReader(dagservs[0], spl)
if err != nil {
t.Fatal(err)
}
t.Log("finished setup.") t.Log("finished setup.")
dagr, err := uio.NewDagReader(ctx, root, dagservs[0]) dagr := makeTestDAGReader(t, root, dagservs[0])
if err != nil {
t.Fatal(err)
}
expected, err := ioutil.ReadAll(dagr) expected, err := ioutil.ReadAll(dagr)
if err != nil { if err != nil {
...@@ -181,11 +225,9 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { ...@@ -181,11 +225,9 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
if !ok { if !ok {
errs <- ErrNotProtobuf errs <- ErrNotProtobuf
} }
_ = firstpb
read, err := uio.NewDagReader(ctx, firstpb, dagservs[i]) _ = expected
if err != nil { read := makeTestDAGReader(t, firstpb, dagservs[i])
errs <- err
}
datagot, err := ioutil.ReadAll(read) datagot, err := ioutil.ReadAll(read)
if err != nil { if err != nil {
errs <- err errs <- err
...@@ -228,12 +270,9 @@ func TestFetchGraph(t *testing.T) { ...@@ -228,12 +270,9 @@ func TestFetchGraph(t *testing.T) {
} }
read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
root, err := imp.BuildDagFromReader(dservs[0], chunker.NewSizeSplitter(read, 512)) root := makeTestDAG(t, read, dservs[0])
if err != nil {
t.Fatal(err)
}
err = FetchGraph(context.TODO(), root.Cid(), dservs[1]) err := FetchGraph(context.TODO(), root.Cid(), dservs[1])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -254,14 +293,11 @@ func TestEnumerateChildren(t *testing.T) { ...@@ -254,14 +293,11 @@ func TestEnumerateChildren(t *testing.T) {
ds := NewDAGService(bsi[0]) ds := NewDAGService(bsi[0])
read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024) read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024)
root, err := imp.BuildDagFromReader(ds, chunker.NewSizeSplitter(read, 512)) root := makeTestDAG(t, read, ds)
if err != nil {
t.Fatal(err)
}
set := cid.NewSet() set := cid.NewSet()
err = EnumerateChildren(context.Background(), ds.GetLinks, root.Cid(), set.Visit) err := EnumerateChildren(context.Background(), ds.GetLinks, root.Cid(), set.Visit)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment