package coreunix

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/pin/gc"
	"github.com/ipfs/go-ipfs/repo"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-blockservice"
	cid "github.com/ipfs/go-cid"
	datastore "github.com/ipfs/go-datastore"
	syncds "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	config "github.com/ipfs/go-ipfs-config"
	files "github.com/ipfs/go-ipfs-files"
	pi "github.com/ipfs/go-ipfs-posinfo"
	dag "github.com/ipfs/go-merkledag"
	coreiface "github.com/ipfs/interface-go-ipfs-core"
)

const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe"

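// TestAddGCLive runs a garbage collection while an add is still in progress
// and checks that none of the blocks belonging to the ongoing add get
// collected.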
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: testPeerID, // required by offline node
			},
		},
		D: syncds.MutexWrap(datastore.NewMapDatastore()),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
	if err != nil {
		t.Fatal(err)
	}
	adder.Out = out

	rfa := files.NewBytesFile([]byte("testfileA"))

	// use a pipe-backed file so we can 'pause' the add for the timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile(piper)

	rfd := files.NewBytesFile([]byte("testfileD"))

	slf := files.NewMapDirectory(map[string]files.Node{
		"a": rfa,
		"b": hangfile,
		"d": rfd,
	})

	addDone := make(chan struct{})
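	// run the add in the background; it will block on file 'b' (the
	// pipe-backed file) until we write to the pipe and close it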
	go func() {
		defer close(addDone)
		defer close(out)
		_, err := adder.AddAllAndPin(slf)

		if err != nil {
			t.Error(err)
		}

	}()

	addedHashes := make(map[string]struct{})
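	// record the first AddEvent from the adder; the add must not be finished
	// yet because file 'b' is still blocked on the pipe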
	select {
	case o := <-out:
		addedHashes[o.(*coreiface.AddEvent).Path.Cid().String()] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldn't complete yet")
	}

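	// kick off a garbage collection while the add is still in progress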
	var gcout <-chan gc.Result
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
	}()

	// gc shouldn't start until we let the add finish its current file.
	if _, err := pipew.Write([]byte("some data for file b")); err != nil {
		t.Fatal(err)
	}

	select {
	case <-gcstarted:
		t.Fatal("gc shouldn't have started yet")
	default:
	}

	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock

	// finish write and unblock gc
	pipew.Close()

	// receive next object from adder
	o := <-out
	addedHashes[o.(*coreiface.AddEvent).Path.Cid().String()] = struct{}{}

	<-gcstarted

	for r := range gcout {
		if r.Error != nil {
			t.Fatal(r.Error)
		}
		if _, ok := addedHashes[r.KeyRemoved.String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}

	var last cid.Cid
	for a := range out {
		// wait for it to finish
		c, err := cid.Decode(a.(*coreiface.AddEvent).Path.Cid().String())
		if err != nil {
			t.Fatal(err)
		}
		last = c
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()

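	// the DAG under the last root emitted by the adder must still be fully
	// traversable after the GC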
	set := cid.NewSet()
	err = dag.EnumerateChildren(ctx, dag.GetLinksWithDAG(node.DAG), last, set.Visit)
	if err != nil {
		t.Fatal(err)
	}
}

// testAddWPosInfo adds a file through the adder with NoCopy set (and
// optionally raw leaves), routing writes through a wrapping blockstore that
// inspects the filestore position info (PosInfo) of the blocks written.
func testAddWPosInfo(t *testing.T, rawLeaves bool) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: testPeerID, // required by offline node
			},
		},
		D: syncds.MutexWrap(datastore.NewMapDatastore()),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

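	// wrap the node's blockstore so every block written during the add can be
	// checked for the expected filestore position info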
	bs := &testBlockstore{GCBlockstore: node.Blockstore, expectedPath: filepath.Join(os.TempDir(), "foo.txt"), t: t}
	bserv := blockservice.New(bs, node.Exchange)
	dserv := dag.NewDAGService(bserv)
	adder, err := NewAdder(context.Background(), node.Pinning, bs, dserv)
	if err != nil {
		t.Fatal(err)
	}
	out := make(chan interface{})
	adder.Out = out
	adder.Progress = true
	adder.RawLeaves = rawLeaves
	adder.NoCopy = true

	data := make([]byte, 5*1024*1024)
	rand.New(rand.NewSource(2)).Read(data) // Rand.Read never returns an error
	fileData := ioutil.NopCloser(bytes.NewBuffer(data))
	fileInfo := dummyFileInfo{"foo.txt", int64(len(data)), time.Now()}
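	// a ReaderPathFile associates the data with an on-disk path, which lets
	// the adder attach position info referring to that path when NoCopy is set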
	file, _ := files.NewReaderPathFile(filepath.Join(os.TempDir(), "foo.txt"), fileData, &fileInfo)

	go func() {
		defer close(adder.Out)
		_, err = adder.AddAllAndPin(file)
		if err != nil {
			t.Error(err)
		}
	}()
	for range out {
	}

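	// expected counts of blocks that arrived with PosInfo at offset zero and
	// at non-zero offsets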
	exp := 0
	nonOffZero := 0
	if rawLeaves {
		exp = 1
		nonOffZero = 19
	}
	if bs.countAtOffsetZero != exp {
		t.Fatalf("expected %d blocks with an offset at zero (one root and one leaf), got %d", exp, bs.countAtOffsetZero)
	}
	if bs.countAtOffsetNonZero != nonOffZero {
		// note: the exact number depends on the file size and the sharding algorithm used
		t.Fatalf("expected %d blocks with an offset > 0, got %d", nonOffZero, bs.countAtOffsetNonZero)
	}
}

func TestAddWPosInfo(t *testing.T) {
	testAddWPosInfo(t, false)
}

func TestAddWPosInfoAndRawLeafs(t *testing.T) {
	testAddWPosInfo(t, true)
}

// testBlockstore wraps a GCBlockstore and, for each block put through it,
// checks the block's filestore PosInfo path and counts how many blocks arrive
// with an offset of zero versus a non-zero offset.
type testBlockstore struct {
	blockstore.GCBlockstore
	expectedPath         string
	t                    *testing.T
	countAtOffsetZero    int
	countAtOffsetNonZero int
}

func (bs *testBlockstore) Put(block blocks.Block) error {
	bs.CheckForPosInfo(block)
	return bs.GCBlockstore.Put(block)
}

func (bs *testBlockstore) PutMany(blocks []blocks.Block) error {
	for _, blk := range blocks {
		bs.CheckForPosInfo(blk)
	}
	return bs.GCBlockstore.PutMany(blocks)
}

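// CheckForPosInfo verifies that a filestore-backed block carries the expected
// path and tallies whether its offset was zero or non-zero.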
func (bs *testBlockstore) CheckForPosInfo(block blocks.Block) {
	fsn, ok := block.(*pi.FilestoreNode)
	if ok {
		posInfo := fsn.PosInfo
		if posInfo.FullPath != bs.expectedPath {
			bs.t.Fatal("PosInfo does not have the expected path")
		}
		if posInfo.Offset == 0 {
			bs.countAtOffsetZero++
		} else {
			bs.countAtOffsetNonZero++
		}
	}
}

type dummyFileInfo struct {
	name    string
	size    int64
	modTime time.Time
}

func (fi *dummyFileInfo) Name() string       { return fi.name }
func (fi *dummyFileInfo) Size() int64        { return fi.size }
func (fi *dummyFileInfo) Mode() os.FileMode  { return 0 }
func (fi *dummyFileInfo) ModTime() time.Time { return fi.modTime }
func (fi *dummyFileInfo) IsDir() bool        { return false }
func (fi *dummyFileInfo) Sys() interface{}   { return nil }