package commands

import (
	"errors"
	"fmt"
	"io"
	"strings"

	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	blockservice "github.com/ipfs/go-ipfs/blockservice"
	cmds "github.com/ipfs/go-ipfs/commands"
	files "github.com/ipfs/go-ipfs/commands/files"
	core "github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/core/coreunix"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	dag "github.com/ipfs/go-ipfs/merkledag"
	dagtest "github.com/ipfs/go-ipfs/merkledag/test"
	mfs "github.com/ipfs/go-ipfs/mfs"
	ft "github.com/ipfs/go-ipfs/unixfs"

	u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util"
	mh "gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash"
	"gx/ipfs/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs/pb"
)

// ErrDepthLimitExceeded indicates that the max depth has been exceeded.
var ErrDepthLimitExceeded = errors.New("depth limit exceeded")

// Names of the options accepted by 'ipfs add'.
const (
	quietOptionName       = "quiet"
	quieterOptionName     = "quieter"
	silentOptionName      = "silent"
	progressOptionName    = "progress"
	trickleOptionName     = "trickle"
	wrapOptionName        = "wrap-with-directory"
	hiddenOptionName      = "hidden"
	onlyHashOptionName    = "only-hash"
	chunkerOptionName     = "chunker"
	pinOptionName         = "pin"
	rawLeavesOptionName   = "raw-leaves"
	noCopyOptionName      = "nocopy"
	fstoreCacheOptionName = "fscache"
	cidVersionOptionName  = "cid-version"
	hashOptionName        = "hash"
)

// adderOutChanSize is the buffer size of the channel that streams
// add events from the adder goroutine to the response output.
const adderOutChanSize = 8

49
var AddCmd = &cmds.Command{
50
	Helptext: cmds.HelpText{
51
		Tagline: "Add a file or directory to ipfs.",
52
		ShortDescription: `
53
Adds contents of <path> to ipfs. Use -r to add directories (recursively).
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73
`,
		LongDescription: `
Adds contents of <path> to ipfs. Use -r to add directories.
Note that directories are added recursively, to form the ipfs
MerkleDAG.

The wrap option, '-w', wraps the file (or files, if using the
recursive option) in a directory. This directory contains only
the files which have been added, and means that the file retains
its filename. For example:

  > ipfs add example.jpg
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  > ipfs add example.jpg -w
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  added QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx

You can now refer to the added file in a gateway, like so:

  /ipfs/QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx/example.jpg
74

75 76
The chunker option, '-s', specifies the chunking strategy that dictates
how to break files into blocks. Blocks with same content can
77
be deduplicated. The default is a fixed block size of
78 79
256 * 1024 bytes, 'size-262144'. Alternatively, you can use the
rabin chunker for content defined chunking by specifying
80
rabin-[min]-[avg]-[max] (where min/avg/max refer to the resulting
81 82
chunk sizes). Using other chunking strategies will produce
different hashes for the same file.
83

84 85 86 87
  > ipfs add --chunker=size-2048 ipfs-logo.svg
  added QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 ipfs-logo.svg
  > ipfs add --chunker=rabin-512-1024-2048 ipfs-logo.svg
  added Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn ipfs-logo.svg
88 89 90

You can now check what blocks have been created by:

91 92 93 94 95 96
  > ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
  QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
  Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195
  > ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
  QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
  QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868
97
  QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338
98 99 100
`,
	},

101
	Arguments: []cmds.Argument{
102
		cmds.FileArg("path", true, true, "The path to a file to be added to ipfs.").EnableRecursive().EnableStdin(),
103
	},
104 105
	Options: []cmds.Option{
		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
106
		cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
107
		cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
108
		cmds.BoolOption(silentOptionName, "Write no output."),
109
		cmds.BoolOption(progressOptionName, "p", "Stream progress data."),
110 111 112 113
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
		cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
		cmds.BoolOption(hiddenOptionName, "H", "Include files that are hidden. Only takes effect on recursive add."),
114
		cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes] or rabin-[min]-[avg]-[max]").Default("size-262144"),
115
		cmds.BoolOption(pinOptionName, "Pin this object when adding.").Default(true),
116
		cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
117 118
		cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"),
		cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
119
		cmds.IntOption(cidVersionOptionName, "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)").Default(0),
120
		cmds.StringOption(hashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)").Default("sha2-256"),
121 122
	},
	PreRun: func(req cmds.Request) error {
123
		quiet, _, _ := req.Option(quietOptionName).Bool()
124 125 126
		quieter, _, _ := req.Option(quieterOptionName).Bool()
		quiet = quiet || quieter

127 128 129
		silent, _, _ := req.Option(silentOptionName).Bool()

		if quiet || silent {
130 131 132
			return nil
		}

133
		// ipfs cli progress bar defaults to true unless quiet or silent is used
134 135 136 137 138
		_, found, _ := req.Option(progressOptionName).Bool()
		if !found {
			req.SetOption(progressOptionName, true)
		}

139 140 141
		sizeFile, ok := req.Files().(files.SizeFile)
		if !ok {
			// we don't need to error, the progress bar just won't know how big the files are
142
			log.Warning("cannot determine size of input file")
143 144 145
			return nil
		}

146 147
		sizeCh := make(chan int64, 1)
		req.Values()["size"] = sizeCh
rht's avatar
rht committed
148

149 150 151
		go func() {
			size, err := sizeFile.Size()
			if err != nil {
Jeromy's avatar
Jeromy committed
152
				log.Warningf("error getting files size: %s", err)
153 154 155 156 157 158 159
				// see comment above
				return
			}

			log.Debugf("Total size of file being added: %v\n", size)
			sizeCh <- size
		}()
160 161

		return nil
162
	},
163
	Run: func(req cmds.Request, res cmds.Response) {
Jeromy's avatar
Jeromy committed
164
		n, err := req.InvocContext().GetNode()
165
		if err != nil {
166 167
			res.SetError(err, cmds.ErrNormal)
			return
168
		}
169 170 171 172 173 174

		cfg, err := n.Repo.Config()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
rht's avatar
rht committed
175 176 177 178 179 180 181
		// check if repo will exceed storage limit if added
		// TODO: this doesn't handle the case if the hashed file is already in blocks (deduplicated)
		// TODO: conditional GC is disabled due to it is somehow not possible to pass the size to the daemon
		//if err := corerepo.ConditionalGC(req.Context(), n, uint64(size)); err != nil {
		//	res.SetError(err, cmds.ErrNormal)
		//	return
		//}
182

183
		progress, _, _ := req.Option(progressOptionName).Bool()
184
		trickle, _, _ := req.Option(trickleOptionName).Bool()
185
		wrap, _, _ := req.Option(wrapOptionName).Bool()
gatesvp's avatar
gatesvp committed
186 187
		hash, _, _ := req.Option(onlyHashOptionName).Bool()
		hidden, _, _ := req.Option(hiddenOptionName).Bool()
Jeromy's avatar
Jeromy committed
188
		silent, _, _ := req.Option(silentOptionName).Bool()
189
		chunker, _, _ := req.Option(chunkerOptionName).String()
190
		dopin, _, _ := req.Option(pinOptionName).Bool()
Jeromy's avatar
Jeromy committed
191
		rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool()
192 193
		nocopy, _, _ := req.Option(noCopyOptionName).Bool()
		fscache, _, _ := req.Option(fstoreCacheOptionName).Bool()
194
		cidVer, _, _ := req.Option(cidVersionOptionName).Int()
195
		hashFunStr, hfset, _ := req.Option(hashOptionName).String()
196

197 198 199 200 201 202
		if nocopy && !cfg.Experimental.FilestoreEnabled {
			res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"),
				cmds.ErrClient)
			return
		}

Jeromy's avatar
Jeromy committed
203 204 205 206
		if nocopy && !rbset {
			rawblks = true
		}

207 208 209 210
		if nocopy && !rawblks {
			res.SetError(fmt.Errorf("nocopy option requires '--raw-leaves' to be enabled as well"), cmds.ErrNormal)
			return
		}
Jeromy's avatar
Jeromy committed
211

212 213 214 215
		if hfset && cidVer == 0 {
			cidVer = 1
		}

216 217 218 219 220 221 222 223 224 225
		if cidVer >= 1 && !rbset {
			rawblks = true
		}

		prefix, err := dag.PrefixForCidVersion(cidVer)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

226 227 228 229 230 231 232 233 234
		hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)]
		if !ok {
			res.SetError(fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr)), cmds.ErrNormal)
			return
		}

		prefix.MhType = hashFunCode
		prefix.MhLength = -1

Jeromy's avatar
Jeromy committed
235
		if hash {
236 237 238
			nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
				//TODO: need this to be true or all files
				// hashed will be stored in memory!
239
				NilRepo: true,
240
			})
Jeromy's avatar
Jeromy committed
241 242 243 244 245 246
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			n = nilnode
		}
247

248
		addblockstore := n.Blockstore
Jeromy's avatar
Jeromy committed
249
		if !(fscache || nocopy) {
250 251 252 253
			addblockstore = bstore.NewGCBlockstore(n.BaseBlocks, n.GCLocker)
		}

		exch := n.Exchange
254 255
		local, _, _ := req.Option("local").Bool()
		if local {
256
			exch = offline.Exchange(addblockstore)
257 258
		}

259 260 261
		bserv := blockservice.New(addblockstore, exch)
		dserv := dag.NewDAGService(bserv)

262
		fileAdder, err := coreunix.NewAdder(req.Context(), n.Pinning, n.Blockstore, dserv)
Jeromy's avatar
Jeromy committed
263 264 265 266
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
267

268
		outChan := make(chan interface{}, adderOutChanSize)
269 270
		res.SetOutput((<-chan interface{})(outChan))

271
		fileAdder.Out = outChan
272 273 274 275 276
		fileAdder.Chunker = chunker
		fileAdder.Progress = progress
		fileAdder.Hidden = hidden
		fileAdder.Trickle = trickle
		fileAdder.Wrap = wrap
277
		fileAdder.Pin = dopin
Jeromy's avatar
Jeromy committed
278
		fileAdder.Silent = silent
279
		fileAdder.RawLeaves = rawblks
280
		fileAdder.NoCopy = nocopy
281
		fileAdder.Prefix = &prefix
282

Jeromy's avatar
Jeromy committed
283 284
		if hash {
			md := dagtest.Mock()
285
			mr, err := mfs.NewRoot(req.Context(), md, ft.EmptyDirNode(), nil)
Jeromy's avatar
Jeromy committed
286 287 288 289 290 291 292 293
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			fileAdder.SetMfsRoot(mr)
		}

294
		addAllAndPin := func(f files.File) error {
295 296 297 298 299 300 301 302 303 304 305 306 307 308
			// Iterate over each top-level file and add individually. Otherwise the
			// single files.File f is treated as a directory, affecting hidden file
			// semantics.
			for {
				file, err := f.NextFile()
				if err == io.EOF {
					// Finished the list of files.
					break
				} else if err != nil {
					return err
				}
				if err := fileAdder.AddFile(file); err != nil {
					return err
				}
309
			}
310

311
			// copy intermediary nodes from editor to our actual dagservice
Jeromy's avatar
Jeromy committed
312
			_, err := fileAdder.Finalize()
313 314
			if err != nil {
				return err
315 316
			}

Stephen Whitmore's avatar
Stephen Whitmore committed
317 318 319 320
			if hash {
				return nil
			}

321
			return fileAdder.PinRoot()
322 323 324 325 326
		}

		go func() {
			defer close(outChan)
			if err := addAllAndPin(req.Files()); err != nil {
327 328
				res.SetError(err, cmds.ErrNormal)
				return
329
			}
330

331
		}()
332
	},
333
	PostRun: func(req cmds.Request, res cmds.Response) {
334 335 336
		if res.Error() != nil {
			return
		}
337 338 339 340 341
		outChan, ok := res.Output().(<-chan interface{})
		if !ok {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}
342
		res.SetOutput(nil)
343

344 345 346
		quiet, _, _ := req.Option(quietOptionName).Bool()
		quieter, _, _ := req.Option(quieterOptionName).Bool()
		quiet = quiet || quieter
347

348
		progress, _, _ := req.Option(progressOptionName).Bool()
Jeromy's avatar
Jeromy committed
349

350
		var bar *pb.ProgressBar
351
		if progress {
352
			bar = pb.New64(0).SetUnits(pb.U_BYTES)
353
			bar.ManualUpdate = true
Jeromy's avatar
Jeromy committed
354 355 356
			bar.ShowTimeLeft = false
			bar.ShowPercent = false
			bar.Output = res.Stderr()
357 358 359
			bar.Start()
		}

360 361 362 363 364 365
		var sizeChan chan int64
		s, found := req.Values()["size"]
		if found {
			sizeChan = s.(chan int64)
		}

366
		lastFile := ""
367
		lastHash := ""
368
		var totalProgress, prevFiles, lastBytes int64
369

370 371 372 373 374
	LOOP:
		for {
			select {
			case out, ok := <-outChan:
				if !ok {
375 376 377
					if quieter {
						fmt.Fprintln(res.Stdout(), lastHash)
					}
378
					break LOOP
379
				}
380 381
				output := out.(*coreunix.AddedObject)
				if len(output.Hash) > 0 {
382 383 384 385 386
					lastHash = output.Hash
					if quieter {
						continue
					}

387
					if progress {
388 389 390 391 392 393 394 395 396 397 398
						// clear progress bar line before we print "added x" output
						fmt.Fprintf(res.Stderr(), "\033[2K\r")
					}
					if quiet {
						fmt.Fprintf(res.Stdout(), "%s\n", output.Hash)
					} else {
						fmt.Fprintf(res.Stdout(), "added %s %s\n", output.Hash, output.Name)
					}
				} else {
					log.Debugf("add progress: %v %v\n", output.Name, output.Bytes)

399
					if !progress {
400 401 402 403 404 405 406 407 408 409 410 411 412
						continue
					}

					if len(lastFile) == 0 {
						lastFile = output.Name
					}
					if output.Name != lastFile || output.Bytes < lastBytes {
						prevFiles += lastBytes
						lastFile = output.Name
					}
					lastBytes = output.Bytes
					delta := prevFiles + lastBytes - totalProgress
					totalProgress = bar.Add64(delta)
413 414
				}

415
				if progress {
416
					bar.Update()
417
				}
418
			case size := <-sizeChan:
419
				if progress {
Jeromy's avatar
Jeromy committed
420 421 422 423 424
					bar.Total = size
					bar.ShowPercent = true
					bar.ShowBar = true
					bar.ShowTimeLeft = true
				}
Jeromy's avatar
Jeromy committed
425 426 427
			case <-req.Context().Done():
				res.SetError(req.Context().Err(), cmds.ErrNormal)
				return
428 429
			}
		}
430
	},
431
	Type: coreunix.AddedObject{},
432
}