package commands

import (
	"errors"
	"fmt"
	"io"
	"strings"

	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	blockservice "github.com/ipfs/go-ipfs/blockservice"
	cmds "github.com/ipfs/go-ipfs/commands"
	files "github.com/ipfs/go-ipfs/commands/files"
	core "github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/core/coreunix"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	dag "github.com/ipfs/go-ipfs/merkledag"
	dagtest "github.com/ipfs/go-ipfs/merkledag/test"
	mfs "github.com/ipfs/go-ipfs/mfs"
	ft "github.com/ipfs/go-ipfs/unixfs"

	u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util"
	mh "gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash"
	"gx/ipfs/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs/pb"
)

// ErrDepthLimitExceeded indicates that the max depth has been exceeded.
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")

const (
	quietOptionName       = "quiet"
	quieterOptionName     = "quieter"
	silentOptionName      = "silent"
	progressOptionName    = "progress"
	trickleOptionName     = "trickle"
	wrapOptionName        = "wrap-with-directory"
	hiddenOptionName      = "hidden"
	onlyHashOptionName    = "only-hash"
	chunkerOptionName     = "chunker"
	pinOptionName         = "pin"
	rawLeavesOptionName   = "raw-leaves"
	noCopyOptionName      = "nocopy"
	fstoreCacheOptionName = "fscache"
	cidVersionOptionName  = "cid-version"
	hashOptionName        = "hash"
)

const adderOutChanSize = 8

var AddCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Add a file or directory to ipfs.",
		ShortDescription: `
Adds contents of <path> to ipfs. Use -r to add directories (recursively).
`,
		LongDescription: `
Adds contents of <path> to ipfs. Use -r to add directories.
Note that directories are added recursively, to form the ipfs
MerkleDAG.

The wrap option, '-w', wraps the file (or files, if using the
recursive option) in a directory. This directory contains only
the files which have been added, which means that each file
retains its filename. For example:

  > ipfs add example.jpg
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  > ipfs add example.jpg -w
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  added QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx

You can now refer to the added file in a gateway, like so:

  /ipfs/QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx/example.jpg

The chunker option, '-s', specifies the chunking strategy, i.e. how
ipfs splits larger files into blocks. Blocks with the same content
can be deduplicated. The default is a fixed block size of
256 * 1024 bytes, 'size-262144'. Alternatively, you can use the
rabin chunker for content-defined chunking by specifying
rabin-[min]-[avg]-[max] (where min/avg/max refer to the resulting
chunk sizes). Using other chunking strategies will produce
different hashes for the same file.

  > ipfs add --chunker=size-1024 ipfs-logo.svg
  added QmZ9KNxxeeLCPrdceBjvVcmSyLbUWMF4ceKDP8yboLNoHT ipfs-logo.svg
  > ipfs add --chunker=rabin-128-256-2048 ipfs-logo.svg
  added QmQN64Mbj1WK8wAej5MoFkHkD3aTtSGzGnAFXomdVHNNhs ipfs-logo.svg

You can now check what blocks have been created by:

  > ipfs object links QmQN64Mbj1WK8wAej5MoFkHkD3aTtSGzGnAFXomdVHNNhs
  Qmb3Wzsmy9RXkyQhERdPd93mBP2jTRUjgVBwx5yWJR5b6k 223
  Qmc7ikMLM1SS88YxxogRtjAyuLLfR85ZDKjfy7ahZYD38h 141
  QmeUiJHEXZnEwjpCfUUSkDuXFk2nuT6gALCeUXtZkRvcbj 858
  QmWwiQMNhpjq9kFUiuFbSQoqrQNTVjwo1wgbbx8wPr9PjB 1040
  QmNPg3SUphwfoJwveDyMRjDDaoitTCE7g9UUqYod8gPwz5 431
  QmPzBVWJtMPdeaoJK55rua7LgbEsjHseMHsUSUrPJ3AncS 278
  QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338
`,
	},

	Arguments: []cmds.Argument{
		cmds.FileArg("path", true, true, "The path to a file to be added to ipfs.").EnableRecursive().EnableStdin(),
	},
	Options: []cmds.Option{
		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
		cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
		cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
		cmds.BoolOption(silentOptionName, "Write no output."),
		cmds.BoolOption(progressOptionName, "p", "Stream progress data."),
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
		cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
		cmds.BoolOption(hiddenOptionName, "H", "Include files that are hidden. Only takes effect on recursive add."),
		cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes] or rabin-[min]-[avg]-[max]").Default("size-262144"),
		cmds.BoolOption(pinOptionName, "Pin this object when adding.").Default(true),
		cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
		cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"),
		cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
		cmds.IntOption(cidVersionOptionName, "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)").Default(0),
		cmds.StringOption(hashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)").Default("sha2-256"),
	},
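	// PreRun turns the progress bar on by default (unless a quiet or silent
	// flag is set) and kicks off an asynchronous size calculation whose result
	// is handed to PostRun through req.Values()["size"].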
	PreRun: func(req cmds.Request) error {
		quiet, _, _ := req.Option(quietOptionName).Bool()
		quieter, _, _ := req.Option(quieterOptionName).Bool()
		quiet = quiet || quieter

		silent, _, _ := req.Option(silentOptionName).Bool()

		if quiet || silent {
			return nil
		}

		// ipfs cli progress bar defaults to true unless quiet or silent is used
		_, found, _ := req.Option(progressOptionName).Bool()
		if !found {
			req.SetOption(progressOptionName, true)
		}

		sizeFile, ok := req.Files().(files.SizeFile)
		if !ok {
			// we don't need to error, the progress bar just won't know how big the files are
			log.Warning("cannot determine size of input file")
			return nil
		}

		sizeCh := make(chan int64, 1)
		req.Values()["size"] = sizeCh

		go func() {
			size, err := sizeFile.Size()
			if err != nil {
				log.Warningf("error getting files size: %s", err)
				// see comment above
				return
			}

			log.Debugf("Total size of file being added: %v\n", size)
			sizeCh <- size
		}()

		return nil
	},
	Run: func(req cmds.Request, res cmds.Response) {
		n, err := req.InvocContext().GetNode()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		cfg, err := n.Repo.Config()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
		// check if repo will exceed storage limit if added
		// TODO: this doesn't handle the case where the hashed file is already in blocks (deduplicated)
		// TODO: conditional GC is disabled because it is currently not possible to pass the size to the daemon
		//if err := corerepo.ConditionalGC(req.Context(), n, uint64(size)); err != nil {
		//	res.SetError(err, cmds.ErrNormal)
		//	return
		//}

		progress, _, _ := req.Option(progressOptionName).Bool()
		trickle, _, _ := req.Option(trickleOptionName).Bool()
		wrap, _, _ := req.Option(wrapOptionName).Bool()
		hash, _, _ := req.Option(onlyHashOptionName).Bool()
		hidden, _, _ := req.Option(hiddenOptionName).Bool()
		silent, _, _ := req.Option(silentOptionName).Bool()
		chunker, _, _ := req.Option(chunkerOptionName).String()
		dopin, _, _ := req.Option(pinOptionName).Bool()
		rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool()
		nocopy, _, _ := req.Option(noCopyOptionName).Bool()
		fscache, _, _ := req.Option(fstoreCacheOptionName).Bool()
		cidVer, _, _ := req.Option(cidVersionOptionName).Int()
		hashFunStr, hfset, _ := req.Option(hashOptionName).String()

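		// Option interplay: --nocopy requires the filestore experiment and raw
		// leaves, explicitly choosing a hash function implies CIDv1, and CIDv1
		// or higher defaults raw leaves to true unless the user set it.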
		if nocopy && !cfg.Experimental.FilestoreEnabled {
			res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"),
				cmds.ErrClient)
			return
		}

		if nocopy && !rbset {
			rawblks = true
		}

		if nocopy && !rawblks {
			res.SetError(fmt.Errorf("nocopy option requires '--raw-leaves' to be enabled as well"), cmds.ErrNormal)
			return
		}

		if hfset && cidVer == 0 {
			cidVer = 1
		}

		if cidVer >= 1 && !rbset {
			rawblks = true
		}

		prefix, err := dag.PrefixForCidVersion(cidVer)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)]
		if !ok {
			res.SetError(fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr)), cmds.ErrNormal)
			return
		}

		prefix.MhType = hashFunCode
		prefix.MhLength = -1

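		// For --only-hash, swap in a node backed by a nil repo: input is still
		// chunked and hashed, but blocks are not persisted to the local repo.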
		if hash {
			nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
				//TODO: need this to be true or all files
				// hashed will be stored in memory!
				NilRepo: true,
			})
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			n = nilnode
		}

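		// Choose the blockstore to add through: by default the base blockstore
		// wrapped with the GC locker (so blocks cannot be GC'd mid-add); the
		// filestore options (--fscache/--nocopy) add through the node's full
		// blockstore instead.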
		addblockstore := n.Blockstore
		if !(fscache || nocopy) {
			addblockstore = bstore.NewGCBlockstore(n.BaseBlocks, n.GCLocker)
		}

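		// With the "local" option, use an offline exchange so no blocks are
		// fetched from or announced to the network during the add.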
		exch := n.Exchange
		local, _, _ := req.Option("local").Bool()
		if local {
			exch = offline.Exchange(addblockstore)
		}

		bserv := blockservice.New(addblockstore, exch)
		dserv := dag.NewDAGService(bserv)

		fileAdder, err := coreunix.NewAdder(req.Context(), n.Pinning, n.Blockstore, dserv)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

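		// Adder output is streamed to the client over a buffered channel;
		// PostRun consumes it to drive the progress bar and print results.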
		outChan := make(chan interface{}, adderOutChanSize)
		res.SetOutput((<-chan interface{})(outChan))

		fileAdder.Out = outChan
		fileAdder.Chunker = chunker
		fileAdder.Progress = progress
		fileAdder.Hidden = hidden
		fileAdder.Trickle = trickle
		fileAdder.Wrap = wrap
		fileAdder.Pin = dopin
		fileAdder.Silent = silent
		fileAdder.RawLeaves = rawblks
		fileAdder.NoCopy = nocopy
		fileAdder.Prefix = &prefix

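		// In only-hash mode, root the adder's MFS tree in a mock, in-memory DAG
		// service so intermediate directory nodes never reach the blockstore.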
		if hash {
			md := dagtest.Mock()
			mr, err := mfs.NewRoot(req.Context(), md, ft.EmptyDirNode(), nil)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			fileAdder.SetMfsRoot(mr)
		}

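		// addAllAndPin adds each top-level input file, finalizes the root DAG
		// node, and pins it (unless we are only hashing).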
		addAllAndPin := func(f files.File) error {
			// Iterate over each top-level file and add individually. Otherwise the
			// single files.File f is treated as a directory, affecting hidden file
			// semantics.
			for {
				file, err := f.NextFile()
				if err == io.EOF {
					// Finished the list of files.
					break
				} else if err != nil {
					return err
				}
				if err := fileAdder.AddFile(file); err != nil {
					return err
				}
			}

			// copy intermediary nodes from editor to our actual dagservice
			_, err := fileAdder.Finalize()
			if err != nil {
				return err
			}

			if hash {
				return nil
			}

			return fileAdder.PinRoot()
		}

		go func() {
			defer close(outChan)
			if err := addAllAndPin(req.Files()); err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}()
	},
	PostRun: func(req cmds.Request, res cmds.Response) {
		if res.Error() != nil {
			return
		}
		outChan, ok := res.Output().(<-chan interface{})
		if !ok {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}
		res.SetOutput(nil)

		quiet, _, _ := req.Option(quietOptionName).Bool()
		quieter, _, _ := req.Option(quieterOptionName).Bool()
		quiet = quiet || quieter

		progress, _, _ := req.Option(progressOptionName).Bool()

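		// The progress bar writes to stderr and is updated manually so its
		// redraws can be interleaved cleanly with the output lines on stdout.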
		var bar *pb.ProgressBar
		if progress {
			bar = pb.New64(0).SetUnits(pb.U_BYTES)
			bar.ManualUpdate = true
			bar.ShowTimeLeft = false
			bar.ShowPercent = false
			bar.Output = res.Stderr()
			bar.Start()
		}

		var sizeChan chan int64
		s, found := req.Values()["size"]
		if found {
			sizeChan = s.(chan int64)
		}

		lastFile := ""
		lastHash := ""
		var totalProgress, prevFiles, lastBytes int64

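		// The loop below multiplexes three sources: adder output (hashes are
		// printed, byte counts advance the progress bar), the total size from
		// PreRun (which enables percentage and ETA display), and context
		// cancellation. prevFiles and lastBytes track per-file offsets so the
		// bar grows monotonically across multiple files.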
	LOOP:
		for {
			select {
			case out, ok := <-outChan:
				if !ok {
					if quieter {
						fmt.Fprintln(res.Stdout(), lastHash)
					}
					break LOOP
				}
				output := out.(*coreunix.AddedObject)
				if len(output.Hash) > 0 {
					lastHash = output.Hash
					if quieter {
						continue
					}

					if progress {
						// clear progress bar line before we print "added x" output
						fmt.Fprintf(res.Stderr(), "\033[2K\r")
					}
					if quiet {
						fmt.Fprintf(res.Stdout(), "%s\n", output.Hash)
					} else {
						fmt.Fprintf(res.Stdout(), "added %s %s\n", output.Hash, output.Name)
					}
				} else {
					log.Debugf("add progress: %v %v\n", output.Name, output.Bytes)

					if !progress {
						continue
					}

					if len(lastFile) == 0 {
						lastFile = output.Name
					}
					if output.Name != lastFile || output.Bytes < lastBytes {
						prevFiles += lastBytes
						lastFile = output.Name
					}
					lastBytes = output.Bytes
					delta := prevFiles + lastBytes - totalProgress
					totalProgress = bar.Add64(delta)
				}

				if progress {
					bar.Update()
				}
			case size := <-sizeChan:
				if progress {
					bar.Total = size
					bar.ShowPercent = true
					bar.ShowBar = true
					bar.ShowTimeLeft = true
				}
			case <-req.Context().Done():
				res.SetError(req.Context().Err(), cmds.ErrNormal)
				return
			}
		}
	},
	Type: coreunix.AddedObject{},
}