package commands

import (
	"errors"
	"fmt"
	"io"

	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	blockservice "github.com/ipfs/go-ipfs/blockservice"
	cmds "github.com/ipfs/go-ipfs/commands"
	files "github.com/ipfs/go-ipfs/commands/files"
	core "github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/core/coreunix"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	dag "github.com/ipfs/go-ipfs/merkledag"
	dagtest "github.com/ipfs/go-ipfs/merkledag/test"
	mfs "github.com/ipfs/go-ipfs/mfs"
	ft "github.com/ipfs/go-ipfs/unixfs"

	u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util"
	"gx/ipfs/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs/pb"
)

// ErrDepthLimitExceeded is returned when the depth limit is exceeded.
var ErrDepthLimitExceeded = errors.New("depth limit exceeded")

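// Names of the options accepted by 'ipfs add'.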
const (
	quietOptionName       = "quiet"
	silentOptionName      = "silent"
	progressOptionName    = "progress"
	trickleOptionName     = "trickle"
	wrapOptionName        = "wrap-with-directory"
	hiddenOptionName      = "hidden"
	onlyHashOptionName    = "only-hash"
	chunkerOptionName     = "chunker"
	pinOptionName         = "pin"
	rawLeavesOptionName   = "raw-leaves"
	noCopyOptionName      = "nocopy"
	fstoreCacheOptionName = "fscache"
)

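// AddCmd is the 'ipfs add' command: it chunks the given paths into blocks,
// builds the corresponding unixfs DAG and, unless --pin=false, pins the root.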
var AddCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Add a file or directory to ipfs.",
		ShortDescription: `
Adds contents of <path> to ipfs. Use -r to add directories (recursively).
`,
		LongDescription: `
Adds contents of <path> to ipfs. Use -r to add directories.
Note that directories are added recursively to form the ipfs
MerkleDAG.

The wrap option, '-w', wraps the file (or files, if using the
recursive option) in a directory. This directory contains only
the files which have been added, which means each file retains
its original filename. For example:

  > ipfs add example.jpg
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  > ipfs add example.jpg -w
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  added QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx

You can now refer to the added file in a gateway, like so:

  /ipfs/QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx/example.jpg
`,
	},

	Arguments: []cmds.Argument{
		cmds.FileArg("path", true, true, "The path to a file to be added to ipfs.").EnableRecursive().EnableStdin(),
	},
	Options: []cmds.Option{
		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
		cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
		cmds.BoolOption(silentOptionName, "Write no output."),
		cmds.BoolOption(progressOptionName, "p", "Stream progress data."),
		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
		cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
		cmds.BoolOption(hiddenOptionName, "H", "Include files that are hidden. Only takes effect on recursive add."),
		cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm to use."),
		cmds.BoolOption(pinOptionName, "Pin this object when adding.").Default(true),
		cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
		cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"),
		cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
	},
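	// PreRun turns the progress bar on by default (unless --quiet or --silent)
	// and starts computing the total input size in the background so PostRun
	// can display it.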
	PreRun: func(req cmds.Request) error {
		quiet, _, _ := req.Option(quietOptionName).Bool()
		silent, _, _ := req.Option(silentOptionName).Bool()

		if quiet || silent {
			return nil
		}

		// the ipfs CLI progress bar defaults to on unless --quiet or --silent is used
		_, found, _ := req.Option(progressOptionName).Bool()
		if !found {
			req.SetOption(progressOptionName, true)
		}

		sizeFile, ok := req.Files().(files.SizeFile)
		if !ok {
			// we don't need to error; the progress bar just won't know how big the files are
			log.Warning("cannot determine size of input file")
			return nil
		}

		sizeCh := make(chan int64, 1)
		req.Values()["size"] = sizeCh

		go func() {
			size, err := sizeFile.Size()
			if err != nil {
				log.Warningf("error getting files size: %s", err)
				// see comment above
				return
			}

			log.Debugf("Total size of file being added: %v\n", size)
			sizeCh <- size
		}()

		return nil
	},
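	// Run wires up the blockstore, exchange and DAG service to add through,
	// configures a coreunix.Adder from the request options and streams the
	// resulting AddedObject values to the output channel.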
	Run: func(req cmds.Request, res cmds.Response) {
		n, err := req.InvocContext().GetNode()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		cfg, err := n.Repo.Config()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
		// check whether the repo will exceed its storage limit after the add
		// TODO: this doesn't handle the case where the hashed file is already in blocks (deduplicated)
		// TODO: conditional GC is disabled because it is currently not possible to pass the size to the daemon
		//if err := corerepo.ConditionalGC(req.Context(), n, uint64(size)); err != nil {
		//	res.SetError(err, cmds.ErrNormal)
		//	return
		//}

		progress, _, _ := req.Option(progressOptionName).Bool()
		trickle, _, _ := req.Option(trickleOptionName).Bool()
		wrap, _, _ := req.Option(wrapOptionName).Bool()
		hash, _, _ := req.Option(onlyHashOptionName).Bool()
		hidden, _, _ := req.Option(hiddenOptionName).Bool()
		silent, _, _ := req.Option(silentOptionName).Bool()
		chunker, _, _ := req.Option(chunkerOptionName).String()
		dopin, _, _ := req.Option(pinOptionName).Bool()
		rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool()
		nocopy, _, _ := req.Option(noCopyOptionName).Bool()
		fscache, _, _ := req.Option(fstoreCacheOptionName).Bool()

		if nocopy && !cfg.Experimental.FilestoreEnabled {
			res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"),
				cmds.ErrClient)
			return
		}

		if nocopy && !rbset {
			rawblks = true
		}

		if nocopy && !rawblks {
			res.SetError(fmt.Errorf("nocopy option requires '--raw-leaves' to be enabled as well"), cmds.ErrNormal)
			return
		}

		if hash {
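			// For --only-hash, build a temporary node backed by a nil repo so
			// added blocks are not persisted to the real datastore.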
			nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
				//TODO: need this to be true or all files
				// hashed will be stored in memory!
				NilRepo: true,
			})
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			n = nilnode
		}

		addblockstore := n.Blockstore
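		// Unless the filestore (--nocopy/--fscache) is involved, write through a
		// GC-locked view of the base blockstore.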
		if !(fscache || nocopy) {
			addblockstore = bstore.NewGCBlockstore(n.BaseBlocks, n.GCLocker)
		}

		exch := n.Exchange
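		// With --local, use an offline exchange so nothing is fetched from or
		// announced to the network while adding.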
		local, _, _ := req.Option("local").Bool()
		if local {
			exch = offline.Exchange(addblockstore)
		}

		bserv := blockservice.New(addblockstore, exch)
		dserv := dag.NewDAGService(bserv)

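		// Results are streamed to the client as the adder emits them.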
		outChan := make(chan interface{}, 8)
		res.SetOutput((<-chan interface{})(outChan))

		fileAdder, err := coreunix.NewAdder(req.Context(), n.Pinning, n.Blockstore, dserv)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		fileAdder.Out = outChan
		fileAdder.Chunker = chunker
		fileAdder.Progress = progress
		fileAdder.Hidden = hidden
		fileAdder.Trickle = trickle
		fileAdder.Wrap = wrap
		fileAdder.Pin = dopin
		fileAdder.Silent = silent
		fileAdder.RawLeaves = rawblks
		fileAdder.NoCopy = nocopy

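		// In only-hash mode, back the adder's MFS root with a mock DAG service so
		// intermediate directory nodes are never written out either.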
		if hash {
			md := dagtest.Mock()
			mr, err := mfs.NewRoot(req.Context(), md, ft.EmptyDirNode(), nil)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			fileAdder.SetMfsRoot(mr)
		}

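		// addAllAndPin adds each top-level file from the request, finalizes the
		// DAG and pins the root (skipped when --only-hash is set).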
		addAllAndPin := func(f files.File) error {
			// Iterate over each top-level file and add individually. Otherwise the
			// single files.File f is treated as a directory, affecting hidden file
			// semantics.
			for {
				file, err := f.NextFile()
				if err == io.EOF {
					// Finished the list of files.
					break
				} else if err != nil {
					return err
				}
				if err := fileAdder.AddFile(file); err != nil {
					return err
				}
			}

			// copy intermediary nodes from editor to our actual dagservice
			_, err := fileAdder.Finalize()
			if err != nil {
				return err
			}

			if hash {
				return nil
			}

			return fileAdder.PinRoot()
		}

		go func() {
			defer close(outChan)
			if err := addAllAndPin(req.Files()); err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

		}()
	},
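	// PostRun renders the streamed AddedObject values on the CLI side, either as
	// "added <hash> <name>" lines or as a byte-level progress bar on stderr.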
	PostRun: func(req cmds.Request, res cmds.Response) {
		if res.Error() != nil {
			return
		}
		outChan, ok := res.Output().(<-chan interface{})
		if !ok {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}
		res.SetOutput(nil)

		quiet, _, err := req.Option(quietOptionName).Bool()
		if err != nil {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}

		progress, _, err := req.Option(progressOptionName).Bool()
		if err != nil {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}

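		// The progress bar is updated manually from the events below; its total
		// stays unknown until the size channel reports.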
		var bar *pb.ProgressBar
		if progress {
			bar = pb.New64(0).SetUnits(pb.U_BYTES)
			bar.ManualUpdate = true
			bar.ShowTimeLeft = false
			bar.ShowPercent = false
			bar.Output = res.Stderr()
			bar.Start()
		}

		var sizeChan chan int64
		s, found := req.Values()["size"]
		if found {
			sizeChan = s.(chan int64)
		}

		lastFile := ""
		var totalProgress, prevFiles, lastBytes int64

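		// Consume adder output until the channel closes: print completed objects
		// and feed per-file byte counts into the progress bar.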
	LOOP:
		for {
			select {
			case out, ok := <-outChan:
				if !ok {
					break LOOP
				}
				output := out.(*coreunix.AddedObject)
				if len(output.Hash) > 0 {
					if progress {
						// clear progress bar line before we print "added x" output
						fmt.Fprintf(res.Stderr(), "\033[2K\r")
					}
					if quiet {
						fmt.Fprintf(res.Stdout(), "%s\n", output.Hash)
					} else {
						fmt.Fprintf(res.Stdout(), "added %s %s\n", output.Hash, output.Name)
					}

				} else {
					log.Debugf("add progress: %v %v\n", output.Name, output.Bytes)

					if !progress {
						continue
					}

					if len(lastFile) == 0 {
						lastFile = output.Name
					}
					if output.Name != lastFile || output.Bytes < lastBytes {
						prevFiles += lastBytes
						lastFile = output.Name
					}
					lastBytes = output.Bytes
					delta := prevFiles + lastBytes - totalProgress
					totalProgress = bar.Add64(delta)
				}

				if progress {
					bar.Update()
				}
			case size := <-sizeChan:
				if progress {
					bar.Total = size
					bar.ShowPercent = true
					bar.ShowBar = true
					bar.ShowTimeLeft = true
				}
			case <-req.Context().Done():
				res.SetError(req.Context().Err(), cmds.ErrNormal)
				return
			}
		}
	},
	Type: coreunix.AddedObject{},
}