package coreunix

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	gopath "path"

	bs "github.com/ipfs/go-ipfs/blocks/blockstore"
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	bserv "github.com/ipfs/go-ipfs/blockservice"
	"github.com/ipfs/go-ipfs/commands/files"
	core "github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/exchange/offline"
	balanced "github.com/ipfs/go-ipfs/importer/balanced"
	"github.com/ipfs/go-ipfs/importer/chunk"
	ihelper "github.com/ipfs/go-ipfs/importer/helpers"
	trickle "github.com/ipfs/go-ipfs/importer/trickle"
	dag "github.com/ipfs/go-ipfs/merkledag"
	mfs "github.com/ipfs/go-ipfs/mfs"
	"github.com/ipfs/go-ipfs/pin"
	unixfs "github.com/ipfs/go-ipfs/unixfs"

	logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
	node "gx/ipfs/QmU7bFWQ793qmvNy7outdCaMfSDNk8uqhx4VNrxYj5fj5g/go-ipld-node"
	cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid"
	ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore"
	syncds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)

var log = logging.Logger("coreunix")

// how many bytes of progress to wait before sending a progress update message
const progressReaderIncrement = 1024 * 256

type Link struct {
	Name, Hash string
	Size       uint64
}

type Object struct {
	Hash  string
	Links []Link
}

type hiddenFileError struct {
	fileName string
}

func (e *hiddenFileError) Error() string {
	return fmt.Sprintf("%s is a hidden file", e.fileName)
}

type ignoreFileError struct {
	fileName string
}

func (e *ignoreFileError) Error() string {
	return fmt.Sprintf("%s is an ignored file", e.fileName)
}

type AddedObject struct {
	Name  string
	Hash  string `json:",omitempty"`
	Bytes int64  `json:",omitempty"`
}

func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.DAGService) (*Adder, error) {
	mr, err := mfs.NewRoot(ctx, ds, unixfs.EmptyDirNode(), nil)
	if err != nil {
		return nil, err
	}

	return &Adder{
		mr:         mr,
		ctx:        ctx,
		pinning:    p,
		blockstore: bs,
		dagService: ds,
		Progress:   false,
		Hidden:     true,
		Pin:        true,
		Trickle:    false,
		Wrap:       false,
		Chunker:    "",
	}, nil
}

// Adder holds the switches passed to the `add` call.
type Adder struct {
	ctx        context.Context
	pinning    pin.Pinner
	blockstore bstore.GCBlockstore
	dagService dag.DAGService
	Out        chan interface{}
	Progress   bool
	Hidden     bool
	Pin        bool
	Trickle    bool
	RawLeaves  bool
	Silent     bool
	Wrap       bool
	Chunker    string
	root       node.Node
	mr         *mfs.Root
	unlocker   bs.Unlocker
	tempRoot   *cid.Cid
}

// SetMfsRoot sets the mfs root that added nodes are patched into.
func (adder *Adder) SetMfsRoot(r *mfs.Root) {
	adder.mr = r
}

// add chunks data from the given reader and builds a DAG (trickle or
// balanced layout) out of it, writing the nodes to the adder's DAGService.
func (adder Adder) add(reader io.Reader) (node.Node, error) {
	chnk, err := chunk.FromString(reader, adder.Chunker)
	if err != nil {
		return nil, err
	}
	params := ihelper.DagBuilderParams{
		Dagserv:   adder.dagService,
		RawLeaves: adder.RawLeaves,
		Maxlinks:  ihelper.DefaultLinksPerBlock,
	}

	if adder.Trickle {
		return trickle.TrickleLayout(params.New(chnk))
	}
	return balanced.BalancedLayout(params.New(chnk))
}

// RootNode returns the root node of the added content, memoizing the result.
func (adder *Adder) RootNode() (node.Node, error) {
	// for memoizing
	if adder.root != nil {
		return adder.root, nil
	}

	root, err := adder.mr.GetValue().GetNode()
	if err != nil {
		return nil, err
	}

	// if not wrapping, AND one root file, use that hash as root.
	if !adder.Wrap && len(root.Links()) == 1 {
		nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
		if err != nil {
			return nil, err
		}

		root = nd
	}

	adder.root = root
	return root, err
}

// PinRoot recursively pins the root node and flushes the pin state, unless
// pinning is disabled on the adder.
func (adder *Adder) PinRoot() error {
	root, err := adder.RootNode()
	if err != nil {
		return err
	}
	if !adder.Pin {
		return nil
	}

	rnk, err := adder.dagService.Add(root)
	if err != nil {
		return err
	}

	if adder.tempRoot != nil {
		err := adder.pinning.Unpin(adder.ctx, adder.tempRoot, true)
		if err != nil {
			return err
		}
		adder.tempRoot = rnk
	}

	adder.pinning.PinWithMode(rnk, pin.Recursive)
	return adder.pinning.Flush()
}

// Finalize emits the remaining directory entries, closes the mfs root, and
// returns the final root node.
func (adder *Adder) Finalize() (node.Node, error) {
	root := adder.mr.GetValue()

	// can't just call adder.RootNode() here as we need the name for printing
	rootNode, err := root.GetNode()
	if err != nil {
		return nil, err
	}

	var name string
	if !adder.Wrap {
		name = rootNode.Links()[0].Name

		dir, ok := adder.mr.GetValue().(*mfs.Directory)
		if !ok {
			return nil, fmt.Errorf("root is not a directory")
		}

		root, err = dir.Child(name)
		if err != nil {
			return nil, err
		}
	}

	err = adder.outputDirs(name, root)
	if err != nil {
		return nil, err
	}

	err = adder.mr.Close()
	if err != nil {
		return nil, err
	}

	return root.GetNode()
}

func (adder *Adder) outputDirs(path string, fsn mfs.FSNode) error {
	switch fsn := fsn.(type) {
	case *mfs.File:
		return nil
	case *mfs.Directory:
		for _, name := range fsn.ListNames() {
			child, err := fsn.Child(name)
			if err != nil {
				return err
			}

			childpath := gopath.Join(path, name)
			err = adder.outputDirs(childpath, child)
			if err != nil {
				return err
			}

			fsn.Uncache(name)
		}

		nd, err := fsn.GetNode()
		if err != nil {
			return err
		}

		return outputDagnode(adder.Out, path, nd)
	default:
		return fmt.Errorf("unrecognized fsn type: %#v", fsn)
	}
}

// Add builds a merkledag from a reader, pinning all objects to the local
// datastore. Returns a key representing the root node.
func Add(n *core.IpfsNode, r io.Reader) (string, error) {
	return AddWithContext(n.Context(), n, r)
}

// AddWithContext does the same as Add, but with a custom context.
func AddWithContext(ctx context.Context, n *core.IpfsNode, r io.Reader) (string, error) {
	defer n.Blockstore.PinLock().Unlock()

	fileAdder, err := NewAdder(ctx, n.Pinning, n.Blockstore, n.DAG)
	if err != nil {
		return "", err
	}

	node, err := fileAdder.add(r)
	if err != nil {
		return "", err
	}

	return node.Cid().String(), nil
}

// AddR recursively adds files in |path|.
func AddR(n *core.IpfsNode, root string) (key string, err error) {
	defer n.Blockstore.PinLock().Unlock()

	stat, err := os.Lstat(root)
	if err != nil {
		return "", err
	}

	f, err := files.NewSerialFile(root, root, false, stat)
	if err != nil {
		return "", err
	}
	defer f.Close()

	fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG)
	if err != nil {
		return "", err
	}

	err = fileAdder.addFile(f)
	if err != nil {
		return "", err
	}

	nd, err := fileAdder.Finalize()
	if err != nil {
		return "", err
	}

	return nd.String(), nil
}

// AddWrapped adds data from a reader, and wraps it with a directory object
// to preserve the filename.
// Returns the path of the added file ("<dir hash>/filename"), the DAG node of
// the directory, and an error if any.
func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, node.Node, error) {
	file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
	fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG)
	if err != nil {
		return "", nil, err
	}
	fileAdder.Wrap = true

	defer n.Blockstore.PinLock().Unlock()

	err = fileAdder.addFile(file)
	if err != nil {
		return "", nil, err
	}

	dagnode, err := fileAdder.Finalize()
	if err != nil {
		return "", nil, err
	}

	c := dagnode.Cid()
	return gopath.Join(c.String(), filename), dagnode, nil
}

func (adder *Adder) addNode(node node.Node, path string) error {
	// patch it into the root
	if path == "" {
		path = node.Cid().String()
	}

	dir := gopath.Dir(path)
	if dir != "." {
		if err := mfs.Mkdir(adder.mr, dir, true, false); err != nil {
			return err
		}
	}

	if err := mfs.PutNode(adder.mr, path, node); err != nil {
		return err
	}

	if !adder.Silent {
		return outputDagnode(adder.Out, path, node)
	}
	return nil
}

// AddFile adds the given file while respecting the adder's settings,
// taking the pin lock for the duration of the add when pinning is enabled.
func (adder *Adder) AddFile(file files.File) error {
	if adder.Pin {
		adder.unlocker = adder.blockstore.PinLock()
	}
	defer func() {
		if adder.unlocker != nil {
			adder.unlocker.Unlock()
		}
	}()

	return adder.addFile(file)
}

func (adder *Adder) addFile(file files.File) error {
	err := adder.maybePauseForGC()
	if err != nil {
		return err
	}

	if file.IsDirectory() {
		return adder.addDir(file)
	}

	// case for symlink
	if s, ok := file.(*files.Symlink); ok {
		sdata, err := unixfs.SymlinkData(s.Target)
		if err != nil {
			return err
		}

		dagnode := dag.NodeWithData(sdata)
		_, err = adder.dagService.Add(dagnode)
		if err != nil {
			return err
		}

		return adder.addNode(dagnode, s.FileName())
	}

	// case for regular file
	// if the progress flag was specified, wrap the file so that we can send
	// progress updates to the client (over the output channel)
	var reader io.Reader = file
	if adder.Progress {
		rdr := &progressReader{file: file, out: adder.Out}
		if fi, ok := file.(files.FileInfo); ok {
			reader = &progressReader2{rdr, fi}
		} else {
			reader = rdr
		}
	}

	dagnode, err := adder.add(reader)
	if err != nil {
		return err
	}

	// patch it into the root
	return adder.addNode(dagnode, file.FileName())
}

func (adder *Adder) addDir(dir files.File) error {
	log.Infof("adding directory: %s", dir.FileName())

	err := mfs.Mkdir(adder.mr, dir.FileName(), true, false)
	if err != nil {
		return err
	}

	for {
		file, err := dir.NextFile()
		if err != nil && err != io.EOF {
			return err
		}
		if file == nil {
			break
		}

		// Skip hidden files when adding recursively, unless Hidden is enabled.
		if files.IsHidden(file) && !adder.Hidden {
			log.Infof("%s is hidden, skipping", file.FileName())
			continue
		}

		err = adder.addFile(file)
		if err != nil {
			return err
		}
	}

	return nil
}

func (adder *Adder) maybePauseForGC() error {
	if adder.unlocker != nil && adder.blockstore.GCRequested() {
		err := adder.PinRoot()
		if err != nil {
			return err
		}

		adder.unlocker.Unlock()
		adder.unlocker = adder.blockstore.PinLock()
	}
	return nil
}

// outputDagnode sends dagnode info over the output channel
func outputDagnode(out chan interface{}, name string, dn node.Node) error {
	if out == nil {
		return nil
	}

	o, err := getOutput(dn)
	if err != nil {
		return err
	}

	out <- &AddedObject{
		Hash: o.Hash,
		Name: name,
	}

	return nil
}

func NewMemoryDagService() dag.DAGService {
	// build mem-datastore for editor's intermediary nodes
	bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	bsrv := bserv.New(bs, offline.Exchange(bs))
	return dag.NewDAGService(bsrv)
}

// from core/commands/object.go
func getOutput(dagnode node.Node) (*Object, error) {
	c := dagnode.Cid()

	output := &Object{
		Hash:  c.String(),
		Links: make([]Link, len(dagnode.Links())),
	}

	for i, link := range dagnode.Links() {
		output.Links[i] = Link{
			Name: link.Name,
			Size: link.Size,
		}
	}

	return output, nil
}

// progressReader wraps a file and reports the cumulative number of bytes read
// over the output channel every progressReaderIncrement bytes, and at EOF.
type progressReader struct {
	file         files.File
	out          chan interface{}
	bytes        int64
	lastProgress int64
}

func (i *progressReader) Read(p []byte) (int, error) {
	n, err := i.file.Read(p)
	i.bytes += int64(n)
	if i.bytes-i.lastProgress >= progressReaderIncrement || err == io.EOF {
		i.lastProgress = i.bytes
		i.out <- &AddedObject{
			Name:  i.file.FileName(),
			Bytes: i.bytes,
		}
	}
	return n, err
}

type progressReader2 struct {
	*progressReader
	files.FileInfo
}
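
// newMemoryNodeExample is a minimal usage sketch for NewMemoryDagService: it
// stores a single ProtoNode in the in-memory DAG service and returns its CID
// string. It relies only on calls already used elsewhere in this file
// (NewMemoryDagService, dag.NodeWithData, DAGService.Add); the function name
// and flow are illustrative only, and real callers go through Add, AddR, or
// AddWrapped above instead.
func newMemoryNodeExample(data []byte) (string, error) {
	// in-memory DAG service backed by a map datastore and offline exchange
	dserv := NewMemoryDagService()

	// wrap the raw bytes in a unixfs-style ProtoNode and store it
	nd := dag.NodeWithData(data)
	c, err := dserv.Add(nd)
	if err != nil {
		return "", err
	}

	return c.String(), nil
}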