Commit 6ae09b16 authored by Matt Bell's avatar Matt Bell Committed by Juan Batiz-Benet

core/commands: Moved commands that were rebased into the wrong directory

parent 0c205f56
......@@ -10,8 +10,11 @@ import (
"github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/importer"
"github.com/jbenet/go-ipfs/importer/chunk"
dag "github.com/jbenet/go-ipfs/merkledag"
"github.com/jbenet/go-ipfs/pin"
ft "github.com/jbenet/go-ipfs/unixfs"
uio "github.com/jbenet/go-ipfs/unixfs/io"
)
// Error indicating the max depth has been exceded.
......@@ -87,7 +90,14 @@ func addDir(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Node
}
func addFile(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Node, error) {
root, err := importer.NewDagFromFile(fpath)
dw := uio.NewDagWriter(n.DAG, chunk.DefaultSplitter)
mp, ok := n.Pinning.(pin.ManualPinner)
if !ok {
return nil, errors.New("invalid pinner type! expected manual pinner")
}
dw.Pinner = mp
root, err := importer.ImportFileDag(fpath, dw)
if err != nil {
return nil, err
}
......@@ -98,7 +108,15 @@ func addFile(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Nod
log.Info("adding subblock: %s %s", l.Name, l.Hash.B58String())
}
return root, addNode(n, root, fpath, out)
k, err := root.Key()
if err != nil {
return nil, err
}
// output that we've added this node
fmt.Fprintf(out, "added %s %s\n", k, fpath)
return root, nil
}
// addNode adds the node to the graph + local storage
......
......@@ -7,7 +7,7 @@ import (
"os"
"time"
"code.google.com/p/go.net/context"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
"github.com/jbenet/go-ipfs/blocks"
......@@ -28,7 +28,7 @@ func BlockGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out
}
k := u.Key(h)
log.Debug("BlockGet key: '%q'", k)
log.Debugf("BlockGet key: '%q'", k)
ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
b, err := n.Blocks.GetBlock(ctx, k)
if err != nil {
......@@ -48,7 +48,7 @@ func BlockPut(n *core.IpfsNode, args []string, opts map[string]interface{}, out
}
b := blocks.NewBlock(data)
log.Debug("BlockPut key: '%q'", b.Key())
log.Debugf("BlockPut key: '%q'", b.Key())
k, err := n.Blocks.AddBlock(b)
if err != nil {
......
......@@ -19,7 +19,7 @@ func ObjectData(n *core.IpfsNode, args []string, opts map[string]interface{}, ou
if err != nil {
return fmt.Errorf("objectData error: %v", err)
}
log.Debug("objectData: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("objectData: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
_, err = io.Copy(out, bytes.NewReader(dagnode.Data))
return err
......@@ -31,7 +31,7 @@ func ObjectLinks(n *core.IpfsNode, args []string, opts map[string]interface{}, o
if err != nil {
return fmt.Errorf("objectLinks error: %v", err)
}
log.Debug("ObjectLinks: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("ObjectLinks: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
for _, link := range dagnode.Links {
_, err = fmt.Fprintf(out, "%s %d %q\n", link.Hash.B58String(), link.Size, link.Name)
......@@ -70,7 +70,7 @@ func ObjectGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out
if err != nil {
return fmt.Errorf("ObjectGet error: %v", err)
}
log.Debug("objectGet: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("objectGet: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
// sadly all encodings dont implement a common interface
var data []byte
......
package commands
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/importer"
dag "github.com/jbenet/go-ipfs/merkledag"
"github.com/jbenet/go-ipfs/pin"
ft "github.com/jbenet/go-ipfs/unixfs"
)
// ErrDepthLimitExceeded indicates that the max recursion depth has been
// exceeded while adding (Add converts this into a "use -r" hint when the
// depth limit is the non-recursive default of 1).
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")
// Add is a command that imports files and directories -- given as arguments -- into ipfs.
func Add(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	// Depth of 1 means "this path only"; -1 means unlimited recursion.
	depth := 1
	if recursive, ok := opts["r"].(bool); recursive && ok {
		depth = -1
	}

	// Import every requested path in turn.
	for _, fpath := range args {
		if _, err := AddPath(n, fpath, depth, out); err != nil {
			// Hint at the -r flag when a directory was hit without recursion.
			if err == ErrDepthLimitExceeded && depth == 1 {
				err = errors.New("use -r to recursively add directories")
			}
			return fmt.Errorf("addFile error: %v", err)
		}
	}
	return nil
}
// AddPath adds a particular path to ipfs.
func AddPath(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Node, error) {
	// A depth of zero means the recursion budget is spent.
	if depth == 0 {
		return nil, ErrDepthLimitExceeded
	}

	info, err := os.Stat(fpath)
	if err != nil {
		return nil, err
	}

	// Dispatch on the kind of filesystem object.
	if info.IsDir() {
		return addDir(n, fpath, depth, out)
	}
	return addFile(n, fpath, depth, out)
}
// addDir builds a unixfs folder node for the directory at fpath, adding every
// entry recursively (with one less unit of depth) and linking it under its
// base name, then stores and pins the folder node itself.
func addDir(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Node, error) {
	entries, err := ioutil.ReadDir(fpath)
	if err != nil {
		return nil, err
	}

	tree := &dag.Node{Data: ft.FolderPBData()}

	// Construct and link a child node for each directory entry.
	for _, entry := range entries {
		childPath := filepath.Join(fpath, entry.Name())
		child, err := AddPath(n, childPath, depth-1, out)
		if err != nil {
			return nil, err
		}
		if err := tree.AddNodeLink(entry.Name(), child); err != nil {
			return nil, err
		}
	}

	log.Infof("adding dir: %s", fpath)
	return tree, addNode(n, tree, fpath, out)
}
// addFile imports the file at fpath into a dag, pins it via the node's
// manual pinner, and reports the resulting root key on out.
func addFile(n *core.IpfsNode, fpath string, depth int, out io.Writer) (*dag.Node, error) {
	// Pinning requires the manual-pinner capability on the node's pinner.
	pinner, ok := n.Pinning.(pin.ManualPinner)
	if !ok {
		return nil, errors.New("invalid pinner type! expected manual pinner")
	}

	root, err := importer.BuildDagFromFile(fpath, n.DAG, pinner)
	if err != nil {
		return nil, err
	}

	log.Infof("adding file: %s", fpath)
	for _, link := range root.Links {
		log.Infof("adding subblock: '%s' %s", link.Name, link.Hash.B58String())
	}

	key, err := root.Key()
	if err != nil {
		return nil, err
	}

	// output that we've added this node
	fmt.Fprintf(out, "added %s %s\n", key, fpath)
	return root, nil
}
// addNode adds the node to the graph + local storage
func addNode(n *core.IpfsNode, nd *dag.Node, fpath string, out io.Writer) error {
	// Store the node (and everything it links to) locally.
	if err := n.DAG.AddRecursive(nd); err != nil {
		return err
	}

	key, err := nd.Key()
	if err != nil {
		return err
	}

	// output that we've added this node
	fmt.Fprintf(out, "added %s %s\n", key, fpath)

	// ensure we keep it
	return n.Pinning.Pin(nd, true)
}
package commands
import (
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
"github.com/jbenet/go-ipfs/blocks"
"github.com/jbenet/go-ipfs/core"
u "github.com/jbenet/go-ipfs/util"
)
// BlockGet retrieves a raw ipfs block from the node's BlockService and writes
// its data to out. args[0] must be a valid base58-encoded multihash; the
// fetch is bounded by a 5-second timeout.
func BlockGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	if !u.IsValidHash(args[0]) {
		return fmt.Errorf("block get: not a valid hash")
	}

	h, err := mh.FromB58String(args[0])
	if err != nil {
		return fmt.Errorf("block get: %v", err)
	}

	k := u.Key(h)
	log.Debugf("BlockGet key: '%q'", k)

	// BUGFIX: the cancel func was previously discarded (ctx, _ := ...),
	// leaking the timeout context's resources until the deadline fired.
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	b, err := n.Blocks.GetBlock(ctx, k)
	if err != nil {
		return fmt.Errorf("block get: %v", err)
	}

	_, err = out.Write(b.Data)
	return err
}
// BlockPut reads everything from conn and saves the data to the nodes BlockService
func BlockPut(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	// TODO: this should read from an io.Reader arg
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return err
	}

	// Wrap the raw bytes as a block and hand it to the BlockService.
	blk := blocks.NewBlock(data)
	log.Debugf("BlockPut key: '%q'", blk.Key())

	key, err := n.Blocks.AddBlock(blk)
	if err != nil {
		return err
	}

	fmt.Fprintf(out, "added as '%s'\n", key)
	return nil
}
package commands
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/jbenet/go-ipfs/core"
dag "github.com/jbenet/go-ipfs/merkledag"
)
// ObjectData takes a key string from args and writes out the raw bytes of that node (if there is one)
func ObjectData(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	node, err := n.Resolver.ResolvePath(args[0])
	if err != nil {
		return fmt.Errorf("objectData error: %v", err)
	}

	log.Debugf("objectData: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(node.Data), len(node.Links))

	// Stream the node's raw data to the caller.
	_, err = io.Copy(out, bytes.NewReader(node.Data))
	return err
}
// ObjectLinks takes a key string from args and lists the links it points to
func ObjectLinks(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	node, err := n.Resolver.ResolvePath(args[0])
	if err != nil {
		return fmt.Errorf("objectLinks error: %v", err)
	}

	log.Debugf("ObjectLinks: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(node.Data), len(node.Links))

	// Print one "hash size name" line per link, stopping on the first
	// write failure.
	for _, link := range node.Links {
		if _, err = fmt.Fprintf(out, "%s %d %q\n", link.Hash.B58String(), link.Size, link.Name); err != nil {
			break
		}
	}
	return err
}
// ErrUnknownObjectEnc is returned if a invalid encoding is supplied
var ErrUnknownObjectEnc = errors.New("unknown object encoding")

// objectEncoding names a serialization format for dag nodes, as selected by
// the "encoding" option of ObjectGet/ObjectPut.
type objectEncoding string

// The encodings ObjectGet/ObjectPut understand.
const (
objectEncodingJSON objectEncoding = "json"
objectEncodingProtobuf = "protobuf"
)
// getObjectEnc interprets an option value as an objectEncoding, falling back
// to JSON when the option is not a string.
func getObjectEnc(o interface{}) objectEncoding {
	if enc, ok := o.(string); ok {
		return objectEncoding(enc)
	}

	// chosen as default because it's human readable
	log.Warning("option is not a string - falling back to json")
	return objectEncodingJSON
}
// ObjectGet takes a key string from args and a format option and serializes the dagnode to that format
func ObjectGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	node, err := n.Resolver.ResolvePath(args[0])
	if err != nil {
		return fmt.Errorf("ObjectGet error: %v", err)
	}

	log.Debugf("objectGet: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(node.Data), len(node.Links))

	// sadly all encodings dont implement a common interface
	var encoded []byte
	switch getObjectEnc(opts["encoding"]) {
	case objectEncodingJSON:
		encoded, err = json.MarshalIndent(node, "", " ")
	case objectEncodingProtobuf:
		encoded, err = node.Marshal()
	default:
		return ErrUnknownObjectEnc
	}
	if err != nil {
		return fmt.Errorf("ObjectGet error: %v", err)
	}

	_, err = io.Copy(out, bytes.NewReader(encoded))
	return err
}
// ErrObjectTooLarge is returned when too much data was read from stdin. current limit 512k
var ErrObjectTooLarge = errors.New("input object was too large. limit is 512kbytes")

// inputLimit is the maximum number of bytes ObjectPut accepts from stdin (512 KiB).
const inputLimit = 512 * 1024
// ObjectPut takes a format option, serilizes bytes from stdin and updates the dag with that data
func ObjectPut(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
	// Read a little past the limit so oversized input is detectable.
	data, err := ioutil.ReadAll(io.LimitReader(os.Stdin, inputLimit+10))
	if err != nil {
		return fmt.Errorf("ObjectPut error: %v", err)
	}
	if len(data) >= inputLimit {
		return ErrObjectTooLarge
	}

	// Decode the input into a dag node according to the chosen encoding.
	var dagnode *dag.Node
	switch getObjectEnc(opts["encoding"]) {
	case objectEncodingJSON:
		dagnode = new(dag.Node)
		err = json.Unmarshal(data, dagnode)
	case objectEncodingProtobuf:
		dagnode, err = dag.Decoded(data)
	default:
		return ErrUnknownObjectEnc
	}
	if err != nil {
		return fmt.Errorf("ObjectPut error: %v", err)
	}

	return addNode(n, dagnode, "stdin", out)
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment