Commit 9f0c8134 authored by rht

Decompose DagArchive from unixfs tar

License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
parent dfa0351d
@@ -15,7 +15,7 @@ import (
core "github.com/ipfs/go-ipfs/core"
path "github.com/ipfs/go-ipfs/path"
tar "github.com/ipfs/go-ipfs/thirdparty/tar"
utar "github.com/ipfs/go-ipfs/unixfs/tar"
uarchive "github.com/ipfs/go-ipfs/unixfs/archive"
)
var ErrInvalidCompressionLevel = errors.New("Compression level must be between 1 and 9")
@@ -70,7 +70,7 @@ may also specify the level of compression by specifying '-l=<1-9>'.
}
archive, _, _ := req.Option("archive").Bool()
reader, err := utar.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
......
package archive
import (
"bufio"
"compress/gzip"
"io"
"path"
cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
mdag "github.com/ipfs/go-ipfs/merkledag"
tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
uio "github.com/ipfs/go-ipfs/unixfs/io"
)
// DefaultBufSize is the buffer size for gets. For now, 1MB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
_, filename := path.Split(name)
// need to connect a writer to a reader
piper, pipew := io.Pipe()
// use a buffered writer so the writing goroutine can run ahead of the pipe reader
bufw := bufio.NewWriterSize(pipew, DefaultBufSize)
// compression determines whether to use gzip compression.
var maybeGzw io.Writer
if compression != gzip.NoCompression {
var err error
maybeGzw, err = gzip.NewWriterLevel(bufw, compression)
if err != nil {
return nil, err
}
} else {
maybeGzw = bufw
}
if !archive && compression != gzip.NoCompression {
// not an archive, but compressed: the node is a single file, so stream its bytes straight through gzip with no tar wrapping
dagr, err := uio.NewDagReader(ctx, nd, dag)
if err != nil {
pipew.CloseWithError(err)
return nil, err
}
go func() {
if _, err := dagr.WriteTo(maybeGzw); err != nil {
pipew.CloseWithError(err)
return
}
// in this branch maybeGzw is always a *gzip.Writer; close it so its buffered data and the gzip footer reach bufw, then flush bufw into the pipe
if err := maybeGzw.(*gzip.Writer).Close(); err != nil {
pipew.CloseWithError(err)
return
}
if err := bufw.Flush(); err != nil {
pipew.CloseWithError(err)
return
}
pipew.Close() // everything seems to be ok.
}()
} else {
// the remaining cases: 1. archive requested, or 2. neither archived nor compressed; either way tar is used as the transport format
// construct the tar writer
w, err := tar.NewWriter(ctx, dag, archive, compression, maybeGzw)
if err != nil {
return nil, err
}
go func() {
// write all the nodes recursively
if err := w.WriteNode(nd, filename); err != nil {
pipew.CloseWithError(err)
return
}
w.Close() // finish the tar stream (writes the trailer)
// close the gzip writer, if any, so its buffered data and footer reach bufw before the final flush
if gzw, ok := maybeGzw.(*gzip.Writer); ok {
if err := gzw.Close(); err != nil {
pipew.CloseWithError(err)
return
}
}
if err := bufw.Flush(); err != nil {
pipew.CloseWithError(err)
return
}
pipew.Close() // everything seems to be ok.
}()
}
return piper, nil
}
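For context, a minimal sketch of how a caller might consume the reader DagArchive returns. Only the DagArchive call itself comes from the new package; the helper name and surrounding setup are illustrative, not part of this commit:

import (
	"compress/gzip"
	"io"

	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	uarchive "github.com/ipfs/go-ipfs/unixfs/archive"
)

// writeTarTo streams nd and its children as an uncompressed tar stream into out.
// DagArchive fills its pipe from a goroutine, so a plain io.Copy drains it.
// (Hypothetical helper for illustration only.)
func writeTarTo(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService, out io.Writer) error {
	r, err := uarchive.DagArchive(ctx, nd, "example", dag, true, gzip.NoCompression)
	if err != nil {
		return err
	}
	_, err = io.Copy(out, r)
	return err
}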
@@ -2,8 +2,6 @@ package tar
import (
"archive/tar"
"bufio"
"compress/gzip"
"io"
"path"
"time"
@@ -17,73 +15,6 @@ import (
upb "github.com/ipfs/go-ipfs/unixfs/pb"
)
// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
_, filename := path.Split(name)
// need to connect a writer to a reader
piper, pipew := io.Pipe()
// use a buffered writer to parallelize task
bufw := bufio.NewWriterSize(pipew, DefaultBufSize)
// compression determines whether to use gzip compression.
var maybeGzw io.Writer
if compression != gzip.NoCompression {
var err error
maybeGzw, err = gzip.NewWriterLevel(bufw, compression)
if err != nil {
return nil, err
}
} else {
maybeGzw = bufw
}
// construct the tar writer
w, err := NewWriter(ctx, dag, archive, compression, maybeGzw)
if err != nil {
return nil, err
}
// write all the nodes recursively
go func() {
if !archive && compression != gzip.NoCompression {
// the case when the node is a file
dagr, err := uio.NewDagReader(w.ctx, nd, w.Dag)
if err != nil {
pipew.CloseWithError(err)
return
}
if _, err := dagr.WriteTo(maybeGzw); err != nil {
pipew.CloseWithError(err)
return
}
} else {
// the case for 1. archive, and 2. not archived and not compressed, in which tar is used anyway as a transport format
if err := w.WriteNode(nd, filename); err != nil {
pipew.CloseWithError(err)
return
}
}
if err := bufw.Flush(); err != nil {
pipew.CloseWithError(err)
return
}
w.Close()
pipew.Close() // everything seems to be ok.
}()
return piper, nil
}
// Writer is a utility structure that helps to write
// unixfs merkledag nodes in tar archive format.
// It wraps any io.Writer.
......
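To illustrate that the Writer wraps any io.Writer, here is a minimal, hypothetical sketch that drives it directly against a bytes.Buffer, using only the NewWriter, WriteNode and Close calls that appear in this commit:

import (
	"bytes"
	"compress/gzip"

	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
)

// nodeToTar serializes nd (and its children) into an in-memory tar stream.
// (Hypothetical helper for illustration only.)
func nodeToTar(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService) ([]byte, error) {
	var buf bytes.Buffer
	w, err := tar.NewWriter(ctx, dag, true, gzip.NoCompression, &buf)
	if err != nil {
		return nil, err
	}
	if err := w.WriteNode(nd, "example"); err != nil {
		return nil, err
	}
	w.Close()
	return buf.Bytes(), nil
}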