Commit 093c8fb0 authored by Jeromy

Rework package structure for unixfs and subpackages

cc @jbenet
parent 15a47010
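
For orientation before the diff: this commit moves the unixfs data format out of `importer/format` into a new top-level `unixfs` package, splits the chunking code out of `importer` into `importer/chunk`, and collects `DagReader`, `DagWriter`, and `DagModifier` under `unixfs/io`. A minimal sketch of consumer code after the move — the `readFile` helper is hypothetical, but the import paths and the `NewDagReader` signature are taken from the hunks below:

```go
package example

import (
	"io/ioutil"

	mdag "github.com/jbenet/go-ipfs/merkledag"
	uio "github.com/jbenet/go-ipfs/unixfs/io" // was merkledag.NewDagReader
)

// readFile is a hypothetical caller showing the relocated API: the
// DagReader now lives in unixfs/io rather than merkledag.
func readFile(nd *mdag.Node, serv *mdag.DAGService) ([]byte, error) {
	r, err := uio.NewDagReader(nd, serv)
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(r)
}
```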
@@ -10,8 +10,8 @@ import (
"github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/importer"
ft "github.com/jbenet/go-ipfs/importer/format"
dag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
u "github.com/jbenet/go-ipfs/util"
)
......
@@ -5,7 +5,7 @@ import (
"io"
"github.com/jbenet/go-ipfs/core"
mdag "github.com/jbenet/go-ipfs/merkledag"
uio "github.com/jbenet/go-ipfs/unixfs/io"
)
func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
@@ -15,7 +15,7 @@ func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error {
return fmt.Errorf("catFile error: %v", err)
}
read, err := mdag.NewDagReader(dagnode, n.DAG)
read, err := uio.NewDagReader(dagnode, n.DAG)
if err != nil {
return fmt.Errorf("cat error: %v", err)
}
......
package ipns
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
@@ -12,10 +13,10 @@ import (
"github.com/jbenet/go-ipfs/core"
ci "github.com/jbenet/go-ipfs/crypto"
imp "github.com/jbenet/go-ipfs/importer"
dt "github.com/jbenet/go-ipfs/importer/dagwriter"
ft "github.com/jbenet/go-ipfs/importer/format"
"github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
uio "github.com/jbenet/go-ipfs/unixfs/io"
u "github.com/jbenet/go-ipfs/util"
)
@@ -204,7 +205,7 @@ type Node struct {
Ipfs *core.IpfsNode
Nd *mdag.Node
dagMod *dt.DagModifier
dagMod *uio.DagModifier
cached *ft.PBData
}
@@ -293,7 +294,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
// ReadAll reads the object data as file data
func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
log.Debug("ipns: ReadAll [%s]", s.name)
r, err := mdag.NewDagReader(s.Nd, s.Ipfs.DAG)
r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
if err != nil {
return nil, err
}
@@ -312,7 +313,7 @@ func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
if n.dagMod == nil {
// Create a DagModifier to allow us to change the existing dag node
dmod, err := dt.NewDagModifier(n.Nd, n.Ipfs.DAG, imp.DefaultSplitter)
dmod, err := uio.NewDagModifier(n.Nd, n.Ipfs.DAG, chunk.DefaultSplitter)
if err != nil {
log.Error("Error creating dag modifier: %s", err)
return err
@@ -541,7 +542,7 @@ func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error {
}
default:
log.Critical("Unknown node type for rename target dir!")
return err
return errors.New("Unknown fs node type!")
}
return nil
}
......
@@ -19,8 +19,9 @@ import (
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
core "github.com/jbenet/go-ipfs/core"
ft "github.com/jbenet/go-ipfs/importer/format"
mdag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
uio "github.com/jbenet/go-ipfs/unixfs/io"
u "github.com/jbenet/go-ipfs/util"
)
@@ -79,7 +80,7 @@ func (*Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
type Node struct {
Ipfs *core.IpfsNode
Nd *mdag.Node
fd *mdag.DagReader
fd *uio.DagReader
cached *ft.PBData
}
@@ -143,7 +144,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
// ReadAll reads the object data as file data
func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
u.DOut("Read node.\n")
r, err := mdag.NewDagReader(s.Nd, s.Ipfs.DAG)
r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
if err != nil {
return nil, err
}
......
package importer
package chunk
import (
"bufio"
......
package importer
package chunk
import "io"
import (
"io"
"github.com/jbenet/go-ipfs/util"
)
var log = util.Logger("chunk")
var DefaultSplitter = &SizeSplitter{1024 * 512}
type BlockSplitter interface {
Split(r io.Reader) chan []byte
......
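
The `chunk` package's one abstraction is visible in this hunk: a `BlockSplitter` turns an `io.Reader` into a channel of blocks, and `DefaultSplitter` is a 512 KiB `SizeSplitter`. As an illustration of that contract — not the commit's code, since the real implementation is elided above — a fixed-size splitter might look like:

```go
package chunk

import "io"

// fixedSplitter is a hypothetical BlockSplitter that emits blocks of at
// most Size bytes, in the spirit of what the diff implies SizeSplitter does.
type fixedSplitter struct {
	Size int
}

// Split reads r until EOF, sending each chunk on the returned channel
// and closing it when the stream ends.
func (s fixedSplitter) Split(r io.Reader) chan []byte {
	out := make(chan []byte)
	go func() {
		defer close(out)
		for {
			buf := make([]byte, s.Size)
			n, err := io.ReadFull(r, buf)
			if n > 0 {
				out <- buf[:n]
			}
			if err != nil {
				return // EOF, ErrUnexpectedEOF, or a real error ends the stream
			}
		}
	}()
	return out
}
```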
@@ -5,8 +5,9 @@ import (
"io"
"os"
ft "github.com/jbenet/go-ipfs/importer/format"
"github.com/jbenet/go-ipfs/importer/chunk"
dag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
"github.com/jbenet/go-ipfs/util"
)
@@ -18,18 +19,16 @@ var BlockSizeLimit = int64(1048576) // 1 MB
// ErrSizeLimitExceeded signals that a block is larger than BlockSizeLimit.
var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded")
var DefaultSplitter = &SizeSplitter{1024 * 512}
// todo: incremental construction with an ipfs node. dumping constructed
// objects into the datastore, to avoid buffering all in memory
// NewDagFromReader constructs a Merkle DAG from the given io.Reader.
// size required for block construction.
func NewDagFromReader(r io.Reader) (*dag.Node, error) {
return NewDagFromReaderWithSplitter(r, DefaultSplitter)
return NewDagFromReaderWithSplitter(r, chunk.DefaultSplitter)
}
func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) {
func NewDagFromReaderWithSplitter(r io.Reader, spl chunk.BlockSplitter) (*dag.Node, error) {
blkChan := spl.Split(r)
first := <-blkChan
root := &dag.Node{}
......
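
With the splitters moved out, the importer keeps only DAG construction: `NewDagFromReader` is now a thin wrapper that passes `chunk.DefaultSplitter` to `NewDagFromReaderWithSplitter`. A usage sketch under those signatures (`buildDag` itself is hypothetical):

```go
package example

import (
	"io"

	"github.com/jbenet/go-ipfs/importer"
	"github.com/jbenet/go-ipfs/importer/chunk"
	dag "github.com/jbenet/go-ipfs/merkledag"
)

// buildDag chunks r into a Merkle DAG with an explicit 4 KiB splitter;
// importer.NewDagFromReader(r) is the same call with chunk.DefaultSplitter.
func buildDag(r io.Reader) (*dag.Node, error) {
	return importer.NewDagFromReaderWithSplitter(r, &chunk.SizeSplitter{4096})
}
```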
@@ -9,9 +9,13 @@ import (
"os"
"testing"
dag "github.com/jbenet/go-ipfs/merkledag"
"github.com/jbenet/go-ipfs/importer/chunk"
uio "github.com/jbenet/go-ipfs/unixfs/io"
)
// NOTE:
// These tests test a combination of unixfs/io/dagreader and importer/chunk.
// Maybe split them up somehow?
func TestBuildDag(t *testing.T) {
td := os.TempDir()
fi, err := os.Create(td + "/tmpfi")
@@ -34,9 +38,9 @@ func TestBuildDag(t *testing.T) {
//Test where calls to read are smaller than the chunk size
func TestSizeBasedSplit(t *testing.T) {
bs := &SizeSplitter{512}
bs := &chunk.SizeSplitter{512}
testFileConsistency(t, bs, 32*512)
bs = &SizeSplitter{4096}
bs = &chunk.SizeSplitter{4096}
testFileConsistency(t, bs, 32*4096)
// Uneven offset
@@ -49,7 +53,7 @@ func dup(b []byte) []byte {
return o
}
func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) {
func testFileConsistency(t *testing.T, bs chunk.BlockSplitter, nbytes int) {
buf := new(bytes.Buffer)
io.CopyN(buf, rand.Reader, int64(nbytes))
should := dup(buf.Bytes())
@@ -57,7 +61,7 @@ func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) {
if err != nil {
t.Fatal(err)
}
r, err := dag.NewDagReader(nd, nil)
r, err := uio.NewDagReader(nd, nil)
if err != nil {
t.Fatal(err)
}
@@ -86,14 +90,14 @@ func arrComp(a, b []byte) error {
}
func TestMaybeRabinConsistency(t *testing.T) {
testFileConsistency(t, NewMaybeRabin(4096), 256*4096)
testFileConsistency(t, chunk.NewMaybeRabin(4096), 256*4096)
}
func TestRabinBlockSize(t *testing.T) {
buf := new(bytes.Buffer)
nbytes := 1024 * 1024
io.CopyN(buf, rand.Reader, int64(nbytes))
rab := NewMaybeRabin(4096)
rab := chunk.NewMaybeRabin(4096)
blkch := rab.Split(buf)
var blocks [][]byte
......
@@ -6,6 +6,7 @@ import (
core "github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/importer"
dag "github.com/jbenet/go-ipfs/merkledag"
uio "github.com/jbenet/go-ipfs/unixfs/io"
u "github.com/jbenet/go-ipfs/util"
)
@@ -33,5 +34,5 @@ func (i *ipfsHandler) AddNodeToDAG(nd *dag.Node) (u.Key, error) {
}
func (i *ipfsHandler) NewDagReader(nd *dag.Node) (io.Reader, error) {
return dag.NewDagReader(nd, i.node.DAG)
return uio.NewDagReader(nd, i.node.DAG)
}
@@ -3,7 +3,7 @@
// DO NOT EDIT!
/*
Package format is a generated protocol buffer package.
Package unixfs is a generated protocol buffer package.
It is generated from these files:
data.proto
@@ -11,9 +11,9 @@ It has these top-level messages:
It has these top-level messages:
PBData
*/
package format
package unixfs
import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
import proto "code.google.com/p/goprotobuf/proto"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
@@ -57,7 +57,7 @@ func (x *PBData_DataType) UnmarshalJSON(data []byte) error {
}
type PBData struct {
Type *PBData_DataType `protobuf:"varint,1,req,enum=format.PBData_DataType" json:"Type,omitempty"`
Type *PBData_DataType `protobuf:"varint,1,req,enum=unixfs.PBData_DataType" json:"Type,omitempty"`
Data []byte `protobuf:"bytes,2,opt" json:"Data,omitempty"`
Filesize *uint64 `protobuf:"varint,3,opt,name=filesize" json:"filesize,omitempty"`
Blocksizes []uint64 `protobuf:"varint,4,rep,name=blocksizes" json:"blocksizes,omitempty"`
@@ -97,5 +97,5 @@ func (m *PBData) GetBlocksizes() []uint64 {
}
func init() {
proto.RegisterEnum("format.PBData_DataType", PBData_DataType_name, PBData_DataType_value)
proto.RegisterEnum("unixfs.PBData_DataType", PBData_DataType_name, PBData_DataType_value)
}
package format;
package unixfs;
message PBData {
enum DataType {
......
// Package format implements a data format for files in the ipfs filesystem
// It is not the only format in ipfs, but it is the one that the filesystem assumes
package format
package unixfs
import (
"errors"
......
package format
package unixfs
import (
"testing"
......
package dagwriter
package io
import (
"bytes"
@@ -6,9 +6,9 @@ import (
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
imp "github.com/jbenet/go-ipfs/importer"
ft "github.com/jbenet/go-ipfs/importer/format"
"github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
u "github.com/jbenet/go-ipfs/util"
)
@@ -20,10 +20,10 @@ type DagModifier struct {
curNode *mdag.Node
pbdata *ft.PBData
splitter imp.BlockSplitter
splitter chunk.BlockSplitter
}
func NewDagModifier(from *mdag.Node, serv *mdag.DAGService, spl imp.BlockSplitter) (*DagModifier, error) {
func NewDagModifier(from *mdag.Node, serv *mdag.DAGService, spl chunk.BlockSplitter) (*DagModifier, error) {
pbd, err := ft.FromBytes(from.Data)
if err != nil {
return nil, err
@@ -172,7 +172,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
// splitBytes uses a splitterFunc to turn a large array of bytes
// into many smaller arrays of bytes
func splitBytes(b []byte, spl imp.BlockSplitter) [][]byte {
func splitBytes(b []byte, spl chunk.BlockSplitter) [][]byte {
out := spl.Split(bytes.NewReader(b))
var arr [][]byte
for blk := range out {
......
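
`DagModifier` (formerly in `importer/dagwriter`) edits an existing dag in place. A sketch of the write path, mirroring the fuse `Write` handler above — `patchFile` is hypothetical, and a `GetNode` accessor returning the updated root is assumed from the elided test bodies below:

```go
package example

import (
	"github.com/jbenet/go-ipfs/importer/chunk"
	mdag "github.com/jbenet/go-ipfs/merkledag"
	uio "github.com/jbenet/go-ipfs/unixfs/io"
)

// patchFile overwrites len(data) bytes at offset off in the file rooted
// at nd and returns the rewritten root node.
func patchFile(nd *mdag.Node, serv *mdag.DAGService, data []byte, off uint64) (*mdag.Node, error) {
	dm, err := uio.NewDagModifier(nd, serv, chunk.DefaultSplitter)
	if err != nil {
		return nil, err
	}
	if _, err := dm.WriteAt(data, off); err != nil {
		return nil, err
	}
	return dm.GetNode() // assumed accessor; not shown in this diff
}
```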
package dagwriter
package io
import (
"fmt"
@@ -8,9 +8,9 @@ import (
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
bs "github.com/jbenet/go-ipfs/blockservice"
imp "github.com/jbenet/go-ipfs/importer"
ft "github.com/jbenet/go-ipfs/importer/format"
"github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
u "github.com/jbenet/go-ipfs/util"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
@@ -26,7 +26,7 @@ func getMockDagServ(t *testing.T) *mdag.DAGService {
}
func getNode(t *testing.T, dserv *mdag.DAGService, size int64) ([]byte, *mdag.Node) {
dw := NewDagWriter(dserv, &imp.SizeSplitter{500})
dw := NewDagWriter(dserv, &chunk.SizeSplitter{500})
n, err := io.CopyN(dw, u.NewFastRand(), size)
if err != nil {
@@ -39,7 +39,7 @@ func getNode(t *testing.T, dserv *mdag.DAGService, size int64) ([]byte, *mdag.Node) {
dw.Close()
node := dw.GetNode()
dr, err := mdag.NewDagReader(node, dserv)
dr, err := NewDagReader(node, dserv)
if err != nil {
t.Fatal(err)
}
@@ -76,7 +76,7 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier)
t.Fatal(err)
}
rd, err := mdag.NewDagReader(nd, dm.dagserv)
rd, err := NewDagReader(nd, dm.dagserv)
if err != nil {
t.Fatal(err)
}
@@ -99,7 +99,7 @@ func TestDagModifierBasic(t *testing.T) {
dserv := getMockDagServ(t)
b, n := getNode(t, dserv, 50000)
dagmod, err := NewDagModifier(n, dserv, &imp.SizeSplitter{512})
dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
if err != nil {
t.Fatal(err)
}
......
package merkledag
package io
import (
"bytes"
@@ -6,7 +6,8 @@ import (
"io"
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
ft "github.com/jbenet/go-ipfs/importer/format"
mdag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
u "github.com/jbenet/go-ipfs/util"
)
@@ -14,15 +15,15 @@ var ErrIsDir = errors.New("this dag node is a directory")
// DagReader provides a way to easily read the data contained in a dag.
type DagReader struct {
serv *DAGService
node *Node
serv *mdag.DAGService
node *mdag.Node
position int
buf *bytes.Buffer
}
// NewDagReader creates a new reader object that reads the data represented by the given
// node, using the passed-in DAGService for data retrieval
func NewDagReader(n *Node, serv *DAGService) (io.Reader, error) {
func NewDagReader(n *mdag.Node, serv *mdag.DAGService) (io.Reader, error) {
pb := new(ft.PBData)
err := proto.Unmarshal(n.Data, pb)
if err != nil {
......
package dagwriter
package io
import (
imp "github.com/jbenet/go-ipfs/importer"
ft "github.com/jbenet/go-ipfs/importer/format"
"github.com/jbenet/go-ipfs/importer/chunk"
dag "github.com/jbenet/go-ipfs/merkledag"
ft "github.com/jbenet/go-ipfs/unixfs"
"github.com/jbenet/go-ipfs/util"
)
@@ -15,11 +15,11 @@ type DagWriter struct {
totalSize int64
splChan chan []byte
done chan struct{}
splitter imp.BlockSplitter
splitter chunk.BlockSplitter
seterr error
}
func NewDagWriter(ds *dag.DAGService, splitter imp.BlockSplitter) *DagWriter {
func NewDagWriter(ds *dag.DAGService, splitter chunk.BlockSplitter) *DagWriter {
dw := new(DagWriter)
dw.dagserv = ds
dw.splChan = make(chan []byte, 8)
......
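
`DagWriter` streams bytes into a new dag as an `io.Writer`. The round trip the tests below exercise, condensed into one hypothetical helper (`NewDagWriter`, `Close`, and `GetNode` are all visible in this hunk and the tests):

```go
package example

import (
	"io"

	"github.com/jbenet/go-ipfs/importer/chunk"
	dag "github.com/jbenet/go-ipfs/merkledag"
	uio "github.com/jbenet/go-ipfs/unixfs/io"
)

// writeStream copies n bytes from r into a fresh dag and returns its root,
// the same sequence TestDagWriter performs below.
func writeStream(ds *dag.DAGService, r io.Reader, n int64) (*dag.Node, error) {
	dw := uio.NewDagWriter(ds, &chunk.SizeSplitter{4096})
	if _, err := io.CopyN(dw, r, n); err != nil {
		return nil, err
	}
	dw.Close()
	return dw.GetNode(), nil
}
```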
package dagwriter
package io
import (
"testing"
@@ -7,7 +7,7 @@ import (
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
bs "github.com/jbenet/go-ipfs/blockservice"
imp "github.com/jbenet/go-ipfs/importer"
chunk "github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
)
@@ -54,7 +54,7 @@ func TestDagWriter(t *testing.T) {
t.Fatal(err)
}
dag := &mdag.DAGService{bserv}
dw := NewDagWriter(dag, &imp.SizeSplitter{4096})
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
nbytes := int64(1024 * 1024 * 2)
n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -69,7 +69,7 @@ func TestDagWriter(t *testing.T) {
dw.Close()
node := dw.GetNode()
read, err := mdag.NewDagReader(node, dag)
read, err := NewDagReader(node, dag)
if err != nil {
t.Fatal(err)
}
@@ -88,7 +88,7 @@ func TestMassiveWrite(t *testing.T) {
t.Fatal(err)
}
dag := &mdag.DAGService{bserv}
dw := NewDagWriter(dag, &imp.SizeSplitter{4096})
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
nbytes := int64(1024 * 1024 * 1024 * 16)
n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -113,7 +113,7 @@ func BenchmarkDagWriter(b *testing.B) {
nbytes := int64(100000)
for i := 0; i < b.N; i++ {
b.SetBytes(nbytes)
dw := NewDagWriter(dag, &imp.SizeSplitter{4096})
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
n, err := io.CopyN(dw, &datasource{}, nbytes)
if err != nil {
b.Fatal(err)
......