Commit 136bece8 authored by Jeromy

Code cleanup

parent 9832545d
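In summary, the diff below removes the unused ftpb import and the read-count return value from modifyDag, adds doc comments throughout, and changes Truncate so that growing a file expands it with zero blocks (via expandSparse) instead of returning an error. As a rough illustration of the new Truncate behavior, here is a minimal sketch; the helper name, the import path assumed for the package holding DagModifier, and the concrete sizes are illustrative assumptions, not part of this commit:

// Hedged usage sketch (not part of the commit): exercises the new Truncate
// behavior, which now zero-fills when asked to grow a file.
package example

import (
	mod "github.com/jbenet/go-ipfs/unixfs/mod" // assumed location of DagModifier
)

// growAndWrite is a hypothetical helper: it expands the dag-backed file to
// newSize (zero blocks are appended via expandSparse under the hood), writes
// data at offset off, and flushes the buffered change back into the dag.
func growAndWrite(dm *mod.DagModifier, newSize int64, data []byte, off int64) error {
	if err := dm.Truncate(newSize); err != nil {
		return err
	}
	if _, err := dm.WriteAt(data, off); err != nil {
		return err
	}
	return dm.Flush()
}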
@@ -17,7 +17,6 @@ import (
 	pin "github.com/jbenet/go-ipfs/pin"
 	ft "github.com/jbenet/go-ipfs/unixfs"
 	uio "github.com/jbenet/go-ipfs/unixfs/io"
-	ftpb "github.com/jbenet/go-ipfs/unixfs/pb"
 	u "github.com/jbenet/go-ipfs/util"
 )
@@ -56,9 +55,10 @@ func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService,
 }
 
 // WriteAt will modify a dag file in place
-// NOTE: it currently assumes only a single level of indirection
 func (dm *DagModifier) WriteAt(b []byte, offset int64) (int, error) {
 	// TODO: this is currently VERY inneficient
+	// each write that happens at an offset other than the current one causes a
+	// flush to disk, and dag rewrite
 	if uint64(offset) != dm.curWrOff {
 		size, err := dm.Size()
 		if err != nil {
@@ -91,6 +91,8 @@ func (zr zeroReader) Read(b []byte) (int, error) {
 	return len(b), nil
 }
 
+// expandSparse grows the file with zero blocks of 4096
+// A small blocksize is chosen to aid in deduplication
 func (dm *DagModifier) expandSparse(size int64) error {
 	spl := chunk.SizeSplitter{4096}
 	r := io.LimitReader(zeroReader{}, size)
@@ -107,6 +109,7 @@ func (dm *DagModifier) expandSparse(size int64) error {
 	return nil
 }
 
+// Write continues writing to the dag at the current offset
 func (dm *DagModifier) Write(b []byte) (int, error) {
 	if dm.read != nil {
 		dm.read = nil
@@ -114,6 +117,7 @@ func (dm *DagModifier) Write(b []byte) (int, error) {
 	if dm.wrBuf == nil {
 		dm.wrBuf = new(bytes.Buffer)
 	}
+
 	n, err := dm.wrBuf.Write(b)
 	if err != nil {
 		return n, err
@@ -143,7 +147,9 @@ func (dm *DagModifier) Size() (int64, error) {
 	return int64(pbn.GetFilesize()), nil
 }
 
+// Flush writes changes to this dag to disk
 func (dm *DagModifier) Flush() error {
+	// No buffer? Nothing to do
 	if dm.wrBuf == nil {
 		return nil
 	}
@@ -154,9 +160,11 @@ func (dm *DagModifier) Flush() error {
 		dm.readCancel()
 	}
 
+	// Number of bytes we're going to write
 	buflen := dm.wrBuf.Len()
 
-	k, _, done, err := dm.modifyDag(dm.curNode, dm.writeStart, dm.wrBuf)
+	// overwrite existing dag nodes
+	k, done, err := dm.modifyDag(dm.curNode, dm.writeStart, dm.wrBuf)
 	if err != nil {
 		return err
 	}
@@ -168,6 +176,7 @@ func (dm *DagModifier) Flush() error {
 	dm.curNode = nd
 
+	// need to write past end of current dag
 	if !done {
 		blks := dm.splitter.Split(dm.wrBuf)
 		nd, err = dm.appendData(dm.curNode, blks)
@@ -189,28 +198,30 @@ func (dm *DagModifier) Flush() error {
 	return nil
 }
 
-func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (u.Key, int, bool, error) {
+// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
+func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (u.Key, bool, error) {
 	f, err := ft.FromBytes(node.Data)
 	if err != nil {
-		return "", 0, false, err
+		return "", false, err
 	}
 
-	if len(node.Links) == 0 && (f.GetType() == ftpb.Data_Raw || f.GetType() == ftpb.Data_File) {
+	// If we've reached a leaf node.
+	if len(node.Links) == 0 {
 		n, err := data.Read(f.Data[offset:])
 		if err != nil && err != io.EOF {
-			return "", 0, false, err
+			return "", false, err
 		}
 
 		// Update newly written node..
 		b, err := proto.Marshal(f)
 		if err != nil {
-			return "", 0, false, err
+			return "", false, err
		}
 
 		nd := &mdag.Node{Data: b}
 		k, err := dm.dagserv.Add(nd)
 		if err != nil {
-			return "", 0, false, err
+			return "", false, err
 		}
 
 		// Hey look! we're done!
@@ -219,23 +230,21 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
 			done = true
 		}
 
-		return k, n, done, nil
+		return k, done, nil
 	}
 
 	var cur uint64
 	var done bool
-	var totread int
 	for i, bs := range f.GetBlocksizes() {
 		if cur+bs > offset {
 			child, err := node.Links[i].GetNode(dm.dagserv)
 			if err != nil {
-				return "", 0, false, err
+				return "", false, err
 			}
-			k, nread, sdone, err := dm.modifyDag(child, offset-cur, data)
+			k, sdone, err := dm.modifyDag(child, offset-cur, data)
 			if err != nil {
-				return "", 0, false, err
+				return "", false, err
 			}
-			totread += nread
 
 			offset += bs
 			node.Links[i].Hash = mh.Multihash(k)
@@ -249,9 +258,10 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
 	}
 
 	k, err := dm.dagserv.Add(node)
-	return k, totread, done, err
+	return k, done, err
 }
 
+// appendData appends the blocks from the given chan to the end of this dag
 func (dm *DagModifier) appendData(node *mdag.Node, blks <-chan []byte) (*mdag.Node, error) {
 	dbp := &help.DagBuilderParams{
 		Dagserv: dm.dagserv,
@@ -262,6 +272,7 @@ func (dm *DagModifier) appendData(node *mdag.Node, blks <-chan []byte) (*mdag.No
 	return trickle.TrickleAppend(node, dbp.New(blks))
 }
 
+// Read data from this dag starting at the current offset
 func (dm *DagModifier) Read(b []byte) (int, error) {
 	err := dm.Flush()
 	if err != nil {
@@ -322,6 +333,7 @@ func (dm *DagModifier) GetNode() (*mdag.Node, error) {
 	return dm.curNode.Copy(), nil
 }
 
+// HasChanges returned whether or not there are unflushed changes to this dag
 func (dm *DagModifier) HasChanges() bool {
 	return dm.wrBuf != nil
 }
@@ -366,8 +378,9 @@ func (dm *DagModifier) Truncate(size int64) error {
 		return err
 	}
 
+	// Truncate can also be used to expand the file
 	if size > int64(realSize) {
-		return errors.New("Cannot extend file through truncate")
+		return dm.expandSparse(int64(size) - realSize)
 	}
 
 	nnode, err := dagTruncate(dm.curNode, uint64(size), dm.dagserv)
@@ -384,6 +397,7 @@ func (dm *DagModifier) Truncate(size int64) error {
 	return nil
 }
 
+// dagTruncate truncates the given node to 'size' and returns the modified Node
 func dagTruncate(nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
 	if len(nd.Links) == 0 {
 		// TODO: this can likely be done without marshaling and remarshaling