Unverified commit f68092e8, authored by Steven Allen and committed by GitHub

Merge pull request #18 from overbool/fix/issue-#17

fix(fsnode): issue #17
parents f2f968df 8bea61e6
......@@ -102,28 +102,29 @@ func NewHamtFromDag(dserv ipld.DAGService, nd ipld.Node) (*Shard, error) {
return nil, dag.ErrNotProtobuf
}
pbd, err := format.FromBytes(pbnd.Data())
fsn, err := format.FSNodeFromBytes(pbnd.Data())
if err != nil {
return nil, err
}
if pbd.GetType() != upb.Data_HAMTShard {
if fsn.Type() != upb.Data_HAMTShard {
return nil, fmt.Errorf("node was not a dir shard")
}
if pbd.GetHashType() != HashMurmur3 {
if fsn.HashType() != HashMurmur3 {
return nil, fmt.Errorf("only murmur3 supported as hash function")
}
ds, err := makeShard(dserv, int(pbd.GetFanout()))
ds, err := makeShard(dserv, int(fsn.Fanout()))
if err != nil {
return nil, err
}
ds.nd = pbnd.Copy().(*dag.ProtoNode)
ds.children = make([]child, len(pbnd.Links()))
ds.bitfield.SetBytes(pbd.GetData())
ds.hashFunc = pbd.GetHashType()
ds.bitfield.SetBytes(fsn.Data())
ds.hashFunc = fsn.HashType()
ds.builder = ds.nd.CidBuilder()
return ds, nil
......
......@@ -277,12 +277,12 @@ func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
// zero depth dag is raw data block
switch nd := n.(type) {
case *dag.ProtoNode:
pbn, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if pbn.GetType() != ft.TRaw {
if fsn.Type() != ft.TRaw {
return errors.New("expected raw block")
}
......@@ -325,16 +325,16 @@ func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
}
// Verify this is a branch node
pbn, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if pbn.GetType() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType())
if fsn.Type() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
}
if len(pbn.Data) > 0 {
if len(fsn.Data()) > 0 {
return errors.New("branch node should not have data")
}
......
......@@ -15,7 +15,7 @@ import (
func ResolveUnixfsOnce(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
switch nd := nd.(type) {
case *dag.ProtoNode:
upb, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
// Not a unixfs node, use standard object traversal code
lnk, err := nd.GetNodeLink(names[0])
......@@ -26,7 +26,7 @@ func ResolveUnixfsOnce(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, na
return lnk, names[1:], nil
}
switch upb.GetType() {
switch fsn.Type() {
case ft.THAMTShard:
rods := dag.NewReadOnlyDagService(ds)
s, err := hamt.NewHamtFromDag(rods, nd)
......
......@@ -13,7 +13,6 @@ import (
trickle "github.com/ipfs/go-unixfs/importer/trickle"
uio "github.com/ipfs/go-unixfs/io"
proto "github.com/gogo/protobuf/proto"
cid "github.com/ipfs/go-cid"
chunker "github.com/ipfs/go-ipfs-chunker"
ipld "github.com/ipfs/go-ipld-format"
......@@ -173,11 +172,11 @@ func (dm *DagModifier) Size() (int64, error) {
func fileSize(n ipld.Node) (uint64, error) {
switch nd := n.(type) {
case *mdag.ProtoNode:
f, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return 0, err
}
return f.GetFilesize(), nil
return fsn.FileSize(), nil
case *mdag.RawNode:
return uint64(len(nd.RawData())), nil
default:
......@@ -238,18 +237,18 @@ func (dm *DagModifier) modifyDag(n ipld.Node, offset uint64) (cid.Cid, error) {
if len(n.Links()) == 0 {
switch nd0 := n.(type) {
case *mdag.ProtoNode:
f, err := ft.FromBytes(nd0.Data())
fsn, err := ft.FSNodeFromBytes(nd0.Data())
if err != nil {
return cid.Cid{}, err
}
_, err = dm.wrBuf.Read(f.Data[offset:])
_, err = dm.wrBuf.Read(fsn.Data()[offset:])
if err != nil && err != io.EOF {
return cid.Cid{}, err
}
// Update newly written node..
b, err := proto.Marshal(f)
b, err := fsn.GetBytes()
if err != nil {
return cid.Cid{}, err
}
......@@ -300,13 +299,13 @@ func (dm *DagModifier) modifyDag(n ipld.Node, offset uint64) (cid.Cid, error) {
return cid.Cid{}, ErrNotUnixfs
}
f, err := ft.FromBytes(node.Data())
fsn, err := ft.FSNodeFromBytes(node.Data())
if err != nil {
return cid.Cid{}, err
}
var cur uint64
for i, bs := range f.GetBlocksizes() {
for i, bs := range fsn.BlockSizes() {
// We found the correct child to write into
if cur+bs > offset {
child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
......@@ -510,11 +509,11 @@ func (dm *DagModifier) dagTruncate(ctx context.Context, n ipld.Node, size uint64
switch nd := n.(type) {
case *mdag.ProtoNode:
// TODO: this can likely be done without marshaling and remarshaling
pbn, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return nil, err
}
nd.SetData(ft.WrapData(pbn.Data[:size]))
nd.SetData(ft.WrapData(fsn.Data()[:size]))
return nd, nil
case *mdag.RawNode:
return mdag.NewRawNodeWPrefix(nd.RawData()[:size], nd.Cid().Prefix())
......
......@@ -107,7 +107,7 @@ func ArrComp(a, b []byte) error {
// PrintDag pretty-prints the given dag to stdout.
func PrintDag(nd *mdag.ProtoNode, ds ipld.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
panic(err)
}
......@@ -115,7 +115,7 @@ func PrintDag(nd *mdag.ProtoNode, ds ipld.DAGService, indent int) {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
fmt.Printf("{size = %d, type = %s, children = %d", fsn.FileSize(), fsn.Type().String(), fsn.NumChildren())
if len(nd.Links()) > 0 {
fmt.Println()
}
......
......@@ -29,6 +29,7 @@ var (
)
// FromBytes unmarshals a byte slice as protobuf Data.
// Deprecated: Use `FSNodeFromBytes` instead to avoid direct manipulation of `pb.Data`.
func FromBytes(data []byte) (*pb.Data, error) {
pbdata := new(pb.Data)
err := proto.Unmarshal(data, pbdata)
......@@ -182,6 +183,16 @@ func NewFSNode(dataType pb.Data_DataType) *FSNode {
return n
}
// HashType returns the hash-function identifier stored in the node's
// underlying format data (callers compare it against codes such as
// HashMurmur3 when validating HAMT shards).
func (n *FSNode) HashType() uint64 {
return n.format.GetHashType()
}
// Fanout returns the fanout value recorded in the node's underlying
// format data (used, e.g., as the width when constructing a HAMT shard).
func (n *FSNode) Fanout() uint64 {
return n.format.GetFanout()
}
// AddBlockSize adds the size of the next child block of this node
func (n *FSNode) AddBlockSize(s uint64) {
n.UpdateFilesize(int64(s))
......@@ -200,6 +211,11 @@ func (n *FSNode) BlockSize(i int) uint64 {
return n.format.Blocksizes[i]
}
// BlockSizes returns the sizes of this node's child blocks.
// NOTE(review): this appears to hand back the slice held by the underlying
// protobuf message rather than a copy — callers presumably must treat it as
// read-only; confirm against the generated getter's semantics.
func (n *FSNode) BlockSizes() []uint64 {
return n.format.GetBlocksizes()
}
// RemoveAllBlockSizes removes all the child block sizes of this node.
func (n *FSNode) RemoveAllBlockSizes() {
n.format.Blocksizes = []uint64{}
......
......@@ -76,12 +76,12 @@ func TestPBdataTools(t *testing.T) {
t.Fatal("Unwrap failed to produce the correct wrapped data.")
}
rawPBdata, err := FromBytes(rawPB)
rawPBdata, err := FSNodeFromBytes(rawPB)
if err != nil {
t.Fatal(err)
}
isRaw := rawPBdata.GetType() == TRaw
isRaw := rawPBdata.Type() == TRaw
if !isRaw {
t.Fatal("WrapData does not create pb.Data_Raw!")
}
......@@ -97,8 +97,8 @@ func TestPBdataTools(t *testing.T) {
}
dirPB := FolderPBData()
dir, err := FromBytes(dirPB)
isDir := dir.GetType() == TDirectory
dir, err := FSNodeFromBytes(dirPB)
isDir := dir.Type() == TDirectory
if !isDir {
t.Fatal("FolderPBData does not create a directory!")
}
......@@ -115,8 +115,8 @@ func TestPBdataTools(t *testing.T) {
t.Fatal(err)
}
catSymPB, err := FromBytes(catSym)
isSym := catSymPB.GetType() == TSymlink
catSymPB, err := FSNodeFromBytes(catSym)
isSym := catSymPB.Type() == TSymlink
if !isSym {
t.Fatal("Failed to make a Symlink.")
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment