Commit f718bd6a authored 10 years ago by Juan Batiz-Benet
go lint
lint errors left:
- protocol buffers output is not lint-friendly
parent b5b2e8ae
Showing 17 changed files with 134 additions and 37 deletions (+134 -37)
blocks/blocks.go            +9   -2
cli/add.go                  +8   -7
cli/ipfs.go                 +1   -0
cli/version.go              +1   -0
config/config.go            +4   -0
config/serialize.go         +4   -5
core/core.go                +3   -2
dht/dht.go                  +1   -1
fuse/readonly/readonly.go   +14  -0
importer/importer.go        +9   -4
merkledag/coding.go         +9   -0
merkledag/merkledag.go      +15  -7
path/path.go                +12  -3
peer/peer.go                +7   -1
swarm/conn.go               +6   -0
swarm/swarm.go              +18  -0
util/util.go                +13  -5
blocks/blocks.go
@@ -7,14 +7,14 @@ import (
 	mh "github.com/jbenet/go-multihash"
 )
 
-// Blocks is the ipfs blocks service. It is the way
+// Block is the ipfs blocks service. It is the way
 // to retrieve blocks by the higher level ipfs modules
 type Block struct {
 	Multihash mh.Multihash
 	Data      []byte
 }
 
+// NewBlock creates a Block object from opaque data. It will hash the data.
 func NewBlock(data []byte) (*Block, error) {
 	h, err := u.Hash(data)
 	if err != nil {
@@ -23,15 +23,19 @@ func NewBlock(data []byte) (*Block, error) {
 	return &Block{Data: data, Multihash: h}, nil
 }
 
+// Key returns the block's Multihash as a Key value.
 func (b *Block) Key() u.Key {
 	return u.Key(b.Multihash)
 }
 
+// BlockService is a block datastore.
+// It uses an internal `datastore.Datastore` instance to store values.
 type BlockService struct {
 	Datastore ds.Datastore
 	// Remote *bitswap.BitSwap // eventually.
 }
 
+// NewBlockService creates a BlockService with given datastore instance.
 func NewBlockService(d ds.Datastore) (*BlockService, error) {
 	if d == nil {
 		return nil, fmt.Errorf("BlockService requires valid datastore")
@@ -39,12 +43,15 @@ func NewBlockService(d ds.Datastore) (*BlockService, error) {
 	return &BlockService{Datastore: d}, nil
 }
 
+// AddBlock adds a particular block to the service, Putting it into the datastore.
 func (s *BlockService) AddBlock(b *Block) (u.Key, error) {
 	k := b.Key()
 	dsk := ds.NewKey(string(k))
 	return k, s.Datastore.Put(dsk, b.Data)
 }
 
+// GetBlock retrieves a particular block from the service,
+// Getting it from the datastore using the key (hash).
 func (s *BlockService) GetBlock(k u.Key) (*Block, error) {
 	dsk := ds.NewKey(string(k))
 	datai, err := s.Datastore.Get(dsk)
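The Block/Key pair above is a small content-addressing pattern: hash the opaque data, then use the string form of the hash as a comparable map key. A minimal stdlib-only sketch of that pattern follows; the names are illustrative, not the repository's API.

package main

import (
	"crypto/sha256"
	"fmt"
)

type key string

type block struct {
	hash key
	data []byte
}

func newBlock(data []byte) block {
	h := sha256.Sum256(data)
	// Byte slices are not comparable, so convert the digest to a string
	// before using it as a map key.
	return block{hash: key(h[:]), data: data}
}

func main() {
	store := map[key]block{}
	b := newBlock([]byte("hello ipfs"))
	store[b.hash] = b
	fmt.Printf("stored %d block(s), key %x...\n", len(store), b.hash[:4])
}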
cli/add.go
@@ -14,7 +14,8 @@ import (
 	"path/filepath"
 )
 
-var DepthLimitExceeded = fmt.Errorf("depth limit exceeded")
+// Error indicating the max depth has been exceded.
+var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")
 
 var cmdIpfsAdd = &commander.Command{
 	UsageLine: "add",
@@ -57,10 +58,10 @@ func addCmd(c *commander.Command, inp []string) error {
 		_, err := addPath(n, fpath, depth)
 		if err != nil {
 			if !recursive {
-				return fmt.Errorf("%s is a directory. Use -r to add recursively.", fpath)
-			} else {
-				u.PErr("error adding %s: %v\n", fpath, err)
+				return fmt.Errorf("%s is a directory. Use -r to add recursively", fpath)
 			}
+
+			u.PErr("error adding %s: %v\n", fpath, err)
 		}
 	}
 	return err
@@ -68,7 +69,7 @@ func addCmd(c *commander.Command, inp []string) error {
 func addPath(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
 	if depth == 0 {
-		return nil, DepthLimitExceeded
+		return nil, ErrDepthLimitExceeded
 	}
 
 	fi, err := os.Stat(fpath)
@@ -78,9 +79,9 @@ func addPath(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
 	if fi.IsDir() {
 		return addDir(n, fpath, depth)
-	} else {
-		return addFile(n, fpath, depth)
 	}
+
+	return addFile(n, fpath, depth)
 }
 
 func addDir(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) {
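The two changes above are typical golint fixes: exported error values get an Err prefix plus a doc comment, and an else branch that follows a return is flattened. A self-contained sketch of both idioms, with illustrative identifiers rather than the repository's own:

package main

import (
	"errors"
	"fmt"
	"os"
)

// ErrDepthLimit is returned when recursion goes deeper than allowed.
var ErrDepthLimit = errors.New("depth limit exceeded")

func describe(path string, depth int) (string, error) {
	if depth == 0 {
		return "", ErrDepthLimit
	}
	fi, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	// golint prefers dropping the else: the if branch already returns.
	if fi.IsDir() {
		return "directory", nil
	}
	return "file", nil
}

func main() {
	kind, err := describe(".", 1)
	fmt.Println(kind, err)
}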
cli/ipfs.go
@@ -10,6 +10,7 @@ import (
 	"os"
 )
 
+// The IPFS command tree. It is an instance of `commander.Command`.
 var CmdIpfs = &commander.Command{
 	UsageLine: "ipfs [<flags>] <command> [<args>]",
 	Short:     "global versioned p2p merkledag file system",
cli/version.go
@@ -5,6 +5,7 @@ import (
 	u "github.com/jbenet/go-ipfs/util"
 )
 
+// The IPFS version.
 const Version = "0.1.0"
 
 var cmdIpfsVersion = &commander.Command{
config/config.go
@@ -5,15 +5,18 @@ import (
 	u "github.com/jbenet/go-ipfs/util"
 )
 
+// Identity tracks the configuration of the local node's identity.
 type Identity struct {
 	PeerId string
 }
 
+// Datastore tracks the configuration of the datastore.
 type Datastore struct {
 	Type string
 	Path string
 }
 
+// Config is used to load IPFS config files.
 type Config struct {
 	Identity  *Identity
 	Datastore *Datastore
@@ -29,6 +32,7 @@ var defaultConfigFile = `{
 }
 `
 
+// LoadConfig reads given file and returns the read config, or error.
 func LoadConfig(filename string) (*Config, error) {
 	if len(filename) == 0 {
 		filename = defaultConfigFilePath
config/serialize.go
@@ -7,10 +7,7 @@ import (
 	"path"
 )
 
-func ReadFile(filename string) ([]byte, error) {
-	return ioutil.ReadFile(filename)
-}
-
+// WriteFile writes the given buffer `buf` into file named `filename`.
 func WriteFile(filename string, buf []byte) error {
 	err := os.MkdirAll(path.Dir(filename), 0777)
 	if err != nil {
@@ -20,8 +17,9 @@ func WriteFile(filename string, buf []byte) error {
 	return ioutil.WriteFile(filename, buf, 0666)
 }
 
+// ReadConfigFile reads the config from `filename` into `cfg`.
 func ReadConfigFile(filename string, cfg *Config) error {
-	buf, err := ReadFile(filename)
+	buf, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return err
 	}
@@ -29,6 +27,7 @@ func ReadConfigFile(filename string, cfg *Config) error {
 	return json.Unmarshal(buf, cfg)
 }
 
+// WriteConfigFile writes the config from `cfg` into `filename`.
 func WriteConfigFile(filename string, cfg *Config) error {
 	buf, err := json.MarshalIndent(cfg, "", "  ")
 	if err != nil {
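The read/write pair above is a plain JSON round trip: MarshalIndent to disk (creating parent directories first), then read the file back and Unmarshal. A stdlib-only sketch of the same shape, using a stand-in config struct rather than the package's real one:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

type config struct {
	Datastore struct {
		Type string
		Path string
	}
}

func writeConfigFile(filename string, cfg *config) error {
	buf, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		return err
	}
	// Make sure the parent directory exists before writing.
	if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil {
		return err
	}
	return os.WriteFile(filename, buf, 0666)
}

func readConfigFile(filename string, cfg *config) error {
	buf, err := os.ReadFile(filename)
	if err != nil {
		return err
	}
	return json.Unmarshal(buf, cfg)
}

func main() {
	fname := filepath.Join(os.TempDir(), "ipfs-example", "config")
	var cfg config
	cfg.Datastore.Type = "leveldb"
	cfg.Datastore.Path = "/tmp/datastore"
	if err := writeConfigFile(fname, &cfg); err != nil {
		panic(err)
	}
	var back config
	if err := readConfigFile(fname, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Datastore.Type, back.Datastore.Path)
}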
core/core.go
@@ -10,7 +10,7 @@ import (
 	peer "github.com/jbenet/go-ipfs/peer"
 )
 
-// IPFS Core module. It represents an IPFS instance.
+// IpfsNode is IPFS Core module. It represents an IPFS instance.
 type IpfsNode struct {
 	// the node's configuration
@@ -47,9 +47,10 @@ type IpfsNode struct {
 	// Namesys *namesys.Namesys
 }
 
+// NewIpfsNode constructs a new IpfsNode based on the given config.
 func NewIpfsNode(cfg *config.Config) (*IpfsNode, error) {
 	if cfg == nil {
-		return nil, fmt.Errorf("configuration required.")
+		return nil, fmt.Errorf("configuration required")
 	}
 
 	d, err := makeDatastore(cfg.Datastore)
dht/dht.go
@@ -4,4 +4,4 @@ package dht
 // Coral and S/Kademlia modifications. It is used to
 // implement the base IPFS Routing module.
 
-TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js
+// TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js
fuse/readonly/readonly.go
@@ -16,26 +16,32 @@ import (
 	"time"
 )
 
+// FileSystem is the readonly Ipfs Fuse Filesystem.
 type FileSystem struct {
 	Ipfs *core.IpfsNode
 }
 
+// NewFileSystem constructs new fs using given core.IpfsNode instance.
 func NewFileSystem(ipfs *core.IpfsNode) *FileSystem {
 	return &FileSystem{Ipfs: ipfs}
 }
 
+// Root constructs the Root of the filesystem, a Root object.
 func (f FileSystem) Root() (fs.Node, fuse.Error) {
 	return &Root{Ipfs: f.Ipfs}, nil
 }
 
+// Root is the root object of the filesystem tree.
 type Root struct {
 	Ipfs *core.IpfsNode
 }
 
+// Attr returns file attributes.
 func (*Root) Attr() fuse.Attr {
 	return fuse.Attr{Mode: os.ModeDir | 0111} // -rw+x
 }
 
+// Lookup performs a lookup under this node.
 func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
 	switch name {
 	case "mach_kernel", ".hidden", "._.":
@@ -52,15 +58,18 @@ func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
 	return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
 }
 
+// ReadDir reads a particular directory. Disallowed for root.
 func (*Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
 	return nil, fuse.EPERM
 }
 
+// Node is the core object representing a filesystem tree node.
 type Node struct {
 	Ipfs *core.IpfsNode
 	Nd   *mdag.Node
 }
 
+// Attr returns the attributes of a given node.
 func (s *Node) Attr() fuse.Attr {
 	if len(s.Nd.Links) > 0 {
 		return fuse.Attr{Mode: os.ModeDir | 0555}
@@ -70,6 +79,7 @@ func (s *Node) Attr() fuse.Attr {
 	return fuse.Attr{Mode: 0444, Size: uint64(size)}
 }
 
+// Lookup performs a lookup under this node.
 func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
 	nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
 	if err != nil {
@@ -80,6 +90,7 @@ func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
 	return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
 }
 
+// ReadDir reads the link structure as directory entries
 func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
 	entries := make([]fuse.Dirent, len(s.Nd.Links))
 	for i, link := range s.Nd.Links {
@@ -96,10 +107,13 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
 	return nil, fuse.ENOENT
 }
 
+// ReadAll reads the object data as file data
 func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
 	return []byte(s.Nd.Data), nil
 }
 
+// Mount mounts an IpfsNode instance at a particular path. It
+// serves until the process receives exit signals (to Unmount).
 func Mount(ipfs *core.IpfsNode, fpath string) error {
 	sigc := make(chan os.Signal, 1)
importer/importer.go
@@ -8,13 +8,17 @@ import (
 	"os"
 )
 
+// BlockSizeLimit specifies the maximum size an imported block can have.
 var BlockSizeLimit = int64(1048576) // 1 MB
 
-var SizeLimitExceeded = fmt.Errorf("object size limit exceeded")
+// ErrSizeLimitExceeded signals that a block is larger than BlockSizeLimit.
+var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded")
 
 // todo: incremental construction with an ipfs node. dumping constructed
 // objects into the datastore, to avoid buffering all in memory
 
-// size required for block construction
+// NewDagFromReader constructs a Merkle DAG from the given io.Reader.
+// size required for block construction.
 func NewDagFromReader(r io.Reader, size int64) (*dag.Node, error) {
 	// todo: block-splitting based on rabin fingerprinting
 	// todo: block-splitting with user-defined function
@@ -22,7 +26,7 @@ func NewDagFromReader(r io.Reader, size int64) (*dag.Node, error) {
 	// totally just trusts the reported size. fix later.
 	if size > BlockSizeLimit { // 1 MB limit for now.
-		return nil, SizeLimitExceeded
+		return nil, ErrSizeLimitExceeded
 	}
 
 	// we're doing it live!
@@ -32,7 +36,7 @@ func NewDagFromReader(r io.Reader, size int64) (*dag.Node, error) {
 	}
 
 	if int64(len(buf)) > BlockSizeLimit {
-		return nil, SizeLimitExceeded // lying punk.
+		return nil, ErrSizeLimitExceeded // lying punk.
 	}
 
 	root := &dag.Node{Data: buf}
@@ -40,6 +44,7 @@ func NewDagFromReader(r io.Reader, size int64) (*dag.Node, error) {
 	return root, nil
 }
 
+// NewDagFromFile constructs a Merkle DAG from the file at given path.
 func NewDagFromFile(fpath string) (*dag.Node, error) {
 	stat, err := os.Stat(fpath)
 	if err != nil {
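NewDagFromReader guards the import twice: it rejects a reported size over BlockSizeLimit up front, then re-checks the bytes actually read. A stdlib-only sketch of that guard, reusing the limit and error name from the diff but otherwise illustrative:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// BlockSizeLimit caps how much a single import may buffer (1 MB).
var BlockSizeLimit = int64(1048576)

// ErrSizeLimitExceeded signals that the input is larger than BlockSizeLimit.
var ErrSizeLimitExceeded = errors.New("object size limit exceeded")

func readBlock(r io.Reader, size int64) ([]byte, error) {
	// Trust the caller's reported size first.
	if size > BlockSizeLimit {
		return nil, ErrSizeLimitExceeded
	}
	buf, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// The reported size may have been wrong; check the real length too.
	if int64(len(buf)) > BlockSizeLimit {
		return nil, ErrSizeLimitExceeded
	}
	return buf, nil
}

func main() {
	b, err := readBlock(strings.NewReader("tiny payload"), 12)
	fmt.Println(len(b), err)
}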
merkledag/coding.go
@@ -8,6 +8,8 @@ import (
 // for now, we use a PBNode intermediate thing.
 // because native go objects are nice.
 
+// Unmarshal decodes raw data into a *Node instance.
+// The conversion uses an intermediate PBNode.
 func (n *Node) Unmarshal(encoded []byte) error {
 	var pbn PBNode
 	if err := pbn.Unmarshal(encoded); err != nil {
@@ -29,6 +31,8 @@ func (n *Node) Unmarshal(encoded []byte) error {
 	return nil
 }
 
+// MarshalTo encodes a *Node instance into a given byte slice.
+// The conversion uses an intermediate PBNode.
 func (n *Node) MarshalTo(encoded []byte) error {
 	pbn := n.getPBNode()
 	if _, err := pbn.MarshalTo(encoded); err != nil {
@@ -37,6 +41,8 @@ func (n *Node) MarshalTo(encoded []byte) error {
 	return nil
 }
 
+// Marshal encodes a *Node instance into a new byte slice.
+// The conversion uses an intermediate PBNode.
 func (n *Node) Marshal() ([]byte, error) {
 	pbn := n.getPBNode()
 	data, err := pbn.Marshal()
@@ -60,6 +66,8 @@ func (n *Node) getPBNode() *PBNode {
 	return pbn
 }
 
+// Encoded returns the encoded raw data version of a Node instance.
+// It may use a cached encoded version, unless the force flag is given.
 func (n *Node) Encoded(force bool) ([]byte, error) {
 	if n.encoded == nil || force {
 		var err error
@@ -72,6 +80,7 @@ func (n *Node) Encoded(force bool) ([]byte, error) {
 	return n.encoded, nil
 }
 
+// Decoded decodes raw data and returns a new Node instance.
 func Decoded(encoded []byte) (*Node, error) {
 	n := &Node{}
 	err := n.Unmarshal(encoded)
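The new comments describe the coding strategy: convert the native Node to an intermediate PBNode for (un)marshaling, and reuse the cached encoded bytes unless force is set. A stdlib sketch of that pattern, with JSON standing in for the protocol buffer and illustrative names:

package main

import (
	"encoding/json"
	"fmt"
)

type node struct {
	data    []byte
	encoded []byte // cached wire form
}

// wireNode is the intermediate, serialization-friendly shape.
type wireNode struct {
	Data []byte `json:"data"`
}

// Encoded returns the encoded form, rebuilding it only when missing or forced.
func (n *node) Encoded(force bool) ([]byte, error) {
	if n.encoded == nil || force {
		b, err := json.Marshal(wireNode{Data: n.data})
		if err != nil {
			return nil, err
		}
		n.encoded = b
	}
	return n.encoded, nil
}

// decoded rebuilds a node from its wire form.
func decoded(encoded []byte) (*node, error) {
	var w wireNode
	if err := json.Unmarshal(encoded, &w); err != nil {
		return nil, err
	}
	return &node{data: w.Data}, nil
}

func main() {
	n := &node{data: []byte("hello")}
	enc, _ := n.Encoded(false)
	back, _ := decoded(enc)
	fmt.Println(string(back.data), len(enc))
}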
merkledag/merkledag.go
@@ -7,11 +7,12 @@ import (
 	mh "github.com/jbenet/go-multihash"
 )
 
-// can't use []byte/Multihash for keys :(
-// so have to convert Multihash bytes to string
+// NodeMap maps u.Keys to Nodes.
+// We cannot use []byte/Multihash for keys :(
+// so have to convert Multihash bytes to string (u.Key)
 type NodeMap map[u.Key]*Node
 
-// A node in the IPFS Merkle DAG.
+// Node represents a node in the IPFS Merkle DAG.
 // nodes have opaque data and a set of navigable links.
 type Node struct {
 	Links []*Link
@@ -21,7 +22,7 @@ type Node struct {
 	encoded []byte
 }
 
-// An IPFS Merkle DAG Link
+// Link represents an IPFS Merkle DAG Link between Nodes.
 type Link struct {
 	// utf string name. should be unique per object
 	Name string // utf8
@@ -36,6 +37,7 @@ type Link struct {
 	Node *Node
 }
 
+// AddNodeLink adds a link to another node.
 func (n *Node) AddNodeLink(name string, that *Node) error {
 	s, err := that.Size()
 	if err != nil {
@@ -55,6 +57,8 @@ func (n *Node) AddNodeLink(name string, that *Node) error {
 	return nil
 }
 
+// Size returns the total size of the data addressed by node,
+// including the total sizes of references.
 func (n *Node) Size() (uint64, error) {
 	b, err := n.Encoded(false)
 	if err != nil {
@@ -68,6 +72,7 @@ func (n *Node) Size() (uint64, error) {
 	return s, nil
 }
 
+// Multihash hashes the encoded data of this node.
 func (n *Node) Multihash() (mh.Multihash, error) {
 	b, err := n.Encoded(false)
 	if err != nil {
@@ -77,18 +82,20 @@ func (n *Node) Multihash() (mh.Multihash, error) {
 	return u.Hash(b)
 }
 
+// Key returns the Multihash as a key, for maps.
 func (n *Node) Key() (u.Key, error) {
 	h, err := n.Multihash()
 	return u.Key(h), err
 }
 
-// An IPFS Merkle DAG service.
-// the root is virtual (like a forest)
-// stores nodes' data in a blockService
+// DAGService is an IPFS Merkle DAG service.
+// - the root is virtual (like a forest)
+// - stores nodes' data in a BlockService
 type DAGService struct {
 	Blocks *blocks.BlockService
 }
 
+// Put adds a node to the DAGService, storing the block in the BlockService
 func (n *DAGService) Put(nd *Node) (u.Key, error) {
 	if n == nil {
 		return "", fmt.Errorf("DAGService is nil")
@@ -107,6 +114,7 @@ func (n *DAGService) Put(nd *Node) (u.Key, error) {
 	return n.Blocks.AddBlock(b)
 }
 
+// Get retrieves a node from the DAGService, fetching the block in the BlockService
 func (n *DAGService) Get(k u.Key) (*Node, error) {
 	if n == nil {
 		return nil, fmt.Errorf("DAGService is nil")
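AddNodeLink records the child's name, hash and cumulative size, so a parent's Size covers its own data plus everything it references. A simplified stdlib sketch of that bookkeeping (crypto/sha256 stands in for multihash, and the size is computed over raw data here, whereas the real Size works over the encoded node):

package main

import (
	"crypto/sha256"
	"fmt"
)

type link struct {
	name string
	size uint64
	hash [32]byte
}

type node struct {
	links []link
	data  []byte
}

// size is the node's own payload plus the recorded sizes of its links.
func (n *node) size() uint64 {
	s := uint64(len(n.data))
	for _, l := range n.links {
		s += l.size
	}
	return s
}

// addNodeLink stores the child's name, cumulative size and hash.
func (n *node) addNodeLink(name string, that *node) {
	n.links = append(n.links, link{
		name: name,
		size: that.size(),
		hash: sha256.Sum256(that.data),
	})
}

func main() {
	child := &node{data: []byte("leaf payload")}
	root := &node{data: []byte("root")}
	root.addNodeLink("leaf", child)
	fmt.Println(root.size()) // 4 + 12 = 16
}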
path/path.go
@@ -9,12 +9,15 @@ import (
 	"strings"
 )
 
-// Path resolution for IPFS
+// Resolver provides path resolution to IPFS
 // It has a pointer to a DAGService, which is uses to resolve nodes.
 type Resolver struct {
 	DAG *merkledag.DAGService
 }
 
+// ResolvePath fetches the node for given path. It uses the first
+// path component as a hash (key) of the first node, then resolves
+// all other components walking the links, with ResolveLinks.
 func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
 	fpath = path.Clean(fpath)
@@ -27,7 +30,7 @@ func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
 	// if nothing, bail.
 	if len(parts) == 0 {
-		return nil, fmt.Errorf("ipfs path must contain at least one component.")
+		return nil, fmt.Errorf("ipfs path must contain at least one component")
 	}
 
 	// first element in the path is a b58 hash (for now)
@@ -44,6 +47,12 @@ func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
 	return s.ResolveLinks(nd, parts[1:])
 }
 
+// ResolveLinks iteratively resolves names by walking the link hierarchy.
+// Every node is fetched from the DAGService, resolving the next name.
+// Returns the last node found.
+//
+// ResolveLinks(nd, []string{"foo", "bar", "baz"})
+// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
 func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) (nd *merkledag.Node, err error) {
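ResolveLinks walks the link hierarchy one name at a time, exactly as the foo/bar/baz example in its new doc comment describes. A stdlib-only sketch of that walk over a toy tree (not the merkledag API):

package main

import "fmt"

type tnode struct {
	name  string
	links map[string]*tnode
}

// resolveLinks follows each name to the matching child, returning the last node found.
func resolveLinks(nd *tnode, names []string) (*tnode, error) {
	for _, name := range names {
		next, ok := nd.links[name]
		if !ok {
			return nil, fmt.Errorf("no link named %q under %q", name, nd.name)
		}
		nd = next
	}
	return nd, nil
}

func main() {
	baz := &tnode{name: "baz"}
	bar := &tnode{name: "bar", links: map[string]*tnode{"baz": baz}}
	foo := &tnode{name: "foo", links: map[string]*tnode{"bar": bar}}
	root := &tnode{name: "root", links: map[string]*tnode{"foo": foo}}

	nd, err := resolveLinks(root, []string{"foo", "bar", "baz"})
	fmt.Println(nd.name, err) // baz <nil>
}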
peer/peer.go
@@ -6,24 +6,30 @@ import (
 	mh "github.com/jbenet/go-multihash"
 )
 
+// PeerId is a byte slice representing the identity of a peer.
 type PeerId mh.Multihash
 
-// have to map Key (string) : *Peer because slices are not comparable.
+// PeerBook maps Key (string) : *Peer (slices are not comparable).
 type PeerBook map[u.Key]*Peer
 
+// Peer represents the identity information of an IPFS Node, including
+// a PeerId, and relevant Addresses.
 type Peer struct {
 	Id        PeerId
 	Addresses []*ma.Multiaddr
 }
 
+// Key returns the PeerId as a Key (string) for maps.
 func (p *Peer) Key() u.Key {
	return u.Key(p.Id)
 }
 
+// AddAddress adds the given Multiaddr address to Peer's addresses.
 func (p *Peer) AddAddress(a *ma.Multiaddr) {
 	p.Addresses = append(p.Addresses, a)
 }
 
+// NetAddress returns the first Multiaddr found for a given network.
 func (p *Peer) NetAddress(n string) *ma.Multiaddr {
 	for _, a := range p.Addresses {
 		ps, err := a.Protocols()
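AddAddress and NetAddress amount to an append plus a first-match scan over the peer's addresses. A stdlib-only sketch with plain strings standing in for multiaddrs and illustrative names:

package main

import "fmt"

type peerAddr struct {
	network string // e.g. "tcp", "udp"
	value   string
}

type examplePeer struct {
	id        string
	addresses []peerAddr
}

func (p *examplePeer) addAddress(a peerAddr) {
	p.addresses = append(p.addresses, a)
}

// netAddress returns the first address found for the given network, or nil.
func (p *examplePeer) netAddress(network string) *peerAddr {
	for i := range p.addresses {
		if p.addresses[i].network == network {
			return &p.addresses[i]
		}
	}
	return nil
}

func main() {
	p := &examplePeer{id: "QmExample"}
	p.addAddress(peerAddr{network: "udp", value: "1.2.3.4:4001"})
	p.addAddress(peerAddr{network: "tcp", value: "1.2.3.4:4001"})
	fmt.Println(p.netAddress("tcp").value)
}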
swarm/conn.go
@@ -9,8 +9,10 @@ import (
 	"net"
 )
 
+// ChanBuffer is the size of the buffer in the Conn Chan
 const ChanBuffer = 10
 
+// Conn represents a connection to another Peer (IPFS Node).
 type Conn struct {
 	Peer *peer.Peer
 	Addr *ma.Multiaddr
@@ -21,8 +23,11 @@ type Conn struct {
 	Incoming *msgio.Chan
 }
 
+// ConnMap maps Keys (PeerIds) to Connections.
 type ConnMap map[u.Key]*Conn
 
+// Dial connects to a particular peer, over a given network
+// Example: Dial("udp", peer)
 func Dial(network string, peer *peer.Peer) (*Conn, error) {
 	addr := peer.NetAddress(network)
 	if addr == nil {
@@ -58,6 +63,7 @@ func Dial(network string, peer *peer.Peer) (*Conn, error) {
 	return conn, nil
 }
 
+// Close closes the connection, and associated channels.
 func (s *Conn) Close() error {
 	if s.Conn == nil {
 		return fmt.Errorf("Already closed.") // already closed
swarm/swarm.go
@@ -6,6 +6,8 @@ import (
 	"sync"
 )
 
+// Message represents a packet of information sent to or received from a
+// particular Peer.
 type Message struct {
 	// To or from, depending on direction.
 	Peer *peer.Peer
@@ -14,6 +16,7 @@ type Message struct {
 	Data []byte
 }
 
+// Chan is a swam channel, which provides duplex communication and errors.
 type Chan struct {
 	Outgoing chan Message
 	Incoming chan Message
@@ -21,6 +24,7 @@ type Chan struct {
 	Close chan bool
 }
 
+// NewChan constructs a Chan instance, with given buffer size bufsize.
 func NewChan(bufsize int) *Chan {
 	return &Chan{
 		Outgoing: make(chan Message, bufsize),
@@ -30,12 +34,17 @@ func NewChan(bufsize int) *Chan {
 	}
 }
 
+// Swarm is a connection muxer, allowing connections to other peers to
+// be opened and closed, while still using the same Chan for all
+// communication. The Chan sends/receives Messages, which note the
+// destination or source Peer.
 type Swarm struct {
 	Chan      *Chan
 	conns     ConnMap
 	connsLock sync.RWMutex
 }
 
+// NewSwarm constructs a Swarm, with a Chan.
 func NewSwarm() *Swarm {
 	s := &Swarm{
 		Chan: NewChan(10),
@@ -45,6 +54,7 @@ func NewSwarm() *Swarm {
 	return s
 }
 
+// Close closes a swam.
 func (s *Swarm) Close() {
 	s.connsLock.RLock()
 	l := len(s.conns)
@@ -57,6 +67,14 @@ func (s *Swarm) Close() {
 	s.Chan.Close <- true // listener
 }
 
+// Dial connects to a peer.
+//
+// The idea is that the client of Swarm does not need to know what network
+// the connection will happen over. Swarm can use whichever it choses.
+// This allows us to use various transport protocols, do NAT traversal/relay,
+// etc. to achive connection.
+//
+// For now, Dial uses only TCP. This will be extended.
 func (s *Swarm) Dial(peer *peer.Peer) (*Conn, error) {
 	k := peer.Key()
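The Chan type documented above is a duplex pair of buffered Message channels plus a close signal. A stdlib sketch of that shape, with a toy echo loop standing in for the real muxer and illustrative names throughout:

package main

import "fmt"

type message struct {
	peer string // to or from, depending on direction
	data []byte
}

type swarmChan struct {
	outgoing chan message
	incoming chan message
	closed   chan bool
}

func newChan(bufsize int) *swarmChan {
	return &swarmChan{
		outgoing: make(chan message, bufsize),
		incoming: make(chan message, bufsize),
		closed:   make(chan bool),
	}
}

func main() {
	c := newChan(10)

	// A stand-in for the muxer: echo every outgoing message back in.
	go func() {
		for {
			select {
			case m := <-c.outgoing:
				c.incoming <- message{peer: m.peer, data: m.data}
			case <-c.closed:
				return
			}
		}
	}()

	c.outgoing <- message{peer: "QmPeer", data: []byte("ping")}
	reply := <-c.incoming
	fmt.Println(reply.peer, string(reply.data))
	c.closed <- true
}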
util/util.go
@@ -8,18 +8,21 @@ import (
 	"strings"
 )
 
+// Debug is a global flag for debugging.
 var Debug bool
 
-var NotImplementedError = fmt.Errorf("Error: not implemented yet.")
-
-// a Key for maps. It's a string (rep of a multihash).
+// ErrNotImplemented signifies a function has not been implemented yet.
+var ErrNotImplemented = fmt.Errorf("Error: not implemented yet.")
+
+// Key is a string representation of multihash for use with maps.
 type Key string
 
-// global hash function. uses multihash SHA2_256, 256 bits
+// Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits
 func Hash(data []byte) (mh.Multihash, error) {
 	return mh.Sum(data, mh.SHA2_256, -1)
 }
 
-// tilde expansion
+// TildeExpansion expands a filename, which may begin with a tilde.
 func TildeExpansion(filename string) (string, error) {
 	if strings.HasPrefix(filename, "~/") {
 		usr, err := user.Current()
@@ -33,21 +36,26 @@ func TildeExpansion(filename string) (string, error) {
 	return filename, nil
 }
 
-// Shorthand printing functions.
+// PErr is a shorthand printing function to output to Stderr.
 func PErr(format string, a ...interface{}) {
 	fmt.Fprintf(os.Stderr, format, a...)
 }
 
+// POut is a shorthand printing function to output to Stdout.
 func POut(format string, a ...interface{}) {
 	fmt.Fprintf(os.Stdout, format, a...)
 }
 
+// DErr is a shorthand debug printing function to output to Stderr.
+// Will only print if Debug is true.
 func DErr(format string, a ...interface{}) {
 	if Debug {
 		PErr(format, a...)
 	}
 }
 
+// DOut is a shorthand debug printing function to output to Stdout.
+// Will only print if Debug is true.
 func DOut(format string, a ...interface{}) {
 	if Debug {
 		POut(format, a...)
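TildeExpansion and the D* helpers are small conveniences: expand a leading ~/ via os/user, and print only when the Debug flag is set. A stdlib-only sketch of both (illustrative identifiers, not the util package itself):

package main

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
)

var debug = true

// tildeExpansion replaces a leading "~/" with the current user's home directory.
func tildeExpansion(filename string) (string, error) {
	if strings.HasPrefix(filename, "~/") {
		usr, err := user.Current()
		if err != nil {
			return "", err
		}
		return filepath.Join(usr.HomeDir, filename[2:]), nil
	}
	return filename, nil
}

// dErr prints to Stderr only when debug is enabled.
func dErr(format string, a ...interface{}) {
	if debug {
		fmt.Fprintf(os.Stderr, format, a...)
	}
}

func main() {
	p, err := tildeExpansion("~/.ipfs/config")
	dErr("expanded to %s (err: %v)\n", p, err)
}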