dms3 / go-dms3 / Commits / adbc85bf

Commit adbc85bf authored Apr 01, 2019 by Łukasz Magiera; committed by Steven Allen on Apr 17, 2019
Remove old constructor code

License: MIT
Signed-off-by: Łukasz Magiera <magik6k@gmail.com>

parent c5f887dc
Showing 2 changed files with 17 additions and 783 deletions (+17, −783)
core/builder.go  +2 −152
core/core.go     +15 −631
core/builder.go (view file @ adbc85bf)

...
@@ -7,37 +7,23 @@ import (
	"errors"
	"os"
	"syscall"
	"time"

	"go.uber.org/fx"

	"github.com/ipfs/go-ipfs/p2p"
	"github.com/ipfs/go-ipfs/provider"
	filestore "github.com/ipfs/go-ipfs/filestore"
	namesys "github.com/ipfs/go-ipfs/namesys"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"
	cidv0v1 "github.com/ipfs/go-ipfs/thirdparty/cidv0v1"
	"github.com/ipfs/go-ipfs/thirdparty/verifbs"

	bserv "github.com/ipfs/go-blockservice"
	ds "github.com/ipfs/go-datastore"
	retry "github.com/ipfs/go-datastore/retrystore"
	dsync "github.com/ipfs/go-datastore/sync"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	cfg "github.com/ipfs/go-ipfs-config"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	offroute "github.com/ipfs/go-ipfs-routing/offline"
	dag "github.com/ipfs/go-merkledag"
	metrics "github.com/ipfs/go-metrics-interface"
	resolver "github.com/ipfs/go-path/resolver"
	uio "github.com/ipfs/go-unixfs/io"
	libp2p "github.com/libp2p/go-libp2p"
	ci "github.com/libp2p/go-libp2p-crypto"
	p2phost "github.com/libp2p/go-libp2p-host"
	peer "github.com/libp2p/go-libp2p-peer"
	pstore "github.com/libp2p/go-libp2p-peerstore"
)

type BuildCfg struct {

...
@@ -245,6 +231,8 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	)

	go func() {
		// Note that some services use contexts to signal shutting down, which is
		// very suboptimal. This needs to be here until that's addressed somehow
		<-ctx.Done()
		app.Stop(context.Background())
	}()

...
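For readers following the refactor: the hunk above ties the node's lifetime to the caller's context by stopping an fx application once that context is cancelled. The snippet below is a standalone sketch of that general go.uber.org/fx pattern, not code from this commit; the newClock provider and the printed messages are hypothetical stand-ins for the node's real components.

package main

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/fx"
)

// newClock is a hypothetical provider, standing in for real node components.
func newClock() *time.Ticker { return time.NewTicker(time.Second) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	app := fx.New(
		fx.Provide(newClock),
		fx.Invoke(func(t *time.Ticker) { fmt.Println("components wired") }),
	)

	// Same shape as the hunk above: stop the app once the caller's context is done.
	go func() {
		<-ctx.Done()
		app.Stop(context.Background())
	}()

	// app.Err() surfaces dependency-graph construction errors before starting.
	if err := app.Err(); err != nil {
		fmt.Println("construction failed:", err)
		return
	}
	if err := app.Start(ctx); err != nil {
		fmt.Println("start failed:", err)
	}
}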
@@ -252,25 +240,6 @@ func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	n.IsOnline = cfg.Online
	n.app = app

	/* n := &IpfsNode{
		IsOnline:  cfg.Online,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstoremem.NewPeerstore(),
	}

	n.RecordValidator = record.NamespacedValidator{
		"pk":   record.PublicKeyValidator{},
		"ipns": ipns.Validator{KeyBook: n.Peerstore},
	}
	*/

	// TODO: port to lifetimes
	// n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	/*if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}*/

	if app.Err() != nil {
		return nil, app.Err()
	}

...
@@ -295,122 +264,3 @@ func isTooManyFDError(err error) bool {
	return false
}

func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local identity
	if err := n.loadID(); err != nil {
		return err
	}

	// load the private key (if present)
	if err := n.loadPrivateKey(); err != nil {
		return err
	}

	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	// hash security
	bs := bstore.NewBlockstore(rds)
	bs = &verifbs.VerifBS{Blockstore: bs}

	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// TEMP: setting global sharding switch here
	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permanent {
		opts.HasBloomFilterSize = 0
	}

	if !cfg.NilRepo {
		bs, err = bstore.CachedBlockstore(ctx, bs, opts)
		if err != nil {
			return err
		}
	}

	bs = bstore.NewIdStore(bs)
	bs = cidv0v1.NewBlockstore(bs)

	n.BaseBlocks = bs
	n.GCLocker = bstore.NewGCLocker()
	n.Blockstore = bstore.NewGCBlockstore(bs, n.GCLocker)

	if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled {
		// hash security
		n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager())
		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
		n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore}
	}

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	hostOption := cfg.Host
	if cfg.DisableEncryptedConnections {
		innerHostOption := hostOption
		hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
		}
		// TODO: shouldn't this be Errorf to guarantee visibility?
		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
You will not be able to connect to any nodes configured to use encrypted connections`)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
		n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)
		n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), 0)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = resolver.NewBasicResolver(n.DAG)

	// Provider
	queue, err := provider.NewQueue(ctx, "provider-v1", n.Repo.Datastore())
	if err != nil {
		return err
	}
	n.Provider = provider.NewProvider(ctx, queue, n.Routing)

	if cfg.Online {
		if err := n.startLateOnlineServices(ctx); err != nil {
			return err
		}
	}

	return n.loadFilesRoot()
}
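The setupNode body removed above built the node's blockstore stack step by step inside one function. Under the fx approach this commit moves toward, each such step typically becomes a small constructor that fx wires together by type. The sketch below illustrates only that decomposition; the types and provider names are simplified stand-ins, not go-ipfs APIs or the project's actual module layout.

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Simplified stand-in types, used only to show how providers chain.
type Datastore struct{}
type Blockstore struct{ DS *Datastore }
type GCBlockstore struct{ BS *Blockstore }

func newDatastore() *Datastore                     { return &Datastore{} }
func newBlockstore(ds *Datastore) *Blockstore      { return &Blockstore{DS: ds} }
func newGCBlockstore(bs *Blockstore) *GCBlockstore { return &GCBlockstore{BS: bs} }

func main() {
	app := fx.New(
		// fx resolves the construction order from the constructors' signatures,
		// replacing the hand-written sequencing of the removed setupNode.
		fx.Provide(newDatastore, newBlockstore, newGCBlockstore),
		fx.Invoke(func(gcbs *GCBlockstore) { fmt.Println("blockstore stack ready") }),
	)
	if err := app.Err(); err != nil {
		panic(err)
	}
}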
core/core.go (view file @ adbc85bf)

...
@@ -10,9 +10,7 @@ interfaces and how core/... fits into the bigger IPFS picture, see:
package core

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

...
@@ -24,36 +22,32 @@ import (
	version "github.com/ipfs/go-ipfs"
	rp "github.com/ipfs/go-ipfs/exchange/reprovide"
	filestore "github.com/ipfs/go-ipfs/filestore"
	mount "github.com/ipfs/go-ipfs/fuse/mount"
	namesys "github.com/ipfs/go-ipfs/namesys"
	"github.com/ipfs/go-ipfs/filestore"
	"github.com/ipfs/go-ipfs/fuse/mount"
	"github.com/ipfs/go-ipfs/namesys"
	ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher"
	p2p "github.com/ipfs/go-ipfs/p2p"
	pin "github.com/ipfs/go-ipfs/pin"
	provider "github.com/ipfs/go-ipfs/provider"
	repo "github.com/ipfs/go-ipfs/repo"
	"github.com/ipfs/go-ipfs/p2p"
	"github.com/ipfs/go-ipfs/pin"
	"github.com/ipfs/go-ipfs/provider"
	"github.com/ipfs/go-ipfs/repo"

	bitswap "github.com/ipfs/go-bitswap"
	bsnet "github.com/ipfs/go-bitswap/network"
	bserv "github.com/ipfs/go-blockservice"
	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	config "github.com/ipfs/go-ipfs-config"
	exchange "github.com/ipfs/go-ipfs-exchange-interface"
	nilrouting "github.com/ipfs/go-ipfs-routing/none"
	u "github.com/ipfs/go-ipfs-util"
	ipld "github.com/ipfs/go-ipld-format"
	logging "github.com/ipfs/go-log"
	merkledag "github.com/ipfs/go-merkledag"
	mfs "github.com/ipfs/go-mfs"
	resolver "github.com/ipfs/go-path/resolver"
	"github.com/ipfs/go-merkledag"
	"github.com/ipfs/go-mfs"
	"github.com/ipfs/go-path/resolver"
	ft "github.com/ipfs/go-unixfs"
	goprocess "github.com/jbenet/goprocess"
	libp2p "github.com/libp2p/go-libp2p"
	"github.com/jbenet/goprocess"
	"github.com/libp2p/go-libp2p"
	autonat "github.com/libp2p/go-libp2p-autonat-svc"
	circuit "github.com/libp2p/go-libp2p-circuit"
	connmgr "github.com/libp2p/go-libp2p-connmgr"
	ic "github.com/libp2p/go-libp2p-crypto"
	p2phost "github.com/libp2p/go-libp2p-host"
	ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr"

...
@@ -62,17 +56,13 @@ import (
	metrics "github.com/libp2p/go-libp2p-metrics"
	peer "github.com/libp2p/go-libp2p-peer"
	pstore "github.com/libp2p/go-libp2p-peerstore"
	pnet "github.com/libp2p/go-libp2p-pnet"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	psrouter "github.com/libp2p/go-libp2p-pubsub-router"
	quic "github.com/libp2p/go-libp2p-quic-transport"
	record "github.com/libp2p/go-libp2p-record"
	routing "github.com/libp2p/go-libp2p-routing"
	rhelpers "github.com/libp2p/go-libp2p-routing-helpers"
	discovery "github.com/libp2p/go-libp2p/p2p/discovery"
	"github.com/libp2p/go-libp2p/p2p/discovery"
	p2pbhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
	identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify"
	mafilter "github.com/libp2p/go-maddr-filter"
	smux "github.com/libp2p/go-stream-muxer"
	ma "github.com/multiformats/go-multiaddr"

...
@@ -153,218 +143,6 @@ type Mounts struct {
	Ipns mount.Mount
}
func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption, pubsub, ipnsps, mplex bool) error {
	if n.PeerHost != nil { // already online.
		return errors.New("node already online")
	}

	if n.PrivateKey == nil {
		return fmt.Errorf("private key not available")
	}

	// get undialable addrs from config
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	var libp2pOpts []libp2p.Option
	for _, s := range cfg.Swarm.AddrFilters {
		f, err := mamask.NewMask(s)
		if err != nil {
			return fmt.Errorf("incorrectly formatted address filter in config: %s", s)
		}
		libp2pOpts = append(libp2pOpts, libp2p.FilterAddresses(f))
	}

	if !cfg.Swarm.DisableBandwidthMetrics {
		// Set reporter
		n.Reporter = metrics.NewBandwidthCounter()
		libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(n.Reporter))
	}

	swarmkey, err := n.Repo.SwarmKey()
	if err != nil {
		return err
	}

	if swarmkey != nil {
		protec, err := pnet.NewProtector(bytes.NewReader(swarmkey))
		if err != nil {
			return fmt.Errorf("failed to configure private network: %s", err)
		}
		n.PNetFingerprint = protec.Fingerprint()
		go func() {
			t := time.NewTicker(30 * time.Second)
			<-t.C // swallow one tick
			for {
				select {
				case <-t.C:
					if ph := n.PeerHost; ph != nil {
						if len(ph.Network().Peers()) == 0 {
							log.Warning("We are in private network and have no peers.")
							log.Warning("This might be configuration mistake.")
						}
					}
					//case <-n.Process().Closing():
					//	t.Stop()
					//	return
				}
			}
		}()

		libp2pOpts = append(libp2pOpts, libp2p.PrivateNetwork(protec))
	}

	addrsFactory, err := makeAddrsFactory(cfg.Addresses)
	if err != nil {
		return err
	}
	if !cfg.Swarm.DisableRelay {
		addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs)
	}
	libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrsFactory))

	connm, err := constructConnMgr(cfg.Swarm.ConnMgr)
	if err != nil {
		return err
	}
	libp2pOpts = append(libp2pOpts, libp2p.ConnectionManager(connm))

	libp2pOpts = append(libp2pOpts, makeSmuxTransportOption(mplex))

	if !cfg.Swarm.DisableNatPortMap {
		libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
	}

	// disable the default listen addrs
	libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)

	if cfg.Swarm.DisableRelay {
		// Enabled by default.
		libp2pOpts = append(libp2pOpts, libp2p.DisableRelay())
	} else {
		relayOpts := []circuit.RelayOpt{circuit.OptDiscovery}
		if cfg.Swarm.EnableRelayHop {
			relayOpts = append(relayOpts, circuit.OptHop)
		}
		libp2pOpts = append(libp2pOpts, libp2p.EnableRelay(relayOpts...))
	}

	// explicitly enable the default transports
	libp2pOpts = append(libp2pOpts, libp2p.DefaultTransports)

	if cfg.Experimental.QUIC {
		libp2pOpts = append(libp2pOpts, libp2p.Transport(quic.NewTransport))
	}

	// enable routing
	libp2pOpts = append(libp2pOpts, libp2p.Routing(func(h p2phost.Host) (routing.PeerRouting, error) {
		r, err := routingOption(ctx, h, n.Repo.Datastore(), n.RecordValidator)
		n.Routing = r
		return r, err
	}))

	// enable autorelay
	if cfg.Swarm.EnableAutoRelay {
		libp2pOpts = append(libp2pOpts, libp2p.EnableAutoRelay())
	}

	peerhost, err := hostOption(ctx, n.Identity, n.Peerstore, libp2pOpts...)
	if err != nil {
		return err
	}

	n.PeerHost = peerhost

	if err := n.startOnlineServicesWithHost(ctx, routingOption, pubsub, ipnsps); err != nil {
		return err
	}

	// Ok, now we're ready to listen.
	if err := startListening(n.PeerHost, cfg); err != nil {
		return err
	}

	n.P2P = p2p.NewP2P(n.Identity, n.PeerHost, n.Peerstore)

	// setup local discovery
	if do != nil {
		service, err := do(ctx, n.PeerHost)
		if err != nil {
			log.Error("mdns error: ", err)
		} else {
			service.RegisterNotifee(n)
			n.Discovery = service
		}
	}

	return n.Bootstrap(DefaultBootstrapConfig)
}
func constructConnMgr(cfg config.ConnMgr) (ifconnmgr.ConnManager, error) {
	switch cfg.Type {
	case "":
		// 'default' value is the basic connection manager
		return connmgr.NewConnManager(config.DefaultConnMgrLowWater, config.DefaultConnMgrHighWater, config.DefaultConnMgrGracePeriod), nil
	case "none":
		return nil, nil
	case "basic":
		grace, err := time.ParseDuration(cfg.GracePeriod)
		if err != nil {
			return nil, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err)
		}

		return connmgr.NewConnManager(cfg.LowWater, cfg.HighWater, grace), nil
	default:
		return nil, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Type)
	}
}
func (n *IpfsNode) startLateOnlineServices(ctx context.Context) error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// Provider
	n.Provider.Run()

	// Reprovider
	var keyProvider rp.KeyChanFunc

	switch cfg.Reprovider.Strategy {
	case "all":
		fallthrough
	case "":
		keyProvider = rp.NewBlockstoreProvider(n.Blockstore)
	case "roots":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, true)
	case "pinned":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, false)
	default:
		return fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy)
	}
	n.Reprovider = rp.NewReprovider(ctx, n.Routing, keyProvider)

	reproviderInterval := kReprovideFrequency
	if cfg.Reprovider.Interval != "" {
		dur, err := time.ParseDuration(cfg.Reprovider.Interval)
		if err != nil {
			return err
		}

		reproviderInterval = dur
	}

	go n.Reprovider.Run(reproviderInterval)

	return nil
}
func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) {
	var annAddrs []ma.Multiaddr
	for _, addr := range cfg.Announce {

...
@@ -453,200 +231,6 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option {
	return libp2p.ChainOptions(opts...)
}
func setupDiscoveryOption(d config.Discovery) DiscoveryOption {
	if d.MDNS.Enabled {
		return func(ctx context.Context, h p2phost.Host) (discovery.Service, error) {
			if d.MDNS.Interval == 0 {
				d.MDNS.Interval = 5
			}
			return discovery.NewMdnsService(ctx, h, time.Duration(d.MDNS.Interval)*time.Second, discovery.ServiceTag)
		}
	}
	return nil
}
// HandlePeerFound attempts to connect to peer from `PeerInfo`, if it fails
// logs a warning log.
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
// startOnlineServicesWithHost is the set of services which need to be
// initialized with the host and _before_ we start listening.
func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, routingOption RoutingOption, enablePubsub bool, enableIpnsps bool) error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if cfg.Swarm.EnableAutoNATService {
		var opts []libp2p.Option
		if cfg.Experimental.QUIC {
			opts = append(opts, libp2p.DefaultTransports, libp2p.Transport(quic.NewTransport))
		}

		svc, err := autonat.NewAutoNATService(ctx, n.PeerHost, opts...)
		if err != nil {
			return err
		}
		n.AutoNAT = svc
	}

	if enablePubsub || enableIpnsps {
		var service *pubsub.PubSub

		var pubsubOptions []pubsub.Option
		if cfg.Pubsub.DisableSigning {
			pubsubOptions = append(pubsubOptions, pubsub.WithMessageSigning(false))
		}

		if cfg.Pubsub.StrictSignatureVerification {
			pubsubOptions = append(pubsubOptions, pubsub.WithStrictSignatureVerification(true))
		}

		switch cfg.Pubsub.Router {
		case "":
			fallthrough
		case "floodsub":
			service, err = pubsub.NewFloodSub(ctx, n.PeerHost, pubsubOptions...)
		case "gossipsub":
			service, err = pubsub.NewGossipSub(ctx, n.PeerHost, pubsubOptions...)
		default:
			err = fmt.Errorf("Unknown pubsub router %s", cfg.Pubsub.Router)
		}
		if err != nil {
			return err
		}
		n.PubSub = service
	}

	// this code is necessary just for tests: mock network constructions
	// ignore the libp2p constructor options that actually construct the routing!
	if n.Routing == nil {
		r, err := routingOption(ctx, n.PeerHost, n.Repo.Datastore(), n.RecordValidator)
		if err != nil {
			return err
		}
		n.Routing = r
		n.PeerHost = rhost.Wrap(n.PeerHost, n.Routing)
	}

	// TODO: I'm not a fan of type assertions like this but the
	// `RoutingOption` system doesn't currently provide access to the
	// IpfsNode.
	//
	// Ideally, we'd do something like:
	//
	// 1. Add some fancy method to introspect into tiered routers to extract
	//    things like the pubsub router or the DHT (complicated, messy,
	//    probably not worth it).
	// 2. Pass the IpfsNode into the RoutingOption (would also remove the
	//    PSRouter case below.
	// 3. Introduce some kind of service manager? (my personal favorite but
	//    that requires a fair amount of work).
	if dht, ok := n.Routing.(*dht.IpfsDHT); ok {
		n.DHT = dht
	}

	if enableIpnsps {
		n.PSRouter = psrouter.NewPubsubValueStore(
			ctx,
			n.PeerHost,
			n.Routing,
			n.PubSub,
			n.RecordValidator,
		)
		n.Routing = rhelpers.Tiered{
			Routers: []routing.IpfsRouting{
				// Always check pubsub first.
				&rhelpers.Compose{
					ValueStore: &rhelpers.LimitedValueStore{
						ValueStore: n.PSRouter,
						Namespaces: []string{"ipns"},
					},
				},
				n.Routing,
			},
			Validator: n.RecordValidator,
		}
	}

	// setup exchange service
	bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing)
	n.Exchange = bitswap.New(ctx, bitswapNetwork, n.Blockstore)

	size, err := n.getCacheSize()
	if err != nil {
		return err
	}

	// setup name system
	n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)

	// setup ipns republishing
	return n.setupIpnsRepublisher()
}
// getCacheSize returns cache life and cache size
func (n *IpfsNode) getCacheSize() (int, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return 0, err
	}

	cs := cfg.Ipns.ResolveCacheSize
	if cs == 0 {
		cs = DefaultIpnsCacheSize
	}
	if cs < 0 {
		return 0, fmt.Errorf("cannot specify negative resolve cache size")
	}
	return cs, nil
}
func (n *IpfsNode) setupIpnsRepublisher() error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	n.IpnsRepub = ipnsrp.NewRepublisher(n.Namesys, n.Repo.Datastore(), n.PrivateKey, n.Repo.Keystore())

	if cfg.Ipns.RepublishPeriod != "" {
		d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err)
		}

		if !u.Debug && (d < time.Minute || d > (time.Hour*24)) {
			return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d)
		}

		n.IpnsRepub.Interval = d
	}

	if cfg.Ipns.RecordLifetime != "" {
		d, err := time.ParseDuration(cfg.Ipns.RecordLifetime)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err)
		}

		n.IpnsRepub.RecordLifetime = d
	}

	//n.Process().Go(n.IpnsRepub.Run)
	return nil
}
// Close calls Close() on the App object
func (n *IpfsNode) Close() error {
	return n.app.Stop(n.ctx)

...
@@ -660,68 +244,6 @@ func (n *IpfsNode) Context() context.Context {
	return n.ctx
}
// teardown closes owned children. If any errors occur, this function returns
// the first error.
func (n *IpfsNode) teardown() error {
	log.Debug("core is shutting down...")

	// owned objects are closed in this teardown to ensure that they're closed
	// regardless of which constructor was used to add them to the node.
	var closers []io.Closer

	// NOTE: The order that objects are added(closed) matters, if an object
	// needs to use another during its shutdown/cleanup process, it should be
	// closed before that other object
	if n.Provider != nil {
		closers = append(closers, n.Provider)
	}

	if n.FilesRoot != nil {
		closers = append(closers, n.FilesRoot)
	}

	if n.Exchange != nil {
		closers = append(closers, n.Exchange)
	}

	if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() {
		//TODO
		closers = append(closers, mount.Closer(n.Mounts.Ipfs))
	}
	if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() {
		// TODO
		closers = append(closers, mount.Closer(n.Mounts.Ipns))
	}

	if n.DHT != nil {
		closers = append(closers, n.DHT.Process())
	}

	if n.Blocks != nil {
		closers = append(closers, n.Blocks)
	}

	if n.Bootstrapper != nil {
		closers = append(closers, n.Bootstrapper)
	}

	if n.PeerHost != nil {
		closers = append(closers, n.PeerHost)
	}

	// Repo closed last, most things need to preserve state here
	closers = append(closers, n.Repo)

	var errs []error
	for _, closer := range closers {
		if err := closer.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}
// Bootstrap will set and call the IpfsNodes bootstrap function.
func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error {
	// TODO what should return value be when in offlineMode?

...
@@ -751,80 +273,6 @@ func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error {
	return err
}
func (n *IpfsNode) loadID() error {
	if n.Identity != "" {
		return errors.New("identity already loaded")
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	cid := cfg.Identity.PeerID
	if cid == "" {
		return errors.New("identity was not set in config (was 'ipfs init' run?)")
	}
	if len(cid) == 0 {
		return errors.New("no peer ID in config! (was 'ipfs init' run?)")
	}

	id, err := peer.IDB58Decode(cid)
	if err != nil {
		return fmt.Errorf("peer ID invalid: %s", err)
	}

	n.Identity = id
	return nil
}
// GetKey will return a key from the Keystore with name `name`.
func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) {
	if name == "self" {
		if n.PrivateKey == nil {
			return nil, fmt.Errorf("private key not available")
		}
		return n.PrivateKey, nil
	} else {
		return n.Repo.Keystore().Get(name)
	}
}
// loadPrivateKey loads the private key *if* available
func (n *IpfsNode) loadPrivateKey() error {
	if n.Identity == "" || n.Peerstore == nil {
		return errors.New("loaded private key out of order")
	}

	if n.PrivateKey != nil {
		log.Warning("private key already loaded")
		return nil
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if cfg.Identity.PrivKey == "" {
		return nil
	}

	sk, err := loadPrivateKey(&cfg.Identity, n.Identity)
	if err != nil {
		return err
	}

	if err := n.Peerstore.AddPrivKey(n.Identity, sk); err != nil {
		return err
	}
	if err := n.Peerstore.AddPubKey(n.Identity, sk.GetPublic()); err != nil {
		return err
	}

	n.PrivateKey = sk
	return nil
}
func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) {
	cfg, err := n.Repo.Config()
	if err != nil {

...
@@ -838,70 +286,6 @@ func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) {
	return toPeerInfos(parsed), nil
}
func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
	pf := func(ctx context.Context, c cid.Cid) error {
		return n.Repo.Datastore().Put(dsk, c.Bytes())
	}

	var nd *merkledag.ProtoNode
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
		nd = ft.EmptyDirNode()
		err := n.DAG.Add(n.Context(), nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
		c, err := cid.Cast(val)
		if err != nil {
			return err
		}

		rnd, err := n.DAG.Get(n.Context(), c)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}

		pbnd, ok := rnd.(*merkledag.ProtoNode)
		if !ok {
			return merkledag.ErrNotProtobuf
		}

		nd = pbnd
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}
func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) {
	sk, err := cfg.DecodePrivateKey("passphrase todo!")
	if err != nil {
		return nil, err
	}

	id2, err := peer.IDFromPrivateKey(sk)
	if err != nil {
		return nil, err
	}

	if id2 != id {
		return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2)
	}

	return sk, nil
}
func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
	var listen []ma.Multiaddr
	for _, addr := range cfg.Addresses.Swarm {

...