/*
Package core implements the IpfsNode object and related methods.

Packages underneath core/ provide a (relatively) stable, low-level API
to carry out most IPFS-related tasks.  For more details on the other
interfaces and how core/... fits into the bigger IPFS picture, see:

  $ godoc github.com/ipfs/go-ipfs
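
A rough usage sketch (NewNode and BuildCfg live in builder.go, fsrepo in
repo/fsrepo; error handling is elided to comments):

	ctx := context.Background()
	r, err := fsrepo.Open(repoPath) // e.g. the expanded ~/.ipfs path
	if err != nil {
		// handle error
	}
	node, err := NewNode(ctx, &BuildCfg{Repo: r, Online: true})
	if err != nil {
		// handle error
	}
	fmt.Println("node ID:", node.Identity.Pretty())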
*/
package core

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"time"

	version "github.com/ipfs/go-ipfs"
	rp "github.com/ipfs/go-ipfs/exchange/reprovide"
	filestore "github.com/ipfs/go-ipfs/filestore"
	mount "github.com/ipfs/go-ipfs/fuse/mount"
	namesys "github.com/ipfs/go-ipfs/namesys"
	ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher"
	p2p "github.com/ipfs/go-ipfs/p2p"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"

	u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
	psrouter "gx/ipfs/QmPksYjuFjt2M63ufuDYEFdwVAmFcDGJgoED8T8dwfcwED/go-libp2p-pubsub-router"
	ic "gx/ipfs/QmPvyPwuCgJ7pDmrKDxRtsScJgBaM5h4EpRL2qQJsmXf4n/go-libp2p-crypto"
	libp2p "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p"
	discovery "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p/p2p/discovery"
	p2pbhost "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p/p2p/host/basic"
	rhost "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p/p2p/host/routed"
	identify "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p/p2p/protocol/identify"
	ping "gx/ipfs/QmQiaskfWpdRJ4x2spEQjPFTUkEB87KDYu91qnNYBqvvcX/go-libp2p/p2p/protocol/ping"
	ft "gx/ipfs/QmQjEpRiwVvtowhq69dAtB4jhioPVFXiCcWZm9Sfgn7eqc/go-unixfs"
	peer "gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
	connmgr "gx/ipfs/QmRAvQVfAFfzqwk1VBYJMci9SrqJcMULNJYeZzzprDRn5V/go-libp2p-connmgr"
	p2phost "gx/ipfs/QmRRCrNRs4qxotXx7WJT6SpCvSNEhXvyBcVjXY2K71pcjE/go-libp2p-host"
	logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
	merkledag "gx/ipfs/QmRiQCJZ91B7VNmLvA6sxzDuBJGSojS3uXHHVuNr3iueNZ/go-merkledag"
	routing "gx/ipfs/QmS4niovD1U6pRjUBXivr1zvvLBqiTKbERjFo994JU7oQS/go-libp2p-routing"
	goprocess "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
	mamask "gx/ipfs/QmSMZwvs3n4GBikZ7hKzT17c3bk65FmyZo2JqtJ16swqCv/multiaddr-filter"
	mafilter "gx/ipfs/QmSW4uNHbvQia8iZDXzbwjiyHQtnyo9aFqfQAMasj3TJ6Y/go-maddr-filter"
	dht "gx/ipfs/QmTRj8mj6X5LtjVochPPSNX6MTbJ6iVojcfakWJKG13re7/go-libp2p-kad-dht"
	dhtopts "gx/ipfs/QmTRj8mj6X5LtjVochPPSNX6MTbJ6iVojcfakWJKG13re7/go-libp2p-kad-dht/opts"
	bitswap "gx/ipfs/QmTtmrK4iiM3MxWNA3pvbM9ekQiGZAiFyo57GP8B9FFgtz/go-bitswap"
	bsnet "gx/ipfs/QmTtmrK4iiM3MxWNA3pvbM9ekQiGZAiFyo57GP8B9FFgtz/go-bitswap/network"
	ifconnmgr "gx/ipfs/QmUPz6FCzCCU7sTY9Sore5NGSUA8YSF2yMkLPjDFq7wGqD/go-libp2p-interface-connmgr"
	floodsub "gx/ipfs/QmVFB6rGJEZnzJrQwoEhbyDs1tA8RVsQvCS6JXpuw9Xtta/go-libp2p-floodsub"
	ds "gx/ipfs/QmVG5gxteQNEMhrS8prJSmU2C9rebtFuTd3SYZ5kE3YZ5k/go-datastore"
	exchange "gx/ipfs/QmWw71Mz9PXKgYG8ZfTYN7Ax2Zm48Eurbne3wC2y7CKmLz/go-ipfs-exchange-interface"
	ipld "gx/ipfs/QmX5CsuHyVZeTLxgRSYkgLSDQKb9UjE8xnhQzCEJWWWFsC/go-ipld-format"
	smux "gx/ipfs/QmY9JXR3FupnYAYJWK9aMr9bCpqWKcToQ1tz8DVGTrHpHw/go-stream-muxer"
	ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"
	cid "gx/ipfs/QmZFbDTY9jfSBms2MchvYM9oYRbAF19K7Pby47yDBfpPrb/go-cid"
	pnet "gx/ipfs/QmZaQ3K9PRd5sYYoG1xbTGPtd3N7TYiKBRmcBUTsx8HVET/go-libp2p-pnet"
	bserv "gx/ipfs/QmbSB9Uh3wVgmiCb1fAb8zuC3qAE6un4kd1jvatUurfAmB/go-blockservice"
	circuit "gx/ipfs/Qmbc6WjgbkaYhPw5dd6X2RLGiJz854dPPKkyZf52vE2PTS/go-libp2p-circuit"
	bstore "gx/ipfs/QmcmpX42gtDv1fz24kau4wjS9hfwWj5VexWBKgGnWzsyag/go-ipfs-blockstore"
	yamux "gx/ipfs/QmcsgrV3nCAKjiHKZhKVXWc4oY3WBECJCqahXEMpHeMrev/go-smux-yamux"
	rhelpers "gx/ipfs/Qmd22J9AnyR3QUH56WPXkrTbCNkQ4x7TWWinHcZBhQkgVw/go-libp2p-routing-helpers"
	nilrouting "gx/ipfs/Qmd45r5jHr1PKMNQqifnbZy1ZQwHdtXUDJFamUEvUJE544/go-ipfs-routing/none"
	offroute "gx/ipfs/Qmd45r5jHr1PKMNQqifnbZy1ZQwHdtXUDJFamUEvUJE544/go-ipfs-routing/offline"
	record "gx/ipfs/QmdHb9aBELnQKTVhvvA3hsQbRgUAwsWUzBP2vZ6Y5FBYvE/go-libp2p-record"
	"gx/ipfs/QmdMPBephdLYNESkruDX2hcDTgFYhoCt4LimWhgnomSdV2/go-path/resolver"
	mfs "gx/ipfs/QmdghKsSDa2AD1kC4qYRnVYWqZecdSBRZjeXRdhMYYhafj/go-mfs"
	metrics "gx/ipfs/QmdhwKw53CTV8EJSAsR1bpmMT5kXiWBgeAyv1EXeeDiXqR/go-libp2p-metrics"
	mplex "gx/ipfs/QmdiBZzwGtN2yHJrWD9ojQ7ASS48nv7BcojWLkYd1ZtrV2/go-smux-multiplex"
	config "gx/ipfs/Qmdpmn9dQFSFeCfwpaZdbeYSFxJmbtSTArU4kMZByjmPAJ/go-ipfs-config"
	pstore "gx/ipfs/QmeKD8YT7887Xu6Z86iZmpYNxrLogJexqxEugSmaf14k64/go-libp2p-peerstore"
)

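// IpnsValidatorTag is the namespace tag under which IPNS records are
// validated in the routing system.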
const IpnsValidatorTag = "ipns"

const kReprovideFrequency = time.Hour * 12
const discoveryConnTimeout = time.Second * 30

var log = logging.Logger("core")

type mode int

const (
	// zero value is not a valid mode, must be explicitly set
	localMode mode = iota
	offlineMode
	onlineMode
)

func init() {
	identify.ClientVersion = "go-ipfs/" + version.CurrentVersionNumber + "/" + version.CurrentCommit
}

// IpfsNode is the IPFS Core module. It represents an IPFS instance.
type IpfsNode struct {

	// Self
	Identity peer.ID // the local node's identity

	Repo repo.Repo

	// Local node
	Pinning         pin.Pinner // the pinning manager
	Mounts          Mounts     // current mount state, if any.
	PrivateKey      ic.PrivKey // the local node's private Key
	PNetFingerprint []byte     // fingerprint of private network

	// Services
	Peerstore       pstore.Peerstore     // storage for other Peer instances
	Blockstore      bstore.GCBlockstore  // the block store (lower level)
	Filestore       *filestore.Filestore // the filestore blockstore
	BaseBlocks      bstore.Blockstore    // the raw blockstore, no filestore wrapping
	GCLocker        bstore.GCLocker      // the locker used to protect the blockstore during gc
	Blocks          bserv.BlockService   // the block service, get/add blocks.
	DAG             ipld.DAGService      // the merkle dag service, get/add objects.
	Resolver        *resolver.Resolver   // the path resolution system
	Reporter        metrics.Reporter
	Discovery       discovery.Service
	FilesRoot       *mfs.Root
	RecordValidator record.Validator

	// Online
	PeerHost     p2phost.Host        // the network host (server+client)
	Bootstrapper io.Closer           // the periodic bootstrapper
	Routing      routing.IpfsRouting // the routing system. recommend ipfs-dht
	Exchange     exchange.Interface  // the block exchange + strategy (bitswap)
	Namesys      namesys.NameSystem  // the name system, resolves paths to hashes
	Ping         *ping.PingService
	Reprovider   *rp.Reprovider // the value reprovider system
	IpnsRepub    *ipnsrp.Republisher

	Floodsub *floodsub.PubSub
	PSRouter *psrouter.PubsubValueStore
	DHT      *dht.IpfsDHT
	P2P      *p2p.P2P

	proc goprocess.Process
	ctx  context.Context

	mode         mode
	localModeSet bool
}

// Mounts defines what the node's mount state is. This should
// perhaps be moved to the daemon or mount. It's here because
// it needs to be accessible across daemon requests.
type Mounts struct {
	Ipfs mount.Mount
	Ipns mount.Mount
}

func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption, pubsub, ipnsps, mplex bool) error {
	if n.PeerHost != nil { // already online.
		return errors.New("node already online")
	}

	// load private key
	if err := n.LoadPrivateKey(); err != nil {
		return err
	}

	// get undialable addrs from config
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	var libp2pOpts []libp2p.Option
	for _, s := range cfg.Swarm.AddrFilters {
		f, err := mamask.NewMask(s)
		if err != nil {
			return fmt.Errorf("incorrectly formatted address filter in config: %s", s)
		}
		libp2pOpts = append(libp2pOpts, libp2p.FilterAddresses(f))
	}

	if !cfg.Swarm.DisableBandwidthMetrics {
		// Set reporter
		n.Reporter = metrics.NewBandwidthCounter()
		libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(n.Reporter))
	}

	swarmkey, err := n.Repo.SwarmKey()
	if err != nil {
		return err
	}

	if swarmkey != nil {
		protec, err := pnet.NewProtector(bytes.NewReader(swarmkey))
		if err != nil {
			return fmt.Errorf("failed to configure private network: %s", err)
		}
		n.PNetFingerprint = protec.Fingerprint()
		go func() {
			t := time.NewTicker(30 * time.Second)
			<-t.C // swallow one tick
			for {
				select {
				case <-t.C:
					if ph := n.PeerHost; ph != nil {
						if len(ph.Network().Peers()) == 0 {
							log.Warning("We are in a private network and have no peers.")
							log.Warning("This might be a configuration mistake.")
						}
					}
				case <-n.Process().Closing():
					t.Stop()
					return
				}
			}
		}()

		libp2pOpts = append(libp2pOpts, libp2p.PrivateNetwork(protec))
	}

	addrsFactory, err := makeAddrsFactory(cfg.Addresses)
	if err != nil {
		return err
	}
	if !cfg.Swarm.DisableRelay {
		addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs)
	}
	libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrsFactory))

	connm, err := constructConnMgr(cfg.Swarm.ConnMgr)
	if err != nil {
		return err
	}
	libp2pOpts = append(libp2pOpts, libp2p.ConnectionManager(connm))

	libp2pOpts = append(libp2pOpts, makeSmuxTransportOption(mplex))

	if !cfg.Swarm.DisableNatPortMap {
		libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
	}
	if !cfg.Swarm.DisableRelay {
		var opts []circuit.RelayOpt
		if cfg.Swarm.EnableRelayHop {
			opts = append(opts, circuit.OptHop)
		}
		libp2pOpts = append(libp2pOpts, libp2p.EnableRelay(opts...))
	}

	peerhost, err := hostOption(ctx, n.Identity, n.Peerstore, libp2pOpts...)

	if err != nil {
		return err
	}

	if err := n.startOnlineServicesWithHost(ctx, peerhost, routingOption, pubsub, ipnsps); err != nil {
		return err
	}

	// Ok, now we're ready to listen.
	if err := startListening(n.PeerHost, cfg); err != nil {
		return err
	}

	n.P2P = p2p.NewP2P(n.Identity, n.PeerHost, n.Peerstore)

	// setup local discovery
	if do != nil {
		service, err := do(ctx, n.PeerHost)
		if err != nil {
			log.Error("mdns error: ", err)
		} else {
			service.RegisterNotifee(n)
			n.Discovery = service
		}
	}

	return n.Bootstrap(DefaultBootstrapConfig)
}

func constructConnMgr(cfg config.ConnMgr) (ifconnmgr.ConnManager, error) {
	switch cfg.Type {
	case "":
		// 'default' value is the basic connection manager
		return connmgr.NewConnManager(config.DefaultConnMgrLowWater, config.DefaultConnMgrHighWater, config.DefaultConnMgrGracePeriod), nil
	case "none":
		return nil, nil
	case "basic":
		grace, err := time.ParseDuration(cfg.GracePeriod)
		if err != nil {
			return nil, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err)
		}

		return connmgr.NewConnManager(cfg.LowWater, cfg.HighWater, grace), nil
	default:
		return nil, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Type)
	}
}

func (n *IpfsNode) startLateOnlineServices(ctx context.Context) error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	var keyProvider rp.KeyChanFunc

	switch cfg.Reprovider.Strategy {
	case "all":
		fallthrough
	case "":
		keyProvider = rp.NewBlockstoreProvider(n.Blockstore)
	case "roots":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, true)
	case "pinned":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, false)
	default:
		return fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy)
	}
	n.Reprovider = rp.NewReprovider(ctx, n.Routing, keyProvider)

	reproviderInterval := kReprovideFrequency
	if cfg.Reprovider.Interval != "" {
		dur, err := time.ParseDuration(cfg.Reprovider.Interval)
		if err != nil {
			return err
		}

		reproviderInterval = dur
	}

	go n.Reprovider.Run(reproviderInterval)

	return nil
}

func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) {
	var annAddrs []ma.Multiaddr
	for _, addr := range cfg.Announce {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, err
		}
		annAddrs = append(annAddrs, maddr)
	}

	filters := mafilter.NewFilters()
	noAnnAddrs := map[string]bool{}
	for _, addr := range cfg.NoAnnounce {
		f, err := mamask.NewMask(addr)
		if err == nil {
			filters.AddDialFilter(f)
			continue
		}
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, err
		}
		noAnnAddrs[maddr.String()] = true
	}

	return func(allAddrs []ma.Multiaddr) []ma.Multiaddr {
		var addrs []ma.Multiaddr
		if len(annAddrs) > 0 {
			addrs = annAddrs
		} else {
			addrs = allAddrs
		}

		var out []ma.Multiaddr
		for _, maddr := range addrs {
			// check for exact matches
			ok, _ := noAnnAddrs[maddr.String()]
			// check for /ipcidr matches
			if !ok && !filters.AddrBlocked(maddr) {
				out = append(out, maddr)
			}
		}
		return out
	}, nil
}

func makeSmuxTransportOption(mplexExp bool) libp2p.Option {
	const yamuxID = "/yamux/1.0.0"
	const mplexID = "/mplex/6.7.0"

	ymxtpt := &yamux.Transport{
		AcceptBacklog:          512,
		ConnectionWriteTimeout: time.Second * 10,
		KeepAliveInterval:      time.Second * 30,
		EnableKeepAlive:        true,
		MaxStreamWindowSize:    uint32(1024 * 512),
		LogOutput:              ioutil.Discard,
	}

	if os.Getenv("YAMUX_DEBUG") != "" {
		ymxtpt.LogOutput = os.Stderr
	}

	muxers := map[string]smux.Transport{yamuxID: ymxtpt}
	if mplexExp {
		muxers[mplexID] = mplex.DefaultTransport
	}

	// Allow muxer preference order overriding
	order := []string{yamuxID, mplexID}
	if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" {
		order = strings.Fields(prefs)
	}

	opts := make([]libp2p.Option, 0, len(order))
	for _, id := range order {
		tpt, ok := muxers[id]
		if !ok {
			log.Warningf("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id)
			continue
		}
		delete(muxers, id)
		opts = append(opts, libp2p.Muxer(id, tpt))
	}

	return libp2p.ChainOptions(opts...)
}

func setupDiscoveryOption(d config.Discovery) DiscoveryOption {
	if d.MDNS.Enabled {
		return func(ctx context.Context, h p2phost.Host) (discovery.Service, error) {
			if d.MDNS.Interval == 0 {
				d.MDNS.Interval = 5
			}
			return discovery.NewMdnsService(ctx, h, time.Duration(d.MDNS.Interval)*time.Second, discovery.ServiceTag)
		}
	}
	return nil
}

// HandlePeerFound attempts to connect to the peer from the given PeerInfo,
// logging a warning if the connection fails.
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}

// startOnlineServicesWithHost starts the set of services which need to be
// initialized with the host and _before_ we start listening.
func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, host p2phost.Host, routingOption RoutingOption, pubsub bool, ipnsps bool) error {
	// setup diagnostics service
	n.Ping = ping.NewPingService(host)

	if pubsub || ipnsps {
		cfg, err := n.Repo.Config()
		if err != nil {
			return err
		}

		var service *floodsub.PubSub

		switch cfg.Pubsub.Router {
		case "":
			fallthrough
		case "floodsub":
			service, err = floodsub.NewFloodSub(ctx, host)

		case "gossipsub":
			service, err = floodsub.NewGossipSub(ctx, host)

		default:
			err = fmt.Errorf("unknown pubsub router %s", cfg.Pubsub.Router)
		}

		if err != nil {
			return err
		}
		n.Floodsub = service
	}

	// setup routing service
	r, err := routingOption(ctx, host, n.Repo.Datastore(), n.RecordValidator)
	if err != nil {
		return err
	}
	n.Routing = r

	// TODO: I'm not a fan of type assertions like this but the
	// `RoutingOption` system doesn't currently provide access to the
	// IpfsNode.
	//
	// Ideally, we'd do something like:
	//
	// 1. Add some fancy method to introspect into tiered routers to extract
	//    things like the pubsub router or the DHT (complicated, messy,
	//    probably not worth it).
	// 2. Pass the IpfsNode into the RoutingOption (would also remove the
	//    PSRouter case below).
	// 3. Introduce some kind of service manager? (my personal favorite but
	//    that requires a fair amount of work).
	if dht, ok := r.(*dht.IpfsDHT); ok {
		n.DHT = dht
	}

	if ipnsps {
		n.PSRouter = psrouter.NewPubsubValueStore(
			ctx,
			host,
			n.Routing,
			n.Floodsub,
			n.RecordValidator,
		)
		n.Routing = rhelpers.Tiered{
			// Always check pubsub first.
			&rhelpers.Compose{
				ValueStore: &rhelpers.LimitedValueStore{
					ValueStore: n.PSRouter,
					Namespaces: []string{"ipns"},
				},
			},
			n.Routing,
		}
	}

	// Wrap standard peer host with routing system to allow unknown peer lookups
	n.PeerHost = rhost.Wrap(host, n.Routing)

	// setup exchange service
	bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing)
	n.Exchange = bitswap.New(ctx, bitswapNetwork, n.Blockstore)

	size, err := n.getCacheSize()
	if err != nil {
		return err
	}

	// setup name system
	n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)

	// setup ipns republishing
	return n.setupIpnsRepublisher()
}

// getCacheSize returns the IPNS resolve cache size from the config.
func (n *IpfsNode) getCacheSize() (int, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return 0, err
	}

	cs := cfg.Ipns.ResolveCacheSize
	if cs == 0 {
		cs = 128
	}
	if cs < 0 {
		return 0, fmt.Errorf("cannot specify negative resolve cache size")
	}
	return cs, nil
}

func (n *IpfsNode) setupIpnsRepublisher() error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	n.IpnsRepub = ipnsrp.NewRepublisher(n.Namesys, n.Repo.Datastore(), n.PrivateKey, n.Repo.Keystore())

	if cfg.Ipns.RepublishPeriod != "" {
		d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err)
		}

		if !u.Debug && (d < time.Minute || d > (time.Hour*24)) {
			return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d)
		}

		n.IpnsRepub.Interval = d
	}

	if cfg.Ipns.RecordLifetime != "" {
		d, err := time.ParseDuration(cfg.Ipns.RecordLifetime)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err)
		}

		n.IpnsRepub.RecordLifetime = d
	}

	n.Process().Go(n.IpnsRepub.Run)

	return nil
}

// Process returns the Process object
func (n *IpfsNode) Process() goprocess.Process {
	return n.proc
}

// Close calls Close() on the Process object
func (n *IpfsNode) Close() error {
	return n.proc.Close()
}

// Context returns the IpfsNode context
func (n *IpfsNode) Context() context.Context {
	if n.ctx == nil {
		n.ctx = context.TODO()
	}
	return n.ctx
}

// teardown closes owned children. If any errors occur, this function returns
// the first error.
func (n *IpfsNode) teardown() error {
	log.Debug("core is shutting down...")
	// owned objects are closed in this teardown to ensure that they're closed
	// regardless of which constructor was used to add them to the node.
	var closers []io.Closer

	// NOTE: The order that objects are added (closed) matters; if an object
	// needs to use another during its shutdown/cleanup process, it should be
	// closed before that other object

	if n.FilesRoot != nil {
		closers = append(closers, n.FilesRoot)
	}

	if n.Exchange != nil {
		closers = append(closers, n.Exchange)
	}

	if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() {
		closers = append(closers, mount.Closer(n.Mounts.Ipfs))
	}
	if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() {
		closers = append(closers, mount.Closer(n.Mounts.Ipns))
	}

	if n.DHT != nil {
		closers = append(closers, n.DHT.Process())
	}

	if n.Blocks != nil {
		closers = append(closers, n.Blocks)
	}

	if n.Bootstrapper != nil {
		closers = append(closers, n.Bootstrapper)
	}

	if n.PeerHost != nil {
		closers = append(closers, n.PeerHost)
	}

	// Repo closed last, most things need to preserve state here
	closers = append(closers, n.Repo)

	var errs []error
	for _, closer := range closers {
		if err := closer.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}

// OnlineMode returns whether or not the IpfsNode is in OnlineMode.
func (n *IpfsNode) OnlineMode() bool {
	return n.mode == onlineMode
}

// SetLocal will set the IpfsNode to local mode
func (n *IpfsNode) SetLocal(isLocal bool) {
	if isLocal {
		n.mode = localMode
	}
	n.localModeSet = true
}

// LocalMode returns whether or not the IpfsNode is in LocalMode
func (n *IpfsNode) LocalMode() bool {
	if !n.localModeSet {
		// programmer error should not happen
		panic("local mode not set")
	}
	return n.mode == localMode
}

// Bootstrap will set and call the IpfsNode's bootstrap function.
func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error {
	// TODO what should return value be when in offlineMode?
	if n.Routing == nil {
		return nil
	}

	if n.Bootstrapper != nil {
		n.Bootstrapper.Close() // stop previous bootstrap process.
	}

	// if the caller did not specify a bootstrap peer function, get the
	// freshest bootstrap peers from config. this responds to live changes.
	if cfg.BootstrapPeers == nil {
		cfg.BootstrapPeers = func() []pstore.PeerInfo {
			ps, err := n.loadBootstrapPeers()
			if err != nil {
				log.Warning("failed to parse bootstrap peers from config")
				return nil
			}
			return ps
		}
	}

	var err error
	n.Bootstrapper, err = Bootstrap(n, cfg)
	return err
}

func (n *IpfsNode) loadID() error {
	if n.Identity != "" {
		return errors.New("identity already loaded")
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	cid := cfg.Identity.PeerID
	if cid == "" {
		return errors.New("identity was not set in config (was 'ipfs init' run?)")
	}

	id, err := peer.IDB58Decode(cid)
	if err != nil {
		return fmt.Errorf("peer ID invalid: %s", err)
	}

	n.Identity = id
	return nil
}

// GetKey will return a key from the Keystore with name `name`.
func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) {
	if name == "self" {
		return n.PrivateKey, nil
	}
	return n.Repo.Keystore().Get(name)
}

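// LoadPrivateKey loads the private key described in the repo config into
// n.PrivateKey and registers it (and the matching public key) with the
// Peerstore. The node's identity must already be loaded.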
func (n *IpfsNode) LoadPrivateKey() error {
	if n.Identity == "" || n.Peerstore == nil {
		return errors.New("loaded private key out of order")
	}

	if n.PrivateKey != nil {
		return errors.New("private key already loaded")
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	sk, err := loadPrivateKey(&cfg.Identity, n.Identity)
	if err != nil {
		return err
	}

	n.PrivateKey = sk
	n.Peerstore.AddPrivKey(n.Identity, n.PrivateKey)
	n.Peerstore.AddPubKey(n.Identity, sk.GetPublic())
	return nil
}

func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return nil, err
	}

	parsed, err := cfg.BootstrapPeers()
	if err != nil {
		return nil, err
	}
	return toPeerInfos(parsed), nil
}

func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
	pf := func(ctx context.Context, c *cid.Cid) error {
		return n.Repo.Datastore().Put(dsk, c.Bytes())
	}

	var nd *merkledag.ProtoNode
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
		nd = ft.EmptyDirNode()
		err := n.DAG.Add(n.Context(), nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
		c, err := cid.Cast(val)
		if err != nil {
			return err
		}

		rnd, err := n.DAG.Get(n.Context(), c)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}

		pbnd, ok := rnd.(*merkledag.ProtoNode)
		if !ok {
			return merkledag.ErrNotProtobuf
		}

		nd = pbnd
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}

// SetupOfflineRouting instantiates a routing system in offline mode. This is
// primarily used for offline ipns modifications.
func (n *IpfsNode) SetupOfflineRouting() error {
	if n.Routing != nil {
		// Routing was already set up
		return nil
	}

	// TODO: move this somewhere else.
	err := n.LoadPrivateKey()
	if err != nil {
		return err
	}

	n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)

	size, err := n.getCacheSize()
	if err != nil {
		return err
	}

	n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)

	return nil
}

func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) {
	sk, err := cfg.DecodePrivateKey("passphrase todo!")
	if err != nil {
		return nil, err
	}

	id2, err := peer.IDFromPrivateKey(sk)
	if err != nil {
		return nil, err
	}

	if id2 != id {
		return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2)
	}

	return sk, nil
}

func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
	var listen []ma.Multiaddr
	for _, addr := range cfg.Addresses.Swarm {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm)
		}
		listen = append(listen, maddr)
	}

	return listen, nil
}

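// ConstructPeerHostOpts bundles the options used when constructing the
// libp2p peer host.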
type ConstructPeerHostOpts struct {
	AddrsFactory      p2pbhost.AddrsFactory
	DisableNatPortMap bool
	DisableRelay      bool
	EnableRelayHop    bool
	ConnectionManager ifconnmgr.ConnManager
}

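// HostOption is a function that constructs a libp2p host from an identity,
// a peerstore and a set of libp2p options.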
type HostOption func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error)

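// DefaultHostOption is the default constructor for the libp2p peer host.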
var DefaultHostOption HostOption = constructPeerHost

// isolates the complex initialization steps
func constructPeerHost(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
	pkey := ps.PrivKey(id)
	if pkey == nil {
		return nil, fmt.Errorf("missing private key for node ID: %s", id.Pretty())
	}
	options = append([]libp2p.Option{libp2p.Identity(pkey), libp2p.Peerstore(ps)}, options...)
	return libp2p.New(ctx, options...)
}

func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
	var raddrs []ma.Multiaddr
	for _, addr := range addrs {
		_, err := addr.ValueForProtocol(circuit.P_CIRCUIT)
		if err == nil {
			continue
		}
		raddrs = append(raddrs, addr)
	}
	return raddrs
}

func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory {
	return func(addrs []ma.Multiaddr) []ma.Multiaddr {
		return f(g(addrs))
	}
}

// startListening on the network addresses
func startListening(host p2phost.Host, cfg *config.Config) error {
	listenAddrs, err := listenAddresses(cfg)
	if err != nil {
		return err
	}

	// Actually start listening:
	if err := host.Network().Listen(listenAddrs...); err != nil {
		return err
	}

	// list out our addresses
	addrs, err := host.Network().InterfaceListenAddresses()
	if err != nil {
		return err
	}
	log.Infof("Swarm listening at: %s", addrs)
	return nil
}

func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
	return dht.New(
		ctx, host,
		dhtopts.Datastore(dstore),
		dhtopts.Validator(validator),
	)
}

func constructClientDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
	return dht.New(
		ctx, host,
		dhtopts.Client(true),
		dhtopts.Datastore(dstore),
		dhtopts.Validator(validator),
	)
}

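// RoutingOption is a function that constructs the node's routing system
// from a host, a datastore and a record validator.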
type RoutingOption func(context.Context, p2phost.Host, ds.Batching, record.Validator) (routing.IpfsRouting, error)

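// DiscoveryOption is a function that constructs a local peer discovery
// service for the given host.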
type DiscoveryOption func(context.Context, p2phost.Host) (discovery.Service, error)

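// DHTOption constructs a full (client and server) Kademlia DHT routing system.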
var DHTOption RoutingOption = constructDHTRouting
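
// DHTClientOption constructs a client-only Kademlia DHT routing system.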
var DHTClientOption RoutingOption = constructClientDHTRouting
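
// NilRouterOption constructs a no-op routing system.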
var NilRouterOption RoutingOption = nilrouting.ConstructNilRouting