/*
Package core implements the IpfsNode object and related methods.

Packages underneath core/ provide a (relatively) stable, low-level API
to carry out most IPFS-related tasks.  For more details on the other
interfaces and how core/... fits into the bigger IPFS picture, see:

  $ godoc github.com/ipfs/go-ipfs
*/
package core

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"time"

	version "github.com/ipfs/go-ipfs"
	rp "github.com/ipfs/go-ipfs/exchange/reprovide"
	filestore "github.com/ipfs/go-ipfs/filestore"
	mount "github.com/ipfs/go-ipfs/fuse/mount"
	namesys "github.com/ipfs/go-ipfs/namesys"
	ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher"
	p2p "github.com/ipfs/go-ipfs/p2p"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"

	dht "gx/ipfs/QmNesMxTot4Spt6qZkT45DWMSniPJgUfc4BprhbCpPi6Qk/go-libp2p-kad-dht"
	dhtopts "gx/ipfs/QmNesMxTot4Spt6qZkT45DWMSniPJgUfc4BprhbCpPi6Qk/go-libp2p-kad-dht/opts"
	mfs "gx/ipfs/QmPVjJyJAosfwtiFr7LHoatQszdzCgyx6oE9nnWnuKhSMt/go-mfs"
	u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
	ic "gx/ipfs/QmPvyPwuCgJ7pDmrKDxRtsScJgBaM5h4EpRL2qQJsmXf4n/go-libp2p-crypto"
	peer "gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
	merkledag "gx/ipfs/QmRDaC5z6yXkXTTSWzaxs2sSVBon5RRCN6eNtMmpuHtKCr/go-merkledag"
	logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
	routing "gx/ipfs/QmS4niovD1U6pRjUBXivr1zvvLBqiTKbERjFo994JU7oQS/go-libp2p-routing"
	goprocess "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
	mamask "gx/ipfs/QmSMZwvs3n4GBikZ7hKzT17c3bk65FmyZo2JqtJ16swqCv/multiaddr-filter"
	mafilter "gx/ipfs/QmSW4uNHbvQia8iZDXzbwjiyHQtnyo9aFqfQAMasj3TJ6Y/go-maddr-filter"
	"gx/ipfs/QmTKaiDxQqVxmA1bRipSuP7hnTSgnMSmEa98NYeS6fcoiv/go-path/resolver"
	circuit "gx/ipfs/QmUveY3vGb6uQuzccihinJshn3KUFjvb5PaBZUVhqGx8TJ/go-libp2p-circuit"
	ds "gx/ipfs/QmVG5gxteQNEMhrS8prJSmU2C9rebtFuTd3SYZ5kE3YZ5k/go-datastore"
	ft "gx/ipfs/QmVNEJ5Vk1e2G5kHMiuVbpD6VQZiK1oS6aWZKjcUQW7hEy/go-unixfs"
	ifconnmgr "gx/ipfs/QmVz2p8ZVZ5GcWPNWGs2HZHiZyHumZcJpQdMRpxkMDhc2C/go-libp2p-interface-connmgr"
	exchange "gx/ipfs/QmWw71Mz9PXKgYG8ZfTYN7Ax2Zm48Eurbne3wC2y7CKmLz/go-ipfs-exchange-interface"
	ipld "gx/ipfs/QmX5CsuHyVZeTLxgRSYkgLSDQKb9UjE8xnhQzCEJWWWFsC/go-ipld-format"
	config "gx/ipfs/QmXUU23sGKdT7AHpyJ4aSvYpXbWjbiuYG1CYhZ3ai3btkG/go-ipfs-config"
	floodsub "gx/ipfs/QmXxivgdXWBfkKPdKDhuRWkrugqEqhXuzyyiZ5DvAy9Zds/go-libp2p-floodsub"
	smux "gx/ipfs/QmY9JXR3FupnYAYJWK9aMr9bCpqWKcToQ1tz8DVGTrHpHw/go-stream-muxer"
	connmgr "gx/ipfs/QmYUsYp9aGkj9qDcTPG5VBSHVjyueS9jqtJUnNeAsfNfsV/go-libp2p-connmgr"
	quic "gx/ipfs/QmYfvQoJCUTCJ54Rog3zWSCDWAkwLWFTMKzcwaNMpXYSQT/go-libp2p-quic-transport"
	ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"
	cid "gx/ipfs/QmZFbDTY9jfSBms2MchvYM9oYRbAF19K7Pby47yDBfpPrb/go-cid"
	psrouter "gx/ipfs/QmZN7ABT3JJPGKP55DCoUm9U7L1BKYqasAPrdgkMEQDN7n/go-libp2p-pubsub-router"
	pnet "gx/ipfs/QmZaQ3K9PRd5sYYoG1xbTGPtd3N7TYiKBRmcBUTsx8HVET/go-libp2p-pnet"
	nilrouting "gx/ipfs/QmZdn8S4FLTfDrmLZb7JoLkrRvTYnyuMWEG6ZGZ3YKwEiK/go-ipfs-routing/none"
	offroute "gx/ipfs/QmZdn8S4FLTfDrmLZb7JoLkrRvTYnyuMWEG6ZGZ3YKwEiK/go-ipfs-routing/offline"
	bstore "gx/ipfs/QmcmpX42gtDv1fz24kau4wjS9hfwWj5VexWBKgGnWzsyag/go-ipfs-blockstore"
	yamux "gx/ipfs/QmcsgrV3nCAKjiHKZhKVXWc4oY3WBECJCqahXEMpHeMrev/go-smux-yamux"
	rhelpers "gx/ipfs/Qmd22J9AnyR3QUH56WPXkrTbCNkQ4x7TWWinHcZBhQkgVw/go-libp2p-routing-helpers"
	bitswap "gx/ipfs/Qmd8rU7X3VZzsgPnf2LSGUFu35zizYKajzXTRuHMUMqYJQ/go-bitswap"
	bsnet "gx/ipfs/Qmd8rU7X3VZzsgPnf2LSGUFu35zizYKajzXTRuHMUMqYJQ/go-bitswap/network"
	record "gx/ipfs/QmdHb9aBELnQKTVhvvA3hsQbRgUAwsWUzBP2vZ6Y5FBYvE/go-libp2p-record"
	bserv "gx/ipfs/QmdHqV7L4bpmMtEXVCrgn8RN6CXqMr3aUeogSkXbJGRtwk/go-blockservice"
	metrics "gx/ipfs/QmdhwKw53CTV8EJSAsR1bpmMT5kXiWBgeAyv1EXeeDiXqR/go-libp2p-metrics"
	mplex "gx/ipfs/QmdiBZzwGtN2yHJrWD9ojQ7ASS48nv7BcojWLkYd1ZtrV2/go-smux-multiplex"
	pstore "gx/ipfs/QmeKD8YT7887Xu6Z86iZmpYNxrLogJexqxEugSmaf14k64/go-libp2p-peerstore"
	libp2p "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p"
	discovery "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p/p2p/discovery"
	p2pbhost "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p/p2p/host/basic"
	rhost "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p/p2p/host/routed"
	identify "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p/p2p/protocol/identify"
	ping "gx/ipfs/Qmf1u2efhjXYtuyP8SMHYtw4dCkbghnniex2PSp7baA7FP/go-libp2p/p2p/protocol/ping"
	p2phost "gx/ipfs/QmfH9FKYv3Jp1xiyL8sPchGBUBg6JA6XviwajAo3qgnT3B/go-libp2p-host"
)

const IpnsValidatorTag = "ipns"

const kReprovideFrequency = time.Hour * 12
const discoveryConnTimeout = time.Second * 30

var log = logging.Logger("core")

type mode int

const (
	// zero value is not a valid mode, must be explicitly set
	localMode mode = iota
	offlineMode
	onlineMode
)

func init() {
	identify.ClientVersion = "go-ipfs/" + version.CurrentVersionNumber + "/" + version.CurrentCommit
}

// IpfsNode is the IPFS Core module. It represents an IPFS instance.
type IpfsNode struct {

	// Self
	Identity peer.ID // the local node's identity

	Repo repo.Repo

	// Local node
	Pinning         pin.Pinner // the pinning manager
	Mounts          Mounts     // current mount state, if any.
	PrivateKey      ic.PrivKey // the local node's private Key
	PNetFingerprint []byte     // fingerprint of private network

	// Services
	Peerstore       pstore.Peerstore     // storage for other Peer instances
	Blockstore      bstore.GCBlockstore  // the block store (lower level)
	Filestore       *filestore.Filestore // the filestore blockstore
	BaseBlocks      bstore.Blockstore    // the raw blockstore, no filestore wrapping
	GCLocker        bstore.GCLocker      // the locker used to protect the blockstore during gc
	Blocks          bserv.BlockService   // the block service, get/add blocks.
	DAG             ipld.DAGService      // the merkle dag service, get/add objects.
	Resolver        *resolver.Resolver   // the path resolution system
	Reporter        metrics.Reporter
	Discovery       discovery.Service
	FilesRoot       *mfs.Root
	RecordValidator record.Validator

	// Online
	PeerHost     p2phost.Host        // the network host (server+client)
	Bootstrapper io.Closer           // the periodic bootstrapper
	Routing      routing.IpfsRouting // the routing system. recommend ipfs-dht
	Exchange     exchange.Interface  // the block exchange + strategy (bitswap)
	Namesys      namesys.NameSystem  // the name system, resolves paths to hashes
	Ping         *ping.PingService
	Reprovider   *rp.Reprovider // the value reprovider system
	IpnsRepub    *ipnsrp.Republisher

	Floodsub *floodsub.PubSub
	PSRouter *psrouter.PubsubValueStore
	DHT      *dht.IpfsDHT
	P2P      *p2p.P2P

	proc goprocess.Process
	ctx  context.Context

	mode         mode
	localModeSet bool
}

// Mounts defines what the node's mount state is. This should
// perhaps be moved to the daemon or mount. It's here because
// it needs to be accessible across daemon requests.
type Mounts struct {
	Ipfs mount.Mount
	Ipns mount.Mount
}

func (n *IpfsNode) startOnlineServices(ctx context.Context, routingOption RoutingOption, hostOption HostOption, do DiscoveryOption, pubsub, ipnsps, mplex bool) error {
	if n.PeerHost != nil { // already online.
		return errors.New("node already online")
	}

	// load private key
	if err := n.LoadPrivateKey(); err != nil {
		return err
	}

	// get undialable addrs from config
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	var libp2pOpts []libp2p.Option
	for _, s := range cfg.Swarm.AddrFilters {
		f, err := mamask.NewMask(s)
		if err != nil {
			return fmt.Errorf("incorrectly formatted address filter in config: %s", s)
		}
		libp2pOpts = append(libp2pOpts, libp2p.FilterAddresses(f))
	}

	if !cfg.Swarm.DisableBandwidthMetrics {
		// Set reporter
		n.Reporter = metrics.NewBandwidthCounter()
		libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(n.Reporter))
	}

	swarmkey, err := n.Repo.SwarmKey()
	if err != nil {
		return err
	}

	if swarmkey != nil {
		protec, err := pnet.NewProtector(bytes.NewReader(swarmkey))
		if err != nil {
			return fmt.Errorf("failed to configure private network: %s", err)
		}
		n.PNetFingerprint = protec.Fingerprint()
		go func() {
			t := time.NewTicker(30 * time.Second)
			<-t.C // swallow one tick
			for {
				select {
				case <-t.C:
					if ph := n.PeerHost; ph != nil {
						if len(ph.Network().Peers()) == 0 {
							log.Warning("We are in a private network and have no peers.")
							log.Warning("This might be a configuration mistake.")
						}
					}
				case <-n.Process().Closing():
					t.Stop()
					return
				}
			}
		}()

		libp2pOpts = append(libp2pOpts, libp2p.PrivateNetwork(protec))
	}

	addrsFactory, err := makeAddrsFactory(cfg.Addresses)
	if err != nil {
		return err
	}
	if !cfg.Swarm.DisableRelay {
		addrsFactory = composeAddrsFactory(addrsFactory, filterRelayAddrs)
	}
	libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrsFactory))

	connm, err := constructConnMgr(cfg.Swarm.ConnMgr)
	if err != nil {
		return err
	}
	libp2pOpts = append(libp2pOpts, libp2p.ConnectionManager(connm))

	libp2pOpts = append(libp2pOpts, makeSmuxTransportOption(mplex))

	if !cfg.Swarm.DisableNatPortMap {
		libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
	}
	if !cfg.Swarm.DisableRelay {
		var opts []circuit.RelayOpt
		if cfg.Swarm.EnableRelayHop {
			opts = append(opts, circuit.OptHop)
		}
		libp2pOpts = append(libp2pOpts, libp2p.EnableRelay(opts...))
	}

	// explicitly enable the default transports
	libp2pOpts = append(libp2pOpts, libp2p.DefaultTransports)

	if cfg.Experimental.QUIC {
		libp2pOpts = append(libp2pOpts, libp2p.Transport(quic.NewTransport))
	}

	peerhost, err := hostOption(ctx, n.Identity, n.Peerstore, libp2pOpts...)

	if err != nil {
		return err
	}

	if err := n.startOnlineServicesWithHost(ctx, peerhost, routingOption, pubsub, ipnsps); err != nil {
		return err
	}

	// Ok, now we're ready to listen.
	if err := startListening(n.PeerHost, cfg); err != nil {
		return err
	}

	n.P2P = p2p.NewP2P(n.Identity, n.PeerHost, n.Peerstore)

	// setup local discovery
	if do != nil {
		service, err := do(ctx, n.PeerHost)
		if err != nil {
			log.Error("mdns error: ", err)
		} else {
			service.RegisterNotifee(n)
			n.Discovery = service
		}
	}

	return n.Bootstrap(DefaultBootstrapConfig)
}

func constructConnMgr(cfg config.ConnMgr) (ifconnmgr.ConnManager, error) {
	switch cfg.Type {
	case "":
		// 'default' value is the basic connection manager
		return connmgr.NewConnManager(config.DefaultConnMgrLowWater, config.DefaultConnMgrHighWater, config.DefaultConnMgrGracePeriod), nil
	case "none":
		return nil, nil
	case "basic":
		grace, err := time.ParseDuration(cfg.GracePeriod)
		if err != nil {
			return nil, fmt.Errorf("parsing Swarm.ConnMgr.GracePeriod: %s", err)
		}

		return connmgr.NewConnManager(cfg.LowWater, cfg.HighWater, grace), nil
	default:
		return nil, fmt.Errorf("unrecognized ConnMgr.Type: %q", cfg.Type)
	}
}
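
// exampleConnMgr is a hedged sketch (not part of the original file) of how a
// Swarm.ConnMgr config section maps onto a connection manager; the water marks
// and grace period below are illustrative assumptions, not shipped defaults.
func exampleConnMgr() (ifconnmgr.ConnManager, error) {
	return constructConnMgr(config.ConnMgr{
		Type:        "basic",
		LowWater:    100,
		HighWater:   400,
		GracePeriod: "20s",
	})
}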

func (n *IpfsNode) startLateOnlineServices(ctx context.Context) error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	var keyProvider rp.KeyChanFunc

	switch cfg.Reprovider.Strategy {
	case "all":
		fallthrough
	case "":
		keyProvider = rp.NewBlockstoreProvider(n.Blockstore)
	case "roots":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, true)
	case "pinned":
		keyProvider = rp.NewPinnedProvider(n.Pinning, n.DAG, false)
	default:
		return fmt.Errorf("unknown reprovider strategy '%s'", cfg.Reprovider.Strategy)
	}
	n.Reprovider = rp.NewReprovider(ctx, n.Routing, keyProvider)

	reproviderInterval := kReprovideFrequency
	if cfg.Reprovider.Interval != "" {
		dur, err := time.ParseDuration(cfg.Reprovider.Interval)
		if err != nil {
			return err
		}

		reproviderInterval = dur
	}

	go n.Reprovider.Run(reproviderInterval)

	return nil
}

func makeAddrsFactory(cfg config.Addresses) (p2pbhost.AddrsFactory, error) {
	var annAddrs []ma.Multiaddr
	for _, addr := range cfg.Announce {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, err
		}
		annAddrs = append(annAddrs, maddr)
	}

	filters := mafilter.NewFilters()
	noAnnAddrs := map[string]bool{}
	for _, addr := range cfg.NoAnnounce {
		f, err := mamask.NewMask(addr)
		if err == nil {
			filters.AddDialFilter(f)
			continue
		}
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, err
		}
		noAnnAddrs[maddr.String()] = true
	}

	return func(allAddrs []ma.Multiaddr) []ma.Multiaddr {
		var addrs []ma.Multiaddr
		if len(annAddrs) > 0 {
			addrs = annAddrs
		} else {
			addrs = allAddrs
		}

		var out []ma.Multiaddr
		for _, maddr := range addrs {
			// check for exact matches
			ok := noAnnAddrs[maddr.String()]
			// check for /ipcidr matches
			if !ok && !filters.AddrBlocked(maddr) {
				out = append(out, maddr)
			}
		}
		return out
	}, nil
}
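
// exampleAddrsFactory is a hedged sketch (not part of the original file) of how
// Addresses.Announce and Addresses.NoAnnounce interact: announced addresses
// replace the observed set, and NoAnnounce entries (exact or /ipcidr) are
// filtered out. The addresses below are illustrative assumptions.
func exampleAddrsFactory() ([]ma.Multiaddr, error) {
	factory, err := makeAddrsFactory(config.Addresses{
		Announce:   []string{"/ip4/1.2.3.4/tcp/4001"},
		NoAnnounce: []string{"/ip4/127.0.0.0/ipcidr/8"},
	})
	if err != nil {
		return nil, err
	}
	local, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
	if err != nil {
		return nil, err
	}
	return factory([]ma.Multiaddr{local}), nil
}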

func makeSmuxTransportOption(mplexExp bool) libp2p.Option {
	const yamuxID = "/yamux/1.0.0"
	const mplexID = "/mplex/6.7.0"

	ymxtpt := &yamux.Transport{
		AcceptBacklog:          512,
		ConnectionWriteTimeout: time.Second * 10,
		KeepAliveInterval:      time.Second * 30,
		EnableKeepAlive:        true,
		MaxStreamWindowSize:    uint32(1024 * 512),
		LogOutput:              ioutil.Discard,
	}

	if os.Getenv("YAMUX_DEBUG") != "" {
		ymxtpt.LogOutput = os.Stderr
	}

	muxers := map[string]smux.Transport{yamuxID: ymxtpt}
	if mplexExp {
		muxers[mplexID] = mplex.DefaultTransport
	}

	// Allow muxer preference order overriding
	order := []string{yamuxID, mplexID}
	if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" {
		order = strings.Fields(prefs)
	}

	opts := make([]libp2p.Option, 0, len(order))
	for _, id := range order {
		tpt, ok := muxers[id]
		if !ok {
			log.Warningf("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id)
			continue
		}
		delete(muxers, id)
		opts = append(opts, libp2p.Muxer(id, tpt))
	}

	return libp2p.ChainOptions(opts...)
}
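
// exampleMuxPrefs is a hedged sketch (not part of the original file): the
// LIBP2P_MUX_PREFS environment variable reorders stream-muxer negotiation,
// assuming the experimental mplex muxer has been enabled.
func exampleMuxPrefs() libp2p.Option {
	os.Setenv("LIBP2P_MUX_PREFS", "/mplex/6.7.0 /yamux/1.0.0")
	return makeSmuxTransportOption(true)
}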

func setupDiscoveryOption(d config.Discovery) DiscoveryOption {
	if d.MDNS.Enabled {
		return func(ctx context.Context, h p2phost.Host) (discovery.Service, error) {
			if d.MDNS.Interval == 0 {
				d.MDNS.Interval = 5
			}
			return discovery.NewMdnsService(ctx, h, time.Duration(d.MDNS.Interval)*time.Second, discovery.ServiceTag)
		}
	}
	return nil
}
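
// exampleDiscoveryOption is a hedged sketch (not part of the original file) of
// how the Discovery config section becomes a DiscoveryOption; the 10 second
// MDNS interval is an illustrative assumption.
func exampleDiscoveryOption() DiscoveryOption {
	return setupDiscoveryOption(config.Discovery{
		MDNS: config.MDNS{
			Enabled:  true,
			Interval: 10,
		},
	})
}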

// HandlePeerFound attempts to connect to the peer described by `PeerInfo`;
// if the connection fails, it logs a warning.
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}

// startOnlineServicesWithHost starts the set of services which need to be
// initialized with the host and _before_ we start listening.
func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, host p2phost.Host, routingOption RoutingOption, pubsub bool, ipnsps bool) error {
	// setup diagnostics service
	n.Ping = ping.NewPingService(host)

	if pubsub || ipnsps {
		cfg, err := n.Repo.Config()
		if err != nil {
			return err
		}

		var service *floodsub.PubSub

		switch cfg.Pubsub.Router {
		case "":
			fallthrough
		case "floodsub":
			service, err = floodsub.NewFloodSub(ctx, host)

		case "gossipsub":
			service, err = floodsub.NewGossipSub(ctx, host)

		default:
			err = fmt.Errorf("unknown pubsub router %s", cfg.Pubsub.Router)
		}

		if err != nil {
			return err
		}
		n.Floodsub = service
	}

	// setup routing service
	r, err := routingOption(ctx, host, n.Repo.Datastore(), n.RecordValidator)
	if err != nil {
		return err
	}
	n.Routing = r

	// TODO: I'm not a fan of type assertions like this but the
	// `RoutingOption` system doesn't currently provide access to the
	// IpfsNode.
	//
	// Ideally, we'd do something like:
	//
	// 1. Add some fancy method to introspect into tiered routers to extract
	//    things like the pubsub router or the DHT (complicated, messy,
	//    probably not worth it).
	// 2. Pass the IpfsNode into the RoutingOption (would also remove the
	//    PSRouter case below).
	// 3. Introduce some kind of service manager? (my personal favorite but
	//    that requires a fair amount of work).
	if dht, ok := r.(*dht.IpfsDHT); ok {
		n.DHT = dht
	}

	if ipnsps {
		n.PSRouter = psrouter.NewPubsubValueStore(
			ctx,
			host,
			n.Routing,
			n.Floodsub,
			n.RecordValidator,
		)
		n.Routing = rhelpers.Tiered{
			// Always check pubsub first.
			&rhelpers.Compose{
				ValueStore: &rhelpers.LimitedValueStore{
					ValueStore: n.PSRouter,
					Namespaces: []string{"ipns"},
				},
			},
			n.Routing,
		}
	}

	// Wrap standard peer host with routing system to allow unknown peer lookups
	n.PeerHost = rhost.Wrap(host, n.Routing)

	// setup exchange service
	bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing)
	n.Exchange = bitswap.New(ctx, bitswapNetwork, n.Blockstore)

	size, err := n.getCacheSize()
	if err != nil {
		return err
	}

	// setup name system
	n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)

	// setup ipns republishing
	return n.setupIpnsRepublisher()
}

// getCacheSize returns the configured IPNS resolve cache size (128 by default)
func (n *IpfsNode) getCacheSize() (int, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return 0, err
	}

	cs := cfg.Ipns.ResolveCacheSize
	if cs == 0 {
		cs = 128
	}
	if cs < 0 {
		return 0, fmt.Errorf("cannot specify negative resolve cache size")
	}
	return cs, nil
}

func (n *IpfsNode) setupIpnsRepublisher() error {
	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	n.IpnsRepub = ipnsrp.NewRepublisher(n.Namesys, n.Repo.Datastore(), n.PrivateKey, n.Repo.Keystore())

	if cfg.Ipns.RepublishPeriod != "" {
		d, err := time.ParseDuration(cfg.Ipns.RepublishPeriod)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RepublishPeriod: %s", err)
		}

		if !u.Debug && (d < time.Minute || d > (time.Hour*24)) {
			return fmt.Errorf("config setting IPNS.RepublishPeriod is not between 1min and 1day: %s", d)
		}

		n.IpnsRepub.Interval = d
	}

	if cfg.Ipns.RecordLifetime != "" {
		d, err := time.ParseDuration(cfg.Ipns.RecordLifetime)
		if err != nil {
			return fmt.Errorf("failure to parse config setting IPNS.RecordLifetime: %s", err)
		}

		n.IpnsRepub.RecordLifetime = d
	}

	n.Process().Go(n.IpnsRepub.Run)

	return nil
}

// Process returns the Process object
func (n *IpfsNode) Process() goprocess.Process {
	return n.proc
}

// Close calls Close() on the Process object
func (n *IpfsNode) Close() error {
	return n.proc.Close()
}

// Context returns the IpfsNode context
func (n *IpfsNode) Context() context.Context {
	if n.ctx == nil {
		n.ctx = context.TODO()
	}
	return n.ctx
}

// teardown closes owned children. If any errors occur, this function returns
// the first error.
func (n *IpfsNode) teardown() error {
	log.Debug("core is shutting down...")
	// owned objects are closed in this teardown to ensure that they're closed
	// regardless of which constructor was used to add them to the node.
	var closers []io.Closer

	// NOTE: The order in which objects are added (closed) matters; if an object
	// needs to use another during its shutdown/cleanup process, it should be
	// closed before that other object

	if n.FilesRoot != nil {
		closers = append(closers, n.FilesRoot)
	}

	if n.Exchange != nil {
		closers = append(closers, n.Exchange)
	}

	if n.Mounts.Ipfs != nil && !n.Mounts.Ipfs.IsActive() {
		closers = append(closers, mount.Closer(n.Mounts.Ipfs))
	}
	if n.Mounts.Ipns != nil && !n.Mounts.Ipns.IsActive() {
		closers = append(closers, mount.Closer(n.Mounts.Ipns))
	}

	if n.DHT != nil {
		closers = append(closers, n.DHT.Process())
	}

	if n.Blocks != nil {
		closers = append(closers, n.Blocks)
	}

	if n.Bootstrapper != nil {
		closers = append(closers, n.Bootstrapper)
	}

	if n.PeerHost != nil {
		closers = append(closers, n.PeerHost)
	}

	// Repo closed last, most things need to preserve state here
	closers = append(closers, n.Repo)

	var errs []error
	for _, closer := range closers {
		if err := closer.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}

// OnlineMode returns whether or not the IpfsNode is in OnlineMode.
func (n *IpfsNode) OnlineMode() bool {
	return n.mode == onlineMode
}

// SetLocal will set the IpfsNode to local mode
func (n *IpfsNode) SetLocal(isLocal bool) {
	if isLocal {
		n.mode = localMode
	}
	n.localModeSet = true
}

// LocalMode returns whether or not the IpfsNode is in LocalMode
func (n *IpfsNode) LocalMode() bool {
	if !n.localModeSet {
		// programmer error should not happen
		panic("local mode not set")
	}
	return n.mode == localMode
}

// Bootstrap will set and call the IpfsNode's bootstrap function.
func (n *IpfsNode) Bootstrap(cfg BootstrapConfig) error {
	// TODO what should return value be when in offlineMode?
	if n.Routing == nil {
		return nil
	}

	if n.Bootstrapper != nil {
		n.Bootstrapper.Close() // stop previous bootstrap process.
	}

	// if the caller did not specify a bootstrap peer function, get the
	// freshest bootstrap peers from config. this responds to live changes.
	if cfg.BootstrapPeers == nil {
		cfg.BootstrapPeers = func() []pstore.PeerInfo {
			ps, err := n.loadBootstrapPeers()
			if err != nil {
				log.Warning("failed to parse bootstrap peers from config")
				return nil
			}
			return ps
		}
	}

	var err error
	n.Bootstrapper, err = Bootstrap(n, cfg)
	return err
}

func (n *IpfsNode) loadID() error {
	if n.Identity != "" {
		return errors.New("identity already loaded")
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	cid := cfg.Identity.PeerID
	if cid == "" {
		return errors.New("identity was not set in config (was 'ipfs init' run?)")
	}
	if len(cid) == 0 {
		return errors.New("no peer ID in config! (was 'ipfs init' run?)")
	}

	id, err := peer.IDB58Decode(cid)
	if err != nil {
		return fmt.Errorf("peer ID invalid: %s", err)
	}

	n.Identity = id
	return nil
}

// GetKey will return a key from the Keystore with name `name`.
func (n *IpfsNode) GetKey(name string) (ic.PrivKey, error) {
	if name == "self" {
		return n.PrivateKey, nil
	}
	return n.Repo.Keystore().Get(name)
}
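
// A hedged usage sketch (not part of the original file): "self" resolves to the
// node's own private key, while any other name is looked up in the repo
// keystore. The key name "demo-key" is an illustrative assumption.
func exampleGetKey(n *IpfsNode) error {
	self, err := n.GetKey("self")
	if err != nil {
		return err
	}
	_ = self // same key as n.PrivateKey
	_, err = n.GetKey("demo-key")
	return err
}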

// LoadPrivateKey loads the node's private key from the repo config and
// registers it with the peerstore.
func (n *IpfsNode) LoadPrivateKey() error {
	if n.Identity == "" || n.Peerstore == nil {
		return errors.New("loaded private key out of order")
	}

	if n.PrivateKey != nil {
		return errors.New("private key already loaded")
	}

	cfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	sk, err := loadPrivateKey(&cfg.Identity, n.Identity)
	if err != nil {
		return err
	}

	n.PrivateKey = sk
	n.Peerstore.AddPrivKey(n.Identity, n.PrivateKey)
	n.Peerstore.AddPubKey(n.Identity, sk.GetPublic())
	return nil
}

func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) {
	cfg, err := n.Repo.Config()
	if err != nil {
		return nil, err
	}

	parsed, err := cfg.BootstrapPeers()
	if err != nil {
		return nil, err
	}
	return toPeerInfos(parsed), nil
}

func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
	pf := func(ctx context.Context, c *cid.Cid) error {
		return n.Repo.Datastore().Put(dsk, c.Bytes())
	}

	var nd *merkledag.ProtoNode
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
		nd = ft.EmptyDirNode()
		err := n.DAG.Add(n.Context(), nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
		c, err := cid.Cast(val)
		if err != nil {
			return err
		}

		rnd, err := n.DAG.Get(n.Context(), c)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}

		pbnd, ok := rnd.(*merkledag.ProtoNode)
		if !ok {
			return merkledag.ErrNotProtobuf
		}

		nd = pbnd
Jeromy's avatar
Jeromy committed
839 840 841 842 843 844 845 846 847 848 849 850 851
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}

// SetupOfflineRouting instantiates a routing system in offline mode. This is
// primarily used for offline ipns modifications.
func (n *IpfsNode) SetupOfflineRouting() error {
	if n.Routing != nil {
		// Routing was already set up
		return nil
	}

	// TODO: move this somewhere else.
	err := n.LoadPrivateKey()
	if err != nil {
		return err
	}

	n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)

	size, err := n.getCacheSize()
	if err != nil {
		return err
	}

	n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size)

	return nil
}

func loadPrivateKey(cfg *config.Identity, id peer.ID) (ic.PrivKey, error) {
	sk, err := cfg.DecodePrivateKey("passphrase todo!")
	if err != nil {
		return nil, err
	}

	id2, err := peer.IDFromPrivateKey(sk)
	if err != nil {
		return nil, err
	}

	if id2 != id {
		return nil, fmt.Errorf("private key in config does not match id: %s != %s", id, id2)
	}

	return sk, nil
}

func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
	var listen []ma.Multiaddr
	for _, addr := range cfg.Addresses.Swarm {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, fmt.Errorf("failure to parse config.Addresses.Swarm: %s", cfg.Addresses.Swarm)
		}
		listen = append(listen, maddr)
	}

	return listen, nil
}

type ConstructPeerHostOpts struct {
	AddrsFactory      p2pbhost.AddrsFactory
	DisableNatPortMap bool
	DisableRelay      bool
	EnableRelayHop    bool
	ConnectionManager ifconnmgr.ConnManager
}

// HostOption constructs the libp2p host for a node from its identity,
// peerstore, and any additional libp2p options.
type HostOption func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error)

var DefaultHostOption HostOption = constructPeerHost

// isolates the complex initialization steps
func constructPeerHost(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
	pkey := ps.PrivKey(id)
	if pkey == nil {
		return nil, fmt.Errorf("missing private key for node ID: %s", id.Pretty())
926
	}
Steven Allen's avatar
Steven Allen committed
927 928
	options = append([]libp2p.Option{libp2p.Identity(pkey), libp2p.Peerstore(ps)}, options...)
	return libp2p.New(ctx, options...)
929 930
}

931 932 933 934 935 936 937 938 939 940 941 942
func filterRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
	var raddrs []ma.Multiaddr
	for _, addr := range addrs {
		_, err := addr.ValueForProtocol(circuit.P_CIRCUIT)
		if err == nil {
			continue
		}
		raddrs = append(raddrs, addr)
	}
	return raddrs
}

func composeAddrsFactory(f, g p2pbhost.AddrsFactory) p2pbhost.AddrsFactory {
	return func(addrs []ma.Multiaddr) []ma.Multiaddr {
		return f(g(addrs))
	}
}

// startListening on the network addresses
func startListening(host p2phost.Host, cfg *config.Config) error {
	listenAddrs, err := listenAddresses(cfg)
	if err != nil {
		return err
	}

	// Actually start listening:
	if err := host.Network().Listen(listenAddrs...); err != nil {
		return err
	}

	// list out our addresses
	addrs, err := host.Network().InterfaceListenAddresses()
	if err != nil {
		return err
	}
	log.Infof("Swarm listening at: %s", addrs)
	return nil
}

func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
	return dht.New(
		ctx, host,
		dhtopts.Datastore(dstore),
		dhtopts.Validator(validator),
	)
976
}
Jeromy's avatar
Jeromy committed
977

978 979 980 981 982 983 984
func constructClientDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.Batching, validator record.Validator) (routing.IpfsRouting, error) {
	return dht.New(
		ctx, host,
		dhtopts.Client(true),
		dhtopts.Datastore(dstore),
		dhtopts.Validator(validator),
	)
Jeromy's avatar
Jeromy committed
985 986
}

987
type RoutingOption func(context.Context, p2phost.Host, ds.Batching, record.Validator) (routing.IpfsRouting, error)
Jeromy's avatar
Jeromy committed
988

Jeromy's avatar
Jeromy committed
989
type DiscoveryOption func(context.Context, p2phost.Host) (discovery.Service, error)
Jeromy's avatar
Jeromy committed
990

991
var DHTOption RoutingOption = constructDHTRouting
Jeromy's avatar
Jeromy committed
992
var DHTClientOption RoutingOption = constructClientDHTRouting
993
var NilRouterOption RoutingOption = nilrouting.ConstructNilRouting