builder.go 5.67 KB
Newer Older
Jeromy's avatar
Jeromy committed
1 2 3
package core

import (
Jeromy's avatar
Jeromy committed
4
	"context"
5 6
	"crypto/rand"
	"encoding/base64"
Jeromy's avatar
Jeromy committed
7
	"errors"
8 9 10
	"os"
	"syscall"
	"time"
Jeromy's avatar
Jeromy committed
11

12 13 14
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	bserv "github.com/ipfs/go-ipfs/blockservice"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
15
	filestore "github.com/ipfs/go-ipfs/filestore"
16 17 18
	dag "github.com/ipfs/go-ipfs/merkledag"
	path "github.com/ipfs/go-ipfs/path"
	pin "github.com/ipfs/go-ipfs/pin"
19
	repo "github.com/ipfs/go-ipfs/repo"
20
	cfg "github.com/ipfs/go-ipfs/repo/config"
Jeromy's avatar
Jeromy committed
21
	uio "github.com/ipfs/go-ipfs/unixfs/io"
Jeromy's avatar
Jeromy committed
22

23
	ci "gx/ipfs/QmPGxZ1DP2w45WcogpW1h43BvseXbfke9N91qotpoQcUeS/go-libp2p-crypto"
Jeromy's avatar
Jeromy committed
24 25
	ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore"
	dsync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync"
26
	metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
27
	goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
Jeromy's avatar
Jeromy committed
28
	retry "gx/ipfs/QmUaGhKyLgTuYDdQsbKST1tYr2CVoix59rqaxdxqk2UbfK/retry-datastore"
29 30
	peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer"
	pstore "gx/ipfs/Qme1g4e3m2SmdiSGGU3vSWmUStwUjc5oECnEriaK9Xa1HU/go-libp2p-peerstore"
Jeromy's avatar
Jeromy committed
31 32
)

33 34 35
// BuildCfg describes how NewNode should assemble an IpfsNode.
// The zero value is usable: fillDefaults supplies an in-memory repo,
// DHT routing and the default host constructor.
type BuildCfg struct {
	// If online is set, the node will have networking enabled
	Online bool

	// ExtraOpts is a map of extra options used to configure the ipfs nodes creation
	ExtraOpts map[string]bool

	// If permament then node should run more expensive processes
	// that will improve performance in long run
	// NOTE(review): "Permament" is a typo for "Permanent", but it is an
	// exported field — renaming it would break external callers.
	Permament bool

	// If NilRepo is set, a repo backed by a nil datastore will be constructed
	NilRepo bool

	// Routing selects the routing system constructor; nil means DHTOption.
	Routing RoutingOption
	// Host selects the libp2p host constructor; nil means DefaultHostOption.
	Host HostOption
	// Repo is the backing repository; nil means an in-memory default repo.
	Repo repo.Repo
}

Jeromy's avatar
Jeromy committed
52 53 54 55 56 57 58 59
// getOpt reports whether the extra option named key is enabled.
//
// Indexing a nil map yields the value type's zero value, so no explicit
// nil check on ExtraOpts is needed: a nil map or a missing key both read
// as false.
func (cfg *BuildCfg) getOpt(key string) bool {
	return cfg.ExtraOpts[key]
}

60 61 62
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
Jeromy's avatar
Jeromy committed
63
	}
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
Jeromy's avatar
Jeromy committed
87 88
}

89
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
90 91 92 93 94 95
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

Jeromy's avatar
Jeromy committed
96
	pid, err := peer.IDFromPublicKey(pub)
97 98 99 100 101 102 103 104 105 106 107
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
Jeromy's avatar
Jeromy committed
108
	c.Identity.PeerID = pid.Pretty()
109 110
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

Jeromy's avatar
Jeromy committed
111
	return &repo.Mock{
Jeromy's avatar
Jeromy committed
112
		D: dstore,
113 114
		C: c,
	}, nil
Jeromy's avatar
Jeromy committed
115 116
}

117 118 119 120
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}
Jeromy's avatar
Jeromy committed
121

122 123 124 125
	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}
Jakub Sztandera's avatar
Jakub Sztandera committed
126
	ctx = metrics.CtxScope(ctx, "ipfs")
Jeromy's avatar
Jeromy committed
127

128 129 130 131
	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
Jeromy's avatar
Jeromy committed
132
		Peerstore: pstore.NewPeerstore(),
133 134 135 136
	}
	if cfg.Online {
		n.mode = onlineMode
	}
Jeromy's avatar
Jeromy committed
137

138 139
	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
Jeromy's avatar
Jeromy committed
140

141 142 143 144
	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}
Jeromy's avatar
Jeromy committed
145

146
	return n, nil
Jeromy's avatar
Jeromy committed
147 148
}

149 150 151 152 153 154 155 156 157
func isTooManyFDError(err error) bool {
	perr, ok := err.(*os.PathError)
	if ok && perr.Err == syscall.EMFILE {
		return true
	}

	return false
}

158 159 160 161
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
Jeromy's avatar
Jeromy committed
162
	}
163

164 165 166 167 168 169 170 171
	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	bs := bstore.NewBlockstore(rds)
172

173
	opts := bstore.DefaultCacheOpts()
174 175 176 177 178
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

Jeromy's avatar
Jeromy committed
179 180 181
	// TEMP: setting global sharding switch here
	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled

182
	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
183 184 185 186
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

187
	cbs, err := bstore.CachedBlockstore(ctx, bs, opts)
188 189 190 191
	if err != nil {
		return err
	}

192 193 194 195 196 197 198 199
	n.BaseBlocks = cbs
	n.GCLocker = bstore.NewGCLocker()
	n.Blockstore = bstore.NewGCBlockstore(cbs, n.GCLocker)

	if conf.Experimental.FilestoreEnabled {
		n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager())
		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
	}
200

201 202 203 204 205 206
	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
207
		bs.HashOnRead(true)
208 209
	}

210
	if cfg.Online {
211
		do := setupDiscoveryOption(rcfg.Discovery)
212
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do, cfg.getOpt("pubsub"), cfg.getOpt("mplex")); err != nil {
213
			return err
214
		}
215 216
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
Jeromy's avatar
Jeromy committed
217
	}
218 219 220

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)
221 222 223

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
224 225 226 227 228
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicity on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
229
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
230
	}
231
	n.Resolver = path.NewBasicResolver(n.DAG)
232

Jeromy's avatar
Jeromy committed
233 234 235 236 237
	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

238
	return nil
Jeromy's avatar
Jeromy committed
239
}