builder.go 4.87 KB
Newer Older
Jeromy's avatar
Jeromy committed
1 2 3
package core

import (
4 5
	"crypto/rand"
	"encoding/base64"
Jeromy's avatar
Jeromy committed
6
	"errors"
7 8 9
	"os"
	"syscall"
	"time"
Jeromy's avatar
Jeromy committed
10

11
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
12
	key "github.com/ipfs/go-ipfs/blocks/key"
13 14 15 16 17
	bserv "github.com/ipfs/go-ipfs/blockservice"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	dag "github.com/ipfs/go-ipfs/merkledag"
	path "github.com/ipfs/go-ipfs/path"
	pin "github.com/ipfs/go-ipfs/pin"
18
	repo "github.com/ipfs/go-ipfs/repo"
19
	cfg "github.com/ipfs/go-ipfs/repo/config"
Jeromy's avatar
Jeromy committed
20

21
	pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore"
22
	goprocessctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context"
23 24
	ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore"
	dsync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync"
25
	ci "gx/ipfs/QmUWER4r4qMvaCnX5zREcfyiWN7cXN9g3a7fkRqNz8qWPP/go-libp2p-crypto"
26
	retry "gx/ipfs/QmY6UVhgS2ZxhbM5qU23Fnz3daJwfyAuNErd3StmVofnAU/retry-datastore"
Jeromy's avatar
Jeromy committed
27
	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
Jeromy's avatar
Jeromy committed
28 29
)

30 31 32
// BuildCfg collects the options used by NewNode to construct an IpfsNode.
// The zero value is valid: fillDefaults supplies an in-memory repo, DHT
// routing, and the default host option for any field left unset.
type BuildCfg struct {
	// If Online is set, the node will have networking enabled.
	Online bool

	// If Permament then node should run more expensive processes
	// that will improve performance in long run (e.g. the blockstore
	// bloom filter — see setupNode).
	// NOTE(review): field name is a typo of "Permanent", but it is part of
	// the public API and renaming it would break callers.
	Permament bool

	// If NilRepo is set, a repo backed by a nil datastore will be constructed.
	// Mutually exclusive with setting Repo (fillDefaults rejects the combination).
	NilRepo bool

	// Routing selects the routing system; defaults to DHTOption.
	Routing RoutingOption
	// Host selects how the libp2p host is built; defaults to DefaultHostOption.
	Host    HostOption
	// Repo is the repository to build the node on. If nil, fillDefaults
	// creates a mock repo (null-backed when NilRepo is set).
	Repo    repo.Repo
}

46 47 48
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
Jeromy's avatar
Jeromy committed
49
	}
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
Jeromy's avatar
Jeromy committed
73 74
}

75
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

Jeromy's avatar
Jeromy committed
97
	return &repo.Mock{
Jeromy's avatar
Jeromy committed
98
		D: dstore,
99 100
		C: c,
	}, nil
Jeromy's avatar
Jeromy committed
101 102
}

103 104 105 106
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}
Jeromy's avatar
Jeromy committed
107

108 109 110 111
	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}
Jeromy's avatar
Jeromy committed
112

113 114 115 116
	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
Jeromy's avatar
Jeromy committed
117
		Peerstore: pstore.NewPeerstore(),
118 119 120 121
	}
	if cfg.Online {
		n.mode = onlineMode
	}
Jeromy's avatar
Jeromy committed
122

123 124
	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
Jeromy's avatar
Jeromy committed
125

126 127 128 129
	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}
Jeromy's avatar
Jeromy committed
130

131
	return n, nil
Jeromy's avatar
Jeromy committed
132 133
}

134 135 136 137 138 139 140 141 142
func isTooManyFDError(err error) bool {
	perr, ok := err.(*os.PathError)
	if ok && perr.Err == syscall.EMFILE {
		return true
	}

	return false
}

143 144 145 146
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
Jeromy's avatar
Jeromy committed
147
	}
148

149 150 151 152 153 154 155
	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

156
	var err error
157
	bs := bstore.NewBlockstore(rds)
158
	opts := bstore.DefaultCacheOpts()
159 160 161 162 163 164
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
165 166 167 168 169
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	n.Blockstore, err = bstore.CachedBlockstore(bs, ctx, opts)
170 171 172 173
	if err != nil {
		return err
	}

174 175 176 177 178 179 180 181 182
	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.RuntimeHashing(true)
	}

183
	if cfg.Online {
184
		do := setupDiscoveryOption(rcfg.Discovery)
185 186
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
187
		}
188 189
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
Jeromy's avatar
Jeromy committed
190
	}
191 192 193

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)
194 195 196

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
197 198 199 200 201
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicity on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
202
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
203 204 205
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

Jeromy's avatar
Jeromy committed
206 207 208 209 210
	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

211
	return nil
Jeromy's avatar
Jeromy committed
212
}