package core

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"os"
	"syscall"
	"time"

	filestore "github.com/ipfs/go-ipfs/filestore"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"
	cidv0v1 "github.com/ipfs/go-ipfs/thirdparty/cidv0v1"
	"github.com/ipfs/go-ipfs/thirdparty/verifbs"

	resolver "gx/ipfs/QmRG3XuGwT7GYuAqgWDJBKTzdaHMwAnc1x7J2KHEXNHxzG/go-path/resolver"
	bserv "gx/ipfs/QmVPeMNK9DfGLXDZzs2W4RoFWC9Zq1EnLGmLXtYtWrNdcW/go-blockservice"
	uio "gx/ipfs/QmXLCwhHh7bxRsBnCKNE9BAN87V44aSxXLquZYTtjr6fZ3/go-unixfs/io"
	dag "gx/ipfs/QmaDBne4KeY3UepeqSVKYpSmQGa3q9zP6x3LfVF2UjF3Hc/go-merkledag"

	ci "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto"
	offline "gx/ipfs/QmPpnbwgAuvhUkA9jGooR88ZwZtTUHXXvoQNKdjZC6nYku/go-ipfs-exchange-offline"
	goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
	bstore "gx/ipfs/QmSNLNnL3kq3A1NGdQA9AtgxM9CWKiiSEup3W435jCkRQS/go-ipfs-blockstore"
	record "gx/ipfs/QmSoeYGNm8v8jAF49hX7UwHwkXjoeobSrn9sya5NPPsxXP/go-libp2p-record"
	pstore "gx/ipfs/QmUymf8fJtideyv3z727BcZUifGBjMZMpCJqu3Gxk5aRUk/go-libp2p-peerstore"
	pstoremem "gx/ipfs/QmUymf8fJtideyv3z727BcZUifGBjMZMpCJqu3Gxk5aRUk/go-libp2p-peerstore/pstoremem"
	p2phost "gx/ipfs/QmVrjR2KMe57y4YyfHdYa3yKD278gN8W7CTiqSuYmxjA7F/go-libp2p-host"
	libp2p "gx/ipfs/QmXnpYYg2onGLXVxM4Q5PEFcx29k8zeJQkPeLAk9h9naxg/go-libp2p"
	ipns "gx/ipfs/QmZMJfrt7fU33oFQ9WvWnovhiiZ8T6qkWkFXNCFreJTzgT/go-ipns"
	ds "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore"
	retry "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore/retrystore"
	dsync "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore/sync"
	cfg "gx/ipfs/QmbK4EmM2Xx5fmbqK38TGP3PpY66r3tkXLZTcc7dF9mFwM/go-ipfs-config"
	peer "gx/ipfs/QmcqU6QUDSXprb1518vYDGczrTJTyGwLG9eUa5iNX4xUtS/go-libp2p-peer"
	metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface"
)

// BuildCfg holds the options used to construct an IpfsNode.
type BuildCfg struct {
	// If Online is set, the node will have networking enabled.
	Online bool

	// ExtraOpts is a map of extra options used to configure the IPFS node's
	// creation.
	ExtraOpts map[string]bool

	// If Permanent is set, the node should run more expensive processes
	// that will improve performance in the long run.
	Permanent bool

	// DisableEncryptedConnections disables connection encryption *entirely*.
	// DO NOT SET THIS UNLESS YOU'RE TESTING.
	DisableEncryptedConnections bool

	// If NilRepo is set, a repo backed by a nil datastore will be constructed.
	NilRepo bool

	Routing RoutingOption
	Host    HostOption
	Repo    repo.Repo
}
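// A minimal usage sketch (illustrative only: ctx is assumed to come from the
// caller, error handling is elided, and the "pubsub" key is one of the extra
// options queried via getOpt during setup):
//
//	node, err := NewNode(ctx, &BuildCfg{
//		Online:    true,
//		ExtraOpts: map[string]bool{"pubsub": true},
//	})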
// getOpt reports whether the named extra option was enabled.
func (cfg *BuildCfg) getOpt(key string) bool {
	if cfg.ExtraOpts == nil {
		return false
	}

	return cfg.ExtraOpts[key]
}

// fillDefaults populates any unset fields with usable defaults: an in-memory
// (or null, if NilRepo is set) repo, DHT routing, and the default host option.
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		} else {
			d = ds.NewMapDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}

// defaultRepo constructs a mock repo backed by the given datastore, with a
// freshly generated identity and the default bootstrap list.
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = pid.Pretty()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}

// NewNode constructs and returns an IpfsNode using the given cfg.
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	ctx = metrics.CtxScope(ctx, "ipfs")

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstoremem.NewPeerstore(),
	}

	n.RecordValidator = record.NamespacedValidator{
		"pk":   record.PublicKeyValidator{},
		"ipns": ipns.Validator{KeyBook: n.Peerstore},
	}

	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}

// isTooManyFDError reports whether err is an EMFILE ("too many open files")
// path error, which the retrying datastore treats as temporary.
func isTooManyFDError(err error) bool {
	perr, ok := err.(*os.PathError)
	if ok && perr.Err == syscall.EMFILE {
		return true
	}

	return false
}
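// A rough sketch of the one case isTooManyFDError treats as temporary (the
// error value is hypothetical; Retries and TempErrFunc are wired up in
// setupNode below):
//
//	err := &os.PathError{Op: "open", Path: "...", Err: syscall.EMFILE}
//	isTooManyFDError(err) // true, so the datastore operation is retried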
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	// Wrap the repo datastore so that transient "too many open files"
	// errors are retried instead of failing the operation outright.
	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	// hash security
	bs := bstore.NewBlockstore(rds)
	bs = &verifbs.VerifBS{Blockstore: bs}

	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// TEMP: setting global sharding switch here
	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permanent {
		opts.HasBloomFilterSize = 0
	}

	if !cfg.NilRepo {
		bs, err = bstore.CachedBlockstore(ctx, bs, opts)
		if err != nil {
			return err
		}
	}

	bs = bstore.NewIdStore(bs)
	bs = cidv0v1.NewBlockstore(bs)

	n.BaseBlocks = bs
	n.GCLocker = bstore.NewGCLocker()
	n.Blockstore = bstore.NewGCBlockstore(bs, n.GCLocker)

	if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled {
		// hash security
		n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager())
		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
		n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore}
	}

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	hostOption := cfg.Host
	if cfg.DisableEncryptedConnections {
		innerHostOption := hostOption
		hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
		}
		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
You will not be able to connect to any nodes configured to use encrypted connections`)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = resolver.NewBasicResolver(n.DAG)

	if cfg.Online {
		if err := n.startLateOnlineServices(ctx); err != nil {
			return err
		}
	}

	return n.loadFilesRoot()
}
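// A sketch of building a throwaway offline node, e.g. for tests (illustrative
// only): with NilRepo set, fillDefaults backs the repo with a null datastore,
// and since Online is false setupNode wires up the offline exchange.
//
//	n, err := NewNode(context.Background(), &BuildCfg{NilRepo: true})
//	if err != nil {
//		// handle err
//	}
//	defer n.Close()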