Commit 8d05fa47 authored by Lars Gierth

cmd: remove dead ipfs_routingd and ipfs_bootstrapd

License: MIT
Signed-off-by: Lars Gierth <larsg@systemli.org>
parent 7d434dae
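
// Gateway daemon: runs an online IPFS node and serves the HTTP gateway,
// optionally running periodic repo garbage collection, periodically
// re-adding a local assets directory, and refreshing a blocklist of keys
// the gateway should refuse to serve.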
package main

import (
	"bufio"
	"errors"
	"flag"
	"log"
	"os"
	"time"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	core "github.com/ipfs/go-ipfs/core"
	corehttp "github.com/ipfs/go-ipfs/core/corehttp"
	corerepo "github.com/ipfs/go-ipfs/core/corerepo"
	coreunix "github.com/ipfs/go-ipfs/core/coreunix"
	config "github.com/ipfs/go-ipfs/repo/config"
	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
)
var (
	blocklistFilepath        = flag.String("blocklist", "", "keys that should not be served by the gateway")
	writable                 = flag.Bool("writable", false, "enable writing objects (with POST, PUT and DELETE)")
	refreshBlockListInterval = flag.Duration("refresh-blocklist-interval", 30*time.Second, "refresh blocklist")
	refreshAssetsInterval    = flag.Duration("refresh-assets-interval", 30*time.Second, "refresh assets")
	garbageCollectInterval   = flag.Duration("gc-interval", 24*time.Hour, "frequency of repo garbage collection")
	assetsPath               = flag.String("assets-path", "", "if provided, periodically adds contents of path to IPFS")
	host                     = flag.String("host", "/ip4/0.0.0.0/tcp/8080", "override the HTTP host listening address")
	performGC                = flag.Bool("gc", false, "perform garbage collection")
	nBitsForKeypair          = flag.Int("b", 1024, "number of bits for keypair (if repo is uninitialized)")
)
func main() {
	flag.Parse()

	if *assetsPath == "" {
		log.Println("assets-path not provided. hosting gateway without file server functionality...")
	}
	if err := run(); err != nil {
		log.Println(err)
	}
}
func run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	repoPath, err := fsrepo.BestKnownPath()
	if err != nil {
		return err
	}
	if !fsrepo.IsInitialized(repoPath) {
		conf, err := config.Init(os.Stdout, *nBitsForKeypair)
		if err != nil {
			return err
		}
		if err := fsrepo.Init(repoPath, conf); err != nil {
			return err
		}
	}
	repo, err := fsrepo.Open(repoPath)
	if err != nil { // owned by node
		return err
	}

	node, err := core.NewIPFSNode(ctx, core.Online(repo))
	if err != nil {
		return err
	}
	defer node.Close()

	if *performGC {
		if err := runGarbageCollectorWorker(ctx, node); err != nil {
			return err
		}
	}
	if *assetsPath != "" {
		if err := runFileServerWorker(ctx, node); err != nil {
			return err
		}
	}

	blocklist := &corehttp.BlockList{}
	gateway := corehttp.NewGateway(corehttp.GatewayConfig{
		Writable:  *writable,
		BlockList: blocklist,
	})
	if err := runBlockListWorker(blocklist, *blocklistFilepath); err != nil {
		return err
	}

	opts := []corehttp.ServeOption{
		corehttp.VersionOption(),
		corehttp.IPNSHostnameOption(),
		gateway.ServeOption(),
	}
	return corehttp.ListenAndServe(node, *host, opts...)
}
func runGarbageCollectorWorker(ctx context.Context, node *core.IpfsNode) error {
	go func() {
		for range time.Tick(*garbageCollectInterval) {
			if err := corerepo.GarbageCollect(node, ctx); err != nil {
				log.Println("failed to run garbage collection", err)
			}
		}
	}()
	return nil
}
func runFileServerWorker(ctx context.Context, node *core.IpfsNode) error {
	fi, err := os.Stat(*assetsPath)
	if err != nil {
		return err
	}
	if !fi.IsDir() {
		return errors.New("asset path must be a directory")
	}
	go func() {
		for range time.Tick(*refreshAssetsInterval) {
			_, err := coreunix.AddR(node, *assetsPath)
			if err != nil {
				log.Println(err)
			}
		}
	}()
	return nil
}
func runBlockListWorker(blocklist *corehttp.BlockList, filepath string) error {
	if filepath == "" {
		return nil
	}
	go func() {
		for range time.Tick(*refreshBlockListInterval) {
			log.Println("updating the blocklist...")
			func() { // in a func to allow defer f.Close()
				f, err := os.Open(filepath)
				if err != nil {
					log.Println(err)
					return // skip this tick rather than scanning (and closing) a nil file
				}
				defer f.Close()

				scanner := bufio.NewScanner(f)
				blocked := make(map[string]struct{}) // Implement using Bloom Filter hybrid if blocklist gets large
				for scanner.Scan() {
					t := scanner.Text()
					blocked[t] = struct{}{}
				}

				// If an error occurred, do not change the existing decider. This
				// is to avoid accidentally clearing the list if the deploy is
				// botched.
				if err := scanner.Err(); err != nil {
					log.Println(err)
				} else {
					blocklist.SetDecider(func(s string) bool {
						_, ok := blocked[s]
						return !ok
					})
					log.Printf("updated the blocklist (%d entries)", len(blocked))
				}
			}()
		}
	}()
	return nil
}
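
// Supernode routing daemon: runs an IPFS node that serves supernode
// routing requests, backed by either a Redis-based expiring datastore or
// an S3 bucket, and blocks until interrupted.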
package main

import (
	"errors"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"time"

	aws "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/crowdmob/goamz/aws"
	s3 "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/crowdmob/goamz/s3"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/fzzy/radix/redis"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	core "github.com/ipfs/go-ipfs/core"
	corerouting "github.com/ipfs/go-ipfs/core/corerouting"
	config "github.com/ipfs/go-ipfs/repo/config"
	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
	redisds "github.com/ipfs/go-ipfs/thirdparty/redis-datastore"
	s3datastore "github.com/ipfs/go-ipfs/thirdparty/s3-datastore"
	ds2 "github.com/ipfs/go-ipfs/util/datastore2"
)
var (
	ttl             = flag.Duration("ttl", 12*time.Hour, "period after which routing keys expire")
	redisHost       = flag.String("redis-host", "localhost:6379", "redis tcp host address:port")
	redisPassword   = flag.String("redis-pass", "", "redis password if required")
	datastoreOption = flag.String("datastore", "redis", "routing datastore (also available: aws)")
	s3bucket        = flag.String("aws-bucket", "", "S3 bucket for aws routing datastore")
	s3region        = flag.String("aws-region", aws.USWest2.Name, "S3 region")
	nBitsForKeypair = flag.Int("b", 1024, "number of bits for keypair (if repo is uninitialized)")
)
func main() {
	flag.Parse()
	if err := run(); err != nil {
		log.Println(err)
	}
}
func run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	repoPath, err := fsrepo.BestKnownPath()
	if err != nil {
		return err
	}
	if !fsrepo.IsInitialized(repoPath) {
		conf, err := config.Init(os.Stdout, *nBitsForKeypair)
		if err != nil {
			return err
		}
		if err := fsrepo.Init(repoPath, conf); err != nil {
			return err
		}
	}
	repo, err := fsrepo.Open(repoPath)
	if err != nil { // owned by node
		return err
	}

	var ds datastore.ThreadSafeDatastore
	switch *datastoreOption {
	case "redis":
		redisClient, err := redis.Dial("tcp", *redisHost)
		if err != nil {
			return fmt.Errorf("could not connect to redis: %s", err)
		}
		if *redisPassword != "" {
			if err := redisClient.Cmd("AUTH", *redisPassword).Err; err != nil {
				return err
			}
		}
		expiring, err := redisds.NewExpiringDatastore(redisClient, *ttl)
		if err != nil {
			return err
		}
		ds = expiring
	case "aws":
		s3raw, err := makeS3Datastore()
		if err != nil {
			return err
		}
		s3ds, err := enhanceDatastore(s3raw)
		if err != nil {
			return err
		}
		ds = s3ds
	default:
		return errors.New("unsupported datastore type")
	}

	node, err := core.NewIPFSNode(ctx,
		core.OnlineWithOptions(
			repo,
			corerouting.SupernodeServer(ds),
			core.DefaultHostOption),
	)
	if err != nil {
		return err
	}
	defer node.Close()

	// Block until interrupted. The channel is buffered so signal.Notify
	// cannot drop the signal; note that SIGKILL cannot actually be trapped.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Kill, os.Interrupt)
	<-interrupt
	return nil
}
func makeS3Datastore() (*s3datastore.S3Datastore, error) {
	// FIXME get ENV through flags?
	auth, err := aws.EnvAuth()
	if err != nil {
		return nil, err
	}
	s3c := s3.New(auth, aws.Regions[*s3region])
	b := s3c.Bucket(*s3bucket)
	exists, err := b.Exists("initialized") // TODO lazily instantiate
	if err != nil {
		return nil, err
	}
	if !exists {
		if err := b.PutBucket(s3.PublicRead); err != nil {
			switch e := err.(type) {
			case *s3.Error:
				log.Println(e.Code)
			default:
				return nil, err
			}
		}
		// TODO create the initial value
	}
	return &s3datastore.S3Datastore{
		Bucket: *s3bucket,
		Client: s3c,
	}, nil
}
func enhanceDatastore(d datastore.ThreadSafeDatastore) (datastore.ThreadSafeDatastore, error) {
	// TODO cache
	return ds2.CloserWrap(d), nil
}