Commit e3c70dfd authored by Michael Avila's avatar Michael Avila

Introduce first strategic provider: do nothing

License: MIT
Signed-off-by: default avatarMichael Avila <davidmichaelavila@gmail.com>
parent dbfc1c39
package provider package provider
import "github.com/ipfs/go-cid" import (
"context"
"github.com/ipfs/go-cid"
)
type offlineProvider struct{} type offlineProvider struct{}
// NewOfflineProvider creates a Provider that does nothing // NewOfflineProvider creates a ProviderSystem that does nothing
func NewOfflineProvider() Provider { func NewOfflineProvider() System {
return &offlineProvider{} return &offlineProvider{}
} }
func (op *offlineProvider) Run() {} func (op *offlineProvider) Run() {
}
func (op *offlineProvider) Provide(cid cid.Cid) error { func (op *offlineProvider) Close() error {
return nil return nil
} }
func (op *offlineProvider) Close() error { func (op *offlineProvider) Provide(_ cid.Cid) error {
return nil
}
func (op *offlineProvider) Reprovide(_ context.Context) error {
return nil return nil
} }
// Package provider implements structures and methods to provide blocks,
// keep track of which blocks are provided, and to allow those blocks to
// be reprovided.
package provider package provider
import ( import (
"context" "context"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log" logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-routing"
) )
var log = logging.Logger("provider") var (
// StrategicProvidingEnabled toggles between the original providing mechanism
// and the new strategic providing system
StrategicProvidingEnabled = false
const provideOutgoingWorkerLimit = 8 log = logging.Logger("provider")
)
// Provider announces blocks to the network // Provider announces blocks to the network
type Provider interface { type Provider interface {
...@@ -24,56 +24,10 @@ type Provider interface { ...@@ -24,56 +24,10 @@ type Provider interface {
Close() error Close() error
} }
type provider struct { // Reprovider reannounces blocks to the network
ctx context.Context type Reprovider interface {
// the CIDs for which provide announcements should be made // Run is used to begin processing the reprovider work and waiting for reprovide triggers
queue *Queue Run()
// used to announce providing to the network // Trigger a reprovide
contentRouting routing.ContentRouting Trigger(context.Context) error
}
// NewProvider creates a provider that announces blocks to the network using a content router
func NewProvider(ctx context.Context, queue *Queue, contentRouting routing.ContentRouting) Provider {
return &provider{
ctx: ctx,
queue: queue,
contentRouting: contentRouting,
}
}
// Close stops the provider
func (p *provider) Close() error {
p.queue.Close()
return nil
}
// Start workers to handle provide requests.
func (p *provider) Run() {
p.handleAnnouncements()
}
// Provide the given cid using specified strategy.
func (p *provider) Provide(root cid.Cid) error {
p.queue.Enqueue(root)
return nil
}
// Handle all outgoing cids by providing (announcing) them
func (p *provider) handleAnnouncements() {
for workers := 0; workers < provideOutgoingWorkerLimit; workers++ {
go func() {
for p.ctx.Err() == nil {
select {
case <-p.ctx.Done():
return
case c := <-p.queue.Dequeue():
log.Info("announce - start - ", c)
if err := p.contentRouting.Provide(p.ctx, c, true); err != nil {
log.Warningf("Unable to provide entry: %s, %s", c, err)
}
log.Info("announce - end - ", c)
}
}
}()
}
} }
package provider package queue
import ( import (
"context" "context"
...@@ -10,8 +10,11 @@ import ( ...@@ -10,8 +10,11 @@ import (
datastore "github.com/ipfs/go-datastore" datastore "github.com/ipfs/go-datastore"
namespace "github.com/ipfs/go-datastore/namespace" namespace "github.com/ipfs/go-datastore/namespace"
query "github.com/ipfs/go-datastore/query" query "github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log"
) )
var log = logging.Logger("provider.queue")
// Queue provides a durable, FIFO interface to the datastore for storing cids // Queue provides a durable, FIFO interface to the datastore for storing cids
// //
// Durability just means that cids in the process of being provided when a // Durability just means that cids in the process of being provided when a
......
package provider package queue
import ( import (
"context" "context"
...@@ -8,8 +8,11 @@ import ( ...@@ -8,8 +8,11 @@ import (
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
datastore "github.com/ipfs/go-datastore" datastore "github.com/ipfs/go-datastore"
sync "github.com/ipfs/go-datastore/sync" sync "github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs-blocksutil"
) )
var blockGenerator = blocksutil.NewBlockGenerator()
func makeCids(n int) []cid.Cid { func makeCids(n int) []cid.Cid {
cids := make([]cid.Cid, 0, n) cids := make([]cid.Cid, 0, n)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
......
// Package simple implements structures and methods to provide blocks,
// keep track of which blocks are provided, and to allow those blocks to
// be reprovided.
package simple
import (
"context"
cid "github.com/ipfs/go-cid"
q "github.com/ipfs/go-ipfs/provider/queue"
logging "github.com/ipfs/go-log"
routing "github.com/libp2p/go-libp2p-routing"
)
var logP = logging.Logger("provider.simple")
const provideOutgoingWorkerLimit = 8
// Provider announces blocks to the network. CIDs are enqueued via
// Provide and announced asynchronously by the worker goroutines
// started in Run.
type Provider struct {
	// ctx bounds the lifetime of the announcement workers.
	ctx context.Context
	// the CIDs for which provide announcements should be made
	queue *q.Queue
	// used to announce providing to the network
	contentRouting routing.ContentRouting
}
// NewProvider creates a provider that announces blocks to the network
// using a content router. Call Run to start the announcement workers.
func NewProvider(ctx context.Context, queue *q.Queue, contentRouting routing.ContentRouting) *Provider {
	p := &Provider{
		ctx:            ctx,
		queue:          queue,
		contentRouting: contentRouting,
	}
	return p
}
// Close stops the provider. Closing the queue is sufficient: the
// announcement workers themselves exit when p.ctx is cancelled.
func (p *Provider) Close() error {
	p.queue.Close()
	return nil
}
// Run starts the background workers that handle provide requests.
// It returns immediately; the workers run until p.ctx is done.
func (p *Provider) Run() {
	p.handleAnnouncements()
}
// Provide enqueues the given cid for announcement. The actual network
// announcement happens asynchronously in the worker goroutines, so a
// nil return does not mean the CID has been announced yet.
func (p *Provider) Provide(root cid.Cid) error {
	p.queue.Enqueue(root)
	return nil
}
// Handle all outgoing cids by providing (announcing) them.
// Starts a fixed-size pool of worker goroutines; each repeatedly
// dequeues a CID and announces it via the content router until the
// provider's context is cancelled.
func (p *Provider) handleAnnouncements() {
	for workers := 0; workers < provideOutgoingWorkerLimit; workers++ {
		go func() {
			// The ctx.Err() check plus the ctx.Done() select case ensure
			// workers stop promptly whether they are idle or mid-loop.
			for p.ctx.Err() == nil {
				select {
				case <-p.ctx.Done():
					return
				case c := <-p.queue.Dequeue():
					logP.Info("announce - start - ", c)
					// Announcement failures are logged but not retried here.
					if err := p.contentRouting.Provide(p.ctx, c, true); err != nil {
						logP.Warningf("Unable to provide entry: %s, %s", c, err)
					}
					logP.Info("announce - end - ", c)
				}
			}
		}()
	}
}
package provider package simple_test
import ( import (
"context" "context"
...@@ -11,6 +11,10 @@ import ( ...@@ -11,6 +11,10 @@ import (
sync "github.com/ipfs/go-datastore/sync" sync "github.com/ipfs/go-datastore/sync"
blocksutil "github.com/ipfs/go-ipfs-blocksutil" blocksutil "github.com/ipfs/go-ipfs-blocksutil"
pstore "github.com/libp2p/go-libp2p-peerstore" pstore "github.com/libp2p/go-libp2p-peerstore"
q "github.com/ipfs/go-ipfs/provider/queue"
. "github.com/ipfs/go-ipfs/provider/simple"
) )
var blockGenerator = blocksutil.NewBlockGenerator() var blockGenerator = blocksutil.NewBlockGenerator()
...@@ -39,15 +43,15 @@ func TestAnnouncement(t *testing.T) { ...@@ -39,15 +43,15 @@ func TestAnnouncement(t *testing.T) {
defer ctx.Done() defer ctx.Done()
ds := sync.MutexWrap(datastore.NewMapDatastore()) ds := sync.MutexWrap(datastore.NewMapDatastore())
queue, err := NewQueue(ctx, "test", ds) queue, err := q.NewQueue(ctx, "test", ds)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
r := mockContentRouting() r := mockContentRouting()
provider := NewProvider(ctx, queue, r) prov := NewProvider(ctx, queue, r)
provider.Run() prov.Run()
cids := cid.NewSet() cids := cid.NewSet()
...@@ -58,7 +62,7 @@ func TestAnnouncement(t *testing.T) { ...@@ -58,7 +62,7 @@ func TestAnnouncement(t *testing.T) {
go func() { go func() {
for _, c := range cids.Keys() { for _, c := range cids.Keys() {
err = provider.Provide(c) err = prov.Provide(c)
// A little goroutine stirring to exercise some different states // A little goroutine stirring to exercise some different states
r := rand.Intn(10) r := rand.Intn(10)
time.Sleep(time.Microsecond * time.Duration(r)) time.Sleep(time.Microsecond * time.Duration(r))
......
package simple
import (
"context"
"fmt"
"time"
backoff "github.com/cenkalti/backoff"
cid "github.com/ipfs/go-cid"
cidutil "github.com/ipfs/go-cidutil"
blocks "github.com/ipfs/go-ipfs-blockstore"
pin "github.com/ipfs/go-ipfs/pin"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
merkledag "github.com/ipfs/go-merkledag"
verifcid "github.com/ipfs/go-verifcid"
routing "github.com/libp2p/go-libp2p-routing"
)
var logR = logging.Logger("reprovider.simple")
// KeyChanFunc is a function streaming CIDs to pass to content routing.
type KeyChanFunc func(context.Context) (<-chan cid.Cid, error)

// doneFunc receives the outcome of a triggered reprovide run so the
// caller blocked in Trigger can be released with the error (if any).
type doneFunc func(error)
// Reprovider reannounces blocks to the network on a fixed interval
// and on demand (via Trigger).
type Reprovider struct {
	// ctx bounds the lifetime of the Run loop.
	ctx context.Context
	// trigger carries on-demand reprovide requests from Trigger to Run.
	trigger chan doneFunc

	// The routing system to provide values through
	rsys routing.ContentRouting

	// keyProvider streams the CIDs to reannounce.
	keyProvider KeyChanFunc

	// tick is the reprovide interval; a zero tick disables periodic runs.
	tick time.Duration
}
// NewReprovider creates a new Reprovider instance that reannounces keys
// from keyProvider through rsys every reprovideInterval (a zero interval
// disables periodic reproviding; see Run). Call Run to start it.
func NewReprovider(ctx context.Context, reprovideInterval time.Duration, rsys routing.ContentRouting, keyProvider KeyChanFunc) *Reprovider {
	return &Reprovider{
		ctx:     ctx,
		trigger: make(chan doneFunc),

		rsys:        rsys,
		keyProvider: keyProvider,
		// Fix: parameter was previously misspelled "reprovideIniterval".
		tick: reprovideInterval,
	}
}
// Close the reprovider. There is nothing to tear down here: the Run
// loop exits when rp.ctx is cancelled.
func (rp *Reprovider) Close() error {
	return nil
}
// Run re-provides keys with 'tick' interval or when triggered.
// It blocks until rp.ctx is cancelled, so it is normally run in its
// own goroutine.
func (rp *Reprovider) Run() {
	// dont reprovide immediately.
	// may have just started the daemon and shutting it down immediately.
	// probability( up another minute | uptime ) increases with uptime.
	after := time.After(time.Minute)
	var done doneFunc
	for {
		if rp.tick == 0 {
			// A zero tick disables periodic reproviding: swap the timer for
			// a channel that never fires, so only Trigger can wake us.
			after = make(chan time.Time)
		}

		select {
		case <-rp.ctx.Done():
			return
		case done = <-rp.trigger:
		case <-after:
		}

		//'mute' the trigger channel so when `ipfs bitswap reprovide` is called
		//a 'reprovider is already running' error is returned
		unmute := rp.muteTrigger()

		err := rp.Reprovide()
		if err != nil {
			logR.Debug(err)
		}

		// done is non-nil only after a triggered pass; report the result
		// back to the caller blocked in Trigger.
		// NOTE(review): done is never reset to nil, so a later timer-driven
		// pass re-invokes the previous trigger's callback — looks
		// unintended; confirm.
		if done != nil {
			done(err)
		}

		unmute()

		after = time.After(rp.tick)
	}
}
// Reprovide registers all keys given by rp.keyProvider to libp2p content
// routing. Each key is retried with exponential backoff; the first key
// that still fails after retries aborts the run and its error is
// returned. Insecure (unvalidatable) CIDs are logged and skipped.
func (rp *Reprovider) Reprovide() error {
	keychan, err := rp.keyProvider(rp.ctx)
	if err != nil {
		return fmt.Errorf("failed to get key chan: %s", err)
	}
	for c := range keychan {
		// hash security
		if err := verifcid.ValidateCid(c); err != nil {
			logR.Errorf("insecure hash in reprovider, %s (%s)", c, err)
			continue
		}
		op := func() error {
			err := rp.rsys.Provide(rp.ctx, c, true)
			if err != nil {
				logR.Debugf("Failed to provide key: %s", err)
			}
			return err
		}

		// TODO: this backoff library does not respect our context, we should
		// eventually work contexts into it. low priority.
		err := backoff.Retry(op, backoff.NewExponentialBackOff())
		if err != nil {
			logR.Debugf("Providing failed after number of retries: %s", err)
			return err
		}
	}
	return nil
}
// Trigger starts the reprovision process in rp.Run and waits for it to
// finish (or for either context to be cancelled). The error returned by
// the reprovide pass is propagated back via the doneFunc callback.
func (rp *Reprovider) Trigger(ctx context.Context) error {
	progressCtx, done := context.WithCancel(ctx)

	var err error
	df := func(e error) {
		// Run calls this with the pass's result; record it and release
		// the wait below.
		err = e
		done()
	}

	select {
	case <-rp.ctx.Done():
		return context.Canceled
	case <-ctx.Done():
		return context.Canceled
	case rp.trigger <- df:
		// Sent successfully: block until Run reports completion (or the
		// caller's ctx cancels progressCtx).
		<-progressCtx.Done()
		return err
	}
}
// muteTrigger temporarily absorbs trigger requests while a reprovide
// pass is running: any Trigger arriving in the meantime immediately
// gets a "reprovider is already running" error. The returned cancel
// function stops the absorbing goroutine ("unmute").
func (rp *Reprovider) muteTrigger() context.CancelFunc {
	ctx, cf := context.WithCancel(rp.ctx)
	go func() {
		defer cf()
		for {
			select {
			case <-ctx.Done():
				return
			case done := <-rp.trigger:
				done(fmt.Errorf("reprovider is already running"))
			}
		}
	}()

	return cf
}
// Strategies

// NewBlockstoreProvider returns a key provider backed by
// bstore.AllKeysChan, i.e. every key in the blockstore.
func NewBlockstoreProvider(bstore blocks.Blockstore) KeyChanFunc {
	keyFunc := func(ctx context.Context) (<-chan cid.Cid, error) {
		return bstore.AllKeysChan(ctx)
	}
	return keyFunc
}
// NewPinnedProvider returns a provider supplying pinned keys. When
// onlyRoots is true only pin roots are streamed; otherwise indirectly
// pinned children are included as well.
func NewPinnedProvider(onlyRoots bool) func(pin.Pinner, ipld.DAGService) KeyChanFunc {
	return func(pinning pin.Pinner, dag ipld.DAGService) KeyChanFunc {
		return func(ctx context.Context) (<-chan cid.Cid, error) {
			set, err := pinSet(ctx, pinning, dag, onlyRoots)
			if err != nil {
				return nil, err
			}

			keys := make(chan cid.Cid)
			// Forward keys from the streaming set until it closes or the
			// caller's context is cancelled.
			go func() {
				defer close(keys)
				for c := range set.New {
					select {
					case <-ctx.Done():
						return
					case keys <- c:
					}
				}
			}()

			return keys, nil
		}
	}
}
// pinSet streams all pinned keys (direct and recursive roots, plus
// — unless onlyRoots — the children of recursive pins) into a
// cidutil.StreamingSet, which is returned immediately while a goroutine
// fills set.New in the background.
func pinSet(ctx context.Context, pinning pin.Pinner, dag ipld.DAGService, onlyRoots bool) (*cidutil.StreamingSet, error) {
	set := cidutil.NewStreamingSet()

	go func() {
		// Shadow ctx deliberately so the cancel applies only to this
		// goroutine's enumeration work.
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		defer close(set.New)

		for _, key := range pinning.DirectKeys() {
			set.Visitor(ctx)(key)
		}

		for _, key := range pinning.RecursiveKeys() {
			set.Visitor(ctx)(key)

			if !onlyRoots {
				// Walk the DAG below each recursive pin; the set's visitor
				// is used as the enumeration callback (presumably it also
				// dedupes already-seen CIDs — confirm in go-cidutil).
				err := merkledag.EnumerateChildren(ctx, merkledag.GetLinksWithDAG(dag), key, set.Visitor(ctx))
				if err != nil {
					logR.Errorf("reprovide indirect pins: %s", err)
					return
				}
			}
		}
	}()

	return set, nil
}
package simple_test
import (
"context"
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
"github.com/ipfs/go-ipfs-blockstore"
mock "github.com/ipfs/go-ipfs-routing/mock"
pstore "github.com/libp2p/go-libp2p-peerstore"
"github.com/libp2p/go-testutil"
. "github.com/ipfs/go-ipfs/provider/simple"
)
// TestReprovide checks that Reprovider.Reprovide announces every block
// in a blockstore: peer A reprovides one block over a mock routing
// server, and peer B must then find A as a provider of that block.
func TestReprovide(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mrserv := mock.NewServer()

	idA := testutil.RandIdentityOrFatal(t)
	idB := testutil.RandIdentityOrFatal(t)

	clA := mrserv.Client(idA)
	clB := mrserv.Client(idB)

	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	blk := blocks.NewBlock([]byte("this is a test"))
	err := bstore.Put(blk)
	if err != nil {
		t.Fatal(err)
	}

	keyProvider := NewBlockstoreProvider(bstore)
	reprov := NewReprovider(ctx, time.Hour, clA, keyProvider)
	// Call Reprovide directly (synchronously) rather than going through
	// the Run loop, so the test completes without timers or triggers.
	err = reprov.Reprovide()
	if err != nil {
		t.Fatal(err)
	}

	var providers []pstore.PeerInfo
	maxProvs := 100

	provChan := clB.FindProvidersAsync(ctx, blk.Cid(), maxProvs)
	for p := range provChan {
		providers = append(providers, p)
	}

	if len(providers) == 0 {
		t.Fatal("Should have gotten a provider")
	}

	if providers[0].ID != idA.ID() {
		t.Fatal("Somehow got the wrong peer back as a provider.")
	}
}
package provider
import (
"context"
"github.com/ipfs/go-cid"
)
// System defines the interface for interacting with the value
// provider system.
type System interface {
	// Run starts the system's background workers.
	Run()
	// Close shuts the system down.
	Close() error
	// Provide announces a single value to the network.
	Provide(cid.Cid) error
	// Reprovide reannounces previously provided values.
	Reprovide(context.Context) error
}
// system implements System by composing a Provider (new announcements)
// with a Reprovider (periodic/triggered reannouncements).
type system struct {
	provider   Provider
	reprovider Reprovider
}
// NewSystem constructs a new provider system from a provider and a
// reprovider.
func NewSystem(provider Provider, reprovider Reprovider) System {
	return &system{
		provider:   provider,
		reprovider: reprovider,
	}
}
// Run the provider system by running the provider and reprovider,
// each in its own goroutine. Returns immediately.
// NOTE(review): these goroutines are not stopped by Close; they appear
// to rely on the components' own contexts for shutdown — confirm.
func (s *system) Run() {
	go s.provider.Run()
	go s.reprovider.Run()
}
// Close the provider and reprovider. Currently only the provider is
// closed; the Reprovider interface exposes no Close method yet.
func (s *system) Close() error {
	// TODO: Close reprovider here
	return s.provider.Close()
}
// Provide announces a single value to the network by delegating to the
// underlying provider.
func (s *system) Provide(c cid.Cid) error {
	// Renamed the parameter from "cid" to "c": the old name shadowed the
	// imported cid package within this function.
	return s.provider.Provide(c)
}
// Reprovide all the previously provided values by triggering the
// reprovider and waiting for the pass to complete (or ctx to cancel).
func (s *system) Reprovide(ctx context.Context) error {
	return s.reprovider.Trigger(ctx)
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment