Commit e2ca9248 authored by tavit ohanian

Merge remote-tracking branch 'upstream/master' into reference

parents 7a694137 6f1e7bb7
version: 2.1
orbs:
  ci-go: ipfs/ci-go@0.1
workflows:
  version: 2
  test:
    jobs:
      - ci-go/build
      - ci-go/lint
      - ci-go/test
blank_issues_enabled: false
contact_links:
  - name: Getting Help on IPFS
    url: https://ipfs.io/help
    about: All information about how and where to get help on IPFS.
  - name: IPFS Official Forum
    url: https://discuss.ipfs.io
    about: Please post general questions, support requests, and discussions here.
---
name: Open an issue
about: Only for actionable issues relevant to this repository.
title: ''
labels: need/triage
assignees: ''
---
<!--
Hello! To ensure this issue is correctly addressed as soon as possible by the IPFS team, please try to make sure:
- This issue is relevant to this repository's topic or codebase.
- A clear description is provided. It should include as much relevant information as possible and a clear scope so the issue is actionable.
FOR GENERAL DISCUSSION, HELP OR QUESTIONS, please see the options at https://ipfs.io/help or head directly to https://discuss.ipfs.io.
(you can delete this section after reading)
-->
# Configuration for welcome - https://github.com/behaviorbot/welcome
# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted on first-time issues
newIssueWelcomeComment: >
  Thank you for submitting your first issue to this repository! A maintainer
  will be here shortly to triage and review.

  In the meantime, please double-check that you have provided all the
  necessary information to make this process easy! Any information that can
  help save additional round trips is useful! We currently aim to give
  initial feedback within **two business days**. If this does not happen, feel
  free to leave a comment.

  Please keep an eye on how this issue will be labeled, as labels give an
  overview of priorities, assignments, and additional actions requested by the
  maintainers:

  - "Priority" labels will show how urgent this is for the team.
  - "Status" labels will show if this is ready to be worked on, blocked, or in progress.
  - "Need" labels will indicate if additional input or analysis is required.

  Finally, remember to use https://discuss.ipfs.io if you just need general
  support.
# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted on PRs from first-time contributors in your repository
newPRWelcomeComment: >
  Thank you for submitting this PR!

  A maintainer will be here shortly to review it.

  We are super grateful, but we are also overloaded! Help us by making sure
  that:

  * The context for this PR is clear, with relevant discussion, decisions
    and stakeholders linked/mentioned.

  * Your contribution itself is clear (code comments, self-review for the
    rest) and in its best form. Follow the [code contribution
    guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines)
    if they apply.

  Getting other community members to do a review would be a great help too on
  complex PRs (you can ask in the chats/forums). If you are unsure about
  something, just leave us a comment.

  Next steps:

  * A maintainer will triage and assign priority to this PR, commenting on
    any missing things and potentially assigning a reviewer for high
    priority items.

  * The PR gets reviewed, discussed, and approved as needed.

  * The PR is merged by maintainers when it has been approved and comments addressed.

  We currently aim to provide initial feedback/triaging within **two business
  days**. Please keep an eye on any labeling actions, as these will indicate
  priorities and status of your contribution.

  We are very grateful for your contribution!
# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
# Currently disabled
#firstPRMergeComment: ""
1.1.31: QmTRbLgKn2BKNvr9z5JQ3uZC4FSyLkad9t7qTVPkbH1LRB
The MIT License (MIT)
Copyright (c) 2014-2018 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# go-blockservice
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
[![Coverage Status](https://codecov.io/gh/ipfs/go-blockservice/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-blockservice/branch/master)
[![Build Status](https://circleci.com/gh/ipfs/go-blockservice.svg?style=svg)](https://circleci.com/gh/ipfs/go-blockservice)
> go-blockservice provides a seamless interface to both local and remote storage backends.
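A minimal usage sketch follows. It is hedged: it assumes an in-memory datastore and the offline exchange from `go-ipfs-exchange-offline` purely for illustration; in a real node you would pass a bitswap instance as the exchange so missing blocks are fetched from peers, or `nil` to run in local-only mode.

```go
package main

import (
	"context"
	"fmt"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-blockservice"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
)

func main() {
	// Wrap an in-memory datastore in a blockstore.
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	// The offline exchange never touches the network; a bitswap
	// instance here would enable remote retrieval as well.
	bserv := blockservice.New(bstore, offline.Exchange(bstore))
	defer bserv.Close()

	blk := blocks.NewBlock([]byte("hello blockservice"))
	if err := bserv.AddBlock(blk); err != nil {
		panic(err)
	}
	got, err := bserv.GetBlock(context.Background(), blk.Cid())
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Cid())
}
```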
## Lead Maintainer
[Steven Allen](https://github.com/Stebalien)
## Table of Contents
- [TODO](#todo)
- [Contribute](#contribute)
- [License](#license)
## TODO
The interfaces here really would like to be merged with the blockstore interfaces.
The 'dagservice' constructor currently takes a blockservice, but it would be really nice
if it could just take a blockstore, and have this package implement a blockstore.
## Contribute
PRs are welcome!
Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
## License
MIT © Juan Batiz-Benet
// Package blockservice implements a BlockService interface that provides
// a single GetBlock/AddBlock interface that seamlessly retrieves data either
// locally or from a remote peer through the exchange.
package blockservice

import (
	"context"
	"errors"
	"io"
	"sync"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	exchange "github.com/ipfs/go-ipfs-exchange-interface"
	logging "github.com/ipfs/go-log"
	"github.com/ipfs/go-verifcid"
)

var log = logging.Logger("blockservice")

// ErrNotFound is returned when a block could not be found locally and, if an
// exchange is available, could not be retrieved from it either.
var ErrNotFound = errors.New("blockservice: key not found")

// BlockGetter is the common interface shared between blockservice sessions and
// the blockservice.
type BlockGetter interface {
	// GetBlock gets the requested block.
	GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error)

	// GetBlocks does a batch request for the given cids, returning blocks as
	// they are found, in no particular order.
	//
	// It may not be able to find all requested blocks (or the context may
	// be canceled). In that case, it will close the channel early. It is up
	// to the consumer to detect this situation and keep track which blocks
	// it has received and which it hasn't.
	GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block
}
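// Illustrative caller-side sketch (not part of this API; bg, ks, and ctx are
// hypothetical names for a BlockGetter, its requested CIDs, and a context).
// Because GetBlocks may close its channel early on error or cancellation,
// the consumer should track which CIDs have actually arrived, e.g. with a
// cid.Set:
//
//	wanted := cid.NewSet()
//	for _, c := range ks {
//		wanted.Add(c)
//	}
//	for blk := range bg.GetBlocks(ctx, ks) {
//		wanted.Remove(blk.Cid())
//		// process blk ...
//	}
//	// any CID still in wanted was not retrieved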
// BlockService is a hybrid block datastore. It stores data in a local
// datastore and may retrieve data from a remote Exchange.
// It uses an internal `blockstore.Blockstore` instance to store values.
type BlockService interface {
	io.Closer
	BlockGetter

	// Blockstore returns a reference to the underlying blockstore.
	Blockstore() blockstore.Blockstore

	// Exchange returns a reference to the underlying exchange (usually bitswap).
	Exchange() exchange.Interface

	// AddBlock puts a given block to the underlying datastore.
	AddBlock(o blocks.Block) error

	// AddBlocks adds a slice of blocks at the same time using batching
	// capabilities of the underlying datastore whenever possible.
	AddBlocks(bs []blocks.Block) error

	// DeleteBlock deletes the given block from the blockservice.
	DeleteBlock(o cid.Cid) error
}

type blockService struct {
	blockstore blockstore.Blockstore
	exchange   exchange.Interface
	// If checkFirst is true then first check that a block doesn't
	// already exist to avoid republishing the block on the exchange.
	checkFirst bool
}

// New creates a BlockService with the given blockstore and exchange.
// A nil exchange puts the service in local (offline) mode.
func New(bs blockstore.Blockstore, rem exchange.Interface) BlockService {
	if rem == nil {
		log.Debug("blockservice running in local (offline) mode.")
	}
	return &blockService{
		blockstore: bs,
		exchange:   rem,
		checkFirst: true,
	}
}

// NewWriteThrough creates a BlockService that guarantees writes will go
// through to the blockstore and are not skipped by cache checks.
func NewWriteThrough(bs blockstore.Blockstore, rem exchange.Interface) BlockService {
	if rem == nil {
		log.Debug("blockservice running in local (offline) mode.")
	}
	return &blockService{
		blockstore: bs,
		exchange:   rem,
		checkFirst: false,
	}
}
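// Caller-side sketch (hypothetical names bstore, exch, newBlocks): a
// write-through service suits workloads where blocks are known to be new,
// such as bulk imports, trading the default service's Has check for an
// unconditional write:
//
//	bserv := blockservice.NewWriteThrough(bstore, exch)
//	err := bserv.AddBlocks(newBlocks) // always writes to the blockstore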
// Blockstore returns the blockstore behind this blockservice.
func (s *blockService) Blockstore() blockstore.Blockstore {
	return s.blockstore
}

// Exchange returns the exchange behind this blockservice.
func (s *blockService) Exchange() exchange.Interface {
	return s.exchange
}

// NewSession creates a new session that allows for
// controlled exchange of wantlists to decrease the bandwidth overhead.
// If the current exchange is a SessionExchange, a new exchange
// session will be created. Otherwise, the current exchange will be used
// directly.
func NewSession(ctx context.Context, bs BlockService) *Session {
	exch := bs.Exchange()
	if sessEx, ok := exch.(exchange.SessionExchange); ok {
		return &Session{
			sessCtx: ctx,
			ses:     nil,
			sessEx:  sessEx,
			bs:      bs.Blockstore(),
		}
	}
	return &Session{
		ses:     exch,
		sessCtx: ctx,
		bs:      bs.Blockstore(),
	}
}
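// Caller-side sketch (hypothetical names bserv, cids): a session lets the
// exchange batch wantlists for a group of related fetches instead of
// broadcasting each request independently:
//
//	ses := blockservice.NewSession(ctx, bserv)
//	for blk := range ses.GetBlocks(ctx, cids) {
//		// blocks arrive in no particular order
//	}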
// AddBlock adds a particular block to the service, putting it into the datastore.
// TODO pass a context into this if the remote.HasBlock is going to remain here.
func (s *blockService) AddBlock(o blocks.Block) error {
	c := o.Cid()
	// hash security
	err := verifcid.ValidateCid(c)
	if err != nil {
		return err
	}
	if s.checkFirst {
		if has, err := s.blockstore.Has(c); has || err != nil {
			return err
		}
	}

	if err := s.blockstore.Put(o); err != nil {
		return err
	}

	log.Event(context.TODO(), "BlockService.BlockAdded", c)

	if s.exchange != nil {
		if err := s.exchange.HasBlock(o); err != nil {
			log.Errorf("HasBlock: %s", err.Error())
		}
	}

	return nil
}
func (s *blockService) AddBlocks(bs []blocks.Block) error {
	// hash security
	for _, b := range bs {
		err := verifcid.ValidateCid(b.Cid())
		if err != nil {
			return err
		}
	}
	var toput []blocks.Block
	if s.checkFirst {
		toput = make([]blocks.Block, 0, len(bs))
		for _, b := range bs {
			has, err := s.blockstore.Has(b.Cid())
			if err != nil {
				return err
			}
			if !has {
				toput = append(toput, b)
			}
		}
	} else {
		toput = bs
	}

	if len(toput) == 0 {
		return nil
	}

	err := s.blockstore.PutMany(toput)
	if err != nil {
		return err
	}

	if s.exchange != nil {
		for _, o := range toput {
			log.Event(context.TODO(), "BlockService.BlockAdded", o.Cid())
			if err := s.exchange.HasBlock(o); err != nil {
				log.Errorf("HasBlock: %s", err.Error())
			}
		}
	}
	return nil
}
// GetBlock retrieves a particular block from the service,
// getting it from the datastore using the key (hash).
func (s *blockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
	log.Debugf("BlockService GetBlock: '%s'", c)

	var f func() exchange.Fetcher
	if s.exchange != nil {
		f = s.getExchange
	}

	return getBlock(ctx, c, s.blockstore, f) // hash security
}

func (s *blockService) getExchange() exchange.Fetcher {
	return s.exchange
}

func getBlock(ctx context.Context, c cid.Cid, bs blockstore.Blockstore, fget func() exchange.Fetcher) (blocks.Block, error) {
	err := verifcid.ValidateCid(c) // hash security
	if err != nil {
		return nil, err
	}

	block, err := bs.Get(c)
	if err == nil {
		return block, nil
	}

	if err == blockstore.ErrNotFound && fget != nil {
		f := fget() // Don't load the exchange until we have to.
		// TODO be careful checking ErrNotFound. If the underlying
		// implementation changes, this will break.
		log.Debug("Blockservice: Searching bitswap")
		blk, err := f.GetBlock(ctx, c)
		if err != nil {
			if err == blockstore.ErrNotFound {
				return nil, ErrNotFound
			}
			return nil, err
		}
		log.Event(ctx, "BlockService.BlockFetched", c)
		return blk, nil
	}

	log.Debug("Blockservice GetBlock: Not found")
	if err == blockstore.ErrNotFound {
		return nil, ErrNotFound
	}

	return nil, err
}
// GetBlocks gets a list of blocks asynchronously and returns them through
// the returned channel.
// NB: No guarantees are made about order.
func (s *blockService) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block {
	var f func() exchange.Fetcher
	if s.exchange != nil {
		f = s.getExchange
	}

	return getBlocks(ctx, ks, s.blockstore, f) // hash security
}

func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, fget func() exchange.Fetcher) <-chan blocks.Block {
	out := make(chan blocks.Block)

	go func() {
		defer close(out)

		allValid := true
		for _, c := range ks {
			if err := verifcid.ValidateCid(c); err != nil {
				allValid = false
				break
			}
		}

		if !allValid {
			ks2 := make([]cid.Cid, 0, len(ks))
			for _, c := range ks {
				// hash security
				if err := verifcid.ValidateCid(c); err == nil {
					ks2 = append(ks2, c)
				} else {
					log.Errorf("unsafe CID (%s) passed to blockService.GetBlocks: %s", c, err)
				}
			}
			ks = ks2
		}

		var misses []cid.Cid
		for _, c := range ks {
			hit, err := bs.Get(c)
			if err != nil {
				misses = append(misses, c)
				continue
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}

		if len(misses) == 0 || fget == nil {
			return
		}

		f := fget() // don't load exchange unless we have to
		rblocks, err := f.GetBlocks(ctx, misses)
		if err != nil {
			log.Debugf("Error with GetBlocks: %s", err)
			return
		}

		for b := range rblocks {
			log.Event(ctx, "BlockService.BlockFetched", b.Cid())
			select {
			case out <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}
// DeleteBlock deletes a block in the blockservice from the datastore.
func (s *blockService) DeleteBlock(c cid.Cid) error {
	err := s.blockstore.DeleteBlock(c)
	if err == nil {
		log.Event(context.TODO(), "BlockService.BlockDeleted", c)
	}
	return err
}

func (s *blockService) Close() error {
	log.Debug("blockservice is shutting down...")
	if s.exchange == nil {
		// Local (offline) mode: no exchange to close.
		return nil
	}
	return s.exchange.Close()
}
// Session is a helper type to provide higher level access to bitswap sessions.
type Session struct {
	bs      blockstore.Blockstore
	ses     exchange.Fetcher
	sessEx  exchange.SessionExchange
	sessCtx context.Context
	lk      sync.Mutex
}

func (s *Session) getSession() exchange.Fetcher {
	s.lk.Lock()
	defer s.lk.Unlock()
	if s.ses == nil {
		// Lazily create the exchange session on the first remote fetch.
		s.ses = s.sessEx.NewSession(s.sessCtx)
	}

	return s.ses
}

// GetBlock gets a block in the context of a request session.
func (s *Session) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
	var f func() exchange.Fetcher
	if s.sessEx != nil {
		f = s.getSession
	}
	return getBlock(ctx, c, s.bs, f) // hash security
}

// GetBlocks gets blocks in the context of a request session.
func (s *Session) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block {
	var f func() exchange.Fetcher
	if s.sessEx != nil {
		f = s.getSession
	}
	return getBlocks(ctx, ks, s.bs, f) // hash security
}

var _ BlockGetter = (*Session)(nil)
package blockservice

import (
	"context"
	"testing"

	blocks "github.com/ipfs/go-block-format"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	butil "github.com/ipfs/go-ipfs-blocksutil"
	exchange "github.com/ipfs/go-ipfs-exchange-interface"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
)

func TestWriteThroughWorks(t *testing.T) {
	bstore := &PutCountingBlockstore{
		blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())),
		0,
	}
	bstore2 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	exch := offline.Exchange(bstore2)
	bserv := NewWriteThrough(bstore, exch)
	bgen := butil.NewBlockGenerator()

	block := bgen.Next()
	t.Logf("PutCounter: %d", bstore.PutCounter)
	err := bserv.AddBlock(block)
	if err != nil {
		t.Fatal(err)
	}
	if bstore.PutCounter != 1 {
		t.Fatalf("expected just one Put call, have: %d", bstore.PutCounter)
	}

	err = bserv.AddBlock(block)
	if err != nil {
		t.Fatal(err)
	}
	if bstore.PutCounter != 2 {
		t.Fatalf("Put should have been called again; expected 2, got: %d", bstore.PutCounter)
	}
}
func TestLazySessionInitialization(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bstore2 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bstore3 := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	session := offline.Exchange(bstore2)
	exchange := offline.Exchange(bstore3)
	sessionExch := &fakeSessionExchange{Interface: exchange, session: session}
	bservSessEx := NewWriteThrough(bstore, sessionExch)
	bgen := butil.NewBlockGenerator()

	block := bgen.Next()
	err := bstore.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	block2 := bgen.Next()
	err = session.HasBlock(block2)
	if err != nil {
		t.Fatal(err)
	}

	bsession := NewSession(ctx, bservSessEx)
	if bsession.ses != nil {
		t.Fatal("Session exchange should not have instantiated a session immediately")
	}
	returnedBlock, err := bsession.GetBlock(ctx, block.Cid())
	if err != nil {
		t.Fatal("Should have fetched block locally")
	}
	if returnedBlock.Cid() != block.Cid() {
		t.Fatal("Got incorrect block")
	}
	if bsession.ses != nil {
		t.Fatal("Session exchange should not have instantiated a session when the local store had the block")
	}
	returnedBlock, err = bsession.GetBlock(ctx, block2.Cid())
	if err != nil {
		t.Fatal("Should have fetched block remotely")
	}
	if returnedBlock.Cid() != block2.Cid() {
		t.Fatal("Got incorrect block")
	}
	if bsession.ses != session {
		t.Fatal("Should have initialized session to fetch block")
	}
}
var _ blockstore.Blockstore = (*PutCountingBlockstore)(nil)

// PutCountingBlockstore wraps a Blockstore and counts Put calls, letting
// tests verify whether writes reached the underlying store.
type PutCountingBlockstore struct {
	blockstore.Blockstore
	PutCounter int
}

func (bs *PutCountingBlockstore) Put(block blocks.Block) error {
	bs.PutCounter++
	return bs.Blockstore.Put(block)
}

var _ exchange.SessionExchange = (*fakeSessionExchange)(nil)

type fakeSessionExchange struct {
	exchange.Interface
	session exchange.Fetcher
}

func (fe *fakeSessionExchange) NewSession(ctx context.Context) exchange.Fetcher {
	if ctx == nil {
		panic("nil context")
	}
	return fe.session
}
func TestNilExchange(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	bgen := butil.NewBlockGenerator()
	block := bgen.Next()

	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv := NewWriteThrough(bs, nil)
	sess := NewSession(ctx, bserv)
	_, err := sess.GetBlock(ctx, block.Cid())
	if err != ErrNotFound {
		t.Fatal("expected block to not be found")
	}

	err = bs.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	b, err := sess.GetBlock(ctx, block.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if b.Cid() != block.Cid() {
		t.Fatal("got the wrong block")
	}
}
package bstest

import (
	"bytes"
	"context"
	"fmt"
	"testing"
	"time"

	. "github.com/ipfs/go-blockservice"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	u "github.com/ipfs/go-ipfs-util"
)

func newObject(data []byte) blocks.Block {
	return blocks.NewBlock(data)
}

func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	o := newObject([]byte("beep boop"))
	h := cid.NewCidV0(u.Hash([]byte("beep boop")))
	if !o.Cid().Equals(h) {
		t.Error("Block key and data multihash key not equal")
	}

	err := bs.AddBlock(o)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, o.Cid())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if !o.Cid().Equals(b2.Cid()) {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(o.RawData(), b2.RawData()) {
		t.Error("Block data is not equal.")
	}
}
func makeObjects(n int) []blocks.Block {
	var out []blocks.Block
	for i := 0; i < n; i++ {
		out = append(out, newObject([]byte(fmt.Sprintf("object %d", i))))
	}
	return out
}

func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(4)
	for _, s := range servs {
		defer s.Close()
	}
	objs := makeObjects(50)

	var cids []cid.Cid
	for _, o := range objs {
		cids = append(cids, o.Cid())
		err := servs[0].AddBlock(o)
		if err != nil {
			t.Fatal(err)
		}
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, cids)
		gotten := make(map[string]blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Cid().KeyString()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Cid().KeyString()] = blk
		}
		if len(gotten) != len(objs) {
			t.Fatalf("Didn't get enough blocks back: %d/%d", len(gotten), len(objs))
		}
	}
}
package bstest

import (
	. "github.com/ipfs/go-blockservice"

	testinstance "github.com/ipfs/go-bitswap/testinstance"
	tn "github.com/ipfs/go-bitswap/testnet"
	delay "github.com/ipfs/go-ipfs-delay"
	mockrouting "github.com/ipfs/go-ipfs-routing/mock"
)

// Mocks returns |n| connected mock Blockservices.
func Mocks(n int) []BlockService {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	sg := testinstance.NewTestInstanceGenerator(net)

	instances := sg.Instances(n)

	var servs []BlockService
	for _, i := range instances {
		servs = append(servs, New(i.Blockstore(), i.Exchange))
	}
	return servs
}
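// Hypothetical usage sketch (blk and ctx are placeholder names): a block
// added on one mock instance can be fetched from another through the
// zero-delay virtual network, as TestGetBlocksSequential above exercises:
//
//	servs := Mocks(2)
//	defer servs[0].Close()
//	defer servs[1].Close()
//	_ = servs[0].AddBlock(blk)
//	got, err := servs[1].GetBlock(ctx, blk.Cid())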