diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..3c342889d2b348ca77726395a833d6bfd1dee82c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+*~
+*.log
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool
+*.out
diff --git a/dsindex/error.go b/dsindex/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3b685bb95a8389da01fdfd57354fab8f1593ab6
--- /dev/null
+++ b/dsindex/error.go
@@ -0,0 +1,8 @@
+package dsindex
+
+import "errors"
+
+var (
+	ErrEmptyKey   = errors.New("key is empty")
+	ErrEmptyValue = errors.New("value is empty")
+)
diff --git a/dsindex/indexer.go b/dsindex/indexer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e48af2e17b1d7d381eeb25d67dfa6a6be20a35f3
--- /dev/null
+++ b/dsindex/indexer.go
@@ -0,0 +1,285 @@
+// Package dsindex provides secondary indexing functionality for a datastore.
+package dsindex
+
+import (
+	"context"
+	"fmt"
+	"path"
+
+	ds "github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/namespace"
+	"github.com/ipfs/go-datastore/query"
+	"github.com/multiformats/go-multibase"
+)
+
+// Indexer maintains a secondary index.  An index is a collection of
+// key-value mappings, where each key is a secondary index entry that maps
+// to one or more values, and each value is a unique primary key being
+// indexed.
+type Indexer interface {
+	// Add adds the specified value to the key.
+	Add(ctx context.Context, key, value string) error
+
+	// Delete deletes the specified value from the key.  If the value is not in
+	// the datastore, this method returns no error.
+	Delete(ctx context.Context, key, value string) error
+
+	// DeleteKey deletes all values in the given key.  If a key is not in the
+	// datastore, this method returns no error.  Returns a count of values that
+	// were deleted.
+	DeleteKey(ctx context.Context, key string) (count int, err error)
+
+	// DeleteAll deletes all keys managed by this Indexer.  Returns a count of
+	// the values that were deleted.
+	DeleteAll(ctx context.Context) (count int, err error)
+
+	// ForEach calls the function for each value in the specified key, until
+	// there are no more values, or until the function returns false.  If key
+	// is empty string, then all keys are iterated.
+	ForEach(ctx context.Context, key string, fn func(key, value string) bool) error
+
+	// HasValue determines if the key contains the specified value.
+	HasValue(ctx context.Context, key, value string) (bool, error)
+
+	// HasAny determines if any value is in the specified key.  If key is
+	// empty string, then all values are searched.
+	HasAny(ctx context.Context, key string) (bool, error)
+
+	// Search returns all values for the given key.
+	Search(ctx context.Context, key string) (values []string, err error)
+}
+
+// indexer is a simple implementation of Indexer.  This implementation relies
+// on the underlying data store to support efficient querying by prefix.
+//
+// TODO: Consider adding caching
+type indexer struct {
+	dstore ds.Datastore
+}
+
+// New creates a new datastore index.  All indexes are stored under the
+// specified index name.
+//
+// To persist the actions of calling Indexer functions, it is necessary to call
+// dstore.Sync.
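+//
+// A minimal usage sketch (hypothetical names, mirroring this package's
+// tests; assumes ctx and an existing ds.Datastore named dstore):
+//
+//	nameIndex := dsindex.New(dstore, ds.NewKey("/data/nameindex"))
+//	if err := nameIndex.Add(ctx, "alice", "a1"); err != nil {
+//		return err
+//	}
+//	values, err := nameIndex.Search(ctx, "alice") // values == ["a1"]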
+func New(dstore ds.Datastore, name ds.Key) Indexer {
+	return &indexer{
+		dstore: namespace.Wrap(dstore, name),
+	}
+}
+
+func (x *indexer) Add(ctx context.Context, key, value string) error {
+	if key == "" {
+		return ErrEmptyKey
+	}
+	if value == "" {
+		return ErrEmptyValue
+	}
+	dsKey := ds.NewKey(encode(key)).ChildString(encode(value))
+	return x.dstore.Put(dsKey, []byte{})
+}
+
+func (x *indexer) Delete(ctx context.Context, key, value string) error {
+	if key == "" {
+		return ErrEmptyKey
+	}
+	if value == "" {
+		return ErrEmptyValue
+	}
+	return x.dstore.Delete(ds.NewKey(encode(key)).ChildString(encode(value)))
+}
+
+func (x *indexer) DeleteKey(ctx context.Context, key string) (int, error) {
+	if key == "" {
+		return 0, ErrEmptyKey
+	}
+	return x.deletePrefix(ctx, encode(key))
+}
+
+func (x *indexer) DeleteAll(ctx context.Context) (int, error) {
+	return x.deletePrefix(ctx, "")
+}
+
+func (x *indexer) ForEach(ctx context.Context, key string, fn func(key, value string) bool) error {
+	if key != "" {
+		key = encode(key)
+	}
+
+	q := query.Query{
+		Prefix:   key,
+		KeysOnly: true,
+	}
+	results, err := x.dstore.Query(q)
+	if err != nil {
+		return err
+	}
+
+	for {
+		r, ok := results.NextSync()
+		if !ok {
+			break
+		}
+		if r.Error != nil {
+			err = r.Error
+			break
+		}
+		if ctx.Err() != nil {
+			err = ctx.Err()
+			break
+		}
+		ent := r.Entry
+		// Assign to the outer err (no :=) so that a decode failure is
+		// returned to the caller instead of being lost to shadowing.
+		var decIdx, decKey string
+		decIdx, err = decode(path.Base(path.Dir(ent.Key)))
+		if err != nil {
+			err = fmt.Errorf("cannot decode index: %v", err)
+			break
+		}
+		decKey, err = decode(path.Base(ent.Key))
+		if err != nil {
+			err = fmt.Errorf("cannot decode key: %v", err)
+			break
+		}
+		if !fn(decIdx, decKey) {
+			break
+		}
+	}
+	results.Close()
+
+	return err
+}
+
+func (x *indexer) HasValue(ctx context.Context, key, value string) (bool, error) {
+	if key == "" {
+		return false, ErrEmptyKey
+	}
+	if value == "" {
+		return false, ErrEmptyValue
+	}
+	return x.dstore.Has(ds.NewKey(encode(key)).ChildString(encode(value)))
+}
+
+func (x *indexer) HasAny(ctx context.Context, key string) (bool, error) {
+	var any bool
+	err := x.ForEach(ctx, key, func(key, value string) bool {
+		any = true
+		return false
+	})
+	return any, err
+}
+
+func (x *indexer) Search(ctx context.Context, key string) ([]string, error) {
+	if key == "" {
+		return nil, ErrEmptyKey
+	}
+	ents, err := x.queryPrefix(ctx, encode(key))
+	if err != nil {
+		return nil, err
+	}
+	if len(ents) == 0 {
+		return nil, nil
+	}
+
+	values := make([]string, len(ents))
+	for i := range ents {
+		values[i], err = decode(path.Base(ents[i].Key))
+		if err != nil {
+			return nil, fmt.Errorf("cannot decode value: %v", err)
+		}
+	}
+	return values, nil
+}
+
+// SyncIndex synchronizes the keys in the target Indexer to match those of
+// the ref Indexer.  It does not change the target's key root (the name
+// passed into New).  Returns true if the target was changed.
+func SyncIndex(ctx context.Context, ref, target Indexer) (bool, error) {
+	// Build reference index map
+	refs := map[string]string{}
+	err := ref.ForEach(ctx, "", func(key, value string) bool {
+		refs[value] = key
+		return true
+	})
+	if err != nil {
+		return false, err
+	}
+	if len(refs) == 0 {
+		return false, nil
+	}
+
+	// Compare current indexes
+	dels := map[string]string{}
+	err = target.ForEach(ctx, "", func(key, value string) bool {
+		refKey, ok := refs[value]
+		if ok && refKey == key {
+			// same in both; delete from refs, do not add to dels
+			delete(refs, value)
+		} else {
+			dels[value] = key
+		}
+		return true
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Entries in dels exist in target but not in ref (or are indexed under
+	// a different key), so remove them from target
+	for value, key := range dels {
+		err = target.Delete(ctx, key, value)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// What remains in refs is missing from target and needs to be added
+	for value, key := range refs {
+		err = target.Add(ctx, key, value)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	return len(refs) != 0 || len(dels) != 0, nil
+}
+
+func (x *indexer) deletePrefix(ctx context.Context, prefix string) (int, error) {
+	ents, err := x.queryPrefix(ctx, prefix)
+	if err != nil {
+		return 0, err
+	}
+
+	for i := range ents {
+		err = x.dstore.Delete(ds.NewKey(ents[i].Key))
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	return len(ents), nil
+}
+
+func (x *indexer) queryPrefix(ctx context.Context, prefix string) ([]query.Entry, error) {
+	q := query.Query{
+		Prefix:   prefix,
+		KeysOnly: true,
+	}
+	results, err := x.dstore.Query(q)
+	if err != nil {
+		return nil, err
+	}
+	return results.Rest()
+}
+
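+// encode multibase-encodes a string so that it is safe to use as a single
+// datastore key segment.  For example, encode("bob") produces "uYm9i",
+// where the leading 'u' is the multibase prefix for base64url.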
+func encode(data string) string {
+	encData, err := multibase.Encode(multibase.Base64url, []byte(data))
+	if err != nil {
+		// programming error; using unsupported encoding
+		panic(err.Error())
+	}
+	return encData
+}
+
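+// decode reverses encode, recovering the original string from its
+// multibase form.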
+func decode(data string) (string, error) {
+	_, b, err := multibase.Decode(data)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
diff --git a/dsindex/indexer_test.go b/dsindex/indexer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..45372c605362b75a405fcd5159bca1245540de20
--- /dev/null
+++ b/dsindex/indexer_test.go
@@ -0,0 +1,286 @@
+package dsindex
+
+import (
+	"context"
+	"testing"
+
+	ds "github.com/ipfs/go-datastore"
+)
+
+func createIndexer() Indexer {
+	dstore := ds.NewMapDatastore()
+	nameIndex := New(dstore, ds.NewKey("/data/nameindex"))
+
+	ctx := context.Background()
+	nameIndex.Add(ctx, "alice", "a1")
+	nameIndex.Add(ctx, "bob", "b1")
+	nameIndex.Add(ctx, "bob", "b2")
+	nameIndex.Add(ctx, "cathy", "c1")
+
+	return nameIndex
+}
+
+func TestAdd(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+	err := nameIndex.Add(ctx, "someone", "s1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = nameIndex.Add(ctx, "someone", "s1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = nameIndex.Add(ctx, "", "noindex")
+	if err != ErrEmptyKey {
+		t.Fatal("unexpected error:", err)
+	}
+
+	err = nameIndex.Add(ctx, "nokey", "")
+	if err != ErrEmptyValue {
+		t.Fatal("unexpected error:", err)
+	}
+}
+
+func TestHasValue(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	ok, err := nameIndex.HasValue(ctx, "bob", "b1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ok {
+		t.Fatal("missing index")
+	}
+
+	ok, err = nameIndex.HasValue(ctx, "bob", "b3")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Fatal("should not have index")
+	}
+
+	_, err = nameIndex.HasValue(ctx, "", "b1")
+	if err != ErrEmptyKey {
+		t.Fatal("unexpected error:", err)
+	}
+
+	_, err = nameIndex.HasValue(ctx, "bob", "")
+	if err != ErrEmptyValue {
+		t.Fatal("unexpected error:", err)
+	}
+}
+
+func TestHasAny(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	ok, err := nameIndex.HasAny(ctx, "nothere")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Fatal("should return false")
+	}
+
+	for _, idx := range []string{"alice", "bob", ""} {
+		ok, err = nameIndex.HasAny(ctx, idx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !ok {
+			t.Fatal("missing index", idx)
+		}
+	}
+
+	count, err := nameIndex.DeleteAll(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 4 {
+		t.Fatal("expected 4 deletions")
+	}
+
+	ok, err = nameIndex.HasAny(ctx, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Fatal("should return false")
+	}
+}
+
+func TestForEach(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	found := make(map[string]struct{})
+	err := nameIndex.ForEach(ctx, "bob", func(key, value string) bool {
+		found[value] = struct{}{}
+		return true
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, value := range []string{"b1", "b2"} {
+		_, ok := found[value]
+		if !ok {
+			t.Fatal("missing key for value", value)
+		}
+	}
+
+	values := map[string]string{}
+	err = nameIndex.ForEach(ctx, "", func(key, value string) bool {
+		values[value] = key
+		return true
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(values) != 4 {
+		t.Fatal("expected 4 keys")
+	}
+
+	if values["a1"] != "alice" {
+		t.Error("expected a1: alice")
+	}
+	if values["b1"] != "bob" {
+		t.Error("expected b1: bob")
+	}
+	if values["b2"] != "bob" {
+		t.Error("expected b2: bob")
+	}
+	if values["c1"] != "cathy" {
+		t.Error("expected c1: cathy")
+	}
+}
+
+func TestSearch(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	ids, err := nameIndex.Search(ctx, "bob")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(ids) != 2 {
+		t.Fatal("wrong number of ids - expected 2 got", ids)
+	}
+	for _, id := range ids {
+		if id != "b1" && id != "b2" {
+			t.Fatal("wrong value in id set")
+		}
+	}
+	if ids[0] == ids[1] {
+		t.Fatal("duplicate id")
+	}
+
+	ids, err = nameIndex.Search(ctx, "cathy")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(ids) != 1 || ids[0] != "c1" {
+		t.Fatal("wrong ids")
+	}
+
+	ids, err = nameIndex.Search(ctx, "amit")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(ids) != 0 {
+		t.Fatal("unexpected ids returned")
+	}
+}
+
+func TestDelete(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	err := nameIndex.Delete(ctx, "bob", "b3")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = nameIndex.Delete(ctx, "alice", "a1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ok, err := nameIndex.HasValue(ctx, "alice", "a1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Fatal("index key should have been deleted")
+	}
+
+	count, err := nameIndex.DeleteKey(ctx, "bob")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Fatal("wrong deleted count")
+	}
+	ok, _ = nameIndex.HasValue(ctx, "bob", "b1")
+	if ok {
+		t.Fatal("index not deleted")
+	}
+}
+
+func TestSyncIndex(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nameIndex := createIndexer()
+
+	dstore := ds.NewMapDatastore()
+	refIndex := New(dstore, ds.NewKey("/ref"))
+	refIndex.Add(ctx, "alice", "a1")
+	refIndex.Add(ctx, "cathy", "zz")
+	refIndex.Add(ctx, "dennis", "d1")
+
+	changed, err := SyncIndex(ctx, refIndex, nameIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !changed {
+		t.Error("change was not indicated")
+	}
+
+	// Create map of id->index in sync target
+	syncs := map[string]string{}
+	err = nameIndex.ForEach(ctx, "", func(key, value string) bool {
+		syncs[value] = key
+		return true
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Iterate items in sync source and make sure they appear in target
+	var itemCount int
+	err = refIndex.ForEach(ctx, "", func(key, value string) bool {
+		itemCount++
+		syncKey, ok := syncs[value]
+		if !ok || key != syncKey {
+			t.Fatal("key", key, "-->", value, "was not synced")
+		}
+		return true
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if itemCount != len(syncs) {
+		t.Fatal("different number of items in sync source and target")
+	}
+}
diff --git a/dspinner/pin.go b/dspinner/pin.go
new file mode 100644
index 0000000000000000000000000000000000000000..5fd65e7bf28c35d219e9deea7d5ba12ca63e3033
--- /dev/null
+++ b/dspinner/pin.go
@@ -0,0 +1,961 @@
+// Package dspinner implements structures and methods to keep track of
+// which objects a user wants to keep stored locally.  This implementation
+// stores pin data in a datastore.
+package dspinner
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path"
+	"sync"
+
+	"github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/query"
+	ipfspinner "github.com/ipfs/go-ipfs-pinner"
+	"github.com/ipfs/go-ipfs-pinner/dsindex"
+	ipld "github.com/ipfs/go-ipld-format"
+	logging "github.com/ipfs/go-log"
+	mdag "github.com/ipfs/go-merkledag"
+	"github.com/ipfs/go-merkledag/dagutils"
+	"github.com/polydawn/refmt/cbor"
+	"github.com/polydawn/refmt/obj/atlas"
+)
+
+const (
+	basePath     = "/pins"
+	pinKeyPath   = "/pins/pin"
+	indexKeyPath = "/pins/index"
+	dirtyKeyPath = "/pins/state/dirty"
+)
+
+var (
+	// ErrNotPinned is returned when trying to unpin items that are not pinned.
+	ErrNotPinned = errors.New("not pinned or pinned indirectly")
+
+	log logging.StandardLogger = logging.Logger("pin")
+
+	linkDirect, linkRecursive string
+
+	pinCidDIndexPath string
+	pinCidRIndexPath string
+	pinNameIndexPath string
+
+	dirtyKey = ds.NewKey(dirtyKeyPath)
+
+	pinAtl atlas.Atlas
+)
+
+func init() {
+	directStr, ok := ipfspinner.ModeToString(ipfspinner.Direct)
+	if !ok {
+		panic("could not find Direct pin enum")
+	}
+	linkDirect = directStr
+
+	recursiveStr, ok := ipfspinner.ModeToString(ipfspinner.Recursive)
+	if !ok {
+		panic("could not find Recursive pin enum")
+	}
+	linkRecursive = recursiveStr
+
+	pinCidRIndexPath = path.Join(indexKeyPath, "cidRindex")
+	pinCidDIndexPath = path.Join(indexKeyPath, "cidDindex")
+	pinNameIndexPath = path.Join(indexKeyPath, "nameIndex")
+
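+	// pinAtl tells refmt how to map a pin to and from CBOR: struct fields
+	// are stored under short serial names, and cid.Cid values round-trip
+	// through their binary encoding.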
+	pinAtl = atlas.MustBuild(
+		atlas.BuildEntry(pin{}).StructMap().
+			AddField("Cid", atlas.StructMapEntry{SerialName: "cid"}).
+			AddField("Metadata", atlas.StructMapEntry{SerialName: "metadata", OmitEmpty: true}).
+			AddField("Mode", atlas.StructMapEntry{SerialName: "mode"}).
+			AddField("Name", atlas.StructMapEntry{SerialName: "name", OmitEmpty: true}).
+			Complete(),
+		atlas.BuildEntry(cid.Cid{}).Transform().
+			TransformMarshal(atlas.MakeMarshalTransformFunc(func(live cid.Cid) ([]byte, error) { return live.MarshalBinary() })).
+			TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(func(serializable []byte) (cid.Cid, error) {
+				c := cid.Cid{}
+				err := c.UnmarshalBinary(serializable)
+				if err != nil {
+					return cid.Cid{}, err
+				}
+				return c, nil
+			})).Complete(),
+	)
+	pinAtl = pinAtl.WithMapMorphism(atlas.MapMorphism{KeySortMode: atlas.KeySortMode_Strings})
+}
+
+// pinner implements the Pinner interface
+type pinner struct {
+	lock sync.RWMutex
+
+	dserv  ipld.DAGService
+	dstore ds.Datastore
+
+	cidDIndex dsindex.Indexer
+	cidRIndex dsindex.Indexer
+	nameIndex dsindex.Indexer
+
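+	// clean and dirty are counters; setDirty compares them to detect
+	// transitions between clean and dirty states, and writes the
+	// persistent dirty flag only on those transitions.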
+	clean int64
+	dirty int64
+}
+
+var _ ipfspinner.Pinner = (*pinner)(nil)
+
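+// pin is the internal representation of a single pin: the pinned CID, the
+// pin mode, an optional name and metadata, and a random Id under which the
+// pin is stored in the datastore.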
+type pin struct {
+	Id       string
+	Cid      cid.Cid
+	Metadata map[string]interface{}
+	Mode     ipfspinner.Mode
+	Name     string
+}
+
+func (p *pin) dsKey() ds.Key {
+	return ds.NewKey(path.Join(pinKeyPath, p.Id))
+}
+
+func newPin(c cid.Cid, mode ipfspinner.Mode, name string) *pin {
+	return &pin{
+		Id:   ds.RandomKey().String(),
+		Cid:  c,
+		Name: name,
+		Mode: mode,
+	}
+}
+
+type syncDAGService interface {
+	ipld.DAGService
+	Sync() error
+}
+
+// New creates a new pinner and loads its keysets from the given datastore. If
+// there is no data present in the datastore, then an empty pinner is returned.
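+//
+// A minimal usage sketch (assuming ctx, dstore, dserv, and an ipld.Node
+// named node already exist):
+//
+//	p, err := dspinner.New(ctx, dstore, dserv)
+//	if err != nil {
+//		return err
+//	}
+//	if err = p.Pin(ctx, node, true); err != nil { // pin recursively
+//		return err
+//	}
+//	if err = p.Flush(ctx); err != nil { // persist pins and indexes
+//		return err
+//	}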
+func New(ctx context.Context, dstore ds.Datastore, dserv ipld.DAGService) (ipfspinner.Pinner, error) {
+	p := &pinner{
+		cidDIndex: dsindex.New(dstore, ds.NewKey(pinCidDIndexPath)),
+		cidRIndex: dsindex.New(dstore, ds.NewKey(pinCidRIndexPath)),
+		nameIndex: dsindex.New(dstore, ds.NewKey(pinNameIndexPath)),
+		dserv:     dserv,
+		dstore:    dstore,
+	}
+
+	data, err := dstore.Get(dirtyKey)
+	if err != nil {
+		if err == ds.ErrNotFound {
+			return p, nil
+		}
+		return nil, fmt.Errorf("cannot load dirty flag: %v", err)
+	}
+	if data[0] == 1 {
+		p.dirty = 1
+
+		pins, err := p.loadAllPins(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("cannot load pins: %v", err)
+		}
+
+		err = p.rebuildIndexes(ctx, pins)
+		if err != nil {
+			return nil, fmt.Errorf("cannot rebuild indexes: %v", err)
+		}
+	}
+
+	return p, nil
+}
+
+// Pin the given node, optionally recursively
+func (p *pinner) Pin(ctx context.Context, node ipld.Node, recurse bool) error {
+	err := p.dserv.Add(ctx, node)
+	if err != nil {
+		return err
+	}
+
+	c := node.Cid()
+	cidKey := c.KeyString()
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if recurse {
+		found, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return err
+		}
+		if found {
+			return nil
+		}
+
+		dirtyBefore := p.dirty
+
+		// temporarily unlock to fetch the entire graph
+		p.lock.Unlock()
+		// Fetch graph starting at node identified by cid
+		err = mdag.FetchGraph(ctx, c, p.dserv)
+		p.lock.Lock()
+		if err != nil {
+			return err
+		}
+
+		// Only look again if something has changed.
+		if p.dirty != dirtyBefore {
+			found, err = p.cidRIndex.HasAny(ctx, cidKey)
+			if err != nil {
+				return err
+			}
+			if found {
+				return nil
+			}
+		}
+
+		// TODO: remove this to support multiple pins per CID
+		found, err = p.cidDIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return err
+		}
+		if found {
+			p.removePinsForCid(ctx, c, ipfspinner.Direct)
+		}
+
+		_, err = p.addPin(ctx, c, ipfspinner.Recursive, "")
+		if err != nil {
+			return err
+		}
+	} else {
+		found, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return err
+		}
+		if found {
+			return fmt.Errorf("%s already pinned recursively", c.String())
+		}
+
+		_, err = p.addPin(ctx, c, ipfspinner.Direct, "")
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *pinner) addPin(ctx context.Context, c cid.Cid, mode ipfspinner.Mode, name string) (string, error) {
+	// Create new pin and store in datastore
+	pp := newPin(c, mode, name)
+
+	// Serialize pin
+	pinData, err := encodePin(pp)
+	if err != nil {
+		return "", fmt.Errorf("could not encode pin: %v", err)
+	}
+
+	p.setDirty(ctx, true)
+
+	// Store CID index
+	switch mode {
+	case ipfspinner.Recursive:
+		err = p.cidRIndex.Add(ctx, c.KeyString(), pp.Id)
+	case ipfspinner.Direct:
+		err = p.cidDIndex.Add(ctx, c.KeyString(), pp.Id)
+	default:
+		panic("pin mode must be recursive or direct")
+	}
+	if err != nil {
+		return "", fmt.Errorf("could not add pin cid index: %v", err)
+	}
+
+	if name != "" {
+		// Store name index
+		err = p.nameIndex.Add(ctx, name, pp.Id)
+		if err != nil {
+			return "", fmt.Errorf("could not add pin name index: %v", err)
+		}
+	}
+
+	// Store the pin.  Pin must be stored after index for recovery to work.
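+	// If execution stops after the index is written but before the pin is
+	// stored, the dirty flag causes the next load to rebuild the indexes,
+	// removing the dangling index entry.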
+	err = p.dstore.Put(pp.dsKey(), pinData)
+	if err != nil {
+		if mode == ipfspinner.Recursive {
+			p.cidRIndex.Delete(ctx, c.KeyString(), pp.Id)
+		} else {
+			p.cidDIndex.Delete(ctx, c.KeyString(), pp.Id)
+		}
+		if name != "" {
+			p.nameIndex.Delete(ctx, name, pp.Id)
+		}
+		return "", err
+	}
+
+	return pp.Id, nil
+}
+
+func (p *pinner) removePin(ctx context.Context, pp *pin) error {
+	p.setDirty(ctx, true)
+
+	// Remove pin from datastore.  Pin must be removed before index for
+	// recovery to work.
+	err := p.dstore.Delete(pp.dsKey())
+	if err != nil {
+		return err
+	}
+	// Remove cid index from datastore
+	if pp.Mode == ipfspinner.Recursive {
+		err = p.cidRIndex.Delete(ctx, pp.Cid.KeyString(), pp.Id)
+	} else {
+		err = p.cidDIndex.Delete(ctx, pp.Cid.KeyString(), pp.Id)
+	}
+	if err != nil {
+		return err
+	}
+
+	if pp.Name != "" {
+		// Remove name index from datastore
+		err = p.nameIndex.Delete(ctx, pp.Name, pp.Id)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Unpin a given key
+func (p *pinner) Unpin(ctx context.Context, c cid.Cid, recursive bool) error {
+	cidKey := c.KeyString()
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// TODO: use Ls() to lookup pins when new pinning API available
+	/*
+		matchSpec := map[string][]string {
+			"cid": []string{c.String}
+		}
+		matches := p.Ls(matchSpec)
+	*/
+	has, err := p.cidRIndex.HasAny(ctx, cidKey)
+	if err != nil {
+		return err
+	}
+
+	if has {
+		if !recursive {
+			return fmt.Errorf("%s is pinned recursively", c.String())
+		}
+	} else {
+		has, err = p.cidDIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return err
+		}
+		if !has {
+			return ErrNotPinned
+		}
+	}
+
+	_, err = p.removePinsForCid(ctx, c, ipfspinner.Any)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// IsPinned returns whether or not the given key is pinned,
+// along with an explanation of why it is pinned.
+func (p *pinner) IsPinned(ctx context.Context, c cid.Cid) (string, bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	return p.isPinnedWithType(ctx, c, ipfspinner.Any)
+}
+
+// IsPinnedWithType returns whether or not the given cid is pinned with the
+// given pin type, as well as the type of pin it is pinned with.
+func (p *pinner) IsPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	return p.isPinnedWithType(ctx, c, mode)
+}
+
+func (p *pinner) isPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) {
+	cidKey := c.KeyString()
+	switch mode {
+	case ipfspinner.Recursive:
+		has, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return "", false, err
+		}
+		if has {
+			return linkRecursive, true, nil
+		}
+		return "", false, nil
+	case ipfspinner.Direct:
+		has, err := p.cidDIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return "", false, err
+		}
+		if has {
+			return linkDirect, true, nil
+		}
+		return "", false, nil
+	case ipfspinner.Internal:
+		return "", false, nil
+	case ipfspinner.Indirect:
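+		// An intentionally empty case: Indirect skips the recursive and
+		// direct checks above and proceeds straight to the indirect search
+		// below the switch.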
+	case ipfspinner.Any:
+		has, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return "", false, err
+		}
+		if has {
+			return linkRecursive, true, nil
+		}
+		has, err = p.cidDIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return "", false, err
+		}
+		if has {
+			return linkDirect, true, nil
+		}
+	default:
+		err := fmt.Errorf(
+			"invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}",
+			mode, ipfspinner.Direct, ipfspinner.Indirect, ipfspinner.Recursive,
+			ipfspinner.Internal, ipfspinner.Any)
+		return "", false, err
+	}
+
+	// Default is Indirect
+	visitedSet := cid.NewSet()
+
+	// No index for given CID, so search children of all recursively pinned CIDs
+	var has bool
+	var rc cid.Cid
+	var e error
+	err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool {
+		rc, e = cid.Cast([]byte(key))
+		if e != nil {
+			return false
+		}
+		has, e = hasChild(ctx, p.dserv, rc, c, visitedSet.Visit)
+		if e != nil {
+			return false
+		}
+		if has {
+			return false
+		}
+		return true
+	})
+	if err != nil {
+		return "", false, err
+	}
+	if e != nil {
+		return "", false, e
+	}
+
+	if has {
+		return rc.String(), true, nil
+	}
+
+	return "", false, nil
+}
+
+// CheckIfPinned checks if a set of keys are pinned.  It is more efficient
+// than calling IsPinned for each key, and returns the pinned status of
+// each cid.
+//
+// TODO: If a CID is pinned by multiple pins, should they all be reported?
+func (p *pinner) CheckIfPinned(ctx context.Context, cids ...cid.Cid) ([]ipfspinner.Pinned, error) {
+	pinned := make([]ipfspinner.Pinned, 0, len(cids))
+	toCheck := cid.NewSet()
+
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	// First check for non-Indirect pins directly
+	for _, c := range cids {
+		cidKey := c.KeyString()
+		has, err := p.cidRIndex.HasAny(ctx, cidKey)
+		if err != nil {
+			return nil, err
+		}
+		if has {
+			pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Recursive})
+		} else {
+			has, err = p.cidDIndex.HasAny(ctx, cidKey)
+			if err != nil {
+				return nil, err
+			}
+			if has {
+				pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Direct})
+			} else {
+				toCheck.Add(c)
+			}
+		}
+	}
+
+	// Now walk all recursive pins to check for indirect pins
+	var checkChildren func(cid.Cid, cid.Cid) error
+	checkChildren = func(rk, parentKey cid.Cid) error {
+		links, err := ipld.GetLinks(ctx, p.dserv, parentKey)
+		if err != nil {
+			return err
+		}
+		for _, lnk := range links {
+			c := lnk.Cid
+
+			if toCheck.Has(c) {
+				pinned = append(pinned,
+					ipfspinner.Pinned{Key: c, Mode: ipfspinner.Indirect, Via: rk})
+				toCheck.Remove(c)
+			}
+
+			err = checkChildren(rk, c)
+			if err != nil {
+				return err
+			}
+
+			if toCheck.Len() == 0 {
+				return nil
+			}
+		}
+		return nil
+	}
+
+	var e error
+	err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool {
+		var rk cid.Cid
+		rk, e = cid.Cast([]byte(key))
+		if e != nil {
+			return false
+		}
+		e = checkChildren(rk, rk)
+		if e != nil {
+			return false
+		}
+		if toCheck.Len() == 0 {
+			return false
+		}
+		return true
+	})
+	if err != nil {
+		return nil, err
+	}
+	if e != nil {
+		return nil, e
+	}
+
+	// Anything left in toCheck is not pinned
+	for _, k := range toCheck.Keys() {
+		pinned = append(pinned, ipfspinner.Pinned{Key: k, Mode: ipfspinner.NotPinned})
+	}
+
+	return pinned, nil
+}
+
+// RemovePinWithMode is for manually editing the pin structure.
+// Use with care! If used improperly, garbage collection may not
+// be successful.
+func (p *pinner) RemovePinWithMode(c cid.Cid, mode ipfspinner.Mode) {
+	ctx := context.TODO()
+	// Check cache to see if CID is pinned
+	switch mode {
+	case ipfspinner.Direct, ipfspinner.Recursive:
+	default:
+		// programmer error, panic OK
+		panic("unrecognized pin type")
+	}
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.removePinsForCid(ctx, c, mode)
+}
+
+// removePinsForCid removes all pins for a cid that have the specified mode.
+// Returns true if any pins, and all corresponding CID index entries, were
+// removed.  Otherwise, returns false.
+func (p *pinner) removePinsForCid(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (bool, error) {
+	// Search for pins by CID
+	var ids []string
+	var err error
+	cidKey := c.KeyString()
+	switch mode {
+	case ipfspinner.Recursive:
+		ids, err = p.cidRIndex.Search(ctx, cidKey)
+	case ipfspinner.Direct:
+		ids, err = p.cidDIndex.Search(ctx, cidKey)
+	case ipfspinner.Any:
+		ids, err = p.cidRIndex.Search(ctx, cidKey)
+		if err != nil {
+			return false, err
+		}
+		dIds, err := p.cidDIndex.Search(ctx, cidKey)
+		if err != nil {
+			return false, err
+		}
+		if len(dIds) != 0 {
+			ids = append(ids, dIds...)
+		}
+	}
+	if err != nil {
+		return false, err
+	}
+
+	var removed bool
+
+	// Remove the pin with the requested mode
+	for _, pid := range ids {
+		var pp *pin
+		pp, err = p.loadPin(ctx, pid)
+		if err != nil {
+			if err == ds.ErrNotFound {
+				p.setDirty(ctx, true)
+				// Fix index; remove index for pin that does not exist
+				switch mode {
+				case ipfspinner.Recursive:
+					p.cidRIndex.DeleteKey(ctx, cidKey)
+				case ipfspinner.Direct:
+					p.cidDIndex.DeleteKey(ctx, cidKey)
+				case ipfspinner.Any:
+					p.cidRIndex.DeleteKey(ctx, cidKey)
+					p.cidDIndex.DeleteKey(ctx, cidKey)
+				}
+				log.Error("found CID index with missing pin")
+				continue
+			}
+			return false, err
+		}
+		if mode == ipfspinner.Any || pp.Mode == mode {
+			err = p.removePin(ctx, pp)
+			if err != nil {
+				return false, err
+			}
+			removed = true
+		}
+	}
+	return removed, nil
+}
+
+// loadPin loads a single pin from the datastore.
+func (p *pinner) loadPin(ctx context.Context, pid string) (*pin, error) {
+	pinData, err := p.dstore.Get(ds.NewKey(path.Join(pinKeyPath, pid)))
+	if err != nil {
+		return nil, err
+	}
+	return decodePin(pid, pinData)
+}
+
+// loadAllPins loads all pins from the datastore.
+func (p *pinner) loadAllPins(ctx context.Context) ([]*pin, error) {
+	q := query.Query{
+		Prefix: pinKeyPath,
+	}
+	results, err := p.dstore.Query(q)
+	if err != nil {
+		return nil, err
+	}
+	ents, err := results.Rest()
+	if err != nil {
+		return nil, err
+	}
+	if len(ents) == 0 {
+		return nil, nil
+	}
+
+	pins := make([]*pin, len(ents))
+	for i := range ents {
+		if ctx.Err() != nil {
+			return nil, ctx.Err()
+		}
+		var p *pin
+		p, err = decodePin(path.Base(ents[i].Key), ents[i].Value)
+		if err != nil {
+			return nil, err
+		}
+		pins[i] = p
+	}
+	return pins, nil
+}
+
+// rebuildIndexes uses the stored pins to rebuild secondary indexes.  This
+// resolves any discrepancy between secondary indexes and pins that could
+// result from a program termination between saving the two.
+func (p *pinner) rebuildIndexes(ctx context.Context, pins []*pin) error {
+	// Build temporary in-memory CID index from pins
+	dstoreMem := ds.NewMapDatastore()
+	tmpCidDIndex := dsindex.New(dstoreMem, ds.NewKey(pinCidDIndexPath))
+	tmpCidRIndex := dsindex.New(dstoreMem, ds.NewKey(pinCidRIndexPath))
+	tmpNameIndex := dsindex.New(dstoreMem, ds.NewKey(pinNameIndexPath))
+	var hasNames bool
+	for _, pp := range pins {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		if pp.Mode == ipfspinner.Recursive {
+			tmpCidRIndex.Add(ctx, pp.Cid.KeyString(), pp.Id)
+		} else if pp.Mode == ipfspinner.Direct {
+			tmpCidDIndex.Add(ctx, pp.Cid.KeyString(), pp.Id)
+		}
+		if pp.Name != "" {
+			tmpNameIndex.Add(ctx, pp.Name, pp.Id)
+			hasNames = true
+		}
+	}
+
+	// Sync the CID index to what was built from pins.  This fixes any invalid
+	// indexes, which could happen if ipfs was terminated between writing pin
+	// and writing secondary index.
+	changed, err := dsindex.SyncIndex(ctx, tmpCidRIndex, p.cidRIndex)
+	if err != nil {
+		return fmt.Errorf("cannot sync indexes: %v", err)
+	}
+	if changed {
+		log.Info("invalid recursive indexes detected - rebuilt")
+	}
+
+	changed, err = dsindex.SyncIndex(ctx, tmpCidDIndex, p.cidDIndex)
+	if err != nil {
+		return fmt.Errorf("cannot sync indexes: %v", err)
+	}
+	if changed {
+		log.Info("invalid direct indexes detected - rebuilt")
+	}
+
+	if hasNames {
+		changed, err = dsindex.SyncIndex(ctx, tmpNameIndex, p.nameIndex)
+		if err != nil {
+			return fmt.Errorf("cannot sync name indexes: %v", err)
+		}
+		if changed {
+			log.Info("invalid name indexes detected - rebuilt")
+		}
+	}
+
+	return p.Flush(ctx)
+}
+
+// DirectKeys returns a slice containing the directly pinned keys
+func (p *pinner) DirectKeys(ctx context.Context) ([]cid.Cid, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	cidSet := cid.NewSet()
+	var e error
+	err := p.cidDIndex.ForEach(ctx, "", func(key, value string) bool {
+		var c cid.Cid
+		c, e = cid.Cast([]byte(key))
+		if e != nil {
+			return false
+		}
+		cidSet.Add(c)
+		return true
+	})
+	if err != nil {
+		return nil, err
+	}
+	if e != nil {
+		return nil, e
+	}
+
+	return cidSet.Keys(), nil
+}
+
+// RecursiveKeys returns a slice containing the recursively pinned keys
+func (p *pinner) RecursiveKeys(ctx context.Context) ([]cid.Cid, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	cidSet := cid.NewSet()
+	var e error
+	err := p.cidRIndex.ForEach(ctx, "", func(key, value string) bool {
+		var c cid.Cid
+		c, e = cid.Cast([]byte(key))
+		if e != nil {
+			return false
+		}
+		cidSet.Add(c)
+		return true
+	})
+	if err != nil {
+		return nil, err
+	}
+	if e != nil {
+		return nil, e
+	}
+
+	return cidSet.Keys(), nil
+}
+
+// InternalPins returns all cids kept pinned for the internal state of the
+// pinner
+func (p *pinner) InternalPins(ctx context.Context) ([]cid.Cid, error) {
+	return nil, nil
+}
+
+// Update updates a recursive pin from one cid to another.  This is equivalent
+// to pinning the new one and unpinning the old one.
+//
+// TODO: This will not work when multiple pins are supported
+func (p *pinner) Update(ctx context.Context, from, to cid.Cid, unpin bool) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	found, err := p.cidRIndex.HasAny(ctx, from.KeyString())
+	if err != nil {
+		return err
+	}
+	if !found {
+		return errors.New("'from' cid was not recursively pinned already")
+	}
+
+	// If `from` already recursively pinned and `to` is the same, then all done
+	if from == to {
+		return nil
+	}
+
+	// Check if the `to` cid is already recursively pinned
+	found, err = p.cidRIndex.HasAny(ctx, to.KeyString())
+	if err != nil {
+		return err
+	}
+	if found {
+		return errors.New("'to' cid was already recursively pinned")
+	}
+
+	// Temporarily unlock while we fetch the differences.
+	p.lock.Unlock()
+	err = dagutils.DiffEnumerate(ctx, p.dserv, from, to)
+	p.lock.Lock()
+
+	if err != nil {
+		return err
+	}
+
+	_, err = p.addPin(ctx, to, ipfspinner.Recursive, "")
+	if err != nil {
+		return err
+	}
+
+	if !unpin {
+		return nil
+	}
+
+	_, err = p.removePinsForCid(ctx, from, ipfspinner.Recursive)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Flush encodes and writes pinner keysets to the datastore
+func (p *pinner) Flush(ctx context.Context) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if syncDServ, ok := p.dserv.(syncDAGService); ok {
+		if err := syncDServ.Sync(); err != nil {
+			return fmt.Errorf("cannot sync pinned data: %v", err)
+		}
+	}
+
+	// Sync pins and indexes
+	if err := p.dstore.Sync(ds.NewKey(basePath)); err != nil {
+		return fmt.Errorf("cannot sync pin state: %v", err)
+	}
+
+	p.setDirty(ctx, false)
+
+	return nil
+}
+
+// PinWithMode allows the user to have fine-grained control over pin
+// counts
+func (p *pinner) PinWithMode(c cid.Cid, mode ipfspinner.Mode) {
+	ctx := context.TODO()
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// TODO: remove this to support multiple pins per CID
+	switch mode {
+	case ipfspinner.Recursive:
+		if has, _ := p.cidRIndex.HasAny(ctx, c.KeyString()); has {
+			return // already a recursive pin for this CID
+		}
+	case ipfspinner.Direct:
+		if has, _ := p.cidDIndex.HasAny(ctx, c.KeyString()); has {
+			return // already a direct pin for this CID
+		}
+	default:
+		panic("unrecognized pin mode")
+	}
+
+	_, err := p.addPin(ctx, c, mode, "")
+	if err != nil {
+		return
+	}
+}
+
+// hasChild recursively looks for a Cid among the children of a root Cid.
+// The visit function can be used to shortcut already-visited branches.
+func hasChild(ctx context.Context, ng ipld.NodeGetter, root cid.Cid, child cid.Cid, visit func(cid.Cid) bool) (bool, error) {
+	links, err := ipld.GetLinks(ctx, ng, root)
+	if err != nil {
+		return false, err
+	}
+	for _, lnk := range links {
+		c := lnk.Cid
+		if lnk.Cid.Equals(child) {
+			return true, nil
+		}
+		if visit(c) {
+			has, err := hasChild(ctx, ng, c, child, visit)
+			if err != nil {
+				return false, err
+			}
+
+			if has {
+				return has, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+func encodePin(p *pin) ([]byte, error) {
+	b, err := cbor.MarshalAtlased(p, pinAtl)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func decodePin(pid string, data []byte) (*pin, error) {
+	p := &pin{Id: pid}
+	err := cbor.UnmarshalAtlased(cbor.DecodeOptions{}, data, p, pinAtl)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// setDirty saves a boolean dirty flag in the datastore whenever there is a
+// transition between a dirty (dirty counter != clean counter) and a clean
+// (dirty == clean) state.
+func (p *pinner) setDirty(ctx context.Context, dirty bool) {
+	isClean := p.dirty == p.clean
+	if dirty {
+		p.dirty++
+		if !isClean {
+			return // do not save; was already dirty
+		}
+	} else if isClean {
+		return // already clean
+	} else {
+		p.clean = p.dirty // set clean
+	}
+
+	// Do edge-triggered write to datastore
+	data := []byte{0}
+	if dirty {
+		data[0] = 1
+	}
+	p.dstore.Put(dirtyKey, data)
+	p.dstore.Sync(dirtyKey)
+}
diff --git a/dspinner/pin_test.go b/dspinner/pin_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..40e2c70cac15ea52425e26e03b6b5a2195d7f46c
--- /dev/null
+++ b/dspinner/pin_test.go
@@ -0,0 +1,1137 @@
+package dspinner
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"testing"
+	"time"
+
+	bs "github.com/ipfs/go-blockservice"
+	mdag "github.com/ipfs/go-merkledag"
+
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	lds "github.com/ipfs/go-ds-leveldb"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	ipfspin "github.com/ipfs/go-ipfs-pinner"
+	"github.com/ipfs/go-ipfs-pinner/ipldpinner"
+	util "github.com/ipfs/go-ipfs-util"
+	ipld "github.com/ipfs/go-ipld-format"
+	logging "github.com/ipfs/go-log"
+)
+
+var rand = util.NewTimeSeededRand()
+
+type fakeLogger struct {
+	logging.StandardLogger
+	lastError error
+}
+
+func (f *fakeLogger) Error(args ...interface{}) {
+	f.lastError = errors.New(fmt.Sprint(args...))
+}
+
+func (f *fakeLogger) Errorf(format string, args ...interface{}) {
+	f.lastError = fmt.Errorf(format, args...)
+}
+
+func randNode() (*mdag.ProtoNode, cid.Cid) {
+	nd := new(mdag.ProtoNode)
+	nd.SetData(make([]byte, 32))
+	_, err := io.ReadFull(rand, nd.Data())
+	if err != nil {
+		panic(err)
+	}
+	k := nd.Cid()
+	return nd, k
+}
+
+func assertPinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) {
+	_, pinned, err := p.IsPinned(context.Background(), c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !pinned {
+		t.Fatal(failmsg)
+	}
+}
+
+func assertPinnedWithType(t *testing.T, p ipfspin.Pinner, c cid.Cid, mode ipfspin.Mode, failmsg string) {
+	modeText, pinned, err := p.IsPinnedWithType(context.Background(), c, mode)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expect, ok := ipfspin.ModeToString(mode)
+	if !ok {
+		t.Fatal("unrecognized pin mode")
+	}
+
+	if !pinned {
+		t.Fatal(failmsg)
+	}
+
+	if mode == ipfspin.Any {
+		return
+	}
+
+	if expect != modeText {
+		t.Fatal("expected", expect, "pin, got", modeText)
+	}
+}
+
+func assertUnpinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) {
+	_, pinned, err := p.IsPinned(context.Background(), c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if pinned {
+		t.Fatal(failmsg)
+	}
+}
+
+func TestPinnerBasic(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, ak := randNode()
+	err = dserv.Add(ctx, a)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Pin A{}
+	err = p.Pin(ctx, a, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertPinned(t, p, ak, "Failed to find key")
+	assertPinnedWithType(t, p, ak, ipfspin.Direct, "Expected direct pin")
+
+	// create new node c, to be indirectly pinned through b
+	c, _ := randNode()
+	err = dserv.Add(ctx, c)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ck := c.Cid()
+
+	// Create new node b, to be parent to a and c
+	b, _ := randNode()
+	err = b.AddNodeLink("child", a)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = b.AddNodeLink("otherchild", c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dserv.Add(ctx, b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bk := b.Cid()
+
+	// recursively pin B{A,C}
+	err = p.Pin(ctx, b, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertPinned(t, p, ck, "child of recursively pinned node not found")
+
+	assertPinned(t, p, bk, "Pinned node not found")
+	assertPinnedWithType(t, p, bk, ipfspin.Recursive, "Recursively pinned node not found")
+
+	d, _ := randNode()
+	d.AddNodeLink("a", a)
+	d.AddNodeLink("c", c)
+
+	e, _ := randNode()
+	d.AddNodeLink("e", e)
+
+	// Must be in dagserv for unpin to work
+	err = dserv.Add(ctx, e)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = dserv.Add(ctx, d)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Add D{A,C,E}
+	err = p.Pin(ctx, d, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dk := d.Cid()
+	assertPinned(t, p, dk, "pinned node not found.")
+
+	cids, err := p.RecursiveKeys(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(cids) != 2 {
+		t.Error("expected 2 recursive pins")
+	}
+	if !(bk == cids[0] || bk == cids[1]) {
+		t.Error("expected recursive pin of B")
+	}
+	if !(dk == cids[0] || dk == cids[1]) {
+		t.Error("expected recursive pin of D")
+	}
+
+	pinned, err := p.CheckIfPinned(ctx, ak, bk, ck, dk)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(pinned) != 4 {
+		t.Error("incorrect number of results")
+	}
+	for _, pn := range pinned {
+		switch pn.Key {
+		case ak:
+			if pn.Mode != ipfspin.Direct {
+				t.Error("A pinned with wrong mode")
+			}
+		case bk:
+			if pn.Mode != ipfspin.Recursive {
+				t.Error("B pinned with wrong mode")
+			}
+		case ck:
+			if pn.Mode != ipfspin.Indirect {
+				t.Error("C should be pinned indirectly")
+			}
+			if pn.Via != dk && pn.Via != bk {
+				t.Error("C should be pinned via D or B")
+			}
+		case dk:
+			if pn.Mode != ipfspin.Recursive {
+				t.Error("D pinned with wrong mode")
+			}
+		}
+	}
+
+	cids, err = p.DirectKeys(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(cids) != 1 {
+		t.Error("expected 1 direct pin")
+	}
+	if cids[0] != ak {
+		t.Error("wrong direct pin")
+	}
+
+	cids, _ = p.InternalPins(ctx)
+	if len(cids) != 0 {
+		t.Error("shound not have internal keys")
+	}
+
+	err = p.Unpin(ctx, dk, false)
+	if err == nil {
+		t.Fatal("expected error unpinning recursive pin without specifying recursive")
+	}
+
+	// Test recursive unpin
+	err = p.Unpin(ctx, dk, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = p.Unpin(ctx, dk, true)
+	if err != ErrNotPinned {
+		t.Fatal("expected error:", ErrNotPinned)
+	}
+
+	err = p.Flush(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p, err = New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test directly pinned
+	assertPinned(t, p, ak, "Could not find pinned node!")
+
+	// Test recursively pinned
+	assertPinned(t, p, bk, "could not find recursively pinned node")
+
+	// Remove the pin but not the index to simulate corruption
+	dsp := p.(*pinner)
+	ids, err := dsp.cidDIndex.Search(ctx, ak.KeyString())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(ids) == 0 {
+		t.Fatal("did not find pin for cid", ak.String())
+	}
+	pp, err := dsp.loadPin(ctx, ids[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pp.Mode != ipfspin.Direct {
+		t.Error("loaded pin has wrong mode")
+	}
+	if pp.Cid != ak {
+		t.Error("loaded pin has wrong cid")
+	}
+	err = dsp.dstore.Delete(pp.dsKey())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	realLog := log
+	fakeLog := &fakeLogger{}
+	fakeLog.StandardLogger = log
+	log = fakeLog
+	err = p.Pin(ctx, a, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if fakeLog.lastError == nil {
+		t.Error("expected error to be logged")
+	} else if fakeLog.lastError.Error() != "found CID index with missing pin" {
+		t.Error("did not get expected log message")
+	}
+
+	log = realLog
+}
+
+func TestAddLoadPin(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+
+	ipfsPin, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p := ipfsPin.(*pinner)
+
+	a, ak := randNode()
+	dserv.Add(ctx, a)
+
+	mode := ipfspin.Recursive
+	name := "my-pin"
+	pid, err := p.addPin(ctx, ak, mode, name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Load pin and check that data decoded correctly
+	pinData, err := p.loadPin(ctx, pid)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pinData.Mode != mode {
+		t.Error("worng pin mode")
+	}
+	if pinData.Cid != ak {
+		t.Error("wrong pin cid")
+	}
+	if pinData.Name != name {
+		t.Error("wrong pin name; expected", name, "got", pinData.Name)
+	}
+}
+
+func TestRemovePinWithMode(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, ak := randNode()
+	dserv.Add(ctx, a)
+
+	p.Pin(ctx, a, false)
+
+	ok, err := p.(*pinner).removePinsForCid(ctx, ak, ipfspin.Recursive)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Error("pin should not have been removed")
+	}
+
+	p.RemovePinWithMode(ak, ipfspin.Direct)
+
+	assertUnpinned(t, p, ak, "pin was not removed")
+}
+
+func TestIsPinnedLookup(t *testing.T) {
+	// Test that lookups work in pins which share
+	// the same branches.  For that construct this tree:
+	//
+	// A5->A4->A3->A2->A1->A0
+	//         /           /
+	// B-------           /
+	//  \                /
+	//   C---------------
+	//
+	// This ensures that IsPinned works for all objects both when they
+	// are pinned and once they have been unpinned.
+	aBranchLen := 6
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+
+	// Create new pinner.  New will not load anything since there are
+	// no pins saved in the datastore yet.
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	aKeys, bk, ck, err := makeTree(ctx, aBranchLen, dserv, p)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertPinned(t, p, aKeys[0], "A0 should be pinned")
+	assertPinned(t, p, aKeys[1], "A1 should be pinned")
+	assertPinned(t, p, ck, "C should be pinned")
+	assertPinned(t, p, bk, "B should be pinned")
+
+	// Unpin A5 recursively
+	if err = p.Unpin(ctx, aKeys[5], true); err != nil {
+		t.Fatal(err)
+	}
+
+	assertPinned(t, p, aKeys[0], "A0 should still be pinned through B")
+	assertUnpinned(t, p, aKeys[4], "A4 should be unpinned")
+
+	// Unpin B recursively
+	if err = p.Unpin(ctx, bk, true); err != nil {
+		t.Fatal(err)
+	}
+	assertUnpinned(t, p, bk, "B should be unpinned")
+	assertUnpinned(t, p, aKeys[1], "A1 should be unpinned")
+	assertPinned(t, p, aKeys[0], "A0 should still be pinned through C")
+}
+
+func TestDuplicateSemantics(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, _ := randNode()
+	err = dserv.Add(ctx, a)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// pin a recursively
+	err = p.Pin(ctx, a, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// pinning directly should fail
+	err = p.Pin(ctx, a, false)
+	if err == nil {
+		t.Fatal("expected direct pin to fail")
+	}
+
+	// pinning recursively again should succeed
+	err = p.Pin(ctx, a, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFlush(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, k := randNode()
+
+	p.PinWithMode(k, ipfspin.Recursive)
+	if err = p.Flush(ctx); err != nil {
+		t.Fatal(err)
+	}
+	assertPinned(t, p, k, "expected key to still be pinned")
+}
+
+func TestPinRecursiveFail(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+	dserv := mdag.NewDAGService(bserv)
+
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, _ := randNode()
+	b, _ := randNode()
+	err = a.AddNodeLink("child", b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// NOTE: This isn't a time-based test; we expect the pin to fail
+	mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
+	defer cancel()
+
+	err = p.Pin(mctx, a, true)
+	if err == nil {
+		t.Fatal("should have failed to pin here")
+	}
+
+	err = dserv.Add(ctx, b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dserv.Add(ctx, a)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// this one is time-based... but shouldn't cause any issues
+	mctx, cancel = context.WithTimeout(ctx, time.Second)
+	defer cancel()
+	err = p.Pin(mctx, a, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPinUpdate(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+
+	dserv := mdag.NewDAGService(bserv)
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	n1, c1 := randNode()
+	n2, c2 := randNode()
+	_, c3 := randNode()
+
+	if err = dserv.Add(ctx, n1); err != nil {
+		t.Fatal(err)
+	}
+	if err = dserv.Add(ctx, n2); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = p.Pin(ctx, n1, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = p.Update(ctx, c1, c2, true); err != nil {
+		t.Fatal(err)
+	}
+
+	assertPinned(t, p, c2, "c2 should be pinned now")
+	assertUnpinned(t, p, c1, "c1 should no longer be pinned")
+
+	if err = p.Update(ctx, c2, c1, false); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test updating same pin that is already pinned.
+	if err = p.Update(ctx, c2, c2, true); err != nil {
+		t.Fatal(err)
+	}
+	// Check that pin is still pinned.
+	_, ok, err := p.IsPinned(ctx, c2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ok {
+		t.Fatal("c2 should still be pinned")
+	}
+
+	// Test updating same pin that is not pinned.
+	if err = p.Update(ctx, c3, c3, false); err == nil {
+		t.Fatal("expected error updating unpinned cid")
+	}
+	_, ok, err = p.IsPinned(ctx, c3)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ok {
+		t.Fatal("c3 should not be pinned")
+	}
+
+	assertPinned(t, p, c2, "c2 should be pinned still")
+	assertPinned(t, p, c1, "c1 should be pinned now")
+}
+
+func TestLoadDirty(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+	dserv := mdag.NewDAGService(bserv)
+
+	p, err := New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, ak := randNode()
+	err = dserv.Add(ctx, a)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, bk := randNode()
+
+	err = p.Pin(ctx, a, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cidAKey := ak.KeyString()
+	cidBKey := bk.KeyString()
+
+	// Corrupt index
+	cidRIndex := p.(*pinner).cidRIndex
+	cidRIndex.DeleteKey(ctx, cidAKey)
+	cidRIndex.Add(ctx, cidBKey, "not-a-pin-id")
+
+	// Verify dirty
+	data, err := dstore.Get(dirtyKey)
+	if err != nil {
+		t.Fatalf("could not read dirty flag: %v", err)
+	}
+	if data[0] != 1 {
+		t.Fatal("dirty flag not set")
+	}
+
+	has, err := cidRIndex.HasAny(ctx, cidAKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if has {
+		t.Fatal("index should be deleted")
+	}
+
+	// Create new pinner on same datastore that was never flushed.
+	p, err = New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify not dirty
+	data, err = dstore.Get(dirtyKey)
+	if err != nil {
+		t.Fatalf("could not read dirty flag: %v", err)
+	}
+	if data[0] != 0 {
+		t.Fatal("dirty flag is set")
+	}
+
+	// Verify index rebuilt
+	cidRIndex = p.(*pinner).cidRIndex
+	has, err = cidRIndex.HasAny(ctx, cidAKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !has {
+		t.Fatal("index should have been rebuilt")
+	}
+
+	has, err = cidRIndex.HasAny(ctx, cidBKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if has {
+		t.Fatal("index should have been removed by rebuild")
+	}
+}
+
+func TestEncodeDecodePin(t *testing.T) {
+	_, c := randNode()
+
+	pin := newPin(c, ipfspin.Recursive, "testpin")
+	pin.Metadata = make(map[string]interface{}, 2)
+	pin.Metadata["hello"] = "world"
+	pin.Metadata["foo"] = "bar"
+
+	encBytes, err := encodePin(pin)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	decPin, err := decodePin(pin.Id, encBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if decPin.Id != pin.Id {
+		t.Errorf("wrong pin id: expect %q got %q", pin.Id, decPin.Id)
+	}
+	if decPin.Cid != pin.Cid {
+		t.Errorf("wrong pin cid: expect %q got %q", pin.Cid.String(), decPin.Cid.String())
+	}
+	if decPin.Mode != pin.Mode {
+		expect, _ := ipfspin.ModeToString(pin.Mode)
+		got, _ := ipfspin.ModeToString(decPin.Mode)
+		t.Errorf("wrong pin mode: expect %s got %s", expect, got)
+	}
+	if decPin.Name != pin.Name {
+		t.Errorf("wrong pin name: expect %q got %q", pin.Name, decPin.Name)
+	}
+	for key, val := range pin.Metadata {
+		dval, ok := decPin.Metadata[key]
+		if !ok {
+			t.Errorf("decoded pin missing metadata key %q", key)
+		}
+		if dval != val {
+			t.Errorf("wrong metadata value: expected %q got %q", val, dval)
+		}
+	}
+}
+
+func makeTree(ctx context.Context, aBranchLen int, dserv ipld.DAGService, p ipfspin.Pinner) (aKeys []cid.Cid, bk cid.Cid, ck cid.Cid, err error) {
+	if aBranchLen < 3 {
+		err = errors.New("set aBranchLen to at least 3")
+		return
+	}
+
+	aNodes := make([]*mdag.ProtoNode, aBranchLen)
+	aKeys = make([]cid.Cid, aBranchLen)
+	for i := 0; i < aBranchLen; i++ {
+		a, _ := randNode()
+		if i >= 1 {
+			if err = a.AddNodeLink("child", aNodes[i-1]); err != nil {
+				return
+			}
+		}
+
+		if err = dserv.Add(ctx, a); err != nil {
+			return
+		}
+		aNodes[i] = a
+		aKeys[i] = a.Cid()
+	}
+
+	// Pin last A recursively
+	if err = p.Pin(ctx, aNodes[aBranchLen-1], true); err != nil {
+		return
+	}
+
+	// Create node B and add A3 as child
+	b, _ := randNode()
+	if err = b.AddNodeLink("mychild", aNodes[3]); err != nil {
+		return
+	}
+
+	// Create C node
+	c, _ := randNode()
+	// Add A0 as child of C
+	if err = c.AddNodeLink("child", aNodes[0]); err != nil {
+		return
+	}
+
+	// Add C
+	if err = dserv.Add(ctx, c); err != nil {
+		return
+	}
+	ck = c.Cid()
+
+	// Add C to B and Add B
+	if err = b.AddNodeLink("myotherchild", c); err != nil {
+		return
+	}
+	if err = dserv.Add(ctx, b); err != nil {
+		return
+	}
+	bk = b.Cid()
+
+	// Pin C recursively
+	if err = p.Pin(ctx, c, true); err != nil {
+		return
+	}
+
+	// Pin B recursively
+	if err = p.Pin(ctx, b, true); err != nil {
+		return
+	}
+
+	if err = p.Flush(ctx); err != nil {
+		return
+	}
+
+	return
+}
+
+func makeNodes(count int, dserv ipld.DAGService) []ipld.Node {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nodes := make([]ipld.Node, count)
+	for i := 0; i < count; i++ {
+		n, _ := randNode()
+		err := dserv.Add(ctx, n)
+		if err != nil {
+			panic(err)
+		}
+		nodes[i] = n
+	}
+	return nodes
+}
+
+func pinNodes(nodes []ipld.Node, p ipfspin.Pinner, recursive bool) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	var err error
+
+	for i := range nodes {
+		err = p.Pin(ctx, nodes[i], recursive)
+		if err != nil {
+			panic(err)
+		}
+	}
+	err = p.Flush(ctx)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func unpinNodes(nodes []ipld.Node, p ipfspin.Pinner) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	var err error
+
+	for i := range nodes {
+		err = p.Unpin(ctx, nodes[i].Cid(), true)
+		if err != nil {
+			panic(err)
+		}
+	}
+	err = p.Flush(ctx)
+	if err != nil {
+		panic(err)
+	}
+}
+
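+// batchWrap wraps a datastore so that Batch returns a basic batch, which
+// applies each operation individually instead of using native batching.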
+type batchWrap struct {
+	ds.Datastore
+}
+
+func (d *batchWrap) Batch() (ds.Batch, error) {
+	return ds.NewBasicBatch(d), nil
+}
+
+func makeStore() (ds.Datastore, ipld.DAGService) {
+	ldstore, err := lds.NewDatastore("", nil)
+	if err != nil {
+		panic(err)
+	}
+	var dstore ds.Batching = &batchWrap{ldstore}
+
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+	dserv := mdag.NewDAGService(bserv)
+	return dstore, dserv
+}
+
+// BenchmarkLoadRebuild loads a pinner that has some number of saved pins, and
+// compares the load time when rebuilding indexes to loading without rebuilding
+// indexes.
+func BenchmarkLoadRebuild(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	dstore, dserv := makeStore()
+	pinner, err := New(ctx, dstore, dserv)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	nodes := makeNodes(4096, dserv)
+	pinNodes(nodes, pinner, true)
+
+	b.Run("RebuildTrue", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
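+			// Set the dirty flag so that New rebuilds the indexes during load.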
+			dstore.Put(dirtyKey, []byte{1})
+
+			_, err = New(ctx, dstore, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+	})
+
+	b.Run("RebuildFalse", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
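+			// Clear the dirty flag so that New loads without rebuilding indexes.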
+			dstore.Put(dirtyKey, []byte{0})
+
+			_, err = New(ctx, dstore, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+	})
+}
+
+// BenchmarkNthPin shows the time it takes to create/save 1 pin when a number
+// of other pins already exist.  Each run in the series shows performance for
+// creating a pin with a larger number of existing pins.
+func BenchmarkNthPin(b *testing.B) {
+	dstore, dserv := makeStore()
+	pinner, err := New(context.Background(), dstore, dserv)
+	if err != nil {
+		panic(err.Error())
+	}
+	pinnerIPLD, err := ipldpinner.New(dstore, dserv, dserv)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	for count := 1000; count <= 10000; count += 1000 {
+		b.Run(fmt.Sprint("PinDS-", count), func(b *testing.B) {
+			benchmarkNthPin(b, count, pinner, dserv)
+		})
+
+		b.Run(fmt.Sprint("PinIPLD-", count), func(b *testing.B) {
+			benchmarkNthPin(b, count, pinnerIPLD, dserv)
+		})
+	}
+}
+
+func benchmarkNthPin(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nodes := makeNodes(count, dserv)
+	pinNodes(nodes[:count-1], pinner, true)
+	b.ResetTimer()
+
+	which := count - 1
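+	// nodes[which] was left unpinned by the setup, so pinning it is the Nth pin.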
+	for i := 0; i < b.N; i++ {
+		// Pin the Nth node and Flush
+		err := pinner.Pin(ctx, nodes[which], true)
+		if err != nil {
+			panic(err)
+		}
+		err = pinner.Flush(ctx)
+		if err != nil {
+			panic(err)
+		}
+		// Unpin the node so that it can be pinned next iteration.
+		b.StopTimer()
+		err = pinner.Unpin(ctx, nodes[which].Cid(), true)
+		if err != nil {
+			panic(err)
+		}
+		err = pinner.Flush(ctx)
+		if err != nil {
+			panic(err)
+		}
+		b.StartTimer()
+	}
+}
+
+// BenchmarkNPins demonstrates creating individual pins.  Each run in the
+// series shows performance for a larger number of individual pins.
+func BenchmarkNPins(b *testing.B) {
+	for count := 128; count < 16386; count <<= 1 {
+		b.Run(fmt.Sprint("PinDS-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := New(context.Background(), dstore, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+			benchmarkNPins(b, count, pinner, dserv)
+		})
+
+		b.Run(fmt.Sprint("PinIPLD-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := ipldpinner.New(dstore, dserv, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+			benchmarkNPins(b, count, pinner, dserv)
+		})
+	}
+}
+
+func benchmarkNPins(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nodes := makeNodes(count, dserv)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		// Pin all the nodes one at a time.
+		for j := range nodes {
+			err := pinner.Pin(ctx, nodes[j], true)
+			if err != nil {
+				panic(err)
+			}
+			err = pinner.Flush(ctx)
+			if err != nil {
+				panic(err)
+			}
+		}
+
+		// Unpin all nodes so that they can be pinned next iter.
+		b.StopTimer()
+		unpinNodes(nodes, pinner)
+		b.StartTimer()
+	}
+}
+
+// BenchmarkNUnpins demonstrates unpinning individual pins. Each run in the
+// series shows performance for a larger number of individual unpins.
+func BenchmarkNUnpins(b *testing.B) {
+	for count := 128; count < 16386; count <<= 1 {
+		b.Run(fmt.Sprint("UnpinDS-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := New(context.Background(), dstore, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+			benchmarkNUnpins(b, count, pinner, dserv)
+		})
+
+		b.Run(fmt.Sprint("UnpinIPLD-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := ipldpinner.New(dstore, dserv, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+			benchmarkNUnpins(b, count, pinner, dserv)
+		})
+	}
+}
+
+func benchmarkNUnpins(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	nodes := makeNodes(count, dserv)
+	pinNodes(nodes, pinner, true)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		for j := range nodes {
+			// Unpin nodes one at a time.
+			err := pinner.Unpin(ctx, nodes[j].Cid(), true)
+			if err != nil {
+				panic(err)
+			}
+			err = pinner.Flush(ctx)
+			if err != nil {
+				panic(err)
+			}
+		}
+		// Pin all nodes so that they can be unpinned next iter.
+		b.StopTimer()
+		pinNodes(nodes, pinner, true)
+		b.StartTimer()
+	}
+}
+
+// BenchmarkPinAll shows the time to pin all nodes with only one Flush at
+// the end.
+func BenchmarkPinAll(b *testing.B) {
+	for count := 128; count < 16386; count <<= 1 {
+		b.Run(fmt.Sprint("PinAllDS-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := New(context.Background(), dstore, dserv)
+			if err != nil {
+				panic(err)
+			}
+			benchmarkPinAll(b, count, pinner, dserv)
+		})
+
+		b.Run(fmt.Sprint("PinAllIPLD-", count), func(b *testing.B) {
+			dstore, dserv := makeStore()
+			pinner, err := ipldpinner.New(dstore, dserv, dserv)
+			if err != nil {
+				panic(err.Error())
+			}
+			benchmarkPinAll(b, count, pinner, dserv)
+		})
+	}
+}
+
+func benchmarkPinAll(b *testing.B, count int, pinner ipfspin.Pinner, dserv ipld.DAGService) {
+	nodes := makeNodes(count, dserv)
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		pinNodes(nodes, pinner, true)
+
+		b.StopTimer()
+		unpinNodes(nodes, pinner)
+		b.StartTimer()
+	}
+}
diff --git a/go.mod b/go.mod
index 48479aefacac60df5d1fbf99eeb61842aac46fd0..5fb384a659aba9dd3b7c06f698c4843212b360db 100644
--- a/go.mod
+++ b/go.mod
@@ -4,13 +4,16 @@ go 1.13
 
 require (
 	github.com/gogo/protobuf v1.3.1
-	github.com/ipfs/go-blockservice v0.1.2
-	github.com/ipfs/go-cid v0.0.3
-	github.com/ipfs/go-datastore v0.3.0
-	github.com/ipfs/go-ipfs-blockstore v0.1.0
+	github.com/ipfs/go-blockservice v0.1.4
+	github.com/ipfs/go-cid v0.0.7
+	github.com/ipfs/go-datastore v0.4.5
+	github.com/ipfs/go-ds-leveldb v0.4.2
+	github.com/ipfs/go-ipfs-blockstore v0.1.4
 	github.com/ipfs/go-ipfs-exchange-offline v0.0.1
-	github.com/ipfs/go-ipfs-util v0.0.1
-	github.com/ipfs/go-ipld-format v0.0.2
-	github.com/ipfs/go-log v0.0.1
+	github.com/ipfs/go-ipfs-util v0.0.2
+	github.com/ipfs/go-ipld-format v0.2.0
+	github.com/ipfs/go-log v1.0.4
 	github.com/ipfs/go-merkledag v0.3.0
+	github.com/multiformats/go-multibase v0.0.3
+	github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1
 )
diff --git a/go.sum b/go.sum
index c6a12e4cb87256c4ebc9a6b5b632aaf8c876347a..fc7de89623f7348f4d92caa599178143d3146a4b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,6 @@
 github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Kubuxu/go-os-helper v0.0.1 h1:EJiD2VUQyh5A9hWJLmc6iWg6yIcJ7jpBcwC8GMGXfDk=
 github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
 github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
@@ -40,6 +42,7 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
@@ -49,6 +52,8 @@ github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfm
 github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
@@ -59,33 +64,40 @@ github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0r
 github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
 github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
 github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0=
-github.com/ipfs/go-bitswap v0.1.3 h1:jAl9Z/TYObpGeGATUemnOZ7RYb0F/kzNVlhcYZesz+0=
-github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps=
+github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg=
+github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
 github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
 github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=
 github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
 github.com/ipfs/go-blockservice v0.1.0 h1:dh2i7xjMbCtf0ZSMyQAF2qpV/pEEmM7yVpQ00+gik6U=
 github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
-github.com/ipfs/go-blockservice v0.1.2 h1:fqFeeu1EG0lGVrqUo+BVJv7LZV31I4ZsyNthCOMAJRc=
-github.com/ipfs/go-blockservice v0.1.2/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
+github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA=
+github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
 github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
 github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms=
-github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
+github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
+github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
+github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY=
+github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
 github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
 github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI=
-github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.3.0 h1:9au0tYi/+n7xeUnGHG6davnS8x9hWbOzP/388Vx3CMs=
-github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
+github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
+github.com/ipfs/go-datastore v0.4.1 h1:W4ZfzyhNi3xmuU5dQhjfuRn/wFuqEE1KnOmmQiOevEY=
+github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
+github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg=
+github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs=
 github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
 github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
 github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
+github.com/ipfs/go-ds-leveldb v0.0.1 h1:Z0lsTFciec9qYsyngAw1f/czhRU35qBLR2vhavPFgqA=
 github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
+github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw=
+github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
 github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc=
 github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
-github.com/ipfs/go-ipfs-blockstore v0.1.0 h1:V1GZorHFUIB6YgTJQdq7mcaIpUfCM3fCyVi+MTo9O88=
-github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
+github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ=
+github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
 github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
 github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
 github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
@@ -93,6 +105,8 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I
 github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
 github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU=
 github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo=
+github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc=
+github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs=
 github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM=
 github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM=
 github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew=
@@ -103,13 +117,21 @@ github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRD
 github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
 github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
 github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
+github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
+github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
 github.com/ipfs/go-ipld-cbor v0.0.2 h1:amzFztBQQQ69UA5+f7JRfoXF/z2l//MGfEDHVkS20+s=
 github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
 github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
 github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=
 github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
+github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA=
+github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
 github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
 github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
+github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY=
+github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
+github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU=
+github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
 github.com/ipfs/go-merkledag v0.3.0 h1:1bXv/ZRPZLVdij/a33CkXMVdxUdred9sz4xyph+0ls0=
 github.com/ipfs/go-merkledag v0.3.0/go.mod h1:4pymaZLhSLNVuiCITYrpViD6vmfZ/Ws4n/L9tfNv3S4=
 github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
@@ -131,6 +153,8 @@ github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod
 github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
 github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
 github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
 github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
@@ -143,6 +167,8 @@ github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muM
 github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -205,8 +231,8 @@ github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW
 github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0=
 github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
 github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
-github.com/libp2p/go-msgio v0.0.3 h1:VsOlWispTivSsOMg70e0W77y6oiSBSRCyP6URrWvE04=
-github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
+github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=
+github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
 github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI=
 github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI=
 github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=
@@ -238,12 +264,18 @@ github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+
 github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
 github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM=
 github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
 github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
 github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
 github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78=
 github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
+github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
 github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
+github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
 github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
 github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
 github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4=
@@ -257,11 +289,18 @@ github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi
 github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
 github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=
 github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
+github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
+github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
 github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
 github.com/multiformats/go-multihash v0.0.5 h1:1wxmCvTXAifAepIMyF39vZinRw5sbqjPs/UIi93+uik=
 github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
+github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc=
+github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
 github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=
 github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
+github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
+github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
@@ -278,6 +317,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE=
 github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI=
+github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA=
@@ -291,6 +333,8 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
 github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
 github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc=
@@ -306,24 +350,40 @@ github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvX
 github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=
 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
+go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
 golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190611141213-3f473d35a33a h1:+KkCgOMgnKSgenxTBoiwkMqTiouMIy/3o8RLdmSbGoY=
 golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -340,12 +400,20 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -353,3 +421,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/ipldpinner/pin.go b/ipldpinner/pin.go
new file mode 100644
index 0000000000000000000000000000000000000000..d0824b349b3869c36ca18c6dac9cecbb2a03a509
--- /dev/null
+++ b/ipldpinner/pin.go
@@ -0,0 +1,528 @@
+// Package ipldpinner implements structures and methods to keep track of
+// which objects a user wants to keep stored locally.  This implementation
+// stores pin information in an mdag structure.
+package ipldpinner
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	ipld "github.com/ipfs/go-ipld-format"
+	logging "github.com/ipfs/go-log"
+	mdag "github.com/ipfs/go-merkledag"
+	"github.com/ipfs/go-merkledag/dagutils"
+
+	ipfspinner "github.com/ipfs/go-ipfs-pinner"
+)
+
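+// loadTimeout bounds how long New waits when loading pin sets from storage.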
+const loadTimeout = 5 * time.Second
+
+var log = logging.Logger("pin")
+
+var pinDatastoreKey = ds.NewKey("/local/pins")
+
+var emptyKey cid.Cid
+
+var linkDirect, linkRecursive, linkInternal string
+
+func init() {
+	e, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
+	if err != nil {
+		log.Error("failed to decode empty key constant")
+		os.Exit(1)
+	}
+	emptyKey = e
+
+	directStr, ok := ipfspinner.ModeToString(ipfspinner.Direct)
+	if !ok {
+		panic("could not find Direct pin enum")
+	}
+	linkDirect = directStr
+
+	recursiveStr, ok := ipfspinner.ModeToString(ipfspinner.Recursive)
+	if !ok {
+		panic("could not find Recursive pin enum")
+	}
+	linkRecursive = recursiveStr
+
+	internalStr, ok := ipfspinner.ModeToString(ipfspinner.Internal)
+	if !ok {
+		panic("could not find Internal pin enum")
+	}
+	linkInternal = internalStr
+}
+
+// pinner implements the Pinner interface
+type pinner struct {
+	lock       sync.RWMutex
+	recursePin *cid.Set
+	directPin  *cid.Set
+
+	// Track the keys used for storing the pinning state, so gc does
+	// not delete them.
+	internalPin *cid.Set
+	dserv       ipld.DAGService
+	internal    ipld.DAGService // dagservice used to store internal objects
+	dstore      ds.Datastore
+}
+
+var _ ipfspinner.Pinner = (*pinner)(nil)
+
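+// syncDAGService is implemented by DAG services that can flush buffered
+// writes to durable storage.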
+type syncDAGService interface {
+	ipld.DAGService
+	Sync() error
+}
+
+// New creates a new pinner using the given datastore as a backend, and loads
+// the pinner's keysets from the datastore
+func New(dstore ds.Datastore, dserv, internal ipld.DAGService) (*pinner, error) {
+	rootKey, err := dstore.Get(pinDatastoreKey)
+	if err != nil {
+		if err == ds.ErrNotFound {
+			return &pinner{
+				recursePin:  cid.NewSet(),
+				directPin:   cid.NewSet(),
+				internalPin: cid.NewSet(),
+				dserv:       dserv,
+				internal:    internal,
+				dstore:      dstore,
+			}, nil
+		}
+		return nil, err
+	}
+	rootCid, err := cid.Cast(rootKey)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), loadTimeout)
+	defer cancel()
+
+	root, err := internal.Get(ctx, rootCid)
+	if err != nil {
+		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
+	}
+
+	rootpb, ok := root.(*mdag.ProtoNode)
+	if !ok {
+		return nil, mdag.ErrNotProtobuf
+	}
+
+	internalset := cid.NewSet()
+	internalset.Add(rootCid)
+	recordInternal := internalset.Add
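+	// recordInternal records every pin-set node visited while loading, so
+	// that gc does not delete the internal pinning structures.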
+
+	// load recursive set
+	recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
+	if err != nil {
+		return nil, fmt.Errorf("cannot load recursive pins: %v", err)
+	}
+
+	// load direct set
+	directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
+	if err != nil {
+		return nil, fmt.Errorf("cannot load direct pins: %v", err)
+	}
+
+	return &pinner{
+		// assign pinsets
+		recursePin:  cidSetWithValues(recurseKeys),
+		directPin:   cidSetWithValues(directKeys),
+		internalPin: internalset,
+		// assign services
+		dserv:    dserv,
+		dstore:   dstore,
+		internal: internal,
+	}, nil
+}
+
+// Pin the given node, optionally recursive
+func (p *pinner) Pin(ctx context.Context, node ipld.Node, recurse bool) error {
+	err := p.dserv.Add(ctx, node)
+	if err != nil {
+		return err
+	}
+
+	c := node.Cid()
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if recurse {
+		if p.recursePin.Has(c) {
+			return nil
+		}
+
+		p.lock.Unlock()
+		// temporary unlock to fetch the entire graph
+		err := mdag.FetchGraph(ctx, c, p.dserv)
+		p.lock.Lock()
+		if err != nil {
+			return err
+		}
+
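+		// Check again: another goroutine may have pinned c while the lock was released.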
+		if p.recursePin.Has(c) {
+			return nil
+		}
+
+		if p.directPin.Has(c) {
+			p.directPin.Remove(c)
+		}
+
+		p.recursePin.Add(c)
+	} else {
+		if p.recursePin.Has(c) {
+			return fmt.Errorf("%s already pinned recursively", c.String())
+		}
+
+		p.directPin.Add(c)
+	}
+	return nil
+}
+
+// ErrNotPinned is returned when trying to unpin items which are not pinned.
+var ErrNotPinned = fmt.Errorf("not pinned or pinned indirectly")
+
+// Unpin a given key
+func (p *pinner) Unpin(ctx context.Context, c cid.Cid, recursive bool) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	if p.recursePin.Has(c) {
+		if !recursive {
+			return fmt.Errorf("%s is pinned recursively", c)
+		}
+		p.recursePin.Remove(c)
+		return nil
+	}
+	if p.directPin.Has(c) {
+		p.directPin.Remove(c)
+		return nil
+	}
+	return ErrNotPinned
+}
+
+func (p *pinner) isInternalPin(c cid.Cid) bool {
+	return p.internalPin.Has(c)
+}
+
+// IsPinned returns whether or not the given key is pinned
+// and an explanation of why it is pinned
+func (p *pinner) IsPinned(ctx context.Context, c cid.Cid) (string, bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	return p.isPinnedWithType(ctx, c, ipfspinner.Any)
+}
+
+// IsPinnedWithType returns whether or not the given cid is pinned with the
+// given pin type, as well as returning the type of pin it is pinned with.
+func (p *pinner) IsPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	return p.isPinnedWithType(ctx, c, mode)
+}
+
+// isPinnedWithType is the implementation of IsPinnedWithType that does not lock.
+// Intended for use by other pinner methods that already hold the lock.
+func (p *pinner) isPinnedWithType(ctx context.Context, c cid.Cid, mode ipfspinner.Mode) (string, bool, error) {
+	switch mode {
+	case ipfspinner.Any, ipfspinner.Direct, ipfspinner.Indirect, ipfspinner.Recursive, ipfspinner.Internal:
+	default:
+		err := fmt.Errorf("invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}",
+			mode, ipfspinner.Direct, ipfspinner.Indirect, ipfspinner.Recursive, ipfspinner.Internal, ipfspinner.Any)
+		return "", false, err
+	}
+	if (mode == ipfspinner.Recursive || mode == ipfspinner.Any) && p.recursePin.Has(c) {
+		return linkRecursive, true, nil
+	}
+	if mode == ipfspinner.Recursive {
+		return "", false, nil
+	}
+
+	if (mode == ipfspinner.Direct || mode == ipfspinner.Any) && p.directPin.Has(c) {
+		return linkDirect, true, nil
+	}
+	if mode == ipfspinner.Direct {
+		return "", false, nil
+	}
+
+	if (mode == ipfspinner.Internal || mode == ipfspinner.Any) && p.isInternalPin(c) {
+		return linkInternal, true, nil
+	}
+	if mode == ipfspinner.Internal {
+		return "", false, nil
+	}
+
+	// Default is Indirect
+	visitedSet := cid.NewSet()
+	for _, rc := range p.recursePin.Keys() {
+		has, err := hasChild(ctx, p.dserv, rc, c, visitedSet.Visit)
+		if err != nil {
+			return "", false, err
+		}
+		if has {
+			return rc.String(), true, nil
+		}
+	}
+	return "", false, nil
+}
+
+// CheckIfPinned checks if a set of keys are pinned.  It is more efficient
+// than calling IsPinned for each key, and returns the pinned status of each cid.
+func (p *pinner) CheckIfPinned(ctx context.Context, cids ...cid.Cid) ([]ipfspinner.Pinned, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	pinned := make([]ipfspinner.Pinned, 0, len(cids))
+	toCheck := cid.NewSet()
+
+	// First check for non-Indirect pins directly
+	for _, c := range cids {
+		if p.recursePin.Has(c) {
+			pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Recursive})
+		} else if p.directPin.Has(c) {
+			pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Direct})
+		} else if p.isInternalPin(c) {
+			pinned = append(pinned, ipfspinner.Pinned{Key: c, Mode: ipfspinner.Internal})
+		} else {
+			toCheck.Add(c)
+		}
+	}
+
+	// Now walk all recursive pins to check for indirect pins
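+	// The walk short-circuits once every candidate cid in toCheck is found.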
+	var checkChildren func(cid.Cid, cid.Cid) error
+	checkChildren = func(rk, parentKey cid.Cid) error {
+		links, err := ipld.GetLinks(ctx, p.dserv, parentKey)
+		if err != nil {
+			return err
+		}
+		for _, lnk := range links {
+			c := lnk.Cid
+
+			if toCheck.Has(c) {
+				pinned = append(pinned,
+					ipfspinner.Pinned{Key: c, Mode: ipfspinner.Indirect, Via: rk})
+				toCheck.Remove(c)
+			}
+
+			err := checkChildren(rk, c)
+			if err != nil {
+				return err
+			}
+
+			if toCheck.Len() == 0 {
+				return nil
+			}
+		}
+		return nil
+	}
+
+	for _, rk := range p.recursePin.Keys() {
+		err := checkChildren(rk, rk)
+		if err != nil {
+			return nil, err
+		}
+		if toCheck.Len() == 0 {
+			break
+		}
+	}
+
+	// Anything left in toCheck is not pinned
+	for _, k := range toCheck.Keys() {
+		pinned = append(pinned, ipfspinner.Pinned{Key: k, Mode: ipfspinner.NotPinned})
+	}
+
+	return pinned, nil
+}
+
+// RemovePinWithMode is for manually editing the pin structure.
+// Use with care! If used improperly, garbage collection may not
+// be successful.
+func (p *pinner) RemovePinWithMode(c cid.Cid, mode ipfspinner.Mode) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	switch mode {
+	case ipfspinner.Direct:
+		p.directPin.Remove(c)
+	case ipfspinner.Recursive:
+		p.recursePin.Remove(c)
+	default:
+		// programmer error, panic OK
+		panic("unrecognized pin type")
+	}
+}
+
+func cidSetWithValues(cids []cid.Cid) *cid.Set {
+	out := cid.NewSet()
+	for _, c := range cids {
+		out.Add(c)
+	}
+	return out
+}
+
+// DirectKeys returns a slice containing the directly pinned keys
+func (p *pinner) DirectKeys(ctx context.Context) ([]cid.Cid, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return p.directPin.Keys(), nil
+}
+
+// RecursiveKeys returns a slice containing the recursively pinned keys
+func (p *pinner) RecursiveKeys(ctx context.Context) ([]cid.Cid, error) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return p.recursePin.Keys(), nil
+}
+
+// Update updates a recursive pin from one cid to another.  This is more
+// efficient than simply pinning the new one and unpinning the old one.
+func (p *pinner) Update(ctx context.Context, from, to cid.Cid, unpin bool) error {
+	if from == to {
+		// Nothing to do. Don't remove this check or we'll end up
+		// _removing_ the pin.
+		//
+		// See #6648
+		return nil
+	}
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if !p.recursePin.Has(from) {
+		return fmt.Errorf("'from' cid was not recursively pinned already")
+	}
+
+	// Temporarily unlock while we fetch the differences.
+	p.lock.Unlock()
+	err := dagutils.DiffEnumerate(ctx, p.dserv, from, to)
+	p.lock.Lock()
+
+	if err != nil {
+		return err
+	}
+
+	p.recursePin.Add(to)
+	if unpin {
+		p.recursePin.Remove(from)
+	}
+	return nil
+}
+
+// Flush encodes and writes pinner keysets to the datastore
+func (p *pinner) Flush(ctx context.Context) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	internalset := cid.NewSet()
+	recordInternal := internalset.Add
+
+	root := &mdag.ProtoNode{}
+	{
+		n, err := storeSet(ctx, p.internal, p.directPin.Keys(), recordInternal)
+		if err != nil {
+			return err
+		}
+		if err := root.AddNodeLink(linkDirect, n); err != nil {
+			return err
+		}
+	}
+
+	{
+		n, err := storeSet(ctx, p.internal, p.recursePin.Keys(), recordInternal)
+		if err != nil {
+			return err
+		}
+		if err := root.AddNodeLink(linkRecursive, n); err != nil {
+			return err
+		}
+	}
+
+	// add the empty node; it is referenced by the pin sets but never created
+	err := p.internal.Add(ctx, new(mdag.ProtoNode))
+	if err != nil {
+		return err
+	}
+
+	err = p.internal.Add(ctx, root)
+	if err != nil {
+		return err
+	}
+
+	k := root.Cid()
+
+	internalset.Add(k)
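+	// The new root is itself internal state and must be protected from gc.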
+
+	if syncDServ, ok := p.dserv.(syncDAGService); ok {
+		if err := syncDServ.Sync(); err != nil {
+			return fmt.Errorf("cannot sync pinned data: %v", err)
+		}
+	}
+
+	if syncInternal, ok := p.internal.(syncDAGService); ok {
+		if err := syncInternal.Sync(); err != nil {
+			return fmt.Errorf("cannot sync pinning data: %v", err)
+		}
+	}
+
+	if err := p.dstore.Put(pinDatastoreKey, k.Bytes()); err != nil {
+		return fmt.Errorf("cannot store pin state: %v", err)
+	}
+	if err := p.dstore.Sync(pinDatastoreKey); err != nil {
+		return fmt.Errorf("cannot sync pin state: %v", err)
+	}
+	p.internalPin = internalset
+	return nil
+}
+
+// InternalPins returns all cids kept pinned for the internal state of the
+// pinner
+func (p *pinner) InternalPins(ctx context.Context) ([]cid.Cid, error) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	return p.internalPin.Keys(), nil
+}
+
+// PinWithMode allows the user to have fine-grained control over pin counts.
+func (p *pinner) PinWithMode(c cid.Cid, mode ipfspinner.Mode) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	switch mode {
+	case ipfspinner.Recursive:
+		p.recursePin.Add(c)
+	case ipfspinner.Direct:
+		p.directPin.Add(c)
+	}
+}
+
+// hasChild recursively looks for a Cid among the children of a root Cid.
+// The visit function can be used to shortcut already-visited branches.
+func hasChild(ctx context.Context, ng ipld.NodeGetter, root cid.Cid, child cid.Cid, visit func(cid.Cid) bool) (bool, error) {
+	links, err := ipld.GetLinks(ctx, ng, root)
+	if err != nil {
+		return false, err
+	}
+	for _, lnk := range links {
+		c := lnk.Cid
+		if lnk.Cid.Equals(child) {
+			return true, nil
+		}
+		if visit(c) {
+			has, err := hasChild(ctx, ng, c, child, visit)
+			if err != nil {
+				return false, err
+			}
+
+			if has {
+				return has, nil
+			}
+		}
+	}
+	return false, nil
+}
diff --git a/pin_test.go b/ipldpinner/pin_test.go
similarity index 90%
rename from pin_test.go
rename to ipldpinner/pin_test.go
index e477ac07fe023e2984d0aaeab68204540d141c13..e193aa96c34d3017707737c398cb536181c3f1c4 100644
--- a/pin_test.go
+++ b/ipldpinner/pin_test.go
@@ -1,4 +1,4 @@
-package pin
+package ipldpinner
 
 import (
 	"context"
@@ -14,6 +14,7 @@ import (
 	dssync "github.com/ipfs/go-datastore/sync"
 	blockstore "github.com/ipfs/go-ipfs-blockstore"
 	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	pin "github.com/ipfs/go-ipfs-pinner"
 	util "github.com/ipfs/go-ipfs-util"
 )
 
@@ -30,7 +31,7 @@ func randNode() (*mdag.ProtoNode, cid.Cid) {
 	return nd, k
 }
 
-func assertPinned(t *testing.T, p Pinner, c cid.Cid, failmsg string) {
+func assertPinned(t *testing.T, p pin.Pinner, c cid.Cid, failmsg string) {
 	_, pinned, err := p.IsPinned(context.Background(), c)
 	if err != nil {
 		t.Fatal(err)
@@ -41,7 +42,7 @@ func assertPinned(t *testing.T, p Pinner, c cid.Cid, failmsg string) {
 	}
 }
 
-func assertUnpinned(t *testing.T, p Pinner, c cid.Cid, failmsg string) {
+func assertUnpinned(t *testing.T, p pin.Pinner, c cid.Cid, failmsg string) {
 	_, pinned, err := p.IsPinned(context.Background(), c)
 	if err != nil {
 		t.Fatal(err)
@@ -62,10 +63,13 @@ func TestPinnerBasic(t *testing.T) {
 	dserv := mdag.NewDAGService(bserv)
 
 	// TODO does pinner need to share datastore with blockservice?
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	a, ak := randNode()
-	err := dserv.Add(ctx, a)
+	err = dserv.Add(ctx, a)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -151,7 +155,7 @@ func TestPinnerBasic(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	np, err := LoadPinner(dstore, dserv, dserv)
+	np, err := New(dstore, dserv, dserv)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -188,7 +192,10 @@ func TestIsPinnedLookup(t *testing.T) {
 	dserv := mdag.NewDAGService(bserv)
 
 	// TODO does pinner need to share datastore with blockservice?
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	aNodes := make([]*mdag.ProtoNode, aBranchLen)
 	aKeys := make([]cid.Cid, aBranchLen)
@@ -229,7 +236,7 @@ func TestIsPinnedLookup(t *testing.T) {
 	}
 
 	// Add C
-	err := dserv.Add(ctx, c)
+	err = dserv.Add(ctx, c)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -289,11 +296,13 @@ func TestDuplicateSemantics(t *testing.T) {
 
 	dserv := mdag.NewDAGService(bserv)
 
-	// TODO does pinner need to share datastore with blockservice?
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	a, _ := randNode()
-	err := dserv.Add(ctx, a)
+	err = dserv.Add(ctx, a)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -323,10 +332,13 @@ func TestFlush(t *testing.T) {
 	bserv := bs.New(bstore, offline.Exchange(bstore))
 
 	dserv := mdag.NewDAGService(bserv)
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 	_, k := randNode()
 
-	p.PinWithMode(k, Recursive)
+	p.PinWithMode(k, pin.Recursive)
 	if err := p.Flush(context.Background()); err != nil {
 		t.Fatal(err)
 	}
@@ -340,11 +352,14 @@ func TestPinRecursiveFail(t *testing.T) {
 	bserv := bs.New(bstore, offline.Exchange(bstore))
 	dserv := mdag.NewDAGService(bserv)
 
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	a, _ := randNode()
 	b, _ := randNode()
-	err := a.AddNodeLink("child", b)
+	err = a.AddNodeLink("child", b)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -385,7 +400,10 @@ func TestPinUpdate(t *testing.T) {
 	bserv := bs.New(bstore, offline.Exchange(bstore))
 
 	dserv := mdag.NewDAGService(bserv)
-	p := NewPinner(dstore, dserv, dserv)
+	p, err := New(dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
 	n1, c1 := randNode()
 	n2, c2 := randNode()
 
diff --git a/set.go b/ipldpinner/set.go
similarity index 94%
rename from set.go
rename to ipldpinner/set.go
index ca437974ff3b135d02959582401a9c089b5a2f2c..2fb931f93b4356bfa013290c02dede8ca024e6db 100644
--- a/set.go
+++ b/ipldpinner/set.go
@@ -1,4 +1,4 @@
-package pin
+package ipldpinner
 
 import (
 	"bytes"
@@ -55,9 +55,14 @@ func (s sortByHash) Swap(a, b int) {
 }
 
 func storeItems(ctx context.Context, dag ipld.DAGService, estimatedLen uint64, depth uint32, iter itemIterator, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
-	links := make([]*ipld.Link, 0, defaultFanout+maxItems)
+	// Each node wastes up to defaultFanout in empty links.
+	var leafLinks uint64
+	if estimatedLen < maxItems {
+		leafLinks = estimatedLen
+	}
+	links := make([]*ipld.Link, defaultFanout, defaultFanout+leafLinks)
 	for i := 0; i < defaultFanout; i++ {
-		links = append(links, &ipld.Link{Cid: emptyKey})
+		links[i] = &ipld.Link{Cid: emptyKey}
 	}
 
 	// add emptyKey to our set of internal pinset objects
@@ -97,7 +102,7 @@ func storeItems(ctx context.Context, dag ipld.DAGService, estimatedLen uint64, d
 		sort.Stable(s)
 	}
 
-	hashed := make([][]cid.Cid, defaultFanout)
+	var hashed [][]cid.Cid
 	for {
 		// This loop essentially enumerates every single item in the set
 		// and maps them all into a set of buckets. Each bucket will be recursively
@@ -116,6 +121,9 @@ func storeItems(ctx context.Context, dag ipld.DAGService, estimatedLen uint64, d
 		if !ok {
 			break
 		}
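+		// Allocate the buckets lazily; sets that fit in a single node never reach here.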
+		if hashed == nil {
+			hashed = make([][]cid.Cid, defaultFanout)
+		}
 		h := hash(depth, k) % defaultFanout
 		hashed[h] = append(hashed[h], k)
 	}
diff --git a/set_test.go b/ipldpinner/set_test.go
similarity index 99%
rename from set_test.go
rename to ipldpinner/set_test.go
index 61a3118b2627c94b370bb3aec98b14a4c7ec97fa..0f32e6b5e362ad340a3b79227dd57c0c3b889e5b 100644
--- a/set_test.go
+++ b/ipldpinner/set_test.go
@@ -1,4 +1,4 @@
-package pin
+package ipldpinner
 
 import (
 	"context"
diff --git a/pin.go b/pin.go
index aa74c51854509cba3f63f732e72194d81e19b158..7e1d886020a3e3810eed763086f2a57e08a123d7 100644
--- a/pin.go
+++ b/pin.go
@@ -5,33 +5,14 @@ package pin
 import (
 	"context"
 	"fmt"
-	"os"
-	"sync"
-	"time"
 
 	cid "github.com/ipfs/go-cid"
-	ds "github.com/ipfs/go-datastore"
 	ipld "github.com/ipfs/go-ipld-format"
 	logging "github.com/ipfs/go-log"
-	mdag "github.com/ipfs/go-merkledag"
-	"github.com/ipfs/go-merkledag/dagutils"
 )
 
 var log = logging.Logger("pin")
 
-var pinDatastoreKey = ds.NewKey("/local/pins")
-
-var emptyKey cid.Cid
-
-func init() {
-	e, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
-	if err != nil {
-		log.Error("failed to decode empty key constant")
-		os.Exit(1)
-	}
-	emptyKey = e
-}
-
 const (
 	linkRecursive = "recursive"
 	linkDirect    = "direct"
@@ -177,482 +158,3 @@ func (p Pinned) String() string {
 		return fmt.Sprintf("pinned: %s", modeStr)
 	}
 }
-
-// pinner implements the Pinner interface
-type pinner struct {
-	lock       sync.RWMutex
-	recursePin *cid.Set
-	directPin  *cid.Set
-
-	// Track the keys used for storing the pinning state, so gc does
-	// not delete them.
-	internalPin *cid.Set
-	dserv       ipld.DAGService
-	internal    ipld.DAGService // dagservice used to store internal objects
-	dstore      ds.Datastore
-}
-
-type syncDAGService interface {
-	ipld.DAGService
-	Sync() error
-}
-
-// NewPinner creates a new pinner using the given datastore as a backend
-func NewPinner(dstore ds.Datastore, serv, internal ipld.DAGService) Pinner {
-
-	rcset := cid.NewSet()
-	dirset := cid.NewSet()
-
-	return &pinner{
-		recursePin:  rcset,
-		directPin:   dirset,
-		dserv:       serv,
-		dstore:      dstore,
-		internal:    internal,
-		internalPin: cid.NewSet(),
-	}
-}
-
-// Pin the given node, optionally recursive
-func (p *pinner) Pin(ctx context.Context, node ipld.Node, recurse bool) error {
-	err := p.dserv.Add(ctx, node)
-	if err != nil {
-		return err
-	}
-
-	c := node.Cid()
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if recurse {
-		if p.recursePin.Has(c) {
-			return nil
-		}
-
-		p.lock.Unlock()
-		// temporary unlock to fetch the entire graph
-		err := mdag.FetchGraph(ctx, c, p.dserv)
-		p.lock.Lock()
-		if err != nil {
-			return err
-		}
-
-		if p.recursePin.Has(c) {
-			return nil
-		}
-
-		if p.directPin.Has(c) {
-			p.directPin.Remove(c)
-		}
-
-		p.recursePin.Add(c)
-	} else {
-		if p.recursePin.Has(c) {
-			return fmt.Errorf("%s already pinned recursively", c.String())
-		}
-
-		p.directPin.Add(c)
-	}
-	return nil
-}
-
-// ErrNotPinned is returned when trying to unpin items which are not pinned.
-var ErrNotPinned = fmt.Errorf("not pinned or pinned indirectly")
-
-// Unpin a given key
-func (p *pinner) Unpin(ctx context.Context, c cid.Cid, recursive bool) error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	if p.recursePin.Has(c) {
-		if !recursive {
-			return fmt.Errorf("%s is pinned recursively", c)
-		}
-		p.recursePin.Remove(c)
-		return nil
-	}
-	if p.directPin.Has(c) {
-		p.directPin.Remove(c)
-		return nil
-	}
-	return ErrNotPinned
-}
-
-func (p *pinner) isInternalPin(c cid.Cid) bool {
-	return p.internalPin.Has(c)
-}
-
-// IsPinned returns whether or not the given key is pinned
-// and an explanation of why it's pinned
-func (p *pinner) IsPinned(ctx context.Context, c cid.Cid) (string, bool, error) {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-	return p.isPinnedWithType(ctx, c, Any)
-}
-
-// IsPinnedWithType returns whether or not the given cid is pinned with the
-// given pin type, as well as returning the type of pin it's pinned with.
-func (p *pinner) IsPinnedWithType(ctx context.Context, c cid.Cid, mode Mode) (string, bool, error) {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-	return p.isPinnedWithType(ctx, c, mode)
-}
-
-// isPinnedWithType is the implementation of IsPinnedWithType that does not lock.
-// It is intended for use by other pinner methods that already hold the lock.
-func (p *pinner) isPinnedWithType(ctx context.Context, c cid.Cid, mode Mode) (string, bool, error) {
-	switch mode {
-	case Any, Direct, Indirect, Recursive, Internal:
-	default:
-		err := fmt.Errorf("invalid Pin Mode '%d', must be one of {%d, %d, %d, %d, %d}",
-			mode, Direct, Indirect, Recursive, Internal, Any)
-		return "", false, err
-	}
-	if (mode == Recursive || mode == Any) && p.recursePin.Has(c) {
-		return linkRecursive, true, nil
-	}
-	if mode == Recursive {
-		return "", false, nil
-	}
-
-	if (mode == Direct || mode == Any) && p.directPin.Has(c) {
-		return linkDirect, true, nil
-	}
-	if mode == Direct {
-		return "", false, nil
-	}
-
-	if (mode == Internal || mode == Any) && p.isInternalPin(c) {
-		return linkInternal, true, nil
-	}
-	if mode == Internal {
-		return "", false, nil
-	}
-
-	// Default is Indirect
-	visitedSet := cid.NewSet()
-	for _, rc := range p.recursePin.Keys() {
-		has, err := hasChild(ctx, p.dserv, rc, c, visitedSet.Visit)
-		if err != nil {
-			return "", false, err
-		}
-		if has {
-			return rc.String(), true, nil
-		}
-	}
-	return "", false, nil
-}
-
-// CheckIfPinned checks whether a set of keys are pinned.  It is more
-// efficient than calling IsPinned for each key, and returns the pinned
-// status of each cid.
-func (p *pinner) CheckIfPinned(ctx context.Context, cids ...cid.Cid) ([]Pinned, error) {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-	pinned := make([]Pinned, 0, len(cids))
-	toCheck := cid.NewSet()
-
-	// First check for non-Indirect pins directly
-	for _, c := range cids {
-		if p.recursePin.Has(c) {
-			pinned = append(pinned, Pinned{Key: c, Mode: Recursive})
-		} else if p.directPin.Has(c) {
-			pinned = append(pinned, Pinned{Key: c, Mode: Direct})
-		} else if p.isInternalPin(c) {
-			pinned = append(pinned, Pinned{Key: c, Mode: Internal})
-		} else {
-			toCheck.Add(c)
-		}
-	}
-
-	// Now walk all recursive pins to check for indirect pins
-	var checkChildren func(cid.Cid, cid.Cid) error
-	checkChildren = func(rk, parentKey cid.Cid) error {
-		links, err := ipld.GetLinks(ctx, p.dserv, parentKey)
-		if err != nil {
-			return err
-		}
-		for _, lnk := range links {
-			c := lnk.Cid
-
-			if toCheck.Has(c) {
-				pinned = append(pinned,
-					Pinned{Key: c, Mode: Indirect, Via: rk})
-				toCheck.Remove(c)
-			}
-
-			err := checkChildren(rk, c)
-			if err != nil {
-				return err
-			}
-
-			if toCheck.Len() == 0 {
-				return nil
-			}
-		}
-		return nil
-	}
-
-	for _, rk := range p.recursePin.Keys() {
-		err := checkChildren(rk, rk)
-		if err != nil {
-			return nil, err
-		}
-		if toCheck.Len() == 0 {
-			break
-		}
-	}
-
-	// Anything left in toCheck is not pinned
-	for _, k := range toCheck.Keys() {
-		pinned = append(pinned, Pinned{Key: k, Mode: NotPinned})
-	}
-
-	return pinned, nil
-}
-
-// RemovePinWithMode is for manually editing the pin structure.
-// Use with care! If used improperly, garbage collection may not
-// be successful.
-func (p *pinner) RemovePinWithMode(c cid.Cid, mode Mode) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	switch mode {
-	case Direct:
-		p.directPin.Remove(c)
-	case Recursive:
-		p.recursePin.Remove(c)
-	default:
-		// programmer error, panic OK
-		panic("unrecognized pin type")
-	}
-}
-
-func cidSetWithValues(cids []cid.Cid) *cid.Set {
-	out := cid.NewSet()
-	for _, c := range cids {
-		out.Add(c)
-	}
-	return out
-}
-
-// LoadPinner loads a pinner and its keysets from the given datastore
-func LoadPinner(d ds.Datastore, dserv, internal ipld.DAGService) (Pinner, error) {
-	p := new(pinner)
-
-	rootKey, err := d.Get(pinDatastoreKey)
-	if err != nil {
-		return nil, fmt.Errorf("cannot load pin state: %v", err)
-	}
-	rootCid, err := cid.Cast(rootKey)
-	if err != nil {
-		return nil, err
-	}
-
-	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
-	defer cancel()
-
-	root, err := internal.Get(ctx, rootCid)
-	if err != nil {
-		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
-	}
-
-	rootpb, ok := root.(*mdag.ProtoNode)
-	if !ok {
-		return nil, mdag.ErrNotProtobuf
-	}
-
-	internalset := cid.NewSet()
-	internalset.Add(rootCid)
-	recordInternal := internalset.Add
-
-	{ // load recursive set
-		recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
-		if err != nil {
-			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
-		}
-		p.recursePin = cidSetWithValues(recurseKeys)
-	}
-
-	{ // load direct set
-		directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
-		if err != nil {
-			return nil, fmt.Errorf("cannot load direct pins: %v", err)
-		}
-		p.directPin = cidSetWithValues(directKeys)
-	}
-
-	p.internalPin = internalset
-
-	// assign services
-	p.dserv = dserv
-	p.dstore = d
-	p.internal = internal
-
-	return p, nil
-}
-
-// DirectKeys returns a slice containing the directly pinned keys
-func (p *pinner) DirectKeys(ctx context.Context) ([]cid.Cid, error) {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-
-	return p.directPin.Keys(), nil
-}
-
-// RecursiveKeys returns a slice containing the recursively pinned keys
-func (p *pinner) RecursiveKeys(ctx context.Context) ([]cid.Cid, error) {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-
-	return p.recursePin.Keys(), nil
-}
-
-// Update updates a recursive pin from one cid to another.  This is more
-// efficient than simply pinning the new one and unpinning the old one.
-func (p *pinner) Update(ctx context.Context, from, to cid.Cid, unpin bool) error {
-	if from == to {
-		// Nothing to do. Don't remove this check or we'll end up
-		// _removing_ the pin.
-		//
-		// See #6648
-		return nil
-	}
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if !p.recursePin.Has(from) {
-		return fmt.Errorf("'from' cid was not recursively pinned already")
-	}
-
-	// Temporarily unlock while we fetch the differences.
-	p.lock.Unlock()
-	err := dagutils.DiffEnumerate(ctx, p.dserv, from, to)
-	p.lock.Lock()
-
-	if err != nil {
-		return err
-	}
-
-	p.recursePin.Add(to)
-	if unpin {
-		p.recursePin.Remove(from)
-	}
-	return nil
-}
-
-// Flush encodes and writes pinner keysets to the datastore
-func (p *pinner) Flush(ctx context.Context) error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	internalset := cid.NewSet()
-	recordInternal := internalset.Add
-
-	root := &mdag.ProtoNode{}
-	{
-		n, err := storeSet(ctx, p.internal, p.directPin.Keys(), recordInternal)
-		if err != nil {
-			return err
-		}
-		if err := root.AddNodeLink(linkDirect, n); err != nil {
-			return err
-		}
-	}
-
-	{
-		n, err := storeSet(ctx, p.internal, p.recursePin.Keys(), recordInternal)
-		if err != nil {
-			return err
-		}
-		if err := root.AddNodeLink(linkRecursive, n); err != nil {
-			return err
-		}
-	}
-
-	// Add the empty node; it's referenced by the pin sets but never created.
-	err := p.internal.Add(ctx, new(mdag.ProtoNode))
-	if err != nil {
-		return err
-	}
-
-	err = p.internal.Add(ctx, root)
-	if err != nil {
-		return err
-	}
-
-	k := root.Cid()
-
-	internalset.Add(k)
-
-	if syncDServ, ok := p.dserv.(syncDAGService); ok {
-		if err := syncDServ.Sync(); err != nil {
-			return fmt.Errorf("cannot sync pinned data: %v", err)
-		}
-	}
-
-	if syncInternal, ok := p.internal.(syncDAGService); ok {
-		if err := syncInternal.Sync(); err != nil {
-			return fmt.Errorf("cannot sync pinning data: %v", err)
-		}
-	}
-
-	if err := p.dstore.Put(pinDatastoreKey, k.Bytes()); err != nil {
-		return fmt.Errorf("cannot store pin state: %v", err)
-	}
-	if err := p.dstore.Sync(pinDatastoreKey); err != nil {
-		return fmt.Errorf("cannot sync pin state: %v", err)
-	}
-	p.internalPin = internalset
-	return nil
-}
-
-// InternalPins returns all cids kept pinned for the internal state of the
-// pinner
-func (p *pinner) InternalPins(ctx context.Context) ([]cid.Cid, error) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	var out []cid.Cid
-	out = append(out, p.internalPin.Keys()...)
-	return out, nil
-}
-
-// PinWithMode allows the user to have fine-grained control over pin
-// counts.
-func (p *pinner) PinWithMode(c cid.Cid, mode Mode) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	switch mode {
-	case Recursive:
-		p.recursePin.Add(c)
-	case Direct:
-		p.directPin.Add(c)
-	}
-}
-
-// hasChild recursively looks for a Cid among the children of a root Cid.
-// The visit function can be used to shortcut already-visited branches.
-func hasChild(ctx context.Context, ng ipld.NodeGetter, root cid.Cid, child cid.Cid, visit func(cid.Cid) bool) (bool, error) {
-	links, err := ipld.GetLinks(ctx, ng, root)
-	if err != nil {
-		return false, err
-	}
-	for _, lnk := range links {
-		c := lnk.Cid
-		if lnk.Cid.Equals(child) {
-			return true, nil
-		}
-		if visit(c) {
-			has, err := hasChild(ctx, ng, c, child, visit)
-			if err != nil {
-				return false, err
-			}
-
-			if has {
-				return has, nil
-			}
-		}
-	}
-	return false, nil
-}
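
With the pinner implementation removed above, pin.go retains only the shared definitions, and the concrete pinners now live in the ipldpinner and dspinner subpackages. Below is a minimal sketch of constructing the new datastore-backed pinner; only the dspinner.New signature is taken from this change (see pinconv below), while the newDSPinner helper and the in-memory datastore wiring are illustrative assumptions, not part of the diff:

package main

import (
	"context"
	"log"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/ipfs/go-ipfs-pinner/dspinner"
	ipld "github.com/ipfs/go-ipld-format"
)

// newDSPinner (illustrative) wires a dspinner to a thread-safe in-memory
// datastore; the DAGService must be supplied by the caller.
func newDSPinner(ctx context.Context, dserv ipld.DAGService) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	pinner, err := dspinner.New(ctx, dstore, dserv)
	if err != nil {
		log.Fatal(err)
	}
	_ = pinner // implements the same Pinner interface as before
}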
diff --git a/pinconv/pinconv.go b/pinconv/pinconv.go
new file mode 100644
index 0000000000000000000000000000000000000000..9aee703a710fac2b4d73d2ebc9faa501486181fa
--- /dev/null
+++ b/pinconv/pinconv.go
@@ -0,0 +1,128 @@
+// Package pinconv converts pins between the dag-based ipldpinner and the
+// datastore-based dspinner.  Once conversion is complete, the pins from the
+// source pinner are removed.
+package pinconv
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	ipfspinner "github.com/ipfs/go-ipfs-pinner"
+	"github.com/ipfs/go-ipfs-pinner/dspinner"
+	"github.com/ipfs/go-ipfs-pinner/ipldpinner"
+	ipld "github.com/ipfs/go-ipld-format"
+)
+
+// ConvertPinsFromIPLDToDS converts pins stored in mdag-based storage to pins
+// stored in the datastore.  Returns a dspinner loaded with the converted pins,
+// and a count of the recursive and direct pins converted.
+//
+// After pins are stored in datastore, the root pin key is deleted to unlink
+// the pin data in the DAGService.
+func ConvertPinsFromIPLDToDS(ctx context.Context, dstore ds.Datastore, dserv ipld.DAGService, internal ipld.DAGService) (ipfspinner.Pinner, int, error) {
+	const ipldPinPath = "/local/pins"
+
+	ipldPinner, err := ipldpinner.New(dstore, dserv, internal)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	dsPinner, err := dspinner.New(ctx, dstore, dserv)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	seen := cid.NewSet()
+	cids, err := ipldPinner.RecursiveKeys(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+	for i := range cids {
+		seen.Add(cids[i])
+		dsPinner.PinWithMode(cids[i], ipfspinner.Recursive)
+	}
+	convCount := len(cids)
+
+	cids, err = ipldPinner.DirectKeys(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+	for i := range cids {
+		if seen.Has(cids[i]) {
+			// Pin was already pinned recursively; skip so it is not counted twice
+			continue
+		}
+		dsPinner.PinWithMode(cids[i], ipfspinner.Direct)
+		convCount++
+	}
+
+	err = dsPinner.Flush(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Delete root mdag key from datastore to remove old pin storage.
+	ipldPinDatastoreKey := ds.NewKey(ipldPinPath)
+	if err = dstore.Delete(ipldPinDatastoreKey); err != nil {
+		return nil, 0, fmt.Errorf("cannot delete old pin state: %v", err)
+	}
+	if err = dstore.Sync(ipldPinDatastoreKey); err != nil {
+		return nil, 0, fmt.Errorf("cannot sync old pin state: %v", err)
+	}
+
+	return dsPinner, convCount, nil
+}
+
+// ConvertPinsFromDSToIPLD converts the pins stored in the datastore by
+// dspinner into pins stored in the given internal DAGService by ipldpinner.
+// Returns an ipldpinner loaded with the converted pins, and a count of the
+// recursive and direct pins converted.
+//
+// After the pins are stored in the DAGService, the pins and their indexes are
+// removed from the dspinner.
+func ConvertPinsFromDSToIPLD(ctx context.Context, dstore ds.Datastore, dserv ipld.DAGService, internal ipld.DAGService) (ipfspinner.Pinner, int, error) {
+	dsPinner, err := dspinner.New(ctx, dstore, dserv)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	ipldPinner, err := ipldpinner.New(dstore, dserv, internal)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	cids, err := dsPinner.RecursiveKeys(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+	for i := range cids {
+		ipldPinner.PinWithMode(cids[i], ipfspinner.Recursive)
+		dsPinner.RemovePinWithMode(cids[i], ipfspinner.Recursive)
+	}
+	convCount := len(cids)
+
+	cids, err = dsPinner.DirectKeys(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+	for i := range cids {
+		ipldPinner.PinWithMode(cids[i], ipfspinner.Direct)
+		dsPinner.RemovePinWithMode(cids[i], ipfspinner.Direct)
+	}
+	convCount += len(cids)
+
+	// Save the ipldpinner pins
+	err = ipldPinner.Flush(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	err = dsPinner.Flush(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ipldPinner, convCount, nil
+}
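
Taken together, the two converters above make the migration reversible. The following usage sketch shows the call pattern for ConvertPinsFromIPLDToDS; the migrateToDSPins helper is hypothetical, and passing dserv twice mirrors the tests below, which use a single DAGService for both regular and internal nodes:

package migrate

import (
	"context"
	"log"

	ds "github.com/ipfs/go-datastore"
	ipfspinner "github.com/ipfs/go-ipfs-pinner"
	"github.com/ipfs/go-ipfs-pinner/pinconv"
	ipld "github.com/ipfs/go-ipld-format"
)

// migrateToDSPins (hypothetical) converts any existing DAG-stored pins and
// returns the resulting datastore-backed pinner.
func migrateToDSPins(ctx context.Context, dstore ds.Datastore, dserv ipld.DAGService) (ipfspinner.Pinner, error) {
	pinner, converted, err := pinconv.ConvertPinsFromIPLDToDS(ctx, dstore, dserv, dserv)
	if err != nil {
		return nil, err
	}
	log.Printf("converted %d pins to datastore-backed storage", converted)
	return pinner, nil
}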
diff --git a/pinconv/pinconv_test.go b/pinconv/pinconv_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ac7f8ffc5f31e15e740f2b0d2cdf8ff26c0b6586
--- /dev/null
+++ b/pinconv/pinconv_test.go
@@ -0,0 +1,153 @@
+package pinconv
+
+import (
+	"context"
+	"errors"
+	"io"
+	"testing"
+
+	bs "github.com/ipfs/go-blockservice"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	lds "github.com/ipfs/go-ds-leveldb"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	offline "github.com/ipfs/go-ipfs-exchange-offline"
+	ipfspin "github.com/ipfs/go-ipfs-pinner"
+	"github.com/ipfs/go-ipfs-pinner/dspinner"
+	util "github.com/ipfs/go-ipfs-util"
+	ipld "github.com/ipfs/go-ipld-format"
+	mdag "github.com/ipfs/go-merkledag"
+)
+
+var rand = util.NewTimeSeededRand()
+
+type batchWrap struct {
+	ds.Datastore
+}
+
+func randNode() (*mdag.ProtoNode, cid.Cid) {
+	nd := new(mdag.ProtoNode)
+	nd.SetData(make([]byte, 32))
+	_, err := io.ReadFull(rand, nd.Data())
+	if err != nil {
+		panic(err)
+	}
+	k := nd.Cid()
+	return nd, k
+}
+
+func (d *batchWrap) Batch() (ds.Batch, error) {
+	return ds.NewBasicBatch(d), nil
+}
+
+func makeStore() (ds.Datastore, ipld.DAGService) {
+	ldstore, err := lds.NewDatastore("", nil)
+	if err != nil {
+		panic(err)
+	}
+	var dstore ds.Batching = &batchWrap{ldstore}
+
+	bstore := blockstore.NewBlockstore(dstore)
+	bserv := bs.New(bstore, offline.Exchange(bstore))
+	dserv := mdag.NewDAGService(bserv)
+	return dstore, dserv
+}
+
+func TestConversions(t *testing.T) {
+	ctx := context.Background()
+	dstore, dserv := makeStore()
+
+	dsPinner, err := dspinner.New(ctx, dstore, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	a, ak := randNode()
+	err = dsPinner.Pin(ctx, a, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new node c, to be indirectly pinned through b
+	c, ck := randNode()
+	if err = dserv.Add(ctx, c); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new node b, to be parent to a and c
+	b, _ := randNode()
+	if err = b.AddNodeLink("child", a); err != nil {
+		t.Fatal(err)
+	}
+	if err = b.AddNodeLink("otherchild", c); err != nil {
+		t.Fatal(err)
+	}
+	bk := b.Cid() // CID changed after adding links
+
+	// recursively pin B{A,C}
+	err = dsPinner.Pin(ctx, b, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dsPinner.Flush(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	verifyPins := func(pinner ipfspin.Pinner) error {
+		pinned, err := pinner.CheckIfPinned(ctx, ak, bk, ck)
+		if err != nil {
+			return err
+		}
+		if len(pinned) != 3 {
+			return errors.New("incorrect number of results")
+		}
+		for _, pn := range pinned {
+			switch pn.Key {
+			case ak:
+				if pn.Mode != ipfspin.Direct {
+					return errors.New("A pinned with wrong mode")
+				}
+			case bk:
+				if pn.Mode != ipfspin.Recursive {
+					return errors.New("B pinned with wrong mode")
+				}
+			case ck:
+				if pn.Mode != ipfspin.Indirect {
+					return errors.New("C should be pinned indirectly")
+				}
+				if pn.Via != bk {
+					return errors.New("C should be pinned via B")
+				}
+			}
+		}
+		return nil
+	}
+
+	err = verifyPins(dsPinner)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ipldPinner, toIPLDCount, err := ConvertPinsFromDSToIPLD(ctx, dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if toIPLDCount != 2 {
+		t.Fatal("expected 2 ds-to-ipld pins, got", toIPLDCount)
+	}
+
+	err = verifyPins(ipldPinner)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	toDSPinner, toDSCount, err := ConvertPinsFromIPLDToDS(ctx, dstore, dserv, dserv)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if toDSCount != toIPLDCount {
+		t.Fatal("ds-to-ipld pins", toIPLDCount, "not equal to ipld-to-ds-pins", toDSCount)
+	}
+
+	err = verifyPins(toDSPinner)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
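
For reference, the expected count of 2 in the test above follows directly from the pin set it builds: a is pinned directly and b recursively, while c is reachable only through b and is therefore indirect, so it is never enumerated by DirectKeys or RecursiveKeys. That is also why a clean round trip through both converters reports the same count in each direction.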