Commit 49cdb3a5 authored by Jeromy Johnson's avatar Jeromy Johnson Committed by GitHub

Merge pull request #49 from ipfs/feat/extraction

extract more subpackages
parents 07bea830 dfb94c6d
0.3.0: QmeZBFvNreVkpZkBTFxh56zHWS8wA5S1XxKswHjux76kyz
1.0.0: QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU
package leveldb
import (
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
"github.com/jbenet/goprocess"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
// datastore wraps an open leveldb database handle and implements the
// go-datastore interfaces on top of it.
type datastore struct {
	DB *leveldb.DB
}

// Options is an alias of goleveldb's opt.Options so callers can
// configure the store without importing the leveldb packages directly.
type Options opt.Options
// NewDatastore opens (or creates) a leveldb database at path and wraps
// it in a datastore. opts may be nil, in which case leveldb's default
// options are used.
func NewDatastore(path string, opts *Options) (*datastore, error) {
	nopts := opt.Options{}
	if opts != nil {
		nopts = opt.Options(*opts)
	}
	db, err := leveldb.OpenFile(path, &nopts)
	if err != nil {
		return nil, err
	}
	return &datastore{DB: db}, nil
}
// Put stores value under key. Returns ErrInvalidType if value is not
// of type []byte.
//
// Note: using sync = false.
// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return ds.ErrInvalidType
	}
	return d.DB.Put(key.Bytes(), b, nil)
}
// Get fetches the value stored under key, translating leveldb's
// not-found error into ds.ErrNotFound.
func (d *datastore) Get(key ds.Key) (value interface{}, err error) {
	data, err := d.DB.Get(key.Bytes(), nil)
	switch err {
	case nil:
		return data, nil
	case leveldb.ErrNotFound:
		return nil, ds.ErrNotFound
	default:
		return nil, err
	}
}
// Has reports whether key is present, delegating directly to leveldb.
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
	return d.DB.Has(key.Bytes(), nil)
}
// Delete removes the value stored under key, returning ds.ErrNotFound
// when the key is absent.
//
// leveldb Delete will not return an error if the key doesn't exist
// (see https://github.com/syndtr/goleveldb/issues/109), so check that
// the key exists first and if not return an error.
func (d *datastore) Delete(key ds.Key) (err error) {
	exists, err := d.DB.Has(key.Bytes(), nil)
	// Check err before exists: the original tested !exists first,
	// which reported any Has failure as ErrNotFound (exists is false
	// whenever err is non-nil).
	if err != nil {
		return err
	}
	if !exists {
		return ds.ErrNotFound
	}
	return d.DB.Delete(key.Bytes(), nil)
}
// Query runs q against the store, streaming results through a
// goprocess-managed worker so the caller can cancel early.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
	// we can use multiple iterators concurrently. see:
	// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator
	// advance the iterator only if the reader reads
	//
	// run query in own sub-process tied to Results.Process(), so that
	// it waits for us to finish AND so that clients can signal to us
	// that resources should be reclaimed.
	qrb := dsq.NewResultBuilder(q)
	qrb.Process.Go(func(worker goprocess.Process) {
		d.runQuery(worker, qrb)
	})
	// go wait on the worker (without signaling close)
	go qrb.Process.CloseAfterChildren()
	// Now, apply remaining things (filters, order)
	// NOTE: prefix/offset/limit are handled inside runQuery; only
	// filters and orders are applied here, naively, on the stream.
	qr := qrb.Results()
	for _, f := range q.Filters {
		qr = dsq.NaiveFilter(qr, f)
	}
	for _, o := range q.Orders {
		qr = dsq.NaiveOrder(qr, o)
	}
	return qr, nil
}
// runQuery iterates the leveldb keyspace (restricted to the query's
// prefix, if any), applies offset and limit, and sends each entry to
// qrb.Output until the iterator is exhausted or the worker is closed.
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
	var rnge *util.Range
	if qrb.Query.Prefix != "" {
		rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
	}
	i := d.DB.NewIterator(rnge, nil)
	defer i.Release()

	// advance iterator for offset; stop early if the range is exhausted
	for j := 0; j < qrb.Query.Offset && i.Next(); j++ {
	}

	// iterate, and handle limit, too
send:
	for sent := 0; i.Next(); sent++ {
		// end early if we hit the limit
		if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
			break
		}
		k := ds.NewKey(string(i.Key())).String()
		e := dsq.Entry{Key: k}
		if !qrb.Query.KeysOnly {
			// copy the value out: leveldb reuses the iterator's buffer
			buf := make([]byte, len(i.Value()))
			copy(buf, i.Value())
			e.Value = buf
		}
		select {
		case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
		case <-worker.Closing(): // client told us to end early.
			// BUG FIX: the original used a bare `break` here, which
			// only exits the select; the loop kept running after the
			// client closed. A labeled break exits the loop.
			break send
		}
	}
	if err := i.Error(); err != nil {
		select {
		case qrb.Output <- dsq.Result{Error: err}: // client read our error
		case <-worker.Closing(): // client told us to end.
			return
		}
	}
}
// LevelDB needs to be closed.
func (d *datastore) Close() (err error) {
	return d.DB.Close()
}

// IsThreadSafe is a marker method: goleveldb handles are safe for
// concurrent use, so this datastore advertises thread safety.
func (d *datastore) IsThreadSafe() {}
// leveldbBatch accumulates writes in a leveldb.Batch and flushes them
// to db in a single Write on Commit.
type leveldbBatch struct {
	b  *leveldb.Batch
	db *leveldb.DB
}

// Batch returns a new write batch backed by this datastore's DB.
func (d *datastore) Batch() (ds.Batch, error) {
	return &leveldbBatch{
		b:  new(leveldb.Batch),
		db: d.DB,
	}, nil
}
// Put queues a write of value under key; only []byte values are
// accepted.
func (b *leveldbBatch) Put(key ds.Key, value interface{}) error {
	data, ok := value.([]byte)
	if !ok {
		return ds.ErrInvalidType
	}
	b.b.Put(key.Bytes(), data)
	return nil
}
// Commit writes all queued operations to the database atomically.
func (b *leveldbBatch) Commit() error {
	return b.db.Write(b.b, nil)
}

// Delete queues a deletion of key.
func (b *leveldbBatch) Delete(key ds.Key) error {
	b.b.Delete(key.Bytes())
	return nil
}
package leveldb
import (
"io/ioutil"
"os"
"testing"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
)
// testcases maps datastore key paths to the values stored under them;
// shared by the query, batching, and round-trip tests below.
var testcases = map[string]string{
	"/a":     "a",
	"/a/b":   "ab",
	"/a/b/c": "abc",
	"/a/b/d": "a/b/d",
	"/a/c":   "ac",
	"/a/d":   "ad",
	"/e":     "e",
	"/f":     "f",
}
// returns datastore, and a function to call on exit.
// (this garbage collects). So:
//
//	d, close := newDS(t)
//	defer close()
func newDS(t *testing.T) (*datastore, func()) {
	path, err := ioutil.TempDir("/tmp", "testing_leveldb_")
	if err != nil {
		t.Fatal(err)
	}
	d, err := NewDatastore(path, nil)
	if err != nil {
		t.Fatal(err)
	}
	return d, func() {
		// Close the DB before removing its files: the original removed
		// the directory while the database was still open.
		d.Close()
		os.RemoveAll(path)
	}
}
// addTestCases writes every pair from testcases into d, then reads
// each key back and checks the stored bytes round-trip intact.
func addTestCases(t *testing.T, d *datastore, testcases map[string]string) {
	for key, want := range testcases {
		if err := d.Put(ds.NewKey(key), []byte(want)); err != nil {
			t.Fatal(err)
		}
	}
	for key, want := range testcases {
		got, err := d.Get(ds.NewKey(key))
		if err != nil {
			t.Fatal(err)
		}
		if string(got.([]byte)) != want {
			t.Errorf("%s values differ: %s != %s", key, want, got)
		}
	}
}
// TestQuery checks prefix queries, then offset/limit pagination, over
// the shared testcases fixture.
func TestQuery(t *testing.T) {
	d, close := newDS(t)
	defer close()
	addTestCases(t, d, testcases)
	rs, err := d.Query(dsq.Query{Prefix: "/a/"})
	if err != nil {
		t.Fatal(err)
	}
	expectMatches(t, []string{
		"/a/b",
		"/a/b/c",
		"/a/b/d",
		"/a/c",
		"/a/d",
	}, rs)
	// test offset and limit
	// NOTE: the expected page relies on leveldb returning keys in
	// sorted order, so offset 2 / limit 2 lands on these two keys.
	rs, err = d.Query(dsq.Query{Prefix: "/a/", Offset: 2, Limit: 2})
	if err != nil {
		t.Fatal(err)
	}
	expectMatches(t, []string{
		"/a/b/d",
		"/a/c",
	}, rs)
}

// TestQueryRespectsProcess only populates the store.
// NOTE(review): despite its name, this test asserts nothing about
// process/cancellation behavior — TODO add a real cancellation check.
func TestQueryRespectsProcess(t *testing.T) {
	d, close := newDS(t)
	defer close()
	addTestCases(t, d, testcases)
}
// expectMatches drains actualR and checks it contains exactly the keys
// in expect (order-insensitive).
func expectMatches(t *testing.T, expect []string, actualR dsq.Results) {
	actual, err := actualR.Rest()
	if err != nil {
		t.Error(err)
	}
	if len(actual) != len(expect) {
		// The original printed "not enough" even when there were too
		// many results; report the counts instead.
		t.Errorf("expected %d results, got %d: %v vs %v",
			len(expect), len(actual), expect, actual)
	}
	for _, k := range expect {
		found := false
		for _, e := range actual {
			if e.Key == k {
				found = true
				break // no need to scan the rest once matched
			}
		}
		if !found {
			t.Error(k, "not found")
		}
	}
}
// TestBatching puts the fixture through a write batch, commits, and
// verifies every value is readable afterwards.
func TestBatching(t *testing.T) {
	d, done := newDS(t)
	defer done()
	b, err := d.Batch()
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range testcases {
		err := b.Put(ds.NewKey(k), []byte(v))
		if err != nil {
			t.Fatal(err)
		}
	}
	// nothing should be visible until Commit flushes the batch
	err = b.Commit()
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range testcases {
		val, err := d.Get(ds.NewKey(k))
		if err != nil {
			t.Fatal(err)
		}
		if v != string(val.([]byte)) {
			t.Fatal("got wrong data!")
		}
	}
}
package lru
import (
"errors"
lru "github.com/hashicorp/golang-lru"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
)
// Datastore uses golang-lru for internal storage, so least-recently
// used entries are evicted once capacity is reached.
type Datastore struct {
	cache *lru.Cache
}
// NewDatastore constructs a new LRU Datastore with given capacity.
func NewDatastore(capacity int) (*Datastore, error) {
	c, err := lru.New(capacity)
	if err != nil {
		return nil, err
	}
	d := &Datastore{cache: c}
	return d, nil
}
// Put stores the object `value` named by `key`. Adding may evict the
// least-recently-used entry; Put itself never fails.
func (d *Datastore) Put(key ds.Key, value interface{}) (err error) {
	d.cache.Add(key, value)
	return nil
}
// Get retrieves the object `value` named by `key`, returning
// ds.ErrNotFound for keys that are absent or already evicted.
func (d *Datastore) Get(key ds.Key) (value interface{}, err error) {
	if v, ok := d.cache.Get(key); ok {
		return v, nil
	}
	return nil, ds.ErrNotFound
}
// Has returns whether the `key` is mapped to a `value`, implemented
// via a Get through the shared helper.
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
	return ds.GetBackedHas(d, key)
}

// Delete removes the value for given `key`. Removing an absent key is
// a no-op and still returns nil.
func (d *Datastore) Delete(key ds.Key) (err error) {
	d.cache.Remove(key)
	return nil
}
// Query is not supported by the LRU datastore and always returns an
// error. (The original comment referred to a nonexistent "KeyList"
// method and the error string broke Go conventions: capitalized,
// trailing period, and wrong method name.)
func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) {
	return nil, errors.New("lru datastore: query not supported")
}
// Close is a no-op; the in-memory cache holds no external resources.
func (d *Datastore) Close() error {
	return nil
}

// Batch is unsupported for the LRU datastore.
func (d *Datastore) Batch() (ds.Batch, error) {
	return nil, ds.ErrBatchUnsupported
}
package lru_test
import (
"strconv"
"testing"
ds "github.com/ipfs/go-datastore"
. "gopkg.in/check.v1"
lru "github.com/ipfs/go-datastore/lru"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// DSSuite is the gocheck suite holding the LRU datastore tests.
type DSSuite struct{}

var _ = Suite(&DSSuite{})
// TestBasic fills the cache to capacity, then overflows it by the same
// amount and checks that exactly the first `size` entries were evicted.
func (ks *DSSuite) TestBasic(c *C) {
	var size = 1000
	d, err := lru.NewDatastore(size)
	c.Check(err, Equals, nil)
	// fill to capacity: keys 0..size-1
	for i := 0; i < size; i++ {
		err := d.Put(ds.NewKey(strconv.Itoa(i)), i)
		c.Check(err, Equals, nil)
	}
	// everything still present
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i)))
		c.Check(j, Equals, i)
		c.Check(err, Equals, nil)
	}
	// overflow with keys size..2*size-1, evicting the originals
	for i := 0; i < size; i++ {
		err := d.Put(ds.NewKey(strconv.Itoa(i+size)), i)
		c.Check(err, Equals, nil)
	}
	// original keys must all be gone
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i)))
		c.Check(j, Equals, nil)
		c.Check(err, Equals, ds.ErrNotFound)
	}
	// new keys must all be present
	for i := 0; i < size; i++ {
		j, err := d.Get(ds.NewKey(strconv.Itoa(i + size)))
		c.Check(j, Equals, i)
		c.Check(err, Equals, nil)
	}
}
// Package measure provides a Datastore wrapper that records metrics
// using github.com/codahale/metrics.
package measure
import (
"io"
"time"
"github.com/codahale/metrics"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
)
// Histogram measurements exceeding these limits are dropped. TODO
// maybe it would be better to cap the value? Should we keep track of
// drops?
const (
	// maxLatency bounds recorded op latency at 1s (in nanoseconds).
	maxLatency = int64(1 * time.Second)
	// maxSize bounds recorded value sizes at 4 GiB.
	maxSize = int64(1 << 32)
)
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) *measure {
	// Each op gets a count, an error count, a latency histogram, and —
	// for Put/Get, which carry payloads — a size histogram.
	m := &measure{
		backend:       ds,
		putNum:        metrics.Counter(prefix + ".Put.num"),
		putErr:        metrics.Counter(prefix + ".Put.err"),
		putLatency:    metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
		putSize:       metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),
		getNum:        metrics.Counter(prefix + ".Get.num"),
		getErr:        metrics.Counter(prefix + ".Get.err"),
		getLatency:    metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
		getSize:       metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),
		hasNum:        metrics.Counter(prefix + ".Has.num"),
		hasErr:        metrics.Counter(prefix + ".Has.err"),
		hasLatency:    metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),
		deleteNum:     metrics.Counter(prefix + ".Delete.num"),
		deleteErr:     metrics.Counter(prefix + ".Delete.err"),
		deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),
		queryNum:      metrics.Counter(prefix + ".Query.num"),
		queryErr:      metrics.Counter(prefix + ".Query.err"),
		queryLatency:  metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
	}
	return m
}
// measure delegates every Datastore operation to backend while
// recording per-operation counters and histograms.
type measure struct {
	backend datastore.Datastore

	putNum     metrics.Counter
	putErr     metrics.Counter
	putLatency *metrics.Histogram
	putSize    *metrics.Histogram

	getNum     metrics.Counter
	getErr     metrics.Counter
	getLatency *metrics.Histogram
	getSize    *metrics.Histogram

	hasNum     metrics.Counter
	hasErr     metrics.Counter
	hasLatency *metrics.Histogram

	deleteNum     metrics.Counter
	deleteErr     metrics.Counter
	deleteLatency *metrics.Histogram

	queryNum     metrics.Counter
	queryErr     metrics.Counter
	queryLatency *metrics.Histogram
}

// Compile-time check that *measure satisfies datastore.Datastore.
var _ datastore.Datastore = (*measure)(nil)
// recordLatency records the time elapsed since start into h, in
// microseconds. Values outside the histogram's range are dropped (the
// RecordValue error is deliberately ignored — see the const block's
// TODO about drops).
func recordLatency(h *metrics.Histogram, start time.Time) {
	// time.Since is the idiomatic form of time.Now().Sub(start).
	elapsed := time.Since(start) / time.Microsecond
	_ = h.RecordValue(int64(elapsed))
}
// Put forwards to the backend, recording count, payload size, latency,
// and (on failure) the error counter.
func (m *measure) Put(key datastore.Key, value interface{}) error {
	defer recordLatency(m.putLatency, time.Now())
	m.putNum.Add()
	b, ok := value.([]byte)
	if ok {
		_ = m.putSize.RecordValue(int64(len(b)))
	}
	err := m.backend.Put(key, value)
	if err != nil {
		m.putErr.Add()
	}
	return err
}
// Get forwards to the backend, recording count and latency always,
// result size on success, and the error counter on failure.
func (m *measure) Get(key datastore.Key) (value interface{}, err error) {
	defer recordLatency(m.getLatency, time.Now())
	m.getNum.Add()
	value, err = m.backend.Get(key)
	if err != nil {
		m.getErr.Add()
		return value, err
	}
	if b, ok := value.([]byte); ok {
		_ = m.getSize.RecordValue(int64(len(b)))
	}
	return value, err
}
// Has forwards to the backend, recording count, latency, and errors.
func (m *measure) Has(key datastore.Key) (exists bool, err error) {
	defer recordLatency(m.hasLatency, time.Now())
	m.hasNum.Add()
	if exists, err = m.backend.Has(key); err != nil {
		m.hasErr.Add()
	}
	return exists, err
}
// Delete forwards to the backend, recording count, latency, and errors.
func (m *measure) Delete(key datastore.Key) error {
	defer recordLatency(m.deleteLatency, time.Now())
	m.deleteNum.Add()
	if err := m.backend.Delete(key); err != nil {
		m.deleteErr.Add()
		return err
	}
	return nil
}
// Query forwards to the backend, recording count, latency, and errors.
// Latency covers only issuing the query, not draining the results.
func (m *measure) Query(q query.Query) (query.Results, error) {
	defer recordLatency(m.queryLatency, time.Now())
	m.queryNum.Add()
	results, err := m.backend.Query(q)
	if err != nil {
		m.queryErr.Add()
	}
	return results, err
}
// measuredBatch wraps two backend batches — one for puts, one for
// deletes — so each operation class can be timed separately at commit.
// NOTE(review): the field names "putts"/"delts" look like typos for
// "puts"/"dels" but are used consistently throughout.
type measuredBatch struct {
	puts    int // number of queued puts
	deletes int // number of queued deletes

	putts datastore.Batch // backend batch receiving Put calls
	delts datastore.Batch // backend batch receiving Delete calls

	m *measure // parent, for counters/histograms
}
// Batch returns a measuredBatch when the backend supports batching,
// allocating separate backend batches for puts and deletes.
func (m *measure) Batch() (datastore.Batch, error) {
	backend, ok := m.backend.(datastore.Batching)
	if !ok {
		return nil, datastore.ErrBatchUnsupported
	}
	putBatch, err := backend.Batch()
	if err != nil {
		return nil, err
	}
	delBatch, err := backend.Batch()
	if err != nil {
		return nil, err
	}
	return &measuredBatch{
		putts: putBatch,
		delts: delBatch,
		m:     m,
	}, nil
}
// Put queues a put on the put-batch, counting it and recording the
// payload size immediately (latency is recorded at Commit).
func (mt *measuredBatch) Put(key datastore.Key, val interface{}) error {
	mt.puts++
	if b, ok := val.([]byte); ok {
		_ = mt.m.putSize.RecordValue(int64(len(b)))
	}
	return mt.putts.Put(key, val)
}
// Delete queues a delete on the delete-batch and counts it; latency is
// recorded at Commit.
func (mt *measuredBatch) Delete(key datastore.Key) error {
	mt.deletes++
	return mt.delts.Delete(key)
}
// Commit flushes the delete-batch first, then the put-batch, recording
// per-op metrics for each; it stops at the first failing commit.
func (mt *measuredBatch) Commit() error {
	if err := logBatchCommit(mt.delts, mt.deletes, mt.m.deleteNum, mt.m.deleteErr, mt.m.deleteLatency); err != nil {
		return err
	}
	return logBatchCommit(mt.putts, mt.puts, mt.m.putNum, mt.m.putErr, mt.m.putLatency)
}
// logBatchCommit commits b and spreads the commit's total latency
// evenly across the n queued operations, recording n into num and one
// averaged latency sample per operation. On failure it also bumps errs.
// A batch with no queued operations is skipped entirely.
func logBatchCommit(b datastore.Batch, n int, num, errs metrics.Counter, lat *metrics.Histogram) error {
	if n == 0 {
		return nil
	}
	before := time.Now()
	err := b.Commit()
	// time.Since replaces the non-idiomatic time.Now().Sub(before);
	// average microseconds per queued op (n > 0 here).
	took := int(time.Since(before)/time.Microsecond) / n
	num.AddN(uint64(n))
	for i := 0; i < n; i++ {
		_ = lat.RecordValue(int64(took))
	}
	if err != nil {
		errs.Add()
		return err
	}
	return nil
}
// Close unregisters every metric created by New (releasing the prefix
// for reuse) and closes the backend if it implements io.Closer.
func (m *measure) Close() error {
	m.putNum.Remove()
	m.putErr.Remove()
	m.putLatency.Remove()
	m.putSize.Remove()
	m.getNum.Remove()
	m.getErr.Remove()
	m.getLatency.Remove()
	m.getSize.Remove()
	m.hasNum.Remove()
	m.hasErr.Remove()
	m.hasLatency.Remove()
	m.deleteNum.Remove()
	m.deleteErr.Remove()
	m.deleteLatency.Remove()
	m.queryNum.Remove()
	m.queryErr.Remove()
	m.queryLatency.Remove()
	if c, ok := m.backend.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
......@@ -13,40 +13,16 @@
"name": "go.uuid",
"version": "1.0.0"
},
{
"author": "syndtr",
"hash": "QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g",
"name": "goleveldb",
"version": "0.0.1"
},
{
"author": "whyrusleeping",
"hash": "QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn",
"hash": "QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP",
"name": "goprocess",
"version": "0.0.0"
},
{
"author": "codahale",
"hash": "QmV3NSS3A1kX5s28r7yLczhDsXzkgo65cqRgKFXYunWZmD",
"name": "metrics",
"version": "0.0.0"
},
{
"author": "fzzy",
"hash": "QmTsDAZ4xQsVkTwR299nBfYtjrbG9B1pJcheKodubm99Wr",
"name": "radix",
"version": "0.5.6"
},
{
"author": "hashicorp",
"hash": "QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy",
"name": "golang-lru",
"version": "0.0.0"
"version": "1.0.0"
}
],
"gxVersion": "0.7.0",
"language": "go",
"license": "MIT",
"name": "go-datastore",
"version": "0.3.0"
"version": "1.0.0"
}
package redis
import (
"errors"
"fmt"
"sync"
"time"
"github.com/fzzy/radix/redis"
datastore "github.com/ipfs/go-datastore"
query "github.com/ipfs/go-datastore/query"
)
// Compile-time checks that *Datastore satisfies the datastore interfaces.
var _ datastore.Datastore = &Datastore{}
var _ datastore.ThreadSafeDatastore = &Datastore{}

// ErrInvalidType is returned by Put when the value is not a []byte.
var ErrInvalidType = errors.New("redis datastore: invalid type error. this datastore only supports []byte values")
// NewExpiringDatastore returns a redis-backed Datastore whose entries
// expire after ttl.
func NewExpiringDatastore(client *redis.Client, ttl time.Duration) (*Datastore, error) {
	d := &Datastore{
		client: client,
		ttl:    ttl,
	}
	return d, nil
}

// NewDatastore returns a redis-backed Datastore whose entries never expire.
func NewDatastore(client *redis.Client) (*Datastore, error) {
	d := &Datastore{client: client}
	return d, nil
}
// Datastore stores values in redis. The mutex serializes all commands
// on the single shared client connection; ttl of 0 means no expiry.
type Datastore struct {
	mu     sync.Mutex
	client *redis.Client
	ttl    time.Duration
}
// Put stores value under key via a pipelined SET, plus an EXPIRE when
// the datastore has a ttl. Only []byte values are accepted.
func (ds *Datastore) Put(key datastore.Key, value interface{}) error {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	data, ok := value.([]byte)
	if !ok {
		return ErrInvalidType
	}
	// Append queues commands without waiting; each GetReply below
	// consumes one reply in queue order (SET first, then EXPIRE).
	ds.client.Append("SET", key.String(), data)
	if ds.ttl != 0 {
		ds.client.Append("EXPIRE", key.String(), ds.ttl.Seconds())
	}
	if err := ds.client.GetReply().Err; err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}
	if ds.ttl != 0 {
		if err := ds.client.GetReply().Err; err != nil {
			return fmt.Errorf("failed to set expiration: %s", err)
		}
	}
	return nil
}
// Get retrieves the bytes stored under key via redis GET.
// NOTE(review): a missing key surfaces as whatever error the radix
// Bytes() conversion produces, not datastore.ErrNotFound — confirm
// callers expect that.
func (ds *Datastore) Get(key datastore.Key) (value interface{}, err error) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	return ds.client.Cmd("GET", key.String()).Bytes()
}

// Has reports whether key exists, via redis EXISTS.
func (ds *Datastore) Has(key datastore.Key) (exists bool, err error) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	return ds.client.Cmd("EXISTS", key.String()).Bool()
}
// Delete removes key via redis DEL. Deleting an absent key is not an
// error at the redis level.
func (ds *Datastore) Delete(key datastore.Key) (err error) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	return ds.client.Cmd("DEL", key.String()).Err
}

// Query is not implemented for the redis datastore.
func (ds *Datastore) Query(q query.Query) (query.Results, error) {
	return nil, errors.New("TODO implement query for redis datastore?")
}

// IsThreadSafe marks this datastore as safe for concurrent use; the
// internal mutex serializes every command.
func (ds *Datastore) IsThreadSafe() {}

// Batch is unsupported for the redis datastore.
func (ds *Datastore) Batch() (datastore.Batch, error) {
	return nil, datastore.ErrBatchUnsupported
}

// Close closes the underlying redis connection.
func (ds *Datastore) Close() error {
	return ds.client.Close()
}
package redis
import (
"bytes"
"os"
"testing"
"time"
"github.com/fzzy/radix/redis"
datastore "github.com/ipfs/go-datastore"
dstest "github.com/ipfs/go-datastore/test"
)
const RedisEnv = "REDIS_DATASTORE_TEST_HOST"
// TestPutGetBytes round-trips a single []byte value through the redis
// datastore (skipped when no redis instance is configured).
func TestPutGetBytes(t *testing.T) {
	client := clientOrAbort(t)
	ds, err := NewDatastore(client)
	if err != nil {
		t.Fatal(err)
	}
	key, val := datastore.NewKey("foo"), []byte("bar")
	dstest.Nil(ds.Put(key, val), t)
	v, err := ds.Get(key)
	if err != nil {
		t.Fatal(err)
	}
	// bytes.Equal is the idiomatic equality test; the original used
	// bytes.Compare(...) != 0, which is meant for ordering.
	if !bytes.Equal(v.([]byte), val) {
		t.Fail()
	}
}
// TestHasBytes checks Has is false before a Put and true afterwards.
func TestHasBytes(t *testing.T) {
	client := clientOrAbort(t)
	ds, err := NewDatastore(client)
	if err != nil {
		t.Fatal(err)
	}
	key, val := datastore.NewKey("foo"), []byte("bar")

	before, err := ds.Has(key)
	if err != nil {
		t.Fatal(err)
	}
	if before {
		t.Fail()
	}

	dstest.Nil(ds.Put(key, val), t)

	after, err := ds.Has(key)
	if err != nil {
		t.Fatal(err)
	}
	if !after {
		t.Fail()
	}
}
// TestDelete checks a key no longer exists after Put then Delete.
func TestDelete(t *testing.T) {
	client := clientOrAbort(t)
	ds, err := NewDatastore(client)
	if err != nil {
		t.Fatal(err)
	}
	key, val := datastore.NewKey("foo"), []byte("bar")
	dstest.Nil(ds.Put(key, val), t)
	dstest.Nil(ds.Delete(key), t)

	exists, err := ds.Has(key)
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Fail()
	}
}
// TestExpiry stores a value in an expiring datastore, sleeps past the
// ttl, and checks the key is gone.
// NOTE(review): the explicit Delete before the Has check means this
// test passes even if expiry never fires — confirm whether the Delete
// is intentional (e.g. to tolerate redis lazy expiry) or a bug.
func TestExpiry(t *testing.T) {
	ttl := 1 * time.Second
	client := clientOrAbort(t)
	ds, err := NewExpiringDatastore(client, ttl)
	if err != nil {
		t.Fatal(err)
	}
	key, val := datastore.NewKey("foo"), []byte("bar")
	dstest.Nil(ds.Put(key, val), t)
	// sleep one second past the ttl so the entry should have expired
	time.Sleep(ttl + 1*time.Second)
	dstest.Nil(ds.Delete(key), t)
	hasAfterExpiration, err := ds.Has(key)
	if err != nil {
		t.Fatal(err)
	}
	if hasAfterExpiration {
		t.Fail()
	}
}
// clientOrAbort dials the redis instance named by the RedisEnv
// environment variable, flushing it clean. Skips the test when no
// instance is reachable.
func clientOrAbort(t *testing.T) *redis.Client {
	conn, err := redis.Dial("tcp", os.Getenv(RedisEnv))
	if err != nil {
		// t.Skip logs the message and calls SkipNow, matching the
		// original t.Log + t.SkipNow pair.
		t.Skip("could not connect to a redis instance")
	}
	if err := conn.Cmd("FLUSHALL").Err; err != nil {
		t.Fatal(err)
	}
	return conn
}
......@@ -3,9 +3,9 @@ package dstest
import (
"bytes"
"encoding/base32"
"math/rand"
"testing"
rand "github.com/dustin/randbo"
dstore "github.com/ipfs/go-datastore"
)
......@@ -15,12 +15,11 @@ func RunBatchTest(t *testing.T, ds dstore.Batching) {
t.Fatal(err)
}
r := rand.New()
var blocks [][]byte
var keys []dstore.Key
for i := 0; i < 20; i++ {
blk := make([]byte, 256*1024)
r.Read(blk)
rand.Read(blk)
blocks = append(blocks, blk)
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
......@@ -59,11 +58,10 @@ func RunBatchTest(t *testing.T, ds dstore.Batching) {
}
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
r := rand.New()
var keys []dstore.Key
for i := 0; i < 20; i++ {
blk := make([]byte, 16)
r.Read(blk)
rand.Read(blk)
key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
keys = append(keys, key)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment