Commit 9186c869 authored by tavit ohanian

Merge remote-tracking branch 'upstream/master' into reference

parents b06d267d 6881dfdc
version: 2.1
orbs:
ci-go: ipfs/ci-go@0.2.1
win: circleci/windows@2.4.0
jobs:
"windows-test":
executor: win/default
steps:
- checkout
- run:
name: "Go test"
command: go test .
workflows:
version: 2
test:
jobs:
- ci-go/build
- ci-go/lint
- ci-go/test
- windows-test
blank_issues_enabled: false
contact_links:
- name: Getting Help on IPFS
url: https://ipfs.io/help
about: All information about how and where to get help on IPFS.
- name: IPFS Official Forum
url: https://discuss.ipfs.io
about: Please post general questions, support requests, and discussions here.
---
name: Open an issue
about: Only for actionable issues relevant to this repository.
title: ''
labels: need/triage
assignees: ''
---
<!--
Hello! To ensure this issue is correctly addressed as soon as possible by the IPFS team, please try to make sure:
- This issue is relevant to this repository's topic or codebase.
- A clear description is provided. It should include as much relevant information as possible and a clear scope, so the issue is actionable.
FOR GENERAL DISCUSSION, HELP OR QUESTIONS, please see the options at https://ipfs.io/help or head directly to https://discuss.ipfs.io.
(you can delete this section after reading)
-->
# Configuration for welcome - https://github.com/behaviorbot/welcome
# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted on first-time issues
newIssueWelcomeComment: >
Thank you for submitting your first issue to this repository! A maintainer
will be here shortly to triage and review.
In the meantime, please double-check that you have provided all the
necessary information to make this process easy! Any information that can
help save additional round trips is useful! We currently aim to give
initial feedback within **two business days**. If this does not happen, feel
free to leave a comment.
Please keep an eye on how this issue will be labeled, as labels give an
overview of priorities, assignments and additional actions requested by the
maintainers:
- "Priority" labels will show how urgent this is for the team.
- "Status" labels will show if this is ready to be worked on, blocked, or in progress.
- "Need" labels will indicate if additional input or analysis is required.
Finally, remember to use https://discuss.ipfs.io if you just need general
support.
# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted on PRs from first-time contributors to your repository
newPRWelcomeComment: >
Thank you for submitting this PR!
A maintainer will be here shortly to review it.
We are super grateful, but we are also overloaded! Help us by making sure
that:
* The context for this PR is clear, with relevant discussion, decisions
and stakeholders linked/mentioned.
* Your contribution itself is clear (code comments, self-review for the
rest) and in its best form. Follow the [code contribution
guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines)
if they apply.
Getting other community members to do a review would be a great help too on
complex PRs (you can ask in the chats/forums). If you are unsure about
something, just leave us a comment.
Next steps:
* A maintainer will triage and assign priority to this PR, commenting on
any missing things and potentially assigning a reviewer for high
priority items.
* The PR gets reviewed, discussed, and approved as needed.
* The PR is merged by maintainers when it has been approved and comments addressed.
We currently aim to provide initial feedback/triaging within **two business
days**. Please keep an eye on any labelling actions, as these will indicate
priorities and status of your contribution.
We are very grateful for your contribution!
# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
# Currently disabled
#firstPRMergeComment: ""
1.3.7: QmcjM6sfVtgGFBCCJaZo33HNi7K4rPkrUQAzLewgWTNkeg
The MIT License
Copyright (c) 2016 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# go-ds-flatfs
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![GoDoc](https://godoc.org/github.com/ipfs/go-ds-flatfs?status.svg)](https://godoc.org/github.com/ipfs/go-ds-flatfs)
[![Build Status](https://travis-ci.org/ipfs/go-ds-flatfs.svg?branch=master)](https://travis-ci.org/ipfs/go-ds-flatfs)
[![Coverage Status](https://img.shields.io/codecov/c/github/ipfs/go-ds-flatfs.svg)](https://codecov.io/gh/ipfs/go-ds-flatfs)
> A datastore implementation using sharded directories and flat files to store data
`go-ds-flatfs` is used by `go-ipfs` to store raw block contents on disk. It supports several sharding functions (prefix, suffix, next-to-last/*).
It is _not_ a general-purpose datastore and has several important restrictions.
See the restrictions section for details.
## Lead Maintainer
[Jakub Sztandera](https://github.com/kubuxu)
## Table of Contents
- [Install](#install)
- [Usage](#usage)
- [Contribute](#contribute)
- [License](#license)
## Install
`go-ds-flatfs` can be used like any Go module:
```go
import "github.com/ipfs/go-ds-flatfs"
```
## Usage
Check the [GoDoc module documentation](https://godoc.org/github.com/ipfs/go-ds-flatfs) for an overview of this module's
functionality.
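For a quick start, here is a minimal sketch of creating a datastore, writing a value, and reading it back. It only uses calls that appear in this repository (`Create`, `Open`, `Put`, `Get`); the `blocks` path and the key are arbitrary examples.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ipfs/go-datastore"
	flatfs "github.com/ipfs/go-ds-flatfs"
)

func main() {
	// Create a datastore in ./blocks using the IPFS default shard
	// function (next-to-last/2). On later runs, Open alone is enough.
	path := "blocks" // example path
	if err := flatfs.Create(path, flatfs.NextToLast(2)); err != nil {
		log.Fatal(err)
	}
	ds, err := flatfs.Open(path, false) // false: don't sync on every write
	if err != nil {
		log.Fatal(err)
	}
	defer ds.Close()

	// Keys must use the restricted FlatFS alphabet (see Restrictions below).
	key := datastore.NewKey("/EXAMPLEKEY")
	if err := ds.Put(key, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	value, err := ds.Get(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", value)
}
```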
### Restrictions
FlatFS keys are severely restricted. Only keys that match `/[0-9A-Z+-_=]\+` are
allowed. That is, keys may only contain upper-case alpha-numeric characters,
'-', '+', '_', and '='. This is because values are written directly to the
filesystem without encoding.
Importantly, this means namespaced keys (e.g., /FOO/BAR) are _not_ allowed.
Attempts to write to such keys will result in an error.
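To make the restriction concrete, here is a small sketch (assuming a datastore like the one created in the sketch above) showing that a conforming key is accepted while a namespaced key is rejected with an error, as described above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ipfs/go-datastore"
	flatfs "github.com/ipfs/go-ds-flatfs"
)

func main() {
	ds, err := flatfs.Open("blocks", false) // example path from the sketch above
	if err != nil {
		log.Fatal(err)
	}
	defer ds.Close()

	// Upper-case alphanumerics plus '+', '-', '_' and '=' are allowed.
	if err := ds.Put(datastore.NewKey("/VALIDKEY="), []byte("ok")); err != nil {
		log.Fatal(err)
	}

	// Namespaced (or lower-case) keys are rejected.
	if err := ds.Put(datastore.NewKey("/FOO/BAR"), []byte("nope")); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}
```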
### DiskUsage and Accuracy
This datastore implements the [`PersistentDatastore`](https://godoc.org/github.com/ipfs/go-datastore#PersistentDatastore) interface. It offers a `DiskUsage()` method which strives to find a balance between accuracy and performance. This implies:
* The total disk usage of a datastore is calculated when opening the datastore.
* The current disk usage is cached frequently in a file in the datastore root (`diskUsage.cache` by default). This file is also written when the datastore is closed.
* If this file is not present when the datastore is opened:
  * The disk usage will be calculated by walking the datastore's directory tree and estimating the size of each folder.
  * This may be a very slow operation for huge datastores or datastores with slow disks.
  * The operation is time-limited (5 minutes by default).
  * Upon timeout, the remaining folders will be assumed to have the average size of the previously processed ones.
* After opening, the disk usage is updated on every write/delete operation.
This means that for certain datastores (huge ones, those with very slow disks or special content), the values reported by
`DiskUsage()` might have reduced accuracy, and the first startup (without a `diskUsage.cache` file present) might be slow.
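As an illustration, here is a sketch of reading the reported usage. It relies only on the `DiskUsage()` method described above and on `Open`/`Close` from this package; the path is an example, and the first `Open` without a cache file may trigger the initial scan.

```go
package main

import (
	"fmt"
	"log"

	flatfs "github.com/ipfs/go-ds-flatfs"
)

func main() {
	ds, err := flatfs.Open("blocks", false) // example path
	if err != nil {
		log.Fatal(err)
	}
	// Closing the datastore persists the current usage to diskUsage.cache.
	defer ds.Close()

	usage, err := ds.DiskUsage()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("approximate disk usage: %d bytes\n", usage)
}
```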
If you need increased accuracy or a fast startup from the first run, you can manually create or update the
`diskUsage.cache` file.
The `diskUsage.cache` file is a JSON file with two fields, `diskUsage` and `accuracy`. For example, the file for a
small repo might be:
```json
{"diskUsage":6357,"accuracy":"initial-exact"}
```
`diskUsage` is the calculated disk usage and `accuracy` is a note on the accuracy of the initial calculation. If the
initial calculation was accurate the file will contain the value `initial-exact`. If some of the directories have too
many entries and the disk usage for that directory was estimated based on the first 2000 entries, the file will contain
`initial-approximate`. If the calculation took too long and timed out as indicated above, the file will contain
`initial-timed-out`.
If the initial calculation timed out, the JSON file might be:
```json
{"diskUsage":7589482442898,"accuracy":"initial-timed-out"}
```
To fix this with a more accurate value, you could do the following (in the datastore root):

    $ du -sb .
    7536515831332 .
    $ echo -n '{"diskUsage":7536515831332,"accuracy":"initial-exact"}' > diskUsage.cache
## Contribute
PRs accepted.
Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
## License
MIT © Protocol Labs, Inc.
comment: off
// Package flatfs is a Datastore implementation that stores all
// objects in a two-level directory structure in the local file
// system, regardless of the hierarchy of the keys.
package flatfs
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
)
// UpgradeV0toV1 upgrades the flatfs datastore at path from the v0 to the v1
// layout by writing the SHARDING (and, when applicable, README) files for a
// prefix shard function of the given length.
func UpgradeV0toV1(path string, prefixLen int) error {
fun := Prefix(prefixLen)
err := WriteShardFunc(path, fun)
if err != nil {
return err
}
err = WriteReadme(path, fun)
if err != nil {
return err
}
return nil
}
// DowngradeV1toV0 reverts a v1 datastore that uses the 'prefix' sharding
// function back to the v0 layout by removing the SHARDING and README files.
func DowngradeV1toV0(path string) error {
fun, err := ReadShardFunc(path)
if err != nil {
return err
} else if fun.funName != "prefix" {
return fmt.Errorf("%s: can only downgrade datastore that use the 'prefix' sharding function", path)
}
err = os.Remove(filepath.Join(path, SHARDING_FN))
if err != nil {
return err
}
err = os.Remove(filepath.Join(path, README_FN))
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Move migrates every key from the flatfs datastore at oldPath to the one at
// newPath (which may use a different sharding function) and then cleans up the
// old directory. Progress is reported to out if it is non-nil.
func Move(oldPath string, newPath string, out io.Writer) error {
oldDS, err := Open(oldPath, false)
if err != nil {
return fmt.Errorf("%s: %v", oldPath, err)
}
oldDS.deactivate()
newDS, err := Open(newPath, false)
if err != nil {
return fmt.Errorf("%s: %v", newPath, err)
}
newDS.deactivate()
res, err := oldDS.Query(query.Query{KeysOnly: true})
if err != nil {
return err
}
if out != nil {
fmt.Fprintf(out, "Moving Keys...\n")
}
// first move the keys
count := 0
for {
e, ok := res.NextSync()
if !ok {
break
}
if e.Error != nil {
res.Close()
return e.Error
}
err := moveKey(oldDS, newDS, datastore.RawKey(e.Key))
if err != nil {
res.Close()
return err
}
count++
if out != nil && count%10 == 0 {
fmt.Fprintf(out, "\r%d keys so far", count)
}
}
res.Close()
if out != nil {
fmt.Fprintf(out, "\nCleaning Up...\n")
}
// now walk the old top-level directory
dir, err := os.Open(oldDS.path)
if err != nil {
return err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return err
}
for _, fn := range names {
if fn == "." || fn == ".." {
continue
}
oldPath := filepath.Join(oldDS.path, fn)
inf, err := os.Stat(oldPath)
if err != nil {
return err
}
if inf.IsDir() {
indir, err := os.Open(oldPath)
if err != nil {
return err
}
names, err := indir.Readdirnames(-1)
indir.Close()
if err != nil {
return err
}
for _, n := range names {
p := filepath.Join(oldPath, n)
// part of unfinished write transaction
// remove it
if strings.HasPrefix(n, "put-") {
err := os.Remove(p)
if err != nil {
return err
}
} else {
return errors.New("unknown file in flatfs: " + p)
}
}
err = os.Remove(oldPath)
if err != nil {
return err
}
} else if fn == SHARDING_FN || fn == README_FN {
// generated file so just remove it
err := os.Remove(oldPath)
if err != nil {
return err
}
} else {
// else we found something unexpected, so to be safe just move it
log.Warnw("found unexpected file in datastore directory, moving anyways", "file", fn)
newPath := filepath.Join(newDS.path, fn)
err := rename(oldPath, newPath)
if err != nil {
return err
}
}
}
if out != nil {
fmt.Fprintf(out, "All Done.\n")
}
return nil
}
func moveKey(oldDS *Datastore, newDS *Datastore, key datastore.Key) error {
_, oldPath := oldDS.encode(key)
dir, newPath := newDS.encode(key)
err := os.Mkdir(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
}
err = rename(oldPath, newPath)
if err != nil {
return err
}
return nil
}
package flatfs_test
import (
"bytes"
"encoding/base32"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/ipfs/go-datastore"
flatfs "github.com/ipfs/go-ds-flatfs"
)
func TestMove(t *testing.T) {
tempdir, cleanup := tempdir(t)
defer cleanup()
v1dir := filepath.Join(tempdir, "v1")
createDatastore(t, v1dir, flatfs.Prefix(3))
err := ioutil.WriteFile(filepath.Join(v1dir, "README_ALSO"), []byte("something"), 0666)
if err != nil {
t.Fatalf("WriteFile fail: %v\n", err)
}
keys, blocks := populateDatastore(t, v1dir)
v2dir := filepath.Join(tempdir, "v2")
createDatastore(t, v2dir, flatfs.NextToLast(2))
err = flatfs.Move(v1dir, v2dir, nil)
if err != nil {
t.Fatalf("%v\n", err)
}
// make sure the old directory is empty
rmEmptyDatastore(t, v1dir)
// make sure the README file moved
_, err = os.Stat(filepath.Join(v2dir, "README_ALSO"))
if err != nil {
t.Fatal(err)
}
// check that all keys are available
checkKeys(t, v2dir, keys, blocks)
// check that a key is in the correct format
shard := filepath.Join(v2dir, flatfs.NextToLast(2).Func()(keys[0].String()))
_, err = os.Stat(shard)
if err != nil {
t.Fatal(err)
}
}
func TestMoveRestart(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
tempdir, cleanup := tempdir(t)
defer cleanup()
v1dir := filepath.Join(tempdir, "v1")
v2dir := filepath.Join(tempdir, "v2")
createDatastore(t, v1dir, flatfs.Prefix(3))
createDatastore(t, v2dir, flatfs.NextToLast(5))
keys, blocks := populateDatastore(t, v1dir)
checkKeys(t, v1dir, keys, blocks)
// get a directory in the datastore
noslash := keys[0].String()[1:]
aDir := filepath.Join(tempdir, "v1", flatfs.Prefix(3).Func()(noslash))
// create a permission problem on the directory
err := os.Chmod(aDir, 0500)
if err != nil {
t.Fatalf("%v\n", err)
}
// try the move; it should fail partway through
err = flatfs.Move(v1dir, v2dir, nil)
if err == nil {
t.Fatal("Move should have failed.", err)
}
// undoing the move should succeed
err = flatfs.Move(v2dir, v1dir, nil)
if err != nil {
t.Fatal("Could not undo the move.", err)
}
checkKeys(t, v1dir, keys, blocks)
// there should be nothing left in the new datastore
rmEmptyDatastore(t, v2dir)
// try the move again; it should fail again
createDatastore(t, v2dir, flatfs.NextToLast(2))
err = flatfs.Move(v1dir, v2dir, nil)
if err == nil {
t.Fatal("Move should have failed.", err)
}
// fix the permission problem
err = os.Chmod(aDir, 0700)
if err != nil {
t.Fatalf("%v\n", err)
}
// restart the move, it should be okay now
err = flatfs.Move(v1dir, v2dir, nil)
if err != nil {
t.Fatalf("Move not okay: %v\n", err)
}
// make sure everything moved by removing the old directory
rmEmptyDatastore(t, v1dir)
// make sure everything moved by checking all keys
checkKeys(t, v2dir, keys, blocks)
// check that a key is in the correct format
shard := filepath.Join(v2dir, flatfs.NextToLast(2).Func()(keys[0].String()))
_, err = os.Stat(shard)
if err != nil {
t.Fatal(err)
}
}
func TestUpgradeDowngrade(t *testing.T) {
tempdir, cleanup := tempdir(t)
defer cleanup()
createDatastore(t, tempdir, flatfs.Prefix(3))
keys, blocks := populateDatastore(t, tempdir)
checkKeys(t, tempdir, keys, blocks)
err := flatfs.UpgradeV0toV1(tempdir, 3)
if err == nil {
t.Fatalf("UpgradeV0toV1 on already v1 should fail.")
}
err = flatfs.DowngradeV1toV0(tempdir)
if err != nil {
t.Fatalf("DowngradeV1toV0 fail: %v\n", err)
}
_, err = os.Stat(filepath.Join(tempdir, flatfs.SHARDING_FN))
if err == nil {
t.Fatalf("%v not in v0 format, SHARDING FILE exists", tempdir)
} else if !os.IsNotExist(err) {
t.Fatalf("Stat fail: %v\n", err)
}
err = flatfs.UpgradeV0toV1(tempdir, 3)
if err != nil {
t.Fatalf("UpgradeV0toV1 fail %v\n", err)
}
// This will fail unless the repository is in the new version
checkKeys(t, tempdir, keys, blocks)
}
func TestDowngradeNonPrefix(t *testing.T) {
tempdir, cleanup := tempdir(t)
defer cleanup()
createDatastore(t, tempdir, flatfs.NextToLast(2))
err := flatfs.DowngradeV1toV0(tempdir)
if err == nil {
t.Fatal("DowngradeV1toV0 should have failed", err)
}
}
func createDatastore(t *testing.T, dir string, fun *flatfs.ShardIdV1) {
err := flatfs.Create(dir, fun)
if err != nil {
t.Fatalf("Create fail: %s: %v\n", dir, err)
}
}
func rmEmptyDatastore(t *testing.T, dir string) {
err := os.Remove(dir)
if err != nil {
t.Fatalf("Remove fail: %v\n", err)
}
}
func populateDatastore(t *testing.T, dir string) ([]datastore.Key, [][]byte) {
ds, err := flatfs.Open(dir, false)
if err != nil {
t.Fatalf("Open fail: %v\n", err)
}
defer ds.Close()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var blocks [][]byte
var keys []datastore.Key
for i := 0; i < 256; i++ {
blk := make([]byte, 1000)
r.Read(blk)
blocks = append(blocks, blk)
key := "X" + base32.StdEncoding.EncodeToString(blk[:8])
keys = append(keys, datastore.NewKey(key))
err := ds.Put(keys[i], blocks[i])
if err != nil {
t.Fatalf("Put fail: %v\n", err)
}
}
return keys, blocks
}
func checkKeys(t *testing.T, dir string, keys []datastore.Key, blocks [][]byte) {
ds, err := flatfs.Open(dir, false)
if err != nil {
t.Fatalf("Open fail: %v\n", err)
}
defer ds.Close()
for i, key := range keys {
data, err := ds.Get(key)
if err != nil {
t.Fatalf("Get fail: %v\n", err)
}
if !bytes.Equal(data, blocks[i]) {
t.Fatalf("block context differ for key %s\n", key.String())
}
}
}
package main
import (
"fmt"
"os"
"strconv"
"github.com/ipfs/go-ds-flatfs"
)
// To convert from the old format to a new format with a different
// sharding function use:
// flatfs upgrade blocks 5
// flatfs create blocks-new v1/next-to-last/2
// flatfs move blocks blocks-new
// rmdir blocks
// mv blocks-new blocks
// To do the reverse:
// flatfs create blocks-new v1/prefix/5
// flatfs move blocks blocks-new
// rmdir blocks
// mv blocks-new blocks
// flatfs downgrade blocks
func usage() {
fmt.Fprintf(os.Stderr, "usage: %s create DIR SHARDFUN | upgrade DIR PREFIXLEN | downgrade DIR | move OLDDIR NEWDIR\n", os.Args[0])
os.Exit(1)
}
func fail(err error) {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
func main() {
if len(os.Args) < 2 {
usage()
}
switch os.Args[1] {
case "create":
if len(os.Args) != 4 {
usage()
}
dir := os.Args[2]
funStr := os.Args[3]
if funStr[0] != '/' {
if funStr[0] != 'v' { // add the version if not provided
funStr = "v1/" + funStr
}
funStr = flatfs.PREFIX + funStr
}
fun, err := flatfs.ParseShardFunc(funStr)
if err != nil {
fail(err)
}
err = flatfs.Create(dir, fun)
if err != nil {
fail(err)
}
case "upgrade":
if len(os.Args) != 4 {
usage()
}
dir := os.Args[2]
prefixLen, err := strconv.Atoi(os.Args[3])
if err != nil {
fail(err)
}
err = flatfs.UpgradeV0toV1(dir, prefixLen)
if err != nil {
fail(err)
}
case "downgrade":
if len(os.Args) != 3 {
usage()
}
dir := os.Args[2]
err := flatfs.DowngradeV1toV0(dir)
if err != nil {
fail(err)
}
case "move":
if len(os.Args) != 4 {
usage()
}
oldDir := os.Args[2]
newDir := os.Args[3]
err := flatfs.Move(oldDir, newDir, os.Stderr)
if err != nil {
fail(err)
}
default:
usage()
}
}
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM=
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8=
github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI=
github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc=
github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
package flatfs
import (
"github.com/ipfs/go-datastore"
)
// keyIsValid returns true if the key is valid for flatfs.
// Allows keys that match [0-9A-Z+-_=].
func keyIsValid(key datastore.Key) bool {
ks := key.String()
if len(ks) < 2 || ks[0] != '/' {
return false
}
for _, b := range ks[1:] {
if '0' <= b && b <= '9' {
continue
}
if 'A' <= b && b <= 'Z' {
continue
}
switch b {
case '+', '-', '_', '=':
continue
}
return false
}
return true
}
package flatfs
import (
"testing"
"github.com/ipfs/go-datastore"
)
var (
validKeys = []string{
"/FOO",
"/1BAR1",
"/=EMACS-IS-KING=",
}
invalidKeys = []string{
"/foo/bar",
`/foo\bar`,
"/foo\000bar",
"/=Vim-IS-KING=",
}
)
func TestKeyIsValid(t *testing.T) {
for _, key := range validKeys {
k := datastore.NewKey(key)
if !keyIsValid(k) {
t.Errorf("expected key %s to be valid", k)
}
}
for _, key := range invalidKeys {
k := datastore.NewKey(key)
if keyIsValid(k) {
t.Errorf("expected key %s to be invalid", k)
}
}
}
package flatfs
var README_IPFS_DEF_SHARD = `This is a repository of IPLD objects. Each IPLD object is in a single file,
named <base32 encoding of cid>.data, where <base32 encoding of cid> is the
"base32" encoding of the CID (as specified in
https://github.com/multiformats/multibase) without the 'B' prefix.
All the object files are placed in a tree of directories, based on a
function of the CID. This is a form of sharding similar to
the objects directory in git repositories. Previously, we used
prefixes; we now use the next-to-last two characters.
func NextToLast(base32cid string) string {
nextToLastLen := 2
offset := len(base32cid) - nextToLastLen - 1
return base32cid[offset : offset+nextToLastLen]
}
For example, an object with a base58 CIDv1 of
zb2rhYSxw4ZjuzgCnWSt19Q94ERaeFhu9uSqRgjSdx9bsgM6f
has a base32 CIDv1 of
BAFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA
and will be placed at
SC/AFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA.data
with 'SC' being the next-to-last two characters; the 'B' at the
beginning of the CIDv1 string is the multibase prefix and is not
stored in the filename.
`
// +build !plan9
package flatfs
import "os"
var rename = os.Rename
package flatfs
import (
"io"
"os"
"path/filepath"
"syscall"
)
// rename behaves like os.Rename but can rename files across directories.
func rename(oldpath, newpath string) error {
err := os.Rename(oldpath, newpath)
if le, ok := err.(*os.LinkError); !ok || le.Err != os.ErrInvalid {
return err
}
if filepath.Dir(oldpath) == filepath.Dir(newpath) {
// We should not get here, but just in case
// os.ErrInvalid is used for something else in the future.
return err
}
src, err := os.Open(oldpath)
if err != nil {
return &os.LinkError{"rename", oldpath, newpath, err}
}
defer src.Close()
fi, err := src.Stat()
if err != nil {
return &os.LinkError{"rename", oldpath, newpath, err}
}
if fi.Mode().IsDir() {
return &os.LinkError{"rename", oldpath, newpath, syscall.EISDIR}
}
dst, err := os.OpenFile(newpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
if err != nil {
return &os.LinkError{"rename", oldpath, newpath, err}
}
if _, err := io.Copy(dst, src); err != nil {
dst.Close()
os.Remove(newpath)
return &os.LinkError{"rename", oldpath, newpath, err}
}
if err := dst.Close(); err != nil {
os.Remove(newpath)
return &os.LinkError{"rename", oldpath, newpath, err}
}
// Copy mtime and mode from original file.
// We need only one syscall if we avoid os.Chmod and os.Chtimes.
dir := fi.Sys().(*syscall.Dir)
var d syscall.Dir
d.Null()
d.Mtime = dir.Mtime
d.Mode = dir.Mode
_ = dirwstat(newpath, &d) // ignore error, as per mv(1)
if err := os.Remove(oldpath); err != nil {
return &os.LinkError{"rename", oldpath, newpath, err}
}
return nil
}
func dirwstat(name string, d *syscall.Dir) error {
var buf [syscall.STATFIXLEN]byte
n, err := d.Marshal(buf[:])
if err != nil {
return &os.PathError{"dirwstat", name, err}
}
if err = syscall.Wstat(name, buf[:n]); err != nil {
return &os.PathError{"dirwstat", name, err}
}
return nil
}
package flatfs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
)
var IPFS_DEF_SHARD = NextToLast(2)
var IPFS_DEF_SHARD_STR = IPFS_DEF_SHARD.String()
const PREFIX = "/repo/flatfs/shard/"
const SHARDING_FN = "SHARDING"
const README_FN = "_README"
type ShardIdV1 struct {
funName string
param int
fun ShardFunc
}
func (f *ShardIdV1) String() string {
return fmt.Sprintf("%sv1/%s/%d", PREFIX, f.funName, f.param)
}
func (f *ShardIdV1) Func() ShardFunc {
return f.fun
}
// Prefix returns a ShardIdV1 that shards by the first prefixLen characters
// of the key, padding with '_' when the key is shorter.
func Prefix(prefixLen int) *ShardIdV1 {
padding := strings.Repeat("_", prefixLen)
return &ShardIdV1{
funName: "prefix",
param: prefixLen,
fun: func(noslash string) string {
return (noslash + padding)[:prefixLen]
},
}
}
// Suffix returns a ShardIdV1 that shards by the last suffixLen characters
// of the key, padding with '_' when the key is shorter.
func Suffix(suffixLen int) *ShardIdV1 {
padding := strings.Repeat("_", suffixLen)
return &ShardIdV1{
funName: "suffix",
param: suffixLen,
fun: func(noslash string) string {
str := padding + noslash
return str[len(str)-suffixLen:]
},
}
}
// NextToLast returns a ShardIdV1 that shards by the suffixLen characters
// preceding the last character of the key, padding with '_' when needed.
func NextToLast(suffixLen int) *ShardIdV1 {
padding := strings.Repeat("_", suffixLen+1)
return &ShardIdV1{
funName: "next-to-last",
param: suffixLen,
fun: func(noslash string) string {
str := padding + noslash
offset := len(str) - suffixLen - 1
return str[offset : offset+suffixLen]
},
}
}
// ParseShardFunc parses a shard identifier string of the form
// /repo/flatfs/shard/v1/<name>/<param> into a ShardIdV1.
func ParseShardFunc(str string) (*ShardIdV1, error) {
str = strings.TrimSpace(str)
if len(str) == 0 {
return nil, fmt.Errorf("empty shard identifier")
}
trimmed := strings.TrimPrefix(str, PREFIX)
if str == trimmed { // nothing trimmed
return nil, fmt.Errorf("invalid or no prefix in shard identifier: %s", str)
}
str = trimmed
parts := strings.Split(str, "/")
if len(parts) != 3 {
return nil, fmt.Errorf("invalid shard identifier: %s", str)
}
version := parts[0]
if version != "v1" {
return nil, fmt.Errorf("expected 'v1' for version string got: %s\n", version)
}
funName := parts[1]
param, err := strconv.Atoi(parts[2])
if err != nil {
return nil, fmt.Errorf("invalid parameter: %v", err)
}
switch funName {
case "prefix":
return Prefix(param), nil
case "suffix":
return Suffix(param), nil
case "next-to-last":
return NextToLast(param), nil
default:
return nil, fmt.Errorf("expected 'prefix', 'suffix' or 'next-to-last' got: %s", funName)
}
}
// ReadShardFunc reads and parses the shard function from the SHARDING file in
// dir, returning ErrShardingFileMissing if the file does not exist.
func ReadShardFunc(dir string) (*ShardIdV1, error) {
buf, err := ioutil.ReadFile(filepath.Join(dir, SHARDING_FN))
if os.IsNotExist(err) {
return nil, ErrShardingFileMissing
} else if err != nil {
return nil, err
}
return ParseShardFunc(string(buf))
}
// WriteShardFunc writes the shard identifier to the SHARDING file in dir,
// failing if the file already exists.
func WriteShardFunc(dir string, id *ShardIdV1) error {
file, err := os.OpenFile(filepath.Join(dir, SHARDING_FN), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return err
}
defer file.Close()
_, err = file.WriteString(id.String())
if err != nil {
return err
}
_, err = file.WriteString("\n")
return err
}
// WriteReadme writes the default README file to dir when the given shard
// function is the IPFS default; otherwise it does nothing.
func WriteReadme(dir string, id *ShardIdV1) error {
if id.String() == IPFS_DEF_SHARD.String() {
err := ioutil.WriteFile(filepath.Join(dir, README_FN), []byte(README_IPFS_DEF_SHARD), 0444)
if err != nil {
return err
}
}
return nil
}
package flatfs
import (
"os"
"runtime"
)
// Don't block more than 16 threads on sync operations.
// 16 should be able to saturate most RAIDs:
// in the case of two disks used per write (RAID 1, 5) and a queue depth of 2,
// 16 concurrent Sync calls should be able to saturate a 16-HDD RAID.
// TODO: benchmark this, maybe provide a tweak parameter.
const SyncThreadsMax = 16
var syncSemaphore chan struct{} = make(chan struct{}, SyncThreadsMax)
func syncDir(dir string) error {
if runtime.GOOS == "windows" {
// dir sync on windows doesn't work: https://git.io/vPnCI
return nil
}
dirF, err := os.Open(dir)
if err != nil {
return err
}
defer dirF.Close()
syncSemaphore <- struct{}{}
defer func() { <-syncSemaphore }()
if err := dirF.Sync(); err != nil {
return err
}
return nil
}
func syncFile(file *os.File) error {
syncSemaphore <- struct{}{}
defer func() { <-syncSemaphore }()
return file.Sync()
}
package flatfs
import (
"io"
"os"
"time"
)
// From: http://stackoverflow.com/questions/30697324/how-to-check-if-directory-on-path-is-empty
func DirIsEmpty(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
return false, err
}
defer f.Close()
_, err = f.Readdirnames(1) // Or f.Readdir(1)
if err == io.EOF {
return true, nil
}
return false, err // Either not empty or error, suits both cases
}
func readFile(filename string) (data []byte, err error) {
// Fallback retry for temporary error.
for i := 0; i < RetryAttempts; i++ {
data, err = readFileOnce(filename)
if err == nil || !isTooManyFDError(err) {
break
}
time.Sleep(time.Duration(i+1) * RetryDelay)
}
return data, err
}
func tempFile(dir, pattern string) (fi *os.File, err error) {
for i := 0; i < RetryAttempts; i++ {
fi, err = tempFileOnce(dir, pattern)
if err == nil || !isTooManyFDError(err) {
break
}
time.Sleep(time.Duration(i+1) * RetryDelay)
}
return fi, err
}
// +build !windows
package flatfs
import (
"io/ioutil"
"os"
)
func tempFileOnce(dir, pattern string) (*os.File, error) {
return ioutil.TempFile(dir, pattern)
}
func readFileOnce(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
// +build windows
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: This file is a variant of a subset of the golang standard library
// src/io/ioutil/tempfile.go
// with calls to os.Open replaced with the goissue34681.Open variant.
package flatfs
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
goissue34681 "github.com/alexbrainman/goissue34681"
)
var tmpRand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextRandom() string {
randmu.Lock()
r := tmpRand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
tmpRand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
func prefixAndSuffix(pattern string) (prefix, suffix string) {
if pos := strings.LastIndex(pattern, "*"); pos != -1 {
prefix, suffix = pattern[:pos], pattern[pos+1:]
} else {
prefix = pattern
}
return
}
func tempFileOnce(dir, pattern string) (f *os.File, err error) {
if dir == "" {
dir = os.TempDir()
}
prefix, suffix := prefixAndSuffix(pattern)
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextRandom()+suffix)
f, err = goissue34681.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
tmpRand = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
func readFileOnce(filename string) ([]byte, error) {
f, err := goissue34681.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
// It's a good but not certain bet that FileInfo will tell us exactly how much to
// read, so let's try it but be prepared for the answer to be wrong.
var n int64 = bytes.MinRead
if fi, err := f.Stat(); err == nil {
// As initial capacity, use Size + a little extra in case Size is zero,
// and to avoid another allocation after Read has filled the buffer. If
// the size was wrong, we'll either waste some space off the end or
// reallocate as needed, but in the overwhelmingly common case we'll get
// it just right.
if size := fi.Size() + bytes.MinRead; size > n {
n = size
}
}
// Read into a buffer pre-sized with the capacity hint computed above.
buf := bytes.NewBuffer(make([]byte, 0, n))
_, err = buf.ReadFrom(f)
return buf.Bytes(), err
}