dms3 / go-ds-flatfs / Commits

Commit 1d9a4fcf
Authored Feb 28, 2019 by Steven Allen
fix panic on write after close
(and make Close threadsafe)
Parent: 43fc5aaf
Showing 3 changed files with 66 additions and 15 deletions (+66 -15)
convert.go      +2   -2
flatfs.go       +40  -13
flatfs_test.go  +24  -0
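All three diffs below apply one pattern: every write path (Put, putMany, Delete) takes a read lock on a new shutdownLock and checks a shutdown flag before doing anything, while deactivate (called from Close) takes the write lock, sets the flag exactly once, and only then closes the checkpoint channel and waits for the background goroutine to exit. A minimal, self-contained sketch of that pattern follows; the store type and its newStore, loop, Write and Close names are illustrative stand-ins, not the actual flatfs API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("datastore closed")

// store illustrates the shutdown pattern used by this commit: writers
// hold a read lock and check a flag; Close holds the write lock, sets
// the flag once, then stops and waits for the background goroutine.
type store struct {
	checkpointCh chan struct{} // closed to tell the background loop to stop
	done         chan struct{} // closed by the loop when it has exited

	shutdownLock sync.RWMutex
	shutdown     bool
}

func newStore() *store {
	s := &store{
		checkpointCh: make(chan struct{}, 1),
		done:         make(chan struct{}),
	}
	go s.loop()
	return s
}

func (s *store) loop() {
	defer close(s.done) // lets Close observe that the loop has finished
	<-s.checkpointCh    // stand-in for the real checkpoint loop
}

func (s *store) Write(key string) error {
	s.shutdownLock.RLock()
	defer s.shutdownLock.RUnlock()
	if s.shutdown {
		return errClosed // fail cleanly instead of panicking on a closed channel
	}
	// ... perform the write, possibly signalling checkpointCh ...
	return nil
}

func (s *store) Close() error {
	s.shutdownLock.Lock()
	defer s.shutdownLock.Unlock()
	if s.shutdown {
		return nil // calling Close twice is safe
	}
	s.shutdown = true
	close(s.checkpointCh)
	<-s.done
	return nil
}

func main() {
	s := newStore()
	_ = s.Write("quux")          // succeeds
	_ = s.Close()                // safe, even if called again
	fmt.Println(s.Write("qaax")) // prints "datastore closed"
}

Two details of the actual change are worth noting: the checkpoint loop now closes done in a defer instead of sending on it once, so Close cannot hang if the loop exits on another path, and because writers check the flag while holding the read lock, no write can be in the middle of signalling checkpointCh at the moment Close closes it, which is the kind of write-after-close panic the commit title refers to.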
convert.go (view file @ 1d9a4fcf)
@@ -170,8 +170,8 @@ func Move(oldPath string, newPath string, out io.Writer) error {
 func moveKey(oldDS *Datastore, newDS *Datastore, key datastore.Key) error {
 	_, oldPath := oldDS.encode(key)
 	dir, newPath := newDS.encode(key)
-	err := newDS.makeDirNoSync(dir)
-	if err != nil {
+	err := os.Mkdir(dir, 0755)
+	if err != nil && !os.IsExist(err) {
 		return err
 	}
 	err = os.Rename(oldPath, newPath)
flatfs.go (view file @ 1d9a4fcf)
@@ -94,6 +94,7 @@ var (
 	ErrDatastoreExists       = errors.New("datastore already exists")
 	ErrDatastoreDoesNotExist = errors.New("datastore directory does not exist")
 	ErrShardingFileMissing   = fmt.Errorf("%s file not found in datastore", SHARDING_FN)
+	ErrClosed                = errors.New("datastore closed")
 )
 
 func init() {
@@ -123,9 +124,13 @@ type Datastore struct {
 	dirty       bool
 	storedValue diskUsageValue
 
+	// Used to trigger a checkpoint.
 	checkpointCh chan struct{}
 	done         chan struct{}
 
+	shutdownLock sync.RWMutex
+	shutdown     bool
+
 	// opMap handles concurrent write operations (put/delete)
 	// to the same key
 	opMap *opMap
@@ -242,6 +247,8 @@ func Open(path string, syncFiles bool) (*Datastore, error) {
 		shardStr:     shardId.String(),
 		getDir:       shardId.Func(),
 		sync:         syncFiles,
+		checkpointCh: make(chan struct{}, 1),
+		done:         make(chan struct{}),
 		diskUsage:    0,
 		opMap:        new(opMap),
 	}
@@ -257,8 +264,6 @@ func Open(path string, syncFiles bool) (*Datastore, error) {
 		return nil, err
 	}
 
-	fs.checkpointCh = make(chan struct{}, 1)
-	fs.done = make(chan struct{})
 	go fs.checkpointLoop()
 	return fs, nil
 }
@@ -356,6 +361,12 @@ var putMaxRetries = 6
 // concurrent Put and a Delete operation, we cannot guarantee which one
 // will win.
 func (fs *Datastore) Put(key datastore.Key, value []byte) error {
+	fs.shutdownLock.RLock()
+	defer fs.shutdownLock.RUnlock()
+	if fs.shutdown {
+		return ErrClosed
+	}
+
 	var err error
 	for i := 1; i <= putMaxRetries; i++ {
 		err = fs.doWriteOp(&op{
@@ -466,6 +477,12 @@ func (fs *Datastore) doPut(key datastore.Key, val []byte) error {
 }
 
 func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error {
+	fs.shutdownLock.RLock()
+	defer fs.shutdownLock.RUnlock()
+	if fs.shutdown {
+		return ErrClosed
+	}
+
 	var dirsToSync []string
 
 	files := make(map[*os.File]*op)
@@ -594,6 +611,12 @@ func (fs *Datastore) GetSize(key datastore.Key) (size int, err error) {
 // the Put() explanation about the handling of concurrent write
 // operations to the same key.
 func (fs *Datastore) Delete(key datastore.Key) error {
+	fs.shutdownLock.RLock()
+	defer fs.shutdownLock.RUnlock()
+	if fs.shutdown {
+		return ErrClosed
+	}
+
 	return fs.doWriteOp(&op{
 		typ: opDelete,
 		key: key,
@@ -845,6 +868,8 @@ func (fs *Datastore) checkpointDiskUsage() {
 }
 
 func (fs *Datastore) checkpointLoop() {
+	defer close(fs.done)
+
 	timerActive := true
 	timer := time.NewTimer(0)
 	defer timer.Stop()
@@ -858,7 +883,6 @@ func (fs *Datastore) checkpointLoop() {
 			if fs.dirty {
 				log.Errorf("could not store final value of disk usage to file, future estimates may be inaccurate")
 			}
-			fs.done <- struct{}{}
 			return
 		}
 		// If the difference between the checkpointed disk usage and
@@ -1023,11 +1047,14 @@ func (fs *Datastore) walk(path string, result *query.ResultBuilder) error {
 // operations will fail but readonly operations will continue to
 // function
 func (fs *Datastore) deactivate() error {
-	if fs.checkpointCh != nil {
+	fs.shutdownLock.Lock()
+	defer fs.shutdownLock.Unlock()
+	if fs.shutdown {
+		return nil
+	}
+	fs.shutdown = true
 	close(fs.checkpointCh)
 	<-fs.done
-	fs.checkpointCh = nil
-	}
 	return nil
 }
flatfs_test.go (view file @ 1d9a4fcf)
@@ -810,6 +810,30 @@ func testBatchDelete(dirFunc mkShardFunc, t *testing.T) {
 
 func TestBatchDelete(t *testing.T) { tryAllShardFuncs(t, testBatchDelete) }
 
+func testClose(dirFunc mkShardFunc, t *testing.T) {
+	temp, cleanup := tempdir(t)
+	defer cleanup()
+
+	fs, err := flatfs.CreateOrOpen(temp, dirFunc(2), false)
+	if err != nil {
+		t.Fatalf("New fail: %v\n", err)
+	}
+
+	err = fs.Put(datastore.NewKey("quux"), []byte("foobar"))
+	if err != nil {
+		t.Fatalf("Put fail: %v\n", err)
+	}
+
+	fs.Close()
+
+	err = fs.Put(datastore.NewKey("qaax"), []byte("foobar"))
+	if err == nil {
+		t.Fatal("expected put on closed datastore to fail")
+	}
+}
+
+func TestClose(t *testing.T) { tryAllShardFuncs(t, testClose) }
+
 func TestSHARDINGFile(t *testing.T) {
 	tempdir, cleanup := tempdir(t)
 	defer cleanup()