Commit a6942b9f
swarm/storage: Batched database migration (#18113)

Authored 6 years ago by Louis Holbrook
Committed 6 years ago by Anton Evangelatov
Parent 17d67c58

Showing 2 changed files with 127 additions and 28 deletions:

swarm/storage/ldbstore.go: 95 additions, 28 deletions
swarm/storage/ldbstore_test.go: 32 additions, 0 deletions
swarm/storage/ldbstore.go
@@ -284,7 +284,7 @@ func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte {
 	return val
 }
 
-func parseGCIdxKey(key []byte) (byte, []byte) {
+func parseIdxKey(key []byte) (byte, []byte) {
 	return key[0], key[1:]
 }
@@ -589,7 +589,7 @@ func (s *LDBStore) CleanGCIndex() error {
 	it.Seek([]byte{keyGCIdx})
 	var gcDeletes int
 	for it.Valid() {
-		rowType, _ := parseGCIdxKey(it.Key())
+		rowType, _ := parseIdxKey(it.Key())
 		if rowType != keyGCIdx {
 			break
 		}
@@ -601,47 +601,113 @@ func (s *LDBStore) CleanGCIndex() error {
 	if err := s.db.Write(&batch); err != nil {
 		return err
 	}
 	batch.Reset()
 
-	it.Seek([]byte{keyIndex})
-	var idx dpaDBIndex
+	it.Release()
+
+	// corrected po index pointer values
 	var poPtrs [256]uint64
-	for it.Valid() {
-		rowType, chunkHash := parseGCIdxKey(it.Key())
-		if rowType != keyIndex {
-			break
-		}
-		err := decodeIndex(it.Value(), &idx)
-		if err != nil {
-			return err
-		}
-		po := s.po(chunkHash)
 
-		// if we don't find the data key, remove the entry
-		dataKey := getDataKey(idx.Idx, po)
-		_, err = s.db.Get(dataKey)
-		if err != nil {
-			log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
-			batch.Delete(it.Key())
-		} else {
-			gcIdxKey := getGCIdxKey(&idx)
-			gcIdxData := getGCIdxValue(&idx, po, chunkHash)
-			batch.Put(gcIdxKey, gcIdxData)
-			log.Trace("clean ok", "key", chunkHash, "gcKey", gcIdxKey, "gcData", gcIdxData)
-			okEntryCount++
-			if idx.Idx > poPtrs[po] {
-				poPtrs[po] = idx.Idx
-			}
-		}
-		totalEntryCount++
-		it.Next()
+	// set to true if chunk count not on 4096 iteration boundary
+	var doneIterating bool
+
+	// last key index in previous iteration
+	lastIdxKey := []byte{keyIndex}
+
+	// counter for debug output
+	var cleanBatchCount int
+
+	// go through all key index entries
+	for !doneIterating {
+		cleanBatchCount++
+		var idxs []dpaDBIndex
+		var chunkHashes [][]byte
+		var pos []uint8
+		it := s.db.NewIterator()
+
+		it.Seek(lastIdxKey)
+
+		// 4096 is just a nice number, don't look for any hidden meaning here...
+		var i int
+		for i = 0; i < 4096; i++ {
+
+			// this really shouldn't happen unless database is empty
+			// but let's keep it to be safe
+			if !it.Valid() {
+				doneIterating = true
+				break
+			}
+
+			// if it's not keyindex anymore we're done iterating
+			rowType, chunkHash := parseIdxKey(it.Key())
+			if rowType != keyIndex {
+				doneIterating = true
+				break
+			}
+
+			// decode the retrieved index
+			var idx dpaDBIndex
+			err := decodeIndex(it.Value(), &idx)
+			if err != nil {
+				return fmt.Errorf("corrupt index: %v", err)
+			}
+			po := s.po(chunkHash)
+			lastIdxKey = it.Key()
+
+			// if we don't find the data key, remove the entry
+			// if we find it, add to the array of new gc indices to create
+			dataKey := getDataKey(idx.Idx, po)
+			_, err = s.db.Get(dataKey)
+			if err != nil {
+				log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
+				batch.Delete(it.Key())
+			} else {
+				idxs = append(idxs, idx)
+				chunkHashes = append(chunkHashes, chunkHash)
+				pos = append(pos, po)
+				okEntryCount++
+				if idx.Idx > poPtrs[po] {
+					poPtrs[po] = idx.Idx
+				}
+			}
+			totalEntryCount++
+			it.Next()
+		}
+		it.Release()
+
+		// flush the key index corrections
+		err := s.db.Write(&batch)
+		if err != nil {
+			return err
+		}
+		batch.Reset()
+
+		// add correct gc indices
+		for i, okIdx := range idxs {
+			gcIdxKey := getGCIdxKey(&okIdx)
+			gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])
+			batch.Put(gcIdxKey, gcIdxData)
+			log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData)
+		}
+
+		// flush them
+		err = s.db.Write(&batch)
+		if err != nil {
+			return err
+		}
+		batch.Reset()
+
+		log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs))
 	}
 
-	it.Release()
 	log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len())
 
 	// lastly add updated entry count
 	var entryCount [8]byte
 	binary.BigEndian.PutUint64(entryCount[:], okEntryCount)
 	batch.Put(keyEntryCnt, entryCount[:])
 
 	// and add the new po index pointers
 	var poKey [2]byte
 	poKey[0] = keyDistanceCnt
 	for i, poPtr := range poPtrs {
@@ -655,6 +721,7 @@ func (s *LDBStore) CleanGCIndex() error {
 		}
 	}
 
+	// if you made it this far your harddisk has survived. Congratulations
 	return s.db.Write(&batch)
 }
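Aside: the heart of this commit is the rewrite above, which turns one unbounded scan into a resumable, batched one: each pass opens a fresh iterator, seeks back to the last key it processed, handles at most 4096 entries, flushes the write batch, and repeats until it runs out of key-index rows. Below is a self-contained sketch of that pattern against goleveldb directly; rowTypeIndex, batchSize, the database path, and the staged operations are illustrative assumptions, not the swarm code itself.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

const (
	rowTypeIndex byte = 0x02 // assumed row-type prefix (stands in for keyIndex)
	batchSize         = 4096 // entries per pass, as in the commit
)

func main() {
	db, err := leveldb.OpenFile("/tmp/batched-scan-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Resume point: the last key processed in the previous pass. Seeking to
	// it again revisits one entry per pass, which is harmless as long as the
	// staged operations are idempotent (as the index rebuild above is).
	lastKey := []byte{rowTypeIndex}

	for done := false; !done; {
		batch := new(leveldb.Batch)
		it := db.NewIterator(nil, nil)
		it.Seek(lastKey)

		for i := 0; i < batchSize; i++ {
			if !it.Valid() {
				done = true // ran off the end of the database
				break
			}
			key := it.Key()
			if key[0] != rowTypeIndex {
				done = true // left the key-index row range
				break
			}
			// it.Key() is only valid until the next iterator call, so copy
			// it before remembering it as the resume point.
			lastKey = append(lastKey[:0], key...)

			// ... inspect it.Value() and stage corrections on the batch,
			// e.g. batch.Delete(lastKey) for inconsistent entries ...

			it.Next()
		}
		it.Release() // release before the flush, as the commit does

		if err := db.Write(batch, nil); err != nil {
			log.Fatal(err)
		}
	}
}

Releasing the iterator before each flush lets LevelDB drop the snapshot the iterator pins, which is presumably why the migration is batched at all: the old version held a single iterator and an ever-growing batch for the entire run.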
swarm/storage/ldbstore_test.go
@@ -761,6 +761,38 @@ func TestCleanIndex(t *testing.T) {
 			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
 		}
 	}
 
+	// check that the iterator quits properly
+	chunks, err = mputRandomChunks(ldb, 4100, 4096)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	po = ldb.po(chunks[4099].Address()[:])
+	dataKey = make([]byte, 10)
+	dataKey[0] = keyData
+	dataKey[1] = byte(po)
+	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
+	if _, err := ldb.db.Get(dataKey); err != nil {
+		t.Fatal(err)
+	}
+	if err := ldb.db.Delete(dataKey); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ldb.CleanGCIndex(); err != nil {
+		t.Fatal(err)
+	}
+
+	// entrycount should now be one less of added chunks
+	c, err = ldb.db.Get(keyEntryCnt)
+	if err != nil {
+		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
+	}
+	entryCount = binary.BigEndian.Uint64(c)
+	if entryCount != 4099+2 {
+		t.Fatalf("expected entrycnt to be 2, was %d", c)
+	}
+}
+
 func waitGc(ctx context.Context, ldb *LDBStore) {
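Aside: the added test writes 4100 chunks so the scan must cross the 4096-entry pass boundary, deletes one chunk's data row behind the store's back, and then checks that CleanGCIndex discards exactly that one inconsistent entry from the count. The 10-byte data key it hand-builds follows the store's row layout; a small sketch of that construction (the keyData value is an assumption for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const keyData byte = 0x06 // assumed value, for illustration only
	po := uint8(3)            // proximity order byte, as in the test
	idx := uint64(4099 + 3)   // storage index of the entry being probed

	// Data keys: 1 byte row type | 1 byte proximity order | 8 byte
	// big-endian storage index = 10 bytes total, as in the test above.
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	binary.BigEndian.PutUint64(dataKey[2:], idx)

	fmt.Printf("dataKey = % x\n", dataKey)
}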