Commit 34c85def
authored 6 years ago by Elad, committed by Anton Evangelatov 6 years ago
cmd/swarm/swarm-smoke: sliding window test should not time out (#19152)
parent 81ed7001
Showing 2 changed files with 45 additions and 30 deletions:

  cmd/swarm/flags.go                       2 additions, 1 deletion
  cmd/swarm/swarm-smoke/sliding_window.go  43 additions, 29 deletions
cmd/swarm/flags.go  +2 -1

@@ -149,8 +149,9 @@ var (
 	}
 	SwarmStoreCacheCapacity = cli.UintFlag{
 		Name:   "store.cache.size",
-		Usage:  "Number of recent chunks cached in memory (default 5000)",
+		Usage:  "Number of recent chunks cached in memory",
 		EnvVar: SwarmEnvStoreCacheCapacity,
+		Value:  10000,
 	}
 	SwarmCompressedFlag = cli.BoolFlag{
 		Name:  "compressed",
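The flags.go change moves the cache-size default out of the help string and into the flag definition itself, and raises it to 10000: the old definition carried no Value field, so the effective default was the uint zero value even though the help text claimed 5000. A minimal sketch of how a cli.UintFlag with a Value default behaves, assuming gopkg.in/urfave/cli.v1 (the flag package cmd/swarm imports); the app name and environment-variable name below are illustrative stand-ins, not code from the commit:

package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Name = "cacheflag-demo" // hypothetical demo app, not part of the commit
	app.Flags = []cli.Flag{
		cli.UintFlag{
			Name:   "store.cache.size",
			Usage:  "Number of recent chunks cached in memory",
			EnvVar: "DEMO_STORE_CACHE_CAPACITY", // stand-in for the SwarmEnvStoreCacheCapacity constant
			Value:  10000,                       // default shown by --help and used when the flag is not set
		},
	}
	app.Action = func(ctx *cli.Context) error {
		// Resolution order in cli.v1: command line > environment variable > Value default.
		fmt.Println("store.cache.size =", ctx.Uint("store.cache.size"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Run with no arguments the demo prints 10000; passing --store.cache.size or setting the environment variable overrides it, which is the behaviour the real flag now gets from its Value field.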
cmd/swarm/swarm-smoke/sliding_window.go  +43 -29

@@ -42,23 +42,16 @@ func slidingWindowCmd(ctx *cli.Context, tuid string) error {
 		errc <- slidingWindow(ctx, tuid)
 	}()
 
-	select {
-	case err := <-errc:
-		if err != nil {
-			metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
-		}
-		return err
-	case <-time.After(time.Duration(timeout) * time.Second):
-		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)
-
-		return fmt.Errorf("timeout after %v sec", timeout)
+	err := <-errc
+	if err != nil {
+		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
 	}
+
+	return err
 }
 
 func slidingWindow(ctx *cli.Context, tuid string) error {
 	var hashes []uploadResult //swarm hashes of the uploads
 	nodes := len(hosts)
-	const iterationTimeout = 30 * time.Second
 	log.Info("sliding window test started", "tuid", tuid, "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
 	uploadedBytes := 0
 	networkDepth := 0
@@ -66,6 +59,7 @@ func slidingWindow(ctx *cli.Context, tuid string) error {
 outer:
 	for {
+		seed = int(time.Now().UTC().UnixNano())
 		log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)
 
 		t1 := time.Now()
 
@@ -79,6 +73,7 @@ outer:
 		}
 
 		metrics.GetOrRegisterResettingTimer("sliding-window.upload-time", nil).UpdateSince(t1)
+		metrics.GetOrRegisterGauge("sliding-window.upload-depth", nil).Update(int64(len(hashes)))
 
 		fhash, err := digest(bytes.NewReader(randomBytes))
 		if err != nil {
@@ -90,37 +85,56 @@ outer:
 		hashes = append(hashes, uploadResult{hash: hash, digest: fhash})
 		time.Sleep(time.Duration(syncDelay) * time.Second)
 		uploadedBytes += filesize * 1000
+		q := make(chan struct{}, 1)
+		d := make(chan struct{})
+		defer close(q)
+		defer close(d)
 		for i, v := range hashes {
-			timeout := time.After(time.Duration(timeout) * time.Second)
+			timeoutC := time.After(time.Duration(timeout) * time.Second)
 			errored = false
 
-		inner:
+		task:
 			for {
 				select {
-				case <-timeout:
-					errored = true
-					log.Error("error retrieving hash. timeout", "hash idx", i, "err", err)
-					metrics.GetOrRegisterCounter("sliding-window.single.error", nil).Inc(1)
-					break inner
-				default:
-					idx := 1 + rand.Intn(len(hosts)-1)
-					ruid := uuid.New()[:8]
-					start := time.Now()
-					err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
-					if err != nil {
-						continue inner
-					}
-					metrics.GetOrRegisterResettingTimer("sliding-window.single.fetch-time", nil).UpdateSince(start)
-					break inner
+				case q <- struct{}{}:
+					go func() {
+						var start time.Time
+						done := false
+						for !done {
+							log.Info("trying to retrieve hash", "hash", v.hash)
+							idx := 1 + rand.Intn(len(hosts)-1)
+							ruid := uuid.New()[:8]
+							start = time.Now()
+							// fetch hangs when swarm dies out, so we have to jump through a bit more hoops to actually
+							// catch the timeout, but also allow this retry logic
+							err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
+							if err != nil {
+								log.Error("error fetching hash", "err", err)
+								continue
+							}
+							done = true
+						}
+						metrics.GetOrRegisterResettingTimer("sliding-window.single.fetch-time", nil).UpdateSince(start)
+						d <- struct{}{}
+					}()
+				case <-d:
+					<-q
+					break task
+				case <-timeoutC:
+					errored = true
+					log.Error("error retrieving hash. timeout", "hash idx", i)
+					metrics.GetOrRegisterCounter("sliding-window.single.error", nil).Inc(1)
+					break outer
 				}
 			}
 
-			if errored {
-				break outer
-			}
 			networkDepth = i
 			metrics.GetOrRegisterGauge("sliding-window.network-depth", nil).Update(int64(networkDepth))
+			log.Info("sliding window test successfully fetched file", "currentDepth", networkDepth)
+			// this test might take a long time to finish - but we'd like to see metrics while they accumulate and not just when
+			// the test finishes. therefore emit the metrics on each iteration
+			emitMetrics(ctx)
 		}
 	}
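The interesting part of the sliding_window.go change is the retrieval loop: fetch can hang when the cluster dies, so the retry loop moves into a goroutine and the main loop selects over a one-slot channel (q), a done channel (d) and a per-hash timer, letting a hung fetch be abandoned instead of blocking the whole test past its deadline. Below is a self-contained sketch of that pattern; fetchOnce, the item list and the 2-second deadline are illustrative stand-ins, not code from the commit:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// fetchOnce stands in for swarm's fetch: it can fail (and be retried) or, in the
// real test, hang indefinitely when the cluster dies.
func fetchOnce(item int) error {
	time.Sleep(200 * time.Millisecond)
	if rand.Intn(3) == 0 {
		return errors.New("transient fetch error")
	}
	return nil
}

func main() {
	const perItemTimeout = 2 * time.Second

	q := make(chan struct{}, 1) // slot: at most one fetch worker in flight
	d := make(chan struct{})    // done: the worker signals a successful fetch

	items := []int{0, 1, 2}
outer:
	for _, item := range items {
		timeoutC := time.After(perItemTimeout)
	task:
		for {
			select {
			case q <- struct{}{}: // acquire the slot and start the worker
				go func(item int) {
					// Retry until one fetch succeeds; if fetchOnce hangs, this
					// goroutine is simply abandoned and the timeout case fires.
					for {
						if err := fetchOnce(item); err != nil {
							fmt.Println("fetch failed, retrying:", err)
							continue
						}
						break
					}
					d <- struct{}{}
				}(item)
			case <-d: // worker finished: release the slot, move on
				<-q
				break task
			case <-timeoutC: // per-item deadline reached: give up on the run
				fmt.Println("timed out on item", item)
				break outer
			}
		}
		fmt.Println("fetched item", item)
	}
}

In the commit itself the same roles are played by fetch, the hashes slice and the timeout flag, and the successful path additionally records the sliding-window.single.fetch-time metric before signalling on d.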