tests: support `// vtest retry: 4` for marking flaky tests
parent eec930b86a
commit df8a4a03a0
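In short: a test file can opt into automatic re-runs by marking itself with a `// vtest retry: N` comment. When such a test fails, the runner re-executes it up to N more times, exporting `VTEST_RETRY` (the current attempt number) and `VTEST_RETRY_MAX` (N) into the environment of each re-run. A minimal opt-in looks like this (hypothetical test file; only the directive and the two environment variables come from this commit):

// vtest retry: 3
import os

fn test_sometimes_flaky() {
	// On re-runs, the runner tells the test which attempt this is:
	attempt := os.getenv('VTEST_RETRY').int() // 0 on the initial run
	maxn := os.getenv('VTEST_RETRY_MAX').int()
	eprintln('attempt: $attempt / $maxn')
	assert true
}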
@@ -306,22 +306,35 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 			}
 		}
 		if show_stats {
 			ts.append_message(.ok, term.h_divider('-'))
-			status := os.system(cmd)
-			if status == 0 {
-				ts.benchmark.ok()
-				tls_bench.ok()
-			} else {
+			mut status := os.system(cmd)
+			if status != 0 {
+				details := get_test_details(file)
+				os.setenv('VTEST_RETRY_MAX', '$details.retry', true)
+				for retry := 1; retry <= details.retry; retry++ {
+					ts.append_message(.info, ' retrying $retry/$details.retry of $relative_file ...')
+					os.setenv('VTEST_RETRY', '$retry', true)
+					status = os.system(cmd)
+					if status == 0 {
+						unsafe {
+							goto test_passed_system
+						}
+					}
+				}
 				ts.failed = true
 				ts.benchmark.fail()
 				tls_bench.fail()
 				ts.add_failed_cmd(cmd)
 				return pool.no_result
+			} else {
+				test_passed_system:
+				ts.benchmark.ok()
+				tls_bench.ok()
 			}
 		} else {
 			if testing.show_start {
 				ts.append_message(.info, ' starting $relative_file ...')
 			}
-			r := os.execute(cmd)
+			mut r := os.execute(cmd)
 			if r.exit_code < 0 {
 				ts.failed = true
 				ts.benchmark.fail()
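The `unsafe { goto ... }` pairs deserve a note: when a re-run succeeds, control jumps over the failure bookkeeping to the label in the `else` branch (V requires `goto` to sit inside an `unsafe` block, which is why the commit wraps it). A compilable sketch of the same control flow, with a hypothetical `simulated_run` standing in for `os.system`:

// pretend only the final retry succeeds
fn simulated_run(retry int, max_retries int) int {
	if retry == max_retries {
		return 0
	}
	return 1
}

fn run_with_retries(max_retries int) string {
	mut status := simulated_run(0, max_retries) // the initial run fails
	if status != 0 {
		for retry := 1; retry <= max_retries; retry++ {
			status = simulated_run(retry, max_retries)
			if status == 0 {
				unsafe {
					goto test_passed // skip the failure bookkeeping below
				}
			}
		}
		return 'failed after all retries'
	} else {
		test_passed:
		return 'passed'
	}
}

fn main() {
	println(run_with_retries(2)) // passed
}

The next hunk mirrors the same loop for the `os.execute` code path, retrying via `r = os.execute(cmd)` instead of `os.system`: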
@@ -331,6 +344,18 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 				return pool.no_result
 			}
 			if r.exit_code != 0 {
+				details := get_test_details(file)
+				os.setenv('VTEST_RETRY_MAX', '$details.retry', true)
+				for retry := 1; retry <= details.retry; retry++ {
+					ts.append_message(.info, ' retrying $retry/$details.retry of $relative_file ...')
+					os.setenv('VTEST_RETRY', '$retry', true)
+					r = os.execute(cmd)
+					if r.exit_code == 0 {
+						unsafe {
+							goto test_passed_execute
+						}
+					}
+				}
 				ts.failed = true
 				ts.benchmark.fail()
 				tls_bench.fail()
@@ -338,6 +363,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 				ts.append_message(.fail, tls_bench.step_message_fail('$normalised_relative_file\n$r.output.trim_space()$ending_newline'))
 				ts.add_failed_cmd(cmd)
 			} else {
+				test_passed_execute:
 				ts.benchmark.ok()
 				tls_bench.ok()
 				if !testing.hide_oks {
@@ -484,3 +510,19 @@ pub fn setup_new_vtmp_folder() string {
 	os.setenv('VTMP', new_vtmp_dir, true)
 	return new_vtmp_dir
 }
+
+pub struct TestDetails {
+pub mut:
+	retry int
+}
+
+pub fn get_test_details(file string) TestDetails {
+	mut res := TestDetails{}
+	lines := os.read_lines(file) or { [] }
+	for line in lines {
+		if line.starts_with('// vtest retry:') {
+			res.retry = line.all_after(':').trim_space().int()
+		}
+	}
+	return res
+}
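The parser is intentionally permissive: any line of the file that starts with `// vtest retry:` sets the retry budget (the last such line wins), and a file without the directive leaves `retry` at 0, so unmarked tests keep the old fail-fast behaviour. A standalone re-statement of the same logic (hypothetical names `retry_budget` and `flaky_example_test.v`, same string APIs as the hunk above) that can be run directly:

import os

// retry_budget re-states what get_test_details does, as a free function:
// scan the file for `// vtest retry: N` lines and return the last N found.
fn retry_budget(file string) int {
	mut res := 0
	lines := os.read_lines(file) or { [] }
	for line in lines {
		if line.starts_with('// vtest retry:') {
			res = line.all_after(':').trim_space().int()
		}
	}
	return res
}

fn main() {
	os.write_file('flaky_example_test.v', '// vtest retry: 4\n') or { panic(err) }
	println(retry_budget('flaky_example_test.v')) // 4
	os.rm('flaky_example_test.v') or {}
}

The remaining hunks opt two existing flaky tests into the new mechanism (the first is evidently `vlib/v/live/live_test.v`, which the new retry test below cites as a flaky example) and add a self-test for the retry machinery itself: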
@@ -1,6 +1,8 @@
 import os
 import time
 
+// vtest retry: 4
+
 /*
 The goal of this test, is to simulate a developer, that has run a program, compiled with -live flag.
@@ -1,3 +1,4 @@
+// vtest retry: 4
 /*
  * To verify the effect of "[keep_args_alive]", this attribute may be commented out.
  * However it is not guaranteed that then this test will fail.
@@ -0,0 +1,24 @@
+import os
+
+// vtest retry: 2
+
+// This tests whether the V test runner can handle retries.
+//
+// The comment above should make it try re-running the same test,
+// a maximum of 2 times. It will fail for all but the last retry.
+// This is useful for reducing false positives on the CI, due to
+// flakiness of specific tests like `vlib/v/live/live_test.v` for example.
+
+// NB: this test is supposed to be run with `v test retry_test.v`.
+// Running just `v retry_test.v` WILL fail.
+
+fn test_test_runner_retrying_failing_tests() {
+	n := os.getenv('VTEST_RETRY').int()
+	maxn := os.getenv('VTEST_RETRY_MAX').int()
+	eprintln('> n: $n | maxn: $maxn')
+	if n > 0 && n == maxn {
+		assert true
+		return
+	}
+	assert false
+}
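Worked through, the self-test passes only on the last allowed retry, since `VTEST_RETRY_MAX` is only exported after the first failure:

initial run : VTEST_RETRY / VTEST_RETRY_MAX unset => n == 0    => assert false
retry 1/2   : VTEST_RETRY=1, VTEST_RETRY_MAX=2    => n != maxn => assert false
retry 2/2   : VTEST_RETRY=2, VTEST_RETRY_MAX=2    => n == maxn => passes

This is why it must go through the runner (`v test retry_test.v`): run directly, there are no retries and the initial failure is final.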