preludes,builder,cgen: add support for VTEST_RUNNER=tap and -test-runner tap (#12523)
parent caac89d6ca
commit 6ff953d936
@@ -40,24 +40,31 @@ fn cleanup_tdir() {
os.rmdir_all(tdir) or { eprintln(err) }
}

fn create_test(tname string, tcontent string) ?string {
tpath := os.join_path(tdir, tname)
os.write_file(tpath, tcontent) ?
eprintln('>>>>>>>> tpath: $tpath | tcontent: $tcontent')
return tpath
}

fn main() {
defer {
os.chdir(os.wd_at_startup) or {}
}
println('> vroot: $vroot | vexe: $vexe | tdir: $tdir')
ok_fpath := os.join_path(tdir, 'single_test.v')
os.write_file(ok_fpath, 'fn test_ok(){ assert true }') ?
check_ok('"$vexe" $ok_fpath')
check_ok('"$vexe" test $ok_fpath')
fail_fpath := os.join_path(tdir, 'failing_test.v')
os.write_file(fail_fpath, 'fn test_fail(){ assert 1 == 2 }') ?
check_fail('"$vexe" $fail_fpath')
check_fail('"$vexe" test $fail_fpath')
check_fail('"$vexe" test $tdir')
ok_fpath := create_test('a_single_ok_test.v', 'fn test_ok(){ assert true }') ?
check_ok('"$vexe" "$ok_fpath"')
check_ok('"$vexe" test "$ok_fpath"')
check_ok('"$vexe" test "$tdir"')
fail_fpath := create_test('a_single_failing_test.v', 'fn test_fail(){ assert 1 == 2 }') ?
check_fail('"$vexe" "$fail_fpath"')
check_fail('"$vexe" test "$fail_fpath"')
check_fail('"$vexe" test "$tdir"')
rel_dir := os.join_path(tdir, rand.ulid())
os.mkdir(rel_dir) ?
os.chdir(rel_dir) ?
check_ok('"$vexe" test ..${os.path_separator + os.base(ok_fpath)}')
check_ok('"$vexe" test "..${os.path_separator + os.base(ok_fpath)}"')
println('> all done')
}

fn check_ok(cmd string) string {
@@ -11,8 +11,23 @@ and then you can perform:
... to run all the module's '_test.v' files.

NB 2: V builtin testing requires you to name your files with a _test.v
suffix, and to name your test functions with test_ prefix. Each 'test_'
function in a '_test.v' file will be called automatically by the test
framework. You can use `assert condition` inside each 'test_' function.
If the asserted condition fails, then v will record that and produce a
more detailed error message about where the failure was.
suffix, and to name your test functions with test_ prefix. Each function,
that starts with 'fn test_', and that is in a '_test.v' file will be called
automatically by the test framework.

NB 3: You can use `assert condition` inside each 'test_' function. If the
asserted condition fails, then v will record that, and produce a more detailed
error message, about where the failure was.

NB 4: Alternative test runners (for IDE integrations):
You can use several alternative test result formats, using `-test-runner name`,
or by setting VTEST_RUNNER (the command line option has higher priority).

The names of the available test runners are:
`simple` Fastest, does not import additional modules, does no processing.
`tap` Format the output as required by the Test Anything Protocol (TAP).
`normal` Supports color output, nicest/most human readable, the default.

You can also implement your own custom test runner, by providing the path to
your .v file, that implements it to this option. For example, see:
vlib/v/preludes/test_runner_tap.v .
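For illustration only, here is a tiny hypothetical test file (the file name and its contents below are made up, they are not part of this commit), which can be run with any of the runners listed above:

// math_test.v - hypothetical example
fn test_addition() {
	assert 1 + 1 == 2
}

fn test_subtraction() {
	assert 2 - 1 == 0 // fails on purpose
}

Running it as `VTEST_RUNNER=tap v run math_test.v`, or equivalently as `v -test-runner tap run math_test.v`, selects the TAP producer added by this commit; `normal` and `simple` are selected the same way, just by a different name.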
@@ -173,8 +173,25 @@ pub fn eprint(s string) {
}
}

pub fn flush_stdout() {
$if freestanding {
not_implemented := 'flush_stdout is not implemented\n'
bare_eprint(not_implemented.str, u64(not_implemented.len))
} $else {
C.fflush(C.stdout)
}
}

pub fn flush_stderr() {
$if freestanding {
not_implemented := 'flush_stderr is not implemented\n'
bare_eprint(not_implemented.str, u64(not_implemented.len))
} $else {
C.fflush(C.stderr)
}
}

// print prints a message to stdout. Unlike `println` stdout is not automatically flushed.
// A call to `flush()` will flush the output buffer to stdout.
[manualfree]
pub fn print(s string) {
$if android {
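A minimal usage sketch of the two new helpers (a standalone example, not code from this commit): `print` does not flush stdout automatically, so a partial line can be forced out explicitly:

fn main() {
	print('progress: 50% ...')
	flush_stdout() // make the partial line visible immediately
	eprint('still working ')
	flush_stderr()
}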
@@ -41,8 +41,8 @@ fn __as_cast(obj voidptr, obj_type int, expected_type int) voidptr {
return obj
}

// VAssertMetaInfo is used during assertions. An instance of it
// is filled in by compile time generated code, when an assertion fails.
// VAssertMetaInfo is used during assertions. An instance of it is filled in by
// compile time generated code, when an assertion fails.
pub struct VAssertMetaInfo {
pub:
fpath string // the source file path of the assertion

@@ -56,9 +56,8 @@ pub:
rvalue string // the stringified *actual value* of the right side of a failed assertion
}

// free is used to free the memory occupied by the assertion meta data.
// It is called by cb_assertion_failed, and cb_assertion_ok in the preludes,
// once they are done with reporting/formatting the meta data.
// free frees the memory occupied by the assertion meta data. It is called automatically by
// the code, that V's test framework generates, after all other callbacks have been called.
[manualfree; unsafe]
pub fn (ami &VAssertMetaInfo) free() {
unsafe {
@@ -1,8 +1,6 @@
import rand

const (
strings = unique_strings(20000, 10)
)
const strings = unique_strings(7000, 10)

fn unique_strings(arr_len int, str_len int) []string {
mut arr := []string{cap: arr_len}

@@ -448,7 +446,7 @@ fn test_map_in() {
'Foo': 'bar'
}
if 'foo'.capitalize() in m {
println('ok')
assert true
} else {
assert false
}
@@ -7,14 +7,14 @@ const (
fn test_sorting_simple() {
mut a := unsorted.clone()
a.sort()
eprintln(' a: $a')
println(' a: $a')
assert a == sorted_asc
}

fn test_sorting_with_condition_expression() {
mut a := unsorted.clone()
a.sort(a > b)
eprintln(' a: $a')
println(' a: $a')
assert a == sorted_desc
}

@@ -44,7 +44,7 @@ fn mysort(mut a []int) {
fn test_sorting_by_passing_a_mut_array_to_a_function() {
mut a := unsorted.clone()
mysort(mut a)
eprintln(' a: $a')
println(' a: $a')
assert a == sorted_asc
}

@@ -52,17 +52,17 @@ fn test_sorting_by_passing_a_mut_array_to_a_function() {
fn test_sorting_by_passing_an_anonymous_sorting_function() {
mut a := unsorted
a.sort(fn(a &int, b &int) int { return *b - *a })
eprintln(' a: $a')
println(' a: $a')
assert a == sort_desc
}
*/
fn test_sorting_u64s() {
mut a := [u64(3), 2, 1, 9, 0, 8]
a.sort()
eprintln(' a: $a')
println(' a: $a')
assert a == [u64(0), 1, 2, 3, 8, 9]
a.sort(a > b)
eprintln(' a: $a')
println(' a: $a')
assert a == [u64(9), 8, 3, 2, 1, 0]
}
@@ -1444,6 +1444,7 @@ pub fn (s &string) free() {
return
}
unsafe {
// C.printf(c's: %x %s\n', s.str, s.str)
free(s.str)
}
s.is_lit = -98761234

@@ -1712,8 +1713,8 @@ pub fn (s string) strip_margin() string {
pub fn (s string) strip_margin_custom(del byte) string {
mut sep := del
if sep.is_space() {
eprintln('Warning: `strip_margin` cannot use white-space as a delimiter')
eprintln(' Defaulting to `|`')
println('Warning: `strip_margin` cannot use white-space as a delimiter')
println(' Defaulting to `|`')
sep = `|`
}
// don't know how much space the resulting string will be, but the max it
@@ -91,6 +91,7 @@ pub:
is_keep_alive bool // passed memory must not be freed (by GC) before function returns
no_body bool // a pure declaration like `fn abc(x int)`; used in .vh files, C./JS. fns.
mod string
file string
file_mode Language
pos token.Position
return_type_pos token.Position
@@ -271,7 +271,24 @@ pub fn (v &Builder) get_user_files() []string {
user_files << os.join_path(preludes_path, 'live_shared.v')
}
if v.pref.is_test {
user_files << os.join_path(preludes_path, 'tests_assertions.v')
user_files << os.join_path(preludes_path, 'test_runner.v')
//
mut v_test_runner_prelude := os.getenv('VTEST_RUNNER')
if v.pref.test_runner != '' {
v_test_runner_prelude = v.pref.test_runner
}
if v_test_runner_prelude == '' {
v_test_runner_prelude = 'normal'
}
if !v_test_runner_prelude.contains('/') && !v_test_runner_prelude.contains('\\')
&& !v_test_runner_prelude.ends_with('.v') {
v_test_runner_prelude = os.join_path(preludes_path, 'test_runner_${v_test_runner_prelude}.v')
}
if !os.is_file(v_test_runner_prelude) || !os.is_readable(v_test_runner_prelude) {
eprintln('test runner error: File $v_test_runner_prelude should be readable.')
verror('supported test runners are: tap, json, simple, normal')
}
user_files << v_test_runner_prelude
}
if v.pref.is_test && v.pref.is_stats {
user_files << os.join_path(preludes_path, 'tests_with_stats.v')
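The runner selection above can also be read as a small standalone piece of logic. The sketch below is illustrative only (the function name, the `preludes` value and the `main` driver are invented here); it mirrors the same precedence: `-test-runner` beats `VTEST_RUNNER`, the default is `normal`, a bare name is mapped to `test_runner_<name>.v` inside the preludes folder, and anything that looks like a path is used as a custom runner file directly:

import os

// resolve_test_runner mirrors the builder logic above (a sketch, not the real compiler API)
fn resolve_test_runner(cli_value string, env_value string, preludes_path string) string {
	mut name := env_value
	if cli_value != '' {
		// the -test-runner command line option has higher priority
		name = cli_value
	}
	if name == '' {
		name = 'normal'
	}
	if !name.contains('/') && !name.contains('\\') && !name.ends_with('.v') {
		// a bare name like `tap` maps to a prelude file
		return os.join_path(preludes_path, 'test_runner_${name}.v')
	}
	return name // already a path to a custom runner .v file
}

fn main() {
	preludes := os.join_path('vlib', 'v', 'preludes')
	println(resolve_test_runner('tap', '', preludes)) // vlib/v/preludes/test_runner_tap.v
	println(resolve_test_runner('', './my_runner.v', preludes)) // ./my_runner.v
}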
@@ -1431,7 +1431,9 @@ fn (mut c Checker) fail_if_immutable(expr ast.Expr) (string, token.Position) {
}
}
} else if expr.obj is ast.ConstField && expr.name in c.const_names {
c.error('cannot modify constant `$expr.name`', expr.pos)
if !c.inside_unsafe {
c.error('cannot modify constant `$expr.name`', expr.pos)
}
}
}
ast.IndexExpr {
@@ -18,6 +18,10 @@ const skip_on_ubuntu_musl = [

const turn_off_vcolors = os.setenv('VCOLORS', 'never', true)

// This is needed, because some of the .vv files are tests, and we do need stable
// output from them, that can be compared against their .out files:
const turn_on_normal_test_runner = os.setenv('VTEST_RUNNER', 'normal', true)

const should_autofix = os.getenv('VAUTOFIX') != ''

const github_job = os.getenv('GITHUB_JOB')

@@ -246,6 +250,7 @@ fn (mut tasks Tasks) run() {
line_can_be_erased = false
} else {
bench.ok()
assert true
if tasks.show_cmd {
eprintln(bstep_message(mut bench, benchmark.b_ok, '$task.cli_cmd $task.path',
task.took))
@@ -26,13 +26,11 @@ fn (mut g Gen) gen_assert_stmt(original_assert_statement ast.AssertStmt) {
g.write(')')
g.decrement_inside_ternary()
g.writeln(' {')
g.writeln('\tg_test_oks++;')
metaname_ok := g.gen_assert_metainfo(node)
g.writeln('\tmain__cb_assertion_ok(&$metaname_ok);')
g.writeln('\tmain__TestRunner_name_table[test_runner._typ]._method_assert_pass(test_runner._object, &$metaname_ok);')
g.writeln('} else {')
g.writeln('\tg_test_fails++;')
metaname_fail := g.gen_assert_metainfo(node)
g.writeln('\tmain__cb_assertion_failed(&$metaname_fail);')
g.writeln('\tmain__TestRunner_name_table[test_runner._typ]._method_assert_fail(test_runner._object, &$metaname_fail);')
g.gen_assert_postfailure_mode(node)
g.writeln('\tlongjmp(g_jump_buffer, 1);')
g.writeln('\t// TODO')
@@ -151,8 +151,6 @@ sapp_desc sokol_main(int argc, char* argv[]) {

pub fn (mut g Gen) write_tests_definitions() {
g.includes.writeln('#include <setjmp.h> // write_tests_main')
g.definitions.writeln('int g_test_oks = 0;')
g.definitions.writeln('int g_test_fails = 0;')
g.definitions.writeln('jmp_buf g_jump_buffer;')
}

@@ -161,8 +159,7 @@ pub fn (mut g Gen) gen_failing_error_propagation_for_test_fn(or_block ast.OrExpr
// `or { cb_propagate_test_error(@LINE, @FILE, @MOD, @FN, err.msg) }`
// and the test is considered failed
paline, pafile, pamod, pafn := g.panic_debug_info(or_block.pos)
g.writeln('\tmain__cb_propagate_test_error($paline, tos3("$pafile"), tos3("$pamod"), tos3("$pafn"), *(${cvar_name}.err.msg) );')
g.writeln('\tg_test_fails++;')
g.writeln('\tmain__TestRunner_name_table[test_runner._typ]._method_fn_error(test_runner._object, $paline, tos3("$pafile"), tos3("$pamod"), tos3("$pafn"), *(${cvar_name}.err.msg) );')
g.writeln('\tlongjmp(g_jump_buffer, 1);')
}

@@ -171,8 +168,7 @@ pub fn (mut g Gen) gen_failing_return_error_for_test_fn(return_stmt ast.Return,
// `or { err := error('something') cb_propagate_test_error(@LINE, @FILE, @MOD, @FN, err.msg) return err }`
// and the test is considered failed
paline, pafile, pamod, pafn := g.panic_debug_info(return_stmt.pos)
g.writeln('\tmain__cb_propagate_test_error($paline, tos3("$pafile"), tos3("$pamod"), tos3("$pafn"), *(${cvar_name}.err.msg) );')
g.writeln('\tg_test_fails++;')
g.writeln('\tmain__TestRunner_name_table[test_runner._typ]._method_fn_error(test_runner._object, $paline, tos3("$pafile"), tos3("$pamod"), tos3("$pafn"), *(${cvar_name}.err.msg) );')
g.writeln('\tlongjmp(g_jump_buffer, 1);')
}
@@ -191,28 +187,62 @@ pub fn (mut g Gen) gen_c_main_for_tests() {
}
g.writeln('#endif')
}
g.writeln('\tmain__vtest_init();')
g.writeln('\t_vinit(___argc, (voidptr)___argv);')
//
all_tfuncs := g.get_all_test_function_names()
g.writeln('\tstring v_test_file = ${ctoslit(g.pref.path)};')
if g.pref.is_stats {
g.writeln('\tmain__BenchedTests bt = main__start_testing($all_tfuncs.len, _SLIT("$g.pref.path"));')
g.writeln('\tmain__BenchedTests bt = main__start_testing($all_tfuncs.len, v_test_file);')
}
g.writeln('')
for tname in all_tfuncs {
g.writeln('\tstruct _main__TestRunner_interface_methods _vtrunner = main__TestRunner_name_table[test_runner._typ];')
g.writeln('\tvoid * _vtobj = test_runner._object;')
g.writeln('')
g.writeln('\tmain__VTestFileMetaInfo_free(test_runner.file_test_info);')
g.writeln('\t*(test_runner.file_test_info) = main__vtest_new_filemetainfo(v_test_file, $all_tfuncs.len);')
g.writeln('\t_vtrunner._method_start(_vtobj, $all_tfuncs.len);')
g.writeln('')
for tnumber, tname in all_tfuncs {
tcname := util.no_dots(tname)
testfn := g.table.fns[tname]
lnum := testfn.pos.line_nr + 1
g.writeln('\tmain__VTestFnMetaInfo_free(test_runner.fn_test_info);')
g.writeln('\tstring tcname_$tnumber = _SLIT("$tcname");')
g.writeln('\tstring tcmod_$tnumber = _SLIT("$testfn.mod");')
g.writeln('\tstring tcfile_$tnumber = ${ctoslit(testfn.file)};')
g.writeln('\t*(test_runner.fn_test_info) = main__vtest_new_metainfo(tcname_$tnumber, tcmod_$tnumber, tcfile_$tnumber, $lnum);')
g.writeln('\t_vtrunner._method_fn_start(_vtobj);')
g.writeln('\tif (!setjmp(g_jump_buffer)) {')
//
if g.pref.is_stats {
g.writeln('\tmain__BenchedTests_testing_step_start(&bt, _SLIT("$tcname"));')
g.writeln('\t\tmain__BenchedTests_testing_step_start(&bt, tcname_$tnumber);')
}
g.writeln('\tif (!setjmp(g_jump_buffer)) ${tcname}();')
g.writeln('\t\t${tcname}();')
g.writeln('\t\t_vtrunner._method_fn_pass(_vtobj);')
//
g.writeln('\t}else{')
//
g.writeln('\t\t_vtrunner._method_fn_fail(_vtobj);')
//
g.writeln('\t}')
if g.pref.is_stats {
g.writeln('\tmain__BenchedTests_testing_step_end(&bt);')
}
g.writeln('')
}
g.writeln('')
if g.pref.is_stats {
g.writeln('\tmain__BenchedTests_end_testing(&bt);')
}
g.writeln('')
g.writeln('\t_vtrunner._method_finish(_vtobj);')
g.writeln('\tint test_exit_code = _vtrunner._method_exit_code(_vtobj);')
//
g.writeln('\t_vtrunner._method__v_free(_vtobj);')
g.writeln('')
g.writeln('\t_vcleanup();')
g.writeln('\treturn g_test_fails > 0;')
g.writeln('')
g.writeln('\treturn test_exit_code;')
g.writeln('}')
if g.pref.printfn_list.len > 0 && 'main' in g.pref.printfn_list {
println(g.out.after(main_fn_start_pos))
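Taken together, the generated test main now drives the selected runner through the TestRunner interface instead of hard-coded callbacks: `vtest_init()` lets the chosen prelude install its runner, `_vinit()` performs the usual initialization, `_method_start()` receives the number of test functions, and every `test_` function runs inside `setjmp()`, so a failed assertion can `longjmp()` back and be reported through `_method_fn_fail()`, while functions that return normally go through `_method_fn_pass()`. After the loop, `_method_finish()` is called and `_method_exit_code()` decides the process exit status, replacing the old `return g_test_fails > 0;`.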
@@ -29,10 +29,10 @@ fn (mut g Gen) is_used_by_main(node ast.FnDecl) bool {
}

fn (mut g Gen) fn_decl(node ast.FnDecl) {
if !g.is_used_by_main(node) {
if node.should_be_skipped {
return
}
if node.should_be_skipped {
if !g.is_used_by_main(node) {
return
}
if g.is_builtin_mod && g.pref.gc_mode == .boehm_leak && node.name == 'malloc' {
@@ -110,7 +110,10 @@ pub fn mark_used(mut table ast.Table, pref &pref.Preferences, ast_files []&ast.F
'json.encode_u64',
'json.json_print',
'json.json_parse',
'main.cb_propagate_test_error',
'main.nasserts',
'main.vtest_init',
'main.vtest_new_metainfo',
'main.vtest_new_filemetainfo',
'os.getwd',
'os.init_os_args',
'os.init_os_args_wide',

@@ -385,6 +388,15 @@ pub fn mark_used(mut table ast.Table, pref &pref.Preferences, ast_files []&ast.F
}
}

for kcon, con in all_consts {
if pref.is_shared && con.is_pub {
walker.mark_const_as_used(kcon)
}
if !pref.is_shared && con.is_pub && con.name.starts_with('main.') {
walker.mark_const_as_used(kcon)
}
}

table.used_fns = walker.used_fns.move()
table.used_consts = walker.used_consts.move()
table.used_globals = walker.used_globals.move()
@@ -409,6 +409,9 @@ fn (mut p Parser) fn_decl() ast.FnDecl {
//
no_body: no_body
mod: p.mod
file: p.file_name
pos: start_pos
language: language
})
} else {
if language == .c {

@@ -454,6 +457,8 @@ fn (mut p Parser) fn_decl() ast.FnDecl {
//
no_body: no_body
mod: p.mod
file: p.file_name
pos: start_pos
language: language
})
}
@@ -107,6 +107,7 @@ pub mut:
is_shared bool // an ordinary shared library, -shared, no matter if it is live or not
is_o bool // building an .o file
is_prof bool // benchmark every function
test_runner string // can be 'simple' (fastest, but much less detailed), 'tap', 'normal'
profile_file string // the profile results will be stored inside profile_file
profile_no_inline bool // when true, [inline] functions would not be profiled
translated bool // `v translate doom.v` are we running V code translated from C? allow globals, ++ expressions, etc

@@ -465,6 +466,10 @@ pub fn parse_args_and_show_errors(known_external_commands []string, args []strin
'-show-depgraph' {
res.show_depgraph = true
}
'-test-runner' {
res.test_runner = cmdline.option(current_args, arg, res.test_runner)
i++
}
'-dump-c-flags' {
res.dump_c_flags = cmdline.option(current_args, arg, '-')
i++
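As a concrete usage example (the command itself is made up here; the precedence is the one implemented in the builder hunk above): `VTEST_RUNNER=simple v -test-runner tap run foo_test.v` selects the `tap` runner, because the `-test-runner` value stored in `res.test_runner` overrides the environment variable when the test runner prelude is chosen.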
@@ -0,0 +1,125 @@
[has_globals]
module main

__global test_runner TestRunner

///////////////////////////////////////////////////////////////////////////////
// This file will be compiled as part of the main program, for a _test.v file.
// The methods defined here are called back by the test program's assert
// statements, on each success/fail. The goal is to make customizing the look &
// feel of the assertions results easier, since it is done in normal V code.
///////////////////////////////////////////////////////////////////////////////

interface TestRunner {
mut:
file_test_info VTestFileMetaInfo // filled in by generated code, before .start() is called.
fn_test_info VTestFnMetaInfo // filled in by generated code, before .fn_start() is called.
fn_assert_passes u64 // reset this to 0 in .fn_start(), increase it in .assert_pass()
fn_passes u64 // increase this in .fn_pass()
fn_fails u64 // increase this in .fn_fails()
total_assert_passes u64 // increase this in .assert_pass()
total_assert_fails u64 // increase this in .assert_fail()
start(ntests int) // called before all tests, you can initialise private data here. ntests is the number of test functions in the _test.v file.
finish() // called after all tests are finished, you can print some stats if you want here.
exit_code() int // called right after finish(), it should return the exit code, that the test program will exit with.
//
fn_start() bool // called before the start of each test_ function. Return false, if the function should be skipped.
fn_pass() // called after the end of each test_ function, with NO failed assertion.
fn_fail() // called after the end of each test_ function, with a failed assertion, *or* returning an error.
fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) // called only for `fn test_xyz() ? { return error('message') }`, before .fn_fail() is called.
//
assert_pass(i &VAssertMetaInfo) // called after each `assert true`.
assert_fail(i &VAssertMetaInfo) // called after each `assert false`.
//
free() // you should free all the private data of your runner here.
}

//

struct VTestFileMetaInfo {
file string
tests int
}

// vtest_new_filemetainfo will be called right before .start(ntests),
// to fill in the .file_test_info field of the runner interface.
fn vtest_new_filemetainfo(file string, tests int) VTestFileMetaInfo {
return VTestFileMetaInfo{
file: file
tests: tests
}
}

[unsafe]
fn (i &VTestFileMetaInfo) free() {
unsafe {
i.file.free()
}
}

//

struct VTestFnMetaInfo {
name string
mod string
file string
line_nr int
}

// vtest_new_metainfo will be called once per each test function.
fn vtest_new_metainfo(name string, mod string, file string, line_nr int) VTestFnMetaInfo {
return VTestFnMetaInfo{
name: name
mod: mod
file: file
line_nr: line_nr
}
}

[unsafe]
fn (i &VTestFnMetaInfo) free() {
unsafe {
i.name.free()
i.mod.free()
i.file.free()
}
}

//

[typedef]
struct C.main__TestRunner {
mut:
_object voidptr
}

// change_test_runner should be called by preludes that implement
// the TestRunner interface, in their vtest_init fn (see below), to
// customize the way that V shows test results
[manualfree]
pub fn change_test_runner(x &TestRunner) {
pobj := unsafe { &C.main__TestRunner(&test_runner)._object }
if pobj != 0 {
test_runner.free()
unsafe {
(&C.main__TestRunner(&test_runner))._object = voidptr(0)
}
}
test_runner = *x
}

// vtest_init will be called *before* the normal _vinit() function,
// to give a chance to the test runner implementation to change the
// test_runner global variable. The reason vtest_init is called before
// _vinit, is because a _test.v file can define consts, and they in turn
// may use function calls in their declaration, which may do assertions.
// fn vtest_init() {
// change_test_runner(&TestRunner(AnotherTestRunner{}))
// }

// TODO: remove vtest_option_cludge, it is only here so that
// `vlib/sync/channel_close_test.v` compiles with simpler runners,
// that do not `import os` (which has other `fn() ?`). Without it,
// the C `Option_void` type is undefined -> C compilation error.
fn vtest_option_cludge() ? {
}
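To make the interface above concrete, here is a hedged sketch of a minimal custom runner (not part of this commit; the struct name, file name and behaviour are invented). Saved as, say, my_quiet_runner.v and passed as `v -test-runner ./my_quiet_runner.v file_test.v`, it gets compiled together with the prelude above, so it can use TestRunner, VAssertMetaInfo and change_test_runner without extra imports:

module main

// A deliberately quiet runner: it prints nothing per test,
// only a single summary line at the very end.
fn vtest_init() {
	change_test_runner(&TestRunner(QuietTestRunner{}))
}

struct QuietTestRunner {
mut:
	file_test_info      VTestFileMetaInfo
	fn_test_info        VTestFnMetaInfo
	fn_assert_passes    u64
	fn_passes           u64
	fn_fails            u64
	total_assert_passes u64
	total_assert_fails  u64
}

fn (mut runner QuietTestRunner) free() {}

fn (mut runner QuietTestRunner) start(ntests int) {}

fn (mut runner QuietTestRunner) finish() {
	eprintln('quiet runner: $runner.fn_passes passed, $runner.fn_fails failed')
}

fn (mut runner QuietTestRunner) exit_code() int {
	if runner.fn_fails > 0 {
		return 1
	}
	return 0
}

fn (mut runner QuietTestRunner) fn_start() bool {
	runner.fn_assert_passes = 0
	return true
}

fn (mut runner QuietTestRunner) fn_pass() {
	runner.fn_passes++
}

fn (mut runner QuietTestRunner) fn_fail() {
	runner.fn_fails++
}

fn (mut runner QuietTestRunner) fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) {}

fn (mut runner QuietTestRunner) assert_pass(i &VAssertMetaInfo) {
	runner.total_assert_passes++
	runner.fn_assert_passes++
	unsafe { i.free() }
}

fn (mut runner QuietTestRunner) assert_fail(i &VAssertMetaInfo) {
	runner.total_assert_fails++
	unsafe { i.free() }
}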
@@ -0,0 +1,155 @@
module main

import os
import term

///////////////////////////////////////////////////////////
// This file gets compiled as part of the main program, for
// each _test.v file. It implements the default/normal test
// output for `v run file_test.v`
// See also test_runner.v .
///////////////////////////////////////////////////////////

fn vtest_init() {
change_test_runner(&TestRunner(new_normal_test_runner()))
}

struct NormalTestRunner {
pub mut:
fname string
use_color bool
use_relative_paths bool
all_assertsions []&VAssertMetaInfo
//
mut:
file_test_info VTestFileMetaInfo
fn_test_info VTestFnMetaInfo
fn_assert_passes u64
fn_passes u64
fn_fails u64
//
total_assert_passes u64
total_assert_fails u64
}

fn new_normal_test_runner() &TestRunner {
mut tr := &NormalTestRunner{}
tr.use_color = term.can_show_color_on_stderr()
tr.use_relative_paths = match os.getenv('VERROR_PATHS') {
'absolute' { false }
else { true }
}
return tr
}

fn (mut runner NormalTestRunner) free() {
unsafe {
runner.all_assertsions.free()
runner.fname.free()
runner.fn_test_info.free()
runner.file_test_info.free()
}
}

fn normalise_fname(name string) string {
return 'fn ' + name.replace('__', '.').replace('main.', '')
}

fn (mut runner NormalTestRunner) start(ntests int) {
runner.all_assertsions = []&VAssertMetaInfo{cap: 1000}
}

fn (mut runner NormalTestRunner) finish() {
}

fn (mut runner NormalTestRunner) exit_code() int {
if runner.fn_fails > 0 {
return 1
}
return 0
}

fn (mut runner NormalTestRunner) fn_start() bool {
runner.fn_assert_passes = 0
runner.fname = normalise_fname(runner.fn_test_info.name)
return true
}

fn (mut runner NormalTestRunner) fn_pass() {
runner.fn_passes++
}

fn (mut runner NormalTestRunner) fn_fail() {
runner.fn_fails++
}

fn (mut runner NormalTestRunner) fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) {
filepath := if runner.use_relative_paths { file.clone() } else { os.real_path(file) }
mut final_filepath := filepath + ':$line_nr:'
if runner.use_color {
final_filepath = term.gray(final_filepath)
}
mut final_funcname := 'fn ' + fn_name.replace('main.', '').replace('__', '.')
if runner.use_color {
final_funcname = term.red('✗ ' + final_funcname)
}
final_msg := if runner.use_color { term.dim(errmsg) } else { errmsg.clone() }
eprintln('$final_filepath $final_funcname failed propagation with error: $final_msg')
if os.is_file(file) {
source_lines := os.read_lines(file) or { []string{len: line_nr + 1} }
eprintln('${line_nr:5} | ${source_lines[line_nr - 1]}')
}
}

fn (mut runner NormalTestRunner) assert_pass(i &VAssertMetaInfo) {
runner.total_assert_passes++
runner.fn_assert_passes++
runner.all_assertsions << i
}

fn (mut runner NormalTestRunner) assert_fail(i &VAssertMetaInfo) {
runner.total_assert_fails++
filepath := if runner.use_relative_paths { i.fpath.clone() } else { os.real_path(i.fpath) }
mut final_filepath := filepath + ':${i.line_nr + 1}:'
if runner.use_color {
final_filepath = term.gray(final_filepath)
}
mut final_funcname := 'fn ' + i.fn_name.replace('main.', '').replace('__', '.')
if runner.use_color {
final_funcname = term.red('✗ ' + final_funcname)
}
final_src := if runner.use_color {
term.dim('assert ${term.bold(i.src)}')
} else {
'assert ' + i.src
}
eprintln('$final_filepath $final_funcname')
if i.op.len > 0 && i.op != 'call' {
mut lvtitle := ' Left value:'
mut rvtitle := ' Right value:'
mut slvalue := '$i.lvalue'
mut srvalue := '$i.rvalue'
if runner.use_color {
slvalue = term.yellow(slvalue)
srvalue = term.yellow(srvalue)
lvtitle = term.gray(lvtitle)
rvtitle = term.gray(rvtitle)
}
cutoff_limit := 30
if slvalue.len > cutoff_limit || srvalue.len > cutoff_limit {
eprintln(' > $final_src')
eprintln(lvtitle)
eprintln(' $slvalue')
eprintln(rvtitle)
eprintln(' $srvalue')
} else {
eprintln(' > $final_src')
eprintln(' $lvtitle $slvalue')
eprintln('$rvtitle $srvalue')
}
} else {
eprintln(' $final_src')
}
eprintln('')
runner.all_assertsions << i
}
@@ -0,0 +1,84 @@
module main

// Provide a no-frills implementation of the TestRunner interface:

fn vtest_init() {
change_test_runner(&TestRunner(SimpleTestRunner{}))
}

struct SimpleTestRunner {
mut:
fname string
//
file_test_info VTestFileMetaInfo
fn_test_info VTestFnMetaInfo
fn_assert_passes u64
fn_passes u64
fn_fails u64
//
total_assert_passes u64
total_assert_fails u64
}

fn (mut runner SimpleTestRunner) free() {
unsafe {
runner.fname.free()
runner.fn_test_info.free()
runner.file_test_info.free()
}
}

fn normalise_fname(name string) string {
return 'fn ' + name.replace('__', '.').replace('main.', '')
}

fn (mut runner SimpleTestRunner) start(ntests int) {
eprintln('SimpleTestRunner testing start; expected: $ntests test functions')
}

fn (mut runner SimpleTestRunner) finish() {
eprintln('SimpleTestRunner testing finish; fn:[passes: $runner.fn_passes, fails: $runner.fn_fails], assert:[passes: $runner.total_assert_passes, fails: $runner.total_assert_fails]')
}

fn (mut runner SimpleTestRunner) exit_code() int {
if runner.fn_fails > 0 {
return 1
}
return 0
}

//

fn (mut runner SimpleTestRunner) fn_start() bool {
runner.fn_assert_passes = 0
runner.fname = normalise_fname(runner.fn_test_info.name)
return true
}

fn (mut runner SimpleTestRunner) fn_pass() {
runner.fn_passes++
}

fn (mut runner SimpleTestRunner) fn_fail() {
runner.fn_fails++
eprintln('>>> fail $runner.fname')
}

fn (mut runner SimpleTestRunner) fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) {
eprintln('>>> SimpleTestRunner fn_error $runner.fname, line_nr: $line_nr, file: $file, mod: $mod, fn_name: $fn_name, errmsg: $errmsg')
}

//

fn (mut runner SimpleTestRunner) assert_pass(i &VAssertMetaInfo) {
runner.total_assert_passes++
runner.fn_assert_passes++
unsafe { i.free() }
}

fn (mut runner SimpleTestRunner) assert_fail(i &VAssertMetaInfo) {
runner.total_assert_fails++
eprintln('> failed assert ${runner.fn_assert_passes + 1} in $runner.fname, assert was in ${normalise_fname(i.fn_name)}, line: ${
i.line_nr + 1}')
unsafe { i.free() }
}
@@ -0,0 +1,109 @@
module main

// TAP, the Test Anything Protocol, is a simple text-based interface
// between testing modules in a test harness.
// TAP started life as part of the test harness for Perl but now has
// implementations in C, C++, Python, PHP, Perl, Java, JavaScript,
// Go, Rust, and others.
// Consumers and producers do not have to be written in the same
// language to interoperate. It decouples the reporting of errors
// from the presentation of the reports.
// For more details: https://testanything.org/

// This file implements a TAP producer for V tests.
// You can use it with:
// `VTEST_RUNNER=tap v run file_test.v`
// or
// `v -test-runner tap run file_test.v`

fn vtest_init() {
change_test_runner(&TestRunner(TAPTestRunner{}))
}

struct TAPTestRunner {
mut:
fname string
plan_tests int
test_counter int
//
file_test_info VTestFileMetaInfo
fn_test_info VTestFnMetaInfo
fn_assert_passes u64
fn_passes u64
fn_fails u64
//
total_assert_passes u64
total_assert_fails u64
}

fn (mut runner TAPTestRunner) free() {
unsafe {
runner.fname.free()
runner.fn_test_info.free()
runner.file_test_info.free()
}
}

fn normalise_fname(name string) string {
return 'fn ' + name.replace('__', '.').replace('main.', '')
}

fn flush_println(s string) {
println(s)
flush_stdout()
}

fn (mut runner TAPTestRunner) start(ntests int) {
runner.plan_tests = ntests
flush_println('1..$ntests')
}

fn (mut runner TAPTestRunner) finish() {
flush_println('# $runner.plan_tests tests, ${runner.total_assert_fails +
runner.total_assert_passes} assertions, $runner.total_assert_fails failures')
}

fn (mut runner TAPTestRunner) exit_code() int {
if runner.fn_fails > 0 {
return 1
}
return 0
}

//

fn (mut runner TAPTestRunner) fn_start() bool {
runner.fn_assert_passes = 0
runner.test_counter++
runner.fname = normalise_fname(runner.fn_test_info.name)
return true
}

fn (mut runner TAPTestRunner) fn_pass() {
runner.fn_passes++
flush_println('ok $runner.test_counter - $runner.fname')
}

fn (mut runner TAPTestRunner) fn_fail() {
flush_println('not ok $runner.test_counter - $runner.fname')
runner.fn_fails++
}

fn (mut runner TAPTestRunner) fn_error(line_nr int, file string, mod string, fn_name string, errmsg string) {
flush_println('# test function propagated error: $runner.fname, line_nr: $line_nr, file: $file, mod: $mod, fn_name: $fn_name, errmsg: $errmsg')
}

//

fn (mut runner TAPTestRunner) assert_pass(i &VAssertMetaInfo) {
runner.total_assert_passes++
runner.fn_assert_passes++
unsafe { i.free() }
}

fn (mut runner TAPTestRunner) assert_fail(i &VAssertMetaInfo) {
runner.total_assert_fails++
flush_println('# failed assert: ${runner.fn_assert_passes + 1} in $runner.fname, assert was in ${normalise_fname(i.fn_name)}, line: ${
i.line_nr + 1}')
unsafe { i.free() }
}
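Based on the start/fn_pass/assert_fail/fn_fail/finish methods above, running the hypothetical math_test.v from the help-text example with this runner would produce output roughly like the following (the exact line number depends on the file layout):

1..2
ok 1 - fn test_addition
# failed assert: 1 in fn test_subtraction, assert was in fn test_subtraction, line: 7
not ok 2 - fn test_subtraction
# 2 tests, 2 assertions, 1 failures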
@@ -1,108 +0,0 @@
module main

import os
import term

const use_color = term.can_show_color_on_stderr()

const use_relative_paths = can_use_relative_paths()

fn can_use_relative_paths() bool {
return match os.getenv('VERROR_PATHS') {
'absolute' { false }
else { true }
}
}

// //////////////////////////////////////////////////////////////////
// / This file will get compiled as part of the main program,
// / for a _test.v file.
// / The methods defined here are called back by the test program's
// / assert statements, on each success/fail. The goal is to make
// / customizing the look & feel of the assertions results easier,
// / since it is done in normal V code, instead of in embedded C ...
// //////////////////////////////////////////////////////////////////
// TODO copy pasta builtin.v fn ___print_assert_failure
fn cb_assertion_failed(i &VAssertMetaInfo) {
filepath := if use_relative_paths { i.fpath } else { os.real_path(i.fpath) }
mut final_filepath := filepath + ':${i.line_nr + 1}:'
if use_color {
final_filepath = term.gray(final_filepath)
}
mut final_funcname := 'fn ' + i.fn_name.replace('main.', '').replace('__', '.')
if use_color {
final_funcname = term.red('✗ ' + final_funcname)
}
final_src := if use_color { term.dim('assert ${term.bold(i.src)}') } else { 'assert ' + i.src }
eprintln('$final_filepath $final_funcname')
if i.op.len > 0 && i.op != 'call' {
mut lvtitle := ' Left value:'
mut rvtitle := ' Right value:'
mut slvalue := '$i.lvalue'
mut srvalue := '$i.rvalue'
if use_color {
slvalue = term.yellow(slvalue)
srvalue = term.yellow(srvalue)
lvtitle = term.gray(lvtitle)
rvtitle = term.gray(rvtitle)
}
cutoff_limit := 30
if slvalue.len > cutoff_limit || srvalue.len > cutoff_limit {
eprintln(' > $final_src')
eprintln(lvtitle)
eprintln(' $slvalue')
eprintln(rvtitle)
eprintln(' $srvalue')
} else {
eprintln(' > $final_src')
eprintln(' $lvtitle $slvalue')
eprintln('$rvtitle $srvalue')
}
} else {
eprintln(' $final_src')
}
eprintln('')
unsafe { i.free() }
}

fn cb_assertion_ok(i &VAssertMetaInfo) {
// prints for every assertion instead of per test function
// TODO: needs to be changed
/*
use_color := term.can_show_color_on_stderr()
use_relative_paths := match os.getenv('VERROR_PATHS') {
'absolute' { false }
else { true }
}
filepath := if use_relative_paths { i.fpath } else { os.real_path(i.fpath) }
final_filepath := if use_color {
term.gray(filepath + ':${i.line_nr+1}')
} else {
filepath + ':${i.line_nr+1}'
}
mut final_funcname := i.fn_name.replace('main.', '').replace('__', '.')
if use_color {
final_funcname = term.green('✓ ' + final_funcname)
}
println('$final_funcname ($final_filepath)')
*/
unsafe { i.free() }
}

fn cb_propagate_test_error(line_nr int, file string, mod string, fn_name string, errmsg string) {
filepath := if use_relative_paths { file } else { os.real_path(file) }
mut final_filepath := filepath + ':$line_nr:'
if use_color {
final_filepath = term.gray(final_filepath)
}
mut final_funcname := 'fn ' + fn_name.replace('main.', '').replace('__', '.')
if use_color {
final_funcname = term.red('✗ ' + final_funcname)
}
final_msg := if use_color { term.dim(errmsg) } else { errmsg }
eprintln('$final_filepath $final_funcname failed propagation with error: $final_msg')
if os.is_file(file) {
source_lines := os.read_lines(file) or { []string{len: line_nr + 1} }
eprintln('${line_nr:5} | ${source_lines[line_nr - 1]}')
}
}
@@ -17,8 +17,9 @@ const (
struct BenchedTests {
mut:
bench benchmark.Benchmark
oks int
fails int
oks u64
fails u64
fn_fails u64
test_suit_file string
step_func_name string
}

@@ -38,17 +39,19 @@ fn start_testing(total_number_of_tests int, vfilename string) BenchedTests {
// Called before each test_ function, defined in file_test.v
fn (mut b BenchedTests) testing_step_start(stepfunc string) {
b.step_func_name = stepfunc.replace('main.', '').replace('__', '.')
b.oks = C.g_test_oks
b.fails = C.g_test_fails
b.oks = test_runner.total_assert_passes
b.fails = test_runner.total_assert_fails
b.fn_fails = test_runner.fn_fails
b.bench.step()
}

// Called after each test_ function, defined in file_test.v
fn (mut b BenchedTests) testing_step_end() {
ok_diff := C.g_test_oks - b.oks
fail_diff := C.g_test_fails - b.fails
ok_diff := int(test_runner.total_assert_passes - b.oks)
fail_diff := int(test_runner.total_assert_fails - b.fails)
fn_fail_diff := int(test_runner.fn_fails - b.fn_fails)
// ////////////////////////////////////////////////////////////////
if ok_diff == 0 && fail_diff == 0 {
if ok_diff == 0 && fn_fail_diff == 0 {
b.bench.neither_fail_nor_ok()
println(inner_indent + b.bench.step_message_ok(' NO asserts | ') + b.fn_name())
return

@@ -57,16 +60,18 @@ fn (mut b BenchedTests) testing_step_end() {
if ok_diff > 0 {
b.bench.ok_many(ok_diff)
}
if fail_diff > 0 {
b.bench.fail_many(fail_diff)
if fn_fail_diff > 0 {
b.bench.fail_many(fn_fail_diff)
}
// ////////////////////////////////////////////////////////////////
if ok_diff > 0 && fail_diff == 0 {
println(inner_indent + b.bench.step_message_ok(nasserts(ok_diff)) + b.fn_name())
if fn_fail_diff > 0 {
sfail_diff := nasserts(ok_diff + fail_diff)
println(inner_indent + b.bench.step_message_fail(sfail_diff) + b.fn_name())
return
}
if fail_diff > 0 {
println(inner_indent + b.bench.step_message_fail(nasserts(fail_diff)) + b.fn_name())
if ok_diff > 0 {
sok_diff := nasserts(ok_diff)
println(inner_indent + b.bench.step_message_ok(sok_diff) + b.fn_name())
return
}
}

@@ -78,8 +83,10 @@ fn (b &BenchedTests) fn_name() string {
// Called at the end of the test program produced by `v -stats file_test.v`
fn (mut b BenchedTests) end_testing() {
b.bench.stop()
println(inner_indent + b.bench.total_message('running V tests in "' +
os.file_name(b.test_suit_file) + '"'))
fname := os.file_name(b.test_suit_file)
msg := 'running V tests in "$fname"'
final := inner_indent + b.bench.total_message(msg)
println(final)
}

// ///////////////////////////////////////////////////////////////////
@@ -0,0 +1 @@
module main

@@ -0,0 +1 @@
module main

@@ -0,0 +1 @@
module main
@@ -5,7 +5,7 @@ fn vroot_path(relpath string) string {
}

fn vexecute(relpath string) os.Result {
return os.execute('${@VEXE} ' + vroot_path(relpath))
return os.execute('${@VEXE} -test-runner normal ' + vroot_path(relpath))
}

fn testsuite_begin() {