// v/tools/preludes/tests_with_stats.v
module main
// /////////////////////////////////////////////////////////////////////
// / This file will get compiled as a part of the same module,
// / in which a given _test.v file is, when v is given -stats argument
// / The methods defined here are called back by the test program's
// / main function, generated by compiler/main.v so that customizing the
// / look & feel of the results is easy, since it is done in normal V
// / code, instead of in embedded C ...
// /////////////////////////////////////////////////////////////////////
import (
	filepath
	benchmark
)
const (
	// Indentation prefix for every per-test status line printed by this module.
	INNER_INDENT = ' '
)
// BenchedTests tracks the pass/fail counters and timing for a single
// _test.v file compiled with `v -stats`. Its methods are called back by
// the generated test main function.
struct BenchedTests {
mut:
	oks            int    // snapshot of C.g_test_oks, taken before each test step
	fails          int    // snapshot of C.g_test_fails, taken before each test step
	test_suit_file string // path of the _test.v file being run
	step_func_name string // cleaned name of the test_ function currently running
	bench          benchmark.Benchmark // counting/timing backend
}
// ///////////////////////////////////////////////////////////////////
// Called at the start of the test program produced by `v -stats file_test.v`.
// Returns a BenchedTests tracker primed with the expected number of steps,
// and prints the name of the file whose tests are about to run.
fn start_testing(total_number_of_tests int, vfilename string) BenchedTests {
	mut b := BenchedTests{
		bench: benchmark.new_benchmark()
	}
	b.bench.set_total_expected_steps(total_number_of_tests)
	b.test_suit_file = vfilename
	println('running tests in: $b.test_suit_file')
	return b
}
// Called before each test_ function, defined in file_test.v.
// Snapshots the global assert counters, so that testing_step_end can
// compute the per-step deltas, and starts the step timer.
fn (b mut BenchedTests) testing_step_start(stepfunc string) {
	// 'main__test_abc' -> 'test_abc' ; remaining '__' become '.'
	b.step_func_name = stepfunc.replace('main__', '').replace('__', '.')
	b.oks = C.g_test_oks
	b.fails = C.g_test_fails
	b.bench.step()
}
// Called after each test_ function, defined in file_test.v.
// Compares the global assert counters against the snapshot taken in
// testing_step_start, records the deltas in the benchmark counters, and
// prints one status line for the finished step.
fn (b mut BenchedTests) testing_step_end() {
	ok_diff := C.g_test_oks - b.oks
	fail_diff := C.g_test_fails - b.fails
	// ////////////////////////////////////////////////////////////////
	// Test function that performed no asserts at all:
	if ok_diff == 0 && fail_diff == 0 {
		b.bench.neither_fail_nor_ok()
		println(INNER_INDENT + b.bench.step_message_ok('NO asserts | ') + b.fn_name())
		return
	}
	// ////////////////////////////////////////////////////////////////
	if ok_diff > 0 {
		b.bench.ok_many(ok_diff)
	}
	if fail_diff > 0 {
		b.bench.fail_many(fail_diff)
	}
	// ////////////////////////////////////////////////////////////////
	// A step is OK only when it had asserts and none of them failed:
	if ok_diff > 0 && fail_diff == 0 {
		println(INNER_INDENT + b.bench.step_message_ok(nasserts(ok_diff)) + b.fn_name())
		return
	}
	if fail_diff > 0 {
		println(INNER_INDENT + b.bench.step_message_fail(nasserts(fail_diff)) + b.fn_name())
		return
	}
}
// fn_name returns the current test function's cleaned name, suffixed with '()'.
fn (b &BenchedTests) fn_name() string {
	return '${b.step_func_name}()'
}
// Called at the end of the test program produced by `v -stats file_test.v`.
// Stops the overall timer and prints the summary line for the whole file.
fn (b mut BenchedTests) end_testing() {
	b.bench.stop()
	println(INNER_INDENT + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"'))
}
// ///////////////////////////////////////////////////////////////////
// nasserts formats an assert counter like ' 5 asserts | ', right-padding
// the number by its magnitude so the columns in the test output line up.
// Uses the singular 'assert' for exactly 1; the original duplicate
// `n == 0` branch returned the same string as the `n < 10` branch and
// has been merged into it (behavior unchanged for every int, including
// negatives, which also fall through to the `n < 10` branch as before).
fn nasserts(n int) string {
	if n == 1 {
		return '${n:2d} assert | '
	}
	if n < 10 {
		return '${n:2d} asserts | '
	}
	if n < 100 {
		return '${n:3d} asserts | '
	}
	if n < 1000 {
		return '${n:4d} asserts | '
	}
	return '${n:5d} asserts | '
}