tests: improved test output formatting
parent 4f173c8900
commit a0f32f5c29
@@ -14,8 +14,6 @@ pub mut:
 	vargs string
 	failed bool
 	benchmark benchmark.Benchmark
-	ok string
-	fail string
 }

 pub fn new_test_session(vargs string) TestSession {
@@ -34,8 +32,6 @@ pub fn vexe_path() string {
 }

 pub fn (ts mut TestSession) init() {
-	ts.ok = term.ok_message('OK')
-	ts.fail = term.fail_message('FAIL')
 	ts.benchmark = benchmark.new_benchmark()
 }

@@ -43,6 +39,7 @@ pub fn (ts mut TestSession) test() {
 	tmpd := os.tmpdir()
 	ts.init()
 	show_stats := '-stats' in ts.vargs.split(' ')
+	mut remaining_files := []string
 	for dot_relative_file in ts.files {
 		relative_file := dot_relative_file.replace('./', '')
 		file := os.realpath(relative_file)
@@ -66,6 +63,12 @@ pub fn (ts mut TestSession) test() {
 				continue
 			}
 		}
+		remaining_files << dot_relative_file
+	}
+	ts.benchmark.set_total_expected_steps(remaining_files.len)
+	for dot_relative_file in remaining_files {
+		relative_file := dot_relative_file.replace('./', '')
+		file := os.realpath(relative_file)
 		// Ensure that the generated binaries will be stored in the temporary folder.
 		// Remove them after a test passes/fails.
 		fname := filepath.filename(file)
@@ -97,17 +100,17 @@ pub fn (ts mut TestSession) test() {
 		r := os.exec(cmd) or {
 			ts.benchmark.fail()
 			ts.failed = true
-			eprintln(ts.benchmark.step_message('$relative_file ${ts.fail}'))
+			eprintln(ts.benchmark.step_message_fail(relative_file))
 			continue
 		}
 		if r.exit_code != 0 {
 			ts.benchmark.fail()
 			ts.failed = true
-			eprintln(ts.benchmark.step_message('$relative_file ${ts.fail}\n`$file`\n (\n$r.output\n)'))
+			eprintln(ts.benchmark.step_message_fail('${relative_file}\n`$file`\n (\n$r.output\n)'))
 		}
 		else {
 			ts.benchmark.ok()
-			eprintln(ts.benchmark.step_message('$relative_file ${ts.ok}'))
+			eprintln(ts.benchmark.step_message_ok(relative_file))
 		}
 	}
 	if os.exists(generated_binary_fpath) {
@@ -171,19 +174,17 @@ pub fn building_any_v_binaries_failed() bool {
 		'$vexe -o v_prod -prod v.v',
 	]
 	mut bmark := benchmark.new_benchmark()
-	bok := term.ok_message('OK')
-	bfail := term.fail_message('FAIL')
 	for cmd in v_build_commands {
 		bmark.step()
 		if build_v_cmd_failed(cmd) {
 			bmark.fail()
 			failed = true
-			eprintln(bmark.step_message('$cmd => ${bfail} . See details above ^^^^^^^'))
+			eprintln(bmark.step_message_fail('command: ${cmd} . See details above ^^^^^^^'))
 			eprintln('')
 			continue
 		}
 		bmark.ok()
-		eprintln(bmark.step_message('$cmd => ${bok}'))
+		eprintln(bmark.step_message_ok('command: ${cmd}'))
 	}
 	bmark.stop()
 	eprintln(term.h_divider())
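
The runner change above splits file handling into two passes: first collect everything that will actually run into `remaining_files`, then hand the count to the benchmark so each step can be prefixed with a running counter. A minimal sketch of the pattern, assuming a hypothetical hard-coded file list in place of the runner's real skip logic:

```v
import benchmark

fn main() {
	files := ['a_test.v', 'skip_me_test.v', 'b_test.v']
	// pass 1: decide what will actually run
	mut remaining_files := []string
	for f in files {
		if f.starts_with('skip_') {
			continue
		}
		remaining_files << f
	}
	mut bmark := benchmark.new_benchmark()
	// the total is known before the first step, enabling [i/total] prefixes
	bmark.set_total_expected_steps(remaining_files.len)
	for f in remaining_files {
		bmark.step()
		bmark.ok()
		println(bmark.step_message_ok(f))
	}
	bmark.stop()
}
```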

@@ -26,6 +26,11 @@ println( bmark.total_message('remarks about the benchmark') )
 */


+const (
+	BOK = term.ok_message('OK')
+	BFAIL = term.fail_message('FAIL')
+)
+
 pub struct Benchmark {
 pub mut:
 	bench_start_time i64
@@ -36,6 +41,10 @@ pub mut:
 	nok int
 	nfail int
 	verbose bool
+	nexpected_steps int
+	cstep int
+	bok string
+	bfail string
 }

 pub fn new_benchmark() Benchmark {
@@ -45,12 +54,17 @@ pub fn new_benchmark() Benchmark {
 	}
 }

+pub fn (b mut Benchmark) set_total_expected_steps(n int) {
+	b.nexpected_steps = n
+}
+
 pub fn (b mut Benchmark) stop() {
 	b.bench_end_time = benchmark.now()
 }

 pub fn (b mut Benchmark) step() {
 	b.step_start_time = benchmark.now()
+	b.cstep++
 }

 pub fn (b mut Benchmark) fail() {
@@ -81,16 +95,45 @@ pub fn (b mut Benchmark) neither_fail_nor_ok() {
 	b.step_end_time = benchmark.now()
 }

+pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
+	mut timed_line := ''
+	if b.nexpected_steps > 0 {
+		mut sprogress := ''
+		if b.nexpected_steps < 10 {
+			sprogress = '${b.cstep:1d}/${b.nexpected_steps:1d}'
+		}
+		if b.nexpected_steps >= 10 && b.nexpected_steps < 100 {
+			sprogress = '${b.cstep:2d}/${b.nexpected_steps:2d}'
+		}
+		if b.nexpected_steps >= 100 && b.nexpected_steps < 1000 {
+			sprogress = '${b.cstep:3d}/${b.nexpected_steps:3d}'
+		}
+		timed_line = b.tdiff_in_ms('[${sprogress}] $msg', b.step_start_time, b.step_end_time)
+	}
+	else {
+		timed_line = b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
+	}
+	return '${label:-5s}${timed_line}'
+}
+
 pub fn (b &Benchmark) step_message(msg string) string {
-	return b.tdiff_in_ms(msg, b.step_start_time, b.step_end_time)
+	return b.step_message_with_label('', msg)
+}
+
+pub fn (b &Benchmark) step_message_ok(msg string) string {
+	return b.step_message_with_label(BOK, msg)
+}
+
+pub fn (b &Benchmark) step_message_fail(msg string) string {
+	return b.step_message_with_label(BFAIL, msg)
 }

 pub fn (b &Benchmark) total_message(msg string) string {
-	mut tmsg := '$msg \n ok, fail, total = ' + term.ok_message('${b.nok:5d}') + ', ' + if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' + '${b.ntotal:5d}'
+	mut tmsg := '${msg}\n ok, fail, total = ' + term.ok_message('${b.nok:5d}') + ', ' + if b.nfail > 0 { term.fail_message('${b.nfail:5d}') } else { '${b.nfail:5d}' } + ', ' + '${b.ntotal:5d}'
 	if b.verbose {
 		tmsg = '<=== total time spent $tmsg'
 	}
-	return b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
+	return ' ' + b.tdiff_in_ms(tmsg, b.bench_start_time, b.bench_end_time)
 }

 pub fn (b &Benchmark) total_duration() i64 {
@@ -101,7 +144,7 @@ pub fn (b &Benchmark) total_duration() i64 {
 fn (b &Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
 	if b.verbose {
 		tdiff := (eticks - sticks)
-		return '${tdiff:6lld} ms | $s'
+		return '${tdiff:6lld} ms $s'
 	}
 	return s
 }
@@ -109,4 +152,3 @@ fn (b &Benchmark) tdiff_in_ms(s string, sticks i64, eticks i64) string {
 fn now() i64 {
 	return time.ticks()
 }
-
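
In one place, the extended API: `set_total_expected_steps()` primes the `[cstep/nexpected_steps]` progress prefix that `step()` advances, and `step_message_ok()`/`step_message_fail()` prepend the pre-colored `BOK`/`BFAIL` labels. An illustrative run (the exact spacing and colors come from `term`, so the comments only approximate the output):

```v
import benchmark

fn main() {
	mut bmark := benchmark.new_benchmark()
	bmark.set_total_expected_steps(2)
	bmark.step()
	bmark.ok()
	println(bmark.step_message_ok('first_test.v')) // roughly: OK   [1/2] first_test.v
	bmark.step()
	bmark.fail()
	println(bmark.step_message_fail('second_test.v')) // roughly: FAIL [2/2] second_test.v
	bmark.stop()
	println(bmark.total_message('running 2 steps'))
}
```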

@@ -510,10 +510,10 @@ pub fn (v mut V) generate_main() {
 			cgen.genln('$cgen.fn_main;')
 			v.gen_main_end('return 0')
 		}
-		else if !v.pref.is_repl {
+		else if v.v_fmt_file=='' && !v.pref.is_repl {
 			verror('function `main` is not declared in the main module')
 		}
 	}
 	else if v.pref.is_test {
 		if v.table.main_exists() {
 			verror('test files cannot have function `main`')
@@ -525,13 +525,13 @@ pub fn (v mut V) generate_main() {
 		// Generate a C `main`, which calls every single test function
 		v.gen_main_start(false)
 		if v.pref.is_stats {
-			cgen.genln('BenchedTests bt = main__start_testing();')
+			cgen.genln('BenchedTests bt = main__start_testing(${test_fn_names.len},tos3("$v.dir"));')
 		}
 		for tfname in test_fn_names {
 			if v.pref.is_stats {
 				cgen.genln('BenchedTests_testing_step_start(&bt, tos3("$tfname"));')
 			}
-			cgen.genln('$tfname ();')
+			cgen.genln('${tfname}();')
 			if v.pref.is_stats {
 				cgen.genln('BenchedTests_testing_step_end(&bt);')
 			}
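
Because `cgen.genln()` only appends interpolated C source, the `-stats` branch boils down to emitting a C `main` that brackets every test call with the `BenchedTests` callbacks. A sketch of what the interpolations expand to, printing the would-be C lines for two hypothetical test functions instead of compiling them:

```v
fn main() {
	test_fn_names := ['main__test_parser', 'main__test_scanner']
	dir := 'foo_test.v'
	println('BenchedTests bt = main__start_testing(${test_fn_names.len},tos3("$dir"));')
	for tfname in test_fn_names {
		println('BenchedTests_testing_step_start(&bt, tos3("$tfname"));')
		println('${tfname}();')
		println('BenchedTests_testing_step_end(&bt);')
	}
}
```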

@@ -4,6 +4,6 @@ import os
 import time

 const (
-os_used = os.MAX_PATH
-time_used = time.now()
+	os_used = os.MAX_PATH
+	time_used = time.now()
 )

@@ -4,6 +4,6 @@ import os
 import time

 const (
-os_used = os.MAX_PATH
-time_used = time.now()
+	os_used = os.MAX_PATH
+	time_used = time.now()
 )

@@ -17,7 +17,8 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname st
 		false
 	}
 	else {
-		true}}
+		true}
+	}
 	final_filename := if use_relative_paths { filename } else { os.realpath(filename) }
 	final_funcname := funcname.replace('main__', '').replace('__', '.')
 	mut fail_message := 'FAILED assertion'
@@ -32,5 +33,4 @@ fn cb_assertion_failed(filename string, line int, sourceline string, funcname st
 fn cb_assertion_ok(filename string, line int, sourceline string, funcname string) {
 	// do nothing for now on an OK assertion
 	// println('OK ${line:5d}|$sourceline ')
 }
-

@@ -1,72 +1,75 @@
 module main
-///////////////////////////////////////////////////////////////////////
-/// This file will get compiled as a part of the same module,
-/// in which a given _test.v file is, when v is given -stats argument
-/// The methods defined here are called back by the test program's
-/// main function, generated by compiler/main.v so that customizing the
-/// look & feel of the results is easy, since it is done in normal V
-/// code, instead of in embedded C ...
-///////////////////////////////////////////////////////////////////////
+// /////////////////////////////////////////////////////////////////////
+// / This file will get compiled as a part of the same module,
+// / in which a given _test.v file is, when v is given -stats argument
+// / The methods defined here are called back by the test program's
+// / main function, generated by compiler/main.v so that customizing the
+// / look & feel of the results is easy, since it is done in normal V
+// / code, instead of in embedded C ...
+// /////////////////////////////////////////////////////////////////////

 import (
-	os
 	term
 	filepath
 	benchmark
 )

+const (
+	INNER_INDENT = ' '
+)
+
 struct BenchedTests {
 mut:
 	oks int
 	fails int
 	test_suit_file string
 	step_func_name string
 	bench benchmark.Benchmark
 }

-/////////////////////////////////////////////////////////////////////
+// ///////////////////////////////////////////////////////////////////

 // Called at the start of the test program produced by `v -stats file_test.v`
-fn start_testing() BenchedTests {
-	mut b := BenchedTests{ bench: benchmark.new_benchmark() }
-	b.test_suit_file = os.executable() + '.v'
+fn start_testing(total_number_of_tests int, vfilename string) BenchedTests {
+	mut b := BenchedTests{
+		bench: benchmark.new_benchmark()
+	}
+	b.bench.set_total_expected_steps(total_number_of_tests)
+	b.test_suit_file = vfilename
 	println('running tests in: $b.test_suit_file')
 	return b
 }

 // Called before each test_ function, defined in file_test.v
 fn (b mut BenchedTests) testing_step_start(stepfunc string) {
-	b.step_func_name = stepfunc.replace('main__','').replace('__','.')
+	b.step_func_name = stepfunc.replace('main__', '').replace('__', '.')
 	b.oks = C.g_test_oks
 	b.fails = C.g_test_fails
 	b.bench.step()
 }

 // Called after each test_ function, defined in file_test.v
 fn (b mut BenchedTests) testing_step_end() {
 	ok_diff := C.g_test_oks - b.oks
 	fail_diff := C.g_test_fails - b.fails
-	//////////////////////////////////////////////////////////////////
+	// ////////////////////////////////////////////////////////////////
 	if ok_diff == 0 && fail_diff == 0 {
 		b.bench.neither_fail_nor_ok()
-		println(' ' + b.bench.step_message('NO asserts | ') + b.fn_name() )
+		println(INNER_INDENT + b.bench.step_message_ok('NO asserts | ') + b.fn_name())
 		return
 	}
-	//////////////////////////////////////////////////////////////////
+	// ////////////////////////////////////////////////////////////////
 	if ok_diff > 0 {
 		b.bench.ok_many(ok_diff)
 	}
 	if fail_diff > 0 {
 		b.bench.fail_many(fail_diff)
 	}
-	//////////////////////////////////////////////////////////////////
+	// ////////////////////////////////////////////////////////////////
 	if ok_diff > 0 && fail_diff == 0 {
-		println(ok_text('OK') + b.bench.step_message(nasserts(ok_diff)) + b.fn_name() )
+		println(INNER_INDENT + b.bench.step_message_ok(nasserts(ok_diff)) + b.fn_name())
 		return
 	}
 	if fail_diff > 0 {
-		println(fail_text('FAIL') + b.bench.step_message(nasserts(fail_diff)) + b.fn_name() )
+		println(INNER_INDENT + b.bench.step_message_fail(nasserts(fail_diff)) + b.fn_name())
 		return
 	}
 }
@@ -78,22 +81,25 @@ fn (b &BenchedTests) fn_name() string {
 // Called at the end of the test program produced by `v -stats file_test.v`
 fn (b mut BenchedTests) end_testing() {
 	b.bench.stop()
-	println( ' ' + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"' ) )
+	println(INNER_INDENT + b.bench.total_message('running V tests in "' + filepath.filename(b.test_suit_file) + '"'))
 }

-/////////////////////////////////////////////////////////////////////
+// ///////////////////////////////////////////////////////////////////

 fn nasserts(n int) string {
-	if n==0 { return '${n:2d} asserts | ' }
-	if n==1 { return '${n:2d} assert | ' }
-	return '${n:2d} asserts | '
+	if n == 0 {
+		return '${n:2d} asserts | '
+	}
+	if n == 1 {
+		return '${n:2d} assert | '
+	}
+	if n < 10 {
+		return '${n:2d} asserts | '
+	}
+	if n < 100 {
+		return '${n:3d} asserts | '
+	}
+	if n < 1000 {
+		return '${n:4d} asserts | '
+	}
+	return '${n:5d} asserts | '
 }
-
-fn ok_text(s string) string {
-	return term.ok_message('${s:5s}')
-}
-
-fn fail_text(s string) string {
-	return term.fail_message('${s:5s}')
-}
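
Since this prelude is compiled into the same module as the test file, the callbacks are ordinary V functions. A hypothetical one-test `-stats` run reduces to the sequence below; the compiler normally generates it, and `main__test_hello` stands in for the C-mangled name that `testing_step_start()` strips back to `test_hello`:

```v
fn test_hello() {
	assert 1 + 1 == 2
}

fn main() {
	mut bt := start_testing(1, 'hello_test.v')
	bt.testing_step_start('main__test_hello')
	test_hello()
	bt.testing_step_end()
	bt.end_testing()
}
```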

@@ -1,6 +1,5 @@
 // Build and run files in ./prod/ folder, comparing their output to *.expected.txt files.
 // (Similar to REPL tests, but in -prod mode.)
-
 // import os
 import compiler.tests.repl.runner
 import benchmark
@@ -15,15 +14,15 @@ fn test_all_v_prod_files() {
 		bmark.step()
 		fres := runner.run_prod_file(options.wd, options.vexec, file) or {
 			bmark.fail()
-			eprintln( bmark.step_message(err) )
+			eprintln(bmark.step_message_fail(err))
 			assert false
 			continue
 		}
 		bmark.ok()
-		println( bmark.step_message(fres) )
+		println(bmark.step_message_ok(fres))
 		assert true
 	}
 	bmark.stop()
-	println( bmark.total_message('total time spent running PROD files') )
+	println(bmark.total_message('total time spent running PROD files'))
 }
 }

@@ -8,15 +8,17 @@ fn test_the_v_compiler_can_be_invoked() {
 	vexec := runner.full_path_to_v(5)
 	println('vexecutable: $vexec')
 	assert vexec != ''
-
 	vcmd := '"$vexec" --version'
-	r := os.exec(vcmd) or { panic(err) }
-	//println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
+	r := os.exec(vcmd) or {
+		panic(err)
+	}
+	// println('"$vcmd" exit_code: $r.exit_code | output: $r.output')
 	assert r.exit_code == 0
-
 	vcmd_error := '"$vexec" nonexisting.v'
-	r_error := os.exec(vcmd_error) or { panic(err) }
-	//println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
+	r_error := os.exec(vcmd_error) or {
+		panic(err)
+	}
+	// println('"$vcmd_error" exit_code: $r_error.exit_code | output: $r_error.output')
 	assert r_error.exit_code == 1
 	assert r_error.output == '`nonexisting.v` does not exist'
 }
@@ -28,15 +30,14 @@ fn test_all_v_repl_files() {
 		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
 			bmark.fail()
-			eprintln( bmark.step_message(err) )
+			eprintln(bmark.step_message_fail(err))
 			assert false
 			continue
 		}
 		bmark.ok()
-		println( bmark.step_message(fres) )
+		println(bmark.step_message_ok(fres))
 		assert true
 	}
 	bmark.stop()
-	println( bmark.total_message('total time spent running REPL files') )
+	println(bmark.total_message('total time spent running REPL files'))
 }
-

@@ -4,22 +4,21 @@ import compiler.tests.repl.runner
 import log
 import benchmark

-fn main(){
+fn main() {
 	mut logger := log.Log{}
 	logger.set_level(log.DEBUG)
 	options := runner.new_options()
-
 	mut bmark := benchmark.new_benchmark()
 	for file in options.files {
 		bmark.step()
 		fres := runner.run_repl_file(options.wd, options.vexec, file) or {
 			bmark.fail()
-			logger.error( bmark.step_message( err ) )
+			logger.error(bmark.step_message_fail(err))
 			continue
 		}
 		bmark.ok()
-		logger.info( bmark.step_message( fres ) )
+		logger.info(bmark.step_message_ok(fres))
 	}
 	bmark.stop()
-	logger.info( bmark.total_message('total time spent running REPL files') )
+	logger.info(bmark.total_message('total time spent running REPL files'))
 }

@@ -23,6 +23,7 @@ struct C.dirent {

 fn C.readdir(voidptr) C.dirent

+
 pub const (
 	args = []string
 	MAX_PATH = 4096
@@ -972,7 +973,7 @@ pub fn walk_ext(path, ext string) []string {
 			continue
 		}
 		p := path + separator + file
-		if os.is_dir(p) {
+		if os.is_dir(p) && !os.is_link(p) {
 			res << walk_ext(p, ext)
 		}
 		else if file.ends_with(ext) {
@@ -993,7 +994,7 @@ pub fn walk(path string, fnc fn(path string)) {
 	}
 	for file in files {
 		p := path + os.path_separator + file
-		if os.is_dir(p) {
+		if os.is_dir(p) && !os.is_link(p) {
 			walk(p, fnc)
 		}
 		else if os.exists(p) {
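
With the added `!os.is_link(p)` guard, both walkers skip directory symlinks, so a link pointing back up the tree can no longer recurse forever. For example, test discovery over a whole checkout stays safe:

```v
import os

fn main() {
	// collect all *_test.v files below the current directory;
	// directory symlinks are no longer followed
	files := os.walk_ext('.', '_test.v')
	for f in files {
		println(f)
	}
}
```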