tests: parallelize compiler_errors_test.v using channels & threads
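
Instead of compiling each test program serially through check_path() and summing the failures, test_all() now collects every test into an array of TaskDescription values, pushes them into a sync.Channel, and lets runtime.nr_jobs() worker threads process them in parallel. Each worker pops tasks until it reaches a sentinel task with an empty path, times task.execute() with a stopwatch, and pushes the finished task into a results channel; the main thread pops the results and reports each one with the duration that was measured on the worker. To support that reporting, the benchmark module gains a step_message_with_label_and_duration() method and makes its b_ok/b_fail/b_skip message constants public.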
commit 55b8cc1bb2
parent 34d03801de

--- a/vlib/benchmark/benchmark.v
+++ b/vlib/benchmark/benchmark.v
@@ -3,7 +3,7 @@ module benchmark
 import time
 import term
 
-const (
+pub const (
 	b_ok = term.ok_message('OK  ')
 	b_fail = term.fail_message('FAIL')
 	b_skip = term.warn_message('SKIP')
@@ -111,8 +111,8 @@ pub fn (mut b Benchmark) measure(label string) i64 {
 	return res
 }
 
-pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
-	timed_line := b.tdiff_in_ms(msg, b.step_timer.elapsed().microseconds())
+pub fn (b &Benchmark) step_message_with_label_and_duration(label string, msg string, sduration time.Duration) string {
+	timed_line := b.tdiff_in_ms(msg, sduration.microseconds())
 	if b.nexpected_steps > 1 {
 		mut sprogress := ''
 		if b.nexpected_steps < 10 {
@@ -137,6 +137,10 @@ pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
 	return '${label:-5s}${timed_line}'
 }
 
+pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
+	return b.step_message_with_label_and_duration(label, msg, b.step_timer.elapsed())
+}
+
 pub fn (b &Benchmark) step_message(msg string) string {
 	return b.step_message_with_label('', msg)
 }
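
The split above turns step_message_with_label() into a thin wrapper around the new step_message_with_label_and_duration(), so callers that measure elapsed time themselves can pass a duration in explicitly instead of relying on the benchmark's internal step_timer. That is what the parallel runner below needs: the work is timed on a worker thread, but reported on the main thread. A minimal usage sketch, mirroring the test code that follows:

	sw := time.new_stopwatch({})
	task.execute()
	task.took = sw.elapsed() // measured on the worker thread
	// ...later, on the main thread:
	eprintln(bench.step_message_with_label_and_duration(benchmark.b_ok, task.path, task.took))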

--- a/vlib/v/compiler_errors_test.v
+++ b/vlib/v/compiler_errors_test.v
@@ -2,9 +2,25 @@ import os
 import term
 import v.util
 import v.util.vtest
+import time
+import sync
+import runtime
+import benchmark
+
+struct TaskDescription {
+	vexe             string
+	dir              string
+	voptions         string
+	result_extension string
+	path             string
+mut:
+	is_error         bool
+	expected         string
+	found___         string
+	took             time.Duration
+}
 
 fn test_all() {
-	mut total_errors := 0
 	vexe := os.getenv('VEXE')
 	vroot := os.dir(vexe)
 	os.chdir(vroot)
@@ -17,60 +33,115 @@ fn test_all() {
 	parser_dir := 'vlib/v/parser/tests'
 	parser_tests := get_tests_in_dir(parser_dir)
 	// -prod so that warns are errors
-	total_errors += check_path(vexe, classic_dir, '-prod', '.out', classic_tests)
-	total_errors += check_path(vexe, global_dir, '--enable-globals', '.out', global_tests)
-	total_errors += check_path(vexe, classic_dir, '--enable-globals run', '.run.out',
-		['globals_error.vv'])
-	total_errors += check_path(vexe, run_dir, 'run', '.run.out', run_tests)
-	total_errors += check_path(vexe, parser_dir, '-prod', '.out', parser_tests)
+	mut tasks := []TaskDescription{}
+	tasks << new_tasks(vexe, classic_dir, '-prod', '.out', classic_tests)
+	tasks << new_tasks(vexe, global_dir, '--enable-globals', '.out', global_tests)
+	tasks <<
+		new_tasks(vexe, classic_dir, '--enable-globals run', '.run.out', ['globals_error.vv'])
+	tasks << new_tasks(vexe, run_dir, 'run', '.run.out', run_tests)
+	tasks << new_tasks(vexe, parser_dir, '-prod', '.out', parser_tests)
+	tasks.run()
+	total_errors := tasks.filter(it.is_error).len
 	assert total_errors == 0
 }
 
-fn get_tests_in_dir(dir string) []string {
-	files := os.ls(dir) or {
-		panic(err)
-	}
-	mut tests := files.filter(it.ends_with('.vv'))
-	tests.sort()
-	return tests
-}
-
-fn check_path(vexe, dir, voptions, result_extension string, tests []string) int {
-	mut nb_fail := 0
+fn new_tasks(vexe, dir, voptions, result_extension string, tests []string) []TaskDescription {
 	paths := vtest.filter_vtest_only(tests, {
 		basepath: dir
 	})
+	mut res := []TaskDescription{}
 	for path in paths {
-		program := path.replace('.vv', '.v')
-		print(path + ' ')
-		os.cp(path, program) or {
-			panic(err)
-		}
-		res := os.exec('$vexe $voptions $program') or {
-			panic(err)
-		}
-		mut expected := os.read_file(program.replace('.v', '') + result_extension) or {
-			panic(err)
-		}
-		expected = clean_line_endings(expected)
-		found := clean_line_endings(res.output)
-		if expected != found {
-			println(term.red('FAIL'))
-			println('============')
-			println('expected:')
-			println(expected)
-			println('============')
-			println('found:')
-			println(found)
-			println('============\n')
-			diff_content(expected, found)
-			nb_fail++
-		} else {
-			println(term.green('OK'))
-			os.rm(program)
-		}
-	}
-	return nb_fail
+		res << TaskDescription{
+			vexe: vexe
+			dir: dir
+			voptions: voptions
+			result_extension: result_extension
+			path: path
+		}
+	}
+	return res
+}
+
+// process an array of tasks in parallel, using no more than vjobs worker threads
+fn (mut tasks []TaskDescription) run() {
+	vjobs := runtime.nr_jobs()
+	mut bench := benchmark.new_benchmark()
+	bench.set_total_expected_steps(tasks.len)
+	// TODO: close work channel instead of using sentinel items
+	task_sentinel := TaskDescription{
+		path: ''
+	}
+	mut work := sync.new_channel<TaskDescription>(tasks.len + vjobs)
+	mut results := sync.new_channel<TaskDescription>(tasks.len)
+	for i in 0 .. tasks.len {
+		work.push(&tasks[i])
+	}
+	for _ in 0 .. vjobs {
+		work.push(&task_sentinel)
+		go work_processor(mut work, mut results)
+	}
+	for _ in 0 .. tasks.len {
+		mut task := TaskDescription{}
+		results.pop(&task)
+		bench.step()
+		if task.is_error {
+			bench.fail()
+			eprintln(bench.step_message_with_label_and_duration(benchmark.b_fail, task.path,
+				task.took))
+			println('============')
+			println('expected:')
+			println(task.expected)
+			println('============')
+			println('found:')
+			println(task.found___)
+			println('============\n')
+			diff_content(task.expected, task.found___)
+		} else {
+			bench.ok()
+			eprintln(bench.step_message_with_label_and_duration(benchmark.b_ok, task.path,
+				task.took))
+		}
+	}
+	bench.stop()
+	eprintln(term.h_divider('-'))
+	eprintln(bench.total_message('all tests'))
+}
+
+// a single worker thread spends its time getting work from the `work` channel,
+// processing the task, and then putting the task in the `results` channel
+fn work_processor(mut work sync.Channel, mut results sync.Channel) {
+	for {
+		mut task := TaskDescription{}
+		work.pop(&task)
+		if task.path == '' {
+			break
+		}
+		sw := time.new_stopwatch({})
+		task.execute()
+		task.took = sw.elapsed()
+		results.push(&task)
+	}
+}
+
+// actual processing; NB: no output is done here at all
+fn (mut task TaskDescription) execute() {
+	program := task.path.replace('.vv', '.v')
+	os.cp(task.path, program) or {
+		panic(err)
+	}
+	res := os.exec('$task.vexe $task.voptions $program') or {
+		panic(err)
+	}
+	mut expected := os.read_file(program.replace('.v', '') + task.result_extension) or {
+		panic(err)
+	}
+	task.expected = clean_line_endings(expected)
+	task.found___ = clean_line_endings(res.output)
+	if task.expected != task.found___ {
+		task.is_error = true
+	} else {
+		os.rm(program)
+	}
 }
 
 fn clean_line_endings(s string) string {
@@ -90,3 +161,12 @@ fn diff_content(s1, s2 string) {
 	println(util.color_compare_strings(diff_cmd, s1, s2))
 	println('============\n')
 }
+
+fn get_tests_in_dir(dir string) []string {
+	files := os.ls(dir) or {
+		panic(err)
+	}
+	mut tests := files.filter(it.ends_with('.vv'))
+	tests.sort()
+	return tests
+}
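
For reference, here is the sentinel-based worker-pool pattern from the diff, reduced to a minimal standalone V program. Job, worker() and the squaring payload are illustrative stand-ins, while the calls themselves (sync.new_channel<T>, push/pop, runtime.nr_jobs(), go) are the same ones the test relies on:

import sync
import runtime

struct Job {
	id int // id == -1 marks a sentinel, meaning "no more work"
mut:
	square int
}

// a worker pops jobs until it sees a sentinel, then exits
fn worker(mut work sync.Channel, mut results sync.Channel) {
	for {
		mut job := Job{}
		work.pop(&job)
		if job.id == -1 {
			break
		}
		job.square = job.id * job.id
		results.push(&job)
	}
}

fn main() {
	njobs := 16
	nworkers := runtime.nr_jobs()
	mut jobs := []Job{}
	for i in 0 .. njobs {
		jobs << Job{
			id: i
		}
	}
	// buffer all jobs plus one sentinel per worker, so pushes never block
	mut work := sync.new_channel<Job>(njobs + nworkers)
	mut results := sync.new_channel<Job>(njobs)
	sentinel := Job{
		id: -1
	}
	for i in 0 .. njobs {
		work.push(&jobs[i])
	}
	for _ in 0 .. nworkers {
		work.push(&sentinel)
		go worker(mut work, mut results)
	}
	// collecting exactly njobs results also joins all the outstanding work
	for _ in 0 .. njobs {
		mut job := Job{}
		results.pop(&job)
		println('job $job.id squared is $job.square')
	}
}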