Compare commits

...

83 Commits

Author SHA1 Message Date
ChAoS_UnItY 5b16d5fdf4
compress.gzip / deflate: rename gzip module into deflate module (#14682)
ci/woodpecker/push/vc Pipeline was successful Details
ci/woodpecker/push/docker Pipeline was successful Details
ci/woodpecker/push/arch Pipeline was successful Details
2022-06-05 11:12:14 +02:00
yuyi 799d2dce4d
checker: check using literal argument as reference parameter (#14674) 2022-06-05 11:12:14 +02:00
ChAoS_UnItY a0f1c1ffef
cgen: fix array type as interface (fix #14677) (#14681) 2022-06-05 11:12:14 +02:00
yuyi a4829f64e8
json: fix json decode with missing map type field (#14678) 2022-06-05 11:12:14 +02:00
Wertzui123 ad923f9a20
thirdparty/sokol: apply speaker/headset bug fix from latest upstream sokol_audio.h (#14676) 2022-06-05 11:12:13 +02:00
Delyan Angelov bed93ad891
examples: simplify the shebang in the v_script.vsh example 2022-06-05 11:12:13 +02:00
Delyan Angelov cf7ae39e62
cgen: fix `for (int i = 0; i < 10; i++, a++) {` (multiple expressions in the inc part) 2022-06-05 11:12:13 +02:00
Delyan Angelov 726d0acfb6
ci: on windows-msvc, skip const_fixed_array_containing_references_to_itself_test.v 2022-06-05 11:12:13 +02:00
Delyan Angelov 5e5b931765
ci: fix macos clang failures with const_fixed_array_containing_references_to_itself_test.v 2022-06-05 11:12:13 +02:00
Delyan Angelov 55386627b2
checker: allow for references to fixed array consts inside their initialisation `const a = [ ... &a[0] ...]!` 2022-06-05 11:12:13 +02:00
Alexander Medvednikov 8bb2ccfdbc
vweb: use http.Cookie 2022-06-05 11:12:13 +02:00
yuyi bff84aab59
ast: cleanup is_lit() (#14672) 2022-06-05 11:12:13 +02:00
Delyan Angelov 54e851f8de
cgen,ci: fix cast_bool_to_int_test.v on windows-tcc 2022-06-05 11:12:13 +02:00
Delyan Angelov 3e1ff72247
cgen: do not initialise externally declared globals (with -cstrict with [c_extern]). 2022-06-05 11:12:13 +02:00
Delyan Angelov b387554840
cgen: simplify int(bool_var) casts; support [c_extern] tag for global declarations 2022-06-05 11:12:13 +02:00
yuyi 3d6d120d4c
cgen: fix autofree_variable() (fix #14576) (#14602) 2022-06-05 11:12:13 +02:00
yuyi 9699afc1fd
cgen: fix if expr with optional method call (#14600) 2022-06-05 11:12:12 +02:00
ChAoS_UnItY 4288c40bee
compress: Add gzip module & refactor compress & compress.zlib module (#14599) 2022-06-05 11:12:12 +02:00
yuyi e755008c29
vrepl: fix error for exitasdfasdf in repl (fix #14593) (#14598) 2022-06-05 11:12:12 +02:00
Delyan Angelov 28af327062
ci: vfmt vlib/v/checker/check_types.v 2022-06-05 11:12:12 +02:00
Delyan Angelov f8ad43be34
checker: speed up check_expected_call_arg, by only calling Table.type_to_str on errors 2022-06-05 11:12:12 +02:00
Hunam 28fb66118b
vlib: add `net.http.mime` (#14516) 2022-06-05 11:12:12 +02:00
Delyan Angelov f674787fac
ast.table: cache the returned values of Table.type_to_str_using_aliases/2
This results in 9% speedup of the checker stage for V itself.
2022-06-05 11:12:12 +02:00
Delyan Angelov ddb8c0ffaa
ci: vfmt vlib/v/parser/parser.v 2022-06-05 11:12:12 +02:00
Delyan Angelov 7ca1d2a93c
tools: add cmd/tools/measure/scanner_speed.v and cmd/tools/measure/parser_speed.v 2022-06-05 11:12:12 +02:00
Delyan Angelov d20eae2d34
strings: update docs for .str() and for .free() 2022-06-05 11:12:12 +02:00
yuyi 47300ae03f
ast: fix IfExpr.str() (#14595) 2022-06-05 11:12:12 +02:00
Alexander Medvednikov 34d30b0ee5
checker, cgen: c2v variadic fixes 2022-06-05 11:12:12 +02:00
yuyi ada04cfb6a
parser: fix optional with multiple statements (#14592) 2022-06-05 11:12:11 +02:00
Claudio Cesar de Sá 9dedc4b664
examples: some new graphs algorithms and improving 2 others (#14556) 2022-06-05 11:12:11 +02:00
Ben 5b97307c5a
os: fix file_ext function (#14566) 2022-06-05 11:12:11 +02:00
Wertzui123 df4dae6d40
help: add Windows to the list of supported native backends in `v help build-native` (#14589) 2022-06-05 11:12:11 +02:00
ChAoS_UnItY f174bb6d78
cgen: fix type not being unaliased (fix #14568) (#14591) 2022-06-05 11:12:11 +02:00
yuyi 48d6e702f2
checker: minor cleanup in if_expr() (#14584) 2022-06-05 11:12:11 +02:00
Hunam b70c60dcb8
ci: re-enable Go2V test suite (#14588) 2022-06-05 11:12:11 +02:00
yuyi 9459fb549f
cgen: fix fixed array of aliases struct (#14583) 2022-06-05 11:12:11 +02:00
Delyan Angelov 8cd891c9b6
v: add support for `v crun examples/hello_world.v`, use crun mode for .vsh files by default. (#14554) 2022-06-05 11:12:11 +02:00
Delyan Angelov 80879586df
examples: document how to produce the shared library, needed for a standalone run of examples/dynamic_library_loading/use.v 2022-06-05 11:12:11 +02:00
yuyi ee547994bb
parser: fix comptime if script mode (fix #6419) (#14578) 2022-06-05 11:12:11 +02:00
Delyan Angelov f268cf7858
cgen: do not #include signal.h, on -os wasm32 and -d no_segfault_handler 2022-06-05 11:12:10 +02:00
Delyan Angelov 9871434daa
ci: skip embed_file_test.v for now 2022-06-05 11:12:10 +02:00
yuyi d4b90827d0
checker, cgen: fix array index optional with if expr (#14575) 2022-06-05 11:12:10 +02:00
Delyan Angelov 215d7875e6
v: always embed file data of \$embed_file(file) constructs, even without -prod, unless `-d embed_only_metadata` is given. 2022-06-05 11:12:10 +02:00
Alexander Medvednikov 0e384bb60d
cgen: fix goto label 2022-06-05 11:12:10 +02:00
Alexander Medvednikov 0bdc213dc6
cgen: c2v infix fix 2022-06-05 11:12:10 +02:00
Larpon 2ab6ef7f0a
toml: update readme with value_opt() usage (#14569) 2022-06-05 11:12:10 +02:00
yuyi d2b097fbf8
scanner: minor cleanup in scanner.v (#14565) 2022-06-05 11:12:10 +02:00
playX 9e038d1a64
builtin.js: fix string.int method (#14564) 2022-06-05 11:12:10 +02:00
Delyan Angelov eb688d7fa1
v.util, v.builder: fix util.module_is_builtin on macos with -usecache 2022-06-05 11:12:10 +02:00
Ben 2308eec024
os: add existing_path function (#14536) 2022-06-05 11:12:10 +02:00
Delyan Angelov 0338ae98c2
strconv: make f64_to_str_lnd1 public (fix building vlang/coreutils printf) 2022-06-05 11:12:10 +02:00
Delyan Angelov 936622039f
crypto.md5: improve performance of md5.blockblock_generic 2022-06-05 11:12:10 +02:00
Delyan Angelov 77ce385a1a
builtin: use C.fwrite (buffered) for _write_buf_to_fd (#14558) 2022-06-05 11:12:09 +02:00
Delyan Angelov b7232df73c
builtin: fix sporadic linking failures on `v -cc gcc -gc boehm examples/hello_world.v` 2022-06-05 11:12:09 +02:00
yuyi 76c92715e6
checker: vfmt overload_return_type.vv (#14557) 2022-06-05 11:12:09 +02:00
Hunam 6beac6f4b7
net.http: `Response.text` -> `Response.body` (#14478) 2022-06-05 11:12:09 +02:00
yuyi 8698bb375f
scanner: fix string interpolation with inner curly braces (fix #12242) (#14553) 2022-06-05 11:12:09 +02:00
yuyi a396496b93
parser: fix cast or dump arguments ending with comma (#14552) 2022-06-05 11:12:09 +02:00
Delyan Angelov 0a3d41c5d7
docs: document explicitly, that maps support `if v := m[k] {` too 2022-06-05 11:12:09 +02:00
Delyan Angelov 8cecea9965
cgen: add support for `v -cmain SDL_main sdl_example.v` 2022-06-05 11:12:09 +02:00
yuyi e50d73983f
cgen: fix cross assign in closure (#14549) 2022-06-05 11:12:09 +02:00
yuyi 0a81074b1e
ast: fix call_expr.str() with propagate_option or propagate_result (#14550) 2022-06-05 11:12:09 +02:00
Delyan Angelov f02f2e4708
ci: temporary workaround for cross assignment in a closure leading to cgen error 2022-06-05 11:12:09 +02:00
yuyi e9da92c61d
parser, cgen: fix cross assign with parentheses (#14545) 2022-06-05 11:12:08 +02:00
yuyi 458f6f09e1
checker: fix declare assign literal with closure (#14544) 2022-06-05 11:12:08 +02:00
Delyan Angelov f4ccbcd2cf
toml: add `pub fn (d Doc) value_opt(key string) ?Any {` and some tests for toml.parse_dotted_key/1 2022-06-05 11:12:08 +02:00
yuyi 740a862dcd
parser: fix match expr case with struct init (#14538) 2022-06-05 11:12:08 +02:00
Larpon 31efb48fc5
tools: implement `v missdoc --diff oldv newv` (#14537) 2022-06-05 11:12:08 +02:00
spaceface 3d18c884d4
cgen: fix a race condition in the closure implementation (#14532) 2022-06-05 11:12:08 +02:00
Delyan Angelov 565561e0bd
checker: fix error position in `fn f() int { return 1,2 }` 2022-06-05 11:12:08 +02:00
Delyan Angelov 1fcc248d2e
ci: vfmt cmd/tools/vpm.v 2022-06-05 11:12:08 +02:00
Delyan Angelov 16bcfa7da3
tools: fix `v install https://github.com/nedpals/vex.git` (fix #14483) 2022-06-05 11:12:08 +02:00
Louis Schmieder 29fc96c040
orm: document & fix pg (#14533) 2022-06-05 11:12:08 +02:00
yuyi dca8739eeb
checker: cleanup checker.v (#14530) 2022-06-05 11:12:08 +02:00
Delyan Angelov c4783628e6
cgen: fix parallel cgen for json encoding of struct fields that have default values 2022-06-05 11:12:08 +02:00
Delyan Angelov 7541d84038
tests: make json_test.v less noisy, to see errors easier 2022-06-05 11:12:07 +02:00
Wertzui123 a7d7e34125
x.ttf: fix typo in README (#14528) 2022-06-05 11:12:07 +02:00
yuyi 82332344de
fmt: fix fn return types list ending with comma (#14529) 2022-06-05 11:12:07 +02:00
Delyan Angelov 140d494d4c
all: add support for struct field deprecation (#14527) 2022-06-05 11:12:07 +02:00
Larpon a61316ceea
ci: use missdoc as subcmd (#14524) 2022-06-05 11:12:07 +02:00
Larpon f13369dad3
tools: add v missdoc --verify mode (#14525) 2022-06-05 11:12:07 +02:00
Larpon b9c283b2b8
embed_file: rename debug_embed_file_in_prod -> force_embed_file (#14523) 2022-06-05 11:12:07 +02:00
yuyi b97a04abd9
fmt: fix fmt of Ok<[]Token>{[]} (#14522) 2022-06-05 11:12:07 +02:00
160 changed files with 18378 additions and 1038 deletions

View File

@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
timeout-minutes: 5 timeout-minutes: 5
env: env:
MOPTIONS: --no-line-numbers --relative-paths --exclude /vlib/v/ --exclude /builtin/linux_bare/ --exclude /testdata/ --exclude /tests/ vlib/ MOPTIONS: --relative-paths --exclude /vlib/v/ --exclude /builtin/linux_bare/ --exclude /testdata/ --exclude /tests/
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Build V - name: Build V
@ -35,14 +35,4 @@ jobs:
- name: Check against parent commit - name: Check against parent commit
run: | run: |
./v run cmd/tools/missdoc.v $MOPTIONS | sort > /tmp/n_v.txt ./v missdoc --diff $MOPTIONS pv/vlib vlib
cd pv/ && ../v run ../cmd/tools/missdoc.v $MOPTIONS | sort > /tmp/o_v.txt
count_new=$(cat /tmp/n_v.txt | wc -l)
count_old=$(cat /tmp/o_v.txt | wc -l)
echo "new pubs: $count_new | old pubs: $count_old"
echo "new head: $(head -n1 /tmp/n_v.txt)"
echo "old head: $(head -n1 /tmp/o_v.txt)"
if [[ ${count_new} -gt ${count_old} ]]; then
echo "The following $((count_new-count_old)) function(s) are introduced with no documentation:"
diff /tmp/n_v.txt /tmp/o_v.txt ## diff does exit(1) when files are different
fi

View File

@ -131,12 +131,12 @@ jobs:
- name: Build go2v - name: Build go2v
continue-on-error: true continue-on-error: true
run: | run: |
echo "Clone go2v" echo "Clone Go2V"
clone --depth=1 https://github.com/vlang/go2v go2v/ clone --depth=1 https://github.com/vlang/go2v go2v/
echo "Build go2v" echo "Build Go2V"
./v go2v/ ./v go2v/
## echo "Run tests for go2v" echo "Run Go2V tests"
## VJOBS=1 ./v -stats test go2v/ VJOBS=1 ./v -stats test go2v/
- name: Build vlang/pdf - name: Build vlang/pdf
continue-on-error: true continue-on-error: true

View File

@ -191,7 +191,6 @@ to create a copy of the compiler rather than replacing it with `v self`.
| `debug_codegen` | Prints automatically generated V code during the scanning phase | | `debug_codegen` | Prints automatically generated V code during the scanning phase |
| `debug_interface_table` | Prints generated interfaces during C generation | | `debug_interface_table` | Prints generated interfaces during C generation |
| `debug_interface_type_implements` | Prints debug information when checking that a type implements in interface | | `debug_interface_type_implements` | Prints debug information when checking that a type implements in interface |
| `debug_embed_file_in_prod` | Prints debug information about the embedded files with `$embed_file('somefile')` |
| `print_vweb_template_expansions` | Prints vweb compiled HTML files | | `print_vweb_template_expansions` | Prints vweb compiled HTML files |
| `time_checking` | Prints the time spent checking files and other related information | | `time_checking` | Prints the time spent checking files and other related information |
| `time_parsing` | Prints the time spent parsing files and other related information | | `time_parsing` | Prints the time spent parsing files and other related information |
@ -204,3 +203,4 @@ to create a copy of the compiler rather than replacing it with `v self`.
| `trace_thirdparty_obj_files` | Prints details about built thirdparty obj files | | `trace_thirdparty_obj_files` | Prints details about built thirdparty obj files |
| `trace_usecache` | Prints details when -usecache is used | | `trace_usecache` | Prints details when -usecache is used |
| `trace_embed_file` | Prints details when $embed_file is used | | `trace_embed_file` | Prints details when $embed_file is used |
| `embed_only_metadata` | Embed only the metadata for the file(s) with `$embed_file('somefile')`; faster; for development, *not* distribution |

View File

@ -0,0 +1,67 @@
import os
import time
import v.ast
import v.pref
import v.parser
import v.errors
import v.scanner
fn main() {
files := os.args#[1..]
if files.len > 0 && files[0].starts_with('@') {
lst_path := files[0].all_after('@')
listed_files := os.read_file(lst_path)?.split('\n')
process_files(listed_files)?
return
}
process_files(files)?
}
fn process_files(files []string) ? {
mut table := ast.new_table()
mut pref := pref.new_preferences()
pref.is_fmt = true
pref.skip_warnings = true
pref.output_mode = .silent
mut sw := time.new_stopwatch()
mut total_us := i64(0)
mut total_bytes := i64(0)
mut total_tokens := i64(0)
for f in files {
if f == '' {
continue
}
if f.ends_with('_test.v') {
continue
}
// do not measure the scanning, but only the parsing:
mut p := new_parser(f, .skip_comments, table, pref)
///
sw.restart()
_ := p.parse()
f_us := sw.elapsed().microseconds()
///
total_us += f_us
total_bytes += p.scanner.text.len
total_tokens += p.scanner.all_tokens.len
println('${f_us:10}us ${p.scanner.all_tokens.len:10} ${p.scanner.text.len:10} ${(f64(p.scanner.text.len) / p.scanner.all_tokens.len):7.3} ${p.errors.len:4} $f')
}
println('${total_us:10}us ${total_tokens:10} ${total_bytes:10} ${(f64(total_tokens) / total_bytes):7.3} | speed: ${(f64(total_bytes) / total_us):2.5f} MB/s')
}
fn new_parser(path string, comments_mode scanner.CommentsMode, table &ast.Table, pref &pref.Preferences) &parser.Parser {
mut p := &parser.Parser{
scanner: scanner.new_scanner_file(path, comments_mode, pref) or { panic(err) }
comments_mode: comments_mode
table: table
pref: pref
scope: &ast.Scope{
start_pos: 0
parent: table.global_scope
}
errors: []errors.Error{}
warnings: []errors.Warning{}
}
p.set_path(path)
return p
}

View File

@ -0,0 +1,42 @@
import os
import time
import v.scanner
import v.pref
fn main() {
files := os.args#[1..]
if files.len > 0 && files[0].starts_with('@') {
lst_path := files[0].all_after('@')
listed_files := os.read_file(lst_path)?.split('\n')
process_files(listed_files)?
return
}
process_files(files)?
}
fn process_files(files []string) ? {
mut pref := pref.new_preferences()
pref.is_fmt = true
pref.skip_warnings = true
pref.output_mode = .silent
mut sw := time.new_stopwatch()
mut total_us := i64(0)
mut total_bytes := i64(0)
mut total_tokens := i64(0)
for f in files {
if f == '' {
continue
}
if f.ends_with('_test.v') {
continue
}
sw.restart()
s := scanner.new_scanner_file(f, .skip_comments, pref)?
f_us := sw.elapsed().microseconds()
total_us += f_us
total_bytes += s.text.len
total_tokens += s.all_tokens.len
println('${f_us:10}us ${s.all_tokens.len:10} ${s.text.len:10} ${(f64(s.text.len) / s.all_tokens.len):7.3f} $f')
}
println('${total_us:10}us ${total_tokens:10} ${total_bytes:10} ${(f64(total_tokens) / total_bytes):7.3f} | speed: ${(f64(total_bytes) / total_us):2.5f} MB/s')
}

View File

@ -24,6 +24,7 @@ pub fn cprint(omessage string) {
message = term.cyan(message) message = term.cyan(message)
} }
print(message) print(message)
flush_stdout()
} }
pub fn cprint_strong(omessage string) { pub fn cprint_strong(omessage string) {
@ -32,16 +33,19 @@ pub fn cprint_strong(omessage string) {
message = term.bright_green(message) message = term.bright_green(message)
} }
print(message) print(message)
flush_stdout()
} }
pub fn cprintln(omessage string) { pub fn cprintln(omessage string) {
cprint(omessage) cprint(omessage)
println('') println('')
flush_stdout()
} }
pub fn cprintln_strong(omessage string) { pub fn cprintln_strong(omessage string) {
cprint_strong(omessage) cprint_strong(omessage)
println('') println('')
flush_stdout()
} }
pub fn verbose_trace(label string, message string) { pub fn verbose_trace(label string, message string) {

View File

@ -123,6 +123,7 @@ pub fn (mut ts TestSession) print_messages() {
// progress mode, the last line is rewritten many times: // progress mode, the last line is rewritten many times:
if is_ok && !ts.silent_mode { if is_ok && !ts.silent_mode {
print('\r$empty\r$msg') print('\r$empty\r$msg')
flush_stdout()
} else { } else {
// the last \n is needed, so SKIP/FAIL messages // the last \n is needed, so SKIP/FAIL messages
// will not get overwritten by the OK ones // will not get overwritten by the OK ones

View File

@ -200,8 +200,13 @@ fn (mut context Context) parse_options() ? {
} }
} }
fn flushed_print(s string) {
print(s)
flush_stdout()
}
fn (mut context Context) clear_line() { fn (mut context Context) clear_line() {
print(context.cline) flushed_print(context.cline)
} }
fn (mut context Context) expand_all_commands(commands []string) []string { fn (mut context Context) expand_all_commands(commands []string) []string {
@ -247,7 +252,7 @@ fn (mut context Context) run() {
println('Series: ${si:4}/${context.series:-4}, command: $cmd') println('Series: ${si:4}/${context.series:-4}, command: $cmd')
if context.warmup > 0 && run_warmups < context.commands.len { if context.warmup > 0 && run_warmups < context.commands.len {
for i in 1 .. context.warmup + 1 { for i in 1 .. context.warmup + 1 {
print('${context.cgoback}warming up run: ${i:4}/${context.warmup:-4} for ${cmd:-50s} took ${duration:6} ms ...') flushed_print('${context.cgoback}warming up run: ${i:4}/${context.warmup:-4} for ${cmd:-50s} took ${duration:6} ms ...')
mut sw := time.new_stopwatch() mut sw := time.new_stopwatch()
res := os.execute(cmd) res := os.execute(cmd)
if res.exit_code != 0 { if res.exit_code != 0 {
@ -260,9 +265,9 @@ fn (mut context Context) run() {
context.clear_line() context.clear_line()
for i in 1 .. (context.count + 1) { for i in 1 .. (context.count + 1) {
avg := f64(sum) / f64(i) avg := f64(sum) / f64(i)
print('${context.cgoback}Average: ${avg:9.3f}ms | run: ${i:4}/${context.count:-4} | took ${duration:6} ms') flushed_print('${context.cgoback}Average: ${avg:9.3f}ms | run: ${i:4}/${context.count:-4} | took ${duration:6} ms')
if context.show_output { if context.show_output {
print(' | result: ${oldres:s}') flushed_print(' | result: ${oldres:s}')
} }
mut sw := time.new_stopwatch() mut sw := time.new_stopwatch()
res := scripting.exec(cmd) or { continue } res := scripting.exec(cmd) or { continue }
@ -288,7 +293,7 @@ fn (mut context Context) run() {
context.results[icmd].atiming = new_aints(context.results[icmd].timings, context.nmins, context.results[icmd].atiming = new_aints(context.results[icmd].timings, context.nmins,
context.nmaxs) context.nmaxs)
context.clear_line() context.clear_line()
print(context.cgoback) flushed_print(context.cgoback)
mut m := map[string][]int{} mut m := map[string][]int{}
ioutputs := context.results[icmd].outputs ioutputs := context.results[icmd].outputs
for o in ioutputs { for o in ioutputs {
@ -358,7 +363,7 @@ fn (mut context Context) show_diff_summary() {
println('context: $context') println('context: $context')
} }
if int(base) > context.fail_on_maxtime { if int(base) > context.fail_on_maxtime {
print(performance_regression_label) flushed_print(performance_regression_label)
println('average time: ${base:6.1f} ms > $context.fail_on_maxtime ms threshold.') println('average time: ${base:6.1f} ms > $context.fail_on_maxtime ms threshold.')
exit(2) exit(2)
} }
@ -367,7 +372,7 @@ fn (mut context Context) show_diff_summary() {
} }
fail_threshold_max := f64(context.fail_on_regress_percent) fail_threshold_max := f64(context.fail_on_regress_percent)
if first_cmd_percentage > fail_threshold_max { if first_cmd_percentage > fail_threshold_max {
print(performance_regression_label) flushed_print(performance_regression_label)
println('${first_cmd_percentage:5.1f}% > ${fail_threshold_max:5.1f}% threshold.') println('${first_cmd_percentage:5.1f}% > ${fail_threshold_max:5.1f}% threshold.')
exit(3) exit(3)
} }

View File

@ -259,6 +259,8 @@ const (
'--relative-paths', '--relative-paths',
'-r', '-r',
'--js', '--js',
'--verify',
'--diff',
] ]
auto_complete_flags_self = [ auto_complete_flags_self = [
'-prod', '-prod',

View File

@ -183,6 +183,7 @@ fn (foptions &FormatOptions) format_pipe() {
// checker.new_checker(table, prefs).check(file_ast) // checker.new_checker(table, prefs).check(file_ast)
formatted_content := fmt.fmt(file_ast, table, prefs, foptions.is_debug) formatted_content := fmt.fmt(file_ast, table, prefs, foptions.is_debug)
print(formatted_content) print(formatted_content)
flush_stdout()
foptions.vlog('fmt.fmt worked and $formatted_content.len bytes were written to stdout.') foptions.vlog('fmt.fmt worked and $formatted_content.len bytes were written to stdout.')
} }
@ -279,6 +280,7 @@ fn (mut foptions FormatOptions) post_process_file(file string, formatted_file_pa
return return
} }
print(formatted_fc) print(formatted_fc)
flush_stdout()
} }
fn (f FormatOptions) str() string { fn (f FormatOptions) str() string {

View File

@ -6,12 +6,13 @@ import flag
const ( const (
tool_name = 'v missdoc' tool_name = 'v missdoc'
tool_version = '0.0.4' tool_version = '0.1.0'
tool_description = 'Prints all V functions in .v files under PATH/, that do not yet have documentation comments.' tool_description = 'Prints all V functions in .v files under PATH/, that do not yet have documentation comments.'
work_dir_prefix = normalise_path(os.real_path(os.wd_at_startup) + '/') work_dir_prefix = normalise_path(os.real_path(os.wd_at_startup) + os.path_separator)
) )
struct UndocumentedFN { struct UndocumentedFN {
file string
line int line int
signature string signature string
tags []string tags []string
@ -26,11 +27,15 @@ struct Options {
no_line_numbers bool no_line_numbers bool
exclude []string exclude []string
relative_paths bool relative_paths bool
mut:
verify bool
diff bool
additional_args []string
} }
fn (opt Options) report_undocumented_functions_in_path(path string) { fn (opt Options) collect_undocumented_functions_in_dir(directory string) []UndocumentedFN {
mut files := []string{} mut files := []string{}
collect(path, mut files, fn (npath string, mut accumulated_paths []string) { collect(directory, mut files, fn (npath string, mut accumulated_paths []string) {
if !npath.ends_with('.v') { if !npath.ends_with('.v') {
return return
} }
@ -39,6 +44,7 @@ fn (opt Options) report_undocumented_functions_in_path(path string) {
} }
accumulated_paths << npath accumulated_paths << npath
}) })
mut undocumented_fns := []UndocumentedFN{}
for file in files { for file in files {
if !opt.js && file.ends_with('.js.v') { if !opt.js && file.ends_with('.js.v') {
continue continue
@ -46,15 +52,16 @@ fn (opt Options) report_undocumented_functions_in_path(path string) {
if opt.exclude.len > 0 && opt.exclude.any(file.contains(it)) { if opt.exclude.len > 0 && opt.exclude.any(file.contains(it)) {
continue continue
} }
opt.report_undocumented_functions_in_file(file) undocumented_fns << opt.collect_undocumented_functions_in_file(file)
} }
return undocumented_fns
} }
fn (opt &Options) report_undocumented_functions_in_file(nfile string) { fn (opt &Options) collect_undocumented_functions_in_file(nfile string) []UndocumentedFN {
file := os.real_path(nfile) file := os.real_path(nfile)
contents := os.read_file(file) or { panic(err) } contents := os.read_file(file) or { panic(err) }
lines := contents.split('\n') lines := contents.split('\n')
mut info := []UndocumentedFN{} mut list := []UndocumentedFN{}
for i, line in lines { for i, line in lines {
if line.starts_with('pub fn') || (opt.private && (line.starts_with('fn ') if line.starts_with('pub fn') || (opt.private && (line.starts_with('fn ')
&& !(line.starts_with('fn C.') || line.starts_with('fn main')))) { && !(line.starts_with('fn C.') || line.starts_with('fn main')))) {
@ -78,14 +85,39 @@ fn (opt &Options) report_undocumented_functions_in_file(nfile string) {
} }
if grab { if grab {
clean_line := line.all_before_last(' {') clean_line := line.all_before_last(' {')
info << UndocumentedFN{i + 1, clean_line, tags} list << UndocumentedFN{
line: i + 1
signature: clean_line
tags: tags
file: file
}
} }
} }
} }
} }
} }
if info.len > 0 { return list
for undocumented_fn in info { }
fn (opt &Options) collect_undocumented_functions_in_path(path string) []UndocumentedFN {
mut undocumented_functions := []UndocumentedFN{}
if os.is_file(path) {
undocumented_functions << opt.collect_undocumented_functions_in_file(path)
} else {
undocumented_functions << opt.collect_undocumented_functions_in_dir(path)
}
return undocumented_functions
}
fn (opt &Options) report_undocumented_functions_in_path(path string) int {
mut list := opt.collect_undocumented_functions_in_path(path)
opt.report_undocumented_functions(list)
return list.len
}
fn (opt &Options) report_undocumented_functions(list []UndocumentedFN) {
if list.len > 0 {
for undocumented_fn in list {
mut line_numbers := '$undocumented_fn.line:0:' mut line_numbers := '$undocumented_fn.line:0:'
if opt.no_line_numbers { if opt.no_line_numbers {
line_numbers = '' line_numbers = ''
@ -95,10 +127,11 @@ fn (opt &Options) report_undocumented_functions_in_file(nfile string) {
} else { } else {
'' ''
} }
file := undocumented_fn.file
ofile := if opt.relative_paths { ofile := if opt.relative_paths {
nfile.replace(work_dir_prefix, '') file.replace(work_dir_prefix, '')
} else { } else {
os.real_path(nfile) os.real_path(file)
} }
if opt.deprecated { if opt.deprecated {
println('$ofile:$line_numbers$undocumented_fn.signature $tags_str') println('$ofile:$line_numbers$undocumented_fn.signature $tags_str')
@ -118,6 +151,54 @@ fn (opt &Options) report_undocumented_functions_in_file(nfile string) {
} }
} }
fn (opt &Options) diff_undocumented_functions_in_paths(path_old string, path_new string) []UndocumentedFN {
old := os.real_path(path_old)
new := os.real_path(path_new)
mut old_undocumented_functions := opt.collect_undocumented_functions_in_path(old)
mut new_undocumented_functions := opt.collect_undocumented_functions_in_path(new)
mut differs := []UndocumentedFN{}
if new_undocumented_functions.len > old_undocumented_functions.len {
for new_undoc_fn in new_undocumented_functions {
new_relative_file := new_undoc_fn.file.replace(new, '').trim_string_left(os.path_separator)
mut found := false
for old_undoc_fn in old_undocumented_functions {
old_relative_file := old_undoc_fn.file.replace(old, '').trim_string_left(os.path_separator)
if new_relative_file == old_relative_file
&& new_undoc_fn.signature == old_undoc_fn.signature {
found = true
break
}
}
if !found {
differs << new_undoc_fn
}
}
}
differs.sort_with_compare(sort_undoc_fns)
return differs
}
fn sort_undoc_fns(a &UndocumentedFN, b &UndocumentedFN) int {
if a.file < b.file {
return -1
}
if a.file > b.file {
return 1
}
// same file sort by signature
else {
if a.signature < b.signature {
return -1
}
if a.signature > b.signature {
return 1
}
return 0
}
}
fn normalise_path(path string) string { fn normalise_path(path string) string {
return path.replace('\\', '/') return path.replace('\\', '/')
} }
@ -145,17 +226,15 @@ fn collect_tags(line string) []string {
} }
fn main() { fn main() {
if os.args.len == 1 { mut fp := flag.new_flag_parser(os.args[1..]) // skip the "v" command.
println('Usage: $tool_name PATH \n$tool_description\n$tool_name -h for more help...')
exit(1)
}
mut fp := flag.new_flag_parser(os.args[1..])
fp.application(tool_name) fp.application(tool_name)
fp.version(tool_version) fp.version(tool_version)
fp.description(tool_description) fp.description(tool_description)
fp.arguments_description('PATH [PATH]...') fp.arguments_description('PATH [PATH]...')
fp.skip_executable() // skip the "missdoc" command.
// Collect tool options // Collect tool options
opt := Options{ mut opt := Options{
show_help: fp.bool('help', `h`, false, 'Show this help text.') show_help: fp.bool('help', `h`, false, 'Show this help text.')
deprecated: fp.bool('deprecated', `d`, false, 'Include deprecated functions in output.') deprecated: fp.bool('deprecated', `d`, false, 'Include deprecated functions in output.')
private: fp.bool('private', `p`, false, 'Include private functions in output.') private: fp.bool('private', `p`, false, 'Include private functions in output.')
@ -164,16 +243,58 @@ fn main() {
collect_tags: fp.bool('tags', `t`, false, 'Also print function tags if any is found.') collect_tags: fp.bool('tags', `t`, false, 'Also print function tags if any is found.')
exclude: fp.string_multi('exclude', `e`, '') exclude: fp.string_multi('exclude', `e`, '')
relative_paths: fp.bool('relative-paths', `r`, false, 'Use relative paths in output.') relative_paths: fp.bool('relative-paths', `r`, false, 'Use relative paths in output.')
diff: fp.bool('diff', 0, false, 'exit(1) and show difference between two PATH inputs, return 0 otherwise.')
verify: fp.bool('verify', 0, false, 'exit(1) if documentation is missing, 0 otherwise.')
} }
opt.additional_args = fp.finalize() or { panic(err) }
if opt.show_help { if opt.show_help {
println(fp.usage()) println(fp.usage())
exit(0) exit(0)
} }
for path in os.args[1..] { if opt.additional_args.len == 0 {
if os.is_file(path) { println(fp.usage())
opt.report_undocumented_functions_in_file(path) eprintln('Error: $tool_name is missing PATH input')
} else { exit(1)
opt.report_undocumented_functions_in_path(path) }
// Allow short-long versions to prevent false positive situations, should
// the user miss a `-`. E.g.: the `-verify` flag would be ignored and missdoc
// will return 0 for success plus a list of any undocumented functions.
if '-verify' in opt.additional_args {
opt.verify = true
}
if '-diff' in opt.additional_args {
opt.diff = true
}
if opt.diff {
if opt.additional_args.len < 2 {
println(fp.usage())
eprintln('Error: $tool_name --diff needs two valid PATH inputs')
exit(1)
}
path_old := opt.additional_args[0]
path_new := opt.additional_args[1]
if !(os.is_file(path_old) || os.is_dir(path_old)) || !(os.is_file(path_new)
|| os.is_dir(path_new)) {
println(fp.usage())
eprintln('Error: $tool_name --diff needs two valid PATH inputs')
exit(1)
}
list := opt.diff_undocumented_functions_in_paths(path_old, path_new)
if list.len > 0 {
opt.report_undocumented_functions(list)
exit(1)
}
exit(0)
}
mut total := 0
for path in opt.additional_args {
if os.is_file(path) || os.is_dir(path) {
total += opt.report_undocumented_functions_in_path(path)
} }
} }
if opt.verify && total > 0 {
exit(1)
}
} }

View File

@ -208,24 +208,24 @@ fn vpm_install_from_vpm(module_names []string) {
println('VPM needs `$vcs` to be installed.') println('VPM needs `$vcs` to be installed.')
continue continue
} }
mod_name_as_path := mod.name.replace('.', os.path_separator).replace('-', '_').to_lower() //
final_module_path := os.real_path(os.join_path(settings.vmodules_path, mod_name_as_path)) minfo := mod_name_info(mod.name)
if os.exists(final_module_path) { if os.exists(minfo.final_module_path) {
vpm_update([name]) vpm_update([name])
continue continue
} }
println('Installing module "$name" from "$mod.url" to "$final_module_path" ...') println('Installing module "$name" from "$mod.url" to "$minfo.final_module_path" ...')
vcs_install_cmd := supported_vcs_install_cmds[vcs] vcs_install_cmd := supported_vcs_install_cmds[vcs]
cmd := '$vcs_install_cmd "$mod.url" "$final_module_path"' cmd := '$vcs_install_cmd "$mod.url" "$minfo.final_module_path"'
verbose_println(' command: $cmd') verbose_println(' command: $cmd')
cmdres := os.execute(cmd) cmdres := os.execute(cmd)
if cmdres.exit_code != 0 { if cmdres.exit_code != 0 {
errors++ errors++
println('Failed installing module "$name" to "$final_module_path" .') println('Failed installing module "$name" to "$minfo.final_module_path" .')
print_failed_cmd(cmd, cmdres) print_failed_cmd(cmd, cmdres)
continue continue
} }
resolve_dependencies(name, final_module_path, module_names) resolve_dependencies(name, minfo.final_module_path, module_names)
} }
if errors > 0 { if errors > 0 {
exit(1) exit(1)
@ -270,7 +270,7 @@ fn vpm_install_from_vcs(module_names []string, vcs_key string) {
} }
repo_name := url.substr(second_cut_pos + 1, first_cut_pos) repo_name := url.substr(second_cut_pos + 1, first_cut_pos)
mut name := repo_name + os.path_separator + mod_name mut name := os.join_path(repo_name, mod_name)
mod_name_as_path := name.replace('-', '_').to_lower() mod_name_as_path := name.replace('-', '_').to_lower()
mut final_module_path := os.real_path(os.join_path(settings.vmodules_path, mod_name_as_path)) mut final_module_path := os.real_path(os.join_path(settings.vmodules_path, mod_name_as_path))
if os.exists(final_module_path) { if os.exists(final_module_path) {
@ -297,20 +297,19 @@ fn vpm_install_from_vcs(module_names []string, vcs_key string) {
if os.exists(vmod_path) { if os.exists(vmod_path) {
data := os.read_file(vmod_path) or { return } data := os.read_file(vmod_path) or { return }
vmod := parse_vmod(data) vmod := parse_vmod(data)
mod_path := os.real_path(os.join_path(settings.vmodules_path, vmod.name.replace('.', minfo := mod_name_info(vmod.name)
os.path_separator))) println('Relocating module from "$name" to "$vmod.name" ( "$minfo.final_module_path" ) ...')
println('Relocating module from "$name" to "$vmod.name" ( "$mod_path" ) ...') if os.exists(minfo.final_module_path) {
if os.exists(mod_path) { println('Warning module "$minfo.final_module_path" already exsits!')
println('Warning module "$mod_path" already exsits!') println('Removing module "$minfo.final_module_path" ...')
println('Removing module "$mod_path" ...') os.rmdir_all(minfo.final_module_path) or {
os.rmdir_all(mod_path) or {
errors++ errors++
println('Errors while removing "$mod_path" :') println('Errors while removing "$minfo.final_module_path" :')
println(err) println(err)
continue continue
} }
} }
os.mv(final_module_path, mod_path) or { os.mv(final_module_path, minfo.final_module_path) or {
errors++ errors++
println('Errors while relocating module "$name" :') println('Errors while relocating module "$name" :')
println(err) println(err)
@ -323,7 +322,7 @@ fn vpm_install_from_vcs(module_names []string, vcs_key string) {
continue continue
} }
println('Module "$name" relocated to "$vmod.name" successfully.') println('Module "$name" relocated to "$vmod.name" successfully.')
final_module_path = mod_path final_module_path = minfo.final_module_path
name = vmod.name name = vmod.name
} }
resolve_dependencies(name, final_module_path, module_names) resolve_dependencies(name, final_module_path, module_names)
@ -377,10 +376,7 @@ fn vpm_update(m []string) {
} }
mut errors := 0 mut errors := 0
for modulename in module_names { for modulename in module_names {
mut zname := modulename zname := url_to_module_name(modulename)
if mod := get_mod_by_url(modulename) {
zname = mod.name
}
final_module_path := valid_final_path_of_existing_module(modulename) or { continue } final_module_path := valid_final_path_of_existing_module(modulename) or { continue }
os.chdir(final_module_path) or {} os.chdir(final_module_path) or {}
println('Updating module "$zname" in "$final_module_path" ...') println('Updating module "$zname" in "$final_module_path" ...')
@ -503,26 +499,21 @@ fn vpm_remove(module_names []string) {
} }
fn valid_final_path_of_existing_module(modulename string) ?string { fn valid_final_path_of_existing_module(modulename string) ?string {
mut name := modulename name := if mod := get_mod_by_url(modulename) { mod.name } else { modulename }
if mod := get_mod_by_url(name) { minfo := mod_name_info(name)
name = mod.name if !os.exists(minfo.final_module_path) {
} println('No module with name "$minfo.mname_normalised" exists at $minfo.final_module_path')
mod_name_as_path := name.replace('.', os.path_separator).replace('-', '_').to_lower()
name_of_vmodules_folder := os.join_path(settings.vmodules_path, mod_name_as_path)
final_module_path := os.real_path(name_of_vmodules_folder)
if !os.exists(final_module_path) {
println('No module with name "$name" exists at $name_of_vmodules_folder')
return none return none
} }
if !os.is_dir(final_module_path) { if !os.is_dir(minfo.final_module_path) {
println('Skipping "$name_of_vmodules_folder", since it is not a folder.') println('Skipping "$minfo.final_module_path", since it is not a folder.')
return none return none
} }
vcs_used_in_dir(final_module_path) or { vcs_used_in_dir(minfo.final_module_path) or {
println('Skipping "$name_of_vmodules_folder", since it does not use a supported vcs.') println('Skipping "$minfo.final_module_path", since it does not use a supported vcs.')
return none return none
} }
return final_module_path return minfo.final_module_path
} }
fn ensure_vmodules_dir_exist() { fn ensure_vmodules_dir_exist() {
@ -573,6 +564,31 @@ fn get_installed_modules() []string {
return modules return modules
} }
struct ModNameInfo {
mut:
mname string // The-user.The-mod , *never* The-user.The-mod.git
mname_normalised string // the_user.the_mod
mname_as_path string // the_user/the_mod
final_module_path string // ~/.vmodules/the_user/the_mod
}
fn mod_name_info(mod_name string) ModNameInfo {
mut info := ModNameInfo{}
info.mname = if mod_name.ends_with('.git') { mod_name.replace('.git', '') } else { mod_name }
info.mname_normalised = info.mname.replace('-', '_').to_lower()
info.mname_as_path = info.mname_normalised.replace('.', os.path_separator)
info.final_module_path = os.real_path(os.join_path(settings.vmodules_path, info.mname_as_path))
return info
}
fn url_to_module_name(modulename string) string {
mut res := if mod := get_mod_by_url(modulename) { mod.name } else { modulename }
if res.ends_with('.git') {
res = res.replace('.git', '')
}
return res
}
fn get_all_modules() []string { fn get_all_modules() []string {
url := get_working_server_url() url := get_working_server_url()
r := http.get(url) or { panic(err) } r := http.get(url) or { panic(err) }
@ -580,7 +596,7 @@ fn get_all_modules() []string {
println('Failed to search vpm.vlang.io. Status code: $r.status_code') println('Failed to search vpm.vlang.io. Status code: $r.status_code')
exit(1) exit(1)
} }
s := r.text s := r.body
mut read_len := 0 mut read_len := 0
mut modules := []string{} mut modules := []string{}
for read_len < s.len { for read_len < s.len {
@ -717,7 +733,7 @@ fn get_module_meta_info(name string) ?Mod {
errors << 'Error details: $err' errors << 'Error details: $err'
continue continue
} }
if r.status_code == 404 || r.text.trim_space() == '404' { if r.status_code == 404 || r.body.trim_space() == '404' {
errors << 'Skipping module "$name", since "$server_url" reported that "$name" does not exist.' errors << 'Skipping module "$name", since "$server_url" reported that "$name" does not exist.'
continue continue
} }
@ -725,7 +741,7 @@ fn get_module_meta_info(name string) ?Mod {
errors << 'Skipping module "$name", since "$server_url" responded with $r.status_code http status code. Please try again later.' errors << 'Skipping module "$name", since "$server_url" responded with $r.status_code http status code. Please try again later.'
continue continue
} }
s := r.text s := r.body
if s.len > 0 && s[0] != `{` { if s.len > 0 && s[0] != `{` {
errors << 'Invalid json data' errors << 'Invalid json data'
errors << s.trim_space().limit(100) + ' ...' errors << s.trim_space().limit(100) + ' ...'

View File

@ -305,7 +305,6 @@ fn run_repl(workdir string, vrepl_prefix string) int {
return int(rc) return int(rc)
} }
} }
break
} }
r.line = line r.line = line
if r.line == '\n' { if r.line == '\n' {
@ -388,13 +387,13 @@ fn run_repl(workdir string, vrepl_prefix string) int {
'#include ', '#include ',
'for ', 'for ',
'or ', 'or ',
'insert', 'insert(',
'delete', 'delete(',
'prepend', 'prepend(',
'sort', 'sort(',
'clear', 'clear(',
'trim', 'trim(',
'as', ' as ',
] ]
mut is_statement := false mut is_statement := false
if filter_line.count('=') % 2 == 1 { if filter_line.count('=') % 2 == 1 {

View File

@ -66,6 +66,7 @@ const (
] ]
skip_with_werror = [ skip_with_werror = [
'do_not_remove', 'do_not_remove',
'vlib/v/embed_file/tests/embed_file_test.v',
] ]
skip_with_asan_compiler = [ skip_with_asan_compiler = [
'do_not_remove', 'do_not_remove',
@ -109,6 +110,10 @@ const (
skip_on_non_linux = [ skip_on_non_linux = [
'do_not_remove', 'do_not_remove',
] ]
skip_on_windows_msvc = [
'do_not_remove',
'vlib/v/tests/const_fixed_array_containing_references_to_itself_test.v', // error C2099: initializer is not a constant
]
skip_on_windows = [ skip_on_windows = [
'vlib/context/cancel_test.v', 'vlib/context/cancel_test.v',
'vlib/context/deadline_test.v', 'vlib/context/deadline_test.v',
@ -264,6 +269,9 @@ fn main() {
} }
$if windows { $if windows {
tsession.skip_files << skip_on_windows tsession.skip_files << skip_on_windows
$if msvc {
tsession.skip_files << skip_on_windows_msvc
}
} }
$if !windows { $if !windows {
tsession.skip_files << skip_on_non_windows tsession.skip_files << skip_on_non_windows

View File

@ -25,7 +25,20 @@ see also `v help build`.
-cstrict -cstrict
Turn on additional C warnings. This slows down compilation Turn on additional C warnings. This slows down compilation
slightly (~10% for gcc), but sometimes provides better diagnosis. slightly (~10% for gcc), but sometimes provides better error diagnosis.
-cmain <MainFunctionName>
Useful with framework like code, that uses macros to re-define `main`, like SDL2 does for example.
With that option, V will always generate:
`int MainFunctionName(int ___argc, char** ___argv) {` , for the program entry point function, *no matter* the OS.
Without it, on non Windows systems, it will generate:
`int main(int ___argc, char** ___argv) {`
... and on Windows, it will generate:
a) `int WINAPI wWinMain(HINSTANCE instance, HINSTANCE prev_instance, LPWSTR cmd_line, int show_cmd){`
when you are compiling applications that `import gg`.
... or it will generate:
b) `int wmain(int ___argc, wchar_t* ___argv[], wchar_t* ___envp[]){`
when you are compiling console apps.
-showcc -showcc
Prints the C command that is used to build the program. Prints the C command that is used to build the program.

View File

@ -14,4 +14,4 @@ For more general build help, see also `v help build`.
-os <os>, -target-os <os> -os <os>, -target-os <os>
Change the target OS that V compiles for. Change the target OS that V compiles for.
The supported targets for the native backend are: `macos`, `linux` The supported targets for the native backend are: `macos`, `linux` and 'windows'

View File

@ -7,6 +7,7 @@ Examples:
v hello.v Compile the file `hello.v` and output it as `hello` or `hello.exe`. v hello.v Compile the file `hello.v` and output it as `hello` or `hello.exe`.
v run hello.v Same as above but also run the produced executable immediately after compilation. v run hello.v Same as above but also run the produced executable immediately after compilation.
v -cg run hello.v Same as above, but make debugging easier (in case your program crashes). v -cg run hello.v Same as above, but make debugging easier (in case your program crashes).
v crun hello.v Same as above, but do not recompile, if the executable already exists, and is newer than the sources.
v -o h.c hello.v Translate `hello.v` to `h.c`. Do not compile further. v -o h.c hello.v Translate `hello.v` to `h.c`. Do not compile further.
v -o - hello.v Translate `hello.v` and output the C source code to stdout. Do not compile further. v -o - hello.v Translate `hello.v` and output the C source code to stdout. Do not compile further.
@ -20,7 +21,10 @@ V supports the following commands:
init Setup the file structure for an already existing V project. init Setup the file structure for an already existing V project.
* Ordinary development: * Ordinary development:
run Compile and run a V program. run Compile and run a V program. Delete the executable after the run.
crun Compile and run a V program without deleting the executable.
If you run the same program a second time, without changing the source files,
V will just run the executable, without recompilation. Suitable for scripting.
test Run all test files in the provided directory. test Run all test files in the provided directory.
fmt Format the V code provided. fmt Format the V code provided.
vet Report suspicious code constructs. vet Report suspicious code constructs.

View File

@ -1,4 +1,4 @@
v missdoc 0.0.4 v missdoc 0.1.0
----------------------------------------------- -----------------------------------------------
Usage: v missdoc [options] PATH [PATH]... Usage: v missdoc [options] PATH [PATH]...
@ -12,5 +12,25 @@ Options:
--js Include JavaScript functions in output. --js Include JavaScript functions in output.
-n, --no-line-numbers Exclude line numbers in output. -n, --no-line-numbers Exclude line numbers in output.
-e, --exclude <multiple strings> -e, --exclude <multiple strings>
-r, --relative-paths Use relative paths in output. -r, --relative-paths Use relative paths in output.
--verify exit(1) if documentation is missing, 0 otherwise.
--diff exit(1) and show difference between two PATH inputs, return 0 otherwise.
--version output version information and exit
-----------------------------------------------
PATH can be both files and directories.
The `--verify` flag is useful for use in CI setups for checking if a V project
has all it's functions and methods documented:
```
v missdoc --verify path/to/code
```
The `--diff` flag is useful if your project is not yet fully documented
but you want to ensure that no new functions or methods are introduced
between commits or branches:
```
v missdoc --diff current/code new/code
```

View File

@ -93,17 +93,21 @@ fn main() {
return return
} }
match command { match command {
'run', 'crun', 'build', 'build-module' {
rebuild(prefs)
return
}
'help' { 'help' {
invoke_help_and_exit(args) invoke_help_and_exit(args)
} }
'version' {
println(version.full_v_version(prefs.is_verbose))
return
}
'new', 'init' { 'new', 'init' {
util.launch_tool(prefs.is_verbose, 'vcreate', os.args[1..]) util.launch_tool(prefs.is_verbose, 'vcreate', os.args[1..])
return return
} }
'translate' {
eprintln('Translating C to V will be available in V 0.3')
exit(1)
}
'install', 'list', 'outdated', 'remove', 'search', 'show', 'update', 'upgrade' { 'install', 'list', 'outdated', 'remove', 'search', 'show', 'update', 'upgrade' {
util.launch_tool(prefs.is_verbose, 'vpm', os.args[1..]) util.launch_tool(prefs.is_verbose, 'vpm', os.args[1..])
return return
@ -118,42 +122,24 @@ fn main() {
eprintln('V Error: Use `v install` to install modules from vpm.vlang.io') eprintln('V Error: Use `v install` to install modules from vpm.vlang.io')
exit(1) exit(1)
} }
'version' { 'translate' {
println(version.full_v_version(prefs.is_verbose)) eprintln('Translating C to V will be available in V 0.3')
return exit(1)
} }
else {} else {
} if command.ends_with('.v') || os.exists(command) {
if command in ['run', 'build', 'build-module'] || command.ends_with('.v') || os.exists(command) { // println('command')
// println('command') // println(prefs.path)
// println(prefs.path) rebuild(prefs)
match prefs.backend { return
.c {
$if no_bootstrapv ? {
// TODO: improve the bootstrapping with a split C backend here.
// C code generated by `VEXE=v cmd/tools/builders/c_builder -os cross -o c.c cmd/tools/builders/c_builder.v`
// is enough to bootstrap the C backend, and thus the rest, but currently bootstrapping relies on
// `v -os cross -o v.c cmd/v` having a functional C codegen inside instead.
util.launch_tool(prefs.is_verbose, 'builders/c_builder', os.args[1..])
}
builder.compile('build', prefs, cbuilder.compile_c)
}
.js_node, .js_freestanding, .js_browser {
util.launch_tool(prefs.is_verbose, 'builders/js_builder', os.args[1..])
}
.native {
util.launch_tool(prefs.is_verbose, 'builders/native_builder', os.args[1..])
}
.interpret {
util.launch_tool(prefs.is_verbose, 'builders/interpret_builder', os.args[1..])
} }
} }
return
} }
if prefs.is_help { if prefs.is_help {
invoke_help_and_exit(args) invoke_help_and_exit(args)
} }
eprintln('v $command: unknown command\nRun ${term.highlight_command('v help')} for usage.') eprintln('v $command: unknown command')
eprintln('Run ${term.highlight_command('v help')} for usage.')
exit(1) exit(1)
} }
@ -163,7 +149,31 @@ fn invoke_help_and_exit(remaining []string) {
2 { help.print_and_exit(remaining[1]) } 2 { help.print_and_exit(remaining[1]) }
else {} else {}
} }
println('${term.highlight_command('v help')}: provide only one help topic.') eprintln('${term.highlight_command('v help')}: provide only one help topic.')
println('For usage information, use ${term.highlight_command('v help')}.') eprintln('For usage information, use ${term.highlight_command('v help')}.')
exit(1) exit(1)
} }
fn rebuild(prefs &pref.Preferences) {
match prefs.backend {
.c {
$if no_bootstrapv ? {
// TODO: improve the bootstrapping with a split C backend here.
// C code generated by `VEXE=v cmd/tools/builders/c_builder -os cross -o c.c cmd/tools/builders/c_builder.v`
// is enough to bootstrap the C backend, and thus the rest, but currently bootstrapping relies on
// `v -os cross -o v.c cmd/v` having a functional C codegen inside instead.
util.launch_tool(prefs.is_verbose, 'builders/c_builder', os.args[1..])
}
builder.compile('build', prefs, cbuilder.compile_c)
}
.js_node, .js_freestanding, .js_browser {
util.launch_tool(prefs.is_verbose, 'builders/js_builder', os.args[1..])
}
.native {
util.launch_tool(prefs.is_verbose, 'builders/native_builder', os.args[1..])
}
.interpret {
util.launch_tool(prefs.is_verbose, 'builders/interpret_builder', os.args[1..])
}
}
}

View File

@ -1295,6 +1295,16 @@ mm := map[string]int{}
val := mm['bad_key'] or { panic('key not found') } val := mm['bad_key'] or { panic('key not found') }
``` ```
You can also check, if a key is present, and get its value, if it was present, in one go:
```v
m := {
'abc': 'def'
}
if v := m['abc'] {
println('the map value for that key is: $v')
}
```
The same optional check applies to arrays: The same optional check applies to arrays:
```v ```v
@ -5905,6 +5915,19 @@ fn main() {
} }
``` ```
Struct field deprecations:
```v oksyntax
module abc
// Note that only *direct* accesses to Xyz.d in *other modules*, will produce deprecation notices/warnings:
pub struct Xyz {
pub mut:
a int
d int [deprecated: 'use Xyz.a instead'; deprecated_after: '2999-03-01'] // produce a notice, the deprecation date is in the far future
}
```
Function/method deprecations:
```v ```v
// Calling this function will result in a deprecation warning // Calling this function will result in a deprecation warning
[deprecated] [deprecated]

View File

@ -7,9 +7,9 @@ fn vlang_time(mut wg sync.WaitGroup) ?string {
data := http.get('https://vlang.io/utc_now')? data := http.get('https://vlang.io/utc_now')?
finish := time.ticks() finish := time.ticks()
println('Finish getting time ${finish - start} ms') println('Finish getting time ${finish - start} ms')
println(data.text) println(data.body)
wg.done() wg.done()
return data.text return data.body
} }
fn remote_ip(mut wg sync.WaitGroup) ?string { fn remote_ip(mut wg sync.WaitGroup) ?string {
@ -17,9 +17,9 @@ fn remote_ip(mut wg sync.WaitGroup) ?string {
data := http.get('https://api.ipify.org')? data := http.get('https://api.ipify.org')?
finish := time.ticks() finish := time.ticks()
println('Finish getting ip ${finish - start} ms') println('Finish getting ip ${finish - start} ms')
println(data.text) println(data.body)
wg.done() wg.done()
return data.text return data.body
} }
fn main() { fn main() {

View File

@ -1,12 +1,15 @@
module main module main
// Note: This program, requires that the shared library was already compiled.
// To do so, run `v -d no_backtrace -o library -shared modules/library/library.v`
// before running this program.
import os import os
import dl import dl
type FNAdder = fn (int, int) int type FNAdder = fn (int, int) int
fn main() { fn main() {
library_file_path := os.join_path(os.getwd(), dl.get_libname('library')) library_file_path := os.join_path(os.dir(@FILE), dl.get_libname('library'))
handle := dl.open_opt(library_file_path, dl.rtld_lazy)? handle := dl.open_opt(library_file_path, dl.rtld_lazy)?
eprintln('handle: ${ptr_str(handle)}') eprintln('handle: ${ptr_str(handle)}')
f := FNAdder(dl.sym_opt(handle, 'add_1')?) f := FNAdder(dl.sym_opt(handle, 'add_1')?)

View File

@ -7,6 +7,6 @@ fn main() {
return return
} }
t := time.unix(resp.text.int()) t := time.unix(resp.body.int())
println(t.format()) println(t.format())
} }

View File

@ -46,7 +46,7 @@ fn main() {
return return
} }
weather := json.decode(Weather, resp.text) or { weather := json.decode(Weather, resp.body) or {
println('failed to decode weather json') println('failed to decode weather json')
return return
} }

View File

@ -0,0 +1,163 @@
/*
A V program for Bellman-Ford's single source
shortest path algorithm.
literaly adapted from:
https://www.geeksforgeeks.org/bellman-ford-algorithm-dp-23/
// Adapted from this site... from C++ and Python codes
For Portugese reference
http://rascunhointeligente.blogspot.com/2010/10/o-algoritmo-de-bellman-ford-um.html
By CCS
*/
const large = 999999 // almost inifinity
// a structure to represent a weighted edge in graph
struct EDGE {
mut:
src int
dest int
weight int
}
// building a map of with all edges etc of a graph, represented from a matrix adjacency
// Input: matrix adjacency --> Output: edges list of src, dest and weight
fn build_map_edges_from_graph<T>(g [][]T) map[T]EDGE {
n := g.len // TOTAL OF NODES for this graph -- its dimmension
mut edges_map := map[int]EDGE{} // a graph represented by map of edges
mut edge := 0 // a counter of edges
for i in 0 .. n {
for j in 0 .. n {
// if exist an arc ... include as new edge
if g[i][j] != 0 {
edges_map[edge] = EDGE{i, j, g[i][j]}
edge++
}
}
}
// print('${edges_map}')
return edges_map
}
fn print_sol(dist []int) {
n_vertex := dist.len
print('\n Vertex Distance from Source')
for i in 0 .. n_vertex {
print('\n $i --> ${dist[i]}')
}
}
// The main function that finds shortest distances from src
// to all other vertices using Bellman-Ford algorithm. The
// function also detects negative weight cycle
fn bellman_ford<T>(graph [][]T, src int) {
mut edges := build_map_edges_from_graph(graph)
// this function was done to adapt a graph representation
// by a adjacency matrix, to list of adjacency (using a MAP)
n_edges := edges.len // number of EDGES
// Step 1: Initialize distances from src to all other
// vertices as INFINITE
n_vertex := graph.len // adjc matrix ... n nodes or vertex
mut dist := []int{len: n_vertex, init: large} // dist with -1 instead of INIFINITY
// mut path := []int{len: n , init:-1} // previous node of each shortest paht
dist[src] = 0
// Step 2: Relax all edges |V| - 1 times. A simple
// shortest path from src to any other vertex can have
// at-most |V| - 1 edges
for _ in 0 .. n_vertex {
for j in 0 .. n_edges {
mut u := edges[j].src
mut v := edges[j].dest
mut weight := edges[j].weight
if (dist[u] != large) && (dist[u] + weight < dist[v]) {
dist[v] = dist[u] + weight
}
}
}
// Step 3: check for negative-weight cycles. The above
// step guarantees shortest distances if graph doesn't
// contain negative weight cycle. If we get a shorter
// path, then there is a cycle.
for j in 0 .. n_vertex {
mut u := edges[j].src
mut v := edges[j].dest
mut weight := edges[j].weight
if (dist[u] != large) && (dist[u] + weight < dist[v]) {
print('\n Graph contains negative weight cycle')
// If negative cycle is detected, simply
// return or an exit(1)
return
}
}
print_sol(dist)
}
fn main() {
// adjacency matrix = cost or weight
graph_01 := [
[0, -1, 4, 0, 0],
[0, 0, 3, 2, 2],
[0, 0, 0, 0, 0],
[0, 1, 5, 0, 0],
[0, 0, 0, -3, 0],
]
// data from https://www.geeksforgeeks.org/bellman-ford-algorithm-dp-23/
graph_02 := [
[0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0],
]
// data from https://www.geeksforgeeks.org/prims-minimum-spanning-tree-mst-greedy-algo-5/
/*
The graph:
2 3
(0)--(1)--(2)
| / \ |
6| 8/ \5 |7
| / \ |
(3)-------(4)
9
*/
/*
Let us create following weighted graph
From https://www.geeksforgeeks.org/kruskals-minimum-spanning-tree-algorithm-greedy-algo-2/?ref=lbp
10
0--------1
| \ |
6| 5\ |15
| \ |
2--------3
4
*/
graph_03 := [
[0, 10, 6, 5],
[10, 0, 0, 15],
[6, 0, 0, 4],
[5, 15, 4, 0],
]
// To find number of coluns
// mut cols := an_array[0].len
mut graph := [][]int{} // the graph: adjacency matrix
// for index, g_value in [graph_01, graph_02, graph_03] {
for index, g_value in [graph_01, graph_02, graph_03] {
graph = g_value.clone() // graphs_sample[g].clone() // choice your SAMPLE
// allways starting by node 0
start_node := 0
println('\n\n Graph ${index + 1} using Bellman-Ford algorithm (source node: $start_node)')
bellman_ford(graph, start_node)
}
println('\n BYE -- OK')
}
//=================================================

View File

@ -1,4 +1,4 @@
// Author: ccs // Author: CCS
// I follow literally code in C, done many years ago // I follow literally code in C, done many years ago
fn main() { fn main() {
// Adjacency matrix as a map // Adjacency matrix as a map
@ -20,10 +20,9 @@ fn breadth_first_search_path(graph map[string][]string, start string, target str
mut path := []string{} // ONE PATH with SUCCESS = array mut path := []string{} // ONE PATH with SUCCESS = array
mut queue := []string{} // a queue ... many paths mut queue := []string{} // a queue ... many paths
// all_nodes := graph.keys() // get a key of this map // all_nodes := graph.keys() // get a key of this map
n_nodes := graph.len // numbers of nodes of this graph
// a map to store all the nodes visited to avoid cycles // a map to store all the nodes visited to avoid cycles
// start all them with False, not visited yet // start all them with False, not visited yet
mut visited := a_map_nodes_bool(n_nodes) // a map fully mut visited := visited_init(graph) // a map fully
// false ==> not visited yet: {'A': false, 'B': false, 'C': false, 'D': false, 'E': false} // false ==> not visited yet: {'A': false, 'B': false, 'C': false, 'D': false, 'E': false}
queue << start // first arrival queue << start // first arrival
for queue.len != 0 { for queue.len != 0 {
@ -51,19 +50,6 @@ fn breadth_first_search_path(graph map[string][]string, start string, target str
return path return path
} }
// Creating a map for VISITED nodes ...
// starting by false ===> means this node was not visited yet
fn a_map_nodes_bool(size int) map[string]bool {
mut my_map := map[string]bool{} // look this map ...
base := u8(65)
mut key := base.ascii_str()
for i in 0 .. size {
key = u8(base + i).ascii_str()
my_map[key] = false
}
return my_map
}
// classical removing of a node from the start of a queue // classical removing of a node from the start of a queue
fn departure(mut queue []string) string { fn departure(mut queue []string) string {
mut x := queue[0] mut x := queue[0]
@ -71,6 +57,17 @@ fn departure(mut queue []string) string {
return x return x
} }
// Creating aa map to initialize with of visited nodes .... all with false in the init
// so these nodes are NOT VISITED YET
fn visited_init(a_graph map[string][]string) map[string]bool {
mut array_of_keys := a_graph.keys() // get all keys of this map
mut temp := map[string]bool{} // attention in these initializations with maps
for i in array_of_keys {
temp[i] = false
}
return temp
}
// Based in the current node that is final, search for its parent, already visited, up to the root or start node // Based in the current node that is final, search for its parent, already visited, up to the root or start node
fn build_path_reverse(graph map[string][]string, start string, final string, visited map[string]bool) []string { fn build_path_reverse(graph map[string][]string, start string, final string, visited map[string]bool) []string {
print('\n\n Nodes visited (true) or no (false): $visited') print('\n\n Nodes visited (true) or no (false): $visited')
@ -90,3 +87,5 @@ fn build_path_reverse(graph map[string][]string, start string, final string, vis
} }
return path return path
} }
//======================================================

View File

@ -1,4 +1,4 @@
// Author: ccs // Author: CCS
// I follow literally code in C, done many years ago // I follow literally code in C, done many years ago
fn main() { fn main() {
@ -35,8 +35,7 @@ fn depth_first_search_path(graph map[string][]string, start string, target strin
mut path := []string{} // ONE PATH with SUCCESS = array mut path := []string{} // ONE PATH with SUCCESS = array
mut stack := []string{} // a stack ... many nodes mut stack := []string{} // a stack ... many nodes
// all_nodes := graph.keys() // get a key of this map // all_nodes := graph.keys() // get a key of this map
n_nodes := graph.len // numbers of nodes of this graph mut visited := visited_init(graph) // a map fully with false in all vertex
mut visited := a_map_nodes_bool(n_nodes) // a map fully
// false ... not visited yet: {'A': false, 'B': false, 'C': false, 'D': false, 'E': false} // false ... not visited yet: {'A': false, 'B': false, 'C': false, 'D': false, 'E': false}
stack << start // first push on the stack stack << start // first push on the stack
@ -72,14 +71,15 @@ fn depth_first_search_path(graph map[string][]string, start string, target strin
return path return path
} }
// Creating a map for nodes not VISITED visited ... // Creating aa map to initialize with of visited nodes .... all with false in the init
// starting by false ===> means this node was not visited yet // so these nodes are NOT VISITED YET
fn a_map_nodes_bool(size int) map[string]bool { fn visited_init(a_graph map[string][]string) map[string]bool {
mut my_map := map[string]bool{} // look this map ... mut array_of_keys := a_graph.keys() // get all keys of this map
for i in 0 .. size { mut temp := map[string]bool{} // attention in these initializations with maps
my_map[u8(65 + i).ascii_str()] = false for i in array_of_keys {
temp[i] = false
} }
return my_map return temp
} }
// Based in the current node that is final, search for his parent, that is already visited, up to the root or start node // Based in the current node that is final, search for his parent, that is already visited, up to the root or start node
@ -101,3 +101,5 @@ fn build_path_reverse(graph map[string][]string, start string, final string, vis
} }
return path return path
} }
//*****************************************************

View File

@ -0,0 +1,241 @@
/*
Exploring Dijkstra,
The data example is from
https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
by CCS
Dijkstra's single source shortest path algorithm.
The program uses an adjacency matrix representation of a graph
This Dijkstra algorithm uses a priority queue to save
the shortest paths. The queue structure has a data
which is the number of the node,
and the priority field which is the shortest distance.
PS: all the pre-requisites of Dijkstra are considered
$ v run file_name.v
// Creating a executable
$ v run file_name.v -o an_executable.EXE
$ ./an_executable.EXE
Code based from : Data Structures and Algorithms Made Easy: Data Structures and Algorithmic Puzzles, Fifth Edition (English Edition)
pseudo code written in C
This idea is quite different: it uses a priority queue to store the current
shortest path evaluted
The priority queue structure built using a list to simulate
the queue. A heap is not used in this case.
*/
// a structure
struct NODE {
mut:
data int // NUMBER OF NODE
priority int // Lower values priority indicate ==> higher priority
}
// Function to push according to priority ... the lower priority is goes ahead
// The "push" always sorted in pq
fn push_pq<T>(mut prior_queue []T, data int, priority int) {
mut temp := []T{}
lenght_pq := prior_queue.len
mut i := 0
for (i < lenght_pq) && (priority > prior_queue[i].priority) {
temp << prior_queue[i]
i++
}
// INSERTING SORTED in the queue
temp << NODE{data, priority} // do the copy in the right place
// copy the another part (tail) of original prior_queue
for i < lenght_pq {
temp << prior_queue[i]
i++
}
prior_queue = temp.clone() // I am not sure if it the right way
// IS IT THE RIGHT WAY?
}
// Change the priority of a value/node ... exist a value, change its priority
fn updating_priority<T>(mut prior_queue []T, search_data int, new_priority int) {
mut i := 0
mut lenght_pq := prior_queue.len
for i < lenght_pq {
if search_data == prior_queue[i].data {
prior_queue[i] = NODE{search_data, new_priority} // do the copy in the right place
break
}
i++
// all the list was examined
if i >= lenght_pq {
print('\n This data $search_data does exist ... PRIORITY QUEUE problem\n')
exit(1) // panic(s string)
}
} // end for
}
// a single departure or remove from queue
fn departure_priority<T>(mut prior_queue []T) int {
mut x := prior_queue[0].data
prior_queue.delete(0) // or .delete_many(0, 1 )
return x
}
// give a NODE v, return a list with all adjacents
// Take care, only positive EDGES
fn all_adjacents<T>(g [][]T, v int) []int {
mut temp := []int{} //
for i in 0 .. (g.len) {
if g[v][i] > 0 {
temp << i
}
}
return temp
}
// print the costs from origin up to all nodes
fn print_solution<T>(dist []T) {
print('Vertex \tDistance from Source')
for node in 0 .. (dist.len) {
print('\n $node ==> \t ${dist[node]}')
}
}
// print all paths and their cost or weight
fn print_paths_dist<T>(path []T, dist []T) {
print('\n Read the nodes from right to left (a path): \n')
for node in 1 .. (path.len) {
print('\n $node ')
mut i := node
for path[i] != -1 {
print(' <= ${path[i]} ')
i = path[i]
}
print('\t PATH COST: ${dist[node]}')
}
}
// check structure from: https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
// s: source for all nodes
// Two results are obtained ... cost and paths
fn dijkstra(g [][]int, s int) {
mut pq_queue := []NODE{} // creating a priority queue
push_pq(mut pq_queue, s, 0) // goes s with priority 0
mut n := g.len
mut dist := []int{len: n, init: -1} // dist with -1 instead of INIFINITY
mut path := []int{len: n, init: -1} // previous node of each shortest paht
// Distance of source vertex from itself is always 0
dist[s] = 0
for pq_queue.len != 0 {
mut v := departure_priority(mut pq_queue)
// for all W adjcents vertices of v
mut adjs_of_v := all_adjacents(g, v) // all_ADJ of v ....
// print('\n ADJ ${v} is ${adjs_of_v}')
mut new_dist := 0
for w in adjs_of_v {
new_dist = dist[v] + g[v][w]
if dist[w] == -1 {
dist[w] = new_dist
push_pq(mut pq_queue, w, dist[w])
path[w] = v // collecting the previous node -- lowest weight
}
if dist[w] > new_dist {
dist[w] = new_dist
updating_priority(mut pq_queue, w, dist[w])
path[w] = v //
}
}
}
// print the constructed distance array
print_solution(dist)
// print('\n \n Previous node of shortest path: ${path}')
print_paths_dist(path, dist)
}
/*
Solution Expected
Vertex Distance from Source
0 0
1 4
2 12
3 19
4 21
5 11
6 9
7 8
8 14
*/
fn main() {
// adjacency matrix = cost or weight
graph_01 := [
[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0],
]
graph_02 := [
[0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0],
]
// data from https://www.geeksforgeeks.org/prims-minimum-spanning-tree-mst-greedy-algo-5/
/*
The graph:
2 3
(0)--(1)--(2)
| / \ |
6| 8/ \5 |7
| / \ |
(3)-------(4)
9
*/
/*
Let us create following weighted graph
From https://www.geeksforgeeks.org/kruskals-minimum-spanning-tree-algorithm-greedy-algo-2/?ref=lbp
10
0--------1
| \ |
6| 5\ |15
| \ |
2--------3
4
*/
graph_03 := [
[0, 10, 6, 5],
[10, 0, 0, 15],
[6, 0, 0, 4],
[5, 15, 4, 0],
]
// To find number of coluns
// mut cols := an_array[0].len
mut graph := [][]int{} // the graph: adjacency matrix
// for index, g_value in [graph_01, graph_02, graph_03] {
for index, g_value in [graph_01, graph_02, graph_03] {
graph = g_value.clone() // graphs_sample[g].clone() // choice your SAMPLE
// allways starting by node 0
start_node := 0
println('\n\n Graph ${index + 1} using Dijkstra algorithm (source node: $start_node)')
dijkstra(graph, start_node)
}
println('\n BYE -- OK')
}
//********************************************************************

View File

@ -0,0 +1,230 @@
/*
Exploring PRIMS,
The data example is from
https://www.geeksforgeeks.org/prims-minimum-spanning-tree-mst-greedy-algo-5/
by CCS
PS: all the pre-requisites of Dijkstra are considered
$ v run file_name.v
Creating a executable
$ v run file_name.v -o an_executable.EXE
$ ./an_executable.EXE
Code based from : Data Structures and Algorithms Made Easy: Data Structures and Algorithmic Puzzles, Fifth Edition (English Edition)
pseudo code written in C
This idea is quite different: it uses a priority queue to store the current
shortest path evaluted
The priority queue structure built using a list to simulate
the queue. A heap is not used in this case.
*/
// a structure
struct NODE {
mut:
data int // number of nodes
priority int // Lower values priority indicate ==> higher priority
}
// Function to push according to priority ... the lower priority is goes ahead
// The "push" always sorted in pq
fn push_pq<T>(mut prior_queue []T, data int, priority int) {
mut temp := []T{}
lenght_pq := prior_queue.len
mut i := 0
for (i < lenght_pq) && (priority > prior_queue[i].priority) {
temp << prior_queue[i]
i++
}
// INSERTING SORTED in the queue
temp << NODE{data, priority} // do the copy in the right place
// copy the another part (tail) of original prior_queue
for i < lenght_pq {
temp << prior_queue[i]
i++
}
prior_queue = temp.clone()
// I am not sure if it the right way
// IS IT THE RIGHT WAY?
}
// Change the priority of a value/node ... exist a value, change its priority
fn updating_priority<T>(mut prior_queue []T, search_data int, new_priority int) {
mut i := 0
mut lenght_pq := prior_queue.len
for i < lenght_pq {
if search_data == prior_queue[i].data {
prior_queue[i] = NODE{search_data, new_priority} // do the copy in the right place
break
}
i++
// all the list was examined
if i >= lenght_pq {
// print('\n Priority Queue: ${prior_queue}')
// print('\n These data ${search_data} and ${new_priority} do not exist ... PRIORITY QUEUE problem\n')
// if it does not find ... then push it
push_pq(mut prior_queue, search_data, new_priority)
// exit(1) // panic(s string)
}
} // end for
}
// a single departure or remove from queue
fn departure_priority<T>(mut prior_queue []T) int {
mut x := prior_queue[0].data
prior_queue.delete(0) // or .delete_many(0, 1 )
return x
}
// give a NODE v, return a list with all adjacents
// Take care, only positive EDGES
fn all_adjacents<T>(g [][]T, v int) []int {
mut temp := []int{} //
for i in 0 .. (g.len) {
if g[v][i] > 0 {
temp << i
}
}
return temp
}
// print the costs from origin up to all nodes
// A utility function to print the
// constructed MST stored in parent[]
// print all paths and their cost or weight
fn print_solution(path []int, g [][]int) {
// print(' PATH: ${path} ==> ${path.len}')
print(' Edge \tWeight\n')
mut sum := 0
for node in 0 .. (path.len) {
if path[node] == -1 {
print('\n $node <== reference or start node')
} else {
print('\n $node <--> ${path[node]} \t${g[node][path[node]]}')
sum += g[node][path[node]]
}
}
print('\n Minimum Cost Spanning Tree: $sum\n\n')
}
// check structure from: https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
// s: source for all nodes
// Two results are obtained ... cost and paths
fn prim_mst(g [][]int, s int) {
mut pq_queue := []NODE{} // creating a priority queue
push_pq(mut pq_queue, s, 0) // goes s with priority 0
mut n := g.len
mut dist := []int{len: n, init: -1} // dist with -1 instead of INIFINITY
mut path := []int{len: n, init: -1} // previous node of each shortest paht
// Distance of source vertex from itself is always 0
dist[s] = 0
for pq_queue.len != 0 {
mut v := departure_priority(mut pq_queue)
// for all W adjcents vertices of v
mut adjs_of_v := all_adjacents(g, v) // all_ADJ of v ....
// print('\n :${dist} :: ${pq_queue}')
// print('\n ADJ ${v} is ${adjs_of_v}')
mut new_dist := 0
for w in adjs_of_v {
new_dist = dist[v] + g[v][w]
if dist[w] == -1 {
dist[w] = g[v][w]
push_pq(mut pq_queue, w, dist[w])
path[w] = v // collecting the previous node -- lowest weight
}
if dist[w] > new_dist {
dist[w] = g[v][w] // new_dist//
updating_priority(mut pq_queue, w, dist[w])
path[w] = v // father / previous node
}
}
}
// print('\n \n Previous node of shortest path: ${path}')
// print_paths_dist(path , dist)
print_solution(path, g)
}
/*
Solution Expected graph_02
Edge Weight
0 - 1 2
1 - 2 3
0 - 3 6
1 - 4 5
*/
fn main() {
// adjacency matrix = cost or weight
graph_01 := [
[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0],
]
graph_02 := [
[0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0],
]
// data from https://www.geeksforgeeks.org/prims-minimum-spanning-tree-mst-greedy-algo-5/
/*
The graph:
2 3
(0)--(1)--(2)
| / \ |
6| 8/ \5 |7
| / \ |
(3)-------(4)
9
*/
/*
Let us create following weighted graph
From https://www.geeksforgeeks.org/kruskals-minimum-spanning-tree-algorithm-greedy-algo-2/?ref=lbp
10
0--------1
| \ |
6| 5\ |15
| \ |
2--------3
4
*/
graph_03 := [
[0, 10, 6, 5],
[10, 0, 0, 15],
[6, 0, 0, 4],
[5, 15, 4, 0],
]
// To find number of coluns
// mut cols := an_array[0].len
mut graph := [][]int{} // the graph: adjacency matrix
// for index, g_value in [graph_01, graph_02, graph_03] {
for index, g_value in [graph_01, graph_02, graph_03] {
println('\n Minimal Spanning Tree of graph ${index + 1} using PRIM algorithm')
graph = g_value.clone() // graphs_sample[g].clone() // choice your SAMPLE
// starting by node x ... see the graphs dimmension
start_node := 0
prim_mst(graph, start_node)
}
println('\n BYE -- OK')
}
//********************************************************************

View File

@ -11,7 +11,7 @@ fn (h ExampleHandler) handle(req Request) Response {
}) })
} }
mut status_code := 200 mut status_code := 200
res.text = match req.url { res.body = match req.url {
'/foo' { '/foo' {
'bar\n' 'bar\n'
} }

View File

@ -8,7 +8,7 @@ fn send_request(mut wg sync.WaitGroup) ?string {
finish := time.ticks() finish := time.ticks()
println('Finish getting time ${finish - start} ms') println('Finish getting time ${finish - start} ms')
wg.done() wg.done()
return data.text return data.body
} }
fn main() { fn main() {

View File

@ -16,7 +16,7 @@ fn worker_fetch(p &pool.PoolProcessor, cursor int, worker_id int) voidptr {
println('failed to fetch data from /v0/item/${id}.json') println('failed to fetch data from /v0/item/${id}.json')
return pool.no_result return pool.no_result
} }
story := json.decode(Story, resp.text) or { story := json.decode(Story, resp.body) or {
println('failed to decode a story') println('failed to decode a story')
return pool.no_result return pool.no_result
} }
@ -30,7 +30,7 @@ fn main() {
println('failed to fetch data from /v0/topstories.json') println('failed to fetch data from /v0/topstories.json')
return return
} }
ids := json.decode([]int, resp.text) or { ids := json.decode([]int, resp.body) or {
println('failed to decode topstories.json') println('failed to decode topstories.json')
return return
}#[0..10] }#[0..10]

View File

@ -1,9 +1,13 @@
#!/usr/local/bin/v run #!/usr/local/bin/v
// The shebang above associates the file to V on Unix-like systems, // The shebang above associates the file to V on Unix-like systems,
// so it can be run just by specifying the path to the file // so it can be run just by specifying the path to the file
// once it's made executable using `chmod +x`. // once it's made executable using `chmod +x`.
// Note that you can also use: `#!/usr/bin/env -S v crun`, if your system supports the -S flag to env
// The benefit is that in this case, v could be anywhere in your path, while /usr/bin/env is guaranteed
// to be present on most Unix systems in that exact place.
for _ in 0 .. 3 { for _ in 0 .. 3 {
println('V script') println('V script')
} }

View File

@ -1611,7 +1611,12 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) {
fmtex.Format.nAvgBytesPerSec = fmtex.Format.nSamplesPerSec * fmtex.Format.nBlockAlign; fmtex.Format.nAvgBytesPerSec = fmtex.Format.nSamplesPerSec * fmtex.Format.nBlockAlign;
fmtex.Format.cbSize = 22; /* WORD + DWORD + GUID */ fmtex.Format.cbSize = 22; /* WORD + DWORD + GUID */
fmtex.Samples.wValidBitsPerSample = 32; fmtex.Samples.wValidBitsPerSample = 32;
fmtex.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; if (_saudio.num_channels == 1) {
fmtex.dwChannelMask = SPEAKER_FRONT_CENTER;
}
else {
fmtex.dwChannelMask = SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT;
}
fmtex.SubFormat = _saudio_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; fmtex.SubFormat = _saudio_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
dur = (REFERENCE_TIME) dur = (REFERENCE_TIME)
(((double)_saudio.buffer_frames) / (((double)_saudio.sample_rate) * (1.0/10000000.0))); (((double)_saudio.buffer_frames) / (((double)_saudio.sample_rate) * (1.0/10000000.0)));

View File

@ -268,13 +268,28 @@ fn _write_buf_to_fd(fd int, buf &u8, buf_len int) {
if buf_len <= 0 { if buf_len <= 0 {
return return
} }
unsafe { mut ptr := unsafe { buf }
mut ptr := buf mut remaining_bytes := isize(buf_len)
mut remaining_bytes := buf_len mut x := isize(0)
for remaining_bytes > 0 { $if freestanding || vinix {
x := C.write(fd, ptr, remaining_bytes) unsafe {
ptr += x for remaining_bytes > 0 {
remaining_bytes -= x x = C.write(fd, ptr, remaining_bytes)
ptr += x
remaining_bytes -= x
}
}
} $else {
mut stream := voidptr(C.stdout)
if fd == 2 {
stream = voidptr(C.stderr)
}
unsafe {
for remaining_bytes > 0 {
x = isize(C.fwrite(ptr, 1, remaining_bytes, stream))
ptr += x
remaining_bytes -= x
}
} }
} }
} }

View File

@ -32,7 +32,6 @@ $if dynamic_boehm ? {
$if macos || linux { $if macos || linux {
#flag -DGC_PTHREADS=1 #flag -DGC_PTHREADS=1
#flag -I@VEXEROOT/thirdparty/libgc/include #flag -I@VEXEROOT/thirdparty/libgc/include
#flag -lpthread
$if (prod && !tinyc && !debug) || !(amd64 || arm64 || i386 || arm32) { $if (prod && !tinyc && !debug) || !(amd64 || arm64 || i386 || arm32) {
// TODO: replace the architecture check with a `!$exists("@VEXEROOT/thirdparty/tcc/lib/libgc.a")` comptime call // TODO: replace the architecture check with a `!$exists("@VEXEROOT/thirdparty/tcc/lib/libgc.a")` comptime call
#flag @VEXEROOT/thirdparty/libgc/gc.o #flag @VEXEROOT/thirdparty/libgc/gc.o
@ -40,6 +39,7 @@ $if dynamic_boehm ? {
#flag @VEXEROOT/thirdparty/tcc/lib/libgc.a #flag @VEXEROOT/thirdparty/tcc/lib/libgc.a
} }
#flag -ldl #flag -ldl
#flag -lpthread
} $else $if freebsd { } $else $if freebsd {
// Tested on FreeBSD 13.0-RELEASE-p3, with clang, gcc and tcc: // Tested on FreeBSD 13.0-RELEASE-p3, with clang, gcc and tcc:
#flag -DBUS_PAGE_FAULT=T_PAGEFLT #flag -DBUS_PAGE_FAULT=T_PAGEFLT

View File

@ -205,7 +205,11 @@ pub fn (s string) hash() int {
// int returns the value of the string as an integer `'1'.int() == 1`. // int returns the value of the string as an integer `'1'.int() == 1`.
pub fn (s string) int() int { pub fn (s string) int() int {
return int(JS.parseInt(s.str)) res := int(0)
#if (typeof(s) == "string") { res.val = parseInt(s) }
#else { res.val = parseInt(s.str) }
return res
} }
// i64 returns the value of the string as i64 `'1'.i64() == i64(1)`. // i64 returns the value of the string as i64 `'1'.i64() == i64(1)`.

View File

@ -1,4 +1,4 @@
## Description: ## Description:
`compress` is a namespace for (multiple) compression algorithms supported by V. `compress` is a namespace for (multiple) compression algorithms supported by V.
At the moment, only `compress.zlib` is implemented. At the moment, only `compress.zlib` and `compress.deflate` are implemented.

View File

@ -0,0 +1,44 @@
module compress
#flag -I @VEXEROOT/thirdparty/zip
#include "miniz.h"
pub const max_size = u64(1 << 31)
fn C.tdefl_compress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_len &usize, flags int) voidptr
fn C.tinfl_decompress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_len &usize, flags int) voidptr
// compresses an array of bytes based on providing flags and returns the compressed bytes in a new array
// see `gzip.compress([]u8)` and `zlib.compress([]u8)` for default implementations.
[manualfree]
pub fn compress(data []u8, flags int) ?[]u8 {
if u64(data.len) > compress.max_size {
return error('data too large ($data.len > $compress.max_size)')
}
mut out_len := usize(0)
address := C.tdefl_compress_mem_to_heap(data.data, data.len, &out_len, flags)
if address == 0 {
return error('compression failed')
}
if u64(out_len) > compress.max_size {
return error('compressed data is too large ($out_len > $compress.max_size)')
}
return unsafe { address.vbytes(int(out_len)) }
}
// decompresses an array of bytes based on providing flags and returns the decompressed bytes in a new array
// see `gzip.decompress([]u8)` and `zlib.decompress([]u8)` for default implementations.
[manualfree]
pub fn decompress(data []u8, flags int) ?[]u8 {
mut out_len := usize(0)
address := C.tinfl_decompress_mem_to_heap(data.data, data.len, &out_len, flags)
if address == 0 {
return error('decompression failed')
}
if u64(out_len) > compress.max_size {
return error('decompressed data is too large ($out_len > $compress.max_size)')
}
return unsafe { address.vbytes(int(out_len)) }
}

View File

@ -0,0 +1,21 @@
## Description:
`compress.deflate` is a module that assists in the compression and
decompression of binary data using `deflate` compression
NOTE: To decompress gzip, discard first 10 bytes of compressed bytes
then use `compress.deflate.decompress`. (Header validation won't be
performed in this case)
## Examples:
```v
import compress.deflate
fn main() {
uncompressed := 'Hello world!'
compressed := deflate.compress(uncompressed.bytes())?
decompressed := deflate.decompress(compressed)?
assert decompressed == uncompressed.bytes()
}
```

View File

@ -0,0 +1,16 @@
module deflate
import compress
// compresses an array of bytes using gzip and returns the compressed bytes in a new array
// Example: compressed := gzip.compress(b)?
pub fn compress(data []u8) ?[]u8 {
return compress.compress(data, 0)
}
// decompresses an array of bytes using zlib and returns the decompressed bytes in a new array
// Example: decompressed := zlib.decompress(b)?
[manualfree]
pub fn decompress(data []u8) ?[]u8 {
return compress.decompress(data, 0)
}

View File

@ -0,0 +1,12 @@
module deflate
const gzip_magic_numbers = [u8(0x1f), 0x8b]
fn test_gzip() ? {
uncompressed := 'Hello world!'
compressed := compress(uncompressed.bytes())?
first2 := compressed[0..2]
assert first2 != deflate.gzip_magic_numbers
decompressed := decompress(compressed)?
assert decompressed == uncompressed.bytes()
}

View File

@ -1,60 +1,19 @@
module zlib module zlib
#flag -I @VEXEROOT/thirdparty/zip import compress
#include "miniz.h"
pub const max_size = u64(1 << 31)
fn C.tdefl_compress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_len &usize, flags int) voidptr
fn C.tinfl_decompress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_len &usize, flags int) voidptr
// compresses an array of bytes using zlib and returns the compressed bytes in a new array // compresses an array of bytes using zlib and returns the compressed bytes in a new array
// Example: compressed := zlib.compress(b)? // Example: compressed := zlib.compress(b)?
[manualfree] [manualfree]
pub fn compress(data []u8) ?[]u8 { pub fn compress(data []u8) ?[]u8 {
if u64(data.len) > zlib.max_size {
return error('data too large ($data.len > $zlib.max_size)')
}
mut out_len := usize(0)
// flags = TDEFL_WRITE_ZLIB_HEADER (0x01000) // flags = TDEFL_WRITE_ZLIB_HEADER (0x01000)
address := C.tdefl_compress_mem_to_heap(data.data, data.len, &out_len, 0x01000) return compress.compress(data, 0x01000)
if address == 0 {
return error('compression failed')
}
if u64(out_len) > zlib.max_size {
return error('compressed data is too large ($out_len > $zlib.max_size)')
}
compressed := unsafe {
address.vbytes(int(out_len))
}
copy := compressed.clone()
unsafe {
free(address)
}
return copy
} }
// decompresses an array of bytes using zlib and returns the decompressed bytes in a new array // decompresses an array of bytes using zlib and returns the decompressed bytes in a new array
// Example: decompressed := zlib.decompress(b)? // Example: decompressed := zlib.decompress(b)?
[manualfree] [manualfree]
pub fn decompress(data []u8) ?[]u8 { pub fn decompress(data []u8) ?[]u8 {
mut out_len := usize(0)
// flags = TINFL_FLAG_PARSE_ZLIB_HEADER (0x1) // flags = TINFL_FLAG_PARSE_ZLIB_HEADER (0x1)
address := C.tinfl_decompress_mem_to_heap(data.data, data.len, &out_len, 0x1) return compress.decompress(data, 0x1)
if address == 0 {
return error('decompression failed')
}
if u64(out_len) > zlib.max_size {
return error('decompressed data is too large ($out_len > $zlib.max_size)')
}
decompressed := unsafe {
address.vbytes(int(out_len))
}
copy := decompressed.clone()
unsafe {
free(address)
}
return copy
} }

View File

@ -9,8 +9,14 @@
module md5 module md5
import math.bits import math.bits
import encoding.binary
[direct_array_access; inline]
fn get_le_u32(b []u8, start int) u32 {
return u32(b[start]) | (u32(b[1 + start]) << u32(8)) | (u32(b[2 + start]) << u32(16)) | (u32(b[
3 + start]) << u32(24))
}
[direct_array_access]
fn block_generic(mut dig Digest, p []u8) { fn block_generic(mut dig Digest, p []u8) {
// load state // load state
mut a := dig.s[0] mut a := dig.s[0]
@ -19,8 +25,6 @@ fn block_generic(mut dig Digest, p []u8) {
mut d := dig.s[3] mut d := dig.s[3]
for i := 0; i <= p.len - block_size; i += block_size { for i := 0; i <= p.len - block_size; i += block_size {
mut q := p[i..]
q = q[..block_size]
// save current state // save current state
aa := a aa := a
bb := b bb := b
@ -28,22 +32,22 @@ fn block_generic(mut dig Digest, p []u8) {
dd := d dd := d
// load input block // load input block
x0 := binary.little_endian_u32(q[4 * 0x0..]) x0 := get_le_u32(p, 4 * 0x0 + i)
x1 := binary.little_endian_u32(q[4 * 0x1..]) x1 := get_le_u32(p, 4 * 0x1 + i)
x2 := binary.little_endian_u32(q[4 * 0x2..]) x2 := get_le_u32(p, 4 * 0x2 + i)
x3 := binary.little_endian_u32(q[4 * 0x3..]) x3 := get_le_u32(p, 4 * 0x3 + i)
x4 := binary.little_endian_u32(q[4 * 0x4..]) x4 := get_le_u32(p, 4 * 0x4 + i)
x5 := binary.little_endian_u32(q[4 * 0x5..]) x5 := get_le_u32(p, 4 * 0x5 + i)
x6 := binary.little_endian_u32(q[4 * 0x6..]) x6 := get_le_u32(p, 4 * 0x6 + i)
x7 := binary.little_endian_u32(q[4 * 0x7..]) x7 := get_le_u32(p, 4 * 0x7 + i)
x8 := binary.little_endian_u32(q[4 * 0x8..]) x8 := get_le_u32(p, 4 * 0x8 + i)
x9 := binary.little_endian_u32(q[4 * 0x9..]) x9 := get_le_u32(p, 4 * 0x9 + i)
xa := binary.little_endian_u32(q[4 * 0xa..]) xa := get_le_u32(p, 4 * 0xa + i)
xb := binary.little_endian_u32(q[4 * 0xb..]) xb := get_le_u32(p, 4 * 0xb + i)
xc := binary.little_endian_u32(q[4 * 0xc..]) xc := get_le_u32(p, 4 * 0xc + i)
xd := binary.little_endian_u32(q[4 * 0xd..]) xd := get_le_u32(p, 4 * 0xd + i)
xe := binary.little_endian_u32(q[4 * 0xe..]) xe := get_le_u32(p, 4 * 0xe + i)
xf := binary.little_endian_u32(q[4 * 0xf..]) xf := get_le_u32(p, 4 * 0xf + i)
// round 1 // round 1
a = b + bits.rotate_left_32((((c ^ d) & b) ^ d) + a + x0 + u32(0xd76aa478), 7) a = b + bits.rotate_left_32((((c ^ d) & b) ^ d) + a + x0 + u32(0xd76aa478), 7)

View File

@ -4,26 +4,26 @@
module binary module binary
// Little Endian // Little Endian
[inline] [direct_array_access; inline]
pub fn little_endian_u16(b []u8) u16 { pub fn little_endian_u16(b []u8) u16 {
_ = b[1] // bounds check _ = b[1] // bounds check
return u16(b[0]) | (u16(b[1]) << u16(8)) return u16(b[0]) | (u16(b[1]) << u16(8))
} }
[inline] [direct_array_access; inline]
pub fn little_endian_put_u16(mut b []u8, v u16) { pub fn little_endian_put_u16(mut b []u8, v u16) {
_ = b[1] // bounds check _ = b[1] // bounds check
b[0] = u8(v) b[0] = u8(v)
b[1] = u8(v >> u16(8)) b[1] = u8(v >> u16(8))
} }
[inline] [direct_array_access; inline]
pub fn little_endian_u32(b []u8) u32 { pub fn little_endian_u32(b []u8) u32 {
_ = b[3] // bounds check _ = b[3] // bounds check
return u32(b[0]) | (u32(b[1]) << u32(8)) | (u32(b[2]) << u32(16)) | (u32(b[3]) << u32(24)) return u32(b[0]) | (u32(b[1]) << u32(8)) | (u32(b[2]) << u32(16)) | (u32(b[3]) << u32(24))
} }
[inline] [direct_array_access; inline]
pub fn little_endian_put_u32(mut b []u8, v u32) { pub fn little_endian_put_u32(mut b []u8, v u32) {
_ = b[3] // bounds check _ = b[3] // bounds check
b[0] = u8(v) b[0] = u8(v)
@ -32,13 +32,13 @@ pub fn little_endian_put_u32(mut b []u8, v u32) {
b[3] = u8(v >> u32(24)) b[3] = u8(v >> u32(24))
} }
[inline] [direct_array_access; inline]
pub fn little_endian_u64(b []u8) u64 { pub fn little_endian_u64(b []u8) u64 {
_ = b[7] // bounds check _ = b[7] // bounds check
return u64(b[0]) | (u64(b[1]) << u64(8)) | (u64(b[2]) << u64(16)) | (u64(b[3]) << u64(24)) | (u64(b[4]) << u64(32)) | (u64(b[5]) << u64(40)) | (u64(b[6]) << u64(48)) | (u64(b[7]) << u64(56)) return u64(b[0]) | (u64(b[1]) << u64(8)) | (u64(b[2]) << u64(16)) | (u64(b[3]) << u64(24)) | (u64(b[4]) << u64(32)) | (u64(b[5]) << u64(40)) | (u64(b[6]) << u64(48)) | (u64(b[7]) << u64(56))
} }
[inline] [direct_array_access; inline]
pub fn little_endian_put_u64(mut b []u8, v u64) { pub fn little_endian_put_u64(mut b []u8, v u64) {
_ = b[7] // bounds check _ = b[7] // bounds check
b[0] = u8(v) b[0] = u8(v)
@ -52,26 +52,26 @@ pub fn little_endian_put_u64(mut b []u8, v u64) {
} }
// Big Endian // Big Endian
[inline] [direct_array_access; inline]
pub fn big_endian_u16(b []u8) u16 { pub fn big_endian_u16(b []u8) u16 {
_ = b[1] // bounds check _ = b[1] // bounds check
return u16(b[1]) | (u16(b[0]) << u16(8)) return u16(b[1]) | (u16(b[0]) << u16(8))
} }
[inline] [direct_array_access; inline]
pub fn big_endian_put_u16(mut b []u8, v u16) { pub fn big_endian_put_u16(mut b []u8, v u16) {
_ = b[1] // bounds check _ = b[1] // bounds check
b[0] = u8(v >> u16(8)) b[0] = u8(v >> u16(8))
b[1] = u8(v) b[1] = u8(v)
} }
[inline] [direct_array_access; inline]
pub fn big_endian_u32(b []u8) u32 { pub fn big_endian_u32(b []u8) u32 {
_ = b[3] // bounds check _ = b[3] // bounds check
return u32(b[3]) | (u32(b[2]) << u32(8)) | (u32(b[1]) << u32(16)) | (u32(b[0]) << u32(24)) return u32(b[3]) | (u32(b[2]) << u32(8)) | (u32(b[1]) << u32(16)) | (u32(b[0]) << u32(24))
} }
[inline] [direct_array_access; inline]
pub fn big_endian_put_u32(mut b []u8, v u32) { pub fn big_endian_put_u32(mut b []u8, v u32) {
_ = b[3] // bounds check _ = b[3] // bounds check
b[0] = u8(v >> u32(24)) b[0] = u8(v >> u32(24))
@ -80,13 +80,13 @@ pub fn big_endian_put_u32(mut b []u8, v u32) {
b[3] = u8(v) b[3] = u8(v)
} }
[inline] [direct_array_access; inline]
pub fn big_endian_u64(b []u8) u64 { pub fn big_endian_u64(b []u8) u64 {
_ = b[7] // bounds check _ = b[7] // bounds check
return u64(b[7]) | (u64(b[6]) << u64(8)) | (u64(b[5]) << u64(16)) | (u64(b[4]) << u64(24)) | (u64(b[3]) << u64(32)) | (u64(b[2]) << u64(40)) | (u64(b[1]) << u64(48)) | (u64(b[0]) << u64(56)) return u64(b[7]) | (u64(b[6]) << u64(8)) | (u64(b[5]) << u64(16)) | (u64(b[4]) << u64(24)) | (u64(b[3]) << u64(32)) | (u64(b[2]) << u64(40)) | (u64(b[1]) << u64(48)) | (u64(b[0]) << u64(56))
} }
[inline] [direct_array_access; inline]
pub fn big_endian_put_u64(mut b []u8, v u64) { pub fn big_endian_put_u64(mut b []u8, v u64) {
_ = b[7] // bounds check _ = b[7] // bounds check
b[0] = u8(v >> u64(56)) b[0] = u8(v >> u64(56))

View File

@ -17,10 +17,10 @@ struct Employee {
fn test_simple() ? { fn test_simple() ? {
x := Employee{'Peter', 28, 95000.5, .worker} x := Employee{'Peter', 28, 95000.5, .worker}
s := json.encode(x) s := json.encode(x)
eprintln('Employee x: $s') // eprintln('Employee x: $s')
assert s == '{"name":"Peter","age":28,"salary":95000.5,"title":2}' assert s == '{"name":"Peter","age":28,"salary":95000.5,"title":2}'
y := json.decode(Employee, s)? y := json.decode(Employee, s)?
eprintln('Employee y: $y') // eprintln('Employee y: $y')
assert y.name == 'Peter' assert y.name == 'Peter'
assert y.age == 28 assert y.age == 28
assert y.salary == 95000.5 assert y.salary == 95000.5
@ -90,15 +90,15 @@ fn test_encode_decode_sumtype() ? {
t, t,
] ]
} }
eprintln('Game: $game') // eprintln('Game: $game')
enc := json.encode(game) enc := json.encode(game)
eprintln('Encoded Game: $enc') // eprintln('Encoded Game: $enc')
assert enc == '{"title":"Super Mega Game","player":{"name":"Monke","_type":"Human"},"other":[{"tag":"Pen","_type":"Item"},{"tag":"Cookie","_type":"Item"},1,"Stool",{"_type":"Time","value":$t.unix_time()}]}' assert enc == '{"title":"Super Mega Game","player":{"name":"Monke","_type":"Human"},"other":[{"tag":"Pen","_type":"Item"},{"tag":"Cookie","_type":"Item"},1,"Stool",{"_type":"Time","value":$t.unix_time()}]}'
dec := json.decode(SomeGame, enc)? dec := json.decode(SomeGame, enc)?
eprintln('Decoded Game: $dec') // eprintln('Decoded Game: $dec')
assert game.title == dec.title assert game.title == dec.title
assert game.player == dec.player assert game.player == dec.player
@ -138,9 +138,9 @@ struct User {
fn test_parse_user() ? { fn test_parse_user() ? {
s := '{"age": 10, "nums": [1,2,3], "type": 1, "lastName": "Johnson", "IsRegistered": true, "pet_animals": {"name": "Bob", "animal": "Dog"}}' s := '{"age": 10, "nums": [1,2,3], "type": 1, "lastName": "Johnson", "IsRegistered": true, "pet_animals": {"name": "Bob", "animal": "Dog"}}'
u2 := json.decode(User2, s)? u2 := json.decode(User2, s)?
println(u2) // println(u2)
u := json.decode(User, s)? u := json.decode(User, s)?
println(u) // println(u)
assert u.age == 10 assert u.age == 10
assert u.last_name == 'Johnson' assert u.last_name == 'Johnson'
assert u.is_registered == true assert u.is_registered == true
@ -158,12 +158,12 @@ fn test_encode_decode_time() ? {
reg_date: time.new_time(year: 2020, month: 12, day: 22, hour: 7, minute: 23) reg_date: time.new_time(year: 2020, month: 12, day: 22, hour: 7, minute: 23)
} }
s := json.encode(user) s := json.encode(user)
println(s) // println(s)
assert s.contains('"reg_date":1608621780') assert s.contains('"reg_date":1608621780')
user2 := json.decode(User2, s)? user2 := json.decode(User2, s)?
assert user2.reg_date.str() == '2020-12-22 07:23:00' assert user2.reg_date.str() == '2020-12-22 07:23:00'
println(user2) // println(user2)
println(user2.reg_date) // println(user2.reg_date)
} }
fn (mut u User) foo() string { fn (mut u User) foo() string {
@ -181,7 +181,7 @@ fn test_encode_user() {
} }
expected := '{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"foo"}' expected := '{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"foo"}'
out := json.encode(usr) out := json.encode(usr)
println(out) // println(out)
assert out == expected assert out == expected
// Test json.encode on mutable pointers // Test json.encode on mutable pointers
assert usr.foo() == expected assert usr.foo() == expected
@ -194,7 +194,7 @@ struct Color {
fn test_raw_json_field() { fn test_raw_json_field() {
color := json.decode(Color, '{"space": "YCbCr", "point": {"Y": 123}}') or { color := json.decode(Color, '{"space": "YCbCr", "point": {"Y": 123}}') or {
println('text') // println('text')
return return
} }
assert color.point == '{"Y":123}' assert color.point == '{"Y":123}'
@ -203,7 +203,7 @@ fn test_raw_json_field() {
fn test_bad_raw_json_field() { fn test_bad_raw_json_field() {
color := json.decode(Color, '{"space": "YCbCr"}') or { color := json.decode(Color, '{"space": "YCbCr"}') or {
println('text') // println('text')
return return
} }
assert color.point == '' assert color.point == ''
@ -225,7 +225,7 @@ fn test_struct_in_struct() ? {
assert country.cities.len == 2 assert country.cities.len == 2
assert country.cities[0].name == 'London' assert country.cities[0].name == 'London'
assert country.cities[1].name == 'Manchester' assert country.cities[1].name == 'Manchester'
println(country.cities) // println(country.cities)
} }
fn test_encode_map() { fn test_encode_map() {
@ -237,7 +237,7 @@ fn test_encode_map() {
'four': 4 'four': 4
} }
out := json.encode(numbers) out := json.encode(numbers)
println(out) // println(out)
assert out == expected assert out == expected
} }
@ -249,7 +249,7 @@ fn test_parse_map() ? {
'four': 4 'four': 4
} }
out := json.decode(map[string]int, '{"one":1,"two":2,"three":3,"four":4}')? out := json.decode(map[string]int, '{"one":1,"two":2,"three":3,"four":4}')?
println(out) // println(out)
assert out == expected assert out == expected
} }
@ -306,7 +306,7 @@ fn test_nested_type() ? {
} }
} }
out := json.encode(data) out := json.encode(data)
println(out) // println(out)
assert out == data_expected assert out == data_expected
data2 := json.decode(Data, data_expected)? data2 := json.decode(Data, data_expected)?
assert data2.countries.len == data.countries.len assert data2.countries.len == data.countries.len
@ -351,7 +351,7 @@ fn test_errors() {
invalid_array := fn () { invalid_array := fn () {
data := '{"countries":[{"cities":[{"name":"London"},{"name":"Manchester"}],"name":"UK"},{"cities":{"name":"Donlon"},"name":"KU"}],"users":{"Foo":{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"little foo"},"Boo":{"age":20,"nums":[5,3,1],"lastName":"Smith","IsRegistered":false,"type":4,"pet_animals":"little boo"}},"extra":{"2":{"n1":2,"n2":4,"n3":8,"n4":16},"3":{"n1":3,"n2":9,"n3":27,"n4":81}}}' data := '{"countries":[{"cities":[{"name":"London"},{"name":"Manchester"}],"name":"UK"},{"cities":{"name":"Donlon"},"name":"KU"}],"users":{"Foo":{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"little foo"},"Boo":{"age":20,"nums":[5,3,1],"lastName":"Smith","IsRegistered":false,"type":4,"pet_animals":"little boo"}},"extra":{"2":{"n1":2,"n2":4,"n3":8,"n4":16},"3":{"n1":3,"n2":9,"n3":27,"n4":81}}}'
json.decode(Data, data) or { json.decode(Data, data) or {
println(err) // println(err)
assert err.msg().starts_with('Json element is not an array:') assert err.msg().starts_with('Json element is not an array:')
return return
} }
@ -360,7 +360,7 @@ fn test_errors() {
invalid_object := fn () { invalid_object := fn () {
data := '{"countries":[{"cities":[{"name":"London"},{"name":"Manchester"}],"name":"UK"},{"cities":[{"name":"Donlon"},{"name":"Termanches"}],"name":"KU"}],"users":[{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"little foo"},{"age":20,"nums":[5,3,1],"lastName":"Smith","IsRegistered":false,"type":4,"pet_animals":"little boo"}],"extra":{"2":{"n1":2,"n2":4,"n3":8,"n4":16},"3":{"n1":3,"n2":9,"n3":27,"n4":81}}}' data := '{"countries":[{"cities":[{"name":"London"},{"name":"Manchester"}],"name":"UK"},{"cities":[{"name":"Donlon"},{"name":"Termanches"}],"name":"KU"}],"users":[{"age":10,"nums":[1,2,3],"lastName":"Johnson","IsRegistered":true,"type":0,"pet_animals":"little foo"},{"age":20,"nums":[5,3,1],"lastName":"Smith","IsRegistered":false,"type":4,"pet_animals":"little boo"}],"extra":{"2":{"n1":2,"n2":4,"n3":8,"n4":16},"3":{"n1":3,"n2":9,"n3":27,"n4":81}}}'
json.decode(Data, data) or { json.decode(Data, data) or {
println(err) // println(err)
assert err.msg().starts_with('Json element is not an object:') assert err.msg().starts_with('Json element is not an object:')
return return
} }
@ -425,6 +425,13 @@ fn test_decode_null_object() ? {
assert '$info.maps' == '{}' assert '$info.maps' == '{}'
} }
fn test_decode_missing_maps_field() ? {
info := json.decode(Info, '{"id": 22, "items": null}')?
assert info.id == 22
assert '$info.items' == '[]'
assert '$info.maps' == '{}'
}
struct Foo2 { struct Foo2 {
name string name string
} }
@ -470,7 +477,7 @@ fn create_game_packet(data &GamePacketData) string {
fn test_encode_sumtype_defined_ahead() { fn test_encode_sumtype_defined_ahead() {
ret := create_game_packet(&GamePacketData(GPScale{})) ret := create_game_packet(&GamePacketData(GPScale{}))
println(ret) // println(ret)
assert ret == '{"value":0,"_type":"GPScale"}' assert ret == '{"value":0,"_type":"GPScale"}'
} }

View File

@ -139,6 +139,13 @@ pub fn ones_count_64(x u64) int {
return int(y) & ((1 << 7) - 1) return int(y) & ((1 << 7) - 1)
} }
const (
n8 = u8(8)
n16 = u16(16)
n32 = u32(32)
n64 = u64(64)
)
// --- RotateLeft --- // --- RotateLeft ---
// rotate_left_8 returns the value of x rotated left by (k mod 8) bits. // rotate_left_8 returns the value of x rotated left by (k mod 8) bits.
// To rotate x right by k bits, call rotate_left_8(x, -k). // To rotate x right by k bits, call rotate_left_8(x, -k).
@ -146,9 +153,8 @@ pub fn ones_count_64(x u64) int {
// This function's execution time does not depend on the inputs. // This function's execution time does not depend on the inputs.
[inline] [inline]
pub fn rotate_left_8(x u8, k int) u8 { pub fn rotate_left_8(x u8, k int) u8 {
n := u8(8) s := u8(k) & (bits.n8 - u8(1))
s := u8(k) & (n - u8(1)) return (x << s) | (x >> (bits.n8 - s))
return (x << s) | (x >> (n - s))
} }
// rotate_left_16 returns the value of x rotated left by (k mod 16) bits. // rotate_left_16 returns the value of x rotated left by (k mod 16) bits.
@ -157,9 +163,8 @@ pub fn rotate_left_8(x u8, k int) u8 {
// This function's execution time does not depend on the inputs. // This function's execution time does not depend on the inputs.
[inline] [inline]
pub fn rotate_left_16(x u16, k int) u16 { pub fn rotate_left_16(x u16, k int) u16 {
n := u16(16) s := u16(k) & (bits.n16 - u16(1))
s := u16(k) & (n - u16(1)) return (x << s) | (x >> (bits.n16 - s))
return (x << s) | (x >> (n - s))
} }
// rotate_left_32 returns the value of x rotated left by (k mod 32) bits. // rotate_left_32 returns the value of x rotated left by (k mod 32) bits.
@ -168,9 +173,8 @@ pub fn rotate_left_16(x u16, k int) u16 {
// This function's execution time does not depend on the inputs. // This function's execution time does not depend on the inputs.
[inline] [inline]
pub fn rotate_left_32(x u32, k int) u32 { pub fn rotate_left_32(x u32, k int) u32 {
n := u32(32) s := u32(k) & (bits.n32 - u32(1))
s := u32(k) & (n - u32(1)) return (x << s) | (x >> (bits.n32 - s))
return (x << s) | (x >> (n - s))
} }
// rotate_left_64 returns the value of x rotated left by (k mod 64) bits. // rotate_left_64 returns the value of x rotated left by (k mod 64) bits.
@ -179,9 +183,8 @@ pub fn rotate_left_32(x u32, k int) u32 {
// This function's execution time does not depend on the inputs. // This function's execution time does not depend on the inputs.
[inline] [inline]
pub fn rotate_left_64(x u64, k int) u64 { pub fn rotate_left_64(x u64, k int) u64 {
n := u64(64) s := u64(k) & (bits.n64 - u64(1))
s := u64(k) & (n - u64(1)) return (x << s) | (x >> (bits.n64 - s))
return (x << s) | (x >> (n - s))
} }
// --- Reverse --- // --- Reverse ---

View File

@ -16,9 +16,9 @@ pub fn download_file(url string, out_file_path string) ? {
return error('received http code $s.status_code') return error('received http code $s.status_code')
} }
$if debug_http ? { $if debug_http ? {
println('http.download_file saving $s.text.len bytes') println('http.download_file saving $s.body.len bytes')
} }
os.write_file(out_file_path, s.text)? os.write_file(out_file_path, s.body)?
} }
// TODO: implement download_file_with_progress // TODO: implement download_file_with_progress

View File

@ -161,7 +161,7 @@ pub fn fetch(config FetchConfig) ?Response {
// get_text sends a GET HTTP request to the URL and returns the text content of the response // get_text sends a GET HTTP request to the URL and returns the text content of the response
pub fn get_text(url string) string { pub fn get_text(url string) string {
resp := fetch(url: url, method: .get) or { return '' } resp := fetch(url: url, method: .get) or { return '' }
return resp.text return resp.body
} }
// url_encode_form_data converts mapped data to an URL encoded string // url_encode_form_data converts mapped data to an URL encoded string

View File

@ -25,7 +25,7 @@ fn http_fetch_mock(_methods []string, _config FetchConfig) ?[]Response {
config.method = method_from_str(method) config.method = method_from_str(method)
res := fetch(FetchConfig{ ...config, url: url + lmethod })? res := fetch(FetchConfig{ ...config, url: url + lmethod })?
// TODO // TODO
// body := json.decode(HttpbinResponseBody,res.text)? // body := json.decode(HttpbinResponseBody,res.body)?
result << res result << res
} }
return result return result
@ -49,7 +49,7 @@ fn test_http_fetch_with_data() {
data: 'hello world' data: 'hello world'
) or { panic(err) } ) or { panic(err) }
for response in responses { for response in responses {
payload := json.decode(HttpbinResponseBody, response.text) or { panic(err) } payload := json.decode(HttpbinResponseBody, response.body) or { panic(err) }
assert payload.data == 'hello world' assert payload.data == 'hello world'
} }
} }
@ -65,7 +65,7 @@ fn test_http_fetch_with_params() {
} }
) or { panic(err) } ) or { panic(err) }
for response in responses { for response in responses {
// payload := json.decode(HttpbinResponseBody,response.text) or { // payload := json.decode(HttpbinResponseBody,response.body) or {
// panic(err) // panic(err)
// } // }
assert response.status() == .ok assert response.status() == .ok
@ -85,7 +85,7 @@ fn test_http_fetch_with_headers() ? {
header: header header: header
) or { panic(err) } ) or { panic(err) }
for response in responses { for response in responses {
// payload := json.decode(HttpbinResponseBody,response.text) or { // payload := json.decode(HttpbinResponseBody,response.body) or {
// panic(err) // panic(err)
// } // }
assert response.status() == .ok assert response.status() == .ok

View File

@ -17,9 +17,9 @@ fn test_http_get_from_vlang_utc_now() {
println('Test getting current time from $url by http.get') println('Test getting current time from $url by http.get')
res := http.get(url) or { panic(err) } res := http.get(url) or { panic(err) }
assert res.status() == .ok assert res.status() == .ok
assert res.text.len > 0 assert res.body.len > 0
assert res.text.int() > 1566403696 assert res.body.int() > 1566403696
println('Current time is: $res.text.int()') println('Current time is: $res.body.int()')
} }
} }
@ -39,7 +39,7 @@ fn test_public_servers() {
println('Testing http.get on public url: $url ') println('Testing http.get on public url: $url ')
res := http.get(url) or { panic(err) } res := http.get(url) or { panic(err) }
assert res.status() == .ok assert res.status() == .ok
assert res.text.len > 0 assert res.body.len > 0
} }
} }
@ -51,6 +51,6 @@ fn test_relative_redirects() {
} // tempfix periodic: httpbin relative redirects are broken } // tempfix periodic: httpbin relative redirects are broken
res := http.get('https://httpbin.org/relative-redirect/3?abc=xyz') or { panic(err) } res := http.get('https://httpbin.org/relative-redirect/3?abc=xyz') or { panic(err) }
assert res.status() == .ok assert res.status() == .ok
assert res.text.len > 0 assert res.body.len > 0
assert res.text.contains('"abc": "xyz"') assert res.body.contains('"abc": "xyz"')
} }

View File

@ -0,0 +1,33 @@
import net.http
import json
struct MimeType {
source string
extensions []string
compressible bool
charset string
}
fn main() {
mt_json := http.get('https://raw.githubusercontent.com/jshttp/mime-db/master/db.json')?
mt_map := json.decode(map[string]MimeType, mt_json.text)?
mut ext_to_mt_str := map[string]string{}
for mt_str, mt in mt_map {
for ext in mt.extensions {
ext_to_mt_str[ext] = mt_str
}
}
write_file('db.v', '
module mime
// FILE AUTOGENERATED BY `build.vsh` - DO NOT MANUALLY EDIT
const (
db = $mt_map
ext_to_mt_str = $ext_to_mt_str
)
')?
execute('${@VEXE} fmt -w db.v')
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,39 @@
module mime
pub struct MimeType {
source string
extensions []string
compressible bool
charset string
}
// returns a `MimeType` for the given MIME type
pub fn get_complete_mime_type(mt string) MimeType {
return db[mt]
}
// returns the MIME type for the given file extension
pub fn get_mime_type(ext string) string {
return ext_to_mt_str[ext]
}
// returns a `content-type` header ready to use for the given MIME type
pub fn get_content_type(mt string) string {
mt_struct := db[mt]
charset := if mt_struct.charset.len > 0 { mt_struct.charset.to_lower() } else { 'utf-8' }
return '$mt; charset=$charset'
}
// returns the default extension for the given MIME type
pub fn get_default_ext(mt string) string {
return if db[mt].extensions.len > 0 {
db[mt].extensions[0]
} else {
''
}
}
// returns true if the given MIME type exists
pub fn exists(mt string) bool {
return mt in db
}

View File

@ -0,0 +1,29 @@
module mime
fn test_mime() {
assert get_complete_mime_type('application/json') == MimeType{
source: 'iana'
extensions: ['json', 'map']
compressible: true
charset: 'UTF-8'
}
assert get_mime_type('json') == 'application/json'
assert get_content_type('application/json') == 'application/json; charset=utf-8'
assert get_default_ext('application/json') == 'json'
assert get_complete_mime_type('text/markdown') == MimeType{
source: 'iana'
extensions: ['md', 'markdown']
compressible: true
charset: ''
}
assert get_mime_type('md') == 'text/markdown'
assert get_content_type('text/markdown') == 'text/markdown; charset=utf-8'
assert get_default_ext('text/markdown') == 'md'
assert exists('application/json') == true
assert exists('udfsbsfib') == false
assert get_default_ext('application/1d-interleaved-parityfec') == '' // valid mime type without associated extension
assert get_default_ext('invalid mime type') == '' // invalid mime type
}

View File

@ -9,7 +9,8 @@ import strconv
// Response represents the result of the request // Response represents the result of the request
pub struct Response { pub struct Response {
pub mut: pub mut:
text string body string
text string [deprecated: 'use Response.body instead'; deprecated_after: '2022-10-03']
header Header header Header
status_code int status_code int
status_msg string status_msg string
@ -30,7 +31,7 @@ pub fn (resp Response) bytes() []u8 {
pub fn (resp Response) bytestr() string { pub fn (resp Response) bytestr() string {
return 'HTTP/$resp.http_version $resp.status_code $resp.status_msg\r\n' + '${resp.header.render( return 'HTTP/$resp.http_version $resp.status_code $resp.status_msg\r\n' + '${resp.header.render(
version: resp.version() version: resp.version()
)}\r\n' + '$resp.text' )}\r\n' + '$resp.body'
} }
// Parse a raw HTTP response into a Response object // Parse a raw HTTP response into a Response object
@ -39,16 +40,17 @@ pub fn parse_response(resp string) ?Response {
// Build resp header map and separate the body // Build resp header map and separate the body
start_idx, end_idx := find_headers_range(resp)? start_idx, end_idx := find_headers_range(resp)?
header := parse_headers(resp.substr(start_idx, end_idx))? header := parse_headers(resp.substr(start_idx, end_idx))?
mut text := resp.substr(end_idx, resp.len) mut body := resp.substr(end_idx, resp.len)
if header.get(.transfer_encoding) or { '' } == 'chunked' { if header.get(.transfer_encoding) or { '' } == 'chunked' {
text = chunked.decode(text) body = chunked.decode(body)
} }
return Response{ return Response{
http_version: version http_version: version
status_code: status_code status_code: status_code
status_msg: status_msg status_msg: status_msg
header: header header: header
text: text body: body
text: body // TODO: remove as depreciated
} }
} }
@ -113,18 +115,19 @@ pub struct ResponseConfig {
version Version = .v1_1 version Version = .v1_1
status Status = .ok status Status = .ok
header Header header Header
text string body string
text string [deprecated: 'use ResponseConfig.body instead'; deprecated_after: '2022-10-03']
} }
// new_response creates a Response object from the configuration. This // new_response creates a Response object from the configuration. This
// function will add a Content-Length header if text is not empty. // function will add a Content-Length header if body is not empty.
pub fn new_response(conf ResponseConfig) Response { pub fn new_response(conf ResponseConfig) Response {
mut resp := Response{ mut resp := Response{
text: conf.text body: conf.body + conf.text
header: conf.header header: conf.header
} }
if conf.text.len > 0 && !resp.header.contains(.content_length) { if resp.body.len > 0 && !resp.header.contains(.content_length) {
resp.header.add(.content_length, conf.text.len.str()) resp.header.add(.content_length, resp.body.len.str())
} }
resp.set_status(conf.status) resp.set_status(conf.status)
resp.set_version(conf.version) resp.set_version(conf.version)

View File

@ -4,14 +4,14 @@ fn test_response_bytestr() ? {
{ {
resp := new_response( resp := new_response(
status: .ok status: .ok
text: 'Foo' text: 'Foo' // TODO: replace with `body` once deprecaped
) )
assert resp.bytestr() == 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 3\r\n' + '\r\n' + 'Foo' assert resp.bytestr() == 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 3\r\n' + '\r\n' + 'Foo'
} }
{ {
resp := new_response( resp := new_response(
status: .found status: .found
text: 'Foo' body: 'Foo'
header: new_header(key: .location, value: '/') header: new_header(key: .location, value: '/')
) )
lines := resp.bytestr().split_into_lines() lines := resp.bytestr().split_into_lines()

View File

@ -115,7 +115,7 @@ fn (d DebugHandler) handle(req Request) Response {
eprintln('[$time.now()] $req.method $req.url - 200') eprintln('[$time.now()] $req.method $req.url - 200')
} }
mut r := Response{ mut r := Response{
text: req.data body: req.data
header: req.header header: req.header
} }
r.set_status(.ok) r.set_status(.ok)

View File

@ -41,7 +41,7 @@ fn (mut handler MyHttpHandler) handle(req http.Request) http.Response {
handler.counter++ handler.counter++
// eprintln('$time.now() | counter: $handler.counter | $req.method $req.url\n$req.header\n$req.data - 200 OK\n') // eprintln('$time.now() | counter: $handler.counter | $req.method $req.url\n$req.header\n$req.data - 200 OK\n')
mut r := http.Response{ mut r := http.Response{
text: req.data + ', $req.url' body: req.data + ', $req.url'
header: req.header header: req.header
} }
match req.url.all_before('?') { match req.url.all_before('?') {
@ -72,11 +72,11 @@ fn test_server_custom_handler() ? {
time.sleep(10 * time.millisecond) time.sleep(10 * time.millisecond)
} }
x := http.fetch(url: 'http://localhost:$cport/endpoint?abc=xyz', data: 'my data')? x := http.fetch(url: 'http://localhost:$cport/endpoint?abc=xyz', data: 'my data')?
assert x.text == 'my data, /endpoint?abc=xyz' assert x.body == 'my data, /endpoint?abc=xyz'
assert x.status_code == 200 assert x.status_code == 200
assert x.http_version == '1.1' assert x.http_version == '1.1'
y := http.fetch(url: 'http://localhost:$cport/another/endpoint', data: 'abcde')? y := http.fetch(url: 'http://localhost:$cport/another/endpoint', data: 'abcde')?
assert y.text == 'abcde, /another/endpoint' assert y.body == 'abcde, /another/endpoint'
assert y.status_code == 200 assert y.status_code == 200
assert y.status() == .ok assert y.status() == .ok
assert y.http_version == '1.1' assert y.http_version == '1.1'

View File

@ -102,6 +102,10 @@ fn (kind OrderType) to_str() string {
} }
} }
// Examples for QueryData in SQL: abc == 3 && b == 'test'
// => fields[abc, b]; data[3, 'test']; types[index of int, index of string]; kinds[.eq, .eq]; is_and[true];
// Every field, data, type & kind of operation in the expr share the same index in the arrays
// is_and defines how they're addicted to each other either and or or
pub struct QueryData { pub struct QueryData {
pub: pub:
fields []string fields []string
@ -128,6 +132,17 @@ pub:
attrs []StructAttribute attrs []StructAttribute
} }
// table - Table name
// is_count - Either the data will be returned or an integer with the count
// has_where - Select all or use a where expr
// has_order - Order the results
// order - Name of the column which will be ordered
// order_type - Type of order (asc, desc)
// has_limit - Limits the output data
// primary - Name of the primary field
// has_offset - Add an offset to the result
// fields - Fields to select
// types - Types to select
pub struct SelectConfig { pub struct SelectConfig {
pub: pub:
table string table string
@ -143,6 +158,14 @@ pub:
types []int types []int
} }
// Interfaces gets called from the backend and can be implemented
// Since the orm supports arrays aswell, they have to be returned too.
// A row is represented as []Primitive, where the data is connected to the fields of the struct by their
// index. The indices are mapped with the SelectConfig.field array. This is the mapping for a struct.
// To have an array, there has to be an array of structs, basically [][]Primitive
//
// Every function without last_id() returns an optional, which returns an error if present
// last_id returns the last inserted id of the db
pub interface Connection { pub interface Connection {
@select(config SelectConfig, data QueryData, where QueryData) ?[][]Primitive @select(config SelectConfig, data QueryData, where QueryData) ?[][]Primitive
insert(table string, data QueryData) ? insert(table string, data QueryData) ?
@ -153,7 +176,12 @@ pub interface Connection {
last_id() Primitive last_id() Primitive
} }
pub fn orm_stmt_gen(table string, para string, kind StmtKind, num bool, qm string, start_pos int, data QueryData, where QueryData) string { // Generates an sql stmt, from universal parameter
// q - The quotes character, which can be different in every type, so it's variable
// num - Stmt uses nums at prepared statements (? or ?1)
// qm - Character for prepared statment, qm because of quotation mark like in sqlite
// start_pos - When num is true, it's the start position of the counter
pub fn orm_stmt_gen(table string, q string, kind StmtKind, num bool, qm string, start_pos int, data QueryData, where QueryData) string {
mut str := '' mut str := ''
mut c := start_pos mut c := start_pos
@ -163,7 +191,7 @@ pub fn orm_stmt_gen(table string, para string, kind StmtKind, num bool, qm strin
mut values := []string{} mut values := []string{}
for _ in 0 .. data.fields.len { for _ in 0 .. data.fields.len {
// loop over the length of data.field and generate ?0, ?1 or just ? based on the $num parameter for value placeholders // loop over the length of data.field and generate ?0, ?1 or just ? based on the $num qmeter for value placeholders
if num { if num {
values << '$qm$c' values << '$qm$c'
c++ c++
@ -172,16 +200,16 @@ pub fn orm_stmt_gen(table string, para string, kind StmtKind, num bool, qm strin
} }
} }
str += 'INSERT INTO $para$table$para (' str += 'INSERT INTO $q$table$q ('
str += data.fields.map('$para$it$para').join(', ') str += data.fields.map('$q$it$q').join(', ')
str += ') VALUES (' str += ') VALUES ('
str += values.join(', ') str += values.join(', ')
str += ')' str += ')'
} }
.update { .update {
str += 'UPDATE $para$table$para SET ' str += 'UPDATE $q$table$q SET '
for i, field in data.fields { for i, field in data.fields {
str += '$para$field$para = ' str += '$q$field$q = '
if data.data.len > i { if data.data.len > i {
d := data.data[i] d := data.data[i]
if d is InfixType { if d is InfixType {
@ -217,12 +245,12 @@ pub fn orm_stmt_gen(table string, para string, kind StmtKind, num bool, qm strin
str += ' WHERE ' str += ' WHERE '
} }
.delete { .delete {
str += 'DELETE FROM $para$table$para WHERE ' str += 'DELETE FROM $q$table$q WHERE '
} }
} }
if kind == .update || kind == .delete { if kind == .update || kind == .delete {
for i, field in where.fields { for i, field in where.fields {
str += '$para$field$para ${where.kinds[i].to_str()} $qm' str += '$q$field$q ${where.kinds[i].to_str()} $qm'
if num { if num {
str += '$c' str += '$c'
c++ c++
@ -236,28 +264,32 @@ pub fn orm_stmt_gen(table string, para string, kind StmtKind, num bool, qm strin
return str return str
} }
pub fn orm_select_gen(orm SelectConfig, para string, num bool, qm string, start_pos int, where QueryData) string { // Generates an sql select stmt, from universal parameter
// orm - See SelectConfig
// q, num, qm, start_pos - see orm_stmt_gen
// where - See QueryData
pub fn orm_select_gen(orm SelectConfig, q string, num bool, qm string, start_pos int, where QueryData) string {
mut str := 'SELECT ' mut str := 'SELECT '
if orm.is_count { if orm.is_count {
str += 'COUNT(*)' str += 'COUNT(*)'
} else { } else {
for i, field in orm.fields { for i, field in orm.fields {
str += '$para$field$para' str += '$q$field$q'
if i < orm.fields.len - 1 { if i < orm.fields.len - 1 {
str += ', ' str += ', '
} }
} }
} }
str += ' FROM $para$orm.table$para' str += ' FROM $q$orm.table$q'
mut c := start_pos mut c := start_pos
if orm.has_where { if orm.has_where {
str += ' WHERE ' str += ' WHERE '
for i, field in where.fields { for i, field in where.fields {
str += '$para$field$para ${where.kinds[i].to_str()} $qm' str += '$q$field$q ${where.kinds[i].to_str()} $qm'
if num { if num {
str += '$c' str += '$c'
c++ c++
@ -276,7 +308,7 @@ pub fn orm_select_gen(orm SelectConfig, para string, num bool, qm string, start_
// ordering is *slow*, especially if there are no indexes! // ordering is *slow*, especially if there are no indexes!
if orm.has_order { if orm.has_order {
str += ' ORDER BY ' str += ' ORDER BY '
str += '$para$orm.order$para ' str += '$q$orm.order$q '
str += orm.order_type.to_str() str += orm.order_type.to_str()
} }
@ -300,11 +332,19 @@ pub fn orm_select_gen(orm SelectConfig, para string, num bool, qm string, start_
return str return str
} }
pub fn orm_table_gen(table string, para string, defaults bool, def_unique_len int, fields []TableField, sql_from_v fn (int) ?string, alternative bool) ?string { // Generates an sql table stmt, from universal parameter
mut str := 'CREATE TABLE IF NOT EXISTS $para$table$para (' // table - Table name
// q - see orm_stmt_gen
// defaults - enables default values in stmt
// def_unique_len - sets default unique length for texts
// fields - See TableField
// sql_from_v - Function which maps type indices to sql type names
// alternative - Needed for msdb
pub fn orm_table_gen(table string, q string, defaults bool, def_unique_len int, fields []TableField, sql_from_v fn (int) ?string, alternative bool) ?string {
mut str := 'CREATE TABLE IF NOT EXISTS $q$table$q ('
if alternative { if alternative {
str = 'IF NOT EXISTS (SELECT * FROM sysobjects WHERE name=$para$table$para and xtype=${para}U$para) CREATE TABLE $para$table$para (' str = 'IF NOT EXISTS (SELECT * FROM sysobjects WHERE name=$q$table$q and xtype=${q}U$q) CREATE TABLE $q$table$q ('
} }
mut fs := []string{} mut fs := []string{}
@ -368,7 +408,7 @@ pub fn orm_table_gen(table string, para string, defaults bool, def_unique_len in
if ctyp == '' { if ctyp == '' {
return error('Unknown type ($field.typ) for field $field.name in struct $table') return error('Unknown type ($field.typ) for field $field.name in struct $table')
} }
stmt = '$para$field_name$para $ctyp' stmt = '$q$field_name$q $ctyp'
if defaults && field.default_val != '' { if defaults && field.default_val != '' {
stmt += ' DEFAULT $field.default_val' stmt += ' DEFAULT $field.default_val'
} }
@ -376,7 +416,7 @@ pub fn orm_table_gen(table string, para string, defaults bool, def_unique_len in
stmt += ' NOT NULL' stmt += ' NOT NULL'
} }
if is_unique { if is_unique {
mut f := 'UNIQUE($para$field_name$para' mut f := 'UNIQUE($q$field_name$q'
if ctyp == 'TEXT' && def_unique_len > 0 { if ctyp == 'TEXT' && def_unique_len > 0 {
if unique_len > 0 { if unique_len > 0 {
f += '($unique_len)' f += '($unique_len)'
@ -396,18 +436,19 @@ pub fn orm_table_gen(table string, para string, defaults bool, def_unique_len in
for k, v in unique { for k, v in unique {
mut tmp := []string{} mut tmp := []string{}
for f in v { for f in v {
tmp << '$para$f$para' tmp << '$q$f$q'
} }
fs << '/* $k */UNIQUE(${tmp.join(', ')})' fs << '/* $k */UNIQUE(${tmp.join(', ')})'
} }
} }
fs << 'PRIMARY KEY($para$primary$para)' fs << 'PRIMARY KEY($q$primary$q)'
fs << unique_fields fs << unique_fields
str += fs.join(', ') str += fs.join(', ')
str += ');' str += ');'
return str return str
} }
// Get's the sql field type
fn sql_field_type(field TableField) int { fn sql_field_type(field TableField) int {
mut typ := field.typ mut typ := field.typ
if field.is_time { if field.is_time {
@ -426,6 +467,7 @@ fn sql_field_type(field TableField) int {
return typ return typ
} }
// Get's the sql field name
fn sql_field_name(field TableField) string { fn sql_field_name(field TableField) string {
mut name := field.name mut name := field.name
for attr in field.attrs { for attr in field.attrs {

View File

@ -1,7 +1,7 @@
// import os // import os
// import pg
// import term // import term
import time import time
// import pg
import sqlite import sqlite
struct Module { struct Module {
@ -31,9 +31,12 @@ struct TestTime {
create time.Time create time.Time
} }
fn test_orm_sqlite() { fn test_orm() {
db := sqlite.connect(':memory:') or { panic(err) } db := sqlite.connect(':memory:') or { panic(err) }
db.exec('drop table if exists User') // db.exec('drop table if exists User')
// db := pg.connect(host: 'localhost', port: 5432, user: 'louis', password: 'abc', dbname: 'orm') or { panic(err) }
sql db { sql db {
create table Module create table Module
} }
@ -242,7 +245,7 @@ fn test_orm_sqlite() {
// //
offset_const := 2 offset_const := 2
z := sql db { z := sql db {
select from User limit 2 offset offset_const select from User order by id limit 2 offset offset_const
} }
assert z.len == 2 assert z.len == 2
assert z[0].id == 3 assert z[0].id == 3
@ -264,6 +267,7 @@ fn test_orm_sqlite() {
} }
assert updated_oldest.age == 31 assert updated_oldest.age == 31
// Remove this when pg is used
db.exec('insert into User (name, age) values (NULL, 31)') db.exec('insert into User (name, age) values (NULL, 31)')
null_user := sql db { null_user := sql db {
select from User where id == 5 select from User where id == 5
@ -336,11 +340,18 @@ fn test_orm_sqlite() {
sql db { sql db {
update Module set created = t where id == 1 update Module set created = t where id == 1
} }
updated_time_mod := sql db { updated_time_mod := sql db {
select from Module where id == 1 select from Module where id == 1
} }
// Note: usually updated_time_mod.created != t, because t has // Note: usually updated_time_mod.created != t, because t has
// its microseconds set, while the value retrieved from the DB // its microseconds set, while the value retrieved from the DB
// has them zeroed, because the db field resolution is seconds. // has them zeroed, because the db field resolution is seconds.
assert updated_time_mod.created.format_ss() == t.format_ss() assert updated_time_mod.created.format_ss() == t.format_ss()
sql db {
drop table Module
drop table TestTime
}
} }

View File

@ -123,6 +123,52 @@ pub fn norm_path(path string) string {
return res return res
} }
// existing_path returns the existing part of the given `path`.
// An error is returned if there is no existing part of the given `path`.
pub fn existing_path(path string) ?string {
err := error('path does not exist')
if path.len == 0 {
return err
}
if exists(path) {
return path
}
mut volume_len := 0
$if windows {
volume_len = win_volume_len(path)
}
if volume_len > 0 && is_slash(path[volume_len - 1]) {
volume_len++
}
mut sc := textscanner.new(path[volume_len..])
mut recent_path := path[..volume_len]
for sc.next() != -1 {
curr := u8(sc.current())
peek := sc.peek()
back := sc.peek_back()
if is_curr_dir_ref(back, curr, peek) {
continue
}
range := sc.ilen - sc.remaining() + volume_len
if is_slash(curr) && !is_slash(u8(peek)) {
recent_path = path[..range]
continue
}
if !is_slash(curr) && (peek == -1 || is_slash(u8(peek))) {
curr_path := path[..range]
if exists(curr_path) {
recent_path = curr_path
continue
}
if recent_path.len == 0 {
break
}
return recent_path
}
}
return err
}
// clean_path returns the "cleaned" version of the given `path` // clean_path returns the "cleaned" version of the given `path`
// by turning forward slashes into back slashes // by turning forward slashes into back slashes
// on a Windows system and eliminating: // on a Windows system and eliminating:
@ -144,8 +190,7 @@ fn clean_path(path string) string {
continue continue
} }
// skip reference to current dir (.) // skip reference to current dir (.)
if (back == -1 || is_slash(u8(back))) && curr == os.dot if is_curr_dir_ref(back, curr, peek) {
&& (peek == -1 || is_slash(u8(peek))) {
// skip if the next byte is a path separator // skip if the next byte is a path separator
if peek != -1 && is_slash(u8(peek)) { if peek != -1 && is_slash(u8(peek)) {
sc.skip_n(1) sc.skip_n(1)
@ -246,3 +291,13 @@ fn is_normal_path(path string) bool {
return (plen == 1 && is_slash(path[0])) || (plen >= 2 && is_slash(path[0]) return (plen == 1 && is_slash(path[0])) || (plen >= 2 && is_slash(path[0])
&& !is_slash(path[1])) && !is_slash(path[1]))
} }
// is_curr_dir_ref returns `true` if the 3 given integer construct
// a reference to a current directory (.).
// NOTE: a negative integer means that no byte is present
fn is_curr_dir_ref(byte_one int, byte_two int, byte_three int) bool {
if u8(byte_two) != os.dot {
return false
}
return (byte_one < 0 || is_slash(u8(byte_one))) && (byte_three < 0 || is_slash(u8(byte_three)))
}

View File

@ -36,11 +36,13 @@ fn test_clean_path() {
assert clean_path(r'\./path/dir\\file.exe') == r'\path\dir\file.exe' assert clean_path(r'\./path/dir\\file.exe') == r'\path\dir\file.exe'
assert clean_path(r'.') == '' assert clean_path(r'.') == ''
assert clean_path(r'./') == '' assert clean_path(r'./') == ''
assert clean_path('') == ''
assert clean_path(r'\./') == '\\' assert clean_path(r'\./') == '\\'
assert clean_path(r'//\/\/////') == '\\' assert clean_path(r'//\/\/////') == '\\'
return return
} }
assert clean_path('./../.././././//') == '../..' assert clean_path('./../.././././//') == '../..'
assert clean_path('') == ''
assert clean_path('.') == '' assert clean_path('.') == ''
assert clean_path('./path/to/file.v//./') == 'path/to/file.v' assert clean_path('./path/to/file.v//./') == 'path/to/file.v'
assert clean_path('./') == '' assert clean_path('./') == ''
@ -127,3 +129,26 @@ fn test_abs_path() {
assert abs_path('path/../file.v/..') == wd assert abs_path('path/../file.v/..') == wd
assert abs_path('///') == '/' assert abs_path('///') == '/'
} }
fn test_existing_path() {
wd := getwd()
$if windows {
assert existing_path('') or { '' } == ''
assert existing_path('..') or { '' } == '..'
assert existing_path('.') or { '' } == '.'
assert existing_path(wd) or { '' } == wd
assert existing_path('\\') or { '' } == '\\'
assert existing_path('$wd\\.\\\\does/not/exist\\.\\') or { '' } == '$wd\\.\\\\'
assert existing_path('$wd\\\\/\\.\\.\\/.') or { '' } == '$wd\\\\/\\.\\.\\/.'
assert existing_path('$wd\\././/\\/oh') or { '' } == '$wd\\././/\\/'
return
}
assert existing_path('') or { '' } == ''
assert existing_path('..') or { '' } == '..'
assert existing_path('.') or { '' } == '.'
assert existing_path(wd) or { '' } == wd
assert existing_path('/') or { '' } == '/'
assert existing_path('$wd/does/.///not/exist///.//') or { '' } == '$wd/'
assert existing_path('$wd//././/.//') or { '' } == '$wd//././/.//'
assert existing_path('$wd//././/.//oh') or { '' } == '$wd//././/.//'
}

View File

@ -173,8 +173,20 @@ pub fn is_dir_empty(path string) bool {
// file_ext will return the part after the last occurence of `.` in `path`. // file_ext will return the part after the last occurence of `.` in `path`.
// The `.` is included. // The `.` is included.
// Examples:
// ```v
// assert os.file_ext('file.v') == '.v'
// assert os.file_ext('.ignore_me') == ''
// assert os.file_ext('.') == ''
// ```
pub fn file_ext(path string) string { pub fn file_ext(path string) string {
pos := path.last_index('.') or { return '' } if path.len < 3 {
return empty_str
}
pos := path.last_index(dot_str) or { return empty_str }
if pos + 1 >= path.len || pos == 0 {
return empty_str
}
return path[pos..] return path[pos..]
} }

View File

@ -585,9 +585,19 @@ fn test_is_executable_writable_readable() ? {
os.rm(file_name) or { panic(err) } os.rm(file_name) or { panic(err) }
} }
fn test_ext() { fn test_file_ext() {
assert os.file_ext('file.v') == '.v' assert os.file_ext('file.v') == '.v'
assert os.file_ext('file.js.v') == '.v'
assert os.file_ext('file.ext1.ext2.ext3') == '.ext3'
assert os.file_ext('.ignore_me.v') == '.v'
assert os.file_ext('file') == '' assert os.file_ext('file') == ''
assert os.file_ext('.git') == ''
assert os.file_ext('file.') == ''
assert os.file_ext('.') == ''
assert os.file_ext('..') == ''
assert os.file_ext('file...') == ''
assert os.file_ext('.file.') == ''
assert os.file_ext('..file..') == ''
} }
fn test_join() { fn test_join() {

View File

@ -8,9 +8,13 @@ import net.conv
pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.QueryData) ?[][]orm.Primitive { pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.QueryData) ?[][]orm.Primitive {
query := orm.orm_select_gen(config, '"', true, '$', 1, where) query := orm.orm_select_gen(config, '"', true, '$', 1, where)
res := pg_stmt_worker(db, query, where, data)?
mut ret := [][]orm.Primitive{} mut ret := [][]orm.Primitive{}
res := pg_stmt_worker(db, query, orm.QueryData{}, where)? if config.is_count {
}
for row in res { for row in res {
mut row_data := []orm.Primitive{} mut row_data := []orm.Primitive{}
@ -166,7 +170,9 @@ fn pg_stmt_match(mut types []u32, mut vals []&char, mut lens []int, mut formats
} }
time.Time { time.Time {
types << u32(Oid.t_int4) types << u32(Oid.t_int4)
vals << &char(&int(data.unix)) unix := int(data.unix)
num := conv.htn32(unsafe { &u32(&unix) })
vals << &char(&num)
lens << int(sizeof(u32)) lens << int(sizeof(u32))
formats << 1 formats << 1
} }
@ -178,19 +184,22 @@ fn pg_stmt_match(mut types []u32, mut vals []&char, mut lens []int, mut formats
fn pg_type_from_v(typ int) ?string { fn pg_type_from_v(typ int) ?string {
str := match typ { str := match typ {
6, 10 { orm.type_idx['i8'], orm.type_idx['i16'], orm.type_idx['byte'], orm.type_idx['u16'] {
'SMALLINT' 'SMALLINT'
} }
7, 11, orm.time { orm.type_idx['bool'] {
'BOOLEAN'
}
orm.type_idx['int'], orm.type_idx['u32'], orm.time {
'INT' 'INT'
} }
8, 12 { orm.type_idx['i64'], orm.type_idx['u64'] {
'BIGINT' 'BIGINT'
} }
13 { orm.float[0] {
'REAL' 'REAL'
} }
14 { orm.float[1] {
'DOUBLE PRECISION' 'DOUBLE PRECISION'
} }
orm.string { orm.string {
@ -212,54 +221,51 @@ fn pg_type_from_v(typ int) ?string {
fn str_to_primitive(str string, typ int) ?orm.Primitive { fn str_to_primitive(str string, typ int) ?orm.Primitive {
match typ { match typ {
// bool // bool
16 { orm.type_idx['bool'] {
return orm.Primitive(str.i8() == 1)
}
18 {
return orm.Primitive(str == 't') return orm.Primitive(str == 't')
} }
// i8 // i8
5 { orm.type_idx['i8'] {
return orm.Primitive(str.i8()) return orm.Primitive(str.i8())
} }
// i16 // i16
6 { orm.type_idx['i16'] {
return orm.Primitive(str.i16()) return orm.Primitive(str.i16())
} }
// int // int
7 { orm.type_idx['int'] {
return orm.Primitive(str.int()) return orm.Primitive(str.int())
} }
// i64 // i64
8 { orm.type_idx['i64'] {
return orm.Primitive(str.i64()) return orm.Primitive(str.i64())
} }
// byte // byte
9 { orm.type_idx['byte'] {
data := str.i8() data := str.i8()
return orm.Primitive(*unsafe { &u8(&data) }) return orm.Primitive(*unsafe { &u8(&data) })
} }
// u16 // u16
10 { orm.type_idx['u16'] {
data := str.i16() data := str.i16()
return orm.Primitive(*unsafe { &u16(&data) }) return orm.Primitive(*unsafe { &u16(&data) })
} }
// u32 // u32
11 { orm.type_idx['u32'] {
data := str.int() data := str.int()
return orm.Primitive(*unsafe { &u32(&data) }) return orm.Primitive(*unsafe { &u32(&data) })
} }
// u64 // u64
12 { orm.type_idx['u64'] {
data := str.i64() data := str.i64()
return orm.Primitive(*unsafe { &u64(&data) }) return orm.Primitive(*unsafe { &u64(&data) })
} }
// f32 // f32
13 { orm.type_idx['f32'] {
return orm.Primitive(str.f32()) return orm.Primitive(str.f32())
} }
// f64 // f64
14 { orm.type_idx['f64'] {
return orm.Primitive(str.f64()) return orm.Primitive(str.f64())
} }
orm.string { orm.string {

View File

@ -6,6 +6,7 @@ import time
// sql expr // sql expr
pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.QueryData) ?[][]orm.Primitive { pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.QueryData) ?[][]orm.Primitive {
// 1. Create query and bind necessary data
query := orm.orm_select_gen(config, '`', true, '?', 1, where) query := orm.orm_select_gen(config, '`', true, '?', 1, where)
stmt := db.new_init_stmt(query)? stmt := db.new_init_stmt(query)?
mut c := 1 mut c := 1
@ -19,6 +20,7 @@ pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.Qu
mut ret := [][]orm.Primitive{} mut ret := [][]orm.Primitive{}
if config.is_count { if config.is_count {
// 2. Get count of returned values & add it to ret array
step := stmt.step() step := stmt.step()
if step !in [sqlite_row, sqlite_ok, sqlite_done] { if step !in [sqlite_row, sqlite_ok, sqlite_done] {
return db.error_message(step, query) return db.error_message(step, query)
@ -28,6 +30,7 @@ pub fn (db DB) @select(config orm.SelectConfig, data orm.QueryData, where orm.Qu
return ret return ret
} }
for { for {
// 2. Parse returned values
step := stmt.step() step := stmt.step()
if step == sqlite_done { if step == sqlite_done {
break break
@ -83,6 +86,7 @@ pub fn (db DB) drop(table string) ? {
// helper // helper
// Executes query and bind prepared statement data directly
fn sqlite_stmt_worker(db DB, query string, data orm.QueryData, where orm.QueryData) ? { fn sqlite_stmt_worker(db DB, query string, data orm.QueryData, where orm.QueryData) ? {
stmt := db.new_init_stmt(query)? stmt := db.new_init_stmt(query)?
mut c := 1 mut c := 1
@ -92,6 +96,7 @@ fn sqlite_stmt_worker(db DB, query string, data orm.QueryData, where orm.QueryDa
stmt.finalize() stmt.finalize()
} }
// Binds all values of d in the prepared statement
fn sqlite_stmt_binder(stmt Stmt, d orm.QueryData, query string, mut c &int) ? { fn sqlite_stmt_binder(stmt Stmt, d orm.QueryData, query string, mut c &int) ? {
for data in d.data { for data in d.data {
err := bind(stmt, c, data) err := bind(stmt, c, data)
@ -103,6 +108,7 @@ fn sqlite_stmt_binder(stmt Stmt, d orm.QueryData, query string, mut c &int) ? {
} }
} }
// Universal bind function
fn bind(stmt Stmt, c &int, data orm.Primitive) int { fn bind(stmt Stmt, c &int, data orm.Primitive) int {
mut err := 0 mut err := 0
match data { match data {
@ -128,6 +134,7 @@ fn bind(stmt Stmt, c &int, data orm.Primitive) int {
return err return err
} }
// Selects column in result and converts it to an orm.Primitive
fn (stmt Stmt) sqlite_select_column(idx int, typ int) ?orm.Primitive { fn (stmt Stmt) sqlite_select_column(idx int, typ int) ?orm.Primitive {
mut primitive := orm.Primitive(0) mut primitive := orm.Primitive(0)
@ -149,6 +156,7 @@ fn (stmt Stmt) sqlite_select_column(idx int, typ int) ?orm.Primitive {
return primitive return primitive
} }
// Convert type int to sql type string
fn sqlite_type_from_v(typ int) ?string { fn sqlite_type_from_v(typ int) ?string {
return if typ in orm.nums || typ < 0 || typ in orm.num64 || typ == orm.time { return if typ in orm.nums || typ < 0 || typ in orm.num64 || typ == orm.time {
'INTEGER' 'INTEGER'

View File

@ -137,7 +137,7 @@ pub fn format_dec_sb(d u64, p BF_param, mut res strings.Builder) {
// f64_to_str_lnd1 formats a f64 to a `string` with `dec_digit` digits after the dot. // f64_to_str_lnd1 formats a f64 to a `string` with `dec_digit` digits after the dot.
[direct_array_access; manualfree] [direct_array_access; manualfree]
fn f64_to_str_lnd1(f f64, dec_digit int) string { pub fn f64_to_str_lnd1(f f64, dec_digit int) string {
unsafe { unsafe {
// we add the rounding value // we add the rounding value
s := f64_to_str(f + dec_round[dec_digit], 18) s := f64_to_str(f + dec_round[dec_digit], 18)

View File

@ -172,12 +172,9 @@ pub fn (b &Builder) after(n int) string {
} }
// str returns a copy of all of the accumulated buffer content. // str returns a copy of all of the accumulated buffer content.
// Note: after a call to b.str(), the builder b should not be // Note: after a call to b.str(), the builder b will be empty, and could be used again.
// used again, you need to call b.free() first, or just leave // The returned string *owns* its own separate copy of the accumulated data that was in
// it to be freed by -autofree when it goes out of scope. // the string builder, before the .str() call.
// The returned string *owns* its own separate copy of the
// accumulated data that was in the string builder, before the
// .str() call.
pub fn (mut b Builder) str() string { pub fn (mut b Builder) str() string {
b << u8(0) b << u8(0)
bcopy := unsafe { &u8(memdup_noscan(b.data, b.len)) } bcopy := unsafe { &u8(memdup_noscan(b.data, b.len)) }
@ -208,7 +205,8 @@ pub fn (mut b Builder) ensure_cap(n int) {
} }
} }
// free is for manually freeing the contents of the buffer // free frees the memory block, used for the buffer.
// Note: do not use the builder, after a call to free().
[unsafe] [unsafe]
pub fn (mut b Builder) free() { pub fn (mut b Builder) free() {
if b.data != 0 { if b.data != 0 {

View File

@ -16,6 +16,7 @@ module term
// y is the y coordinate // y is the y coordinate
pub fn set_cursor_position(c Coord) { pub fn set_cursor_position(c Coord) {
print('\x1b[$c.y;$c.x' + 'H') print('\x1b[$c.y;$c.x' + 'H')
flush_stdout()
} }
// n is number of cells // n is number of cells
@ -25,6 +26,7 @@ pub fn set_cursor_position(c Coord) {
// direction: D is backward / West // direction: D is backward / West
pub fn move(n int, direction string) { pub fn move(n int, direction string) {
print('\x1b[$n$direction') print('\x1b[$n$direction')
flush_stdout()
} }
pub fn cursor_up(n int) { pub fn cursor_up(n int) {
@ -50,6 +52,7 @@ pub fn cursor_back(n int) {
pub fn erase_display(t string) { pub fn erase_display(t string) {
print('\x1b[' + t + 'J') print('\x1b[' + t + 'J')
flush_stdout()
} }
pub fn erase_toend() { pub fn erase_toend() {
@ -63,6 +66,7 @@ pub fn erase_tobeg() {
// clears entire screen and returns cursor to top left-corner // clears entire screen and returns cursor to top left-corner
pub fn erase_clear() { pub fn erase_clear() {
print('\033[H\033[J') print('\033[H\033[J')
flush_stdout()
} }
pub fn erase_del_clear() { pub fn erase_del_clear() {
@ -75,6 +79,7 @@ pub fn erase_del_clear() {
// Note: Cursor position does not change // Note: Cursor position does not change
pub fn erase_line(t string) { pub fn erase_line(t string) {
print('\x1b[' + t + 'K') print('\x1b[' + t + 'K')
flush_stdout()
} }
pub fn erase_line_toend() { pub fn erase_line_toend() {
@ -92,11 +97,13 @@ pub fn erase_line_clear() {
// Will make cursor appear if not visible // Will make cursor appear if not visible
pub fn show_cursor() { pub fn show_cursor() {
print('\x1b[?25h') print('\x1b[?25h')
flush_stdout()
} }
// Will make cursor invisible // Will make cursor invisible
pub fn hide_cursor() { pub fn hide_cursor() {
print('\x1b[?25l') print('\x1b[?25l')
flush_stdout()
} }
// clear_previous_line - useful for progressbars. // clear_previous_line - useful for progressbars.
@ -105,4 +112,5 @@ pub fn hide_cursor() {
// the previous content. // the previous content.
pub fn clear_previous_line() { pub fn clear_previous_line() {
print('\r\x1b[1A\x1b[2K') print('\r\x1b[1A\x1b[2K')
flush_stdout()
} }

View File

@ -52,6 +52,7 @@ pub fn get_cursor_position() ?Coord {
unsafe { C.tcsetattr(0, C.TCSANOW, &state) } unsafe { C.tcsetattr(0, C.TCSANOW, &state) }
print('\e[6n') print('\e[6n')
flush_stdout()
mut x := 0 mut x := 0
mut y := 0 mut y := 0
@ -87,6 +88,7 @@ pub fn set_terminal_title(title string) bool {
print('\033]0') print('\033]0')
print(title) print(title)
print('\007') print('\007')
flush_stdout()
return true return true
} }
@ -94,4 +96,5 @@ pub fn set_terminal_title(title string) bool {
pub fn clear() { pub fn clear() {
print('\x1b[2J') print('\x1b[2J')
print('\x1b[H') print('\x1b[H')
flush_stdout()
} }

View File

@ -32,12 +32,14 @@ pub fn init(cfg Config) &Context {
fn save_title() { fn save_title() {
// restore the previously saved terminal title // restore the previously saved terminal title
print('\x1b[22;0t') print('\x1b[22;0t')
flush_stdout()
} }
[inline] [inline]
fn load_title() { fn load_title() {
// restore the previously saved terminal title // restore the previously saved terminal title
print('\x1b[23;0t') print('\x1b[23;0t')
flush_stdout()
} }
pub fn (mut ctx Context) run() ? { pub fn (mut ctx Context) run() ? {

View File

@ -26,6 +26,7 @@ fn restore_terminal_state() {
// clear the terminal and set the cursor to the origin // clear the terminal and set the cursor to the origin
print('\x1b[2J\x1b[3J') print('\x1b[2J\x1b[3J')
print('\x1b[?1049l') print('\x1b[?1049l')
flush_stdout()
} }
C.SetConsoleMode(ui.ctx_ptr.stdin_handle, ui.stdin_at_startup) C.SetConsoleMode(ui.ctx_ptr.stdin_handle, ui.stdin_at_startup)
} }
@ -65,6 +66,7 @@ pub fn init(cfg Config) &Context {
print('\x1b[?1049h') print('\x1b[?1049h')
// clear the terminal and set the cursor to the origin // clear the terminal and set the cursor to the origin
print('\x1b[2J\x1b[3J\x1b[1;1H') print('\x1b[2J\x1b[3J\x1b[1;1H')
flush_stdout()
} }
if ctx.cfg.hide_cursor { if ctx.cfg.hide_cursor {
@ -74,6 +76,7 @@ pub fn init(cfg Config) &Context {
if ctx.cfg.window_title != '' { if ctx.cfg.window_title != '' {
print('\x1b]0;$ctx.cfg.window_title\x07') print('\x1b]0;$ctx.cfg.window_title\x07')
flush_stdout()
} }
unsafe { unsafe {
@ -317,10 +320,12 @@ fn (mut ctx Context) parse_events() {
fn save_title() { fn save_title() {
// restore the previously saved terminal title // restore the previously saved terminal title
print('\x1b[22;0t') print('\x1b[22;0t')
flush_stdout()
} }
[inline] [inline]
fn load_title() { fn load_title() {
// restore the previously saved terminal title // restore the previously saved terminal title
print('\x1b[23;0t') print('\x1b[23;0t')
flush_stdout()
} }

View File

@ -79,6 +79,7 @@ fn (mut ctx Context) termios_setup() ? {
if ctx.cfg.window_title != '' { if ctx.cfg.window_title != '' {
print('\x1b]0;$ctx.cfg.window_title\x07') print('\x1b]0;$ctx.cfg.window_title\x07')
flush_stdout()
} }
if !ctx.cfg.skip_init_checks { if !ctx.cfg.skip_init_checks {
@ -90,6 +91,7 @@ fn (mut ctx Context) termios_setup() ? {
// feature-test the SU spec // feature-test the SU spec
sx, sy := get_cursor_position() sx, sy := get_cursor_position()
print('$bsu$esu') print('$bsu$esu')
flush_stdout()
ex, ey := get_cursor_position() ex, ey := get_cursor_position()
if sx == ex && sy == ey { if sx == ex && sy == ey {
// the terminal either ignored or handled the sequence properly, enable SU // the terminal either ignored or handled the sequence properly, enable SU
@ -108,11 +110,14 @@ fn (mut ctx Context) termios_setup() ? {
C.tcsetattr(C.STDIN_FILENO, C.TCSAFLUSH, &termios) C.tcsetattr(C.STDIN_FILENO, C.TCSAFLUSH, &termios)
// enable mouse input // enable mouse input
print('\x1b[?1003h\x1b[?1006h') print('\x1b[?1003h\x1b[?1006h')
flush_stdout()
if ctx.cfg.use_alternate_buffer { if ctx.cfg.use_alternate_buffer {
// switch to the alternate buffer // switch to the alternate buffer
print('\x1b[?1049h') print('\x1b[?1049h')
flush_stdout()
// clear the terminal and set the cursor to the origin // clear the terminal and set the cursor to the origin
print('\x1b[2J\x1b[3J\x1b[1;1H') print('\x1b[2J\x1b[3J\x1b[1;1H')
flush_stdout()
} }
ctx.window_height, ctx.window_width = get_terminal_size() ctx.window_height, ctx.window_width = get_terminal_size()
@ -162,6 +167,7 @@ fn (mut ctx Context) termios_setup() ? {
fn get_cursor_position() (int, int) { fn get_cursor_position() (int, int) {
print('\033[6n') print('\033[6n')
flush_stdout()
mut s := '' mut s := ''
unsafe { unsafe {
buf := malloc_noscan(25) buf := malloc_noscan(25)
@ -183,8 +189,10 @@ fn supports_truecolor() bool {
} }
// set the bg color to some arbirtrary value (#010203), assumed not to be the default // set the bg color to some arbirtrary value (#010203), assumed not to be the default
print('\x1b[48:2:1:2:3m') print('\x1b[48:2:1:2:3m')
flush_stdout()
// andquery the current color // andquery the current color
print('\x1bP\$qm\x1b\\') print('\x1bP\$qm\x1b\\')
flush_stdout()
mut s := '' mut s := ''
unsafe { unsafe {
buf := malloc_noscan(25) buf := malloc_noscan(25)
@ -199,6 +207,7 @@ fn termios_reset() {
// C.TCSANOW ?? // C.TCSANOW ??
C.tcsetattr(C.STDIN_FILENO, C.TCSAFLUSH, &ui.termios_at_startup) C.tcsetattr(C.STDIN_FILENO, C.TCSAFLUSH, &ui.termios_at_startup)
print('\x1b[?1003l\x1b[?1006l\x1b[?25h') print('\x1b[?1003l\x1b[?1006l\x1b[?25h')
flush_stdout()
c := ctx_ptr c := ctx_ptr
if unsafe { c != 0 } && c.cfg.use_alternate_buffer { if unsafe { c != 0 } && c.cfg.use_alternate_buffer {
print('\x1b[?1049l') print('\x1b[?1049l')

View File

@ -116,6 +116,7 @@ pub fn (mut ctx Context) clear() {
[inline] [inline]
pub fn (mut ctx Context) set_window_title(s string) { pub fn (mut ctx Context) set_window_title(s string) {
print('\x1b]0;$s\x07') print('\x1b]0;$s\x07')
flush_stdout()
} }
// draw_point draws a point at position `x`,`y`. // draw_point draws a point at position `x`,`y`.

View File

@ -79,6 +79,10 @@ To query for a value that might not be in the document you
can use the `.default_to(...)` function to provide a can use the `.default_to(...)` function to provide a
default value. default value.
For cases where a default value might not be appropiate or
to check if a value exists you can use `doc.value_opt('query')?`
instead.
```v ```v
import toml import toml
@ -100,8 +104,15 @@ assert doc.value('table.array[0].a').string() == 'A'
// Provides a default value // Provides a default value
assert doc.value('non.existing').default_to(false).bool() == false assert doc.value('non.existing').default_to(false).bool() == false
// Check if value exist
// doc.value_opt('should.exist') or { ... }
// or
if value := doc.value_opt('table.array[1].b') {
assert value.string() == 'B'
}
// You can pass parts of the TOML document around // You can pass parts of the TOML document around
// and still use .value() to get the values // and still use .value()/.value_opt() to get the values
arr := doc.value('table.array') arr := doc.value('table.array')
assert arr.value('[1].b').string() == 'B' assert arr.value('[1].b').string() == 'B'
``` ```

View File

@ -281,24 +281,35 @@ pub fn (a []Any) to_toml() string {
// quoted keys are supported as `a."b.c"` or `a.'b.c'`. // quoted keys are supported as `a."b.c"` or `a.'b.c'`.
// Arrays can be queried with `a[0].b[1].[2]`. // Arrays can be queried with `a[0].b[1].[2]`.
pub fn (a Any) value(key string) Any { pub fn (a Any) value(key string) Any {
key_split := parse_dotted_key(key) or { return Any(Null{}) } key_split := parse_dotted_key(key) or { return null }
return a.value_(a, key_split) return a.value_(a, key_split)
} }
pub fn (a Any) value_opt(key string) ?Any {
key_split := parse_dotted_key(key) or { return error('invalid dotted key') }
x := a.value_(a, key_split)
if x is Null {
return error('no value for key')
}
return x
}
// value_ returns the `Any` value found at `key`. // value_ returns the `Any` value found at `key`.
fn (a Any) value_(value Any, key []string) Any { fn (a Any) value_(value Any, key []string) Any {
assert key.len > 0 if key.len == 0 {
mut any_value := Any(Null{}) return null
}
mut any_value := null
k, index := parse_array_key(key[0]) k, index := parse_array_key(key[0])
if k == '' { if k == '' {
arr := value as []Any arr := value as []Any
any_value = arr[index] or { return Any(Null{}) } any_value = arr[index] or { return null }
} }
if value is map[string]Any { if value is map[string]Any {
any_value = value[k] or { return Any(Null{}) } any_value = value[k] or { return null }
if index > -1 { if index > -1 {
arr := any_value as []Any arr := any_value as []Any
any_value = arr[index] or { return Any(Null{}) } any_value = arr[index] or { return null }
} }
} }
if key.len <= 1 { if key.len <= 1 {

View File

@ -2,11 +2,12 @@ import os
import toml import toml
import toml.to import toml.to
fn test_keys() { fn path_by_extension(ext string) string {
toml_file := return os.join_path(os.dir(@VEXE), 'vlib/toml/tests/testdata/key_test.$ext')
os.real_path(os.join_path(os.dir(@FILE), 'testdata', os.file_name(@FILE).all_before_last('.'))) + }
'.toml'
toml_doc := toml.parse_file(toml_file) or { panic(err) } fn test_keys() ? {
toml_doc := toml.parse_file(path_by_extension('toml'))?
mut value := toml_doc.value('34-11') mut value := toml_doc.value('34-11')
assert value.int() == 23 assert value.int() == 23
@ -18,10 +19,30 @@ fn test_keys() {
assert value.int() == 42 assert value.int() == 42
toml_json := to.json(toml_doc) toml_json := to.json(toml_doc)
out_file := out_file_json := os.read_file(path_by_extension('out'))?
os.real_path(os.join_path(os.dir(@FILE), 'testdata', os.file_name(@FILE).all_before_last('.'))) +
'.out'
out_file_json := os.read_file(out_file) or { panic(err) }
println(toml_json) println(toml_json)
assert toml_json == out_file_json assert toml_json == out_file_json
//
if x := toml_doc.value_opt('unknown key') {
assert false
} else {
assert err.msg() == 'no value for key'
}
if x := toml_doc.value_opt("'a") {
assert false
} else {
assert err.msg() == 'invalid dotted key'
}
}
fn test_parse_dotted_key() ? {
assert toml.parse_dotted_key('')? == []
assert toml.parse_dotted_key('abc')? == ['abc']
assert toml.parse_dotted_key('tube.test."test.test".h."i.j."."k"')? == ['tube', 'test',
'test.test', 'h', 'i.j.', 'k']
if x := toml.parse_dotted_key("'some unclosed string") {
assert false
} else {
assert err.msg().starts_with('parse_dotted_key: could not parse key, missing closing string delimiter')
}
} }

View File

@ -201,26 +201,39 @@ pub fn (d Doc) reflect<T>() T {
// quoted keys are supported as `a."b.c"` or `a.'b.c'`. // quoted keys are supported as `a."b.c"` or `a.'b.c'`.
// Arrays can be queried with `a[0].b[1].[2]`. // Arrays can be queried with `a[0].b[1].[2]`.
pub fn (d Doc) value(key string) Any { pub fn (d Doc) value(key string) Any {
key_split := parse_dotted_key(key) or { return Any(Null{}) } key_split := parse_dotted_key(key) or { return toml.null }
return d.value_(d.ast.table, key_split) return d.value_(d.ast.table, key_split)
} }
pub const null = Any(Null{})
pub fn (d Doc) value_opt(key string) ?Any {
key_split := parse_dotted_key(key) or { return error('invalid dotted key') }
x := d.value_(d.ast.table, key_split)
if x is Null {
return error('no value for key')
}
return x
}
// value_ returns the value found at `key` in the map `values` as `Any` type. // value_ returns the value found at `key` in the map `values` as `Any` type.
fn (d Doc) value_(value ast.Value, key []string) Any { fn (d Doc) value_(value ast.Value, key []string) Any {
assert key.len > 0 if key.len == 0 {
return toml.null
}
mut ast_value := ast.Value(ast.Null{}) mut ast_value := ast.Value(ast.Null{})
k, index := parse_array_key(key[0]) k, index := parse_array_key(key[0])
if k == '' { if k == '' {
a := value as []ast.Value a := value as []ast.Value
ast_value = a[index] or { return Any(Null{}) } ast_value = a[index] or { return toml.null }
} }
if value is map[string]ast.Value { if value is map[string]ast.Value {
ast_value = value[k] or { return Any(Null{}) } ast_value = value[k] or { return toml.null }
if index > -1 { if index > -1 {
a := ast_value as []ast.Value a := ast_value as []ast.Value
ast_value = a[index] or { return Any(Null{}) } ast_value = a[index] or { return toml.null }
} }
} }
@ -298,11 +311,11 @@ pub fn ast_to_any(value ast.Value) Any {
return aa return aa
} }
else { else {
return Any(Null{}) return toml.null
} }
} }
return Any(Null{}) return toml.null
// TODO decide this // TODO decide this
// panic(@MOD + '.' + @STRUCT + '.' + @FN + ' can\'t convert "$value"') // panic(@MOD + '.' + @STRUCT + '.' + @FN + ' can\'t convert "$value"')
// return Any('') // return Any('')

View File

@ -304,6 +304,10 @@ pub:
is_mut bool is_mut bool
is_global bool is_global bool
is_volatile bool is_volatile bool
//
is_deprecated bool
deprecation_msg string
deprecated_after string
pub mut: pub mut:
default_expr Expr default_expr Expr
default_expr_typ Type default_expr_typ Type
@ -1821,9 +1825,9 @@ pub fn (expr Expr) is_expr() bool {
return true return true
} }
pub fn (expr Expr) is_lit() bool { pub fn (expr Expr) is_pure_literal() bool {
return match expr { return match expr {
BoolLiteral, CharLiteral, StringLiteral, IntegerLiteral { true } BoolLiteral, CharLiteral, FloatLiteral, StringLiteral, IntegerLiteral { true }
else { false } else { false }
} }
} }

View File

@ -303,7 +303,13 @@ pub fn (x Expr) str() string {
} }
CallExpr { CallExpr {
sargs := args2str(x.args) sargs := args2str(x.args)
propagate_suffix := if x.or_block.kind == .propagate_option { ' ?' } else { '' } propagate_suffix := if x.or_block.kind == .propagate_option {
'?'
} else if x.or_block.kind == .propagate_result {
'!'
} else {
''
}
if x.is_method { if x.is_method {
return '${x.left.str()}.${x.name}($sargs)$propagate_suffix' return '${x.left.str()}.${x.name}($sargs)$propagate_suffix'
} }
@ -357,6 +363,8 @@ pub fn (x Expr) str() string {
} }
if i < x.branches.len - 1 || !x.has_else { if i < x.branches.len - 1 || !x.has_else {
parts << ' ${dollar}if ' + branch.cond.str() + ' { ' parts << ' ${dollar}if ' + branch.cond.str() + ' { '
} else if x.has_else && i == x.branches.len - 1 {
parts << '{ '
} }
for stmt in branch.stmts { for stmt in branch.stmts {
parts << stmt.str() parts << stmt.str()

View File

@ -45,6 +45,8 @@ pub mut:
mdeprecated_after map[string]time.Time // module deprecation date mdeprecated_after map[string]time.Time // module deprecation date
builtin_pub_fns map[string]bool builtin_pub_fns map[string]bool
pointer_size int pointer_size int
// cache for type_to_str_using_aliases
cached_type_to_str map[u64]string
} }
// used by vls to avoid leaks // used by vls to avoid leaks

View File

@ -1127,8 +1127,17 @@ pub fn (t &Table) clean_generics_type_str(typ Type) string {
// import_aliases is a map of imported symbol aliases 'module.Type' => 'Type' // import_aliases is a map of imported symbol aliases 'module.Type' => 'Type'
pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string]string) string { pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string]string) string {
cache_key := (u64(import_aliases.len) << 32) | u64(typ)
if cached_res := t.cached_type_to_str[cache_key] {
return cached_res
}
sym := t.sym(typ) sym := t.sym(typ)
mut res := sym.name mut res := sym.name
mut mt := unsafe { &Table(t) }
defer {
// Note, that this relies on `res = value return res` if you want to return early!
mt.cached_type_to_str[cache_key] = res
}
// Note, that the duplication of code in some of the match branches here // Note, that the duplication of code in some of the match branches here
// is VERY deliberate. DO NOT be tempted to use `else {}` instead, because // is VERY deliberate. DO NOT be tempted to use `else {}` instead, because
// that strongly reduces the usefullness of the exhaustive checking that // that strongly reduces the usefullness of the exhaustive checking that
@ -1147,7 +1156,8 @@ pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string]
} }
.array { .array {
if typ == ast.array_type { if typ == ast.array_type {
return 'array' res = 'array'
return res
} }
if typ.has_flag(.variadic) { if typ.has_flag(.variadic) {
res = t.type_to_str_using_aliases(t.value_type(typ), import_aliases) res = t.type_to_str_using_aliases(t.value_type(typ), import_aliases)
@ -1202,7 +1212,8 @@ pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string]
} }
.map { .map {
if int(typ) == ast.map_type_idx { if int(typ) == ast.map_type_idx {
return 'map' res = 'map'
return res
} }
info := sym.info as Map info := sym.info as Map
key_str := t.type_to_str_using_aliases(info.key_type, import_aliases) key_str := t.type_to_str_using_aliases(info.key_type, import_aliases)
@ -1257,12 +1268,15 @@ pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string]
} }
.void { .void {
if typ.has_flag(.optional) { if typ.has_flag(.optional) {
return '?' res = '?'
return res
} }
if typ.has_flag(.result) { if typ.has_flag(.result) {
return '!' res = '!'
return res
} }
return 'void' res = 'void'
return res
} }
.thread { .thread {
rtype := sym.thread_info().return_type rtype := sym.thread_info().return_type

View File

@ -17,7 +17,7 @@ import v.dotgraph
pub struct Builder { pub struct Builder {
pub: pub:
compiled_dir string // contains os.real_path() of the dir of the final file beeing compiled, or the dir itself when doing `v .` compiled_dir string // contains os.real_path() of the dir of the final file being compiled, or the dir itself when doing `v .`
module_path string module_path string
pub mut: pub mut:
checker &checker.Checker checker &checker.Checker
@ -40,6 +40,7 @@ pub mut:
mod_invalidates_paths map[string][]string // changes in mod `os`, invalidate only .v files, that do `import os` mod_invalidates_paths map[string][]string // changes in mod `os`, invalidate only .v files, that do `import os`
mod_invalidates_mods map[string][]string // changes in mod `os`, force invalidation of mods, that do `import os` mod_invalidates_mods map[string][]string // changes in mod `os`, force invalidation of mods, that do `import os`
path_invalidates_mods map[string][]string // changes in a .v file from `os`, invalidates `os` path_invalidates_mods map[string][]string // changes in a .v file from `os`, invalidates `os`
crun_cache_keys []string // target executable + top level source files; filled in by Builder.should_rebuild
} }
pub fn new_builder(pref &pref.Preferences) Builder { pub fn new_builder(pref &pref.Preferences) Builder {

View File

@ -3,9 +3,7 @@
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
module builder module builder
import time
import os import os
import rand
import v.pref import v.pref
import v.util import v.util
import v.checker import v.checker
@ -13,6 +11,19 @@ import v.checker
pub type FnBackend = fn (mut b Builder) pub type FnBackend = fn (mut b Builder)
pub fn compile(command string, pref &pref.Preferences, backend_cb FnBackend) { pub fn compile(command string, pref &pref.Preferences, backend_cb FnBackend) {
check_if_output_folder_is_writable(pref)
// Construct the V object from command line arguments
mut b := new_builder(pref)
if b.should_rebuild() {
b.rebuild(backend_cb)
}
b.exit_on_invalid_syntax()
// running does not require the parsers anymore
unsafe { b.myfree() }
b.run_compiled_executable_and_exit()
}
fn check_if_output_folder_is_writable(pref &pref.Preferences) {
odir := os.dir(pref.out_name) odir := os.dir(pref.out_name)
// When pref.out_name is just the name of an executable, i.e. `./v -o executable main.v` // When pref.out_name is just the name of an executable, i.e. `./v -o executable main.v`
// without a folder component, just use the current folder instead: // without a folder component, just use the current folder instead:
@ -24,56 +35,6 @@ pub fn compile(command string, pref &pref.Preferences, backend_cb FnBackend) {
// An early error here, is better than an unclear C error later: // An early error here, is better than an unclear C error later:
verror(err.msg()) verror(err.msg())
} }
// Construct the V object from command line arguments
mut b := new_builder(pref)
if pref.is_verbose {
println('builder.compile() pref:')
// println(pref)
}
mut sw := time.new_stopwatch()
backend_cb(mut b)
mut timers := util.get_timers()
timers.show_remaining()
if pref.is_stats {
compilation_time_micros := 1 + sw.elapsed().microseconds()
scompilation_time_ms := util.bold('${f64(compilation_time_micros) / 1000.0:6.3f}')
mut all_v_source_lines, mut all_v_source_bytes := 0, 0
for pf in b.parsed_files {
all_v_source_lines += pf.nr_lines
all_v_source_bytes += pf.nr_bytes
}
mut sall_v_source_lines := all_v_source_lines.str()
mut sall_v_source_bytes := all_v_source_bytes.str()
sall_v_source_lines = util.bold('${sall_v_source_lines:10s}')
sall_v_source_bytes = util.bold('${sall_v_source_bytes:10s}')
println(' V source code size: $sall_v_source_lines lines, $sall_v_source_bytes bytes')
//
mut slines := b.stats_lines.str()
mut sbytes := b.stats_bytes.str()
slines = util.bold('${slines:10s}')
sbytes = util.bold('${sbytes:10s}')
println('generated target code size: $slines lines, $sbytes bytes')
//
vlines_per_second := int(1_000_000.0 * f64(all_v_source_lines) / f64(compilation_time_micros))
svlines_per_second := util.bold(vlines_per_second.str())
println('compilation took: $scompilation_time_ms ms, compilation speed: $svlines_per_second vlines/s')
}
b.exit_on_invalid_syntax()
// running does not require the parsers anymore
unsafe { b.myfree() }
if pref.is_test || pref.is_run {
b.run_compiled_executable_and_exit()
}
}
pub fn (mut b Builder) get_vtmp_filename(base_file_name string, postfix string) string {
vtmp := util.get_vtmp_folder()
mut uniq := ''
if !b.pref.reuse_tmpc {
uniq = '.$rand.u64()'
}
fname := os.file_name(os.real_path(base_file_name)) + '$uniq$postfix'
return os.real_path(os.join_path(vtmp, fname))
} }
// Temporary, will be done by -autofree // Temporary, will be done by -autofree
@ -118,47 +79,45 @@ fn (mut b Builder) run_compiled_executable_and_exit() {
if b.pref.os == .ios { if b.pref.os == .ios {
panic('Running iOS apps is not supported yet.') panic('Running iOS apps is not supported yet.')
} }
if !(b.pref.is_test || b.pref.is_run || b.pref.is_crun) {
exit(0)
}
compiled_file := os.real_path(b.pref.out_name)
run_file := if b.pref.backend.is_js() {
node_basename := $if windows { 'node.exe' } $else { 'node' }
os.find_abs_path_of_executable(node_basename) or {
panic('Could not find `node` in system path. Do you have Node.js installed?')
}
} else {
compiled_file
}
mut run_args := []string{cap: b.pref.run_args.len + 1}
if b.pref.backend.is_js() {
run_args << compiled_file
}
run_args << b.pref.run_args
mut run_process := os.new_process(run_file)
run_process.set_args(run_args)
if b.pref.is_verbose { if b.pref.is_verbose {
println('running $run_process.filename with arguments $run_process.args')
} }
if b.pref.is_test || b.pref.is_run { // Ignore sigint and sigquit while running the compiled file,
compiled_file := os.real_path(b.pref.out_name) // so ^C doesn't prevent v from deleting the compiled file.
run_file := if b.pref.backend.is_js() { // See also https://git.musl-libc.org/cgit/musl/tree/src/process/system.c
node_basename := $if windows { 'node.exe' } $else { 'node' } prev_int_handler := os.signal_opt(.int, eshcb) or { serror('set .int', err) }
os.find_abs_path_of_executable(node_basename) or { mut prev_quit_handler := os.SignalHandler(eshcb)
panic('Could not find `node` in system path. Do you have Node.js installed?') $if !windows { // There's no sigquit on windows
} prev_quit_handler = os.signal_opt(.quit, eshcb) or { serror('set .quit', err) }
} else {
compiled_file
}
mut run_args := []string{cap: b.pref.run_args.len + 1}
if b.pref.backend.is_js() {
run_args << compiled_file
}
run_args << b.pref.run_args
mut run_process := os.new_process(run_file)
run_process.set_args(run_args)
if b.pref.is_verbose {
println('running $run_process.filename with arguments $run_process.args')
}
// Ignore sigint and sigquit while running the compiled file,
// so ^C doesn't prevent v from deleting the compiled file.
// See also https://git.musl-libc.org/cgit/musl/tree/src/process/system.c
prev_int_handler := os.signal_opt(.int, eshcb) or { serror('set .int', err) }
mut prev_quit_handler := os.SignalHandler(eshcb)
$if !windows { // There's no sigquit on windows
prev_quit_handler = os.signal_opt(.quit, eshcb) or { serror('set .quit', err) }
}
run_process.wait()
os.signal_opt(.int, prev_int_handler) or { serror('restore .int', err) }
$if !windows {
os.signal_opt(.quit, prev_quit_handler) or { serror('restore .quit', err) }
}
ret := run_process.code
run_process.close()
b.cleanup_run_executable_after_exit(compiled_file)
exit(ret)
} }
exit(0) run_process.wait()
os.signal_opt(.int, prev_int_handler) or { serror('restore .int', err) }
$if !windows {
os.signal_opt(.quit, prev_quit_handler) or { serror('restore .quit', err) }
}
ret := run_process.code
run_process.close()
b.cleanup_run_executable_after_exit(compiled_file)
exit(ret)
} }
fn eshcb(_ os.Signal) { fn eshcb(_ os.Signal) {
@ -171,6 +130,9 @@ fn serror(reason string, e IError) {
} }
fn (mut v Builder) cleanup_run_executable_after_exit(exefile string) { fn (mut v Builder) cleanup_run_executable_after_exit(exefile string) {
if v.pref.is_crun {
return
}
if v.pref.reuse_tmpc { if v.pref.reuse_tmpc {
v.pref.vrun_elog('keeping executable: $exefile , because -keepc was passed') v.pref.vrun_elog('keeping executable: $exefile , because -keepc was passed')
return return

View File

@ -2,6 +2,8 @@ module builder
import os import os
import hash import hash
import time
import rand
import strings import strings
import v.util import v.util
import v.pref import v.pref
@ -11,11 +13,27 @@ pub fn (mut b Builder) rebuild_modules() {
if !b.pref.use_cache || b.pref.build_mode == .build_module { if !b.pref.use_cache || b.pref.build_mode == .build_module {
return return
} }
all_files := b.parsed_files.map(it.path)
$if trace_invalidations ? {
eprintln('> rebuild_modules all_files: $all_files')
}
invalidations := b.find_invalidated_modules_by_files(all_files)
$if trace_invalidations ? {
eprintln('> rebuild_modules invalidations: $invalidations')
}
if invalidations.len > 0 {
vexe := pref.vexe_path()
for imp in invalidations {
b.v_build_module(vexe, imp)
}
}
}
pub fn (mut b Builder) find_invalidated_modules_by_files(all_files []string) []string {
util.timing_start('${@METHOD} source_hashing') util.timing_start('${@METHOD} source_hashing')
mut new_hashes := map[string]string{} mut new_hashes := map[string]string{}
mut old_hashes := map[string]string{} mut old_hashes := map[string]string{}
mut sb_new_hashes := strings.new_builder(1024) mut sb_new_hashes := strings.new_builder(1024)
all_files := b.parsed_files.map(it.path)
// //
mut cm := vcache.new_cache_manager(all_files) mut cm := vcache.new_cache_manager(all_files)
sold_hashes := cm.load('.hashes', 'all_files') or { ' ' } sold_hashes := cm.load('.hashes', 'all_files') or { ' ' }
@ -31,8 +49,7 @@ pub fn (mut b Builder) rebuild_modules() {
old_hashes[cpath] = chash old_hashes[cpath] = chash
} }
// eprintln('old_hashes: $old_hashes') // eprintln('old_hashes: $old_hashes')
for p in b.parsed_files { for cpath in all_files {
cpath := p.path
ccontent := util.read_file(cpath) or { '' } ccontent := util.read_file(cpath) or { '' }
chash := hash.sum64_string(ccontent, 7).hex_full() chash := hash.sum64_string(ccontent, 7).hex_full()
new_hashes[cpath] = chash new_hashes[cpath] = chash
@ -48,6 +65,7 @@ pub fn (mut b Builder) rebuild_modules() {
cm.save('.hashes', 'all_files', snew_hashes) or {} cm.save('.hashes', 'all_files', snew_hashes) or {}
util.timing_measure('${@METHOD} source_hashing') util.timing_measure('${@METHOD} source_hashing')
mut invalidations := []string{}
if new_hashes != old_hashes { if new_hashes != old_hashes {
util.timing_start('${@METHOD} rebuilding') util.timing_start('${@METHOD} rebuilding')
// eprintln('> b.mod_invalidates_paths: $b.mod_invalidates_paths') // eprintln('> b.mod_invalidates_paths: $b.mod_invalidates_paths')
@ -148,13 +166,13 @@ pub fn (mut b Builder) rebuild_modules() {
} }
if invalidated_mod_paths.len > 0 { if invalidated_mod_paths.len > 0 {
impaths := invalidated_mod_paths.keys() impaths := invalidated_mod_paths.keys()
vexe := pref.vexe_path()
for imp in impaths { for imp in impaths {
b.v_build_module(vexe, imp) invalidations << imp
} }
} }
util.timing_measure('${@METHOD} rebuilding') util.timing_measure('${@METHOD} rebuilding')
} }
return invalidations
} }
fn (mut b Builder) v_build_module(vexe string, imp_path string) { fn (mut b Builder) v_build_module(vexe string, imp_path string) {
@ -211,7 +229,7 @@ fn (mut b Builder) handle_usecache(vexe string) {
// strconv is already imported inside builtin, so skip generating its object file // strconv is already imported inside builtin, so skip generating its object file
// TODO: incase we have other modules with the same name, make sure they are vlib // TODO: incase we have other modules with the same name, make sure they are vlib
// is this even doign anything? // is this even doign anything?
if imp in ['strconv', 'strings', 'dlmalloc'] { if util.module_is_builtin(imp) {
continue continue
} }
if imp in built_modules { if imp in built_modules {
@ -237,3 +255,114 @@ fn (mut b Builder) handle_usecache(vexe string) {
} }
b.ccoptions.post_args << libs b.ccoptions.post_args << libs
} }
pub fn (mut b Builder) should_rebuild() bool {
mut exe_name := b.pref.out_name
$if windows {
exe_name = exe_name + '.exe'
}
if !os.is_file(exe_name) {
return true
}
if !b.pref.is_crun {
return true
}
mut v_program_files := []string{}
is_file := os.is_file(b.pref.path)
is_dir := os.is_dir(b.pref.path)
if is_file {
v_program_files << b.pref.path
} else if is_dir {
v_program_files << b.v_files_from_dir(b.pref.path)
}
v_program_files.sort() // ensure stable keys for the dependencies cache
b.crun_cache_keys = v_program_files
b.crun_cache_keys << exe_name
// just check the timestamps for now:
exe_stamp := os.file_last_mod_unix(exe_name)
source_stamp := most_recent_timestamp(v_program_files)
if exe_stamp <= source_stamp {
return true
}
////////////////////////////////////////////////////////////////////////////
// The timestamps for the top level files were found ok,
// however we want to *also* make sure that a full rebuild will be done
// if any of the dependencies (if we know them) are changed.
mut cm := vcache.new_cache_manager(b.crun_cache_keys)
// always rebuild, when the compilation options changed between 2 sequential cruns:
sbuild_options := cm.load('.build_options', '.crun') or { return true }
if sbuild_options != b.pref.build_options.join('\n') {
return true
}
sdependencies := cm.load('.dependencies', '.crun') or {
// empty/wiped out cache, we do not know what the dependencies are, so just
// rebuild, which will fill in the dependencies cache for the next crun
return true
}
dependencies := sdependencies.split('\n')
// we have already compiled these source files, and have their dependencies
dependencies_stamp := most_recent_timestamp(dependencies)
if dependencies_stamp < exe_stamp {
return false
}
return true
}
fn most_recent_timestamp(files []string) i64 {
mut res := i64(0)
for f in files {
f_stamp := os.file_last_mod_unix(f)
if res <= f_stamp {
res = f_stamp
}
}
return res
}
pub fn (mut b Builder) rebuild(backend_cb FnBackend) {
mut sw := time.new_stopwatch()
backend_cb(mut b)
if b.pref.is_crun {
// save the dependencies after the first compilation, they will be used for subsequent ones:
mut cm := vcache.new_cache_manager(b.crun_cache_keys)
dependency_files := b.parsed_files.map(it.path)
cm.save('.dependencies', '.crun', dependency_files.join('\n')) or {}
cm.save('.build_options', '.crun', b.pref.build_options.join('\n')) or {}
}
mut timers := util.get_timers()
timers.show_remaining()
if b.pref.is_stats {
compilation_time_micros := 1 + sw.elapsed().microseconds()
scompilation_time_ms := util.bold('${f64(compilation_time_micros) / 1000.0:6.3f}')
mut all_v_source_lines, mut all_v_source_bytes := 0, 0
for pf in b.parsed_files {
all_v_source_lines += pf.nr_lines
all_v_source_bytes += pf.nr_bytes
}
mut sall_v_source_lines := all_v_source_lines.str()
mut sall_v_source_bytes := all_v_source_bytes.str()
sall_v_source_lines = util.bold('${sall_v_source_lines:10s}')
sall_v_source_bytes = util.bold('${sall_v_source_bytes:10s}')
println(' V source code size: $sall_v_source_lines lines, $sall_v_source_bytes bytes')
//
mut slines := b.stats_lines.str()
mut sbytes := b.stats_bytes.str()
slines = util.bold('${slines:10s}')
sbytes = util.bold('${sbytes:10s}')
println('generated target code size: $slines lines, $sbytes bytes')
//
vlines_per_second := int(1_000_000.0 * f64(all_v_source_lines) / f64(compilation_time_micros))
svlines_per_second := util.bold(vlines_per_second.str())
println('compilation took: $scompilation_time_ms ms, compilation speed: $svlines_per_second vlines/s')
}
}
pub fn (mut b Builder) get_vtmp_filename(base_file_name string, postfix string) string {
vtmp := util.get_vtmp_folder()
mut uniq := ''
if !b.pref.reuse_tmpc {
uniq = '.$rand.u64()'
}
fname := os.file_name(os.real_path(base_file_name)) + '$uniq$postfix'
return os.real_path(os.join_path(vtmp, fname))
}

View File

@ -305,7 +305,7 @@ pub fn (mut c Checker) assign_stmt(mut node ast.AssignStmt) {
} }
} }
} }
left_type_unwrapped := c.unwrap_generic(left_type) left_type_unwrapped := c.unwrap_generic(ast.mktyp(left_type))
right_type_unwrapped := c.unwrap_generic(right_type) right_type_unwrapped := c.unwrap_generic(right_type)
if right_type_unwrapped == 0 { if right_type_unwrapped == 0 {
// right type was a generic `T` // right type was a generic `T`

View File

@ -232,10 +232,6 @@ pub fn (mut c Checker) check_expected_call_arg(got ast.Type, expected_ ast.Type,
return return
} }
} }
got_typ_sym := c.table.sym(got)
got_typ_str := c.table.type_to_str(got.clear_flag(.variadic))
expected_typ_sym := c.table.sym(expected_)
expected_typ_str := c.table.type_to_str(expected.clear_flag(.variadic))
if c.check_types(got, expected) { if c.check_types(got, expected) {
if language != .v || expected.is_ptr() == got.is_ptr() || arg.is_mut if language != .v || expected.is_ptr() == got.is_ptr() || arg.is_mut
@ -244,6 +240,9 @@ pub fn (mut c Checker) check_expected_call_arg(got ast.Type, expected_ ast.Type,
return return
} }
} else { } else {
got_typ_sym := c.table.sym(got)
expected_typ_sym := c.table.sym(expected_)
// Check on Generics types, there are some case where we have the following case // Check on Generics types, there are some case where we have the following case
// `&Type<int> == &Type<>`. This is a common case we are implementing a function // `&Type<int> == &Type<>`. This is a common case we are implementing a function
// with generic parameters like `compare(bst Bst<T> node) {}` // with generic parameters like `compare(bst Bst<T> node) {}`
@ -251,6 +250,7 @@ pub fn (mut c Checker) check_expected_call_arg(got ast.Type, expected_ ast.Type,
// Check if we are making a comparison between two different types of // Check if we are making a comparison between two different types of
// the same type like `Type<int> and &Type<>` // the same type like `Type<int> and &Type<>`
if (got.is_ptr() != expected.is_ptr()) || !c.check_same_module(got, expected) { if (got.is_ptr() != expected.is_ptr()) || !c.check_same_module(got, expected) {
got_typ_str, expected_typ_str := c.get_string_names_of(got, expected)
return error('cannot use `$got_typ_str` as `$expected_typ_str`') return error('cannot use `$got_typ_str` as `$expected_typ_str`')
} }
return return
@ -258,14 +258,22 @@ pub fn (mut c Checker) check_expected_call_arg(got ast.Type, expected_ ast.Type,
if got == ast.void_type { if got == ast.void_type {
return error('`$arg.expr` (no value) used as value') return error('`$arg.expr` (no value) used as value')
} }
got_typ_str, expected_typ_str := c.get_string_names_of(got, expected)
return error('cannot use `$got_typ_str` as `$expected_typ_str`') return error('cannot use `$got_typ_str` as `$expected_typ_str`')
} }
if got != ast.void_type { if got != ast.void_type {
got_typ_str, expected_typ_str := c.get_string_names_of(got, expected)
return error('cannot use `$got_typ_str` as `$expected_typ_str`') return error('cannot use `$got_typ_str` as `$expected_typ_str`')
} }
} }
fn (c Checker) get_string_names_of(got ast.Type, expected ast.Type) (string, string) {
got_typ_str := c.table.type_to_str(got.clear_flag(.variadic))
expected_typ_str := c.table.type_to_str(expected.clear_flag(.variadic))
return got_typ_str, expected_typ_str
}
// helper method to check if the type is of the same module. // helper method to check if the type is of the same module.
// FIXME(vincenzopalazzo) This is a work around to the issue // FIXME(vincenzopalazzo) This is a work around to the issue
// explained in the https://github.com/vlang/v/pull/13718#issuecomment-1074517800 // explained in the https://github.com/vlang/v/pull/13718#issuecomment-1074517800
@ -614,7 +622,7 @@ pub fn (mut c Checker) infer_fn_generic_types(func ast.Fn, mut node ast.CallExpr
sym := c.table.sym(node.receiver_type) sym := c.table.sym(node.receiver_type)
match sym.info { match sym.info {
ast.Struct, ast.Interface, ast.SumType { ast.Struct, ast.Interface, ast.SumType {
if c.table.cur_fn.generic_names.len > 0 { // in generic fn if !isnil(c.table.cur_fn) && c.table.cur_fn.generic_names.len > 0 { // in generic fn
if gt_name in c.table.cur_fn.generic_names if gt_name in c.table.cur_fn.generic_names
&& c.table.cur_fn.generic_names.len == c.table.cur_concrete_types.len { && c.table.cur_fn.generic_names.len == c.table.cur_concrete_types.len {
idx := c.table.cur_fn.generic_names.index(gt_name) idx := c.table.cur_fn.generic_names.index(gt_name)
@ -671,6 +679,7 @@ pub fn (mut c Checker) infer_fn_generic_types(func ast.Fn, mut node ast.CallExpr
mut param_elem_sym := c.table.sym(param_elem_info.elem_type) mut param_elem_sym := c.table.sym(param_elem_info.elem_type)
for { for {
if arg_elem_sym.kind == .array && param_elem_sym.kind == .array if arg_elem_sym.kind == .array && param_elem_sym.kind == .array
&& !isnil(c.table.cur_fn)
&& param_elem_sym.name !in c.table.cur_fn.generic_names { && param_elem_sym.name !in c.table.cur_fn.generic_names {
arg_elem_info = arg_elem_sym.info as ast.Array arg_elem_info = arg_elem_sym.info as ast.Array
arg_elem_sym = c.table.sym(arg_elem_info.elem_type) arg_elem_sym = c.table.sym(arg_elem_info.elem_type)
@ -690,6 +699,7 @@ pub fn (mut c Checker) infer_fn_generic_types(func ast.Fn, mut node ast.CallExpr
mut param_elem_sym := c.table.sym(param_elem_info.elem_type) mut param_elem_sym := c.table.sym(param_elem_info.elem_type)
for { for {
if arg_elem_sym.kind == .array_fixed && param_elem_sym.kind == .array_fixed if arg_elem_sym.kind == .array_fixed && param_elem_sym.kind == .array_fixed
&& !isnil(c.table.cur_fn)
&& param_elem_sym.name !in c.table.cur_fn.generic_names { && param_elem_sym.name !in c.table.cur_fn.generic_names {
arg_elem_info = arg_elem_sym.info as ast.ArrayFixed arg_elem_info = arg_elem_sym.info as ast.ArrayFixed
arg_elem_sym = c.table.sym(arg_elem_info.elem_type) arg_elem_sym = c.table.sym(arg_elem_info.elem_type)

View File

@ -13,16 +13,14 @@ import v.util.version
import v.errors import v.errors
import v.pkgconfig import v.pkgconfig
const int_min = int(0x80000000) const (
int_min = int(0x80000000)
const int_max = int(0x7FFFFFFF) int_max = int(0x7FFFFFFF)
// prevent stack overflows by restricting too deep recursion:
// prevent stack overflows by restricting too deep recursion: expr_level_cutoff_limit = 40
const expr_level_cutoff_limit = 40 stmt_level_cutoff_limit = 40
iface_level_cutoff_limit = 100
const stmt_level_cutoff_limit = 40 )
const iface_level_cutoff_limit = 100
pub const ( pub const (
valid_comptime_if_os = ['windows', 'ios', 'macos', 'mach', 'darwin', 'hpux', 'gnu', valid_comptime_if_os = ['windows', 'ios', 'macos', 'mach', 'darwin', 'hpux', 'gnu',
@ -58,28 +56,27 @@ fn all_valid_comptime_idents() []string {
pub struct Checker { pub struct Checker {
pref &pref.Preferences // Preferences shared from V struct pref &pref.Preferences // Preferences shared from V struct
pub mut: pub mut:
table &ast.Table table &ast.Table
file &ast.File = 0 file &ast.File = 0
nr_errors int nr_errors int
nr_warnings int nr_warnings int
nr_notices int nr_notices int
errors []errors.Error errors []errors.Error
warnings []errors.Warning warnings []errors.Warning
notices []errors.Notice notices []errors.Notice
error_lines []int // to avoid printing multiple errors for the same line error_lines []int // to avoid printing multiple errors for the same line
expected_type ast.Type expected_type ast.Type
expected_or_type ast.Type // fn() or { 'this type' } eg. string. expected or block type expected_or_type ast.Type // fn() or { 'this type' } eg. string. expected or block type
expected_expr_type ast.Type // if/match is_expr: expected_type expected_expr_type ast.Type // if/match is_expr: expected_type
mod string // current module name mod string // current module name
const_decl string const_var &ast.ConstField = voidptr(0) // the current constant, when checking const declarations
const_deps []string const_deps []string
const_names []string const_names []string
global_names []string global_names []string
locked_names []string // vars that are currently locked locked_names []string // vars that are currently locked
rlocked_names []string // vars that are currently read-locked rlocked_names []string // vars that are currently read-locked
in_for_count int // if checker is currently in a for loop in_for_count int // if checker is currently in a for loop
// checked_ident string // to avoid infinite checker loops should_abort bool // when too many errors/warnings/notices are accumulated, .should_abort becomes true. It is checked in statement/expression loops, so the checker can return early, instead of wasting time.
should_abort bool // when too many errors/warnings/notices are accumulated, .should_abort becomes true. It is checked in statement/expression loops, so the checker can return early, instead of wasting time.
returns bool returns bool
scope_returns bool scope_returns bool
is_builtin_mod bool // true inside the 'builtin', 'os' or 'strconv' modules; TODO: remove the need for special casing this is_builtin_mod bool // true inside the 'builtin', 'os' or 'strconv' modules; TODO: remove the need for special casing this
@ -145,7 +142,7 @@ pub fn new_checker(table &ast.Table, pref &pref.Preferences) &Checker {
fn (mut c Checker) reset_checker_state_at_start_of_new_file() { fn (mut c Checker) reset_checker_state_at_start_of_new_file() {
c.expected_type = ast.void_type c.expected_type = ast.void_type
c.expected_or_type = ast.void_type c.expected_or_type = ast.void_type
c.const_decl = '' c.const_var = voidptr(0)
c.in_for_count = 0 c.in_for_count = 0
c.returns = false c.returns = false
c.scope_returns = false c.scope_returns = false
@ -203,7 +200,7 @@ pub fn (mut c Checker) check(ast_file_ &ast.File) {
return return
} }
} }
//
c.stmt_level = 0 c.stmt_level = 0
for mut stmt in ast_file.stmts { for mut stmt in ast_file.stmts {
if stmt is ast.GlobalDecl { if stmt is ast.GlobalDecl {
@ -214,7 +211,7 @@ pub fn (mut c Checker) check(ast_file_ &ast.File) {
return return
} }
} }
//
c.stmt_level = 0 c.stmt_level = 0
for mut stmt in ast_file.stmts { for mut stmt in ast_file.stmts {
if stmt !is ast.ConstDecl && stmt !is ast.GlobalDecl && stmt !is ast.ExprStmt { if stmt !is ast.ConstDecl && stmt !is ast.GlobalDecl && stmt !is ast.ExprStmt {
@ -225,7 +222,7 @@ pub fn (mut c Checker) check(ast_file_ &ast.File) {
return return
} }
} }
//
c.check_scope_vars(c.file.scope) c.check_scope_vars(c.file.scope)
} }
@ -745,7 +742,7 @@ fn (mut c Checker) fail_if_immutable(expr_ ast.Expr) (string, token.Pos) {
return '', pos return '', pos
} }
else { else {
if !expr.is_lit() { if !expr.is_pure_literal() {
c.error('unexpected expression `$expr.type_name()`', expr.pos()) c.error('unexpected expression `$expr.type_name()`', expr.pos())
return '', pos return '', pos
} }
@ -807,7 +804,6 @@ fn (mut c Checker) type_implements(typ ast.Type, interface_type ast.Type, pos to
} }
} }
} }
styp := c.table.type_to_str(utyp)
if utyp.idx() == interface_type.idx() { if utyp.idx() == interface_type.idx() {
// same type -> already casted to the interface // same type -> already casted to the interface
return true return true
@ -816,6 +812,7 @@ fn (mut c Checker) type_implements(typ ast.Type, interface_type ast.Type, pos to
// `none` "implements" the Error interface // `none` "implements" the Error interface
return true return true
} }
styp := c.table.type_to_str(utyp)
if typ_sym.kind == .interface_ && inter_sym.kind == .interface_ && !styp.starts_with('JS.') if typ_sym.kind == .interface_ && inter_sym.kind == .interface_ && !styp.starts_with('JS.')
&& !inter_sym.name.starts_with('JS.') { && !inter_sym.name.starts_with('JS.') {
c.error('cannot implement interface `$inter_sym.name` with a different interface `$styp`', c.error('cannot implement interface `$inter_sym.name` with a different interface `$styp`',
@ -934,8 +931,8 @@ pub fn (mut c Checker) check_expr_opt_call(expr ast.Expr, ret_type ast.Type) ast
pub fn (mut c Checker) check_or_expr(node ast.OrExpr, ret_type ast.Type, expr_return_type ast.Type) { pub fn (mut c Checker) check_or_expr(node ast.OrExpr, ret_type ast.Type, expr_return_type ast.Type) {
if node.kind == .propagate_option { if node.kind == .propagate_option {
if !c.table.cur_fn.return_type.has_flag(.optional) && c.table.cur_fn.name != 'main.main' if !isnil(c.table.cur_fn) && !c.table.cur_fn.return_type.has_flag(.optional)
&& !c.inside_const { && c.table.cur_fn.name != 'main.main' && !c.inside_const {
c.error('to propagate the call, `$c.table.cur_fn.name` must return an optional type', c.error('to propagate the call, `$c.table.cur_fn.name` must return an optional type',
node.pos) node.pos)
} }
@ -951,8 +948,8 @@ pub fn (mut c Checker) check_or_expr(node ast.OrExpr, ret_type ast.Type, expr_re
return return
} }
if node.kind == .propagate_result { if node.kind == .propagate_result {
if !c.table.cur_fn.return_type.has_flag(.result) && c.table.cur_fn.name != 'main.main' if !isnil(c.table.cur_fn) && !c.table.cur_fn.return_type.has_flag(.result)
&& !c.inside_const { && c.table.cur_fn.name != 'main.main' && !c.inside_const {
c.error('to propagate the call, `$c.table.cur_fn.name` must return an result type', c.error('to propagate the call, `$c.table.cur_fn.name` must return an result type',
node.pos) node.pos)
} }
@ -989,7 +986,6 @@ fn (mut c Checker) check_or_last_stmt(stmt ast.Stmt, ret_type ast.Type, expr_ret
if type_fits || is_noreturn { if type_fits || is_noreturn {
return return
} }
expected_type_name := c.table.type_to_str(ret_type.clear_flag(.optional))
if stmt.typ == ast.void_type { if stmt.typ == ast.void_type {
if stmt.expr is ast.IfExpr { if stmt.expr is ast.IfExpr {
for branch in stmt.expr.branches { for branch in stmt.expr.branches {
@ -1002,10 +998,12 @@ fn (mut c Checker) check_or_last_stmt(stmt ast.Stmt, ret_type ast.Type, expr_ret
} }
return return
} }
expected_type_name := c.table.type_to_str(ret_type.clear_flag(.optional))
c.error('`or` block must provide a default value of type `$expected_type_name`, or return/continue/break or call a [noreturn] function like panic(err) or exit(1)', c.error('`or` block must provide a default value of type `$expected_type_name`, or return/continue/break or call a [noreturn] function like panic(err) or exit(1)',
stmt.expr.pos()) stmt.expr.pos())
} else { } else {
type_name := c.table.type_to_str(last_stmt_typ) type_name := c.table.type_to_str(last_stmt_typ)
expected_type_name := c.table.type_to_str(ret_type.clear_flag(.optional))
c.error('wrong return type `$type_name` in the `or {}` block, expected `$expected_type_name`', c.error('wrong return type `$type_name` in the `or {}` block, expected `$expected_type_name`',
stmt.expr.pos()) stmt.expr.pos())
} }
@ -1071,7 +1069,8 @@ pub fn (mut c Checker) selector_expr(mut node ast.SelectorExpr) ast.Type {
match mut node.expr { match mut node.expr {
ast.Ident { ast.Ident {
name := node.expr.name name := node.expr.name
valid_generic := util.is_generic_type_name(name) && name in c.table.cur_fn.generic_names valid_generic := util.is_generic_type_name(name) && !isnil(c.table.cur_fn)
&& name in c.table.cur_fn.generic_names
if valid_generic { if valid_generic {
name_type = ast.Type(c.table.find_type_idx(name)).set_flag(.generic) name_type = ast.Type(c.table.find_type_idx(name)).set_flag(.generic)
} }
@ -1220,11 +1219,23 @@ pub fn (mut c Checker) selector_expr(mut node ast.SelectorExpr) ast.Type {
// <<< // <<<
if has_field { if has_field {
if sym.mod != c.mod && !field.is_pub && sym.language != .c { is_used_outside := sym.mod != c.mod
if is_used_outside && !field.is_pub && sym.language != .c {
unwrapped_sym := c.table.sym(c.unwrap_generic(typ)) unwrapped_sym := c.table.sym(c.unwrap_generic(typ))
c.error('field `${unwrapped_sym.name}.$field_name` is not public', node.pos) c.error('field `${unwrapped_sym.name}.$field_name` is not public', node.pos)
} }
field_sym := c.table.sym(field.typ) field_sym := c.table.sym(field.typ)
if field.is_deprecated && is_used_outside {
now := time.now()
mut after_time := now
if field.deprecated_after != '' {
after_time = time.parse_iso8601(field.deprecated_after) or {
c.error('invalid time format', field.pos)
now
}
}
c.deprecate('field', field_name, field.deprecation_msg, now, after_time, node.pos)
}
if field_sym.kind in [.sum_type, .interface_] { if field_sym.kind in [.sum_type, .interface_] {
if !prevent_sum_type_unwrapping_once { if !prevent_sum_type_unwrapping_once {
if scope_field := node.scope.find_struct_field(node.expr.str(), typ, field_name) { if scope_field := node.scope.find_struct_field(node.expr.str(), typ, field_name) {
@ -1316,8 +1327,9 @@ pub fn (mut c Checker) const_decl(mut node ast.ConstDecl) {
c.const_names << field.name c.const_names << field.name
} }
for i, mut field in node.fields { for i, mut field in node.fields {
c.const_decl = field.name
c.const_deps << field.name c.const_deps << field.name
prev_const_var := c.const_var
c.const_var = unsafe { field }
mut typ := c.check_expr_opt_call(field.expr, c.expr(field.expr)) mut typ := c.check_expr_opt_call(field.expr, c.expr(field.expr))
if ct_value := c.eval_comptime_const_expr(field.expr, 0) { if ct_value := c.eval_comptime_const_expr(field.expr, 0) {
field.comptime_expr_value = ct_value field.comptime_expr_value = ct_value
@ -1327,6 +1339,7 @@ pub fn (mut c Checker) const_decl(mut node ast.ConstDecl) {
} }
node.fields[i].typ = ast.mktyp(typ) node.fields[i].typ = ast.mktyp(typ)
c.const_deps = [] c.const_deps = []
c.const_var = prev_const_var
} }
} }
@ -1452,7 +1465,7 @@ fn (mut c Checker) stmt(node_ ast.Stmt) {
c.inside_const = false c.inside_const = false
} }
ast.DeferStmt { ast.DeferStmt {
if node.idx_in_fn < 0 { if node.idx_in_fn < 0 && !isnil(c.table.cur_fn) {
node.idx_in_fn = c.table.cur_fn.defer_stmts.len node.idx_in_fn = c.table.cur_fn.defer_stmts.len
c.table.cur_fn.defer_stmts << unsafe { &node } c.table.cur_fn.defer_stmts << unsafe { &node }
} }
@ -1541,7 +1554,7 @@ fn (mut c Checker) stmt(node_ ast.Stmt) {
c.warn('`goto` requires `unsafe` (consider using labelled break/continue)', c.warn('`goto` requires `unsafe` (consider using labelled break/continue)',
node.pos) node.pos)
} }
if node.name !in c.table.cur_fn.label_names { if !isnil(c.table.cur_fn) && node.name !in c.table.cur_fn.label_names {
c.error('unknown label `$node.name`', node.pos) c.error('unknown label `$node.name`', node.pos)
} }
// TODO: check label doesn't bypass variable declarations // TODO: check label doesn't bypass variable declarations
@ -1879,7 +1892,6 @@ fn (mut c Checker) hash_stmt(mut node ast.HashStmt) {
} }
} }
} }
// println('adding flag "$flag"')
c.table.parse_cflag(flag, c.mod, c.pref.compile_defines_all) or { c.table.parse_cflag(flag, c.mod, c.pref.compile_defines_all) or {
c.error(err.msg(), node.pos) c.error(err.msg(), node.pos)
} }
@ -1981,7 +1993,7 @@ fn (mut c Checker) stmts_ending_with_expression(stmts []ast.Stmt) {
} }
pub fn (mut c Checker) unwrap_generic(typ ast.Type) ast.Type { pub fn (mut c Checker) unwrap_generic(typ ast.Type) ast.Type {
if typ.has_flag(.generic) { if typ.has_flag(.generic) && !isnil(c.table.cur_fn) {
if t_typ := c.table.resolve_generic_to_concrete(typ, c.table.cur_fn.generic_names, if t_typ := c.table.resolve_generic_to_concrete(typ, c.table.cur_fn.generic_names,
c.table.cur_concrete_types) c.table.cur_concrete_types)
{ {
@ -2094,10 +2106,7 @@ pub fn (mut c Checker) expr(node_ ast.Expr) ast.Type {
return c.chan_init(mut node) return c.chan_init(mut node)
} }
ast.CharLiteral { ast.CharLiteral {
// return int_literal, not rune, so that we can do "bytes << `A`" without a cast etc
// return ast.int_literal_type
return ast.rune_type return ast.rune_type
// return ast.byte_type
} }
ast.Comment { ast.Comment {
return ast.void_type return ast.void_type
@ -2142,10 +2151,7 @@ pub fn (mut c Checker) expr(node_ ast.Expr) ast.Type {
return c.go_expr(mut node) return c.go_expr(mut node)
} }
ast.Ident { ast.Ident {
// c.checked_ident = node.name return c.ident(mut node)
res := c.ident(mut node)
// c.checked_ident = ''
return res
} }
ast.IfExpr { ast.IfExpr {
return c.if_expr(mut node) return c.if_expr(mut node)
@ -2531,9 +2537,15 @@ pub fn (mut c Checker) cast_expr(mut node ast.CastExpr) ast.Type {
fn (mut c Checker) at_expr(mut node ast.AtExpr) ast.Type { fn (mut c Checker) at_expr(mut node ast.AtExpr) ast.Type {
match node.kind { match node.kind {
.fn_name { .fn_name {
if isnil(c.table.cur_fn) {
return ast.void_type
}
node.val = c.table.cur_fn.name.all_after_last('.') node.val = c.table.cur_fn.name.all_after_last('.')
} }
.method_name { .method_name {
if isnil(c.table.cur_fn) {
return ast.void_type
}
fname := c.table.cur_fn.name.all_after_last('.') fname := c.table.cur_fn.name.all_after_last('.')
if c.table.cur_fn.is_method { if c.table.cur_fn.is_method {
node.val = c.table.type_to_str(c.table.cur_fn.receiver.typ).all_after_last('.') + node.val = c.table.type_to_str(c.table.cur_fn.receiver.typ).all_after_last('.') +
@ -2543,6 +2555,9 @@ fn (mut c Checker) at_expr(mut node ast.AtExpr) ast.Type {
} }
} }
.mod_name { .mod_name {
if isnil(c.table.cur_fn) {
return ast.void_type
}
node.val = c.table.cur_fn.mod node.val = c.table.cur_fn.mod
} }
.struct_name { .struct_name {
@ -2607,7 +2622,23 @@ pub fn (mut c Checker) ident(mut node ast.Ident) ast.Type {
if !name.contains('.') && node.mod != 'builtin' { if !name.contains('.') && node.mod != 'builtin' {
name = '${node.mod}.$node.name' name = '${node.mod}.$node.name'
} }
if name == c.const_decl { // detect cycles, while allowing for references to the same constant,
// used inside its initialisation like: `struct Abc { x &Abc } ... const a = [ Abc{0}, Abc{unsafe{&a[0]}} ]!`
// see vlib/v/tests/const_fixed_array_containing_references_to_itself_test.v
if unsafe { c.const_var != 0 } && name == c.const_var.name {
if mut c.const_var.expr is ast.ArrayInit {
if c.const_var.expr.is_fixed && c.expected_type.nr_muls() > 0 {
elem_typ := c.expected_type.deref()
node.kind = .constant
node.name = c.const_var.name
node.info = ast.IdentVar{
typ: elem_typ
}
// c.const_var.typ = elem_typ
node.obj = c.const_var
return c.expected_type
}
}
c.error('cycle in constant `$c.const_decl`', node.pos) c.error('cycle in constant `$c.const_decl`', node.pos)
return ast.void_type return ast.void_type
} }
@ -2693,13 +2724,6 @@ pub fn (mut c Checker) ident(mut node ast.Ident) ast.Type {
typ: typ typ: typ
is_optional: is_optional is_optional: is_optional
} }
// if typ == ast.t_type {
// sym := c.table.sym(c.cur_generic_type)
// println('IDENT T unresolved $node.name typ=$sym.name')
// Got a var with type T, return current generic type
// typ = c.cur_generic_type
// }
// } else {
if !is_sum_type_cast { if !is_sum_type_cast {
obj.typ = typ obj.typ = typ
} }
@ -3052,7 +3076,7 @@ fn (mut c Checker) find_obj_definition(obj ast.ScopeObject) ?ast.Expr {
if mut expr is ast.Ident { if mut expr is ast.Ident {
return c.find_definition(expr) return c.find_definition(expr)
} }
if !expr.is_lit() { if !expr.is_pure_literal() {
return error('definition of `$name` is unknown at compile time') return error('definition of `$name` is unknown at compile time')
} }
return expr return expr
@ -3266,9 +3290,6 @@ pub fn (mut c Checker) prefix_expr(mut node ast.PrefixExpr) ast.Type {
fn (mut c Checker) check_index(typ_sym &ast.TypeSymbol, index ast.Expr, index_type ast.Type, pos token.Pos, range_index bool, is_gated bool) { fn (mut c Checker) check_index(typ_sym &ast.TypeSymbol, index ast.Expr, index_type ast.Type, pos token.Pos, range_index bool, is_gated bool) {
index_type_sym := c.table.sym(index_type) index_type_sym := c.table.sym(index_type)
// println('index expr left=$typ_sym.name $node.pos.line_nr')
// if typ_sym.kind == .array && (!(ast.type_idx(index_type) in ast.number_type_idxs) &&
// index_type_sym.kind != .enum_) {
if typ_sym.kind in [.array, .array_fixed, .string] { if typ_sym.kind in [.array, .array_fixed, .string] {
if !(index_type.is_int() || index_type_sym.kind == .enum_ if !(index_type.is_int() || index_type_sym.kind == .enum_
|| (index_type_sym.kind == .alias || (index_type_sym.kind == .alias
@ -3401,6 +3422,9 @@ pub fn (mut c Checker) index_expr(mut node ast.IndexExpr) ast.Type {
typ = value_type typ = value_type
} }
} }
if node.or_expr.stmts.len > 0 && node.or_expr.stmts.last() is ast.ExprStmt {
c.expected_or_type = typ
}
c.stmts_ending_with_expression(node.or_expr.stmts) c.stmts_ending_with_expression(node.or_expr.stmts)
c.check_expr_opt_call(node, typ) c.check_expr_opt_call(node, typ)
return typ return typ

View File

@ -52,7 +52,8 @@ pub fn (mut c Checker) array_init(mut node ast.ArrayInit) ast.Type {
c.ensure_sumtype_array_has_default_value(node) c.ensure_sumtype_array_has_default_value(node)
} }
c.ensure_type_exists(node.elem_type, node.elem_type_pos) or {} c.ensure_type_exists(node.elem_type, node.elem_type_pos) or {}
if node.typ.has_flag(.generic) && c.table.cur_fn.generic_names.len == 0 { if node.typ.has_flag(.generic) && !isnil(c.table.cur_fn)
&& c.table.cur_fn.generic_names.len == 0 {
c.error('generic struct cannot use in non-generic function', node.pos) c.error('generic struct cannot use in non-generic function', node.pos)
} }
return node.typ return node.typ

View File

@ -213,8 +213,8 @@ fn (mut c Checker) fn_decl(mut node ast.FnDecl) {
} }
} }
} }
if (c.pref.translated || c.file.is_translated) && node.is_variadic //&& node.params.len == 1 && param.typ.is_ptr() {
&& node.params.len == 1 && param.typ.is_ptr() { if (c.pref.translated || c.file.is_translated) && node.is_variadic && param.typ.is_ptr() {
// TODO c2v hack to fix `(const char *s, ...)` // TODO c2v hack to fix `(const char *s, ...)`
param.typ = ast.int_type.ref() param.typ = ast.int_type.ref()
} }
@ -421,8 +421,8 @@ pub fn (mut c Checker) call_expr(mut node ast.CallExpr) ast.Type {
c.expected_or_type = node.return_type.clear_flag(.optional) c.expected_or_type = node.return_type.clear_flag(.optional)
c.stmts_ending_with_expression(node.or_block.stmts) c.stmts_ending_with_expression(node.or_block.stmts)
c.expected_or_type = ast.void_type c.expected_or_type = ast.void_type
if node.or_block.kind == .propagate_option && !c.table.cur_fn.return_type.has_flag(.optional) if node.or_block.kind == .propagate_option && !isnil(c.table.cur_fn)
&& !c.inside_const { && !c.table.cur_fn.return_type.has_flag(.optional) && !c.inside_const {
if !c.table.cur_fn.is_main { if !c.table.cur_fn.is_main {
c.error('to propagate the optional call, `$c.table.cur_fn.name` must return an optional', c.error('to propagate the optional call, `$c.table.cur_fn.name` must return an optional',
node.or_block.pos) node.or_block.pos)
@ -482,7 +482,9 @@ pub fn (mut c Checker) fn_call(mut node ast.CallExpr, mut continue_check &bool)
c.error('JS.await: first argument must be a promise, got `$tsym.name`', node.pos) c.error('JS.await: first argument must be a promise, got `$tsym.name`', node.pos)
return ast.void_type return ast.void_type
} }
c.table.cur_fn.has_await = true if !isnil(c.table.cur_fn) {
c.table.cur_fn.has_await = true
}
match tsym.info { match tsym.info {
ast.Struct { ast.Struct {
mut ret_type := tsym.info.concrete_types[0] mut ret_type := tsym.info.concrete_types[0]
@ -895,6 +897,11 @@ pub fn (mut c Checker) fn_call(mut node ast.CallExpr, mut continue_check &bool)
} }
continue continue
} }
if param.typ.is_ptr() && !param.is_mut && !call_arg.typ.is_real_pointer()
&& call_arg.expr.is_literal() && func.language == .v {
c.error('literal argument cannot be passed as reference parameter `${c.table.type_to_str(param.typ)}`',
call_arg.pos)
}
c.check_expected_call_arg(arg_typ, c.unwrap_generic(param.typ), node.language, c.check_expected_call_arg(arg_typ, c.unwrap_generic(param.typ), node.language,
call_arg) or { call_arg) or {
if param.typ.has_flag(.generic) { if param.typ.has_flag(.generic) {
@ -1026,14 +1033,15 @@ pub fn (mut c Checker) fn_call(mut node ast.CallExpr, mut continue_check &bool)
} }
} }
// resolve return generics struct to concrete type // resolve return generics struct to concrete type
if func.generic_names.len > 0 && func.return_type.has_flag(.generic) if func.generic_names.len > 0 && func.return_type.has_flag(.generic) && !isnil(c.table.cur_fn)
&& c.table.cur_fn.generic_names.len == 0 { && c.table.cur_fn.generic_names.len == 0 {
node.return_type = c.table.unwrap_generic_type(func.return_type, func.generic_names, node.return_type = c.table.unwrap_generic_type(func.return_type, func.generic_names,
concrete_types) concrete_types)
} else { } else {
node.return_type = func.return_type node.return_type = func.return_type
} }
if node.concrete_types.len > 0 && func.return_type != 0 && c.table.cur_fn.generic_names.len == 0 { if node.concrete_types.len > 0 && func.return_type != 0 && !isnil(c.table.cur_fn)
&& c.table.cur_fn.generic_names.len == 0 {
if typ := c.table.resolve_generic_to_concrete(func.return_type, func.generic_names, if typ := c.table.resolve_generic_to_concrete(func.return_type, func.generic_names,
concrete_types) concrete_types)
{ {
@ -1075,7 +1083,7 @@ pub fn (mut c Checker) method_call(mut node ast.CallExpr) ast.Type {
node.return_type = left_type node.return_type = left_type
node.receiver_type = left_type node.receiver_type = left_type
if c.table.cur_fn.generic_names.len > 0 { if !isnil(c.table.cur_fn) && c.table.cur_fn.generic_names.len > 0 {
c.table.unwrap_generic_type(left_type, c.table.cur_fn.generic_names, c.table.cur_concrete_types) c.table.unwrap_generic_type(left_type, c.table.cur_fn.generic_names, c.table.cur_concrete_types)
} }
unwrapped_left_type := c.unwrap_generic(left_type) unwrapped_left_type := c.unwrap_generic(left_type)
@ -1155,7 +1163,9 @@ pub fn (mut c Checker) method_call(mut node ast.CallExpr) ast.Type {
if node.args.len > 0 { if node.args.len > 0 {
c.error('wait() does not have any arguments', node.args[0].pos) c.error('wait() does not have any arguments', node.args[0].pos)
} }
c.table.cur_fn.has_await = true if !isnil(c.table.cur_fn) {
c.table.cur_fn.has_await = true
}
node.return_type = info.concrete_types[0] node.return_type = info.concrete_types[0]
node.return_type.set_flag(.optional) node.return_type.set_flag(.optional)
return node.return_type return node.return_type
@ -1428,6 +1438,10 @@ pub fn (mut c Checker) method_call(mut node ast.CallExpr) ast.Type {
} }
continue continue
} }
if param.typ.is_ptr() && !arg.typ.is_real_pointer() && arg.expr.is_literal() {
c.error('literal argument cannot be passed as reference parameter `${c.table.type_to_str(param.typ)}`',
arg.pos)
}
c.check_expected_call_arg(got_arg_typ, exp_arg_typ, node.language, arg) or { c.check_expected_call_arg(got_arg_typ, exp_arg_typ, node.language, arg) or {
// str method, allow type with str method if fn arg is string // str method, allow type with str method if fn arg is string
// Passing an int or a string array produces a c error here // Passing an int or a string array produces a c error here
@ -1454,7 +1468,7 @@ pub fn (mut c Checker) method_call(mut node ast.CallExpr) ast.Type {
c.warn('method `${left_sym.name}.$method_name` must be called from an `unsafe` block', c.warn('method `${left_sym.name}.$method_name` must be called from an `unsafe` block',
node.pos) node.pos)
} }
if !c.table.cur_fn.is_deprecated && method.is_deprecated { if !isnil(c.table.cur_fn) && !c.table.cur_fn.is_deprecated && method.is_deprecated {
c.deprecate_fnmethod('method', '${left_sym.name}.$method.name', method, node) c.deprecate_fnmethod('method', '${left_sym.name}.$method.name', method, node)
} }
c.set_node_expected_arg_types(mut node, method) c.set_node_expected_arg_types(mut node, method)
@ -1478,13 +1492,13 @@ pub fn (mut c Checker) method_call(mut node ast.CallExpr) ast.Type {
} }
// resolve return generics struct to concrete type // resolve return generics struct to concrete type
if method.generic_names.len > 0 && method.return_type.has_flag(.generic) if method.generic_names.len > 0 && method.return_type.has_flag(.generic)
&& c.table.cur_fn.generic_names.len == 0 { && !isnil(c.table.cur_fn) && c.table.cur_fn.generic_names.len == 0 {
node.return_type = c.table.unwrap_generic_type(method.return_type, method.generic_names, node.return_type = c.table.unwrap_generic_type(method.return_type, method.generic_names,
concrete_types) concrete_types)
} else { } else {
node.return_type = method.return_type node.return_type = method.return_type
} }
if node.concrete_types.len > 0 && method.return_type != 0 if node.concrete_types.len > 0 && method.return_type != 0 && !isnil(c.table.cur_fn)
&& c.table.cur_fn.generic_names.len == 0 { && c.table.cur_fn.generic_names.len == 0 {
if typ := c.table.resolve_generic_to_concrete(method.return_type, method.generic_names, if typ := c.table.resolve_generic_to_concrete(method.return_type, method.generic_names,
concrete_types) concrete_types)
@ -1615,7 +1629,7 @@ fn (mut c Checker) deprecate_fnmethod(kind string, name string, the_fn ast.Fn, n
if attr.name == 'deprecated_after' && attr.arg != '' { if attr.name == 'deprecated_after' && attr.arg != '' {
after_time = time.parse_iso8601(attr.arg) or { after_time = time.parse_iso8601(attr.arg) or {
c.error('invalid time format', attr.pos) c.error('invalid time format', attr.pos)
time.now() now
} }
} }
} }

View File

@ -274,11 +274,7 @@ pub fn (mut c Checker) if_expr(mut node ast.IfExpr) ast.Type {
} }
} }
// if only untyped literals were given default to int/f64 // if only untyped literals were given default to int/f64
if node.typ == ast.int_literal_type { node.typ = ast.mktyp(node.typ)
node.typ = ast.int_type
} else if node.typ == ast.float_literal_type {
node.typ = ast.f64_type
}
if expr_required && !node.has_else { if expr_required && !node.has_else {
d := if node.is_comptime { '$' } else { '' } d := if node.is_comptime { '$' } else { '' }
c.error('`$if_kind` expression needs `${d}else` clause', node.pos) c.error('`$if_kind` expression needs `${d}else` clause', node.pos)

View File

@ -7,6 +7,9 @@ import v.pref
// TODO: non deferred // TODO: non deferred
pub fn (mut c Checker) return_stmt(mut node ast.Return) { pub fn (mut c Checker) return_stmt(mut node ast.Return) {
if isnil(c.table.cur_fn) {
return
}
c.expected_type = c.table.cur_fn.return_type c.expected_type = c.table.cur_fn.return_type
mut expected_type := c.unwrap_generic(c.expected_type) mut expected_type := c.unwrap_generic(c.expected_type)
expected_type_sym := c.table.sym(expected_type) expected_type_sym := c.table.sym(expected_type)
@ -88,7 +91,9 @@ pub fn (mut c Checker) return_stmt(mut node ast.Return) {
} }
if expected_types.len > 0 && expected_types.len != got_types.len { if expected_types.len > 0 && expected_types.len != got_types.len {
arg := if expected_types.len == 1 { 'argument' } else { 'arguments' } arg := if expected_types.len == 1 { 'argument' } else { 'arguments' }
c.error('expected $expected_types.len $arg, but got $got_types.len', node.pos) midx := imax(0, imin(expected_types.len, expr_idxs.len - 1))
mismatch_pos := node.exprs[expr_idxs[midx]].pos()
c.error('expected $expected_types.len $arg, but got $got_types.len', mismatch_pos)
return return
} }
for i, exp_type in expected_types { for i, exp_type in expected_types {
@ -327,3 +332,11 @@ fn is_noreturn_callexpr(expr ast.Expr) bool {
} }
return false return false
} }
fn imin(a int, b int) int {
return if a < b { a } else { b }
}
fn imax(a int, b int) int {
return if a < b { b } else { a }
}

View File

@ -97,7 +97,7 @@ pub fn (mut c Checker) string_inter_lit(mut node ast.StringInterLiteral) ast.Typ
node.need_fmts[i] = fmt != c.get_default_fmt(ftyp, typ) node.need_fmts[i] = fmt != c.get_default_fmt(ftyp, typ)
} }
// check recursive str // check recursive str
if c.table.cur_fn.is_method && c.table.cur_fn.name == 'str' if !isnil(c.table.cur_fn) && c.table.cur_fn.is_method && c.table.cur_fn.name == 'str'
&& c.table.cur_fn.receiver.name == expr.str() { && c.table.cur_fn.receiver.name == expr.str() {
c.error('cannot call `str()` method recursively', expr.pos()) c.error('cannot call `str()` method recursively', expr.pos())
} }

View File

@ -219,7 +219,7 @@ pub fn (mut c Checker) struct_init(mut node ast.StructInit) ast.Type {
&& node.generic_types.len != struct_sym.info.generic_types.len { && node.generic_types.len != struct_sym.info.generic_types.len {
c.error('generic struct init expects $struct_sym.info.generic_types.len generic parameter, but got $node.generic_types.len', c.error('generic struct init expects $struct_sym.info.generic_types.len generic parameter, but got $node.generic_types.len',
node.pos) node.pos)
} else if node.generic_types.len > 0 { } else if node.generic_types.len > 0 && !isnil(c.table.cur_fn) {
for gtyp in node.generic_types { for gtyp in node.generic_types {
gtyp_name := c.table.sym(gtyp).name gtyp_name := c.table.sym(gtyp).name
if gtyp_name !in c.table.cur_fn.generic_names { if gtyp_name !in c.table.cur_fn.generic_names {
@ -247,7 +247,7 @@ pub fn (mut c Checker) struct_init(mut node ast.StructInit) ast.Type {
} }
} }
// register generic struct type when current fn is generic fn // register generic struct type when current fn is generic fn
if c.table.cur_fn.generic_names.len > 0 { if !isnil(c.table.cur_fn) && c.table.cur_fn.generic_names.len > 0 {
c.table.unwrap_generic_type(node.typ, c.table.cur_fn.generic_names, c.table.cur_concrete_types) c.table.unwrap_generic_type(node.typ, c.table.cur_fn.generic_names, c.table.cur_concrete_types)
} }
c.ensure_type_exists(node.typ, node.pos) or {} c.ensure_type_exists(node.typ, node.pos) or {}
@ -291,7 +291,7 @@ pub fn (mut c Checker) struct_init(mut node ast.StructInit) ast.Type {
'it cannot be initialized with `$type_sym.name{}`', node.pos) 'it cannot be initialized with `$type_sym.name{}`', node.pos)
} }
} }
if type_sym.name.len == 1 && c.table.cur_fn.generic_names.len == 0 { if type_sym.name.len == 1 && !isnil(c.table.cur_fn) && c.table.cur_fn.generic_names.len == 0 {
c.error('unknown struct `$type_sym.name`', node.pos) c.error('unknown struct `$type_sym.name`', node.pos)
return 0 return 0
} }

View File

@ -0,0 +1,42 @@
vlib/v/checker/tests/field_deprecations.vv:23:9: notice: field `d` will be deprecated after 2999-03-01, and will become an error after 2999-08-28; d use Xyz.a instead
21 | dump(x.c)
22 | x.c = 11
23 | dump(x.d)
| ^
24 | x.d = 45
25 | }
vlib/v/checker/tests/field_deprecations.vv:24:4: notice: field `d` will be deprecated after 2999-03-01, and will become an error after 2999-08-28; d use Xyz.a instead
22 | x.c = 11
23 | dump(x.d)
24 | x.d = 45
| ^
25 | }
26 |
vlib/v/checker/tests/field_deprecations.vv:19:9: warning: field `b` has been deprecated
17 | dump(x.a)
18 | x.a = 123
19 | dump(x.b)
| ^
20 | x.b = 456
21 | dump(x.c)
vlib/v/checker/tests/field_deprecations.vv:20:4: warning: field `b` has been deprecated
18 | x.a = 123
19 | dump(x.b)
20 | x.b = 456
| ^
21 | dump(x.c)
22 | x.c = 11
vlib/v/checker/tests/field_deprecations.vv:21:9: error: field `c` has been deprecated since 2021-03-01; c use Xyz.a instead
19 | dump(x.b)
20 | x.b = 456
21 | dump(x.c)
| ^
22 | x.c = 11
23 | dump(x.d)
vlib/v/checker/tests/field_deprecations.vv:22:4: error: field `c` has been deprecated since 2021-03-01; c use Xyz.a instead
20 | x.b = 456
21 | dump(x.c)
22 | x.c = 11
| ^
23 | dump(x.d)
24 | x.d = 45

View File

@ -0,0 +1,36 @@
import v.checker.tests.module_with_structs_with_deprecated_fields as m
struct Abc {
mut:
x int
d int [deprecated]
z int
}
fn use_m_externally() {
x := m.Xyz{}
dump(x)
}
fn use_m_externally_and_use_deprecated_fields() {
mut x := m.Xyz{}
dump(x.a)
x.a = 123
dump(x.b)
x.b = 456
dump(x.c)
x.c = 11
dump(x.d)
x.d = 45
}
fn main() {
mut a := Abc{}
a.x = 1
a.d = 1
a.z = 1
dump(a)
println(a.d)
x := a.d + 1
dump(x)
}

View File

@ -1,4 +1,4 @@
vlib/v/checker/tests/fn_call_arg_mismatch_err_c.vv:13:18: error: `os.chdir(files) ?` (no value) used as value in argument 1 to `os.ls` vlib/v/checker/tests/fn_call_arg_mismatch_err_c.vv:13:18: error: `os.chdir(files)?` (no value) used as value in argument 1 to `os.ls`
11 | println(files) 11 | println(files)
12 | } else { 12 | } else {
13 | println(os.ls(os.chdir(files)?)?) 13 | println(os.ls(os.chdir(files)?)?)

View File

@ -0,0 +1,13 @@
vlib/v/checker/tests/fn_ref_arg_mismatch_err.vv:15:10: error: literal argument cannot be passed as reference parameter `&T`
13 | fn main() {
14 | foo := Foo<int>{}
15 | foo.foo(12)
| ~~
16 |
17 | bar<int>(12)
vlib/v/checker/tests/fn_ref_arg_mismatch_err.vv:17:11: error: literal argument cannot be passed as reference parameter `&T`
15 | foo.foo(12)
16 |
17 | bar<int>(12)
| ~~
18 | }

View File

@ -0,0 +1,18 @@
module main
struct Foo<T> { }
fn (f &Foo<T>) foo(a &T) {
println(a)
}
fn bar<T>(a &T) {
println(a)
}
fn main() {
foo := Foo<int>{}
foo.foo(12)
bar<int>(12)
}

Some files were not shown because too many files have changed in this diff Show More