builtin: move C. calls to .c.v files (#11164)

pull/11155/head^2
Delyan Angelov 2021-08-12 21:46:38 +03:00 committed by GitHub
parent c9e9556a92
commit 47884dfd1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 288 additions and 206 deletions

View File

@ -0,0 +1 @@
module builtin

View File

@ -71,7 +71,7 @@ fn new_array_from_c_array(len int, cap int, elm_size int, c_array voidptr) array
cap: cap_
}
// TODO Write all memory functions (like memcpy) in V
unsafe { C.memcpy(arr.data, c_array, len * elm_size) }
unsafe { vmemcpy(arr.data, c_array, len * elm_size) }
return arr
}
@ -98,7 +98,7 @@ fn (mut a array) ensure_cap(required int) {
new_size := cap * a.element_size
new_data := vcalloc(new_size)
if a.data != voidptr(0) {
unsafe { C.memcpy(new_data, a.data, a.len * a.element_size) }
unsafe { vmemcpy(new_data, a.data, a.len * a.element_size) }
// TODO: the old data may be leaked when no GC is used (ref-counting?)
}
a.data = new_data
@ -136,9 +136,9 @@ pub fn (a array) repeat_to_depth(count int, depth int) array {
for i in 0 .. count {
if depth > 0 {
ary_clone := unsafe { a.clone_to_depth(depth) }
unsafe { C.memcpy(arr.get_unsafe(i * a.len), &byte(ary_clone.data), a.len * a.element_size) }
unsafe { vmemcpy(arr.get_unsafe(i * a.len), &byte(ary_clone.data), a.len * a.element_size) }
} else {
unsafe { C.memcpy(arr.get_unsafe(i * a.len), &byte(a.data), a.len * a.element_size) }
unsafe { vmemcpy(arr.get_unsafe(i * a.len), &byte(a.data), a.len * a.element_size) }
}
}
}
@ -150,7 +150,7 @@ pub fn (mut a array) sort_with_compare(compare voidptr) {
$if freestanding {
panic('sort does not work with -freestanding')
} $else {
C.qsort(mut a.data, a.len, a.element_size, compare)
unsafe { vqsort(a.data, size_t(a.len), size_t(a.element_size), compare) }
}
}
@ -163,7 +163,7 @@ pub fn (mut a array) insert(i int, val voidptr) {
}
a.ensure_cap(a.len + 1)
unsafe {
C.memmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
a.set_unsafe(i, val)
}
a.len++
@ -181,8 +181,8 @@ pub fn (mut a array) insert_many(i int, val voidptr, size int) {
elem_size := a.element_size
unsafe {
iptr := a.get_unsafe(i)
C.memmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
C.memcpy(iptr, val, size * elem_size)
vmemmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
vmemcpy(iptr, val, size * elem_size)
}
a.len += size
}
@ -217,9 +217,9 @@ pub fn (mut a array) delete_many(i int, size int) {
new_size := a.len - size
new_cap := if new_size == 0 { 1 } else { new_size }
a.data = vcalloc(new_cap * a.element_size)
unsafe { C.memcpy(a.data, old_data, i * a.element_size) }
unsafe { vmemcpy(a.data, old_data, i * a.element_size) }
unsafe {
C.memcpy(&byte(a.data) + i * a.element_size, &byte(old_data) + (i + size) * a.element_size,
vmemcpy(&byte(a.data) + i * a.element_size, &byte(old_data) + (i + size) * a.element_size,
(a.len - i - size) * a.element_size)
}
a.len = new_size
@ -385,14 +385,14 @@ pub fn (a &array) clone_to_depth(depth int) array {
if depth > 0 && a.element_size == sizeof(array) && a.len >= 0 && a.cap >= a.len {
for i in 0 .. a.len {
ar := array{}
unsafe { C.memcpy(&ar, a.get_unsafe(i), int(sizeof(array))) }
unsafe { vmemcpy(&ar, a.get_unsafe(i), int(sizeof(array))) }
ar_clone := unsafe { ar.clone_to_depth(depth - 1) }
unsafe { arr.set_unsafe(i, &ar_clone) }
}
return arr
} else {
if !isnil(a.data) {
unsafe { C.memcpy(&byte(arr.data), a.data, a.cap * a.element_size) }
unsafe { vmemcpy(&byte(arr.data), a.data, a.cap * a.element_size) }
}
return arr
}
@ -401,7 +401,7 @@ pub fn (a &array) clone_to_depth(depth int) array {
// we manually inline this for single operations for performance without -prod
[inline; unsafe]
fn (mut a array) set_unsafe(i int, val voidptr) {
unsafe { C.memcpy(&byte(a.data) + a.element_size * i, val, a.element_size) }
unsafe { vmemcpy(&byte(a.data) + a.element_size * i, val, a.element_size) }
}
// Private function. Used to implement assignment to the array element.
@ -411,12 +411,12 @@ fn (mut a array) set(i int, val voidptr) {
panic('array.set: index out of range (i == $i, a.len == $a.len)')
}
}
unsafe { C.memcpy(&byte(a.data) + a.element_size * i, val, a.element_size) }
unsafe { vmemcpy(&byte(a.data) + a.element_size * i, val, a.element_size) }
}
fn (mut a array) push(val voidptr) {
a.ensure_cap(a.len + 1)
unsafe { C.memmove(&byte(a.data) + a.element_size * a.len, val, a.element_size) }
unsafe { vmemmove(&byte(a.data) + a.element_size * a.len, val, a.element_size) }
a.len++
}
@ -429,13 +429,13 @@ pub fn (mut a3 array) push_many(val voidptr, size int) {
copy := a3.clone()
a3.ensure_cap(a3.len + size)
unsafe {
// C.memcpy(a.data, copy.data, copy.element_size * copy.len)
C.memcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
// vmemcpy(a.data, copy.data, copy.element_size * copy.len)
vmemcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
}
} else {
a3.ensure_cap(a3.len + size)
if !isnil(a3.data) && !isnil(val) {
unsafe { C.memcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
unsafe { vmemcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
}
}
a3.len += size
@ -449,10 +449,10 @@ pub fn (mut a array) reverse_in_place() {
unsafe {
mut tmp_value := malloc(a.element_size)
for i in 0 .. a.len / 2 {
C.memcpy(tmp_value, &byte(a.data) + i * a.element_size, a.element_size)
C.memcpy(&byte(a.data) + i * a.element_size, &byte(a.data) +
vmemcpy(tmp_value, &byte(a.data) + i * a.element_size, a.element_size)
vmemcpy(&byte(a.data) + i * a.element_size, &byte(a.data) +
(a.len - 1 - i) * a.element_size, a.element_size)
C.memcpy(&byte(a.data) + (a.len - 1 - i) * a.element_size, tmp_value, a.element_size)
vmemcpy(&byte(a.data) + (a.len - 1 - i) * a.element_size, tmp_value, a.element_size)
}
free(tmp_value)
}
@ -587,7 +587,7 @@ pub fn (b []byte) hex() string {
pub fn copy(dst []byte, src []byte) int {
min := if dst.len < src.len { dst.len } else { src.len }
if min > 0 {
unsafe { C.memcpy(&byte(dst.data), src.data, min) }
unsafe { vmemcpy(&byte(dst.data), src.data, min) }
}
return min
}

View File

@ -57,7 +57,7 @@ fn new_array_from_c_array_noscan(len int, cap int, elm_size int, c_array voidptr
cap: cap_
}
// TODO Write all memory functions (like memcpy) in V
unsafe { C.memcpy(arr.data, c_array, len * elm_size) }
unsafe { vmemcpy(arr.data, c_array, len * elm_size) }
return arr
}
@ -73,7 +73,7 @@ fn (mut a array) ensure_cap_noscan(required int) {
new_size := cap * a.element_size
new_data := vcalloc_noscan(new_size)
if a.data != voidptr(0) {
unsafe { C.memcpy(new_data, a.data, a.len * a.element_size) }
unsafe { vmemcpy(new_data, a.data, a.len * a.element_size) }
// TODO: the old data may be leaked when no GC is used (ref-counting?)
}
a.data = new_data
@ -105,9 +105,9 @@ fn (a array) repeat_to_depth_noscan(count int, depth int) array {
for i in 0 .. count {
if depth > 0 {
ary_clone := unsafe { a.clone_to_depth_noscan(depth) }
unsafe { C.memcpy(arr.get_unsafe(i * a.len), &byte(ary_clone.data), a.len * a.element_size) }
unsafe { vmemcpy(arr.get_unsafe(i * a.len), &byte(ary_clone.data), a.len * a.element_size) }
} else {
unsafe { C.memcpy(arr.get_unsafe(i * a.len), &byte(a.data), a.len * a.element_size) }
unsafe { vmemcpy(arr.get_unsafe(i * a.len), &byte(a.data), a.len * a.element_size) }
}
}
}
@ -123,7 +123,7 @@ fn (mut a array) insert_noscan(i int, val voidptr) {
}
a.ensure_cap_noscan(a.len + 1)
unsafe {
C.memmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
a.set_unsafe(i, val)
}
a.len++
@ -141,8 +141,8 @@ fn (mut a array) insert_many_noscan(i int, val voidptr, size int) {
elem_size := a.element_size
unsafe {
iptr := a.get_unsafe(i)
C.memmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
C.memcpy(iptr, val, size * elem_size)
vmemmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
vmemcpy(iptr, val, size * elem_size)
}
a.len += size
}
@ -198,14 +198,14 @@ fn (a &array) clone_to_depth_noscan(depth int) array {
if depth > 0 {
for i in 0 .. a.len {
ar := array{}
unsafe { C.memcpy(&ar, a.get_unsafe(i), int(sizeof(array))) }
unsafe { vmemcpy(&ar, a.get_unsafe(i), int(sizeof(array))) }
ar_clone := unsafe { ar.clone_to_depth_noscan(depth - 1) }
unsafe { arr.set_unsafe(i, &ar_clone) }
}
return arr
} else {
if !isnil(a.data) {
unsafe { C.memcpy(&byte(arr.data), a.data, a.cap * a.element_size) }
unsafe { vmemcpy(&byte(arr.data), a.data, a.cap * a.element_size) }
}
return arr
}
@ -213,7 +213,7 @@ fn (a &array) clone_to_depth_noscan(depth int) array {
fn (mut a array) push_noscan(val voidptr) {
a.ensure_cap_noscan(a.len + 1)
unsafe { C.memmove(&byte(a.data) + a.element_size * a.len, val, a.element_size) }
unsafe { vmemmove(&byte(a.data) + a.element_size * a.len, val, a.element_size) }
a.len++
}
@ -226,13 +226,12 @@ fn (mut a3 array) push_many_noscan(val voidptr, size int) {
copy := a3.clone()
a3.ensure_cap_noscan(a3.len + size)
unsafe {
// C.memcpy(a.data, copy.data, copy.element_size * copy.len)
C.memcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
vmemcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
}
} else {
a3.ensure_cap_noscan(a3.len + size)
if !isnil(a3.data) && !isnil(val) {
unsafe { C.memcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
unsafe { vmemcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
}
}
a3.len += size

View File

@ -525,3 +525,23 @@ fn v_fixed_index(i int, len int) int {
}
return i
}
// print_backtrace shows a backtrace of the current call stack on stdout
pub fn print_backtrace() {
// At the time of backtrace_symbols_fd call, the C stack would look something like this:
// * print_backtrace_skipping_top_frames
// * print_backtrace itself
// * the rest of the backtrace frames
// => top 2 frames should be skipped, since they will not be informative to the developer
$if !no_backtrace ? {
$if freestanding {
println(bare_backtrace())
} $else {
$if tinyc {
C.tcc_backtrace(c'Backtrace')
} $else {
print_backtrace_skipping_top_frames(2)
}
}
}
}

View File

@ -15,26 +15,6 @@ fn on_panic(f fn(int)int) {
}
*/
// print_backtrace shows a backtrace of the current call stack on stdout
pub fn print_backtrace() {
// At the time of backtrace_symbols_fd call, the C stack would look something like this:
// * print_backtrace_skipping_top_frames
// * print_backtrace itself
// * the rest of the backtrace frames
// => top 2 frames should be skipped, since they will not be informative to the developer
$if !no_backtrace ? {
$if freestanding {
println(bare_backtrace())
} $else {
$if tinyc {
C.tcc_backtrace(c'Backtrace')
} $else {
print_backtrace_skipping_top_frames(2)
}
}
}
}
struct VCastTypeIndexName {
tindex int
tname string

View File

@ -1,11 +1,13 @@
module builtin
// <string.h>
fn C.memcpy(dest &byte, src &byte, n int) voidptr
fn C.memcpy(dest voidptr, const_src voidptr, n size_t) voidptr
fn C.memcmp(&byte, &byte, int) int
fn C.memcmp(const_s1 voidptr, const_s2 voidptr, n size_t) int
fn C.memmove(&byte, &byte, int) voidptr
fn C.memmove(dest voidptr, const_src voidptr, n size_t) voidptr
fn C.memset(str voidptr, c int, n size_t) voidptr
[trusted]
fn C.calloc(int, int) &byte
@ -125,8 +127,6 @@ fn C.rename(old_filename &char, new_filename &char) int
fn C.fgets(str &char, n int, stream &C.FILE) int
fn C.memset(str voidptr, c int, n size_t) int
[trusted]
fn C.sigemptyset() int

View File

@ -0,0 +1,72 @@
module builtin
// vstrlen returns the V length of the C string `s` (0 terminator is not counted).
// The C string is expected to be a &byte pointer.
[inline; unsafe]
pub fn vstrlen(s &byte) int {
return unsafe { C.strlen(&char(s)) }
}
// vstrlen_char returns the V length of the C string `s` (0 terminator is not counted).
// The C string is expected to be a &char pointer.
[inline; unsafe]
pub fn vstrlen_char(s &char) int {
return unsafe { C.strlen(s) }
}
// vmemcpy copies n bytes from memory area src to memory area dest.
// The memory areas *MUST NOT OVERLAP*. Use vmemmove if the memory
// areas do overlap. vmemcpy returns a pointer to `dest`.
[inline; unsafe]
pub fn vmemcpy(dest voidptr, const_src voidptr, n int) voidptr {
unsafe {
return C.memcpy(dest, const_src, n)
}
}
// vmemmove copies n bytes from memory area `src` to memory area `dest`.
// The memory areas *MAY* overlap: copying takes place as though the bytes
// in `src` are first copied into a temporary array that does not overlap
// `src` or `dest`, and the bytes are then copied from the temporary array
// to `dest`. vmemmove returns a pointer to `dest`.
[inline; unsafe]
pub fn vmemmove(dest voidptr, const_src voidptr, n int) voidptr {
unsafe {
return C.memmove(dest, const_src, n)
}
}
// vmemcmp compares the first n bytes (each interpreted as unsigned char)
// of the memory areas s1 and s2. It returns an integer less than, equal to,
// or greater than zero, if the first n bytes of s1 is found, respectively,
// to be less than, to match, or be greater than the first n bytes of s2.
// For a nonzero return value, the sign is determined by the sign of the
// difference between the first pair of bytes (interpreted as unsigned char)
// that differ in s1 and s2.
// If n is zero, the return value is zero.
// Do NOT use vmemcmp to compare security critical data, such as cryptographic
// secrets, because the required CPU time depends on the number of equal bytes.
// You should use a function that performs comparisons in constant time for
// this.
[inline; unsafe]
pub fn vmemcmp(const_s1 voidptr, const_s2 voidptr, n int) int {
unsafe {
return C.memcmp(const_s1, const_s2, n)
}
}
// vmemset fills the first `n` bytes of the memory area pointed to by `s`,
// with the constant byte `c`. It returns a pointer to the memory area `s`.
[inline; unsafe]
pub fn vmemset(s voidptr, c int, n int) voidptr {
unsafe {
return C.memset(s, c, n)
}
}
type FnSortCB = fn (const_a voidptr, const_b voidptr) int
[inline; unsafe]
fn vqsort(base voidptr, nmemb size_t, size size_t, sort_cb FnSortCB) {
C.qsort(base, nmemb, size, voidptr(sort_cb))
}

View File

@ -71,7 +71,7 @@ fn (nn int) str_l(max int) string {
buf[index] = `-`
}
diff := max - index
C.memmove(buf, buf + index, diff + 1)
vmemmove(buf, buf + index, diff + 1)
/*
// === manual memory move for bare metal ===
mut c:= 0
@ -142,7 +142,7 @@ pub fn (nn u32) str() string {
index++
}
diff := max - index
C.memmove(buf, buf + index, diff + 1)
vmemmove(buf, buf + index, diff + 1)
return tos(buf, diff)
// return tos(memdup(&buf[0] + index, (max - index)), (max - index))
@ -196,7 +196,7 @@ pub fn (nn i64) str() string {
buf[index] = `-`
}
diff := max - index
C.memmove(buf, buf + index, diff + 1)
vmemmove(buf, buf + index, diff + 1)
return tos(buf, diff)
// return tos(memdup(&buf[0] + index, (max - index)), (max - index))
}
@ -233,7 +233,7 @@ pub fn (nn u64) str() string {
index++
}
diff := max - index
C.memmove(buf, buf + index, diff + 1)
vmemmove(buf, buf + index, diff + 1)
return tos(buf, diff)
// return tos(memdup(&buf[0] + index, (max - index)), (max - index))
}

View File

@ -155,3 +155,8 @@ fn bare_backtrace() string {
fn __exit(code int) {
sys_exit(code)
}
[export: 'qsort']
fn __qsort(base voidptr, nmemb size_t, size size_t, sort_cb FnSortCB) {
panic('qsort() is not yet implemented in `-freestanding`')
}

View File

@ -0,0 +1,79 @@
module builtin
fn C.wyhash(&byte, u64, u64, &u64) u64
fn C.wyhash64(u64, u64) u64
// fast_string_eq is intended to be fast when
// the strings are very likely to be equal
// TODO: add branch prediction hints
[inline]
fn fast_string_eq(a string, b string) bool {
if a.len != b.len {
return false
}
unsafe {
return C.memcmp(a.str, b.str, b.len) == 0
}
}
fn map_hash_string(pkey voidptr) u64 {
key := *unsafe { &string(pkey) }
return C.wyhash(key.str, u64(key.len), 0, &u64(C._wyp))
}
fn map_hash_int_1(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &byte(pkey) }, 0)
}
fn map_hash_int_2(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u16(pkey) }, 0)
}
fn map_hash_int_4(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u32(pkey) }, 0)
}
fn map_hash_int_8(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u64(pkey) }, 0)
}
// Move all zeros to the end of the array and resize array
fn (mut d DenseArray) zeros_to_end() {
// TODO alloca?
mut tmp_value := unsafe { malloc(d.value_bytes) }
mut tmp_key := unsafe { malloc(d.key_bytes) }
mut count := 0
for i in 0 .. d.len {
if d.has_index(i) {
// swap (TODO: optimize)
unsafe {
if count != i {
// Swap keys
C.memcpy(tmp_key, d.key(count), d.key_bytes)
C.memcpy(d.key(count), d.key(i), d.key_bytes)
C.memcpy(d.key(i), tmp_key, d.key_bytes)
// Swap values
C.memcpy(tmp_value, d.value(count), d.value_bytes)
C.memcpy(d.value(count), d.value(i), d.value_bytes)
C.memcpy(d.value(i), tmp_value, d.value_bytes)
}
}
count++
}
}
unsafe {
free(tmp_value)
free(tmp_key)
d.deletes = 0
// TODO: reallocate instead as more deletes are likely
free(d.all_deleted)
}
d.len = count
old_cap := d.cap
d.cap = if count < 8 { 8 } else { count }
unsafe {
d.values = realloc_data(d.values, d.value_bytes * old_cap, d.value_bytes * d.cap)
d.keys = realloc_data(d.keys, d.key_bytes * old_cap, d.key_bytes * d.cap)
}
}

View File

@ -3,10 +3,6 @@
// that can be found in the LICENSE file.
module builtin
fn C.wyhash(&byte, u64, u64, &u64) u64
fn C.wyhash64(u64, u64) u64
/*
This is a highly optimized hashmap implementation. It has several traits that
in combination makes it very fast and memory efficient. Here is a short expl-
@ -79,19 +75,6 @@ const (
probe_inc = u32(0x01000000)
)
// fast_string_eq is intended to be fast when
// the strings are very likely to be equal
// TODO: add branch prediction hints
[inline]
fn fast_string_eq(a string, b string) bool {
if a.len != b.len {
return false
}
unsafe {
return C.memcmp(a.str, b.str, b.len) == 0
}
}
// DenseArray represents a dynamic array with very low growth factor
struct DenseArray {
key_bytes int
@ -152,7 +135,7 @@ fn (mut d DenseArray) expand() int {
d.values = realloc_data(d.values, old_value_size, d.value_bytes * d.cap)
if d.deletes != 0 {
d.all_deleted = realloc_data(d.all_deleted, old_cap, d.cap)
C.memset(d.all_deleted + d.len, 0, d.cap - d.len)
vmemset(d.all_deleted + d.len, 0, d.cap - d.len)
}
}
}
@ -166,46 +149,6 @@ fn (mut d DenseArray) expand() int {
return push_index
}
// Move all zeros to the end of the array and resize array
fn (mut d DenseArray) zeros_to_end() {
// TODO alloca?
mut tmp_value := unsafe { malloc(d.value_bytes) }
mut tmp_key := unsafe { malloc(d.key_bytes) }
mut count := 0
for i in 0 .. d.len {
if d.has_index(i) {
// swap (TODO: optimize)
unsafe {
if count != i {
// Swap keys
C.memcpy(tmp_key, d.key(count), d.key_bytes)
C.memcpy(d.key(count), d.key(i), d.key_bytes)
C.memcpy(d.key(i), tmp_key, d.key_bytes)
// Swap values
C.memcpy(tmp_value, d.value(count), d.value_bytes)
C.memcpy(d.value(count), d.value(i), d.value_bytes)
C.memcpy(d.value(i), tmp_value, d.value_bytes)
}
}
count++
}
}
unsafe {
free(tmp_value)
free(tmp_key)
d.deletes = 0
// TODO: reallocate instead as more deletes are likely
free(d.all_deleted)
}
d.len = count
old_cap := d.cap
d.cap = if count < 8 { 8 } else { count }
unsafe {
d.values = realloc_data(d.values, d.value_bytes * old_cap, d.value_bytes * d.cap)
d.keys = realloc_data(d.keys, d.key_bytes * old_cap, d.key_bytes * d.cap)
}
}
type MapHashFn = fn (voidptr) u64
type MapEqFn = fn (voidptr, voidptr) bool
@ -246,27 +189,6 @@ pub mut:
len int
}
fn map_hash_string(pkey voidptr) u64 {
key := *unsafe { &string(pkey) }
return C.wyhash(key.str, u64(key.len), 0, &u64(C._wyp))
}
fn map_hash_int_1(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &byte(pkey) }, 0)
}
fn map_hash_int_2(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u16(pkey) }, 0)
}
fn map_hash_int_4(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u32(pkey) }, 0)
}
fn map_hash_int_8(pkey voidptr) u64 {
return C.wyhash64(*unsafe { &u64(pkey) }, 0)
}
fn map_eq_string(a voidptr, b voidptr) bool {
return fast_string_eq(*unsafe { &string(a) }, *unsafe { &string(b) })
}
@ -367,7 +289,7 @@ fn new_map_init(hash_fn MapHashFn, key_eq_fn MapEqFn, clone_fn MapCloneFn, free_
pub fn (mut m map) move() map {
r := *m
unsafe {
C.memset(m, 0, sizeof(map))
vmemset(m, 0, int(sizeof(map)))
}
return r
}
@ -428,7 +350,7 @@ fn (mut m map) ensure_extra_metas(probe_count u32) {
unsafe {
x := realloc_data(&byte(m.metas), int(size_of_u32 * old_mem_size), int(size_of_u32 * mem_size))
m.metas = &u32(x)
C.memset(m.metas + mem_size - extra_metas_inc, 0, int(sizeof(u32) * extra_metas_inc))
vmemset(m.metas + mem_size - extra_metas_inc, 0, int(sizeof(u32) * extra_metas_inc))
}
// Should almost never happen
if probe_count == 252 {
@ -454,7 +376,7 @@ fn (mut m map) set(key voidptr, value voidptr) {
if m.key_eq_fn(key, pkey) {
unsafe {
pval := m.key_values.value(kv_index)
C.memcpy(pval, value, m.value_bytes)
vmemcpy(pval, value, m.value_bytes)
}
return
}
@ -466,7 +388,7 @@ fn (mut m map) set(key voidptr, value voidptr) {
pkey := m.key_values.key(kv_index)
pvalue := m.key_values.value(kv_index)
m.clone_fn(pkey, key)
C.memcpy(&byte(pvalue), value, m.value_bytes)
vmemcpy(&byte(pvalue), value, m.value_bytes)
}
m.meta_greater(index, meta, u32(kv_index))
m.len++
@ -498,7 +420,7 @@ fn (mut m map) rehash() {
// TODO: use realloc_data here too
x := v_realloc(&byte(m.metas), int(meta_bytes))
m.metas = &u32(x)
C.memset(m.metas, 0, meta_bytes)
vmemset(m.metas, 0, int(meta_bytes))
}
for i := 0; i < m.key_values.len; i++ {
if !m.key_values.has_index(i) {
@ -663,7 +585,7 @@ pub fn (mut m map) delete(key voidptr) {
m.metas[index] = 0
m.free_fn(pkey)
// Mark key as deleted
C.memset(pkey, 0, m.key_bytes)
vmemset(pkey, 0, m.key_bytes)
}
if m.key_values.len <= 32 {
return
@ -750,7 +672,7 @@ pub fn (m &map) clone() map {
clone_fn: m.clone_fn
free_fn: m.free_fn
}
unsafe { C.memcpy(res.metas, m.metas, metasize) }
unsafe { vmemcpy(res.metas, m.metas, metasize) }
if !m.has_string_keys {
return res
}

View File

@ -0,0 +1,15 @@
module builtin
[typedef]
struct C.IError {
_object voidptr
}
[unsafe]
pub fn (ie &IError) free() {
unsafe {
ie.msg.free()
cie := &C.IError(ie)
free(cie._object)
}
}

View File

@ -74,7 +74,7 @@ fn opt_ok(data voidptr, mut option Option, size int) {
unsafe {
*option = Option{}
// use err to get the end of OptionBase and then memcpy into it
C.memcpy(&byte(&option.err) + sizeof(IError), data, size)
vmemcpy(&byte(&option.err) + sizeof(IError), data, size)
}
}
@ -87,17 +87,3 @@ pub fn (e &Error) free() {
pub fn (n &None__) free() {
unsafe { n.msg.free() }
}
[typedef]
struct C.IError {
_object voidptr
}
[unsafe]
pub fn (ie &IError) free() {
unsafe {
ie.msg.free()
cie := &C.IError(ie)
free(cie._object)
}
}

View File

@ -55,7 +55,7 @@ pub fn (b []byte) clone() []byte {
pub fn (b []byte) bytestr() string {
unsafe {
buf := malloc_noscan(b.len + 1)
C.memcpy(buf, b.data, b.len)
vmemcpy(buf, b.data, b.len)
buf[b.len] = 0
return tos(buf, b.len)
}

View File

@ -80,7 +80,7 @@ fn (mut m SortedMap) set(key string, value voidptr) {
parent.split_child(child_index, mut node)
if key == parent.keys[child_index] {
unsafe {
C.memcpy(parent.values[child_index], value, m.value_bytes)
vmemcpy(parent.values[child_index], value, m.value_bytes)
}
return
}
@ -96,7 +96,7 @@ fn (mut m SortedMap) set(key string, value voidptr) {
}
if i != node.len && key == node.keys[i] {
unsafe {
C.memcpy(node.values[i], value, m.value_bytes)
vmemcpy(node.values[i], value, m.value_bytes)
}
return
}
@ -110,7 +110,7 @@ fn (mut m SortedMap) set(key string, value voidptr) {
node.keys[j + 1] = key
unsafe {
node.values[j + 1] = malloc(m.value_bytes)
C.memcpy(node.values[j + 1], value, m.value_bytes)
vmemcpy(node.values[j + 1], value, m.value_bytes)
}
node.len++
m.len++
@ -169,7 +169,7 @@ fn (m SortedMap) get(key string, out voidptr) bool {
}
if i != -1 && key == node.keys[i] {
unsafe {
C.memcpy(out, node.values[i], m.value_bytes)
vmemcpy(out, node.values[i], m.value_bytes)
}
return true
}

View File

@ -55,12 +55,6 @@ mut:
is_lit int
}
// vstrlen returns the V length of the C string `s` (0 terminator is not counted).
[unsafe]
pub fn vstrlen(s &byte) int {
return unsafe { C.strlen(&char(s)) }
}
pub fn (s string) runes() []rune {
mut runes := []rune{cap: s.len}
for i := 0; i < s.len; i++ {
@ -118,7 +112,7 @@ pub fn tos3(s &char) string {
}
return string{
str: &byte(s)
len: unsafe { C.strlen(s) }
len: unsafe { vstrlen_char(s) }
}
}
@ -147,7 +141,7 @@ pub fn tos5(s &char) string {
pub fn (bp &byte) vstring() string {
return string{
str: unsafe { bp }
len: unsafe { C.strlen(&char(bp)) }
len: unsafe { vstrlen(bp) }
}
}
@ -168,7 +162,7 @@ pub fn (bp &byte) vstring_with_len(len int) string {
pub fn (cp &char) vstring() string {
return string{
str: &byte(cp)
len: unsafe { C.strlen(cp) }
len: unsafe { vstrlen_char(cp) }
is_lit: 0
}
}
@ -195,7 +189,7 @@ pub fn (cp &char) vstring_with_len(len int) string {
pub fn (bp &byte) vstring_literal() string {
return string{
str: unsafe { bp }
len: unsafe { C.strlen(&char(bp)) }
len: unsafe { vstrlen(bp) }
is_lit: 1
}
}
@ -218,7 +212,7 @@ pub fn (bp &byte) vstring_literal_with_len(len int) string {
pub fn (cp &char) vstring_literal() string {
return string{
str: &byte(cp)
len: unsafe { C.strlen(cp) }
len: unsafe { vstrlen_char(cp) }
is_lit: 1
}
}
@ -251,7 +245,7 @@ pub fn (a string) clone() string {
len: a.len
}
unsafe {
C.memcpy(b.str, a.str, a.len)
vmemcpy(b.str, a.str, a.len)
b.str[a.len] = 0
}
return b
@ -453,13 +447,11 @@ pub fn (s string) i16() i16 {
// f32 returns the value of the string as f32 `'1.0'.f32() == f32(1)`.
pub fn (s string) f32() f32 {
// return C.atof(&char(s.str))
return f32(strconv.atof64(s))
}
// f64 returns the value of the string as f64 `'1.0'.f64() == f64(1)`.
pub fn (s string) f64() f64 {
// return C.atof(&char(s.str))
return strconv.atof64(s)
}
@ -494,7 +486,7 @@ fn (s string) == (a string) bool {
}
}
unsafe {
return C.memcmp(s.str, a.str, a.len) == 0
return vmemcmp(s.str, a.str, a.len) == 0
}
}
@ -1411,13 +1403,13 @@ pub fn (a []string) join(sep string) string {
mut idx := 0
for i, val in a {
unsafe {
C.memcpy(res.str + idx, val.str, val.len)
vmemcpy(res.str + idx, val.str, val.len)
idx += val.len
}
// Add sep if it's not last
if i != a.len - 1 {
unsafe {
C.memcpy(res.str + idx, sep.str, sep.len)
vmemcpy(res.str + idx, sep.str, sep.len)
idx += sep.len
}
}
@ -1482,7 +1474,7 @@ pub fn (s string) bytes() []byte {
return []
}
mut buf := []byte{len: s.len}
unsafe { C.memcpy(buf.data, s.str, s.len) }
unsafe { vmemcpy(buf.data, s.str, s.len) }
return buf
}

View File

@ -1,6 +1,6 @@
module builtin
// NB: this file will be removed soon
// NB: this file will be removed soon
// byteptr.vbytes() - makes a V []byte structure from a C style memory buffer. NB: the data is reused, NOT copied!
[unsafe]
@ -15,7 +15,7 @@ pub fn (data byteptr) vbytes(len int) []byte {
pub fn (bp byteptr) vstring() string {
return string{
str: bp
len: unsafe { C.strlen(&char(bp)) }
len: unsafe { vstrlen(bp) }
}
}
@ -36,7 +36,7 @@ pub fn (bp byteptr) vstring_with_len(len int) string {
pub fn (cp charptr) vstring() string {
return string{
str: byteptr(cp)
len: unsafe { C.strlen(&char(cp)) }
len: unsafe { vstrlen_char(cp) }
is_lit: 0
}
}
@ -63,7 +63,7 @@ pub fn (cp charptr) vstring_with_len(len int) string {
pub fn (bp byteptr) vstring_literal() string {
return string{
str: bp
len: unsafe { C.strlen(&char(bp)) }
len: unsafe { vstrlen(bp) }
is_lit: 1
}
}
@ -86,7 +86,7 @@ pub fn (bp byteptr) vstring_literal_with_len(len int) string {
pub fn (cp charptr) vstring_literal() string {
return string{
str: byteptr(cp)
len: unsafe { C.strlen(&char(cp)) }
len: unsafe { vstrlen_char(cp) }
is_lit: 1
}
}

View File

@ -36,9 +36,14 @@ pub fn (prefs &Preferences) should_compile_filtered_files(dir string, files_ []s
}
mut allowed := false
for cdefine in prefs.compile_defines {
file_postfix := '_d_${cdefine}.v'
if file.ends_with(file_postfix) {
allowed = true
file_postfixes := ['_d_${cdefine}.v', '_d_${cdefine}.c.v']
for file_postfix in file_postfixes {
if file.ends_with(file_postfix) {
allowed = true
break
}
}
if allowed {
break
}
}
@ -49,9 +54,14 @@ pub fn (prefs &Preferences) should_compile_filtered_files(dir string, files_ []s
if file.contains('_notd_') {
mut allowed := true
for cdefine in prefs.compile_defines {
file_postfix := '_notd_${cdefine}.v'
if file.ends_with(file_postfix) {
allowed = false
file_postfixes := ['_notd_${cdefine}.v', '_notd_${cdefine}.c.v']
for file_postfix in file_postfixes {
if file.ends_with(file_postfix) {
allowed = false
break
}
}
if !allowed {
break
}
}
@ -142,27 +152,28 @@ pub fn (prefs &Preferences) should_compile_c(file string) bool {
if prefs.backend != .native && file.ends_with('_native.v') {
return false
}
if prefs.os == .windows && (file.ends_with('_nix.c.v') || file.ends_with('_nix.v')) {
return false
}
if prefs.os != .windows && (file.ends_with('_windows.c.v') || file.ends_with('_windows.v')) {
return false
}
//
if prefs.os != .linux && (file.ends_with('_linux.c.v') || file.ends_with('_linux.v')) {
return false
}
//
if prefs.os != .macos && (file.ends_with('_darwin.c.v') || file.ends_with('_darwin.v')) {
return false
}
if (file.ends_with('_ios.c.v') || file.ends_with('_ios.v')) && prefs.os != .ios {
return false
}
if file.ends_with('_nix.c.v') && prefs.os == .windows {
return false
}
if prefs.os != .macos && (file.ends_with('_macos.c.v') || file.ends_with('_macos.v')) {
return false
}
if prefs.os == .windows && file.ends_with('_nix.c.v') {
//
if prefs.os != .ios && (file.ends_with('_ios.c.v') || file.ends_with('_ios.v')) {
return false
}
//
if prefs.os != .android && file.ends_with('_android.c.v') {
return false
}