diff --git a/vlib/builtin/linux_bare/libc_impl.v b/vlib/builtin/linux_bare/libc_impl.v
index 7c7fbb9cdf..b5724b7351 100644
--- a/vlib/builtin/linux_bare/libc_impl.v
+++ b/vlib/builtin/linux_bare/libc_impl.v
@@ -111,12 +111,6 @@ fn memcmp(a &C.void, b &C.void, n usize) int {
 [export: 'free']
 [unsafe]
 fn __free(ptr &C.void) {
-	/*
-	err := mm_free(ptr)
-	if err != .enoerror {
-		eprintln('free error:')
-		panic(err)
-	}*/
 	unsafe {
 		global_allocator.free_(ptr)
 	}
diff --git a/vlib/builtin/linux_bare/memory_managment.v b/vlib/builtin/linux_bare/memory_managment.v
index e17520df06..5973c35a02 100644
--- a/vlib/builtin/linux_bare/memory_managment.v
+++ b/vlib/builtin/linux_bare/memory_managment.v
@@ -38,7 +38,7 @@ fn system_alloc(_ voidptr, size usize) (voidptr, usize, u32) {
 	map_flags := MapFlags(int(MapFlags.map_private) | int(MapFlags.map_anonymous))
 	// END CONSTS

-	a, e := sys_mmap(&byte(0), u64(size + sizeof(u64)), mem_prot, map_flags, -1, 0)
+	a, e := sys_mmap(&byte(0), u64(size), mem_prot, map_flags, -1, 0)

 	if e == .enoerror {
 		return a, size, 0
diff --git a/vlib/builtin/wasm_bare/libc_impl.v b/vlib/builtin/wasm_bare/libc_impl.v
new file mode 100644
index 0000000000..bbde0a5055
--- /dev/null
+++ b/vlib/builtin/wasm_bare/libc_impl.v
@@ -0,0 +1,173 @@
+module builtin
+
+import dlmalloc
+
+__global global_allocator dlmalloc.Dlmalloc
+
+[unsafe]
+pub fn memcpy(dest &C.void, src &C.void, n usize) &C.void {
+	dest_ := unsafe { &byte(dest) }
+	src_ := unsafe { &byte(src) }
+	unsafe {
+		for i in 0 .. int(n) {
+			dest_[i] = src_[i]
+		}
+	}
+	return unsafe { dest }
+}
+
+[export: 'malloc']
+[unsafe]
+fn __malloc(n usize) &C.void {
+	return unsafe { global_allocator.malloc(n) }
+}
+
+[unsafe]
+fn strlen(_s &C.void) usize {
+	s := unsafe { &byte(_s) }
+	mut i := 0
+	for ; unsafe { s[i] } != 0; i++ {}
+	return usize(i)
+}
+
+[unsafe]
+fn realloc(old_area &C.void, new_size usize) &C.void {
+	if old_area == 0 {
+		return unsafe { malloc(int(new_size)) }
+	}
+	if new_size == usize(0) {
+		unsafe { free(old_area) }
+		return 0
+	}
+	old_size := unsafe { *(&u64(old_area - sizeof(u64))) }
+	if u64(new_size) <= old_size {
+		return unsafe { old_area }
+	} else {
+		new_area := unsafe { malloc(int(new_size)) }
+		unsafe { memmove(new_area, old_area, usize(old_size)) }
+		unsafe { free(old_area) }
+		return new_area
+	}
+}
+
+[unsafe]
+fn memset(s &C.void, c int, n usize) &C.void {
+	mut s_ := unsafe { &char(s) }
+	for i in 0 .. int(n) {
+		unsafe {
+			s_[i] = char(c)
+		}
+	}
+	return unsafe { s }
+}
+
+[unsafe]
+fn memmove(dest &C.void, src &C.void, n usize) &C.void {
+	dest_ := unsafe { &byte(dest) }
+	src_ := unsafe { &byte(src) }
+	mut temp_buf := unsafe { malloc(int(n)) }
+	for i in 0 .. int(n) {
+		unsafe {
+			temp_buf[i] = src_[i]
+		}
+	}
+
+	for i in 0 .. int(n) {
+		unsafe {
+			dest_[i] = temp_buf[i]
+		}
+	}
+	unsafe { free(temp_buf) }
+	return unsafe { dest }
+}
+
+[export: 'calloc']
+[unsafe]
+fn __calloc(nmemb usize, size usize) &C.void {
+	new_area := unsafe { malloc(int(nmemb) * int(size)) }
+	unsafe { memset(new_area, 0, nmemb * size) }
+	return new_area
+}
+
+fn getchar() int {
+	return 0
+}
+
+fn memcmp(a &C.void, b &C.void, n usize) int {
+	a_ := unsafe { &byte(a) }
+	b_ := unsafe { &byte(b) }
+	for i in 0 .. int(n) {
+		if unsafe { a_[i] != b_[i] } {
+			unsafe {
+				return a_[i] - b_[i]
+			}
+		}
+	}
+	return 0
+}
+
+[export: 'free']
+[unsafe]
+fn __free(ptr &C.void) {
+	unsafe {
+		global_allocator.free_(ptr)
+	}
+}
+
+fn vsprintf(str &char, format &char, ap &byte) int {
+	panic('vsprintf(): string interpolation is not supported in `-freestanding`')
+}
+
+fn vsnprintf(str &char, size usize, format &char, ap &byte) int {
+	panic('vsnprintf(): string interpolation is not supported in `-freestanding`')
+}
+
+enum Errno {
+	enoerror
+	eerror
+}
+
+// not really needed
+fn bare_read(buf &byte, count u64) (i64, Errno) {
+	return 0, Errno.eerror
+}
+
+pub fn bare_print(buf &byte, len u64) {
+}
+
+fn bare_eprint(buf &byte, len u64) {
+}
+
+pub fn write(_fd i64, _buf &byte, _count u64) i64 {
+	return -1
+}
+
+[noreturn]
+fn bare_panic(msg string) {
+	println('V panic: ' + msg)
+	exit(1)
+}
+
+fn bare_backtrace() string {
+	return 'backtraces are not available with `-freestanding`'
+}
+
+[export: 'exit']
+[noreturn]
+fn __exit(code int) {
+	unsafe {
+		// the only way to abort process execution in WASM
+		mut x := &int(voidptr(0))
+		*x = code
+	}
+	for {}
+}
+
+[export: 'qsort']
+fn __qsort(base voidptr, nmemb usize, size usize, sort_cb FnSortCB) {
+	panic('qsort() is not yet implemented in `-freestanding`')
+}
+
+fn init_global_allocator() {
+	global_allocator = dlmalloc.new(get_wasm_allocator())
+}
diff --git a/vlib/builtin/wasm_bare/memory_management.v b/vlib/builtin/wasm_bare/memory_management.v
new file mode 100644
index 0000000000..65f400d2f3
--- /dev/null
+++ b/vlib/builtin/wasm_bare/memory_management.v
@@ -0,0 +1,68 @@
+module builtin
+
+import dlmalloc
+
+// Corresponding intrinsic to wasm's `memory.grow` instruction
+//
+// This function, when called, will attempt to grow the default linear memory by the specified delta of pages.
+// The current WebAssembly page size is 65536 bytes (64 KB). If memory is successfully grown then the previous size of memory, in pages, is returned.
+// If memory cannot be grown then -1 is returned.
+//
+// The argument mem is the numerical index of which memory to grow. Note that currently the WebAssembly specification only supports one memory,
+// so it is required that zero is passed in. The argument is present to be forward-compatible with future WebAssembly revisions.
+// If a nonzero argument is passed to this function, it will currently unconditionally abort.
+fn C.__builtin_wasm_memory_grow(mem u32, delta usize) usize
+
+// Corresponding intrinsic to wasm's `memory.size` instruction
+//
+// This function, when called, will return the current memory size in units of
+// pages. The current WebAssembly page size is 65536 bytes (64 KB).
+fn C.__builtin_wasm_memory_size(mem u32) usize
+
+const page_size = 65536
+
+fn system_alloc(_ voidptr, size usize) (voidptr, usize, u32) {
+	pages := size / page_size
+	prev := C.__builtin_wasm_memory_grow(0, pages)
+	if prev == -1 {
+		return voidptr(0), 0, 0
+	}
+	return voidptr(prev * page_size), pages * page_size, 0
+}
+
+fn system_remap(_ voidptr, _ voidptr, _ usize, _ usize, _ bool) voidptr {
+	return voidptr(0)
+}
+
+fn system_free_part(_ voidptr, _ voidptr, _ usize, _ usize) bool {
+	return false
+}
+
+fn system_free(_ voidptr, _ voidptr, _ usize) bool {
+	return false
+}
+
+fn system_allocates_zeros(_ voidptr) bool {
+	return false
+}
+
+fn system_page_size(_ voidptr) usize {
+	return page_size
+}
+
+fn system_can_release_part(_ voidptr, _ u32) bool {
+	return false
+}
+
+fn get_wasm_allocator() dlmalloc.Allocator {
+	return dlmalloc.Allocator{
+		alloc: system_alloc
+		remap: system_remap
+		free_part: system_free_part
+		free_: system_free
+		can_release_part: system_can_release_part
+		allocates_zeros: system_allocates_zeros
+		page_size: system_page_size
+		data: voidptr(0)
+	}
+}
diff --git a/vlib/dlmalloc/dlmalloc.v b/vlib/dlmalloc/dlmalloc.v
index eb8bda8f10..4fa2b7f9f7 100644
--- a/vlib/dlmalloc/dlmalloc.v
+++ b/vlib/dlmalloc/dlmalloc.v
@@ -16,6 +16,14 @@ module dlmalloc

 import math.bits

+$if debug ? {
+	#include "valgrind.h"
+}
+/*
+fn C.VALGRIND_MALLOCLIKE_BLOCK(addr voidptr, size usize, rzb usize, is_zeroed bool)
+fn C.VALGRIND_FREELIKE_BLOCK(addr voidptr, rzB usize)
+fn C.VALGRIND_MAKE_MEM_UNDEFINED(addr voidptr, size usize)
+*/
 pub const (
 	n_small_bins = 32
 	n_tree_bins = 32
@@ -144,7 +152,11 @@ fn small_index(size usize) u32 {
 }

 fn align_up(a usize, alignment usize) usize {
-	return (a + (alignment - 1)) & ~(alignment - 1)
+	if a % alignment == 0 {
+		return a
+	} else {
+		return a - (a % alignment) + alignment
+	}
 }

 fn left_bits(x u32) u32 {
@@ -467,7 +479,7 @@ fn (mut dl Dlmalloc) unlink_small_chunk(chunk_ &Chunk, size usize) {
 	mut b := chunk.next
 	idx := small_index(size)

-	if b == f {
+	if voidptr(b) == voidptr(f) {
 		unsafe { dl.clear_smallmap(idx) }
 	} else {
 		f.next = b
@@ -514,7 +526,7 @@ fn (mut dl Dlmalloc) unlink_large_chunk(chunk_ &TreeChunk) {
 	}

 	mut h := dl.treebin_at(chunk.index)
-	if chunk == *h {
+	if voidptr(chunk) == voidptr(*h) {
 		*h = r
 		if isnil(r) {
 			dl.clear_treemap(chunk.index)
@@ -549,7 +561,7 @@ fn (mut dl Dlmalloc) unlink_first_small_chunk(head_ &Chunk, next_ &Chunk, idx u3
 	mut head := head_
 	mut ptr := next.prev

-	if head == ptr {
+	if voidptr(head) == voidptr(ptr) {
 		unsafe { dl.clear_smallmap(idx) }
 	} else {
 		ptr.next = head
@@ -574,9 +586,11 @@ pub fn (mut dl Dlmalloc) calloc(size usize) voidptr {

 [unsafe]
 pub fn (mut dl Dlmalloc) free_(mem voidptr) {
 	unsafe {
+		// C.VALGRIND_FREELIKE_BLOCK(mem, 0)
 		mut p := chunk_from_mem(mem)
 		mut psize := p.size()
+
 		next := p.plus_offset(psize)

 		if !p.pinuse() {
@@ -589,6 +603,7 @@ pub fn (mut dl Dlmalloc) free_(mem voidptr) {
 				{
 					dl.footprint -= psize
 				}
+
 				return
 			}

@@ -600,6 +615,7 @@ pub fn (mut dl Dlmalloc) free_(mem voidptr) {
 			} else if (next.head & dlmalloc.inuse) == dlmalloc.inuse {
 				dl.dvsize = psize
 				p.set_free_with_pinuse(psize, next)
+
 				return
 			}
 		}
@@ -620,18 +636,21 @@ pub fn (mut dl Dlmalloc) free_(mem voidptr) {
 				if dl.should_trim(tsize) {
 					dl.sys_trim(0)
 				}
+
 				return
 			} else if voidptr(next) == voidptr(dl.dv) {
 				dl.dvsize += psize
 				dsize := dl.dvsize
 				dl.dv = p
 				p.set_size_and_pinuse_of_free_chunk(dsize)
+
 				return
 			} else {
 				nsize := next.size()
 				psize += nsize
 				dl.unlink_chunk(next, nsize)
 				p.set_size_and_pinuse_of_free_chunk(psize)
+
 				if voidptr(p) == voidptr(dl.dv) {
 					dl.dvsize = psize
 					return
@@ -892,11 +911,20 @@ fn (mut dl Dlmalloc) treemap_is_marked(idx u32) bool {
 	return dl.treemap & (1 << idx) != 0
 }

+pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
+	unsafe {
+		p := dl.malloc_real(size)
+		if !isnil(p) {
+			// C.VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, false)
+		}
+		return p
+	}
+}
+
 /// malloc behaves as libc malloc, but operates within the given space
 [unsafe]
-pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
+fn (mut dl Dlmalloc) malloc_real(size usize) voidptr {
 	mut nb := usize(0)
-
 	unsafe {
 		if size <= max_small_request() {
 			nb = request_2_size(size)
@@ -907,10 +935,14 @@ pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {

 			b := dl.smallbin_at(idx)
 			mut p := b.prev
-			dl.unlink_first_small_chunk(b, p, idx)
 			smallsize := small_index2size(idx)
+
+			dl.unlink_first_small_chunk(b, p, idx)
+
 			p.set_inuse_and_pinuse(smallsize)
+
 			ret := p.to_mem()
+
 			return ret
 		}

@@ -934,7 +966,9 @@ pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
 				r.set_size_and_pinuse_of_free_chunk(size)
 				dl.replace_dv(r, rsize)
 			}
+
 			ret := p.to_mem()
+
 			return ret
 		} else if dl.treemap != 0 {
 			mem := dl.tmalloc_small(nb)
@@ -972,8 +1006,9 @@ pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
 				dl.dv = voidptr(0)
 				p.set_inuse_and_pinuse(dvs)
 			}
+			ret := p.to_mem()

-			return p.to_mem()
+			return ret
 		}
 		// Split the top node if we can
 		if nb < dl.topsize {
@@ -984,8 +1019,9 @@ pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
 			mut r := dl.top
 			r.head = rsize | dlmalloc.pinuse
 			p.set_size_and_pinuse_of_inuse_chunk(nb)
+			ret := p.to_mem()

-			return p.to_mem()
+			return ret
 		}

 		return dl.sys_alloc(nb)
@@ -1011,18 +1047,22 @@ fn (mut dl Dlmalloc) init_top(ptr &Chunk, size_ usize) {
 	size := size_ - offset
 	dl.top = p
 	dl.topsize = size
+	// C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(sizeof(usize)), sizeof(usize))
 	p.head = size | dlmalloc.pinuse
-
+	// C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(size + sizeof(usize)), sizeof(usize))
 	p.plus_offset(size).head = top_foot_size()
 	dl.trim_check = u32(default_trim_threshold())
 }

 [unsafe]
 fn (mut dl Dlmalloc) sys_alloc(size usize) voidptr {
-	asize := align_up(size + top_foot_size() + malloc_alignment(), default_granularity())
+	page_size := dl.system_allocator.page_size(dl.system_allocator.data)
+	asize := align_up(align_up(size + top_foot_size() + malloc_alignment(), default_granularity()),
+		page_size)
 	unsafe {
-		tbase, mut tsize, flags := dl.system_allocator.alloc(dl.system_allocator.data,
-			asize)
+		alloc := dl.system_allocator.alloc
+		tbase, mut tsize, flags := alloc(dl.system_allocator.data, asize)
+
 		if isnil(tbase) {
 			return tbase
 		}
@@ -1085,7 +1125,9 @@ fn (mut dl Dlmalloc) sys_alloc(size usize) voidptr {
 			mut r := dl.top
 			r.head = rsize | dlmalloc.pinuse
 			p.set_size_and_pinuse_of_inuse_chunk(size)
-			return p.to_mem()
+			ret := p.to_mem()
+
+			return ret
 		}
 	}
 	return voidptr(0)
@@ -1122,6 +1164,7 @@ fn (mut dl Dlmalloc) tmalloc_small(size usize) voidptr {
 			rc.set_size_and_pinuse_of_free_chunk(rsize)
 			dl.replace_dv(rc, rsize)
 		}
+
 		return vc.to_mem()
 	}
 }
@@ -1382,6 +1425,8 @@ pub fn (mut dl Dlmalloc) memalign(alignment_ usize, bytes usize) voidptr {
 				dl.dispose_chunk(remainder, remainder_size)
 			}
 		}
+
+		// C.VALGRIND_MALLOCLIKE_BLOCK(p.to_mem(), bytes, 0, false)
 		return p.to_mem()
 	}
 }
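
Note on the new wasm backend (illustrative, not part of the patch): dlmalloc's sys_alloc now rounds every request up to the page size reported by the system_allocator callback, so the size reaching system_alloc in vlib/builtin/wasm_bare/memory_management.v is always a whole number of 64 KiB pages. system_alloc converts it to a page delta, passes that to memory.grow, and turns the returned previous size (in pages) back into a byte offset and length. The host-side sketch below simulates that bookkeeping; SimMemory, grow and system_alloc_sim are invented names for this example, standing in for the real C.__builtin_wasm_memory_grow intrinsic, which only exists on a wasm32 target.

// Illustrative simulation only; the real backend calls C.__builtin_wasm_memory_grow.
const sim_page_size = usize(65536)

struct SimMemory {
mut:
	pages usize // current size of the simulated linear memory, in 64 KiB pages
}

// grow mimics memory.grow: extend the memory by `delta` pages and
// return the previous size, in pages.
fn (mut m SimMemory) grow(delta usize) usize {
	prev := m.pages
	m.pages += delta
	return prev
}

// system_alloc_sim mirrors system_alloc: dlmalloc's sys_alloc has already
// rounded `size` up to a multiple of the page size, so the division is exact.
fn (mut m SimMemory) system_alloc_sim(size usize) (usize, usize) {
	pages := size / sim_page_size
	prev := m.grow(pages)
	return prev * sim_page_size, pages * sim_page_size
}

fn main() {
	mut mem := SimMemory{
		pages: 16 // pretend the module starts with 1 MiB of linear memory
	}
	base, nbytes := mem.system_alloc_sim(usize(3) * sim_page_size)
	println(base) // 1048576: the new region starts where the old memory ended
	println(nbytes) // 196608: three whole pages were handed to dlmalloc
}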
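
Note on align_up (illustrative, not part of the patch): the patch rewrites align_up in vlib/dlmalloc/dlmalloc.v from the bitmask form (a + (alignment - 1)) & ~(alignment - 1) to a modulo form. One observable difference is that the modulo form also rounds up correctly for alignments that are not powers of two, which is relevant now that sys_alloc aligns to whatever page size the system_allocator callback reports. The snippet below copies the new function verbatim and adds a small, hypothetical harness to show the rounding.

// align_up is copied from the patched vlib/dlmalloc/dlmalloc.v; main() is a made-up harness.
fn align_up(a usize, alignment usize) usize {
	if a % alignment == 0 {
		return a
	} else {
		return a - (a % alignment) + alignment
	}
}

fn main() {
	println(align_up(usize(100), usize(64))) // 128
	println(align_up(usize(65536), usize(65536))) // 65536, already aligned
	println(align_up(usize(100), usize(24))) // 120, a non-power-of-two alignment
}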