builtin: improve support for large arrays (`[]int{len: 1_000_000_000}` now works), fix an arr.repeat() bug (#14294)
parent af8be14639
commit 1a4d9017e2
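The pattern repeated throughout this commit: byte-size computations such as `cap_ * elm_size` multiplied two 32-bit `int` values, so for arrays larger than 2 GiB the product wrapped around before it ever reached the allocator. Casting each operand to `u64` first makes the multiplication happen in 64 bits. A minimal sketch of the difference (illustration only, not part of the patch):

fn main() {
	mylen := 1_000_000_000 // one billion elements
	elm_size := 4 // bytes per int element
	wrapped := mylen * elm_size // int * int: the 32-bit product wraps around
	widened := u64(mylen) * u64(elm_size) // operands widened first: exact
	println(wrapped) // -294967296
	println(widened) // 4000000000
}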
@@ -31,7 +31,7 @@ fn __new_array(mylen int, cap int, elm_size int) array {
 	cap_ := if cap < mylen { mylen } else { cap }
 	arr := array{
 		element_size: elm_size
-		data: vcalloc(cap_ * elm_size)
+		data: vcalloc(u64(cap_) * u64(elm_size))
 		len: mylen
 		cap: cap_
 	}

@@ -45,14 +45,19 @@ fn __new_array_with_default(mylen int, cap int, elm_size int, val voidptr) array
 		len: mylen
 		cap: cap_
 	}
+	total_size := u64(cap_) * u64(elm_size)
 	if cap_ > 0 && mylen == 0 {
-		arr.data = unsafe { malloc(cap_ * elm_size) }
+		arr.data = unsafe { malloc(total_size) }
 	} else {
-		arr.data = vcalloc(cap_ * elm_size)
+		arr.data = vcalloc(total_size)
 	}
 	if val != 0 {
-		for i in 0 .. arr.len {
-			unsafe { arr.set_unsafe(i, val) }
+		mut eptr := &u8(arr.data)
+		unsafe {
+			for _ in 0 .. arr.len {
+				vmemcpy(eptr, val, arr.element_size)
+				eptr += arr.element_size
+			}
 		}
 	}
 	return arr

@@ -62,13 +67,17 @@ fn __new_array_with_array_default(mylen int, cap int, elm_size int, val array) a
 	cap_ := if cap < mylen { mylen } else { cap }
 	mut arr := array{
 		element_size: elm_size
-		data: unsafe { malloc(cap_ * elm_size) }
+		data: unsafe { malloc(u64(cap_) * u64(elm_size)) }
 		len: mylen
 		cap: cap_
 	}
-	for i in 0 .. arr.len {
-		val_clone := unsafe { val.clone_to_depth(1) }
-		unsafe { arr.set_unsafe(i, &val_clone) }
+	mut eptr := &u8(arr.data)
+	unsafe {
+		for _ in 0 .. arr.len {
+			val_clone := val.clone_to_depth(1)
+			vmemcpy(eptr, &val_clone, arr.element_size)
+			eptr += arr.element_size
+		}
 	}
 	return arr
 }

@@ -77,13 +86,17 @@ fn __new_array_with_map_default(mylen int, cap int, elm_size int, val map) array
 	cap_ := if cap < mylen { mylen } else { cap }
 	mut arr := array{
 		element_size: elm_size
-		data: unsafe { malloc(cap_ * elm_size) }
+		data: unsafe { malloc(u64(cap_) * u64(elm_size)) }
 		len: mylen
 		cap: cap_
 	}
-	for i in 0 .. arr.len {
-		val_clone := unsafe { val.clone() }
-		unsafe { arr.set_unsafe(i, &val_clone) }
+	mut eptr := &u8(arr.data)
+	unsafe {
+		for _ in 0 .. arr.len {
+			val_clone := val.clone()
+			vmemcpy(eptr, &val_clone, arr.element_size)
+			eptr += arr.element_size
+		}
 	}
 	return arr
 }

@@ -93,12 +106,12 @@ fn new_array_from_c_array(len int, cap int, elm_size int, c_array voidptr) array
 	cap_ := if cap < len { len } else { cap }
 	arr := array{
 		element_size: elm_size
-		data: vcalloc(cap_ * elm_size)
+		data: vcalloc(u64(cap_) * u64(elm_size))
 		len: len
 		cap: cap_
 	}
 	// TODO Write all memory functions (like memcpy) in V
-	unsafe { vmemcpy(arr.data, c_array, len * elm_size) }
+	unsafe { vmemcpy(arr.data, c_array, u64(len) * u64(elm_size)) }
 	return arr
 }

@@ -124,10 +137,10 @@ fn (mut a array) ensure_cap(required int) {
 	for required > cap {
 		cap *= 2
 	}
-	new_size := cap * a.element_size
+	new_size := u64(cap) * u64(a.element_size)
 	new_data := unsafe { malloc(new_size) }
 	if a.data != voidptr(0) {
-		unsafe { vmemcpy(new_data, a.data, a.len * a.element_size) }
+		unsafe { vmemcpy(new_data, a.data, u64(a.len) * u64(a.element_size)) }
 		// TODO: the old data may be leaked when no GC is used (ref-counting?)
 		if a.flags.has(.noslices) {
 			unsafe {

@@ -153,14 +166,14 @@ pub fn (a array) repeat(count int) array {
 // multi-dimensional arrays.
 //
 // It is `unsafe` to call directly because `depth` is not checked
-[unsafe]
+[direct_array_access; unsafe]
 pub fn (a array) repeat_to_depth(count int, depth int) array {
 	if count < 0 {
 		panic('array.repeat: count is negative: $count')
 	}
-	mut size := count * a.len * a.element_size
+	mut size := u64(count) * u64(a.len) * u64(a.element_size)
 	if size == 0 {
-		size = a.element_size
+		size = u64(a.element_size)
 	}
 	arr := array{
 		element_size: a.element_size

@@ -169,12 +182,18 @@ pub fn (a array) repeat_to_depth(count int, depth int) array {
 		cap: count * a.len
 	}
 	if a.len > 0 {
-		for i in 0 .. count {
-			if depth > 0 {
-				ary_clone := unsafe { a.clone_to_depth(depth) }
-				unsafe { vmemcpy(arr.get_unsafe(i * a.len), &u8(ary_clone.data), a.len * a.element_size) }
-			} else {
-				unsafe { vmemcpy(arr.get_unsafe(i * a.len), &u8(a.data), a.len * a.element_size) }
+		a_total_size := u64(a.len) * u64(a.element_size)
+		arr_step_size := u64(a.len) * u64(arr.element_size)
+		mut eptr := &u8(arr.data)
+		unsafe {
+			for _ in 0 .. count {
+				if depth > 0 {
+					ary_clone := a.clone_to_depth(depth)
+					vmemcpy(eptr, &u8(ary_clone.data), a_total_size)
+				} else {
+					vmemcpy(eptr, &u8(a.data), a_total_size)
+				}
+				eptr += arr_step_size
 			}
 		}
 	}

@@ -209,7 +228,7 @@ pub fn (mut a array) insert(i int, val voidptr) {
 		a.ensure_cap(a.len + 1)
 	}
 	unsafe {
-		vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
+		vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), u64((a.len - i)) * u64(a.element_size))
 		a.set_unsafe(i, val)
 	}
 	a.len++

@@ -228,8 +247,8 @@ fn (mut a array) insert_many(i int, val voidptr, size int) {
 	elem_size := a.element_size
 	unsafe {
 		iptr := a.get_unsafe(i)
-		vmemmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
-		vmemcpy(iptr, val, size * elem_size)
+		vmemmove(a.get_unsafe(i + size), iptr, u64(a.len - i) * u64(elem_size))
+		vmemcpy(iptr, val, u64(size) * u64(elem_size))
 	}
 	a.len += size
 }

@@ -286,8 +305,8 @@ pub fn (mut a array) delete_many(i int, size int) {
 	}
 	if a.flags.all(.noshrink | .noslices) {
 		unsafe {
-			vmemmove(&u8(a.data) + i * a.element_size, &u8(a.data) + (i + size) * a.element_size,
-				(a.len - i - size) * a.element_size)
+			vmemmove(&u8(a.data) + u64(i) * u64(a.element_size), &u8(a.data) + u64(i +
+				size) * u64(a.element_size), u64(a.len - i - size) * u64(a.element_size))
 		}
 		a.len -= size
 		return

@@ -297,11 +316,11 @@ pub fn (mut a array) delete_many(i int, size int) {
 	old_data := a.data
 	new_size := a.len - size
 	new_cap := if new_size == 0 { 1 } else { new_size }
-	a.data = vcalloc(new_cap * a.element_size)
-	unsafe { vmemcpy(a.data, old_data, i * a.element_size) }
+	a.data = vcalloc(u64(new_cap) * u64(a.element_size))
+	unsafe { vmemcpy(a.data, old_data, u64(i) * u64(a.element_size)) }
 	unsafe {
-		vmemcpy(&u8(a.data) + i * a.element_size, &u8(old_data) + (i + size) * a.element_size,
-			(a.len - i - size) * a.element_size)
+		vmemcpy(&u8(a.data) + u64(i) * u64(a.element_size), &u8(old_data) + u64(i +
+			size) * u64(a.element_size), u64(a.len - i - size) * u64(a.element_size))
 	}
 	if a.flags.has(.noslices) {
 		unsafe {

@@ -343,9 +362,9 @@ pub fn (mut a array) drop(num int) {
 		return
 	}
 	n := if num <= a.len { num } else { a.len }
-	blen := n * a.element_size
+	blen := u64(n) * u64(a.element_size)
 	a.data = unsafe { &u8(a.data) + blen }
-	a.offset += blen
+	a.offset += int(blen) // TODO: offset should become 64bit as well
 	a.len -= n
 	a.cap -= n
 }

@@ -354,7 +373,7 @@ pub fn (mut a array) drop(num int) {
 [inline; unsafe]
 fn (a array) get_unsafe(i int) voidptr {
 	unsafe {
-		return &u8(a.data) + i * a.element_size
+		return &u8(a.data) + u64(i) * u64(a.element_size)
 	}
 }

@@ -366,7 +385,7 @@ fn (a array) get(i int) voidptr {
 		}
 	}
 	unsafe {
-		return &u8(a.data) + i * a.element_size
+		return &u8(a.data) + u64(i) * u64(a.element_size)
 	}
 }

@@ -376,7 +395,7 @@ fn (a array) get_with_check(i int) voidptr {
 		return 0
 	}
 	unsafe {
-		return &u8(a.data) + i * a.element_size
+		return &u8(a.data) + u64(i) * u64(a.element_size)
 	}
 }

@@ -402,7 +421,7 @@ pub fn (a array) last() voidptr {
 		}
 	}
 	unsafe {
-		return &u8(a.data) + (a.len - 1) * a.element_size
+		return &u8(a.data) + u64(a.len - 1) * u64(a.element_size)
 	}
 }

@@ -429,7 +448,7 @@ pub fn (mut a array) pop() voidptr {
 		}
 	}
 	new_len := a.len - 1
-	last_elem := unsafe { &u8(a.data) + new_len * a.element_size }
+	last_elem := unsafe { &u8(a.data) + u64(new_len) * u64(a.element_size) }
 	a.len = new_len
 	// Note: a.cap is not changed here *on purpose*, so that
 	// further << ops on that array will be more efficient.

@@ -474,13 +493,13 @@ fn (a array) slice(start int, _end int) array {
 	}
 	// TODO: integrate reference counting
 	// a.flags.clear(.noslices)
-	offset := start * a.element_size
+	offset := u64(start) * u64(a.element_size)
 	data := unsafe { &u8(a.data) + offset }
 	l := end - start
 	res := array{
 		element_size: a.element_size
 		data: data
-		offset: a.offset + offset
+		offset: a.offset + int(offset) // TODO: offset should become 64bit
 		len: l
 		cap: l
 	}

@@ -526,13 +545,13 @@ fn (a array) slice_ni(_start int, _end int) array {
 		return res
 	}

-	offset := start * a.element_size
+	offset := u64(start) * u64(a.element_size)
 	data := unsafe { &u8(a.data) + offset }
 	l := end - start
 	res := array{
 		element_size: a.element_size
 		data: data
-		offset: a.offset + offset
+		offset: a.offset + int(offset) // TODO: offset should be 64bit
 		len: l
 		cap: l
 	}

@@ -562,7 +581,7 @@ pub fn (a &array) clone() array {
 // recursively clone given array - `unsafe` when called directly because depth is not checked
 [unsafe]
 pub fn (a &array) clone_to_depth(depth int) array {
-	mut size := a.cap * a.element_size
+	mut size := u64(a.cap) * u64(a.element_size)
 	if size == 0 {
 		size++
 	}

@@ -583,7 +602,7 @@ pub fn (a &array) clone_to_depth(depth int) array {
 		return arr
 	} else {
 		if !isnil(a.data) {
-			unsafe { vmemcpy(&u8(arr.data), a.data, a.cap * a.element_size) }
+			unsafe { vmemcpy(&u8(arr.data), a.data, u64(a.cap) * u64(a.element_size)) }
 		}
 		return arr
 	}

@@ -592,7 +611,7 @@ pub fn (a &array) clone_to_depth(depth int) array {
 // we manually inline this for single operations for performance without -prod
 [inline; unsafe]
 fn (mut a array) set_unsafe(i int, val voidptr) {
-	unsafe { vmemcpy(&u8(a.data) + a.element_size * i, val, a.element_size) }
+	unsafe { vmemcpy(&u8(a.data) + u64(a.element_size) * u64(i), val, a.element_size) }
 }

 // Private function. Used to implement assigment to the array element.

@@ -602,14 +621,14 @@ fn (mut a array) set(i int, val voidptr) {
 			panic('array.set: index out of range (i == $i, a.len == $a.len)')
 		}
 	}
-	unsafe { vmemcpy(&u8(a.data) + a.element_size * i, val, a.element_size) }
+	unsafe { vmemcpy(&u8(a.data) + u64(a.element_size) * u64(i), val, a.element_size) }
 }

 fn (mut a array) push(val voidptr) {
 	if a.len >= a.cap {
 		a.ensure_cap(a.len + 1)
 	}
-	unsafe { vmemcpy(&u8(a.data) + a.element_size * a.len, val, a.element_size) }
+	unsafe { vmemcpy(&u8(a.data) + u64(a.element_size) * u64(a.len), val, a.element_size) }
 	a.len++
 }

@@ -623,11 +642,11 @@ pub fn (mut a3 array) push_many(val voidptr, size int) {
 		copy := a3.clone()
 		unsafe {
 			// vmemcpy(a.data, copy.data, copy.element_size * copy.len)
-			vmemcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
+			vmemcpy(a3.get_unsafe(a3.len), copy.data, u64(a3.element_size) * u64(size))
 		}
 	} else {
 		if !isnil(a3.data) && !isnil(val) {
-			unsafe { vmemcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
+			unsafe { vmemcpy(a3.get_unsafe(a3.len), val, u64(a3.element_size) * u64(size)) }
 		}
 	}
 	a3.len += size

@@ -641,10 +660,11 @@ pub fn (mut a array) reverse_in_place() {
 	unsafe {
 		mut tmp_value := malloc(a.element_size)
 		for i in 0 .. a.len / 2 {
-			vmemcpy(tmp_value, &u8(a.data) + i * a.element_size, a.element_size)
-			vmemcpy(&u8(a.data) + i * a.element_size, &u8(a.data) + (a.len - 1 - i) * a.element_size,
+			vmemcpy(tmp_value, &u8(a.data) + u64(i) * u64(a.element_size), a.element_size)
+			vmemcpy(&u8(a.data) + u64(i) * u64(a.element_size), &u8(a.data) +
+				u64(a.len - 1 - i) * u64(a.element_size), a.element_size)
+			vmemcpy(&u8(a.data) + u64(a.len - 1 - i) * u64(a.element_size), tmp_value,
 				a.element_size)
-			vmemcpy(&u8(a.data) + (a.len - 1 - i) * a.element_size, tmp_value, a.element_size)
 		}
 		free(tmp_value)
 	}

@@ -657,7 +677,7 @@ pub fn (a array) reverse() array {
 	}
 	mut arr := array{
 		element_size: a.element_size
-		data: vcalloc(a.cap * a.element_size)
+		data: vcalloc(u64(a.cap) * u64(a.element_size))
 		len: a.len
 		cap: a.cap
 	}

@@ -840,7 +860,7 @@ pub fn (a []string) str() string {
 // hex returns a string with the hexadecimal representation
 // of the byte elements of the array.
 pub fn (b []u8) hex() string {
-	mut hex := unsafe { malloc_noscan(b.len * 2 + 1) }
+	mut hex := unsafe { malloc_noscan(u64(b.len) * 2 + 1) }
 	mut dst_i := 0
 	for i in b {
 		n0 := i >> 4
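The `repeat_to_depth` rewrite above also removes per-iteration `arr.get_unsafe(i * a.len)` indexing, whose `int` products can overflow for large arrays, in favor of a single byte cursor advanced by a step precomputed in `u64`. A self-contained sketch of that cursor-walk pattern (hypothetical helper, not from the patch):

// fill_repeated writes `count` back-to-back copies of `src` into `dst`,
// advancing a byte cursor instead of multiplying indexes.
fn fill_repeated(mut dst []u8, src []u8, count int) {
	unsafe {
		mut eptr := &u8(dst.data)
		for _ in 0 .. count {
			vmemcpy(eptr, src.data, src.len)
			eptr += u64(src.len)
		}
	}
}

fn main() {
	src := [u8(1), 2, 3]
	mut dst := []u8{len: 9}
	fill_repeated(mut dst, src, 3)
	println(dst) // [1, 2, 3, 1, 2, 3, 1, 2, 3]
}

The same changes are mirrored in the `_noscan` variants below: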
@@ -9,7 +9,7 @@ fn __new_array_noscan(mylen int, cap int, elm_size int) array {
 	cap_ := if cap < mylen { mylen } else { cap }
 	arr := array{
 		element_size: elm_size
-		data: vcalloc_noscan(cap_ * elm_size)
+		data: vcalloc_noscan(u64(cap_) * u64(elm_size))
 		len: mylen
 		cap: cap_
 	}

@@ -20,7 +20,7 @@ fn __new_array_with_default_noscan(mylen int, cap int, elm_size int, val voidptr
 	cap_ := if cap < mylen { mylen } else { cap }
 	mut arr := array{
 		element_size: elm_size
-		data: vcalloc_noscan(cap_ * elm_size)
+		data: vcalloc_noscan(u64(cap_) * u64(elm_size))
 		len: mylen
 		cap: cap_
 	}

@@ -36,7 +36,7 @@ fn __new_array_with_array_default_noscan(mylen int, cap int, elm_size int, val a
 	cap_ := if cap < mylen { mylen } else { cap }
 	mut arr := array{
 		element_size: elm_size
-		data: vcalloc_noscan(cap_ * elm_size)
+		data: vcalloc_noscan(u64(cap_) * u64(elm_size))
 		len: mylen
 		cap: cap_
 	}

@@ -52,12 +52,12 @@ fn new_array_from_c_array_noscan(len int, cap int, elm_size int, c_array voidptr
 	cap_ := if cap < len { len } else { cap }
 	arr := array{
 		element_size: elm_size
-		data: vcalloc_noscan(cap_ * elm_size)
+		data: vcalloc_noscan(u64(cap_) * u64(elm_size))
 		len: len
 		cap: cap_
 	}
 	// TODO Write all memory functions (like memcpy) in V
-	unsafe { vmemcpy(arr.data, c_array, len * elm_size) }
+	unsafe { vmemcpy(arr.data, c_array, u64(len) * u64(elm_size)) }
 	return arr
 }

@@ -70,10 +70,10 @@ fn (mut a array) ensure_cap_noscan(required int) {
 	for required > cap {
 		cap *= 2
 	}
-	new_size := cap * a.element_size
+	new_size := u64(cap) * u64(a.element_size)
 	new_data := vcalloc_noscan(new_size)
 	if a.data != voidptr(0) {
-		unsafe { vmemcpy(new_data, a.data, a.len * a.element_size) }
+		unsafe { vmemcpy(new_data, a.data, u64(a.len) * u64(a.element_size)) }
 		// TODO: the old data may be leaked when no GC is used (ref-counting?)
 	}
 	a.data = new_data

@@ -91,9 +91,9 @@ fn (a array) repeat_to_depth_noscan(count int, depth int) array {
 	if count < 0 {
 		panic('array.repeat: count is negative: $count')
 	}
-	mut size := count * a.len * a.element_size
+	mut size := u64(count) * u64(a.len) * u64(a.element_size)
 	if size == 0 {
-		size = a.element_size
+		size = u64(a.element_size)
 	}
 	arr := array{
 		element_size: a.element_size

@@ -102,12 +102,18 @@ fn (a array) repeat_to_depth_noscan(count int, depth int) array {
 		cap: count * a.len
 	}
 	if a.len > 0 {
-		for i in 0 .. count {
-			if depth > 0 {
-				ary_clone := unsafe { a.clone_to_depth_noscan(depth) }
-				unsafe { vmemcpy(arr.get_unsafe(i * a.len), &u8(ary_clone.data), a.len * a.element_size) }
-			} else {
-				unsafe { vmemcpy(arr.get_unsafe(i * a.len), &u8(a.data), a.len * a.element_size) }
+		a_total_size := u64(a.len) * u64(a.element_size)
+		arr_step_size := u64(a.len) * u64(arr.element_size)
+		mut eptr := &u8(arr.data)
+		unsafe {
+			for _ in 0 .. count {
+				if depth > 0 {
+					ary_clone := a.clone_to_depth_noscan(depth)
+					vmemcpy(eptr, &u8(ary_clone.data), a_total_size)
+				} else {
+					vmemcpy(eptr, &u8(a.data), a_total_size)
+				}
+				eptr += arr_step_size
 			}
 		}
 	}

@@ -123,7 +129,7 @@ fn (mut a array) insert_noscan(i int, val voidptr) {
 	}
 	a.ensure_cap_noscan(a.len + 1)
 	unsafe {
-		vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), (a.len - i) * a.element_size)
+		vmemmove(a.get_unsafe(i + 1), a.get_unsafe(i), u64(a.len - i) * u64(a.element_size))
 		a.set_unsafe(i, val)
 	}
 	a.len++

@@ -141,8 +147,8 @@ fn (mut a array) insert_many_noscan(i int, val voidptr, size int) {
 	elem_size := a.element_size
 	unsafe {
 		iptr := a.get_unsafe(i)
-		vmemmove(a.get_unsafe(i + size), iptr, (a.len - i) * elem_size)
-		vmemcpy(iptr, val, size * elem_size)
+		vmemmove(a.get_unsafe(i + size), iptr, u64(a.len - i) * u64(elem_size))
+		vmemcpy(iptr, val, u64(size) * u64(elem_size))
 	}
 	a.len += size
 }

@@ -167,7 +173,7 @@ fn (mut a array) pop_noscan() voidptr {
 		}
 	}
 	new_len := a.len - 1
-	last_elem := unsafe { &u8(a.data) + new_len * a.element_size }
+	last_elem := unsafe { &u8(a.data) + u64(new_len) * u64(a.element_size) }
 	a.len = new_len
 	// Note: a.cap is not changed here *on purpose*, so that
 	// further << ops on that array will be more efficient.

@@ -184,7 +190,7 @@ fn (a array) clone_static_to_depth_noscan(depth int) array {
 // recursively clone given array - `unsafe` when called directly because depth is not checked
 [unsafe]
 fn (a &array) clone_to_depth_noscan(depth int) array {
-	mut size := a.cap * a.element_size
+	mut size := u64(a.cap) * u64(a.element_size)
 	if size == 0 {
 		size++
 	}

@@ -205,7 +211,7 @@ fn (a &array) clone_to_depth_noscan(depth int) array {
 		return arr
 	} else {
 		if !isnil(a.data) {
-			unsafe { vmemcpy(&u8(arr.data), a.data, a.cap * a.element_size) }
+			unsafe { vmemcpy(&u8(arr.data), a.data, u64(a.cap) * u64(a.element_size)) }
 		}
 		return arr
 	}

@@ -213,7 +219,7 @@ fn (a &array) clone_to_depth_noscan(depth int) array {

 fn (mut a array) push_noscan(val voidptr) {
 	a.ensure_cap_noscan(a.len + 1)
-	unsafe { vmemcpy(&u8(a.data) + a.element_size * a.len, val, a.element_size) }
+	unsafe { vmemcpy(&u8(a.data) + u64(a.element_size) * u64(a.len), val, a.element_size) }
 	a.len++
 }

@@ -226,12 +232,12 @@ fn (mut a3 array) push_many_noscan(val voidptr, size int) {
 		copy := a3.clone()
 		a3.ensure_cap_noscan(a3.len + size)
 		unsafe {
-			vmemcpy(a3.get_unsafe(a3.len), copy.data, a3.element_size * size)
+			vmemcpy(a3.get_unsafe(a3.len), copy.data, u64(a3.element_size) * u64(size))
 		}
 	} else {
 		a3.ensure_cap_noscan(a3.len + size)
 		if !isnil(a3.data) && !isnil(val) {
-			unsafe { vmemcpy(a3.get_unsafe(a3.len), val, a3.element_size * size) }
+			unsafe { vmemcpy(a3.get_unsafe(a3.len), val, u64(a3.element_size) * u64(size)) }
 		}
 	}
 	a3.len += size

@@ -244,7 +250,7 @@ fn (a array) reverse_noscan() array {
 	}
 	mut arr := array{
 		element_size: a.element_size
-		data: vcalloc_noscan(a.cap * a.element_size)
+		data: vcalloc_noscan(u64(a.cap) * u64(a.element_size))
 		len: a.len
 		cap: a.cap
 	}
@@ -209,61 +209,82 @@ fn test_compare_ints() {
 	assert compare_ints(a, a) == 0
 }
 */
-fn test_repeat() {
-	{
-		a := [0].repeat(5)
+fn test_repeat_int() {
+	a := [1234].repeat(5)
+	dump(a)
 	assert a.len == 5
-	assert a[0] == 0 && a[1] == 0 && a[2] == 0 && a[3] == 0 && a[4] == 0
+	for x in a {
+		assert x == 1234
 	}
-	{
+}

+fn test_repeat_f64() {
 	a := [1.1].repeat(10)
+	dump(a)
+	assert a.len == 10
 	assert a[0] == 1.1
 	assert a[5] == 1.1
 	assert a[9] == 1.1
 }
-	{
+
+fn test_repeat_f32() {
+	a := [f32(1.1)].repeat(10)
+	dump(a)
+	assert a.len == 10
+	assert a[0] == f32(1.1)
+	assert a[5] == f32(1.1)
+	assert a[9] == f32(1.1)
+}
+
+fn test_repeat_i64() {
 	a := [i64(-123)].repeat(10)
+	dump(a)
+	assert a.len == 10
 	assert a[0] == -123
 	assert a[5] == -123
 	assert a[9] == -123
 }
-	{
+
+fn test_repeat_u64() {
 	a := [u64(123)].repeat(10)
 	assert a[0] == 123
 	assert a[5] == 123
 	assert a[9] == 123
 }
-	{
-		a := [1.1].repeat(10)
-		assert a[0] == 1.1
-		assert a[5] == 1.1
-		assert a[9] == 1.1
-	}
-	{
+
+fn test_repeat_several_ints() {
 	a := [1, 2].repeat(2)
+	dump(a)
+	assert a.len == 4
 	assert a[0] == 1
 	assert a[1] == 2
 	assert a[2] == 1
 	assert a[3] == 2
 }
-	{
+
+fn test_repeat_several_strings_2() {
 	a := ['1', 'abc'].repeat(2)
+	dump(a)
+	assert a.len == 4
 	assert a[0] == '1'
 	assert a[1] == 'abc'
 	assert a[2] == '1'
 	assert a[3] == 'abc'
 }
-	{
+
+fn test_repeat_several_strings_0() {
 	mut a := ['1', 'abc'].repeat(0)
+	dump(a)
 	assert a.len == 0
 	a << 'abc'
 	assert a[0] == 'abc'
-	}
 }

 fn test_deep_repeat() {
 	mut a3 := [[[1, 1], [2, 2], [3, 3]], [[4, 4], [5, 5], [6, 6]]]
 	r := a3.repeat(3)
+	dump(r)
 	a3[1][1][0] = 17
 	assert r == [
 		[[1, 1], [2, 2], [3, 3]],
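The next hunks widen the allocator and memory-routine size parameters from `int` to `isize`, the pointer-sized signed integer: a single allocation or copy may then exceed 2^31 - 1 bytes on 64-bit targets, while 32-bit targets keep their native width. A small illustration (not from the patch):

fn main() {
	// isize is pointer-sized: 8 bytes on 64-bit targets, 4 on 32-bit ones
	dump(sizeof(isize))
	// 3 GiB fits in a 64-bit isize, but would wrap a 32-bit int
	n := isize(3) * 1024 * 1024 * 1024
	dump(n)
}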
@@ -276,7 +276,7 @@ __global total_m = i64(0)
 // malloc returns a `byteptr` pointing to the memory address of the allocated space.
 // unlike the `calloc` family of functions - malloc will not zero the memory block.
 [unsafe]
-pub fn malloc(n int) &u8 {
+pub fn malloc(n isize) &u8 {
 	if n <= 0 {
 		panic('malloc($n <= 0)')
 	}

@@ -319,7 +319,7 @@ pub fn malloc(n int) &u8 {
 }

 [unsafe]
-pub fn malloc_noscan(n int) &u8 {
+pub fn malloc_noscan(n isize) &u8 {
 	if n <= 0 {
 		panic('malloc_noscan($n <= 0)')
 	}

@@ -370,7 +370,7 @@ pub fn malloc_noscan(n int) &u8 {
 // previously allocated with `malloc`, `v_calloc` or `vcalloc`.
 // Please, see also realloc_data, and use it instead if possible.
 [unsafe]
-pub fn v_realloc(b &u8, n int) &u8 {
+pub fn v_realloc(b &u8, n isize) &u8 {
 	$if trace_realloc ? {
 		C.fprintf(C.stderr, c'v_realloc %6d\n', n)
 	}

@@ -441,7 +441,7 @@ pub fn realloc_data(old_data &u8, old_size int, new_size int) &u8 {
 // vcalloc dynamically allocates a zeroed `n` bytes block of memory on the heap.
 // vcalloc returns a `byteptr` pointing to the memory address of the allocated space.
 // Unlike `v_calloc` vcalloc checks for negative values given in `n`.
-pub fn vcalloc(n int) &u8 {
+pub fn vcalloc(n isize) &u8 {
 	if n < 0 {
 		panic('calloc($n < 0)')
 	} else if n == 0 {

@@ -462,7 +462,7 @@ pub fn vcalloc(n int) &u8 {

 // special versions of the above that allocate memory which is not scanned
 // for pointers (but is collected) when the Boehm garbage collection is used
-pub fn vcalloc_noscan(n int) &u8 {
+pub fn vcalloc_noscan(n isize) &u8 {
 	$if trace_vcalloc ? {
 		total_m += n
 		C.fprintf(C.stderr, c'vcalloc_noscan %6d total %10d\n', n, total_m)
@@ -18,7 +18,7 @@ pub fn vstrlen_char(s &char) int {
 // The memory areas *MUST NOT OVERLAP*. Use vmemmove, if the memory
 // areas do overlap. vmemcpy returns a pointer to `dest`.
 [inline; unsafe]
-pub fn vmemcpy(dest voidptr, const_src voidptr, n int) voidptr {
+pub fn vmemcpy(dest voidptr, const_src voidptr, n isize) voidptr {
 	unsafe {
 		return C.memcpy(dest, const_src, n)
 	}

@@ -30,7 +30,7 @@ pub fn vmemcpy(dest voidptr, const_src voidptr, n int) voidptr {
 // `src` or `dest`, and the bytes are then copied from the temporary array
 // to `dest`. vmemmove returns a pointer to `dest`.
 [inline; unsafe]
-pub fn vmemmove(dest voidptr, const_src voidptr, n int) voidptr {
+pub fn vmemmove(dest voidptr, const_src voidptr, n isize) voidptr {
 	unsafe {
 		return C.memmove(dest, const_src, n)
 	}

@@ -49,7 +49,7 @@ pub fn vmemmove(dest voidptr, const_src voidptr, n int) voidptr {
 // You should use a function that performs comparisons in constant time for
 // this.
 [inline; unsafe]
-pub fn vmemcmp(const_s1 voidptr, const_s2 voidptr, n int) int {
+pub fn vmemcmp(const_s1 voidptr, const_s2 voidptr, n isize) int {
 	unsafe {
 		return C.memcmp(const_s1, const_s2, n)
 	}

@@ -58,7 +58,7 @@ pub fn vmemcmp(const_s1 voidptr, const_s2 voidptr, n int) int {
 // vmemset fills the first `n` bytes of the memory area pointed to by `s`,
 // with the constant byte `c`. It returns a pointer to the memory area `s`.
 [inline; unsafe]
-pub fn vmemset(s voidptr, c int, n int) voidptr {
+pub fn vmemset(s voidptr, c int, n isize) voidptr {
 	unsafe {
 		return C.memset(s, c, n)
 	}
@@ -31,7 +31,7 @@ pub fn mm_free(addr &byte) Errno {
 	return sys_munmap(ap, size)
 }

-pub fn mem_copy(dest0 voidptr, src0 voidptr, n int) voidptr {
+pub fn mem_copy(dest0 voidptr, src0 voidptr, n isize) voidptr {
 	mut dest := &u8(dest0)
 	src := &u8(src0)
 	for i in 0 .. n {

@@ -41,7 +41,7 @@ pub fn mem_copy(dest0 voidptr, src0 voidptr, n int) voidptr {
 }

 [unsafe]
-pub fn malloc(n int) &byte {
+pub fn malloc(n isize) &byte {
 	if n < 0 {
 		panic('malloc(<0)')
 	}
@@ -21,16 +21,16 @@ __global g_memory_block &VMemoryBlock
 struct VMemoryBlock {
 mut:
 	id int
-	cap int
+	cap isize
 	start &byte = 0
 	previous &VMemoryBlock = 0
-	remaining int
+	remaining isize
 	current &u8 = 0
 	mallocs int
 }

 [unsafe]
-fn vmemory_block_new(prev &VMemoryBlock, at_least int) &VMemoryBlock {
+fn vmemory_block_new(prev &VMemoryBlock, at_least isize) &VMemoryBlock {
 	mut v := unsafe { &VMemoryBlock(C.calloc(1, sizeof(VMemoryBlock))) }
 	if prev != 0 {
 		v.id = prev.id + 1

@@ -45,7 +45,7 @@ fn vmemory_block_new(prev &VMemoryBlock, at_least int) &VMemoryBlock {
 }

 [unsafe]
-fn vmemory_block_malloc(n int) &byte {
+fn vmemory_block_malloc(n isize) &byte {
 	unsafe {
 		if g_memory_block.remaining < n {
 			g_memory_block = vmemory_block_new(g_memory_block, n)

@@ -95,12 +95,12 @@ fn prealloc_vcleanup() {
 }

 [unsafe]
-fn prealloc_malloc(n int) &byte {
+fn prealloc_malloc(n isize) &byte {
 	return unsafe { vmemory_block_malloc(n) }
 }

 [unsafe]
-fn prealloc_realloc(old_data &byte, old_size int, new_size int) &byte {
+fn prealloc_realloc(old_data &byte, old_size isize, new_size isize) &byte {
 	new_ptr := unsafe { vmemory_block_malloc(new_size) }
 	min_size := if old_size < new_size { old_size } else { new_size }
 	unsafe { C.memcpy(new_ptr, old_data, min_size) }

@@ -108,7 +108,7 @@ fn prealloc_realloc(old_data &byte, old_size int, new_size int) &byte {
 }

 [unsafe]
-fn prealloc_calloc(n int) &byte {
+fn prealloc_calloc(n isize) &byte {
 	new_ptr := unsafe { vmemory_block_malloc(n) }
 	unsafe { C.memset(new_ptr, 0, n) }
 	return new_ptr
@@ -0,0 +1,19 @@
+[direct_array_access]
+fn test_big_int_array() {
+	dump(sizeof(isize))
+	mut maxn := 500_000_000 // try allocating ~2GB worth of integers on 32bit platforms
+	if sizeof(isize) > 4 {
+		maxn = 1_000_000_000 // 1 billion integers, when each is 4 bytes => require ~4GB
+	}
+	dump(maxn)
+	mut data := []int{len: maxn}
+
+	// ensure that all of the elements are written at least once, to prevent the OS from cheating:
+	for i in 0 .. maxn {
+		data[i] = i
+	}
+	assert data[0] == 0
+	assert data[maxn - 1] == maxn - 1
+	dump(data#[0..10])
+	dump(data#[-10..])
+}
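The new test above exercises the whole path end to end but needs gigabytes of RAM. A scaled-down version of the same check that runs quickly (hypothetical, not part of the commit):

fn main() {
	maxn := 1_000_000
	mut data := []int{len: maxn}
	for i in 0 .. maxn {
		data[i] = i
	}
	assert data[0] == 0
	assert data[maxn - 1] == maxn - 1
	println('ok, len: $data.len')
}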