@@ -3,20 +3,22 @@ module c
// NB: @@@ here serve as placeholders.
// They will be replaced with correct strings
// for each constant, during C code generation.
-const (
// V_COMMIT_HASH is generated by cmd/tools/gen_vc.v .
-c_commit_hash_default = '
+const c_commit_hash_default = '
#ifndef V_COMMIT_HASH
#define V_COMMIT_HASH "@@@"
#endif
'
// V_CURRENT_COMMIT_HASH is updated, when V is rebuilt inside a git repo.
-c_current_commit_hash_default = '
+const c_current_commit_hash_default = '
#ifndef V_CURRENT_COMMIT_HASH
#define V_CURRENT_COMMIT_HASH "@@@"
#endif
'
-c_concurrency_helpers = '
+const c_concurrency_helpers = '
typedef struct __shared_map __shared_map;
struct __shared_map { map val; sync__RwMutex mtx; };
static inline voidptr __dup_shared_map(voidptr src, int sz) {
@@ -31,8 +33,7 @@ static inline voidptr __dup_shared_array(voidptr src, int sz) {
sync__RwMutex_init(&dest->mtx);
return dest;
}
-static inline void __sort_ptr(uintptr_t a[], bool b[], int l)
-{
+static inline void __sort_ptr(uintptr_t a[], bool b[], int l) {
for (int i=1; i<l; i++) {
uintptr_t ins = a[i];
bool insb = b[i];
@@ -47,10 +48,8 @@ static inline void __sort_ptr(uintptr_t a[], bool b[], int l)
}
}
'
-// TODO: must be romved in future, no more mof use for it.
-c_str_fn_defs = ''
-c_common_macros = '
+const c_common_macros = '
#define EMPTY_VARG_INITIALIZATION 0
#define EMPTY_STRUCT_DECLARATION
#define EMPTY_STRUCT_INITIALIZATION
@@ -164,8 +163,18 @@ static inline void __sort_ptr(uintptr_t a[], bool b[], int l)
#undef __has_include
#endif
+//likely and unlikely macros
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+#define _likely_(x) __builtin_expect(x,1)
+#define _unlikely_(x) __builtin_expect(x,0)
+#else
+#define _likely_(x) (x)
+#define _unlikely_(x) (x)
+#endif
'
-c_unsigned_comparison_functions = '
+const c_unsigned_comparison_functions = '
// unsigned/signed comparisons
static inline bool _us32_gt(uint32_t a, int32_t b) { return a > INT32_MAX || (int32_t)a > b; }
static inline bool _us32_ge(uint32_t a, int32_t b) { return a >= INT32_MAX || (int32_t)a >= b; }
@@ -180,157 +189,8 @@ static inline bool _us64_ne(uint64_t a, int64_t b) { return a > INT64_MAX || (in
static inline bool _us64_le(uint64_t a, int64_t b) { return a <= INT64_MAX && (int64_t)a <= b; }
static inline bool _us64_lt(uint64_t a, int64_t b) { return a < INT64_MAX && (int64_t)a < b; }
'
-c_wyhash = '
-// ============== wyhash ==============
-#ifndef wyhash_final_version_3
-#define wyhash_final_version_3
-#ifndef WYHASH_CONDOM
-//protections that produce different results:
-//1: normal valid behavior
-//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
-#define WYHASH_CONDOM 1
-#endif
-#ifndef WYHASH_32BIT_MUM
-//0: normal version, slow on 32 bit systems
-//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function
-#define WYHASH_32BIT_MUM 0
-#endif
-//includes
-#include <stdint.h>
-#if defined(_MSC_VER) && defined(_M_X64)
-#include <intrin.h>
-#pragma intrinsic(_umul128)
-#endif
-//likely and unlikely macros
-#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
-#define _likely_(x) __builtin_expect(x,1)
-#define _unlikely_(x) __builtin_expect(x,0)
-#else
-#define _likely_(x) (x)
-#define _unlikely_(x) (x)
-#endif
-//128bit multiply function
-static inline uint64_t _wyrot(uint64_t x) { return (x>>32)|(x<<32); }
-static inline void _wymum(uint64_t *A, uint64_t *B){
-#if(WYHASH_32BIT_MUM)
-uint64_t hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;
-#if(WYHASH_CONDOM>1)
-*A^=_wyrot(hl)^hh; *B^=_wyrot(lh)^ll;
-#else
-*A=_wyrot(hl)^hh; *B=_wyrot(lh)^ll;
-#endif
-#elif defined(__SIZEOF_INT128__)
-__uint128_t r=*A; r*=*B;
-#if(WYHASH_CONDOM>1)
-*A^=(uint64_t)r; *B^=(uint64_t)(r>>64);
-#else
-*A=(uint64_t)r; *B=(uint64_t)(r>>64);
-#endif
-#elif defined(_MSC_VER) && defined(_M_X64)
-#if(WYHASH_CONDOM>1)
-uint64_t a, b;
-a=_umul128(*A,*B,&b);
-*A^=a; *B^=b;
-#else
-*A=_umul128(*A,*B,B);
-#endif
-#else
-uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
-uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
-lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
-#if(WYHASH_CONDOM>1)
-*A^=lo; *B^=hi;
-#else
-*A=lo; *B=hi;
-#endif
-#endif
-}
-//multiply and xor mix function, aka MUM
-static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B; }
-//endian macros
-#ifndef WYHASH_LITTLE_ENDIAN
-#ifdef TARGET_ORDER_IS_LITTLE
-#define WYHASH_LITTLE_ENDIAN 1
-#else
-#define WYHASH_LITTLE_ENDIAN 0
-#endif
-#endif
-//read functions
-#if (WYHASH_LITTLE_ENDIAN)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;}
-#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);}
-#elif defined(_MSC_VER)
-static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);}
-static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);}
-#else
-static inline uint64_t _wyr8(const uint8_t *p) {
-uint64_t v; memcpy(&v, p, 8);
-return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000));
-}
-static inline uint64_t _wyr4(const uint8_t *p) {
-uint32_t v; memcpy(&v, p, 4);
-return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000));
-}
-#endif
-static inline uint64_t _wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];}
-//wyhash main function
-static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){
-const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b;
-if(_likely_(len<=16)){
-if(_likely_(len>=4)){ a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); }
-else if(_likely_(len>0)){ a=_wyr3(p,len); b=0;}
-else a=b=0;
-}
-else{
-size_t i=len;
-if(_unlikely_(i>48)){
-uint64_t see1=seed, see2=seed;
-do{
-seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed);
-see1=_wymix(_wyr8(p+16)^secret[2],_wyr8(p+24)^see1);
-see2=_wymix(_wyr8(p+32)^secret[3],_wyr8(p+40)^see2);
-p+=48; i-=48;
-}while(_likely_(i>48));
-seed^=see1^see2;
-}
-while(_unlikely_(i>16)){ seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
-a=_wyr8(p+i-16); b=_wyr8(p+i-8);
-}
-return _wymix(secret[1]^len,_wymix(a^secret[1],b^seed));
-}
-//the default secret parameters
-static const uint64_t _wyp[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
-//a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
-static inline uint64_t wyhash64(uint64_t A, uint64_t B){ A^=0xa0761d6478bd642full; B^=0xe7037ed1a0b428dbull; _wymum(&A,&B); return _wymix(A^0xa0761d6478bd642full,B^0xe7037ed1a0b428dbull);}
-//The wyrand PRNG that pass BigCrush and PractRand
-static inline uint64_t wyrand(uint64_t *seed){ *seed+=0xa0761d6478bd642full; return _wymix(*seed,*seed^0xe7037ed1a0b428dbull);}
-//convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
-static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>12)*_wynorm;}
-//convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
-static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0;}
-#if(!WYHASH_32BIT_MUM)
-//fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
-static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k; }
-#endif
-#endif
-'
-c_helper_macros = '//============================== HELPER C MACROS =============================*/
+const c_helper_macros = '//============================== HELPER C MACROS =============================*/
//#define tos4(s, slen) ((string){.str=(s), .len=(slen)})
// _SLIT0 is used as NULL string for literal arguments
// `"" s` is used to enforce a string literal argument
@@ -342,10 +202,10 @@ static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k;
// copy something to the heap
#define HEAP(type, expr) ((type*)memdup((void*)&((type[]){expr}[0]), sizeof(type)))
#define _PUSH_MANY(arr, val, tmp, tmp_typ) {tmp_typ tmp = (val); array_push_many(arr, tmp.data, tmp.len);}
#define _IN_MAP(val, m) map_exists(m, val)
'
-c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
+const c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
r'
// c_headers
typedef int (*qsort_callback_func)(const void*, const void*);
#include <stdio.h> // TODO remove all these includes, define all function signatures and types manually
@@ -411,7 +271,6 @@ static voidptr memfreedup(voidptr ptr, voidptr src, int sz) {
return memdup(src, sz);
}
#if INTPTR_MAX == INT32_MAX
#define TARGET_IS_32BIT 1
#elif INTPTR_MAX == INT64_MAX
@@ -537,9 +396,9 @@ static void* g_live_info = NULL;
#ifdef _VFREESTANDING
#undef _VFREESTANDING
#endif
-' +
-c_wyhash
-c_builtin_types = '
+'
+const c_builtin_types = '
//================================== builtin types ================================*/
typedef int64_t i64;
typedef int16_t i16;
@@ -573,9 +432,9 @@ typedef bool (*MapEqFn)(voidptr, voidptr);
typedef void (*MapCloneFn)(voidptr, voidptr);
typedef void (*MapFreeFn)(voidptr);
'
-bare_c_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
+const c_bare_headers = c_helper_macros + c_unsigned_comparison_functions + c_common_macros +
'
#define _VFREESTANDING
typedef long unsigned int size_t;
@@ -612,6 +471,149 @@ static voidptr memfreedup(voidptr ptr, voidptr src, int sz) {
return memdup(src, sz);
}
-' +
-c_wyhash
-)
+'
+const c_wyhash_headers = '
+// ============== wyhash ==============
+#ifndef wyhash_final_version_3
+#define wyhash_final_version_3
+#ifndef WYHASH_CONDOM
+//protections that produce different results:
+//1: normal valid behavior
+//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
+#define WYHASH_CONDOM 1
+#endif
+#ifndef WYHASH_32BIT_MUM
+//0: normal version, slow on 32 bit systems
+//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function
+#define WYHASH_32BIT_MUM 0
+#endif
+//includes
+#include <stdint.h>
+#if defined(_MSC_VER) && defined(_M_X64)
+#include <intrin.h>
+#pragma intrinsic(_umul128)
+#endif
+//128bit multiply function
+static inline uint64_t _wyrot(uint64_t x) { return (x>>32)|(x<<32); }
+static inline void _wymum(uint64_t *A, uint64_t *B){
+#if(WYHASH_32BIT_MUM)
+uint64_t hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;
+#if(WYHASH_CONDOM>1)
+*A^=_wyrot(hl)^hh; *B^=_wyrot(lh)^ll;
+#else
+*A=_wyrot(hl)^hh; *B=_wyrot(lh)^ll;
+#endif
+#elif defined(__SIZEOF_INT128__)
+__uint128_t r=*A; r*=*B;
+#if(WYHASH_CONDOM>1)
+*A^=(uint64_t)r; *B^=(uint64_t)(r>>64);
+#else
+*A=(uint64_t)r; *B=(uint64_t)(r>>64);
+#endif
+#elif defined(_MSC_VER) && defined(_M_X64)
+#if(WYHASH_CONDOM>1)
+uint64_t a, b;
+a=_umul128(*A,*B,&b);
+*A^=a; *B^=b;
+#else
+*A=_umul128(*A,*B,B);
+#endif
+#else
+uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
+uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
+lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
+#if(WYHASH_CONDOM>1)
+*A^=lo; *B^=hi;
+#else
+*A=lo; *B=hi;
+#endif
+#endif
+}
+//multiply and xor mix function, aka MUM
+static inline uint64_t _wymix(uint64_t A, uint64_t B){ _wymum(&A,&B); return A^B; }
+//endian macros
+#ifndef WYHASH_LITTLE_ENDIAN
+#ifdef TARGET_ORDER_IS_LITTLE
+#define WYHASH_LITTLE_ENDIAN 1
+#else
+#define WYHASH_LITTLE_ENDIAN 0
+#endif
+#endif
+//read functions
+#if (WYHASH_LITTLE_ENDIAN)
+static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;}
+static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;}
+#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);}
+static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);}
+#elif defined(_MSC_VER)
+static inline uint64_t _wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);}
+static inline uint64_t _wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);}
+#else
+static inline uint64_t _wyr8(const uint8_t *p) {
+uint64_t v; memcpy(&v, p, 8);
+return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000));
+}
+static inline uint64_t _wyr4(const uint8_t *p) {
+uint32_t v; memcpy(&v, p, 4);
+return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000));
+}
+#endif
+static inline uint64_t _wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];}
+//wyhash main function
+static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){
+const uint8_t *p=(const uint8_t *)key; seed^=*secret; uint64_t a, b;
+if(_likely_(len<=16)){
+if(_likely_(len>=4)){ a=(_wyr4(p)<<32)|_wyr4(p+((len>>3)<<2)); b=(_wyr4(p+len-4)<<32)|_wyr4(p+len-4-((len>>3)<<2)); }
+else if(_likely_(len>0)){ a=_wyr3(p,len); b=0;}
+else a=b=0;
+}
+else{
+size_t i=len;
+if(_unlikely_(i>48)){
+uint64_t see1=seed, see2=seed;
+do{
+seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed);
+see1=_wymix(_wyr8(p+16)^secret[2],_wyr8(p+24)^see1);
+see2=_wymix(_wyr8(p+32)^secret[3],_wyr8(p+40)^see2);
+p+=48; i-=48;
+}while(_likely_(i>48));
+seed^=see1^see2;
+}
+while(_unlikely_(i>16)){ seed=_wymix(_wyr8(p)^secret[1],_wyr8(p+8)^seed); i-=16; p+=16; }
+a=_wyr8(p+i-16); b=_wyr8(p+i-8);
+}
+return _wymix(secret[1]^len,_wymix(a^secret[1],b^seed));
+}
+//the default secret parameters
+static const uint64_t _wyp[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
+//a useful 64bit-64bit mix function to produce deterministic pseudo random numbers that can pass BigCrush and PractRand
+static inline uint64_t wyhash64(uint64_t A, uint64_t B){ A^=0xa0761d6478bd642full; B^=0xe7037ed1a0b428dbull; _wymum(&A,&B); return _wymix(A^0xa0761d6478bd642full,B^0xe7037ed1a0b428dbull);}
+//The wyrand PRNG that pass BigCrush and PractRand
+static inline uint64_t wyrand(uint64_t *seed){ *seed+=0xa0761d6478bd642full; return _wymix(*seed,*seed^0xe7037ed1a0b428dbull);}
+//convert any 64 bit pseudo random numbers to uniform distribution [0,1). It can be combined with wyrand, wyhash64 or wyhash.
+static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>12)*_wynorm;}
+//convert any 64 bit pseudo random numbers to APPROXIMATE Gaussian distribution. It can be combined with wyrand, wyhash64 or wyhash.
+static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0;}
+#if(!WYHASH_32BIT_MUM)
+//fast range integer random number generation on [0,k) credit to Daniel Lemire. May not work when WYHASH_32BIT_MUM=1. It can be combined with wyrand, wyhash64 or wyhash.
+static inline uint64_t wy2u0k(uint64_t r, uint64_t k){ _wymum(&r,&k); return k; }
+#endif
+#endif
+'