@@ -4,13 +4,14 @@ Version: 0 (From Scratch, 1-Stage Compilation, LLVM & WinAPI Only, Win CRT Mul
 Host: Windows 11 (x86-64)
 Toolchain: LLVM (2025-08-30), C-Standard: 11
-Following strictly (mostly): Neokineogfx - Fixing C
+Following strictly: Neokineogfx - Fixing C
 https://youtu.be/RrL7121MOeA
+Unlike lottes_hybrid this file will be entirely untyped for any pointer addressing.
+Win CRT imports will also be typeless signatures.
 */
 #pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpre-c11-compat"
+// #pragma clang diagnostic ignored "-Wc++-keyword"
+#pragma clang diagnostic ignored "-Wcast-qual"
 #pragma clang diagnostic ignored "-Wunused-const-variable"
 #pragma clang diagnostic ignored "-Wunused-but-set-variable"
 #pragma clang diagnostic ignored "-Wswitch"
@@ -24,12 +25,10 @@ Win CRT imports will also be typeless signatures.
 #pragma clang diagnostic ignored "-Wkeyword-macro"
 #pragma clang diagnostic ignored "-Wc23-compat"
 #pragma clang diagnostic ignored "-Wreserved-identifier"
-#pragma clang diagnostic ignored "-Wpre-c11-compat"
 #pragma clang diagnostic ignored "-Wc23-extensions"
 #pragma clang diagnostic ignored "-Wunused-macros"
 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
 #pragma clang diagnostic ignored "-Wunsafe-buffer-usage"
-#pragma clang diagnostic ignored "-Wc++-keyword"
 #pragma clang diagnostic ignored "-Wimplicit-function-declaration"
 #pragma clang diagnostic ignored "-Wcast-align"
 #pragma clang diagnostic ignored "-Wunused-parameter"
@@ -54,12 +53,12 @@ Win CRT imports will also be typeless signatures.
 #define V_ volatile
 #define W_ __attribute((__stdcall__)) __attribute__((__force_align_arg_pointer__))
-#define reg register
+// #define reg register
 #define glue_impl(A, B) A ## B
 #define glue(A, B) glue_impl(A, B)
 #define stringify_impl(S) #S
-#define stringify(S) stringify_impl(S)
+#define stringify(S) cast(UTF8*, stringify_impl(S))
 #define tmpl(prefix, type) prefix ## _ ## type
 #define static_assert _Static_assert
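Side note on the macro layer above: glue, stringify, and tmpl are the usual two-stage paste/stringize helpers, where the extra level of indirection forces arguments to macro-expand before ## or # is applied. A minimal standalone sketch of that expansion behavior (plain C here, without this file's UTF8 cast; the Slice_int example type is illustrative):

    #include <stdio.h>
    #define glue_impl(A, B) A ## B
    #define glue(A, B)      glue_impl(A, B)   // extra stage: A and B expand first
    #define stringify_impl(S) #S
    #define stringify(S) stringify_impl(S)
    #define tmpl(prefix, type) prefix ## _ ## type

    typedef struct { int* ptr; long len; } tmpl(Slice, int); // expands to: Slice_int

    int main(void) {
        int glue(count_, 1) = 42;                    // expands to: int count_1 = 42;
        printf("%s\n", stringify(glue(count_, 1)));  // argument expands first: prints count_1
        return glue(count_, 1) - 42;                 // returns 0
    }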
@@ -131,7 +130,7 @@ enum { false = 0, true = 1, true_overflow, };
 #define pcast(type, data) cast(type*, & (data))[0]
 #define nullptr cast(void*, 0)
 #define null cast(U8, 0)
-#define offset_of(type, member) cast(U8, & (((type*) 0)->member))
+#define soff(type, member) cast(U8, & (((type*) 0)->member)) // offset_of
 #define size_of(data) cast(U8, sizeof(data))
 #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
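The soff rename above is the workhorse of this file's untyped addressing: pointers travel as plain U8 integers, struct fields are reached by adding an offset, and reads/writes go through casting accessors like u8_r. A standalone restatement of the idiom, with U8/u8_r/soff re-spelled in stdint terms so the sketch compiles on its own:

    #include <stdint.h>
    #include <stddef.h>
    typedef uint64_t U8;                                // integer-as-address, as in this file
    #define soff(type, member) ((U8) offsetof(type, member))
    #define u8_r(addr) ((U8*)(uintptr_t)(addr))         // reinterpret an address as U8[1]

    typedef struct { U8 ptr; U8 len; } Slice_Mem;

    int main(void) {
        Slice_Mem s = {0};
        U8 base = (U8)(uintptr_t) &s;                   // the address travels as an integer
        u8_r(base + soff(Slice_Mem, ptr))[0] = 0x1000;  // s.ptr = 0x1000
        u8_r(base + soff(Slice_Mem, len))[0] = 64;      // s.len = 64
        return !(s.ptr == 0x1000 && s.len == 64);
    }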
@@ -198,8 +197,8 @@ typedef def_struct(Slice_Str8) { U8 ptr; U8 len; };
 { \
 	assert_handler( \
 		stringify(cond), \
-		__FILE__, \
-		__func__, \
+		(UTF8*)__FILE__, \
+		(UTF8*)__func__, \
 		cast(S4, __LINE__), \
 		msg, \
 		## __VA_ARGS__); \
@@ -230,6 +229,8 @@ I_ B4 mem_zero (U8 dest, U8 len) { if (dest == 0) return fa
 #define struct_copy(type, dest, src) mem_copy(dest, src, sizeof(type))
 #define struct_zero(type, dest)      mem_zero(dest, sizeof(type))
+#define struct_assign(type, dest, src) cast(type*R_, dest)[0] = cast(type*R_, src)[0]
 I_ U8 align_pow2(U8 x, U8 b) {
 	assert(b != 0);
 	assert((b & (b - 1)) == 0); // Check power of 2
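align_pow2 is used by every allocator size calculation below. Its body is elided in this hunk; the preconditions it asserts match the standard round-up-to-a-power-of-two idiom, (x + b - 1) & ~(b - 1), which this standalone check assumes is what the elided body does:

    #include <assert.h>
    #include <stdint.h>

    // Round x up to the next multiple of power-of-two b: the standard bit trick.
    static uint64_t align_pow2(uint64_t x, uint64_t b) {
        assert(b != 0);
        assert((b & (b - 1)) == 0);           // b must be a power of 2
        return (x + b - 1) & ~(b - 1);
    }

    int main(void) {
        assert(align_pow2(13, 8)   == 16);    // 13 -> next multiple of 8
        assert(align_pow2(16, 8)   == 16);    // already aligned stays put
        assert(align_pow2(1, 4096) == 4096);  // one byte still claims a whole page
        return 0;
    }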
@@ -276,7 +277,7 @@ typedef def_Slice(B1);
 #define slice_to_bytes(slice) ((Slice_B1){cast(B1*, (slice).ptr), (slice).len * size_of_slice_type(slice)})
 #define slice_fmem(mem) slice_mem(u8_(mem), size_of(mem))
-I_ void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero(u8_(mem.ptr), mem.len); }
+I_ void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); mem_zero(u8_(mem.ptr), mem.len); }
 #define slice_zero(slice) slice__zero(slice_mem_s(slice), size_of_slice_type(slice))
 I_ void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
@@ -293,6 +294,19 @@ I_ void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typew
 #define slice_iter(container, iter) (typeof((container).ptr) iter = (container).ptr; iter != slice_end(container); ++ iter)
 #define slice_arg_from_array(type, ...) & (tmpl(Slice,type)) { .ptr = farray_init(type, __VA_ARGS__), .len = farray_len( farray_init(type, __VA_ARGS__)) }
+I_ void slice_assign(U8 dest, U8 src) {
+	u8_r(dest + soff(Slice_Mem, ptr))[0] = u8_r(src + soff(Slice_Mem, ptr))[0];
+	u8_r(dest + soff(Slice_Mem, len))[0] = u8_r(src + soff(Slice_Mem, len))[0];
+}
+I_ void slice_assign_comp(U8 dest, U8 ptr, U8 len) {
+	u8_r(dest + soff(Slice_Mem, ptr))[0] = ptr;
+	u8_r(dest + soff(Slice_Mem, len))[0] = len;
+}
+I_ void slice_clear(U8 base) {
+	u8_r(base + soff(Slice_Mem, ptr))[0] = 0;
+	u8_r(base + soff(Slice_Mem, len))[0] = 0;
+}
 #define span_iter(type, iter, m_begin, op, m_end) \
 ( \
 	tmpl(Iter_Span,type) iter = { \
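The three helpers added here are the untyped layer's way of writing a Slice_Mem result out through a raw address, replacing struct_copy through a compound literal. A hypothetical call site, assuming the U8/B4/Slice_Mem definitions above (example_emit is this editor's name, not the file's):

    // Hypothetical: report an allocation through an out-slice address, or
    // clear it on failure.
    I_ void example_emit(U8 out_slice, U8 ptr, U8 len, B4 ok) {
        if (ok) { slice_assign_comp(out_slice, ptr, len); } // writes .ptr then .len
        else    { slice_clear(out_slice); }                 // zeroes both fields
    }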
@@ -442,7 +456,7 @@ typedef def_struct(FArena) {
 I_ void farena_init__u  (U8 arena, U8 mem_ptr, U8 mem_len);
    void farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
 I_ void farena_reset__u (U8 arena);
-I_ void farena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot);
+I_ void farena_rewind__u(U8 arena, U8 sp_slot);
 I_ void farena_save__u  (U8 arena, U8 sp);
 I_ FArena farena_make   (Slice_Mem mem);
@@ -472,17 +486,18 @@ typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
 global OS_Windows_State os__windows_info;
 I_ OS_SystemInfo* os_system_info(void);
-I_ void os_init (void);
+   void os_init (void);
-I_ U8   os_vmem_reserve__u( U8 size, U4 no_large_pages, U8 base_addr);
-I_ B4   os_vmem_commit__u (U8 vm, U8 size);
+I_ U8   os_vmem_reserve__u( U8 size, B4 no_large_pages, U8 base_addr);
+I_ B4   os_vmem_commit__u (U8 vm, U8 size, B4 no_large_pages);
 I_ void os_vmem_release__u(U8 vm, U8 size);
 I_ U8   os__vmem_reserve( U8 size, Opts_vmem_R opts);
-I_ B4   os_vmem_commit  (U8 vm, U8 size);
+I_ B4   os__vmem_commit (U8 vm, U8 size, Opts_vmem_R opts);
 I_ void os_vmem_release (U8 vm, U8 size);
-#define os_vmem_reserve(size, ...) os__vmem_reserve(size, opt_args(Opts_vmem, __VA_ARGS__))
+#define os_vmem_reserve(size, ...)    os__vmem_reserve( size, opt_args(Opts_vmem, __VA_ARGS__))
+#define os_vmem_commit(vm, size, ...) os__vmem_commit (vm, size, opt_args(Opts_vmem, __VA_ARGS__))
 #pragma endregion OS
 #pragma region VArena (Virtual Address Space Arena)
@@ -505,10 +520,10 @@ typedef def_struct(Opts_varena_make) {
 	VArenaFlags flags;
 };
-I_ U8   varena__make__u  (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr);
+   U8   varena__make__u  (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr);
 I_ void varena_release__u(U8 arena);
 I_ void varena_reset__u  (U8 arena);
-I_ void varena_rewind__u (U8 arena, U8 sp_type_sig, U8 sp_slot);
+I_ void varena_rewind__u (U8 arena, U8 sp_slot);
 I_ void varena_save__u   (U8 arena, U8 sp_addr);
    void varena__push__u  (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
    void varena__grow__u  (U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero);
@@ -522,6 +537,8 @@ I_ void varena_rewind (VArena_R arena, AllocatorSP save_point);
 I_ Slice_Mem   varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts);
 I_ AllocatorSP varena_save   (VArena_R arena);
+#define varena_make(...) varena__make(opt_args(Opts_varena_make, __VA_ARGS__))
 void varena_allocator_proc(U8 data, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out);
 #define ainfo_varena(arena) (AllocatorInfo){ .proc = varena_allocator_proc, .data = u8_(arena) }
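Every allocator in this file shares one proc signature, with the op code selecting alloc/grow/shrink/rewind/query, and AllocatorInfo bundling the proc pointer with an opaque data word. A hypothetical call through that interface, assuming the names declared above; the .reserve_size option field is this editor's guess (only .flags is visible in this diff):

    VArena* vm = varena_make(.reserve_size = mega(64));
    AllocatorInfo ainfo = ainfo_varena(vm);
    // One allocation request routed through the shared proc signature:
    uvar(AllocatorProc_Out, out) = {0};
    ainfo.proc(ainfo.data, /*size*/ kilo(4), /*alignment*/ 0,
               /*old_ptr*/ 0, /*old_len*/ 0, AllocatorOp_Alloc, u8_(out));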
@@ -535,6 +552,111 @@ cast(type*, varena__push(arena, size_of(type), 1, opt_args(Opts_varena, __VA_ARG
 (tmpl(Slice,type)){ varena__push(arena, size_of(type), amount, opt_args(Opts_varena, __VA_ARGS__)).ptr, amount }
 #pragma endregion VArena
+#pragma region Arena
+typedef Opts_varena Opts_arena;
+typedef def_enum(U4, ArenaFlags) {
+	ArenaFlag_NoLargePages = (1 << 0),
+	ArenaFlag_NoChain      = (1 << 1),
+};
+typedef def_struct(Arena) {
+	VArena*    backing;
+	Arena*     prev;
+	Arena*     current;
+	U8         base_pos;
+	U8         pos;
+	ArenaFlags flags;
+	A4_B1      _PAD_;
+};
+U8   arena_make__u   (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr);
+void arena__push__u  (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 out_mem);
+void arena_release__u(U8 arena);
+void arena_reset__u  (U8 arena);
+void arena_rewind__u (U8 arena, U8 slot);
+void arena_save__u   (U8 arena, U8 out_sp);
+typedef Opts_varena_make Opts_arena_make;
+Arena*         arena__make  (Opts_arena_make*R_ opts);
+Slice_Mem      arena__push  (Arena_R arena, U8 amount, U8 type_width, Opts_arena*R_ opts);
+I_ void        arena_release(Arena_R arena);
+I_ void        arena_reset  (Arena_R arena);
+void           arena_rewind (Arena_R arena, AllocatorSP save_point);
+I_ AllocatorSP arena_save   (Arena_R arena);
+void arena_allocator_proc(U8 data, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out);
+#define ainfo_arena(arena) (AllocatorInfo){ .proc = & arena_allocator_proc, .data = u8_(arena) }
+#define arena_make(...) arena__make(opt_args(Opts_arena_make, __VA_ARGS__))
+#define arena_push_mem(arena, amount, ...) arena__push(arena, amount, 1, opt_args(Opts_arena, lit(stringify(B1)), __VA_ARGS__))
+#define arena_push(arena, type, ...) \
+cast(type*, arena__push(arena, 1, size_of(type), opt_args(Opts_arena, lit(stringify(type)), __VA_ARGS__) ).ptr)
+#define arena_push_array(arena, type, amount, ...) \
+(tmpl(Slice,type)){ arena__push(arena, size_of(type), amount, opt_args(Opts_arena, lit(stringify(type)), __VA_ARGS__)).ptr, amount }
+#pragma endregion Arena
+#pragma region Hashing
+I_ void hash64_fnv1a__u(U8 hash, U8 data_ptr, U8 data_len, U8 seed) {
+	local_persist U8 const default_seed = 0xcbf29ce484222325;
+	if (seed != 0) { u8_r(hash)[0] = seed;         }
+	else           { u8_r(hash)[0] = default_seed; }
+	U8 elem = data_ptr;
+	loop:
+	if (elem == data_ptr + data_len) goto end;
+		u8_r(hash)[0] ^= u1_r(elem)[0];
+		u8_r(hash)[0] *= 0x100000001b3;
+		elem += 1;
+	goto loop;
+	end:
+	return;
+}
+typedef def_struct(Opts_hash64_fnv1a) { U8 seed; };
+I_ void hash64__fnv1a(U8_R hash, Slice_Mem data, Opts_hash64_fnv1a*R_ opts) {
+	assert(opts != nullptr);
+	hash64_fnv1a__u(u8_(hash), data.ptr, data.len, opts->seed);
+}
+#define hash64_fnv1a(hash, data, ...) hash64__fnv1a(hash, data, opt_args(Opts_hash64_fnv1a, __VA_ARGS__))
+#pragma endregion Hashing
+#pragma region Key Table Linear (KTL)
+#define def_KTL_Slot(type) \
+def_struct(tmpl(KTL_Slot,type)) { \
+	U8   key; \
+	type value; \
+}
+#define def_KTL(type) \
+def_Slice(tmpl(KTL_Slot,type)); \
+typedef tmpl(Slice_KTL_Slot,type) tmpl(KTL,type)
+typedef Slice_Mem KTL_Byte;
+typedef def_struct(KTL_Meta) {
+	U8   slot_size;
+	U8   kt_value_offset;
+	U8   type_width;
+	Str8 type_name;
+};
+typedef def_farray(Str8, 2);
+typedef def_Slice(A2_Str8);
+typedef def_KTL_Slot(Str8);
+typedef def_KTL(Str8);
+void ktl_populate_slice_a2_str8(U8 kt, U8 backing_proc, U8 backing_data, U8 values);
+#pragma endregion KTL
+#pragma region Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
+#pragma endregion KT1CX
+#pragma region String Operations
+#pragma endregion String Operations
+#pragma region File System
+#pragma endregion File System
+#pragma region WATL
+#pragma endregion WATL
 #pragma endregion Header
 #pragma region Implementation
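The FNV-1a routine declared above uses the standard 64-bit offset basis (0xcbf29ce484222325) and prime (0x100000001b3). A standalone restatement in plain C with a published test vector, useful for cross-checking the constants:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t fnv1a64(const void* data, uint64_t len, uint64_t seed) {
        uint64_t hash = seed ? seed : 0xcbf29ce484222325u; // FNV offset basis
        const unsigned char* p = (const unsigned char*) data;
        for (uint64_t i = 0; i < len; ++i) {
            hash ^= p[i];              // xor the byte in first...
            hash *= 0x100000001b3u;    // ...then multiply by the FNV prime
        }
        return hash;
    }

    int main(void) {
        // Known FNV-1a 64-bit vectors: empty input yields the offset basis.
        assert(fnv1a64("", 0, 0)  == 0xcbf29ce484222325u);
        assert(fnv1a64("a", 1, 0) == 0xaf63dc4c8601ec8cu); // published vector for "a"
        return 0;
    }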
@@ -560,29 +682,29 @@ I_ void mem_save_point__u(U8 proc, U8 data, U8 sp) {
 	assert(proc != null);
 	uvar(AllocatorProc_Out, out) = {0};
 	cast(AllocatorProc*, proc)(data, 0, 0, 0, 0, AllocatorOp_SavePoint, u8_(out));
-	struct_copy(AllocatorSP, sp, (U8) out + offset_of(AllocatorProc_Out, save_point));
+	struct_assign(AllocatorSP, sp, (U8) out + soff(AllocatorProc_Out, save_point));
 }
 I_ void mem__alloc__u(U8 out_mem, U8 proc, U8 data, U8 size, U8 alignment, B4 no_zero) {
 	assert(proc != null);
 	uvar(AllocatorProc_Out, out) = {0};
 	cast(AllocatorProc*, proc)(data, size, alignment, 0, 0, no_zero ? AllocatorOp_Alloc_NoZero : AllocatorOp_Alloc, u8_(out));
-	struct_copy(Slice_Mem, out_mem, (U8) out + offset_of(AllocatorProc_Out, allocation));
+	slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation));
 }
 I_ void mem__grow__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment, B4 no_zero, B4 give_actual) {
 	assert(proc != null);
 	uvar(AllocatorProc_Out, out) = {0};
 	cast(AllocatorProc*, proc)(data, size, alignment, old_ptr, old_len, no_zero ? AllocatorOp_Grow_NoZero : AllocatorOp_Grow, u8_(out));
-	if (give_actual == false) { u8_r(out + offset_of(AllocatorProc_Out, allocation) + offset_of(Slice_Mem, len))[0] = size; }
-	struct_copy(Slice_Mem, out_mem, (U8) out + offset_of(AllocatorProc_Out, allocation));
+	if (give_actual == false) { u8_r(out + soff(AllocatorProc_Out, allocation) + soff(Slice_Mem, len))[0] = size; }
+	slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation));
 }
 I_ void mem__shrink__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment) {
 	assert(proc != null);
 	uvar(AllocatorProc_Out, out) = {0};
 	cast(AllocatorProc*, proc)(data, size, alignment, old_ptr, old_len, AllocatorOp_Shrink, u8_(out));
-	struct_copy(Slice_Mem, out_mem, (U8) out + offset_of(AllocatorProc_Out, allocation));
+	slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation));
 }
 I_ void mem__resize__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment, B4 no_zero, B4 give_actual) {
-	if (old_len == size) { struct_copy(Slice_Mem, out_mem, (U8)& slice_mem(old_ptr, old_len)); }
+	if (old_len == size) { slice_assign_comp(out_mem, old_ptr, old_len); return; }
 	if (old_len <  size) { mem__grow__u  (out_mem, proc, data, old_ptr, old_len, size, alignment, no_zero, give_actual); }
 	else                 { mem__shrink__u(out_mem, proc, data, old_ptr, old_len, size, alignment); }
 }
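On the give_actual flag threaded through grow/resize above: allocators may over-provision because of alignment padding, and when the caller passes give_actual == false the reported length is forced back to the requested size. A standalone restatement of just that reporting rule (names here are illustrative):

    #include <assert.h>
    #include <stdint.h>
    typedef struct { uint64_t ptr, len; } Slice;

    static Slice grow_reporting(Slice got, uint64_t requested, int give_actual) {
        if (!give_actual) { got.len = requested; } // hide the padding from the caller
        return got;
    }

    int main(void) {
        Slice got = { 0x1000, 48 };                   // allocator rounded 33 up to 48
        assert(grow_reporting(got, 33, 0).len == 33); // caller sees what it asked for
        assert(grow_reporting(got, 33, 1).len == 48); // or the true grant
        return 0;
    }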
@@ -621,87 +743,86 @@ I_ Slice_Mem mem__shrink(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_s
 #pragma region FArena (Fixed-Sized Arena)
 I_ void farena_init__u(U8 arena, U8 mem_ptr, U8 mem_len) {
 	assert(arena != null);
-	u8_r(arena + offset_of(FArena, start)   )[0] = mem_ptr;
-	u8_r(arena + offset_of(FArena, capacity))[0] = mem_len;
-	u8_r(arena + offset_of(FArena, used)    )[0] = 0;
+	u8_r(arena + soff(FArena, start)   )[0] = mem_ptr;
+	u8_r(arena + soff(FArena, capacity))[0] = mem_len;
+	u8_r(arena + soff(FArena, used)    )[0] = 0;
 }
 inline void farena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 result) {
 	if (amount == 0) { struct_zero(Slice_Mem, result); }
-	U8   reg desired   = type_width * amount;
-	U8   reg to_commit = align_pow2(desired, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
-	U8_R reg used      = u8_r(arena + offset_of(FArena, used));
-	U8   reg unused    = u8_r(arena + offset_of(FArena, capacity))[0] - used[0]; assert(to_commit <= unused);
-	U8   reg ptr       = u8_r(arena + offset_of(FArena, start)   )[0] + used[0];
+	U8   desired   = type_width * amount;
+	U8   to_commit = align_pow2(desired, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+	U8_R used      = u8_r(arena + soff(FArena, used));
+	U8   unused    = u8_r(arena + soff(FArena, capacity))[0] - used[0]; assert(to_commit <= unused);
+	U8   ptr       = u8_r(arena + soff(FArena, start)   )[0] + used[0];
 	used[0] += to_commit;
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(ptr, desired));
+	slice_assign_comp(result, ptr, desired);
 }
 inline void farena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
 	assert(result != null);
 	assert(arena  != null);
-	U8_R reg used = u8_r(arena + offset_of(FArena, used));
+	U8_R used = u8_r(arena + soff(FArena, used));
 	/*Check if the allocation is at the end of the arena*/{
-		U8 reg alloc_end = old_ptr + old_len;
-		U8 reg arena_end = u8_r(arena + offset_of(FArena, start))[0] + used[0];
+		U8 alloc_end = old_ptr + old_len;
+		U8 arena_end = u8_r(arena + soff(FArena, start))[0] + used[0];
 		if (alloc_end != arena_end) {
 			// Not at the end, can't grow in place
-			struct_zero(Slice_Mem, result);
+			slice_clear(result);
 			return;
 		}
 	}
 	// Calculate growth
-	U8 reg grow_amount  = requested_size - old_len;
-	U8 reg aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
-	U8 reg unused       = u8_r(arena + offset_of(FArena, capacity))[0] - used[0];
+	U8 grow_amount  = requested_size - old_len;
+	U8 aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+	U8 unused       = u8_r(arena + soff(FArena, capacity))[0] - used[0];
 	if (aligned_grow > unused) {
 		// Not enough space
-		struct_zero(Slice_Mem, result);
+		slice_clear(result);
 		return;
 	}
 	used[0] += aligned_grow;
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, aligned_grow + requested_size));
-	memory_zero(old_ptr + old_len, grow_amount * cast(U8, should_zero));
+	slice_assign_comp(result, old_ptr, aligned_grow + requested_size);
+	mem_zero(old_ptr + old_len, grow_amount * cast(U8, should_zero));
 }
 inline void farena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) {
 	assert(result != null);
 	assert(arena  != null);
-	U8_R reg used = u8_r(arena + offset_of(FArena, used));
+	U8_R used = u8_r(arena + soff(FArena, used));
 	/*Check if the allocation is at the end of the arena*/ {
-		U8 reg alloc_end = old_ptr + old_len;
-		U8 reg arena_end = u8_r(arena + offset_of(FArena, start))[0] + used[0];
+		U8 alloc_end = old_ptr + old_len;
+		U8 arena_end = u8_r(arena + soff(FArena, start))[0] + used[0];
 		if (alloc_end != arena_end) {
 			// Not at the end, can't shrink but return adjusted size
-			struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
+			slice_assign_comp(result, old_ptr, requested_size);
 			return;
 		}
 	}
-	U8 reg aligned_original = align_pow2(old_len, MEMORY_ALIGNMENT_DEFAULT);
-	U8 reg aligned_new      = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+	U8 aligned_original = align_pow2(old_len, MEMORY_ALIGNMENT_DEFAULT);
+	U8 aligned_new      = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
 	used[0] -= (aligned_original - aligned_new);
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
+	slice_assign_comp(result, old_ptr, requested_size);
 }
-I_ void farena_reset__u(U8 arena) { u8_r(arena + offset_of(FArena, used))[0] = 0; }
+I_ void farena_reset__u(U8 arena) { u8_r(arena + soff(FArena, used))[0] = 0; }
-I_ void farena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot) {
-	assert(sp_type_sig == (U8)& farena_allocator_proc);
-	U8   reg start = u8_r(arena + offset_of(FArena, start))[0];
-	U8_R reg used  = u8_r(arena + offset_of(FArena, used));
-	U8   reg end   = start + used[0]; assert_bounds(sp_slot, start, end);
+I_ void farena_rewind__u(U8 arena, U8 sp_slot) {
+	U8   start = u8_r(arena + soff(FArena, start))[0];
+	U8_R used  = u8_r(arena + soff(FArena, used));
+	U8   end   = start + used[0]; assert_bounds(sp_slot, start, end);
 	used[0] -= sp_slot - start;
 }
 I_ void farena_save__u(U8 arena, U8 sp) {
-	u8_r(sp + offset_of(AllocatorSP, type_sig))[0] = (U8)& farena_allocator_proc;
-	u8_r(sp + offset_of(AllocatorSP, slot    ))[0] = u8_r(arena + offset_of(FArena, used))[0];
+	u8_r(sp + soff(AllocatorSP, type_sig))[0] = (U8)& farena_allocator_proc;
+	u8_r(sp + soff(AllocatorSP, slot    ))[0] = u8_r(arena + soff(FArena, used))[0];
 }
 void farena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out)
 {
 	assert(out   != null);
 	assert(arena != null);
-	U8 reg allocation = arena + offset_of(AllocatorProc_Out, allocation);
+	U8 allocation = out + soff(AllocatorProc_Out, allocation);
 	switch (op)
 	{
 		case AllocatorOp_Alloc:
 		case AllocatorOp_Alloc_NoZero:
 			farena__push__u(arena, requested_size, 1, alignment, allocation);
-			memory_zero(u8_r(allocation + offset_of(Slice_Mem, ptr))[0], u8_r(allocation + offset_of(Slice_Mem, len))[0] * op);
+			mem_zero(u8_r(allocation + soff(Slice_Mem, ptr))[0], u8_r(allocation + soff(Slice_Mem, len))[0] * op);
 		break;
 		case AllocatorOp_Free:  break;
 		case AllocatorOp_Reset: farena_reset__u(arena); break;
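The FArena above is a plain bump allocator over a caller-supplied block: push rounds the request up to the alignment, advances used, and returns the previous cursor with the unpadded length. A standalone restatement of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t start, capacity, used; } FArena;

    static uint64_t farena_push(FArena* a, uint64_t size, uint64_t align) {
        uint64_t aligned = (size + align - 1) & ~(align - 1);
        assert(aligned <= a->capacity - a->used);   // out of room is a hard error here
        uint64_t ptr = a->start + a->used;          // allocation begins at the cursor
        a->used += aligned;                         // cursor advances by the padded size
        return ptr;
    }

    int main(void) {
        unsigned char backing[256];
        FArena a = { (uint64_t)(uintptr_t) backing, sizeof backing, 0 };
        uint64_t p0 = farena_push(&a, 13, 16);      // 13 bytes, padded to 16
        uint64_t p1 = farena_push(&a, 16, 16);
        assert(p1 - p0 == 16 && a.used == 32);
        return 0;
    }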
@@ -714,21 +835,21 @@ void farena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr
 			farena__shrink__u(allocation, arena, old_ptr, old_len, requested_size, alignment);
 		break;
-		case AllocatorOp_Rewind:    farena_rewind__u(arena, old_ptr, old_len); break;
+		case AllocatorOp_Rewind:    farena_rewind__u(arena, old_len); break;
 		case AllocatorOp_SavePoint: farena_save__u(arena, allocation); break;
 		case AllocatorOp_Query:
-			u4_r(out + offset_of(AllocatorQueryInfo, features))[0] =
+			u4_r(out + soff(AllocatorQueryInfo, features))[0] =
 				  AllocatorQuery_Alloc
 				| AllocatorQuery_Reset
 				| AllocatorQuery_Resize
 				| AllocatorQuery_Rewind
 			;
-			U8 reg max_alloc = u8_r(arena + offset_of(FArena, capacity))[0] - u8_r(arena + offset_of(FArena, used))[0];
-			u8_r(out + offset_of(AllocatorQueryInfo, max_alloc))[0] = max_alloc;
-			u8_r(out + offset_of(AllocatorQueryInfo, min_alloc))[0] = 0;
-			u8_r(out + offset_of(AllocatorQueryInfo, left     ))[0] = max_alloc;
-			farena_save__u(arena, out + offset_of(AllocatorQueryInfo, save_point));
+			U8 max_alloc = u8_r(arena + soff(FArena, capacity))[0] - u8_r(arena + soff(FArena, used))[0];
+			u8_r(out + soff(AllocatorQueryInfo, max_alloc))[0] = max_alloc;
+			u8_r(out + soff(AllocatorQueryInfo, min_alloc))[0] = 0;
+			u8_r(out + soff(AllocatorQueryInfo, left     ))[0] = max_alloc;
+			farena_save__u(arena, out + soff(AllocatorQueryInfo, save_point));
 		break;
 	}
 	return;
@@ -782,7 +903,7 @@ typedef def_struct(MS_TOKEN_PRIVILEGES) { MS_DWORD PrivilegeCount; MS_LUID_AN
 W_ MS_BOOL   ms_close_handle(MS_HANDLE hObject) __asm__("CloseHandle");
 W_ MS_BOOL   ms_adjust_token_privleges(MS_HANDLE TokenHandle, MS_BOOL DisableAllPrivileges, MS_PTOKEN_PRIVILEGES NewState, MS_DWORD BufferLength, MS_PTOKEN_PRIVILEGES PreviousState, MS_PDWORD ReturnLength) __asm__("AdjustTokenPrivileges");
 W_ MS_HANDLE ms_get_current_process(void) __asm__("GetCurrentProcess");
-W_ U8        ms_get_larg_page_minimum(void) __asm__("GetCurrentProcess");
+W_ U8        ms_get_larg_page_minimum(void) __asm__("GetLargePageMinimum");
 W_ MS_BOOL   ms_lookup_priviledge_value_w(MS_LPWSTR lpSystemName, MS_LPWSTR lpName, MS_PLUID lpLuid) __asm__("LookupPrivilegeValueW");
 W_ MS_BOOL   ms_open_process_token(MS_HANDLE ProcessHandle, MS_DWORD DesiredAccess, MS_PHANDLE TokenHandle) __asm__("OpenProcessToken");
 W_ MS_LPVOID ms_virtual_alloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationType, MS_DWORD flProtect) __asm__("VirtualAlloc");
@@ -806,117 +927,133 @@ I_ void os__enable_large_pages(void) {
 			ms_close_handle(token);
 		}
 	}
 }
-I_ void os_init(void) {
-	os__enable_large_pages();
+inline
+void os_init(void) {
+	// os__enable_large_pages();
 	os_system_info()->target_page_size = ms_get_larg_page_minimum();
 }
 I_ U8 os_vmem_reserve__u(U8 size, B4 no_large_pages, U8 base_addr) {
-	return cast(U8, VirtualAlloc(cast(MS_LPVOID, base_addr), size, MS_MEM_RESERVE,
+	return cast(U8, ms_virtual_alloc(cast(MS_LPVOID, base_addr), size, MS_MEM_RESERVE,
 		MS_PAGE_READWRITE /* | (opts->no_large_pages ? 0 : MS_MEM_LARGE_PAGES) */)
 	);
 }
-I_ B4   os_vmem_commit__u (U8 vm, U8 size) { return ms_virtual_alloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != null; }
+I_ B4   os_vmem_commit__u (U8 vm, U8 size, B4 no_large_pages) {
+	// if (no_large_pages == false ) { return 1; }
+	return ms_virtual_alloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != null;
+}
 I_ void os_vmem_release__u(U8 vm, U8 size) { ms_virtual_free(cast(MS_LPVOID, vm), 0, MS_MEM_RESERVE); }
 I_ U8 os__vmem_reserve( U8 size, Opts_vmem_R opts) {
 	assert(opts != nullptr);
 	return os_vmem_reserve__u(size, opts->no_large_pages, opts->base_addr);
 }
-I_ B4   os_vmem_commit (U8 vm, U8 size) { return os_vmem_commit__u(vm, size); }
+I_ B4 os__vmem_commit (U8 vm, U8 size, Opts_vmem_R opts) {
+	assert(opts != nullptr);
+	return os_vmem_commit__u(vm, size, opts->no_large_pages);
+}
 I_ void os_vmem_release(U8 vm, U8 size) { os_vmem_release__u(vm, size); }
 #pragma endregion OS
 #pragma region VArena (Virtual Address Space Arena)
 I_ U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
-I_ U8 varena__make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr) {
+inline U8 varena__make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr) {
 	if (reserve_size == 0) { reserve_size = mega(64); }
 	if (commit_size  == 0) { commit_size  = mega(64); }
-	U8 reg page       = os_system_info()->target_page_size;
-	U8 reg reserve_sz = align_pow2(reserve_size, page);
-	U8 reg commit_sz  = align_pow2(commit_size,  page);
-	B4 reg no_large   = (flags & VArenaFlag_NoLargePages) != 0;
+	U8 page       = os_system_info()->target_page_size;
+	U8 reserve_sz = align_pow2(reserve_size, page);
+	U8 commit_sz  = align_pow2(commit_size,  page);
+	B4 no_large   = (flags & VArenaFlag_NoLargePages) != 0;
 	U8 base = os_vmem_reserve__u(reserve_sz, no_large, base_addr); assert(base != 0);
-	B4 ok = os_vmem_commit__u(base, commit_sz); assert(ok != 0);
+	B4 ok = os_vmem_commit__u(base, commit_sz, no_large); assert(ok != 0);
 	U8 header     = varena_header_size();
 	U8 data_start = base + header;
-	u8_r(base + offset_of(VArena, reserve_start))[0] = data_start;
-	u8_r(base + offset_of(VArena, reserve      ))[0] = reserve_sz;
-	u8_r(base + offset_of(VArena, commit_size  ))[0] = commit_sz;
-	u8_r(base + offset_of(VArena, committed    ))[0] = commit_sz;
-	u8_r(base + offset_of(VArena, commit_used  ))[0] = header;
-	u4_r(base + offset_of(VArena, flags        ))[0] = flags;
+	u8_r(base + soff(VArena, reserve_start))[0] = data_start;
+	u8_r(base + soff(VArena, reserve      ))[0] = reserve_sz;
+	u8_r(base + soff(VArena, commit_size  ))[0] = commit_sz;
+	u8_r(base + soff(VArena, committed    ))[0] = commit_sz;
+	u8_r(base + soff(VArena, commit_used  ))[0] = header;
+	u4_r(base + soff(VArena, flags        ))[0] = flags;
 	return base;
 }
 inline
 void varena__push__u(U8 vm, U8 amount, U8 type_width, U8 alignment, U8 result) {
 	assert(result != null);
 	assert(vm     != null);
-	if (amount == 0) { struct_zero(Slice_Mem, result); return; }
-	U8 align          = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
+	if (amount == 0) { slice_clear(result); return; }
+	alignment = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
 	U8 requested_size = amount * type_width;
-	U8   reg aligned_size = align_pow2(requested_size, align);
-	U8_R reg commit_used  = u8_r(vm + offset_of(VArena, commit_used ));
-	U8   reg reserve_left = u8_r(vm + offset_of(VArena, reserve     ))[0] - commit_used[0];
-	if (aligned_size > reserve_left) { struct_zero(Slice_Mem, result); return; }
-	U8   reg committed    = u8_r(vm + offset_of(VArena, committed   ))[0];
+	U8   aligned_size = align_pow2(requested_size, alignment);
+	U8_R commit_used  = u8_r(vm + soff(VArena, commit_used ));
+	U8   to_be_used   = commit_used[0] + aligned_size;
+	U8   reserve_left = u8_r(vm + soff(VArena, reserve     ))[0] - commit_used[0];
+	U8   committed    = u8_r(vm + soff(VArena, committed   ))[0];
 	U8 commit_left = committed - commit_used[0];
-	if (commit_left < aligned_size) {
-		U8 reg commit_size = u8_r(vm + offset_of(VArena, commit_size))[0];
-		U8 reg next_commit = reserve_left > aligned_size ? max(commit_size, aligned_size) : reserve_left;
-		if (next_commit != 0) {
-			B4 no_large = (u4_r(vm + offset_of(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0;
-			U8 reg next_commit_start = vm + committed;
-			if (os_vmem_commit__u(next_commit_start, next_commit) == false) {
+	assert(to_be_used < reserve_left);
+	if (/*exhausted?*/commit_left < aligned_size) {
+		U8 commit_size      = u8_r(vm + soff(VArena, commit_size))[0];
+		U8 next_commit_size = reserve_left > aligned_size ? max(commit_size, aligned_size) : reserve_left;
+		if (next_commit_size != 0) {
+			B4 no_large_pages    = (u4_r(vm + soff(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0;
+			U8 next_commit_start = vm + committed;
+			if (os_vmem_commit__u(next_commit_start, next_commit_size, no_large_pages) == false) {
 				struct_zero(Slice_Mem, result);
 				return;
 			}
-			committed += next_commit;
-			u8_r(vm + offset_of(VArena, committed))[0] = committed;
+			committed += next_commit_size;
+			u8_r(vm + soff(VArena, committed))[0] = committed;
 		}
 	}
-	commit_used[0] += aligned_size; {
-		U8 reg current_offset = u8_r(vm + offset_of(VArena, reserve_start))[0] + commit_used[0];
-		struct_copy(Slice_Mem, result, (U8)& slice_mem(current_offset, requested_size));
-	}
+	commit_used[0] += aligned_size;
+	U8 current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + commit_used[0];
+	slice_assign(result, (U8)& slice_mem(current_offset, requested_size));
 }
 inline
 void varena__grow__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
 	assert(vm     != null);
 	assert(result != null);
-	U8 reg grow_amount = requested_size - old_len;
-	if (grow_amount == 0) { struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, old_len)); return; }
-	U8 reg current_offset = u8_r(vm + offset_of(VArena, reserve_start))[0] + u8_r(vm + offset_of(VArena, commit_used))[0];
+	U8 grow_amount = requested_size - old_len;
+	if (grow_amount == 0) { slice_assign(result, (U8)& slice_mem(old_ptr, old_len)); return; }
+	U8 current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + u8_r(vm + soff(VArena, commit_used))[0];
 	// Growing when not the last allocation is not allowed
 	assert(old_ptr == current_offset);
 	uvar(Slice_Mem, allocation); varena__push__u(vm, grow_amount, 1, alignment, u8_(allocation));
+	U8 a_ptr = u8_r(allocation + soff(Slice_Mem, ptr))[0];
+	U8 a_len = u8_r(allocation + soff(Slice_Mem, len))[0];
+	assert(a_ptr != 0);
+	mem_zero(a_ptr, a_len * should_zero);
+	slice_assign(result, (U8)& slice_mem(old_ptr, old_len + a_len));
 }
 inline
 void varena__shrink__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) {
 	assert(vm     != null);
 	assert(result != null);
+	U8 shrink_amount = old_len - requested_size;
+	if (lt_s(shrink_amount, 0)) { slice_assign(result, (U8)& slice_mem(old_ptr, old_len)); return; }
+	U8_R commit_used    = u8_r(vm + soff(VArena, commit_used));
+	U8   current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + commit_used[0]; assert(old_ptr == current_offset);
+	commit_used[0] -= shrink_amount;
+	slice_assign(result, (U8)& slice_mem(old_ptr, requested_size));
 }
 I_ void varena_release__u(U8 vm) {
 	assert(vm != null);
-	os_vmem_release__u(vm, u8_r(vm + offset_of(VArena, reserve))[0]);
+	os_vmem_release__u(vm, u8_r(vm + soff(VArena, reserve))[0]);
 }
 I_ void varena_reset__u(U8 vm) {
 	assert(vm != null);
-	u8_r(vm + offset_of(VArena, commit_used))[0] = 0;
+	u8_r(vm + soff(VArena, commit_used))[0] = 0;
 }
-I_ void varena_rewind__u(U8 vm, U8 sp_type_sig, U8 sp_slot) {
+I_ void varena_rewind__u(U8 vm, U8 sp_slot) {
 	assert(vm != null);
-	assert(sp_type_sig == (U8) varena_allocator_proc);
-	U8 reg header = varena_header_size();
+	U8 header = varena_header_size();
 	if (sp_slot < header) { sp_slot = header; }
-	u8_r(vm + offset_of(VArena, commit_used))[0] = sp_slot;
+	u8_r(vm + soff(VArena, commit_used))[0] = sp_slot;
 }
 I_ void varena_save__u(U8 vm, U8 sp_addr) {
 	assert(vm      != null);
 	assert(sp_addr != null);
-	u8_r(sp_addr + offset_of(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
-	u8_r(sp_addr + offset_of(AllocatorSP, slot    ))[0] = u8_r(vm + offset_of(VArena, commit_used))[0];
+	u8_r(sp_addr + soff(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
+	u8_r(sp_addr + soff(AllocatorSP, slot    ))[0] = u8_r(vm + soff(VArena, commit_used))[0];
 }
 I_ VArena* varena__make(Opts_varena_make*R_ opts) {
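The VArena above reserves a large address range once and commits pages lazily as commit_used approaches committed. A standalone model of the bookkeeping, with the two VirtualAlloc calls replaced by counters so the sketch runs anywhere (names and numbers are illustrative):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t reserve, committed, commit_used, commit_size; } VM;

    static int vm_push(VM* vm, uint64_t aligned_size) {
        uint64_t commit_left = vm->committed - vm->commit_used;
        if (commit_left < aligned_size) {               // need more committed pages
            uint64_t next = aligned_size > vm->commit_size ? aligned_size : vm->commit_size;
            if (vm->committed + next > vm->reserve) return 0; // reservation exhausted
            vm->committed += next;                      // models VirtualAlloc(MEM_COMMIT)
        }
        vm->commit_used += aligned_size;
        return 1;
    }

    int main(void) {
        VM vm = { .reserve = 1 << 20, .committed = 4096, .commit_size = 4096 };
        assert(vm_push(&vm, 8192));           // needs one extra commit round
        assert(vm.committed == 8192 + 4096);  // grew by max(request, commit_size)
        assert(!vm_push(&vm, 2 << 20));       // larger than the whole reservation
        return 0;
    }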
@@ -938,7 +1075,8 @@ I_ void varena_release(VArena_R vm) { varena_release__u(u8_(vm)); }
 I_ void varena_reset  (VArena_R vm) { varena_reset__u  (u8_(vm)); }
 I_ void varena_rewind (VArena_R vm, AllocatorSP save_point) {
-	varena_rewind__u(u8_(vm), u8_(save_point.type_sig), save_point.slot);
+	assert(save_point.type_sig == varena_allocator_proc);
+	varena_rewind__u(u8_(vm), save_point.slot);
 }
 I_ AllocatorSP varena_save(VArena_R vm) { AllocatorSP sp; varena_save__u(u8_(vm), u8_(& sp)); return sp; }
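The rewind change above moves the type_sig check out of the untyped __u layer into the typed wrapper: a save point is just {owning proc, cursor slot}, and rewind asserts ownership before restoring the cursor. A standalone restatement of that protocol:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { void* type_sig; uint64_t slot; } AllocatorSP;
    typedef struct { uint64_t used; } Arena;

    static void arena_proc(void) {}                        // stands in for the allocator proc

    static AllocatorSP arena_save(Arena* a) { return (AllocatorSP){ (void*) arena_proc, a->used }; }
    static void arena_rewind(Arena* a, AllocatorSP sp) {
        assert(sp.type_sig == (void*) arena_proc);         // sp must come from this allocator kind
        a->used = sp.slot;
    }

    int main(void) {
        Arena a = { 0 };
        AllocatorSP sp = arena_save(&a);                   // mark
        a.used = 128;                                      // allocate some
        arena_rewind(&a, sp);                              // roll back to the mark
        assert(a.used == 0);
        return 0;
    }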
@@ -946,16 +1084,16 @@ void varena_allocator_proc(U8 vm, U8 requested_size, U8 alignment, U8 old_ptr, U
 {
 	assert(vm       != null);
 	assert(out_addr != null);
-	U8 out_allocation = out_addr ? out_addr + offset_of(AllocatorProc_Out, allocation) : 0;
+	U8 out_allocation = out_addr ? out_addr + soff(AllocatorProc_Out, allocation) : 0;
 	switch (op)
 	{
 		case AllocatorOp_Alloc:
 		case AllocatorOp_Alloc_NoZero:
 			varena__push__u(vm, requested_size, 1, alignment, out_allocation);
 			if (op == AllocatorOp_Alloc) {
-				U8 ptr = u8_r(out_allocation + offset_of(Slice_Mem, ptr))[0];
-				U8 len = u8_r(out_allocation + offset_of(Slice_Mem, len))[0];
-				if (ptr && len) { memory_zero(ptr, len); }
+				U8 ptr = u8_r(out_allocation + soff(Slice_Mem, ptr))[0];
+				U8 len = u8_r(out_allocation + soff(Slice_Mem, len))[0];
+				if (ptr && len) { mem_zero(ptr, len); }
 			}
 		break;
@@ -970,33 +1108,254 @@ void varena_allocator_proc(U8 vm, U8 requested_size, U8 alignment, U8 old_ptr, U
|
|
|
|
varena__shrink__u(out_allocation, vm, old_ptr, old_len, requested_size, alignment);
|
|
|
|
varena__shrink__u(out_allocation, vm, old_ptr, old_len, requested_size, alignment);
|
|
|
|
break;
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
|
|
case AllocatorOp_Rewind: varena_rewind__u(vm, old_ptr, old_len); break;
|
|
|
|
case AllocatorOp_Rewind: varena_rewind__u(vm, old_len); break;
|
|
|
|
case AllocatorOp_SavePoint: varena_save__u (vm, out_addr + offset_of(AllocatorProc_Out, save_point)); break;
|
|
|
|
case AllocatorOp_SavePoint: varena_save__u (vm, out_addr + soff(AllocatorProc_Out, save_point)); break;
|
|
|
|
|
|
|
|
|
|
|
|
case AllocatorOp_Query:
|
|
|
|
case AllocatorOp_Query:
|
|
|
|
u4_r(out_addr + offset_of(AllocatorQueryInfo, features))[0] =
|
|
|
|
u4_r(out_addr + soff(AllocatorQueryInfo, features))[0] =
|
|
|
|
AllocatorQuery_Alloc
|
|
|
|
AllocatorQuery_Alloc
|
|
|
|
| AllocatorQuery_Reset
|
|
|
|
| AllocatorQuery_Reset
|
|
|
|
| AllocatorQuery_Resize
|
|
|
|
| AllocatorQuery_Resize
|
|
|
|
| AllocatorQuery_Rewind;
|
|
|
|
| AllocatorQuery_Rewind;
|
|
|
|
U8 reserve = u8_r(vm + offset_of(VArena, reserve ))[0];
|
|
|
|
U8 reserve = u8_r(vm + soff(VArena, reserve ))[0];
|
|
|
|
U8 committed = u8_r(vm + offset_of(VArena, committed))[0];
|
|
|
|
U8 committed = u8_r(vm + soff(VArena, committed))[0];
|
|
|
|
U8 max_alloc = (reserve > committed) ? (reserve - committed) : 0;
|
|
|
|
U8 max_alloc = (reserve > committed) ? (reserve - committed) : 0;
|
|
|
|
u8_r(out_addr + offset_of(AllocatorQueryInfo, max_alloc))[0] = max_alloc;
|
|
|
|
u8_r(out_addr + soff(AllocatorQueryInfo, max_alloc))[0] = max_alloc;
|
|
|
|
u8_r(out_addr + offset_of(AllocatorQueryInfo, min_alloc))[0] = kilo(4);
|
|
|
|
u8_r(out_addr + soff(AllocatorQueryInfo, min_alloc))[0] = kilo(4);
|
|
|
|
u8_r(out_addr + offset_of(AllocatorQueryInfo, left ))[0] = max_alloc;
|
|
|
|
u8_r(out_addr + soff(AllocatorQueryInfo, left ))[0] = max_alloc;
|
|
|
|
AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(vm + offset_of(VArena, commit_used))[0] };
|
|
|
|
AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(vm + soff(VArena, commit_used))[0] };
|
|
|
|
struct_copy(AllocatorSP, out_addr + offset_of(AllocatorQueryInfo, save_point), (U8)& sp);
|
|
|
|
struct_assign(AllocatorSP, out_addr + soff(AllocatorQueryInfo, save_point), (U8)& sp);
|
|
|
|
break;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#pragma endregion VArena
|
|
|
|
#pragma endregion VArena
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#pragma region Arena
|
|
|
|
|
|
|
|
I_ U8 arena_header_size(void) { return align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT); }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
U8 arena_make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr) {
|
|
|
|
|
|
|
|
U8 header_size = arena_header_size();
|
|
|
|
|
|
|
|
U8 current = varena__make__u(reserve_size, commit_size, flags, base_addr); assert(current != null);
|
|
|
|
|
|
|
|
U8 arena; varena__push__u(current, header_size, 1, MEMORY_ALIGNMENT_DEFAULT, (U8)& arena);
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, backing ))[0] = current;
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, prev ))[0] = null;
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, current ))[0] = arena;
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, base_pos))[0] = 0;
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, pos ))[0] = header_size;
|
|
|
|
|
|
|
|
u8_r(arena + soff(Arena, flags ))[0] = flags;
|
|
|
|
|
|
|
|
return arena;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
void arena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 out_mem) {
|
|
|
|
|
|
|
|
assert(arena != null);
|
|
|
|
|
|
|
|
U8 active = u8_r(arena + soff(Arena, current ))[0];
|
|
|
|
|
|
|
|
U8 size_requested = amount * type_width;
|
|
|
|
|
|
|
|
alignment = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
|
|
|
|
|
|
|
|
U8 size_aligned = align_pow2(size_requested, alignment);
|
|
|
|
|
|
|
|
U8 pos_pre = u8_r(active + soff(Arena, pos))[0];
|
|
|
|
|
|
|
|
U8 pos_pst = pos_pre + size_aligned;
|
|
|
|
|
|
|
|
U8 backing = active + soff(Arena, backing);
|
|
|
|
|
|
|
|
U8 reserve = u8_r(backing + soff(VArena, reserve))[0];
|
|
|
|
|
|
|
|
B4 should_chain =
|
|
|
|
|
|
|
|
((u8_r(arena + soff(Arena, flags))[0] & ArenaFlag_NoChain) == 0)
|
|
|
|
|
|
|
|
&& reserve < pos_pst;
|
|
|
|
|
|
|
|
if (should_chain)
|
|
|
|
|
|
|
|
{
|
|
|
|
|
|
|
|
U8 current = arena + soff(Arena, current);
|
|
|
|
|
|
|
|
U8 new_arena = arena_make__u(
|
|
|
|
|
|
|
|
reserve,
|
|
|
|
|
|
|
|
u8_r(backing + soff(VArena, commit_size))[0],
|
|
|
|
|
|
|
|
u4_r(backing + soff(VArena, flags ))[0],
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
|
);
|
|
|
|
|
|
|
|
u8_r(new_arena + soff(Arena, base_pos))[0] = u8_r(active + soff(Arena, base_pos))[0] + reserve;
|
|
|
|
|
|
|
|
u8_r(new_arena + soff(Arena, prev ))[0] = u8_r(current)[0];
|
|
|
|
|
|
|
|
u8_r(current)[0] = new_arena;
|
|
|
|
|
|
|
|
active = u8_r(current)[0];
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
U8 result = active + pos_pre;
|
|
|
|
|
|
|
|
varena__push__u(u8_r(backing)[0], size_aligned, 1, alignment, out_mem);
|
|
|
|
|
|
|
|
assert(u8_r(out_mem + soff(Slice_Mem, ptr))[0] == result);
|
|
|
|
|
|
|
|
assert(u8_r(out_mem + soff(Slice_Mem, len))[0] > 0);
|
|
|
|
|
|
|
|
u8_r(active + soff(Arena, pos))[0] = pos_pst;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
I_ void arena_release__u(U8 arena) {
|
|
|
|
|
|
|
|
assert(arena != null);
|
|
|
|
|
|
|
|
U8 curr = arena + soff(Arena, current);
|
|
|
|
|
|
|
|
U8 prev = null;
|
|
|
|
|
|
|
|
for (; u8_r(curr)[0] != null; curr = prev) {
|
|
|
|
|
|
|
|
u8_r(prev)[0] = u8_r(curr + soff(Arena, prev))[0];
|
|
|
|
|
|
|
|
varena_release__u(u8_r(curr)[0]);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
I_ void arena_reset__u(U8 arena) { arena_rewind__u(arena, 0); }

void arena_rewind__u(U8 arena, U8 slot) {
    assert(arena != null);
    U8 header_size = arena_header_size();
    U8 curr    = u8_r(arena + soff(Arena, current))[0];
    U8 big_pos = clamp_bot(header_size, slot);
    // Release chained arenas whose window begins at or past the rewind target.
    for (U8 prev = null; u8_r(curr + soff(Arena, base_pos))[0] >= big_pos; curr = prev) {
        prev = u8_r(curr + soff(Arena, prev))[0];
        varena_release__u(u8_r(curr + soff(Arena, backing))[0]);
    }
    u8_r(arena + soff(Arena, current))[0] = curr;
    U8 new_pos = big_pos - u8_r(curr + soff(Arena, base_pos))[0]; assert(new_pos <= u8_r(curr + soff(Arena, pos))[0]);
    u8_r(curr + soff(Arena, pos))[0] = new_pos;
}

I_ void arena_save__u(U8 arena, U8 out_sp) {
    U8 current = u8_r(arena + soff(Arena, current))[0];
    u8_r(out_sp + soff(AllocatorSP, type_sig))[0] = (U8) arena_allocator_proc;
    // Slot is the chain-global position: the active link's window start plus
    // its local pos; arena_rewind__u expects exactly this encoding.
    u8_r(out_sp + soff(AllocatorSP, slot    ))[0] =
        u8_r(current + soff(Arena, base_pos))[0]
    +   u8_r(current + soff(Arena, pos     ))[0];
}

void arena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, U8 out_addr)
{
    assert(out_addr != null);
    assert(arena    != null);
    switch (op)
    {
        case AllocatorOp_Alloc:
        case AllocatorOp_Alloc_NoZero:
        break;

        case AllocatorOp_Free:  break;
        case AllocatorOp_Reset: break;

        case AllocatorOp_Grow:
        case AllocatorOp_Grow_NoZero: break;
        case AllocatorOp_Shrink:      break;

        case AllocatorOp_Rewind:    break;
        case AllocatorOp_SavePoint: break;

        case AllocatorOp_Query:
        break;
    }
}
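// The op cases above are still stubs. A hedged sketch of how Alloc could route
// through arena__push__u, mirroring the VArena allocator proc (assumes a
// mem_zero helper alongside mem_copy; that name is illustrative, not from the
// source):
//   case AllocatorOp_Alloc:
//   case AllocatorOp_Alloc_NoZero:
//       arena__push__u(arena, requested_size, 1, alignment, out_addr);
//       if (op == AllocatorOp_Alloc)
//           mem_zero(u8_r(out_addr + soff(Slice_Mem, ptr))[0], u8_r(out_addr + soff(Slice_Mem, len))[0]);
//       break;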

#pragma endregion Arena

#pragma region Key Table Linear (KTL)
I_ void ktl_populate_slice_a2_str8(U8 kt, U8 backing_ptr, U8 backing_len, U8 values) {
    assert(kt != null);
    U8 values_len = u8_r(values + soff(Slice_A2_Str8, len))[0];
    if (values_len == 0) return;
    mem__alloc__u(kt, backing_ptr, backing_len, size_of(KTL_Slot_Str8) * values_len, 0, false);
    U8 slots = u8_r(kt + soff(KTL_Str8, ptr))[0];
    for (U8 id = 0; id < values_len; ++id) {
        // Slot address is table base + slot stride * index (not field offset * index).
        U8 kt_slot = slots + size_of(KTL_Slot_Str8) * id;
        U8 value   = u8_r(values + soff(Slice_A2_Str8, ptr))[0] + size_of(A2_Str8) * id;
        // Each A2_Str8 entry is a [key, value] pair of Str8: entry[1] is copied
        // into the slot value, entry[0] is hashed into the slot key.
        mem_copy(kt_slot + soff(KTL_Slot_Str8, value), value + size_of(Str8) * 1, size_of(Str8));
        hash64__fnv1a__u(kt_slot + soff(KTL_Slot_Str8, key), value);
    }
}
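// Usage sketch (hedged; kt is a KTL_Str8 slice that mem__alloc__u fills, and
// backing_ptr/backing_len name the backing allocator pair):
//   U8 kt[2] = {0, 0};
//   ktl_populate_slice_a2_str8((U8)& kt[0], backing_ptr, backing_len, (U8)& values);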

#pragma endregion KTL

#pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX)
#pragma endregion KT1CX

#pragma region String Operations
#pragma endregion String Operations

#pragma region File System
#pragma endregion File System

#pragma region Debug
#if defined(BUILD_DEBUG)
// #include <stdio.h>
#define MS_CRT_INTERNAL_LOCAL_PRINTF_OPTIONS (*__local_stdio_printf_options())
#define MS_stderr                 (__acrt_iob_func(2))
#define MS__crt_va_start_a(ap, x) ((void)(__va_start(&ap, x)))
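// Win x64 varargs: every stack slot is 8 bytes; values wider than 8 bytes or
// with a non-power-of-two size are passed by reference, hence the extra
// indirection in the first branch of MS__crt_va_arg below.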
#define MS__crt_va_arg(ap, t)                                                \
    ((sizeof(t) > sizeof(__int64) || (sizeof(t) & (sizeof(t) - 1)) != 0)     \
        ? **(t**)((ap += sizeof(__int64)) - sizeof(__int64))                 \
        :  *(t* )((ap += sizeof(__int64)) - sizeof(__int64)))
#define MS__crt_va_end(ap) ((void)(ap = (va_list)0))
#define va_start(ap, x) MS__crt_va_start_a(ap, x)
#define va_arg          MS__crt_va_arg
#define va_end          MS__crt_va_end
#define va_copy(destination, source) ((destination) = (source))

typedef def_struct(__crt_locale_pointers) { struct __crt_locale_data* locinfo; struct __crt_multibyte_data* mbcinfo; };
typedef __crt_locale_pointers* _locale_t;
typedef char*                  va_list;
MS_FILE* __cdecl __acrt_iob_func(unsigned _Ix);
N_
U8* __cdecl __local_stdio_printf_options(void) {
    // NOTE(CRT): This function must not be inlined into callers to avoid ODR violations. The
    // static local variable has different names in C and in C++ translation units.
    local_persist U8 _OptionsStorage; return &_OptionsStorage;
}
int __cdecl __stdio_common_vfprintf_s(
    U8          _Options,
    MS_FILE*    _Stream,
    char const* _Format,
    _locale_t   _Locale,
    va_list     _ArgList
);
void __cdecl __va_start(va_list*, ...);
I_ int printf_err(char const* fmt, ...) {
    int result;
    va_list args;
    va_start(args, fmt);
    result = __stdio_common_vfprintf_s(MS_CRT_INTERNAL_LOCAL_PRINTF_OPTIONS, MS_stderr, fmt, nullptr, args);
    va_end(args);
    return result;
}
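// In the UCRT headers fprintf is an inline wrapper over the
// __stdio_common_vfprintf family; declaring the import by hand keeps this
// file stdio.h-free. Usage is printf-style, e.g. printf_err("%s\n", "oops");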
void assert_handler( UTF8*R_ condition, UTF8*R_ file, UTF8*R_ function, S4 line, UTF8*R_ msg, ... ) {
    printf_err( "%s - %s:(%d): Assert Failure: ", file, function, line );
    if ( condition )
        printf_err( "`%s` \n", condition );
    if ( msg ) {
        va_list va = {0};
        va_start( va, msg );
        __stdio_common_vfprintf_s(MS_CRT_INTERNAL_LOCAL_PRINTF_OPTIONS, MS_stderr, msg, nullptr, va);
        va_end( va );
    }
    printf_err( "%s", "\n" );
}
#endif
#pragma endregion Debug

#pragma region WATL
#pragma endregion WATL

#pragma endregion Implementation
int main(void)
{
os_init();

    VArena_R vm_file = varena_make(.reserve_size = giga(4), .flags = VArenaFlag_NoLargePages);
    // FileOpInfo file = file_read_contents(lit("watl.v0.llvm.lottes.c"), .backing = ainfo_varena(vm_file));
    // slice_assert(file.content);

    Arena_R a_msgs = arena_make();
    Arena_R a_toks = arena_make();
    // WATL_LexInfo lex_res = watl_lex(pcast(Str8, file.content),
    //     .ainfo_msgs = ainfo_arena(a_msgs),
    //     .ainfo_toks = ainfo_arena(a_toks),
    // );
    // assert((lex_res.signal & WATL_LexStatus_MemFail_SliceConstraintFail) == 0);

    // Arena_R str_cache_kt1_ainfo = arena_make();
    // Str8Cache str_cache = str8cache_make(
    //     .str_reserve    = ainfo_arena(arena_make(.reserve_size = mega(256))),
    //     .cell_reserve   = ainfo_arena(str_cache_kt1_ainfo),
    //     .tbl_backing    = ainfo_arena(str_cache_kt1_ainfo),
    //     .cell_pool_size = kilo(8),
    //     .table_size     = kilo(64),
    // );

    // Arena_R a_lines = arena_make();
    // WATL_ParseInfo parse_res = watl_parse(lex_res.toks,
    //     .ainfo_msgs  = ainfo_arena(a_msgs),
    //     .ainfo_nodes = ainfo_arena(a_toks),
    //     .ainfo_lines = ainfo_arena(a_lines),
    //     .str_cache   = & str_cache
    // );
    // assert((parse_res.signal & WATL_ParseStatus_MemFail_SliceConstraintFail) == 0);

    // arena_reset(a_msgs);
    // arena_reset(a_toks);
    // Str8 listing = watl_dump_listing(ainfo_arena(a_msgs), parse_res.lines);
    // file_write_str8(lit("watl.v0.lottes.c.listing.txt"), listing);
    // return 0;
}

#pragma clang diagnostic pop