From d7790795ddf50847be897cc65be546fecb3a6511 Mon Sep 17 00:00:00 2001
From: Ed_
Date: Thu, 6 Nov 2025 19:23:58 -0500
Subject: [PATCH] replace soff() with def_field/offset_of field-offset enums; rework Odin WATL ports

---
 C/watl.v0.llvm.lottes.c           | 511 +++++++++++++++++++++---------
 C/watl.v0.llvm.lottes_hybrid.c    |   1 +
 Odin/watl.v0.ideomatic.odin       |  61 ++++
 Odin/watl.v0.win32.ideomatic.odin |   0
 Odin/watl.v0.win32.odin           | 326 +++++++------------
 Odin/watl.win32.lottes.odin       |   0
 6 files changed, 545 insertions(+), 354 deletions(-)
 create mode 100644 Odin/watl.v0.ideomatic.odin
 delete mode 100644 Odin/watl.v0.win32.ideomatic.odin
 delete mode 100644 Odin/watl.win32.lottes.odin

diff --git a/C/watl.v0.llvm.lottes.c b/C/watl.v0.llvm.lottes.c
index 54a5989..f1a4704 100644
--- a/C/watl.v0.llvm.lottes.c
+++ b/C/watl.v0.llvm.lottes.c
@@ -118,6 +118,7 @@ enum { false = 0, true = 1, true_overflow, };
 #define def_farray_impl(_type, _len) _type def_farray_sym(_type, _len)[_len]; typedef def_ptr_set(def_farray_sym(_type, _len))
 #define def_farray(type, len) def_farray_impl(type, len)
 #define def_enum(underlying_type, symbol) underlying_type def_tset(symbol); enum symbol
+#define def_field(s,member) tmpl(s,member) = __builtin_offsetof(s,member) // Used within enum blocks
 #define def_struct(symbol) struct symbol def_tset(symbol); struct symbol
 #define def_union(symbol) union symbol def_tset(symbol); union symbol
 #define def_proc(symbol) symbol
@@ -129,7 +130,7 @@ enum { false = 0, true = 1, true_overflow, };
 #define pcast(type, data) cast(type*, & (data))[0]
 #define nullptr cast(void*, 0)
 #define null cast(U8, 0)
-#define soff(type, member) cast(U8, & (((type*) 0)->member)) // offset_of
+#define offset_of(type, member) cast(U8,__builtin_offsetof(type,member))
 #define size_of(data) cast(U8, sizeof(data))

 #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
@@ -268,6 +269,10 @@ I_ U8 align_pow2(U8 x, U8 b) {
 #define size_of_slice_type(slice) size_of( (slice).ptr[0] )

 typedef def_struct(Slice_Mem) { U8 ptr; U8 len; };
+enum {
+	Slice_ptr = offset_of(Slice_Mem, ptr),
+	Slice_len = offset_of(Slice_Mem, len),
+};
 #define slice_mem(ptr, len) ((Slice_Mem){u8_(ptr), u8_(len)})
 #define slice_mem_s(slice) ((Slice_Mem){u8_((slice).ptr), (slice).len * size_of_slice_type(slice) })

@@ -294,16 +299,16 @@ I_ void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typew
 #define slice_arg_from_array(type, ...) 
& (tmpl(Slice,type)) { .ptr = farray_init(type, __VA_ARGS__), .len = farray_len( farray_init(type, __VA_ARGS__)) } I_ void slice_assign(U8 dest, U8 src) { - u8_r(dest + soff(Slice_Mem, ptr))[0] = u8_r(src + soff(Slice_Mem, ptr))[0]; - u8_r(dest + soff(Slice_Mem, len))[0] = u8_r(src + soff(Slice_Mem, len))[0]; + u8_r(dest + Slice_ptr)[0] = u8_r(src + Slice_ptr)[0]; + u8_r(dest + Slice_len)[0] = u8_r(src + Slice_len)[0]; } I_ void slice_assign_comp(U8 dest, U8 ptr, U8 len) { - u8_r(dest + soff(Slice_Mem, ptr))[0] = ptr; - u8_r(dest + soff(Slice_Mem, len))[0] = len; + u8_r(dest + Slice_ptr)[0] = ptr; + u8_r(dest + Slice_len)[0] = len; } I_ void slice_clear(U8 base) { - u8_r(base + soff(Slice_Mem, ptr))[0] = 0; - u8_r(base + soff(Slice_Mem, len))[0] = 0; + u8_r(base + Slice_ptr)[0] = 0; + u8_r(base + Slice_len)[0] = 0; } #define span_iter(type, iter, m_begin, op, m_end) \ @@ -355,7 +360,6 @@ typedef def_enum(U4, AllocatorQueryFlags) { // Ability to rewind to a save point (ex: arenas, stack), must also be able to save such a point AllocatorQuery_Rewind = (1 << 6), }; -typedef struct AllocatorProc_In def_tset(AllocatorProc_In); typedef struct AllocatorProc_Out def_tset(AllocatorProc_Out); typedef struct AllocatorSP AllocatorSP; typedef void def_proc(AllocatorProc) (U8 data, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out); @@ -363,16 +367,9 @@ struct AllocatorSP { AllocatorProc* type_sig; U8 slot; }; -struct AllocatorProc_In { - U8 data; - U8 requested_size; - U8 alignment; - union { - Slice_Mem old_allocation; - AllocatorSP save_point; - }; - AllocatorOp op; - A4_B1 _PAD_; +enum { + def_field(AllocatorSP,type_sig), + def_field(AllocatorSP,slot), }; struct AllocatorProc_Out { union { @@ -386,11 +383,23 @@ struct AllocatorProc_Out { U8 min_alloc; A4_B1 _PAD_2; }; +enum { + def_field(AllocatorProc_Out,allocation), + def_field(AllocatorProc_Out,save_point), + def_field(AllocatorProc_Out,features), + def_field(AllocatorProc_Out,left), + def_field(AllocatorProc_Out,max_alloc), + def_field(AllocatorProc_Out,min_alloc), +}; typedef def_struct(AllocatorInfo) { AllocatorProc* proc; U8 data; }; static_assert(size_of(AllocatorSP) <= size_of(Slice_Mem)); +enum { + def_field(AllocatorInfo,proc), + def_field(AllocatorInfo,data), +}; typedef def_struct(AllocatorQueryInfo) { AllocatorSP save_point; AllocatorQueryFlags features; @@ -401,6 +410,13 @@ typedef def_struct(AllocatorQueryInfo) { A4_B1 _PAD_2; }; static_assert(size_of(AllocatorProc_Out) == size_of(AllocatorQueryInfo)); +enum { + def_field(AllocatorQueryInfo,save_point), + def_field(AllocatorQueryInfo,features), + def_field(AllocatorQueryInfo,left), + def_field(AllocatorQueryInfo,max_alloc), + def_field(AllocatorQueryInfo,min_alloc), +}; #define MEMORY_ALIGNMENT_DEFAULT (2 * size_of(void*)) @@ -451,6 +467,11 @@ typedef def_struct(FArena) { U8 capacity; U8 used; }; +enum { + def_field(FArena,start), + def_field(FArena,capacity), + def_field(FArena,used), +}; I_ void farena_init__u (U8 arena, U8 mem_ptr, U8 mem_len); S_ void farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr); @@ -478,10 +499,15 @@ cast(type*, farena__push(arena, size_of(type), 1, opt_args(Opts_farena, __VA_ARG #pragma endregion FArena #pragma region OS -typedef def_struct(OS_SystemInfo) { U8 target_page_size; }; -typedef def_struct(Opts_vmem) { U8 base_addr; B4 no_large_pages; A4_B1 _PAD_; }; - -typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; }; +typedef def_struct(OS_SystemInfo) { U8 
target_page_size; }; +typedef def_struct(Opts_vmem) { U8 base_addr; B4 no_large_pages; A4_B1 _PAD_; }; +typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; }; +enum { + def_field(OS_SystemInfo,target_page_size), + def_field(Opts_vmem,base_addr), + def_field(Opts_vmem,no_large_pages), + def_field(OS_Windows_State,system_info), +}; G_ OS_Windows_State os__windows_info; I_ U8 os_system_info(void); @@ -512,12 +538,26 @@ typedef def_struct(VArena) { U8 commit_used; VArenaFlags flags; }; +enum { + def_field(VArena,reserve_start), + def_field(VArena,reserve), + def_field(VArena,commit_size), + def_field(VArena,committed), + def_field(VArena,commit_used), + def_field(VArena,flags), +}; typedef def_struct(Opts_varena_make) { U8 base_addr; U8 reserve_size; U8 commit_size; VArenaFlags flags; }; +enum { + def_field(Opts_varena_make,base_addr), + def_field(Opts_varena_make,reserve_size), + def_field(Opts_varena_make,commit_size), + def_field(Opts_varena_make,flags), +}; S_ U8 varena__make__u (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr); I_ void varena_release__u(U8 arena); @@ -566,6 +606,14 @@ typedef def_struct(Arena) { ArenaFlags flags; A4_B1 _PAD_; }; +enum { + def_field(Arena,backing), + def_field(Arena,prev), + def_field(Arena,current), + def_field(Arena,base_pos), + def_field(Arena,pos), + def_field(Arena,flags), +}; S_ U8 arena_make__u (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr); S_ void arena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignemnt, U8 out_mem); @@ -629,6 +677,11 @@ def_struct(tmpl(KTL_Slot,type)) { \ def_Slice(tmpl(KTL_Slot,type)); \ typedef tmpl(Slice_KTL_Slot,type) tmpl(KTL,type) +enum { + KTL_Slot_key = 0, + KTL_Slot_value = 8, +}; + typedef Slice_Mem KTL_Byte; typedef def_struct(KTL_Meta) { U8 slot_size; @@ -645,6 +698,95 @@ I_ void ktl_populate_slice_a2_str8(U8 kt, U8 backing_proc, U8 backing_data, U8 v #pragma endregion KTL #pragma region Key Table 1-Layer Chained-Chunked-Cells (KT1CX) +#define def_KT1CX_Slot(type) \ +def_struct(tmpl(KT1CX_Slot,type)) { \ + type value; \ + U8 key; \ + B4 occupied; \ + A4_B1 _PAD_; \ +} +#define def_KT1CX_Cell(type, depth) \ +def_struct(tmpl(KT1CX_Cell,type)) { \ + tmpl(KT1CX_Slot,type) slots[depth]; \ + tmpl(KT1CX_Slot,type)* next; \ +} +#define def_KT1CX(type) \ +def_struct(tmpl(KT1CX,type)) { \ + tmpl(Slice_KT1CX_Cell,type) table; \ +} +typedef def_struct(KT1CX_Byte_Slot) { + U8 key; + B4 occupied; + A4_B1 _PAD_; +}; +typedef def_struct(KT1CX_Byte_Cell) { + U8 next; +}; +typedef def_struct(KT1CX_Byte) { + Slice_Mem table; +}; +typedef def_struct(KT1CX_ByteMeta) { + U8 slot_size; + U8 slot_key_offset; + U8 cell_next_offset; + U8 cell_depth; + U8 cell_size; + U8 type_width; + Str8 type_name; +}; +typedef def_struct(KT1CX_InfoMeta) { + U8 cell_pool_size; + U8 table_size; + U8 slot_size; + U8 slot_key_offset; + U8 cell_next_offset; + U8 cell_depth; + U8 cell_size; + U8 type_width; + Str8 type_name; +}; +typedef def_struct(KT1CX_Info) { + AllocatorInfo backing_table; + AllocatorInfo backing_cells; +}; +enum { + def_field(KT1CX_Byte_Slot,key), + def_field(KT1CX_Byte_Slot,occupied), + + def_field(KT1CX_ByteMeta,slot_size), + def_field(KT1CX_ByteMeta,slot_key_offset), + def_field(KT1CX_ByteMeta,cell_next_offset), + def_field(KT1CX_ByteMeta,cell_depth), + def_field(KT1CX_ByteMeta,cell_size), + def_field(KT1CX_ByteMeta,type_width), + def_field(KT1CX_ByteMeta,type_name), + + def_field(KT1CX_InfoMeta,cell_pool_size), + def_field(KT1CX_InfoMeta,table_size), + def_field(KT1CX_InfoMeta,slot_size), + 
def_field(KT1CX_InfoMeta,slot_key_offset), + def_field(KT1CX_InfoMeta,cell_next_offset), + def_field(KT1CX_InfoMeta,cell_depth), + def_field(KT1CX_InfoMeta,cell_size), + def_field(KT1CX_InfoMeta,type_width), + def_field(KT1CX_InfoMeta,type_name), +}; +S_ void kt1cx_init__u (U8 backing_tbl, U8 backing_cells, U8 m, U8 result); +S_ void kt1cx_clear__u (U8 kt, U8 m); +I_ U8 kt1cx_slot_id__u(U8 kt, U8 key); +S_ U8 kt1cx_get__u (U8 kt, U8 key, U8 m); +S_ U8 kt1cx_set__u (U8 kt, U8 key, U8 v_ptr, U8 v_len, U8 backing_cells, U8 m); + +I_ void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result); +I_ void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta); +I_ U8 kt1cx_slot_id(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta); +I_ U8 kt1cx_get (KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta); +I_ U8 kt1cx_set (KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta); + +#define kt1cx_assert(kt) do { \ + slice_assert(kt.table); \ +} while(0) +#define kt1cx_byte(kt) (KT1CX_Byte){ (Slice_Mem){u8_(kt.table.ptr), kt.table.len} } #pragma endregion KT1CX #pragma region String Operations @@ -681,26 +823,26 @@ I_ void mem_save_point__u(U8 proc, U8 data, U8 sp) { assert(proc != null); uvar(AllocatorProc_Out, out) = {0}; cast(AllocatorProc*, proc)(data, 0, 0, 0, 0, AllocatorOp_SavePoint, u8_(out)); - struct_assign(AllocatorSP, sp, (U8) out + soff(AllocatorProc_Out, save_point)); + struct_assign(AllocatorSP, sp, (U8) out + AllocatorProc_Out_save_point); } I_ void mem__alloc__u(U8 out_mem, U8 proc, U8 data, U8 size, U8 alignment, B4 no_zero) { assert(proc != null); uvar(AllocatorProc_Out, out) = {0}; cast(AllocatorProc*, proc)(data, size, alignment, 0, 0, no_zero ? AllocatorOp_Alloc_NoZero : AllocatorOp_Alloc, u8_(out)); - slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation)); + slice_assign(out_mem, (U8) out + AllocatorProc_Out_allocation); } I_ void mem__grow__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment, B4 no_zero, B4 give_actual) { assert(proc != null); uvar(AllocatorProc_Out, out) = {0}; cast(AllocatorProc*, proc)(data, size, alignment, old_ptr, old_len, no_zero ? 
AllocatorOp_Grow_NoZero : AllocatorOp_Grow, u8_(out));
-	if (give_actual == false) { u8_r(out + soff(AllocatorProc_Out, allocation) + soff(Slice_Mem, len))[0] = size; }
-	slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation));
+	if (give_actual == false) { u8_r(out + AllocatorProc_Out_allocation + Slice_len)[0] = size; }
+	slice_assign(out_mem, (U8) out + AllocatorProc_Out_allocation);
 }
 I_ void mem__shrink__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment) {
 	assert(proc != null);
 	uvar(AllocatorProc_Out, out) = {0};
 	cast(AllocatorProc*, proc)(data, size, alignment, old_ptr, old_len, AllocatorOp_Shrink, u8_(out));
-	slice_assign(out_mem, (U8) out + soff(AllocatorProc_Out, allocation));
+	slice_assign(out_mem, (U8) out + AllocatorProc_Out_allocation);
 }
 I_ void mem__resize__u(U8 out_mem, U8 proc, U8 data, U8 old_ptr, U8 old_len, U8 size, U8 alignment, B4 no_zero, B4 give_actual) {
 	if (old_len == size) { slice_assign_comp(out_mem, old_ptr, old_len); }
@@ -742,27 +884,27 @@ I_ Slice_Mem mem__shrink(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_s
 #pragma region FArena (Fixed-Sized Arena)
 I_ void farena_init__u(U8 arena, U8 mem_ptr, U8 mem_len) {
 	assert(arena != null);
-	u8_r(arena + soff(FArena, start) )[0] = mem_ptr;
-	u8_r(arena + soff(FArena, capacity))[0] = mem_len;
-	u8_r(arena + soff(FArena, used) )[0] = 0;
+	u8_r(arena + FArena_start )[0] = mem_ptr;
+	u8_r(arena + FArena_capacity)[0] = mem_len;
+	u8_r(arena + FArena_used )[0] = 0;
 }
 S_ inline void farena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 result) {
 	if (amount == 0) { slice_clear(result); }
 	U8 desired = type_width * amount;
 	U8 to_commit = align_pow2(desired, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
-	U8_R used = u8_r(arena + soff(FArena, used));
-	U8 unused = u8_r(arena + soff(FArena, capacity))[0] - used[0]; assert(to_commit <= unused);
-	U8 ptr = u8_r(arena + soff(FArena, start) )[0] + used[0];
+	U8_R used = u8_r(arena + FArena_used);
+	U8 unused = u8_r(arena + FArena_capacity)[0] - used[0]; assert(to_commit <= unused);
+	U8 ptr = u8_r(arena + FArena_start )[0] + used[0];
 	used[0] += to_commit;
 	slice_assign_comp(result, ptr, desired);
 }
 S_ inline void farena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
 	assert(result != null);
 	assert(arena != null);
-	U8_R used = u8_r(arena + soff(FArena, used));
+	U8_R used = u8_r(arena + FArena_used);
 	/*Check if the allocation is at the end of the arena*/{
 		U8 alloc_end = old_ptr + old_len;
-		U8 arena_end = u8_r(arena + soff(FArena, start))[0] + used[0];
+		U8 arena_end = u8_r(arena + FArena_start)[0] + used[0];
 		if (alloc_end != arena_end) {
 			// Not at the end, can't grow in place
 			slice_clear(result);
@@ -772,7 +914,7 @@ S_ inline void farena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 r
 	// Calculate growth
 	U8 grow_amount = requested_size - old_len;
 	U8 aligned_grow = align_pow2(grow_amount, alignment ? 
alignment : MEMORY_ALIGNMENT_DEFAULT); - U8 unused = u8_r(arena + soff(FArena, capacity))[0] - used[0]; + U8 unused = u8_r(arena + FArena_capacity)[0] - used[0]; if (aligned_grow > unused) { // Not enough space slice_clear(result); @@ -785,10 +927,10 @@ S_ inline void farena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 r S_ inline void farena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) { assert(result != null); assert(arena != null); - U8_R used = u8_r(arena + soff(FArena, used)); + U8_R used = u8_r(arena + FArena_used); /*Check if the allocation is at the end of the arena*/ { U8 alloc_end = old_ptr + old_len; - U8 arena_end = u8_r(arena + soff(FArena, start))[0] + used[0]; + U8 arena_end = u8_r(arena + FArena_start)[0] + used[0]; if (alloc_end != arena_end) { // Not at the end, can't shrink but return adjusted size slice_assign_comp(result, old_ptr, requested_size); @@ -800,28 +942,28 @@ S_ inline void farena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 used[0] -= (aligned_original - aligned_new); slice_assign_comp(result, old_ptr, requested_size); } -I_ void farena_reset__u(U8 arena) { u8_r(arena + soff(FArena, used))[0] = 0; } +I_ void farena_reset__u(U8 arena) { u8_r(arena + FArena_used)[0] = 0; } I_ void farena_rewind__u(U8 arena, U8 sp_slot) { - U8 start = u8_r(arena + soff(FArena, start))[0]; - U8_R used = u8_r(arena + soff(FArena, used)); + U8 start = u8_r(arena + FArena_start)[0]; + U8_R used = u8_r(arena + FArena_used); U8 end = start + used[0]; assert_bounds(sp_slot, start, end); used[0] -= sp_slot - start; } I_ void farena_save__u(U8 arena, U8 sp) { - u8_r(sp + soff(AllocatorSP, type_sig))[0] = (U8)& farena_allocator_proc; - u8_r(sp + soff(AllocatorSP, slot ))[0] = u8_r(arena + soff(FArena, used))[0]; + u8_r(sp + AllocatorSP_type_sig)[0] = (U8)& farena_allocator_proc; + u8_r(sp + AllocatorSP_slot )[0] = u8_r(arena + FArena_used)[0]; } S_ void farena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out) { assert(out != null); assert(arena != null); - U8 allocation = arena + soff(AllocatorProc_Out, allocation); + U8 allocation = arena + AllocatorProc_Out_allocation; switch (op) { case AllocatorOp_Alloc: case AllocatorOp_Alloc_NoZero: farena__push__u(arena, requested_size, 1, alignment, allocation); - mem_zero(u8_r(allocation + soff(Slice_Mem, ptr))[0], u8_r(allocation + soff(Slice_Mem, len))[0] * op); + mem_zero(u8_r(allocation + Slice_ptr)[0], u8_r(allocation + Slice_len)[0] * op); break; case AllocatorOp_Free: break; case AllocatorOp_Reset: farena_reset__u(arena); break; @@ -838,17 +980,17 @@ S_ void farena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ case AllocatorOp_SavePoint: farena_save__u(arena, allocation); break; case AllocatorOp_Query: - u4_r(out + soff(AllocatorQueryInfo, features))[0] = + u4_r(out + AllocatorQueryInfo_features)[0] = AllocatorQuery_Alloc | AllocatorQuery_Reset | AllocatorQuery_Resize | AllocatorQuery_Rewind ; - U8 max_alloc = u8_r(arena + soff(FArena, capacity))[0] - u8_r(arena + soff(FArena, used))[0]; - u8_r(out + soff(AllocatorQueryInfo, max_alloc))[0] = max_alloc; - u8_r(out + soff(AllocatorQueryInfo, min_alloc))[0] = 0; - u8_r(out + soff(AllocatorQueryInfo, left ))[0] = max_alloc; - farena_save__u(arena, out + soff(AllocatorQueryInfo, save_point)); + U8 max_alloc = u8_r(arena + FArena_capacity)[0] - u8_r(arena + FArena_used)[0]; + u8_r(out + AllocatorQueryInfo_max_alloc)[0] = max_alloc; + 
u8_r(out + AllocatorQueryInfo_min_alloc)[0] = 0;
+		u8_r(out + AllocatorQueryInfo_left )[0] = max_alloc;
+		farena_save__u(arena, out + AllocatorQueryInfo_save_point);
 		break;
 	}
 	return;
@@ -929,7 +1071,7 @@ I_ void os__enable_large_pages(void) {
 S_ inline void os_init(void) {
 	// os__enable_large_pages();
-	u8_r(os_system_info() + soff(OS_SystemInfo, target_page_size))[0] = ms_get_larg_page_minimum();
+	u8_r(os_system_info() + OS_SystemInfo_target_page_size)[0] = ms_get_larg_page_minimum();
 }
 I_ U8 os_vmem_reserve__u(U8 size, B4 no_large_pages, U8 base_addr) {
 	return cast(U8, ms_virtual_alloc(cast(MS_LPVOID, base_addr), size, MS_MEM_RESERVE,
@@ -959,7 +1101,7 @@ I_ U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGN
 S_ inline U8 varena__make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr) {
 	if (reserve_size == 0) { reserve_size = mega(64); }
 	if (commit_size == 0) { commit_size = mega(64); }
-	U8 page = u8_r(os_system_info() + soff(OS_SystemInfo, target_page_size))[0];
+	U8 page = u8_r(os_system_info() + OS_SystemInfo_target_page_size)[0];
 	U8 reserve_sz = align_pow2(reserve_size, page);
 	U8 commit_sz = align_pow2(commit_size, page);
 	B4 no_large = (flags & VArenaFlag_NoLargePages) != 0;
@@ -967,12 +1109,12 @@ S_ inline U8 varena__make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_
 	B4 ok = os_vmem_commit__u(base, commit_sz, no_large); assert(ok != 0);
 	U8 header = varena_header_size();
 	U8 data_start = base + header;
-	u8_r(base + soff(VArena, reserve_start))[0] = data_start;
-	u8_r(base + soff(VArena, reserve ))[0] = reserve_sz;
-	u8_r(base + soff(VArena, commit_size ))[0] = commit_sz;
-	u8_r(base + soff(VArena, committed ))[0] = commit_sz;
-	u8_r(base + soff(VArena, commit_used ))[0] = header;
-	u4_r(base + soff(VArena, flags ))[0] = flags;
+	u8_r(base + VArena_reserve_start)[0] = data_start;
+	u8_r(base + VArena_reserve )[0] = reserve_sz;
+	u8_r(base + VArena_commit_size )[0] = commit_sz;
+	u8_r(base + VArena_committed )[0] = commit_sz;
+	u8_r(base + VArena_commit_used )[0] = header;
+	u4_r(base + VArena_flags )[0] = flags;
 	return base;
 }
 S_ inline void varena__push__u(U8 vm, U8 amount, U8 type_width, U8 alignment, U8 result) {
@@ -982,28 +1124,28 @@ S_ inline void varena__push__u(U8 vm, U8 amount, U8 type_width, U8 alignment, U8
 	alignment = alignment == 0 ? MEMORY_ALIGNMENT_DEFAULT : alignment;
 	U8 requested_size = amount * type_width;
 	U8 aligned_size = align_pow2(requested_size, alignment);
-	U8_R commit_used = u8_r(vm + soff(VArena, commit_used));
+	U8_R commit_used = u8_r(vm + VArena_commit_used);
 	U8 to_be_used = commit_used[0] + aligned_size;
-	U8 reserve_left = u8_r(vm + soff(VArena, reserve ))[0] - commit_used[0];
-	U8 committed = u8_r(vm + soff(VArena, committed))[0];
+	U8 reserve_left = u8_r(vm + VArena_reserve )[0] - commit_used[0];
+	U8 committed = u8_r(vm + VArena_committed)[0];
 	U8 commit_left = committed - commit_used[0]; assert(to_be_used < reserve_left);
 	if (/*exhausted?*/commit_left < aligned_size) {
-		U8 commit_size = u8_r(vm + soff(VArena, commit_size))[0];
+		U8 commit_size = u8_r(vm + VArena_commit_size)[0];
 		U8 next_commit_size = reserve_left > aligned_size ? 
max(commit_size, aligned_size) : reserve_left; if (next_commit_size != 0) { - B4 no_large_pages = (u4_r(vm + soff(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0; + B4 no_large_pages = (u4_r(vm + VArena_flags)[0] & VArenaFlag_NoLargePages) != 0; U8 next_commit_start = vm + committed; if (os_vmem_commit__u(next_commit_start, next_commit_size, no_large_pages) == false) { slice_clear(result); return; } committed += next_commit_size; - u8_r(vm + soff(VArena, committed))[0] = committed; + u8_r(vm + VArena_committed)[0] = committed; } } commit_used[0] += aligned_size; - U8 current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + commit_used[0]; + U8 current_offset = u8_r(vm + VArena_reserve_start)[0] + commit_used[0]; slice_assign_comp(result, current_offset, requested_size); } S_ inline void varena__grow__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) { @@ -1011,12 +1153,12 @@ S_ inline void varena__grow__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requ assert(result != null); U8 grow_amount = requested_size - old_len; if (grow_amount == 0) { slice_assign_comp(result, old_ptr, old_len); return; } - U8 current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + u8_r(vm + soff(VArena, commit_used))[0]; + U8 current_offset = u8_r(vm + VArena_reserve_start)[0] + u8_r(vm + VArena_commit_used)[0]; // Growing when not the last allocation not allowed assert(old_ptr == current_offset); uvar(Slice_Mem, allocation); varena__push__u(vm, grow_amount, 1, alignment, u8_(allocation)); - U8 a_ptr = u8_r(allocation + soff(Slice_Mem, ptr))[0]; - U8 a_len = u8_r(allocation + soff(Slice_Mem, len))[0]; + U8 a_ptr = u8_r(allocation + Slice_ptr)[0]; + U8 a_len = u8_r(allocation + Slice_len)[0]; assert(a_ptr != 0); mem_zero(a_ptr, a_len * should_zero); slice_assign_comp(result, old_ptr, old_len + a_len); @@ -1026,30 +1168,30 @@ S_ inline void varena__shrink__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 re assert(result != null); U8 shrink_amount = old_len - requested_size; if (lt_s(shrink_amount, 0)) { slice_assign_comp(result, old_ptr, old_len); return; } - U8_R commit_used = u8_r(vm + soff(VArena, commit_used)); - U8 current_offset = u8_r(vm + soff(VArena, reserve_start))[0] + commit_used[0]; assert(old_ptr == current_offset); + U8_R commit_used = u8_r(vm + VArena_commit_used); + U8 current_offset = u8_r(vm + VArena_reserve_start)[0] + commit_used[0]; assert(old_ptr == current_offset); commit_used[0] -= shrink_amount; slice_assign_comp(result, old_ptr, requested_size); } I_ void varena_release__u(U8 vm) { assert(vm != null); - os_vmem_release__u(vm, u8_r(vm + soff(VArena, reserve))[0]); + os_vmem_release__u(vm, u8_r(vm + VArena_reserve)[0]); } I_ void varena_reset__u(U8 vm) { assert(vm != null); - u8_r(vm + soff(VArena, commit_used))[0] = 0; + u8_r(vm + VArena_commit_used)[0] = 0; } I_ void varena_rewind__u(U8 vm, U8 sp_slot) { assert(vm != null); U8 header = varena_header_size(); if (sp_slot < header) { sp_slot = header; } - u8_r(vm + soff(VArena, commit_used))[0] = sp_slot; + u8_r(vm + VArena_commit_used)[0] = sp_slot; } I_ void varena_save__u(U8 vm, U8 sp_addr) { assert(vm != null); assert(sp_addr != null); - u8_r(sp_addr + soff(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc; - u8_r(sp_addr + soff(AllocatorSP, slot ))[0] = u8_r(vm + soff(VArena, commit_used))[0]; + u8_r(sp_addr + AllocatorSP_type_sig)[0] = (U8) varena_allocator_proc; + u8_r(sp_addr + AllocatorSP_slot )[0] = u8_r(vm + VArena_commit_used)[0]; } I_ VArena* 
varena__make(Opts_varena_make*R_ opts) { @@ -1079,15 +1221,15 @@ S_ void varena_allocator_proc(U8 vm, U8 requested_size, U8 alignment, U8 old_ptr { assert(vm != null); assert(out_addr != null); - U8 out_allocation = out_addr ? out_addr + soff(AllocatorProc_Out, allocation) : 0; + U8 out_allocation = out_addr ? out_addr + AllocatorProc_Out_allocation : 0; switch (op) { case AllocatorOp_Alloc: case AllocatorOp_Alloc_NoZero: varena__push__u(vm, requested_size, 1, alignment, out_allocation); if (op == AllocatorOp_Alloc) { - U8 ptr = u8_r(out_allocation + soff(Slice_Mem, ptr))[0]; - U8 len = u8_r(out_allocation + soff(Slice_Mem, len))[0]; + U8 ptr = u8_r(out_allocation + Slice_ptr)[0]; + U8 len = u8_r(out_allocation + Slice_len)[0]; if (ptr && len) { mem_zero(ptr, len); } } break; @@ -1103,23 +1245,23 @@ S_ void varena_allocator_proc(U8 vm, U8 requested_size, U8 alignment, U8 old_ptr varena__shrink__u(out_allocation, vm, old_ptr, old_len, requested_size, alignment); break; - case AllocatorOp_Rewind: varena_rewind__u(vm, old_len); break; - case AllocatorOp_SavePoint: varena_save__u (vm, out_addr + soff(AllocatorProc_Out, save_point)); break; + case AllocatorOp_Rewind: varena_rewind__u(vm, old_len); break; + case AllocatorOp_SavePoint: varena_save__u (vm, out_addr + AllocatorProc_Out_save_point); break; case AllocatorOp_Query: - u4_r(out_addr + soff(AllocatorQueryInfo, features))[0] = + u4_r(out_addr + AllocatorQueryInfo_features)[0] = AllocatorQuery_Alloc | AllocatorQuery_Reset | AllocatorQuery_Resize | AllocatorQuery_Rewind; - U8 reserve = u8_r(vm + soff(VArena, reserve ))[0]; - U8 committed = u8_r(vm + soff(VArena, committed))[0]; + U8 reserve = u8_r(vm + VArena_reserve )[0]; + U8 committed = u8_r(vm + VArena_committed)[0]; U8 max_alloc = (reserve > committed) ? 
(reserve - committed) : 0; - u8_r(out_addr + soff(AllocatorQueryInfo, max_alloc))[0] = max_alloc; - u8_r(out_addr + soff(AllocatorQueryInfo, min_alloc))[0] = kilo(4); - u8_r(out_addr + soff(AllocatorQueryInfo, left ))[0] = max_alloc; - AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(vm + soff(VArena, commit_used))[0] }; - struct_assign(AllocatorSP, out_addr + soff(AllocatorQueryInfo, save_point), (U8)& sp); + u8_r(out_addr + AllocatorQueryInfo_max_alloc)[0] = max_alloc; + u8_r(out_addr + AllocatorQueryInfo_min_alloc)[0] = kilo(4); + u8_r(out_addr + AllocatorQueryInfo_left )[0] = max_alloc; + AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(vm + VArena_commit_used)[0] }; + struct_assign(AllocatorSP, out_addr + AllocatorQueryInfo_save_point, (U8)& sp); break; } } @@ -1132,89 +1274,89 @@ S_ inline U8 arena_make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_ad U8 header_size = arena_header_size(); U8 current = varena__make__u(reserve_size, commit_size, flags, base_addr); assert(current != null); U8 arena; varena__push__u(current, header_size, 1, MEMORY_ALIGNMENT_DEFAULT, (U8)& arena); - u8_r(arena + soff(Arena, backing ))[0] = current; - u8_r(arena + soff(Arena, prev ))[0] = null; - u8_r(arena + soff(Arena, current ))[0] = arena; - u8_r(arena + soff(Arena, base_pos))[0] = 0; - u8_r(arena + soff(Arena, pos ))[0] = header_size; - u8_r(arena + soff(Arena, flags ))[0] = flags; + u8_r(arena + Arena_backing )[0] = current; + u8_r(arena + Arena_prev )[0] = null; + u8_r(arena + Arena_current )[0] = arena; + u8_r(arena + Arena_base_pos)[0] = 0; + u8_r(arena + Arena_pos )[0] = header_size; + u8_r(arena + Arena_flags )[0] = flags; return arena; } S_ inline void arena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 out_mem) { assert(arena != null); - U8 active = u8_r(arena + soff(Arena, current ))[0]; + U8 active = u8_r(arena + Arena_current)[0]; U8 size_requested = amount * type_width; alignment = alignment ? 
alignment : MEMORY_ALIGNMENT_DEFAULT; U8 size_aligned = align_pow2(size_requested, alignment); - U8 pos_pre = u8_r(active + soff(Arena, pos))[0]; + U8 pos_pre = u8_r(active + Arena_pos)[0]; U8 pos_pst = pos_pre + size_aligned; - U8 backing = active + soff(Arena, backing); - U8 reserve = u8_r(backing + soff(VArena, reserve))[0]; + U8 backing = active + Arena_backing; + U8 reserve = u8_r(backing + VArena_reserve)[0]; B4 should_chain = - ((u8_r(arena + soff(Arena, flags))[0] & ArenaFlag_NoChain) == 0) + ((u8_r(arena + Arena_flags)[0] & ArenaFlag_NoChain) == 0) && reserve < pos_pst; if (should_chain) { - U8 current = arena + soff(Arena, current); + U8 current = arena + Arena_current; U8 new_arena = arena_make__u( reserve, - u8_r(backing + soff(VArena, commit_size))[0], - u4_r(backing + soff(VArena, flags ))[0], + u8_r(backing + VArena_commit_size)[0], + u4_r(backing + VArena_flags )[0], 0 ); - u8_r(new_arena + soff(Arena, base_pos))[0] = u8_r(active + soff(Arena, base_pos))[0] + reserve; - u8_r(new_arena + soff(Arena, prev ))[0] = u8_r(current)[0]; - u8_r(current)[0] = new_arena; - active = u8_r(current)[0]; + u8_r(new_arena + Arena_base_pos)[0] = u8_r(active + Arena_base_pos)[0] + reserve; + u8_r(new_arena + Arena_prev )[0] = u8_r(current)[0]; + u8_r(current)[0] = new_arena; + active = u8_r(current)[0]; } U8 result = active + pos_pre; varena__push__u(u8_r(backing)[0], size_aligned, 1, alignment, out_mem); - assert(u8_r(out_mem + soff(Slice_Mem, ptr))[0] == result); - assert(u8_r(out_mem + soff(Slice_Mem, len))[0] > 0); - u8_r(active + soff(Arena, pos))[0] = pos_pst; + assert(u8_r(out_mem + Slice_ptr)[0] == result); + assert(u8_r(out_mem + Slice_len)[0] > 0); + u8_r(active + Arena_pos)[0] = pos_pst; } S_ inline void arena__grow__u(U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero, U8 out_mem) { - U8 active = arena + soff(Arena, current); - U8_R active_pos = u8_r(active + soff(Arena, pos)); + U8 active = arena + Arena_current; + U8_R active_pos = u8_r(active + Arena_pos); U8 alloc_end = old_ptr + old_len; U8 arena_end = active + active_pos[0]; if (alloc_end == arena_end) { U8 grow_amount = requested_size - old_len; U8 aligned_grow = align_pow2(grow_amount, alignment ? 
alignment : MEMORY_ALIGNMENT_DEFAULT); - if (active_pos[0] + aligned_grow <= u8_r(active + soff(Arena, backing) + soff(VArena, reserve))[0]) { - uvar(Slice_Mem, vresult); varena__push__u(u8_r(active + soff(Arena, backing))[0], aligned_grow, 1, alignment, (U8)vresult); - if (u8_r(vresult + soff(Slice_Mem, ptr))[0] != null) { + if (active_pos[0] + aligned_grow <= u8_r(active + Arena_backing + VArena_reserve)[0]) { + uvar(Slice_Mem, vresult); varena__push__u(u8_r(active + Arena_backing)[0], aligned_grow, 1, alignment, (U8)vresult); + if (u8_r(vresult + Slice_ptr)[0] != null) { active_pos[0] += aligned_grow; mem_zero(old_ptr + old_len, aligned_grow * should_zero); - slice_assign_comp(out_mem, u8_(vresult) + soff(Slice_Mem, ptr), u8_(vresult) + soff(Slice_Mem, len)); + slice_assign_comp(out_mem, u8_(vresult) + Slice_ptr, u8_(vresult) + Slice_len); return; } } } arena__push__u(arena, requested_size, 1, alignment, out_mem); - if (u8_r(out_mem + soff(Slice_Mem, ptr))[0] == null) { slice_assign_comp(out_mem, 0, 0); return; } - mem_copy(u8_r(out_mem + soff(Slice_Mem, ptr))[0], old_ptr, old_len); - mem_zero(u8_r(out_mem + soff(Slice_Mem, ptr))[0], (u8_r(out_mem + soff(Slice_Mem, len))[0] - old_len) * should_zero); + if (u8_r(out_mem + Slice_ptr)[0] == null) { slice_assign_comp(out_mem, 0, 0); return; } + mem_copy(u8_r(out_mem + Slice_ptr)[0], old_ptr, old_len); + mem_zero(u8_r(out_mem + Slice_ptr)[0], (u8_r(out_mem + Slice_len)[0] - old_len) * should_zero); } S_ inline void arena__shrink__u(U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, U8 out_mem) { - U8 active = arena + soff(Arena, current); - U8_R active_pos = u8_r(active + soff(Arena, pos)); + U8 active = arena + Arena_current; + U8_R active_pos = u8_r(active + Arena_pos); U8 alloc_end = old_ptr + old_len; U8 arena_end = active + active_pos[0]; if (alloc_end != arena_end) { slice_assign_comp(out_mem, old_ptr, old_len); return; } U8 aligned_original = align_pow2(old_len, MEMORY_ALIGNMENT_DEFAULT); U8 aligned_new = align_pow2(requested_size, alignment ? 
alignment : MEMORY_ALIGNMENT_DEFAULT); U8 pos_reduction = aligned_original - aligned_new; - u8_r(active + soff(Arena, pos))[0] -= pos_reduction; - varena__shrink__u(out_mem, active + soff(Arena, backing), old_ptr, old_len, requested_size, alignment); + u8_r(active + Arena_pos)[0] -= pos_reduction; + varena__shrink__u(out_mem, active + Arena_backing, old_ptr, old_len, requested_size, alignment); } I_ void arena_release__u(U8 arena) { assert(arena != null); - U8 curr = arena + soff(Arena, current); + U8 curr = arena + Arena_current; U8 prev = null; for (; u8_r(curr)[0] != null; curr = prev) { - u8_r(prev)[0] = u8_r(curr + soff(Arena, prev))[0]; + u8_r(prev)[0] = u8_r(curr + Arena_prev)[0]; varena_release__u(u8_r(curr)[0]); } } @@ -1222,33 +1364,33 @@ I_ void arena_reset__u(U8 arena) { arena_rewind__u(arena, 0); } void arena_rewind__u(U8 arena, U8 slot) { assert(arena != null); U8 header_size = arena_header_size(); - U8 curr = arena + soff(Arena, current); + U8 curr = arena + Arena_current; U8 big_pos = clamp_bot(header_size, slot); - for (U8 prev = null; u8_r(curr + soff(Arena, base_pos))[0] >= big_pos; u8_r(curr)[0] = prev) { - prev = u8_r(curr + soff(Arena, prev))[0]; - varena_release__u(u8_r(curr + soff(Arena, backing))[0]); + for (U8 prev = null; u8_r(curr + Arena_base_pos)[0] >= big_pos; u8_r(curr)[0] = prev) { + prev = u8_r(curr + Arena_prev)[0]; + varena_release__u(u8_r(curr + Arena_backing)[0]); } - u8_r(arena + soff(Arena, current))[0] = u8_r(curr)[0]; - U8 new_pos = big_pos - u8_r(curr + soff(Arena, base_pos))[0]; assert(new_pos <= u8_r(curr + soff(Arena, pos))[0]); - u8_r(curr + soff(Arena, pos))[0] = new_pos; + u8_r(arena + Arena_current)[0] = u8_r(curr)[0]; + U8 new_pos = big_pos - u8_r(curr + Arena_base_pos)[0]; assert(new_pos <= u8_r(curr + Arena_pos)[0]); + u8_r(curr + Arena_pos)[0] = new_pos; } I_ void arena_save__u(U8 arena, U8 out_sp) { - u8_r(out_sp + soff(AllocatorSP, type_sig))[0] = (U8)& arena_allocator_proc; - u8_r(out_sp + soff(AllocatorSP, slot ))[0] = - u8_r(arena + soff(Arena, base_pos) )[0] - + u8_r(arena + soff(Arena, current) + soff(Arena, pos))[0]; + u8_r(out_sp + AllocatorSP_type_sig)[0] = (U8)& arena_allocator_proc; + u8_r(out_sp + AllocatorSP_slot )[0] = + u8_r(arena + Arena_base_pos )[0] + + u8_r(arena + Arena_current + Arena_pos)[0]; } S_ inline void arena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, U8 out_addr) { assert(out_addr != null); assert(arena != null); - U8 out_allocation = out_addr + soff(AllocatorProc_Out, allocation); + U8 out_allocation = out_addr + AllocatorProc_Out_allocation; switch (op) { case AllocatorOp_Alloc: case AllocatorOp_Alloc_NoZero: arena__push__u(arena, requested_size, 1, alignment, out_allocation); - mem_zero(out_allocation, u8_r(out_allocation + soff(Slice_Mem, len))[0] * op); + mem_zero(out_allocation, u8_r(out_allocation + Slice_len)[0] * op); break; case AllocatorOp_Free: break; @@ -1262,20 +1404,20 @@ S_ inline void arena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U arena__shrink__u(arena, old_ptr, old_len, requested_size, alignment, out_allocation); break; - case AllocatorOp_Rewind: arena_rewind__u(arena, old_len); break; - case AllocatorOp_SavePoint: arena_save__u(arena, out_addr + soff(AllocatorProc_Out, save_point)); break; + case AllocatorOp_Rewind: arena_rewind__u(arena, old_len); break; + case AllocatorOp_SavePoint: arena_save__u(arena, out_addr + AllocatorProc_Out_save_point); break; case AllocatorOp_Query: - u4_r(out_addr + soff(AllocatorQueryInfo, 
features))[0] = + u4_r(out_addr + AllocatorQueryInfo_features)[0] = AllocatorQuery_Alloc | AllocatorQuery_Resize | AllocatorQuery_Reset | AllocatorQuery_Rewind ; - u8_r(out_addr + soff(AllocatorQueryInfo, max_alloc ))[0] = u8_r(arena + soff(Arena, backing) + soff(VArena, reserve))[0]; - u8_r(out_addr + soff(AllocatorQueryInfo, min_alloc ))[0] = kilo(4); - u8_r(out_addr + soff(AllocatorQueryInfo, left ))[0] = u8_r(out_addr + soff(AllocatorQueryInfo, max_alloc))[0] - u8_r(arena + soff(Arena, backing) + soff(VArena, commit_used))[0]; - arena_save__u(arena, out_addr + soff(AllocatorQueryInfo, save_point)); + u8_r(out_addr + AllocatorQueryInfo_max_alloc )[0] = u8_r(arena + Arena_backing + VArena_reserve)[0]; + u8_r(out_addr + AllocatorQueryInfo_min_alloc )[0] = kilo(4); + u8_r(out_addr + AllocatorQueryInfo_left )[0] = u8_r(out_addr + AllocatorQueryInfo_max_alloc)[0] - u8_r(arena + Arena_backing + VArena_commit_used)[0]; + arena_save__u(arena, out_addr + AllocatorQueryInfo_save_point); break; } } @@ -1289,19 +1431,88 @@ I_ Arena* arena__make(Opts_arena_make*R_ opts) { #pragma region Key Table Linear (KTL) I_ void ktl_populate_slice_a2_str8(U8 kt, U8 backing_ptr, U8 backing_len, U8 values) { assert(kt != null); - U8 values_len = u8_r(values + soff(Slice_A2_Str8, len))[0]; + U8 values_len = u8_r(values + Slice_len)[0]; if (values_len == 0) return; mem__alloc__u(kt, backing_ptr, backing_len, size_of(KTL_Slot_Str8) * values_len, 0, false); for (U8 id = 0; id < values_len; ++id) { - U8 kt_slot = kt + soff(KTL_Str8, ptr) * id; - U8 value = u8_r(values + soff(Slice_A2_Str8, ptr) + size_of(A2_Str8) * id)[0]; - mem_copy (kt_slot + soff(KTL_Slot_Str8, value), value + size_of(Str8) * 1, size_of(Str8)); - hash64__fnv1a__u(kt_slot + soff(KTL_Slot_Str8, key), value); + U8 kt_slot = kt + Slice_ptr * id; + U8 value = u8_r(values + Slice_ptr + size_of(A2_Str8) * id)[0]; + mem_copy (kt_slot + KTL_Slot_value, value + size_of(Str8) * 1, size_of(Str8)); + hash64__fnv1a__u(kt_slot + KTL_Slot_key, value); } } #pragma endregion KTL #pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX) +S_ inline void kt1cx_init__u(U8 backing_tbl, U8 backing_cells, U8 m, U8 result) { + assert(result != null); + assert(u8_r(backing_cells + AllocatorInfo_proc)[0] != null); + assert(u8_r(backing_tbl + AllocatorInfo_proc)[0] != null); + U8 table_size = u8_r(m + KT1CX_InfoMeta_table_size)[0]; + assert(u8_r(m + KT1CX_InfoMeta_cell_depth )[0] > 0); + assert(u8_r(m + KT1CX_InfoMeta_cell_pool_size)[0] >= kilo(4)); + assert(table_size >= kilo(4)); + assert(u8_r(m + KT1CX_InfoMeta_type_width )[0] >= 0); + U8 alloc_size = table_size + u8_r(m + KT1CX_InfoMeta_cell_size)[0]; + mem__alloc__u(result, backing_tbl, backing_tbl + AllocatorInfo_data, alloc_size, 0, 0); + assert(u8_r(result + Slice_ptr)[0] != null); + assert(u8_r(result + Slice_len)[0] > 0); + u8_r(result + Slice_len)[0] = table_size; +} +S_ inline void kt1cx_clear__u(U8 kt, U8 m) { + U8 cell_cursor = u8_r(kt + Slice_ptr)[0]; + U8 cell_size = u8_r(m + KT1CX_ByteMeta_cell_size)[0]; + U8 cell_depth = u8_r(m + KT1CX_ByteMeta_cell_depth)[0]; + U8 table_len = u8_r(kt + Slice_len)[0] * cell_size; + U8 table_end = cell_cursor + table_len; + U8 slot_size = u8_r(m + KT1CX_ByteMeta_slot_size)[0]; + for (; cell_cursor != table_end; cell_cursor += cell_size) + { + U8 slots_end = cell_cursor + (cell_depth * slot_size); + U8 slot_cursor = cell_cursor; + for (; slot_cursor < slots_end; slot_cursor += slot_size) { + process_slots: + mem_zero(slot_cursor, u8_r(slot_cursor + Slice_len)[0]); + } + U8 
next = slot_cursor + u8_r(m + KT1CX_ByteMeta_cell_next_offset)[0]; + if (next != null) { + slot_cursor = next; + slots_end = slot_cursor + (cell_depth * slot_size); + goto process_slots; + } + } +} +I_ U8 kt1cx_slot_id__u(U8 kt, U8 key) { + return key % u8_r(kt + Slice_len)[0]; +} +S_ inline U8 kt1cx_get__u(U8 kt, U8 key, U8 m) { + U8 hash_index = kt1cx_slot_id__u(kt, key); + U8 cell_offset = hash_index * u8_r(m + KT1CX_ByteMeta_cell_size)[0]; + U8 cell_cursor = u8_r(kt + cell_offset); + { + U8 slot_size = u8_r(m + KT1CX_ByteMeta_slot_size)[0]; + U8 slot_cursor = cell_cursor; + U8 slots_end = cell_cursor + u8_r(m + KT1CX_ByteMeta_cell_depth)[0] * slot_size; + for (; slot_cursor != slots_end; slot_cursor += slot_size) { + process_slots: + if (u8_r(slot_cursor + KT1CX_Byte_Slot_occupied)[0] && u8_r(slot_cursor + KT1CX_Byte_Slot_key)[0] == key) { + return slot_cursor; + } + } + U8 cell_next = u8_r(cell_cursor + u8_r(m + KT1CX_ByteMeta_cell_next_offset)[0]); + if (cell_next != null) { + slot_cursor = cell_next; + cell_cursor = cell_next; + goto process_slots; + } + else { + return null; + } + } +} +S_ inline U8 kt1cx_set__u(U8 kt, U8 key, U8 v_ptr, U8 v_len, U8 backing_cells, U8 m) { + return 0; +} #pragma endregion Key Table #pragma region String Operations diff --git a/C/watl.v0.llvm.lottes_hybrid.c b/C/watl.v0.llvm.lottes_hybrid.c index 60e2b22..f806181 100644 --- a/C/watl.v0.llvm.lottes_hybrid.c +++ b/C/watl.v0.llvm.lottes_hybrid.c @@ -2179,6 +2179,7 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines) #pragma endregion WATL #pragma endregion Implementation + int main(void) { os_init(); diff --git a/Odin/watl.v0.ideomatic.odin b/Odin/watl.v0.ideomatic.odin new file mode 100644 index 0000000..fb67d4c --- /dev/null +++ b/Odin/watl.v0.ideomatic.odin @@ -0,0 +1,61 @@ +package watl + +import "core:os/os2" +import "core:mem/virtual" +import "core:mem" + + +main :: proc() +{ + os_init() + + // Note(Ed): Possible compiler bug, cannot resolve proc map with named arguments. 
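+	// Illustration of the note above with a hypothetical reduced case (names
+	// assumed, not taken from a compiler report): if `watl_lex` participates in
+	// a proc group, the named-argument form
+	//     lex_res := watl_lex(src, ainfo_msgs = ainfo(a_msgs), ainfo_toks = ainfo(a_toks))
+	// fails to resolve, while the positional calls below compile. The
+	// named-argument variants are kept commented out until this is sorted upstream.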
+
+	vm_file: virtual.Arena; virtual.arena_init_static(& vm_file, reserved = mem.Gigabytes * 4)
+	data, err := os2.read_entire_file_from_path("watl.v0.ideomatic.odin", virtual.arena_allocator(& vm_file))
+	assert(err == nil)
+
+
+
+	a_msgs := arena_make()
+	a_toks := arena_make()
+	// lex_res := watl_lex(transmute(string) data,
+	// 	ainfo_msgs = ainfo(a_msgs),
+	// 	ainfo_toks = ainfo(a_toks),
+	// )
+	lex_res := watl_lex(transmute(string) data,
+		ainfo(a_msgs),
+		ainfo(a_toks),
+	)
+	assert(lex_res.signal & { .MemFail_SliceConstraintFail } == {})
+
+	str8_cache_kt1_ainfo := arena_make()
+	str_cache := str8cache_make(
+		str_reserve = ainfo(arena_make()),
+		cell_reserve = ainfo(str8_cache_kt1_ainfo),
+		tbl_backing = ainfo(str8_cache_kt1_ainfo),
+		cell_pool_size = Kilo * 4,
+		table_size = Kilo * 32,
+	)
+
+	a_lines := arena_make()
+	// parse_res := watl_parse(lex_res.toks,
+	// 	ainfo_msgs = ainfo(a_msgs),
+	// 	ainfo_nodes = ainfo(a_toks),
+	// 	ainfo_lines = ainfo(a_lines),
+	// 	str_cache = & str_cache,
+	// )
+	parse_res := watl_parse(lex_res.toks,
+		ainfo(a_msgs),
+		ainfo(a_toks),
+		ainfo(a_lines),
+		& str_cache,
+	)
+	assert(parse_res.signal & { .MemFail_SliceConstraintFail } == {})
+
+	arena_reset(a_msgs)
+	arena_reset(a_toks)
+	listing := watl_dump_listing(ainfo(a_msgs), parse_res.lines)
+	file_write_str8("watl.v0.win32.odin.listing.txt", listing)
+	return
+}
\ No newline at end of file
diff --git a/Odin/watl.v0.win32.ideomatic.odin b/Odin/watl.v0.win32.ideomatic.odin
deleted file mode 100644
index e69de29..0000000
diff --git a/Odin/watl.v0.win32.odin b/Odin/watl.v0.win32.odin
index 9f791f6..36f6f06 100644
--- a/Odin/watl.v0.win32.odin
+++ b/Odin/watl.v0.win32.odin
@@ -100,23 +100,14 @@ align_pow2 :: #force_inline proc(x: int, b: int) -> int {
 	assert((b & (b - 1)) == 0) // Check power of 2
 	return ((x + b - 1) & ~(b - 1))
 }
-memory_zero :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr {
-	intrinsics.mem_zero(data, len)
-	return data
-}
+memory_zero :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.mem_zero(data, len); return data }
 memory_zero_explicit :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr {
 	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
 	intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
 	return data
 }
-memory_copy_overlapping :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
-	intrinsics.mem_copy(dst, src, len)
-	return dst
-}
-memory_copy :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
-	intrinsics.mem_copy_non_overlapping(dst, src, len)
-	return dst
-}
+memory_copy_overlapping :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy(dst, src, len); return dst }
+memory_copy :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy_non_overlapping(dst, src, len); return dst }
 sll_stack_push_n :: proc "contextless" (curr, n, n_link: ^^$Type) {
 	(n_link ^) = (curr ^)
@@ -136,21 +127,13 @@ sll_queue_push_nz :: proc "contextless" (first: ^$ParentType, last, n: ^^$Type,
 }
 sll_queue_push_n :: #force_inline proc "contextless" (first: $ParentType, last, n: ^^$Type) { sll_queue_push_nz(first, last, n, nil) }
-SliceByte :: struct {
-	data: [^]byte,
-	len: int
-}
-SliceRaw :: struct ($Type: typeid) {
-	data: [^]Type,
-	len: int,
-}
+SliceByte :: struct { data: [^]byte, len: int }
+SliceRaw :: struct($Type: typeid) { data: [^]Type, 
len: int, } slice :: #force_inline proc "contextless" (s: [^] $Type, num: $Some_Integer) -> [ ]Type { return transmute([]Type) SliceRaw(Type) { s, cast(int) num } } slice_cursor :: #force_inline proc "contextless" (s: []$Type) -> [^]Type { return transmute([^]Type) raw_data(s) } -slice_assert :: #force_inline proc (s: $SliceType / []$Type) { - assert(len(s) > 0) - assert(s != nil) -} -slice_end :: #force_inline proc "contextless" (s : $SliceType / []$Type) -> ^Type { return & cursor(s)[len(s)] } +slice_end :: #force_inline proc "contextless" (s : $SliceType / []$Type) -> ^Type { return & cursor(s)[len(s)] } + +slice_assert :: #force_inline proc (s: $SliceType / []$Type) { assert(len(s) > 0); assert(s != nil) } @(require_results) slice_to_bytes :: proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] } @(require_results) slice_raw :: proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s } @@ -270,8 +253,7 @@ mem_alloc :: proc(ainfo: AllocatorInfo, size: int, alignment: int = MEMORY_ALIGN requested_size = size, alignment = alignment, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return output.allocation } mem_grow :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, give_actual: b32 = false) -> []byte { @@ -283,8 +265,7 @@ mem_grow :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = alignment = alignment, old_allocation = mem, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return slice(cursor(output.allocation), give_actual ? len(output.allocation) : size) } mem_resize :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, give_actual: b32 = false) -> []byte { @@ -296,8 +277,7 @@ mem_resize :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int alignment = alignment, old_allocation = mem, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return slice(cursor(output.allocation), give_actual ? 
len(output.allocation) : size) } mem_shrink :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false) -> []byte { @@ -309,8 +289,7 @@ mem_shrink :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int alignment = alignment, old_allocation = mem, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return output.allocation } @@ -322,8 +301,7 @@ alloc_type :: proc(ainfo: AllocatorInfo, $Type: typeid, alignment: int = MEMORY requested_size = size_of(Type), alignment = alignment, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return transmute(^Type) raw_data(output.allocation) } alloc_slice :: proc(ainfo: AllocatorInfo, $SliceType: typeid / []$Type, num : int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false) -> []Type { @@ -334,17 +312,13 @@ alloc_slice :: proc(ainfo: AllocatorInfo, $SliceType: typeid / []$Type, num : in requested_size = size_of(Type) * num, alignment = alignment, } - output: AllocatorProc_Out - ainfo.procedure(input, & output) + output: AllocatorProc_Out; ainfo.procedure(input, & output) return transmute([]Type) slice(raw_data(output.allocation), num) } //endregion Allocator Interface //region Strings -Raw_String :: struct { - data: [^]byte, - len: int, -} +Raw_String :: struct { data: [^]byte, len: int, } string_cursor :: proc(s: string) -> [^]u8 { return slice_cursor(transmute([]byte) s) } string_copy :: proc(dst, src: string) { slice_copy (transmute([]byte) dst, transmute([]byte) src) } string_end :: proc(s: string) -> ^u8 { return slice_end (transmute([]byte) s) } @@ -356,10 +330,7 @@ FArena :: struct { mem: []byte, used: int, } -farena_make :: proc(backing: []byte) -> FArena { - arena := FArena {mem = backing} - return arena -} +farena_make :: proc(backing: []byte) -> FArena { return {mem = backing} } farena_init :: proc(arena: ^FArena, backing: []byte) { assert(arena != nil) arena.mem = backing @@ -367,20 +338,15 @@ farena_init :: proc(arena: ^FArena, backing: []byte) { } farena_push :: proc(arena: ^FArena, $Type: typeid, amount: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []Type { assert(arena != nil) - if amount == 0 { - return {} - } + if amount == 0 { return {} } desired := size_of(Type) * amount to_commit := align_pow2(desired, alignment) - unused := len(arena.mem) - arena.used - assert(to_commit <= unused) + unused := len(arena.mem) - arena.used; assert(to_commit <= unused) ptr := cursor(arena.mem[arena.used:]) arena.used += to_commit return slice(ptr, amount) } -farena_reset :: proc(arena: ^FArena) { - arena.used = 0 -} +farena_reset :: #force_inline proc(arena: ^FArena) { arena.used = 0 } farena_rewind :: proc(arena: ^FArena, save_point: AllocatorSP) { assert(save_point.type_sig == farena_allocator_proc) assert(save_point.slot >= 0 && save_point.slot <= arena.used) @@ -391,7 +357,6 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou assert(output != nil) assert(input.data != nil) arena := transmute(^FArena) input.data - switch input.op { case .Alloc, .Alloc_NoZero: @@ -399,12 +364,9 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou if input.op == .Alloc { zero(output.allocation) } - - case .Free: - // No-op for arena - - case .Reset: - farena_reset(arena) + + case .Free: // No-op for arena + case .Reset: farena_reset(arena) case .Grow, .Grow_NoZero: 
// Check if the allocation is at the end of the arena @@ -453,11 +415,8 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou arena.used -= (aligned_original - aligned_new) output.allocation = input.old_allocation[:input.requested_size] - case .Rewind: - farena_rewind(arena, input.save_point) - - case .SavePoint: - output.save_point = farena_save(arena^) + case .Rewind: farena_rewind(arena, input.save_point) + case .SavePoint: output.save_point = farena_save(arena^) case .Query: output.features = {.Alloc, .Reset, .Grow, .Shrink, .Rewind} @@ -471,14 +430,9 @@ farena_ainfo :: #force_inline proc "contextless" (arena : ^FArena) -> AllocatorI //endregion FArena //region OS -OS_SystemInfo :: struct { - target_page_size: int, -} -OS_Windows_State :: struct { - system_info: OS_SystemInfo, -} -@(private) -os_windows_info: OS_Windows_State +OS_SystemInfo :: struct { target_page_size: int } +OS_Windows_State :: struct { system_info: OS_SystemInfo } +@(private) os_windows_info: OS_Windows_State // Windows API constants MS_INVALID_HANDLE_VALUE :: ~uintptr(0) @@ -537,12 +491,7 @@ os_enable_large_pages :: proc() { { priv := MS_TOKEN_PRIVILEGES { privilege_count = 1, - privileges = { - { - luid = luid, - attributes = MS_SE_PRIVILEGE_ENABLED, - }, - }, + privileges = { { luid = luid, attributes = MS_SE_PRIVILEGE_ENABLED, }, }, } AdjustTokenPrivileges(token, 0, &priv, size_of(MS_TOKEN_PRIVILEGES), nil, nil) } @@ -554,25 +503,19 @@ os_init :: proc() { info := &os_windows_info.system_info info.target_page_size = int(GetLargePageMinimum()) } -os_system_info :: proc() -> ^OS_SystemInfo { - return &os_windows_info.system_info -} -os_vmem_commit :: proc(vm: rawptr, size: int, no_large_pages: b32 = false) -> b32 { +os_system_info :: #force_inline proc "contextless" () -> ^OS_SystemInfo { return & os_windows_info.system_info } +os_vmem_commit :: #force_inline proc "contextless" (vm: rawptr, size: int, no_large_pages: b32 = false) -> b32 { // Large pages disabled for now (not failing gracefully in original C) - result := VirtualAlloc(vm, uintptr(size), MS_MEM_COMMIT, MS_PAGE_READWRITE) != nil - return b32(result) + return cast(b32) VirtualAlloc(vm, uintptr(size), MS_MEM_COMMIT, MS_PAGE_READWRITE) != nil } -os_vmem_reserve :: proc(size: int, base_addr: int = 0, no_large_pages: b32 = false) -> rawptr { - result := VirtualAlloc(rawptr(uintptr(base_addr)), uintptr(size), +os_vmem_reserve :: #force_inline proc "contextless" (size: int, base_addr: int = 0, no_large_pages: b32 = false) -> rawptr { + return VirtualAlloc(rawptr(uintptr(base_addr)), uintptr(size), MS_MEM_RESERVE, // MS_MEM_COMMIT // | (no_large_pages ? 
0 : MS_MEM_LARGE_PAGES), // Large pages disabled MS_PAGE_READWRITE) - return result -} -os_vmem_release :: proc(vm: rawptr, size: int) { - VirtualFree(vm, 0, MS_MEM_RELEASE) } +os_vmem_release :: #force_inline proc "contextless" (vm: rawptr, size: int) { VirtualFree(vm, 0, MS_MEM_RELEASE) } //endregion OS //region VArena @@ -646,17 +589,6 @@ varena_push :: proc(va: ^VArena, $Type: typeid, amount: int, alignment: int = ME va.commit_used = to_be_used return slice(transmute([^]Type) uintptr(current_offset), amount) } -varena_release :: proc(va: ^VArena) { - os_vmem_release(va, va.reserve) -} -varena_rewind :: proc(va: ^VArena, save_point: AllocatorSP) { - assert(va != nil) - assert(save_point.type_sig == varena_allocator_proc) - va.commit_used = max(save_point.slot, size_of(VArena)) -} -varena_reset :: proc(va: ^VArena) { - va.commit_used = size_of(VArena) -} varena_shrink :: proc(va: ^VArena, old_allocation: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []byte { assert(va != nil) current_offset := va.reserve_start + va.commit_used @@ -668,6 +600,13 @@ varena_shrink :: proc(va: ^VArena, old_allocation: []byte, requested_size: int, va.commit_used -= shrink_amount return old_allocation[:requested_size] } +varena_release :: #force_inline proc(va: ^VArena) { os_vmem_release(va, va.reserve) } +varena_reset :: #force_inline proc(va: ^VArena) { va.commit_used = size_of(VArena) } +varena_rewind :: #force_inline proc(va: ^VArena, save_point: AllocatorSP) { + assert(va != nil) + assert(save_point.type_sig == varena_allocator_proc) + va.commit_used = max(save_point.slot, size_of(VArena)) +} varena_save :: #force_inline proc "contextless" (va: ^VArena) -> AllocatorSP { return AllocatorSP { type_sig = varena_allocator_proc, slot = va.commit_used } } varena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out) { assert(output != nil) @@ -785,7 +724,7 @@ arena_push :: proc(arena: ^Arena, $Type: typeid, amount: int, alignment: int = M active.pos = pos_pst return slice(result_ptr, amount) } -arena_release :: proc(arena: ^Arena) { +arena_release :: #force_inline proc(arena: ^Arena) { assert(arena != nil) curr := arena.current for curr != nil { @@ -794,9 +733,7 @@ arena_release :: proc(arena: ^Arena) { curr = prev } } -arena_reset :: proc(arena: ^Arena) { - arena_rewind(arena, AllocatorSP { type_sig = arena_allocator_proc, slot = 0 }) -} +arena_reset :: #force_inline proc(arena: ^Arena) { arena_rewind(arena, AllocatorSP { type_sig = arena_allocator_proc, slot = 0 }) } arena_rewind :: proc(arena: ^Arena, save_point: AllocatorSP) { assert(arena != nil) assert(save_point.type_sig == arena_allocator_proc) @@ -1025,11 +962,7 @@ kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) { } } } -kt1cx_slot_id :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 { - cell_size := m.cell_size // dummy value - hash_index := key % u64(len(kt.table)) - return hash_index -} +kt1cx_slot_id :: #force_inline proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 { return key % u64(len(kt.table)) } kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte { hash_index := kt1cx_slot_id(kt, key, m) cell_offset := uintptr(hash_index) * uintptr(m.cell_size) @@ -1100,28 +1033,22 @@ kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: Alloca return nil } } -kt1cx_assert :: proc(kt: $type / KT1CX) { - slice_assert(kt.table) -} -kt1cx_byte :: proc(kt: $type / KT1CX) -> KT1CX_Byte { return { - slice( transmute([^]byte) cursor(kt.table), 
@@ -1025,11 +962,7 @@ kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) {
 		}
 	}
 }
-kt1cx_slot_id :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 {
-	cell_size := m.cell_size // dummy value
-	hash_index := key % u64(len(kt.table))
-	return hash_index
-}
+kt1cx_slot_id :: #force_inline proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 { return key % u64(len(kt.table)) }
 kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte {
 	hash_index := kt1cx_slot_id(kt, key, m)
 	cell_offset := uintptr(hash_index) * uintptr(m.cell_size)
@@ -1100,28 +1033,22 @@ kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: Alloca
 		return nil
 	}
 }
-kt1cx_assert :: proc(kt: $type / KT1CX) {
-	slice_assert(kt.table)
-}
-kt1cx_byte :: proc(kt: $type / KT1CX) -> KT1CX_Byte { return {
-	slice( transmute([^]byte) cursor(kt.table), len(kt.table))
-} }
+kt1cx_assert :: #force_inline proc(kt: $type / KT1CX) { slice_assert(kt.table) }
+kt1cx_byte :: #force_inline proc(kt: $type / KT1CX) -> KT1CX_Byte { return { slice( transmute([^]byte) cursor(kt.table), len(kt.table)) } }
 //endregion Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
 
 //region String Operations
-char_is_upper :: proc(c: u8) -> b32 { return('A' <= c && c <= 'Z') }
-char_to_lower :: proc(c: u8) -> u8 { c:=c; if (char_is_upper(c)) { c += ('a' - 'A') }; return (c) }
+char_is_upper :: #force_inline proc(c: u8) -> b32 { return('A' <= c && c <= 'Z') }
+char_to_lower :: #force_inline proc(c: u8) -> u8 { c:=c; if (char_is_upper(c)) { c += ('a' - 'A') }; return (c) }
 
-integer_symbols :: proc(value: u8) -> u8 {
+integer_symbols :: #force_inline proc(value: u8) -> u8 {
 	@static lookup_table: [16]u8 = { '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F', };
 	return lookup_table[value];
 }
-str8_to_cstr_capped :: proc(content: string, mem: []byte) -> cstring {
+str8_to_cstr_capped :: #force_inline proc(content: string, mem: []byte) -> cstring {
 	copy_len := min(len(content), len(mem) - 1)
-	if copy_len > 0 {
-		copy(mem[:copy_len], transmute([]byte) content)
-	}
+	if copy_len > 0 { copy(mem[:copy_len], transmute([]byte) content) }
 	mem[copy_len] = 0
 	return transmute(cstring) raw_data(mem)
 }
@@ -1184,7 +1111,6 @@ str8_from_u32 :: proc(ainfo: AllocatorInfo, num: u32, radix: u32 = 10, min_digit
 	}
 	return result
 }
-
 str8_fmt_kt1l :: proc(ainfo: AllocatorInfo, _buffer: ^[]byte, table: []KTL_Slot(string), fmt_template: string) -> string {
 	buffer := _buffer^
 	slice_assert(buffer)
@@ -1264,15 +1190,14 @@ str8_fmt_kt1l :: proc(ainfo: AllocatorInfo, _buffer: ^[]byte, table: []KTL_Slot(
 	result := transmute(string) slice(cursor(buffer), len(buffer) - buffer_remaining)
 	return result
 }
-
-str8_fmt_backed :: proc(tbl_ainfo, buf_ainfo: AllocatorInfo, fmt_template: string, entries: [][2]string) -> string {
+str8_fmt_backed :: #force_inline proc(tbl_ainfo, buf_ainfo: AllocatorInfo, fmt_template: string, entries: [][2]string) -> string {
 	kt: []KTL_Slot(string); ktl_populate_slice_a2_str(& kt, tbl_ainfo, entries)
 	buf_size := Kilo * 64
 	buffer := mem_alloc(buf_ainfo, buf_size)
 	result := str8_fmt_kt1l(buf_ainfo, & buffer, kt, fmt_template)
 	return result
 }
-str8_fmt_tmp :: proc(fmt_template: string, entries: [][2]string) -> string {
+str8_fmt_tmp :: #force_inline proc(fmt_template: string, entries: [][2]string) -> string {
 	@static tbl_mem: [Kilo * 32]byte; tbl_arena := farena_make(tbl_mem[:])
 	@static buf_mem: [Kilo * 64]byte; buffer := buf_mem[:]
 	kt: []KTL_Slot(string); ktl_populate_slice_a2_str(& kt, ainfo(& tbl_arena), entries)
@@ -1317,7 +1242,7 @@ str8cache_init :: proc(cache: ^Str8Cache, str_reserve, cell_reserve, tbl_backing
 	kt1cx_init(info, m, transmute(^KT1CX_Byte) & cache.kt)
 	return
 }
-str8cache_make :: proc(str_reserve, cell_reserve, tbl_backing: AllocatorInfo, cell_pool_size, table_size: int) -> Str8Cache {
+str8cache_make :: #force_inline proc(str_reserve, cell_reserve, tbl_backing: AllocatorInfo, cell_pool_size, table_size: int) -> Str8Cache {
 	cache : Str8Cache; str8cache_init(& cache, str_reserve, cell_reserve, tbl_backing, cell_pool_size, table_size); return cache
 }
 str8cache_clear :: proc(kt: KT1CX_Str8) {
@@ -1368,11 +1293,10 @@ str8cache_set :: proc(kt: KT1CX_Str8, key: u64, value: string, str_reserve, cell
 	}
 	return result
 }
-cache_str8 :: proc(cache: ^Str8Cache, str: string) -> string {
+cache_str8 :: #force_inline proc(cache: ^Str8Cache, str: string) -> string {
 	assert(cache != nil)
 	key: u64 = 0; hash64_fnv1a(& key, transmute([]byte) str)
-	result := str8cache_set(cache.kt, key, str, cache.str_reserve, cache.cell_reserve)
-	return result ^
+	return str8cache_set(cache.kt, key, str, cache.str_reserve, cache.cell_reserve) ^
 }
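cache_str8 is the interning entry point: it hashes the string with FNV-1a and returns the cached copy, so repeated content collapses to a single allocation. A usage sketch (cache construction elided; names as declared above):

	demo_intern :: proc(cache: ^Str8Cache) {
		a := cache_str8(cache, "watl")
		b := cache_str8(cache, "watl")
		assert(raw_data(a) == raw_data(b)) // same backing memory: the second call hit the table
	}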
 
 Str8Gen :: struct {
@@ -1389,9 +1313,9 @@ str8gen_init :: proc(gen: ^Str8Gen, ainfo: AllocatorInfo) {
 	gen.len = 0
 	gen.cap = Kilo * 4
 }
-str8gen_make :: proc(ainfo: AllocatorInfo) -> Str8Gen { gen: Str8Gen; str8gen_init(& gen, ainfo); return gen }
-str8gen_to_bytes :: proc(gen: Str8Gen) -> []byte { return transmute([]byte) SliceByte {data = gen.ptr, len = gen.cap} }
-str8_from_str8gen :: proc(gen: Str8Gen) -> string { return transmute(string) SliceByte {data = gen.ptr, len = gen.len} }
+str8gen_make :: #force_inline proc(ainfo: AllocatorInfo) -> Str8Gen { gen: Str8Gen; str8gen_init(& gen, ainfo); return gen }
+str8gen_to_bytes :: #force_inline proc(gen: Str8Gen) -> []byte { return transmute([]byte) SliceByte {data = gen.ptr, len = gen.cap} }
+str8_from_str8gen :: #force_inline proc(gen: Str8Gen) -> string { return transmute(string) SliceByte {data = gen.ptr, len = gen.len} }
 
 str8gen_append_str8 :: proc(gen: ^Str8Gen, str: string) {
 	result := mem_grow(gen.backing, str8gen_to_bytes(gen ^), len(str) + gen.len)
@@ -1515,9 +1439,8 @@ api_file_read_contents :: proc(result: ^FileOpInfo, path: string, backing: Alloc
 	result.content = slice(cursor(buffer), cast(int) file_size.QuadPart)
 	return
 }
-file_read_contents_stack :: proc(path: string, backing: AllocatorInfo, zero_backing: b32 = false) -> FileOpInfo {
-	result : FileOpInfo; api_file_read_contents(& result, path, backing, zero_backing)
-	return result
+file_read_contents_stack :: #force_inline proc(path: string, backing: AllocatorInfo, zero_backing: b32 = false) -> FileOpInfo {
+	result: FileOpInfo; api_file_read_contents(& result, path, backing, zero_backing); return result
 }
 file_write_str8 :: proc(path, content: string) {
 	string_assert(path)
@@ -1604,55 +1527,54 @@ api_watl_lex :: proc(info: ^WATL_LexInfo, source: string,
 	alloc_tok :: #force_inline proc(ainfo: AllocatorInfo) -> ^Raw_String { return alloc_type(ainfo, Raw_String, align_of(Raw_String), true) }
-		#partial switch cast(WATL_TokKind) code
-		{
-		case .Space: fallthrough
-		case .Tab:
-			if prev[0] != src_cursor[0] {
-				new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil {
-					slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
-					return
-				}
-				tok = new_tok
-				tok^ = transmute(Raw_String) slice(src_cursor, 0)
-				was_formatting = true
-				num += 1
-			}
-			src_cursor = src_cursor[1:]
-			tok.len += 1
-		case .Line_Feed:
-			new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil{
-				slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
+	#partial switch cast(WATL_TokKind) code {
+	case .Space: fallthrough
+	case .Tab:
+		if prev[0] != src_cursor[0] {
+			new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil {
+				slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
 				return
 			}
 			tok = new_tok
-			tok^ = transmute(Raw_String) slice(src_cursor, 1)
-			src_cursor = src_cursor[1:]
+			tok^ = transmute(Raw_String) slice(src_cursor, 0)
 			was_formatting = true
 			num += 1
-		case .Carriage_Return:
+		}
+		src_cursor = src_cursor[1:]
+		tok.len += 1
+	case .Line_Feed:
+		new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil{
+			slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
+			return
+		}
+		tok = new_tok
+		tok^ = transmute(Raw_String) slice(src_cursor, 1)
+		src_cursor = src_cursor[1:]
+		was_formatting = true
+		num += 1
+	case .Carriage_Return:
+		new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil {
+			slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
+			return
+		}
+		tok = new_tok
+		tok^ = transmute(Raw_String) slice(src_cursor, 2)
+		src_cursor = src_cursor[1:]
+		was_formatting = true
+		num += 1
+	case:
+		if (was_formatting) {
 			new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil {
-				slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
+				slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
 				return
 			}
 			tok = new_tok
-			tok^ = transmute(Raw_String) slice(src_cursor, 2)
-			src_cursor = src_cursor[1:]
-			was_formatting = true
+			tok^ = transmute(Raw_String) slice(src_cursor, 0)
+			was_formatting = false;
 			num += 1
-		case:
-			if (was_formatting) {
-				new_tok := alloc_tok(ainfo_toks); if cursor(new_tok)[-1:] != tok && tok != nil {
-					slice_constraint_fail(info, ainfo_msgs, new_tok, & msg_last);
-					return
-				}
-				tok = new_tok
-				tok^ = transmute(Raw_String) slice(src_cursor, 0)
-				was_formatting = false;
-				num += 1
-			}
-			src_cursor = src_cursor[1:]
-			tok.len += 1
+		}
+		src_cursor = src_cursor[1:]
+		tok.len += 1
 	}
 	prev = src_cursor[-1:]
 	code = src_cursor[0]
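That switch is the entire tokenizer: CR and LF always cut a standalone token, a run of identical whitespace extends one token, and a change of whitespace byte opens a new one. A distilled, allocation-free sketch of the boundary rule (WATL_TokKind members as used above; the real lexer additionally opens a token on the first visible byte after formatting):

	// Does byte c start a new token, given the previous byte?
	watl_cuts_here :: proc(prev, c: u8) -> bool {
		#partial switch cast(WATL_TokKind) c {
		case .Line_Feed, .Carriage_Return: return true      // always standalone tokens
		case .Space, .Tab:                 return prev != c // split when the whitespace byte changes
		}
		return false
	}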
@@ -1729,28 +1651,25 @@ api_watl_parse :: proc(info: ^WATL_ParseInfo, tokens: []WATL_Tok,
 	info_lines ^ = { transmute([^]WATL_Node) line, 0 }
 	for & token in tokens {
-		#partial switch cast(WATL_TokKind) token[0]
-		{
-		case .Carriage_Return: fallthrough
-		case .Line_Feed:
-			new_line := alloc_type(ainfo_lines, WATL_Line); if cursor(new_line)[-1:] != transmute(^[]string)line {
-				info.signal |= { .MemFail_SliceConstraintFail }
-				msg := alloc_type(ainfo_msgs, WATL_ParseMsg)
-				msg.content = "Line slice allocation was not contiguous"
-				msg.pos = { cast(i32) len(info.lines), cast(i32) line.len }
-				msg.line = transmute(^[]WATL_Node) line
-				msg.tok = & token
-				sll_queue_push_n(& info.msgs, & msg_last, & msg)
-				assert(failon_slice_constraint_fail == false)
-				return
-			}
-			line = transmute(^SliceRaw(WATL_Node)) new_line
-			line.data = curr
-			info_lines.len += 1
-			continue
-
-		case:
-			break;
+	#partial switch cast(WATL_TokKind) token[0] {
+	case .Carriage_Return: fallthrough
+	case .Line_Feed:
+		new_line := alloc_type(ainfo_lines, WATL_Line); if cursor(new_line)[-1:] != transmute(^[]string)line {
+			info.signal |= { .MemFail_SliceConstraintFail }
+			msg := alloc_type(ainfo_msgs, WATL_ParseMsg)
+			msg.content = "Line slice allocation was not contiguous"
+			msg.pos = { cast(i32) len(info.lines), cast(i32) line.len }
+			msg.line = transmute(^[]WATL_Node) line
+			msg.tok = & token
+			sll_queue_push_n(& info.msgs, & msg_last, & msg)
+			assert(failon_slice_constraint_fail == false)
+			return
+		}
+		line = transmute(^SliceRaw(WATL_Node)) new_line
+		line.data = curr
+		info_lines.len += 1
+		continue
+	case: break;
 	}
 	curr ^ = cache_str8(str_cache, token)
 	new_node := alloc_type(ainfo_nodes, WATL_Node); if cursor(new_node)[-1:] != curr {
@@ -1799,11 +1718,10 @@ watl_dump_listing :: proc(buffer: AllocatorInfo, lines: []WATL_Line) -> string {
 	for chunk in line
 	{
 		id : string
-		#partial switch cast(WATL_TokKind) chunk[0]
-		{
-		case .Space: id = "Space"
-		case .Tab: id = "Tab"
-		case: id = "Visible"
+		#partial switch cast(WATL_TokKind) chunk[0] {
+		case .Space: id = "Space"
+		case .Tab: id = "Tab"
+		case: id = "Visible"
 		}
 		str8gen_append_fmt(& result, "\t(): ''\n", {
 			{ "id", id },
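watl_dump_listing builds its report through the Str8Gen growable string shown earlier rather than by direct allocation. A small sketch of that generator API, using only procs declared in this diff (the backing allocator is left to the caller):

	demo_gen :: proc(ainfo: AllocatorInfo) {
		gen := str8gen_make(ainfo)
		str8gen_append_str8(& gen, "Hello, ")
		str8gen_append_str8(& gen, "WATL")
		assert(str8_from_str8gen(gen) == "Hello, WATL")
	}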
deleted file mode 100644 index e69de29..0000000