Compare commits

...

11 Commits

Author  SHA1        Message                                                                                           Date
Ed_     bd9d2b3a7b  misc                                                                                              2025-11-08 10:42:04 -05:00
Ed_     f5330c686b  llm refinement attempt                                                                            2025-11-07 14:53:45 -05:00
Ed_     a48681fc00  preparing for curation and runtime testing                                                        2025-11-07 14:32:04 -05:00
Ed_     acb5e916c1  missing changes (llm)                                                                             2025-11-07 13:56:15 -05:00
Ed_     5a44788b4a  WIP(compiles, untested, to review): Another attempt at using llms to codegen very tedious stuff.  2025-11-07 13:51:12 -05:00
Ed_     dbb1367acb  oops 2                                                                                            2025-11-06 19:39:33 -05:00
Ed_     aab3a3f689  oops                                                                                              2025-11-06 19:25:28 -05:00
Ed_     d7790795dd  stuff                                                                                             2025-11-06 19:23:58 -05:00
Ed_     ac05262c8d  finished arena (lottes.c)                                                                         2025-11-05 22:21:44 -05:00
Ed_     3bb46692e1  Update lottes hybrid                                                                              2025-11-05 20:43:42 -05:00
Ed_     a7d17a8b70  changes to watl.v0.msvc.c                                                                         2025-11-05 20:00:41 -05:00
        Made everything internal linkage.
        Moved memory operations impl to header section (keeping same loc as I have for the lottes variants).
        arena__grow && arena__shrink lifted to definitions.
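
The a7d17a8b70 note "Made everything internal linkage" refers to the codebase's internal qualifier, applied throughout the diffs below. A minimal sketch of the convention, assuming the usual definition (the define itself is outside this compare view):

#define internal static  // assumed: gives functions translation-unit-local linkage

internal void helper  (void);  // visible only inside this .c (unity build)
void          exported(void);  // external linkage, the default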
10 changed files with 2594 additions and 1094 deletions

File diff suppressed because it is too large


@@ -4,7 +4,7 @@ Version: 0 (From Scratch, 1-Stage Compilation, LLVM & WinAPI Only, Win CRT Mul
Host: Windows 11 (x86-64)
Toolchain: LLVM (2025-08-30), C-Standard: 11
Based on: Neokineogfx - Fixing C, personalized to include typeinfo more readily.
Based on: Neokineogfx - Fixing C, personalized to utilize typeinfo.
https://youtu.be/RrL7121MOeA
*/
@@ -185,7 +185,7 @@ typedef def_struct(Slice_Str8) { Str8* ptr; U8 len; };
debug_trap(); \
} \
} while(0)
void assert_handler(UTF8*R_ condition, UTF8*R_ file, UTF8*R_ function, S4 line, UTF8*R_ msg, ... );
internal void assert_handler(UTF8*R_ condition, UTF8*R_ file, UTF8*R_ function, S4 line, UTF8*R_ msg, ... );
#else
#define debug_trap()
#define assert_trap(cond)
@@ -278,7 +278,6 @@ void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidt
iter.cursor op iter.r.end; \
++ iter.cursor \
)
#define def_span(type) \
def_struct(tmpl( Span,type)) { type begin; type end; }; \
typedef def_struct(tmpl(Iter_Span,type)) { tmpl(Span,type) r; type cursor; }
@@ -406,12 +405,12 @@ typedef def_struct(FArena) {
};
finline FArena farena_make (Slice_Mem mem);
finline void farena_init (FArena_R arena, Slice_Mem byte);
Slice_Mem farena__push (FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts);
internal Slice_Mem farena__push (FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts);
finline void farena_reset (FArena_R arena);
finline void farena_rewind(FArena_R arena, AllocatorSP save_point);
finline AllocatorSP farena_save (FArena arena);
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
internal void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define ainfo_farena(arena) (AllocatorInfo){ .proc = farena_allocator_proc, .data = u8_(& arena) }
#define farena_push_mem(arena, amount, ...) farena__push(arena, amount, 1, opt_args(Opts_farena, lit(stringify(B1)), __VA_ARGS__))
@@ -435,7 +434,7 @@ typedef def_struct(Opts_vmem) {
B4 no_large_pages;
A4_B1 _PAD_;
};
void os_init(void);
internal void os_init (void);
finline OS_SystemInfo* os_system_info(void);
finline B4 os__vmem_commit (U8 vm, U8 size, Opts_vmem*R_ opts);
@@ -467,17 +466,17 @@ typedef def_struct(Opts_varena_make) {
VArenaFlags flags;
A4_B1 _PAD_;
};
VArena* varena__make(Opts_varena_make*R_ opts);
internal VArena* varena__make(Opts_varena_make*R_ opts);
#define varena_make(...) varena__make(opt_args(Opts_varena_make, __VA_ARGS__))
Slice_Mem varena__push (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
internal Slice_Mem varena__push (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
finline void varena_release(VArena_R arena);
finline void varena_rewind (VArena_R arena, AllocatorSP save_point);
void varena_reset (VArena_R arena);
Slice_Mem varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size);
internal void varena_reset (VArena_R arena);
internal Slice_Mem varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size);
finline AllocatorSP varena_save (VArena_R arena);
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
internal void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define ainfo_varena(varena) (AllocatorInfo) { .proc = & varena_allocator_proc, .data = u8_(varena) }
#define varena_push_mem(arena, amount, ...) varena__push(arena, amount, 1, opt_args(Opts_varena, lit(stringify(B1)), __VA_ARGS__))
@@ -505,14 +504,14 @@ typedef def_struct(Arena) {
A4_B1 _PAD_;
};
typedef Opts_varena_make Opts_arena_make;
Arena* arena__make (Opts_arena_make*R_ opts);
Slice_Mem arena__push (Arena_R arena, U8 amount, U8 type_width, Opts_arena*R_ opts);
internal Arena* arena__make (Opts_arena_make*R_ opts);
internal Slice_Mem arena__push (Arena_R arena, U8 amount, U8 type_width, Opts_arena*R_ opts);
finline void arena_release(Arena_R arena);
finline void arena_reset (Arena_R arena);
void arena_rewind (Arena_R arena, AllocatorSP save_point);
internal void arena_rewind (Arena_R arena, AllocatorSP save_point);
finline AllocatorSP arena_save (Arena_R arena);
void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
internal void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define ainfo_arena(arena) (AllocatorInfo){ .proc = & arena_allocator_proc, .data = u8_(arena) }
#define arena_make(...) arena__make(opt_args(Opts_arena_make, __VA_ARGS__))
@@ -568,7 +567,7 @@ typedef def_farray(Str8, 2);
typedef def_Slice(A2_Str8);
typedef def_KTL_Slot(Str8);
typedef def_KTL(Str8);
void ktl_populate_slice_a2_str8(KTL_Str8*R_ kt, AllocatorInfo backing, Slice_A2_Str8 values);
finline void ktl_populate_slice_a2_str8(KTL_Str8*R_ kt, AllocatorInfo backing, Slice_A2_Str8 values);
#pragma endregion KTL
#pragma region Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
@@ -623,11 +622,11 @@ typedef def_struct(KT1CX_Info) {
AllocatorInfo backing_table;
AllocatorInfo backing_cells;
};
void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result);
void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta);
internal void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result);
internal void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta);
finline U8 kt1cx_slot_id(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta);
U8 kt1cx_get (KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta);
U8 kt1cx_set (KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta);
internal U8 kt1cx_get (KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta);
internal U8 kt1cx_set (KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta);
#define kt1cx_assert(kt) do { \
slice_assert(kt.table); \
@@ -643,13 +642,13 @@ finline U1 integer_symbols(U1 value) {
}
finline char* str8_to_cstr_capped(Str8 content, Slice_Mem mem);
Str8 str8_from_u32(AllocatorInfo ainfo, U4 num, U4 radix, U4 min_digits, U4 digit_group_separator);
internal Str8 str8_from_u32(AllocatorInfo ainfo, U4 num, U4 radix, U4 min_digits, U4 digit_group_separator);
finline Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8*R_ entries);
#define str8_fmt_backed(tbl_backing, buf_backing, fmt_template, ...) \
str8__fmt_backed(tbl_backing, buf_backing, lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8*R_ entries);
internal Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8*R_ entries);
#define str8_fmt(fmt_template, ...) str8__fmt(lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
#define Str8Cache_CELL_DEPTH 4
@@ -672,7 +671,7 @@ typedef def_struct(Opts_str8cache_init) {
U8 cell_pool_size;
U8 table_size;
};
void str8cache__init(Str8Cache_R cache, Opts_str8cache_init*R_ opts);
internal void str8cache__init(Str8Cache_R cache, Opts_str8cache_init*R_ opts);
finline Str8Cache str8cache__make( Opts_str8cache_init*R_ opts);
#define str8cache_init(cache, ...) str8cache__init(cache, opt_args(Opts_str8cache_init, __VA_ARGS__))
@@ -698,7 +697,7 @@ finline Str8Gen str8gen_make( AllocatorInfo backing);
finline Str8 str8_from_str8gen(Str8Gen gen) { return (Str8){ cast(UTF8_R, gen.ptr), gen.len}; }
finline void str8gen_append_str8(Str8Gen_R gen, Str8 str);
void str8gen__append_fmt(Str8Gen_R gen, Str8 fmt_template, Slice_A2_Str8*R_ tokens);
internal void str8gen__append_fmt(Str8Gen_R gen, Str8 fmt_template, Slice_A2_Str8*R_ tokens);
#define str8gen_append_fmt(gen, fmt_template, ...) str8gen__append_fmt(gen, lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
#pragma endregion String Operations
@@ -712,8 +711,8 @@ typedef def_struct(Opts_read_file_contents) {
B4 zero_backing;
A4_B1 _PAD_;
};
void api_file_read_contents(FileOpInfo_R result, Str8 path, Opts_read_file_contents opts);
void file_write_str8 (Str8 path, Str8 content);
internal void api_file_read_contents(FileOpInfo_R result, Str8 path, Opts_read_file_contents opts);
internal void file_write_str8 (Str8 path, Str8 content);
finline FileOpInfo file__read_contents(Str8 path, Opts_read_file_contents*R_ opts);
#define file_read_contents(path, ...) file__read_contents(path, opt_args(Opts_read_file_contents, __VA_ARGS__))
@@ -756,8 +755,8 @@ typedef def_struct(Opts_watl_lex) {
B1 failon_slice_constraint_fail;
A4_B1 _PAD_;
};
void api_watl_lex(WATL_LexInfo_R info, Str8 source, Opts_watl_lex*R_ opts);
WATL_LexInfo watl__lex ( Str8 source, Opts_watl_lex*R_ opts);
internal void api_watl_lex(WATL_LexInfo_R info, Str8 source, Opts_watl_lex*R_ opts);
finline WATL_LexInfo watl__lex ( Str8 source, Opts_watl_lex*R_ opts);
#define watl_lex(source, ...) watl__lex(source, opt_args(Opts_watl_lex, __VA_ARGS__))
typedef Str8 WATL_Node; typedef def_ptr_set(WATL_Node);
@@ -788,11 +787,11 @@ typedef def_struct(Opts_watl_parse) {
B4 failon_slice_constraint_fail;
A4_B1 _PAD_;
};
void api_watl_parse(WATL_ParseInfo_R info, Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts);
WATL_ParseInfo watl__parse ( Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts);
internal void api_watl_parse(WATL_ParseInfo_R info, Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts);
finline WATL_ParseInfo watl__parse ( Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts);
#define watl_parse(tokens, ...) watl__parse(tokens, opt_args(Opts_watl_parse, __VA_ARGS__))
Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
internal Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
#pragma endregion WATL
#pragma endregion Header
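
Throughout this header, out-parameter functions (api_watl_lex, api_file_read_contents, api_watl_parse) are paired with by-value wrappers (watl__lex, file__read_contents, watl__parse); the implementations near the end of this file's diff show the shape. A minimal sketch of the pattern with hypothetical names (Info, api_do, do_thing):

typedef struct { B4 ok; } Info;
internal void api_do  (Info* result, S4 input) { result->ok = input > 0; }
finline  Info do_thing(              S4 input) { Info info = {0}; api_do(& info, input); return info; }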
@@ -824,8 +823,7 @@ void mem_rewind(AllocatorInfo ainfo, AllocatorSP save_point) {
finline
AllocatorSP mem_save_point(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
AllocatorProc_Out out;
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_SavePoint}, & out);
AllocatorProc_Out out; ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_SavePoint}, & out);
return out.save_point;
}
finline
@@ -838,8 +836,7 @@ Slice_Mem mem__alloc(AllocatorInfo ainfo, U8 size, Opts_mem_alloc*R_ opts) {
.requested_size = size,
.alignment = opts->alignment,
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
AllocatorProc_Out out; ainfo.proc(in, & out);
return out.allocation;
}
finline
@@ -853,8 +850,7 @@ Slice_Mem mem__grow(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_grow*R
.alignment = opts->alignment,
.old_allocation = mem
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
AllocatorProc_Out out; ainfo.proc(in, & out);
return (Slice_Mem){ out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
}
finline
@@ -868,8 +864,7 @@ Slice_Mem mem__resize(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_resi
.alignment = opts->alignment,
.old_allocation = mem,
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
AllocatorProc_Out out; ainfo.proc(in, & out);
return (Slice_Mem){ out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
}
finline
@@ -883,8 +878,7 @@ Slice_Mem mem__shrink(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_shri
.alignment = opts->alignment,
.old_allocation = mem
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
AllocatorProc_Out out; ainfo.proc(in, & out);
return out.allocation;
}
#pragma endregion Allocator Interface
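
Every mem__* helper above packs an AllocatorProc_In and dispatches through ainfo.proc, so any arena sits behind the same calls. A usage sketch built only from macros appearing in this diff (buffer size arbitrary):

B1            backing[kilo(64)];
FArena        arena = farena_make(slice_fmem(backing));
AllocatorInfo ainfo = ainfo_farena(arena);         // proc = farena_allocator_proc
Slice_Mem     mem   = mem_alloc(ainfo, kilo(1));   // dispatches AllocatorOp_Alloc
AllocatorSP   sp    = mem_save_point(ainfo);       // AllocatorOp_SavePoint
mem_rewind(ainfo, sp);                             // AllocatorOp_Rewind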
@@ -898,7 +892,7 @@ void farena_init(FArena_R arena, Slice_Mem mem) {
arena->used = 0;
}
finline FArena farena_make(Slice_Mem mem) { FArena a; farena_init(& a, mem); return a; }
inline
internal inline
Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts) {
assert(opts != nullptr);
if (amount == 0) { return (Slice_Mem){}; }
@@ -909,7 +903,7 @@ Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_
arena->used += to_commit;
return (Slice_Mem){ptr, desired};
}
inline
internal inline
Slice_Mem farena__grow(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
// Check if the allocation is at the end of the arena
U8 alloc_end = old_allocation.ptr + old_allocation.len;
@@ -927,11 +921,10 @@ Slice_Mem farena__grow(FArena_R arena, Slice_Mem old_allocation, U8 requested_si
return (Slice_Mem){0};
}
arena->used += aligned_grow;
Slice_Mem result = (Slice_Mem){ old_allocation.ptr, aligned_grow + requested_size };
mem_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(U8, should_zero));
return result;
	return (Slice_Mem){ old_allocation.ptr, requested_size };
}
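
farena__grow only extends in place when the allocation's end coincides with the arena cursor; that is the tail check commented above. A behavioral sketch, assuming the allocator interface from earlier in this diff:

Slice_Mem a = mem_alloc(ainfo, 64);
Slice_Mem b = mem_alloc(ainfo, 64);   // b is now the tail allocation
b = mem_grow(ainfo, b, 128);          // tail: extends in place, b.ptr unchanged
a = mem_grow(ainfo, a, 128);          // non-tail: cannot extend in place; the chained
                                      // Arena's arena__grow (later in this diff) falls
                                      // back to arena__push + mem_copy instead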
inline
internal inline
Slice_Mem farena__shrink(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment)
{
// Check if the allocation is at the end of the arena
@@ -957,6 +950,7 @@ finline
AllocatorSP farena_save (FArena arena) {
return (AllocatorSP){ .type_sig = & farena_allocator_proc, .slot = arena.used };
}
internal
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
{
assert(out != nullptr);
@@ -1010,6 +1004,7 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
#define MS_ANYSIZE_ARRAY 1
#define MS_MEM_COMMIT 0x00001000
#define MS_MEM_RESERVE 0x00002000
#define MS_MEM_RELEASE 0x00008000
#define MS_MEM_LARGE_PAGES 0x20000000
#define MS_PAGE_READWRITE 0x04
#define MS_TOKEN_ADJUST_PRIVILEGES (0x0020)
@@ -1057,7 +1052,7 @@ typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
global OS_Windows_State os__windows_info;
finline OS_SystemInfo* os_system_info(void) { return & os__windows_info.system_info; }
inline
internal inline
void os__enable_large_pages(void) {
MS_HANDLE token;
if (OpenProcessToken(GetCurrentProcess(), MS_TOKEN_ADJUST_PRIVILEGES | MS_TOKEN_QUERY, &token))
@@ -1074,7 +1069,7 @@ void os__enable_large_pages(void) {
CloseHandle(token);
}
}
inline
internal inline
void os_init(void) {
os__enable_large_pages();
OS_SystemInfo*R_ info = & os__windows_info.system_info;
@@ -1096,12 +1091,12 @@ finline B4 os__vmem_commit(U8 vm, U8 size, Opts_vmem*R_ opts) {
B4 result = (VirtualAlloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != 0);
return result;
}
inline void os_vmem_release(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm), 0, MS_MEM_RESERVE); }
internal inline void os_vmem_release(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm), 0, MS_MEM_RELEASE); }
#pragma endregion OS
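
The os_vmem_release fix above is load-bearing: VirtualFree only frees a reservation with MEM_RELEASE (0x8000), and dwSize must then be 0, while the old MS_MEM_RESERVE flag made the call an invalid request that leaked the address space. A sketch of the intended lifecycle, signatures as declared for this file:

U8 base = os_vmem_reserve(mega(64));   // VirtualAlloc(.., MS_MEM_RESERVE, ..)
os_vmem_commit(base, kilo(4));         // VirtualAlloc(.., MS_MEM_COMMIT, ..)
os_vmem_release(base, mega(64));       // VirtualFree(ptr, 0, MS_MEM_RELEASE); the wrapper ignores size by design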
#pragma region VArena (Virtual Address Space Arena)
finline U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
inline
internal inline
VArena* varena__make(Opts_varena_make*R_ opts) {
assert(opts != nullptr);
if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
@@ -1123,7 +1118,7 @@ VArena* varena__make(Opts_varena_make*R_ opts) {
};
return vm;
}
inline
internal inline
Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opts) {
assert(vm != nullptr);
assert(amount != 0);
@@ -1146,11 +1141,11 @@ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opt
vm->committed += next_commit_size;
}
}
vm->commit_used = to_be_used;
U8 current_offset = vm->reserve_start + vm->commit_used;
vm->commit_used = to_be_used;
return (Slice_Mem){.ptr = current_offset, .len = requested_size};
}
inline
internal inline
Slice_Mem varena__grow(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
U8 grow_amount = requested_size - old_allocation.len;
if (grow_amount == 0) { return old_allocation; } // Growing when not the last allocation is not allowed
@@ -1160,7 +1155,7 @@ Slice_Mem varena__grow(VArena_R vm, Slice_Mem old_allocation, U8 requested_size,
return (Slice_Mem){ old_allocation.ptr, old_allocation.len + allocation.len };
}
finline void varena_release(VArena_R arena) { os_vmem_release(u8_(arena), arena->reserve); }
inline
internal inline
Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size) {
U8 shrink_amount = old_allocation.len - requested_size;
if (lt_s(shrink_amount, 0)) { return old_allocation; }
@@ -1175,6 +1170,7 @@ void varena_rewind(VArena_R vm, AllocatorSP sp) {
vm->commit_used = max(sp.slot, sizeof(VArena));
}
finline AllocatorSP varena_save(VArena_R vm) { return (AllocatorSP){varena_allocator_proc, vm->commit_used}; }
internal
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
VArena_R vm = cast(VArena_R, in.data);
@@ -1218,7 +1214,7 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
#pragma region Arena (Chained Arena)
finline U8 arena_header_size(void) { return align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT); }
inline
internal inline
Arena* arena__make(Opts_arena_make*R_ opts) {
assert(opts != nullptr);
U8 header_size = arena_header_size();
@@ -1233,6 +1229,7 @@ Arena* arena__make(Opts_arena_make*R_ opts) {
};
return arena;
}
internal inline
Slice_Mem arena__push(Arena_R arena, U8 amount, U8 type_width, Opts_arena* opts) {
assert(arena != nullptr);
assert(opts != nullptr);
@@ -1264,6 +1261,45 @@ Slice_Mem arena__push(Arena_R arena, U8 amount, U8 type_width, Opts_arena* opts)
active->pos = pos_pst;
return vresult;
}
internal inline
Slice_Mem arena__grow(Arena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
Arena_R active = arena->current;
U8 alloc_end = old_allocation.ptr + old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end == arena_end)
{
U8 grow_amount = requested_size - old_allocation.len;
U8 aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
if (active->pos + aligned_grow <= active->backing->reserve) {
Slice_Mem vresult = varena_push_mem(active->backing, aligned_grow, .alignment = alignment);
if (vresult.ptr != null) {
active->pos += aligned_grow;
mem_zero(old_allocation.ptr + old_allocation.len, aligned_grow * (U8)should_zero);
return (Slice_Mem){old_allocation.ptr, aligned_grow + old_allocation.len};
}
}
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnrvo"
Slice_Mem new_alloc = arena__push(arena, requested_size, 1, &(Opts_arena){.alignment = alignment});
if (new_alloc.ptr == null) { return (Slice_Mem){0}; }
mem_copy(new_alloc.ptr, old_allocation.ptr, old_allocation.len);
mem_zero(new_alloc.ptr + old_allocation.len, (new_alloc.len - old_allocation.len) * (U8)should_zero);
return new_alloc;
#pragma clang diagnostic pop
}
internal inline
Slice_Mem arena__shrink(Arena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment) {
Arena_R active = arena->current;
U8 alloc_end = old_allocation.ptr + old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end != arena_end) { return (Slice_Mem){old_allocation.ptr, requested_size}; }
U8 aligned_original = align_pow2(old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
U8 aligned_new = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
U8 pos_reduction = aligned_original - aligned_new;
active->pos -= pos_reduction;
return varena__shrink(active->backing, old_allocation, requested_size);
}
finline
void arena_release(Arena_R arena) {
assert(arena != nullptr);
@@ -1275,6 +1311,7 @@ void arena_release(Arena_R arena) {
}
}
finline void arena_reset(Arena_R arena) { arena_rewind(arena, (AllocatorSP){.type_sig = arena_allocator_proc, .slot = 0}); }
internal inline
void arena_rewind(Arena_R arena, AllocatorSP save_point) {
assert(arena != nullptr);
assert(save_point.type_sig == arena_allocator_proc);
@@ -1291,6 +1328,7 @@ void arena_rewind(Arena_R arena, AllocatorSP save_point) {
varena_rewind(curr->backing, (AllocatorSP){varena_allocator_proc, curr->pos + size_of(VArena)});
}
finline AllocatorSP arena_save(Arena_R arena) { return (AllocatorSP){arena_allocator_proc, arena->base_pos + arena->current->pos}; }
internal
void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
{
assert(out != nullptr);
@@ -1308,53 +1346,11 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
case AllocatorOp_Reset: arena_reset(arena); break;
case AllocatorOp_Grow:
case AllocatorOp_Grow_NoZero: {
Arena_R active = arena->current;
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end == arena_end)
{
U8 grow_amount = in.requested_size - in.old_allocation.len;
U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
if (active->pos + aligned_grow <= active->backing->reserve)
{
Slice_Mem vresult = varena_push_mem(active->backing, aligned_grow, .alignment = in.alignment);
if (vresult.ptr != null)
{
active->pos += aligned_grow;
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
mem_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * in.op - AllocatorOp_Grow_NoZero);
case AllocatorOp_Grow_NoZero:
out->allocation = arena__grow(arena, in.old_allocation, in.requested_size, in.alignment, in.op - AllocatorOp_Grow_NoZero);
break;
}
}
}
Slice_Mem new_alloc = arena__push(arena, in.requested_size, 1, &(Opts_arena){.alignment = in.alignment});
if (new_alloc.ptr == null) {
out->allocation = (Slice_Mem){0};
break;
}
mem_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
mem_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * in.op - AllocatorOp_Grow_NoZero);
out->allocation = new_alloc;
}
break;
case AllocatorOp_Shrink: {
Arena_R active = arena->current;
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end != arena_end) {
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
break;
}
//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
U8 aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
U8 pos_reduction = aligned_original - aligned_new;
active->pos -= pos_reduction;
varena__shrink(active->backing, in.old_allocation, in.requested_size);
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
}
case AllocatorOp_Shrink:
out->allocation = arena__shrink(arena, in.old_allocation, in.requested_size, in.alignment);
break;
case AllocatorOp_Rewind: arena_rewind(arena, in.save_point); break;
@@ -1377,7 +1373,7 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
#pragma endregion Arena
#pragma region Key Table Linear (KTL)
inline
finline
void ktl_populate_slice_a2_str8(KTL_Str8*R_ kt, AllocatorInfo backing, Slice_A2_Str8 values) {
assert(kt != nullptr);
if (values.len == 0) return;
@@ -1390,7 +1386,7 @@ void ktl_populate_slice_a2_str8(KTL_Str8*R_ kt, AllocatorInfo backing, Slice_A2_
#pragma endregion KTL
#pragma region Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
inline
internal inline
void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result) {
assert(result != nullptr);
assert(info.backing_cells.proc != nullptr);
@@ -1402,7 +1398,7 @@ void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result) {
result->table = mem_alloc(info.backing_table, m.table_size * m.cell_size); slice_assert(result->table);
result->table.len = m.table_size; // Setting to the table number of elements instead of byte length.
}
inline
internal inline
void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
U8 cell_cursor = kt.table.ptr;
U8 table_len = kt.table.len * m.cell_size;
@@ -1425,9 +1421,9 @@ void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
}
finline
U8 kt1cx_slot_id(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta m) {
U8 hash_index = key % kt.table.len;
return hash_index;
return key % kt.table.len;
}
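
Keys are pre-hashed by callers (see cache_str8 below), so kt1cx_slot_id just reduces the 64-bit FNV-1a hash modulo the table length; hash64_fnv1a's default_seed appears later in this diff. A standalone sketch of the same computation:

internal U8 hash_fnv1a(U1 const* bytes, U8 len) {
	U8 hash = 0xcbf29ce484222325ull;   // FNV-1a offset basis (the default_seed below)
	for (U8 i = 0; i < len; ++i) { hash ^= bytes[i]; hash *= 0x100000001b3ull; }   // xor byte, multiply by FNV prime
	return hash;
}
// then: slot = hash_fnv1a(key_bytes, key_len) % kt.table.len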
inline
U8 kt1cx_get(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta m) {
U8 hash_index = kt1cx_slot_id(kt, key, m);
U8 cell_offset = hash_index * m.cell_size;
@@ -1454,7 +1450,7 @@ U8 kt1cx_get(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta m) {
}
}
}
inline
internal
U8 kt1cx_set(KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells, KT1CX_ByteMeta m) {
U8 hash_index = kt1cx_slot_id(kt, key, m);
U8 cell_offset = hash_index * m.cell_size;
@@ -1504,6 +1500,7 @@ char* str8_to_cstr_capped(Str8 content, Slice_Mem mem) {
u1_r(mem.ptr)[copy_len] = '\0';
return cast(char*, mem.ptr);
}
internal
Str8 str8_from_u32(AllocatorInfo ainfo, U4 num, U4 radix, U4 min_digits, U4 digit_group_separator)
{
Str8 result = {0};
@@ -1663,17 +1660,16 @@ finline
Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8*R_ entries) {
KTL_Str8 kt; ktl_populate_slice_a2_str8(& kt, tbl_backing, entries[0] );
U8 buf_size = kilo(64); Slice_Mem buffer = mem_alloc(buf_backing, buf_size);
Str8 result = str8__fmt_ktl(buf_backing, & buffer, kt, fmt_template);
return result;
return str8__fmt_ktl(buf_backing, & buffer, kt, fmt_template);
}
finline
Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8*R_ entries) {
local_persist B1 tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
local_persist B1 buf_mem[kilo(64)];
KTL_Str8 kt = {0}; ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena), entries[0] );
Str8 result = str8__fmt_ktl((AllocatorInfo){0}, & slice_fmem(buf_mem), kt, fmt_template);
return result;
return str8__fmt_ktl((AllocatorInfo){0}, & slice_fmem(buf_mem), kt, fmt_template);
}
inline
internal inline
void str8cache__init(Str8Cache_R cache, Opts_str8cache_init*R_ opts) {
assert(cache != nullptr);
assert(opts != nullptr);
@@ -1761,8 +1757,7 @@ finline
Str8 cache_str8(Str8Cache_R cache, Str8 str) {
assert(cache != nullptr);
U8 key = 0; hash64_fnv1a(& key, slice_mem_s(str));
Str8_R result = str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve);
return result[0];
return str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve)[0];
}
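
cache_str8 above interns a string: hash64_fnv1a produces the key and str8cache_set stores one canonical copy, so repeated calls hand back the same slot. Hypothetical usage (cache construction elided):

Str8 a = cache_str8(& cache, lit("watl"));
Str8 b = cache_str8(& cache, lit("watl"));
// a.ptr == b.ptr: both name the single copy backed by cache->str_reserve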
finline
void str8gen_init(Str8Gen_R gen, AllocatorInfo backing) {
@@ -1784,6 +1779,7 @@ void str8gen_append_str8(Str8Gen_R gen, Str8 str){
gen->len += str.len;
gen->cap = result.len;
}
internal inline
void str8gen__append_fmt(Str8Gen_R gen, Str8 fmt_template, Slice_A2_Str8*R_ entries){
local_persist B1 tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
KTL_Str8 kt = {0}; ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena), entries[0] );
@@ -1839,6 +1835,7 @@ FileOpInfo file__read_contents(Str8 path, Opts_read_file_contents*R_ opts) {
FileOpInfo result = {0}; api_file_read_contents(& result, path, opts[0]);
return result;
}
internal
void api_file_read_contents(FileOpInfo_R result, Str8 path, Opts_read_file_contents opts)
{
assert(result != nullptr);
@@ -1898,6 +1895,7 @@ void api_file_read_contents(FileOpInfo_R result, Str8 path, Opts_read_file_conte
result->content.len = u8_(file_size.QuadPart);
return;
}
internal
void file_write_str8(Str8 path, Str8 content)
{
slice_assert(path);
@@ -1972,6 +1970,7 @@ int printf_err(char const* fmt, ...) {
va_end(args);
return result;
}
internal inline
void assert_handler( UTF8*R_ condition, UTF8*R_ file, UTF8*R_ function, S4 line, UTF8*R_ msg, ... ) {
printf_err( "%s - %s:(%d): Assert Failure: ", file, function, line );
if ( condition )
@@ -1988,6 +1987,7 @@ void assert_handler( UTF8*R_ condition, UTF8*R_ file, UTF8*R_ function, S4 line,
#pragma endregion Debug
#pragma region WATL
internal
void api_watl_lex(WATL_LexInfo_R info, Str8 source, Opts_watl_lex*R_ opts)
{
if (source.len == 0) { return; }
@@ -2077,7 +2077,7 @@ slice_constraint_fail:
return;
}
inline WATL_LexInfo watl__lex(Str8 source, Opts_watl_lex*R_ opts) { WATL_LexInfo info = {0}; api_watl_lex(& info, source, opts); return info; }
internal
void api_watl_parse(WATL_ParseInfo_R info, Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts)
{
if (tokens.len == 0) { return; }
@@ -2138,8 +2138,8 @@ void api_watl_parse(WATL_ParseInfo_R info, Slice_WATL_Tok tokens, Opts_watl_pars
}
return;
}
inline WATL_ParseInfo watl__parse(Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts) { WATL_ParseInfo info = {0}; api_watl_parse(& info, tokens, opts); return info; }
finline WATL_ParseInfo watl__parse(Slice_WATL_Tok tokens, Opts_watl_parse*R_ opts) { WATL_ParseInfo info = {0}; api_watl_parse(& info, tokens, opts); return info; }
internal
Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)
{
local_persist B1 scratch[kilo(64)] = {0}; FArena sarena = farena_make(slice_fmem(scratch)); AllocatorInfo sinfo = ainfo_farena(sarena);
@@ -2159,8 +2159,7 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)
for slice_iter(line[0], chunk)
{
Str8 id;
switch (chunk->ptr[0])
{
switch (chunk->ptr[0]) {
case WATL_Tok_Space: id = lit("Space"); break;
case WATL_Tok_Tab: id = lit("Tab"); break;
default: id = lit("Visible"); break;


@@ -58,11 +58,6 @@ enum { false = 0, true = 1, true_overflow, };
#define offset_of(type, member) cast(SSIZE, & (((type*) 0)->member))
#define size_of(data) cast(SSIZE, sizeof(data))
// Not using this since it's lottes related.
// #define R_ __restrict
// #define V_ volatile
// #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
// #define v_(ptr) cast(typeof_ptr(ptr)*V_, ptr)
#define ssize(value) cast(SSIZE, value)
#define kilo(n) (cast(SSIZE, n) << 10)
@@ -104,12 +99,40 @@ typedef def_struct(Slice_Str8) { Str8* ptr; SSIZE len; };
debug_trap(); \
} \
} while(0)
void assert_handler( char const* condition, char const* file, char const* function, S32 line, char const* msg, ... );
internal void assert_handler( char const* condition, char const* file, char const* function, S32 line, char const* msg, ... );
#endif
#pragma endregion Debug
#pragma region Memory
inline SSIZE align_pow2(SSIZE x, SSIZE b);
// #include <memory.h>
void* __cdecl memcpy (void* _Dst, void const* _Src, USIZE _Size);
void* __cdecl memmove(void* _Dst, void const* _Src, USIZE _Size);
void* __cdecl memset (void* _Dst, int _Val, USIZE _Size);
internal inline
SSIZE align_pow2(SSIZE x, SSIZE b) {
assert(b != 0);
assert((b & (b - 1)) == 0); // Check power of 2
return ((x + b - 1) & (~(b - 1)));
}
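
The mask form rounds x up to the next multiple of the power-of-two b. Quick worked checks:

assert(align_pow2(13, 8)  == 16);   // (13 + 7) & ~7
assert(align_pow2(16, 8)  == 16);   // already-aligned values pass through
assert(align_pow2( 1, 64) == 64);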
internal inline
void* mem_copy(void* restrict dest, void const* restrict src, USIZE length) {
if (dest == nullptr || src == nullptr) { return nullptr; }
memcpy(dest, src, length);
return dest;
}
internal inline
void* memory_copy_overlapping(void* restrict dest, void const* restrict src, USIZE length) {
if (dest == nullptr || src == nullptr) { return nullptr; }
memmove(dest, src, length);
return dest;
}
internal inline
B32 mem_zero(void* dest, USIZE length) {
if (dest == nullptr) return false;
memset((unsigned char*)dest, 0, length);
return true;
}
#define align_struct(type_width) ((SSIZE)(((type_width) + 7) / 8 * 8))
@@ -118,10 +141,6 @@ inline SSIZE align_pow2(SSIZE x, SSIZE b);
assert(ssize(point) <= ssize(end)); \
} while(0)
void* memory_copy (void* restrict dest, void const* restrict src, USIZE length);
void* memory_copy_overlapping(void* restrict dest, void const* restrict src, USIZE length);
B32 memory_zero (void* dest, USIZE length);
#define check_nil(nil, p) ((p) == 0 || (p) == nil)
#define set_nil(nil, p) ((p) = nil)
@@ -156,8 +175,14 @@ typedef def_Slice(Byte);
#define slice_fmem(mem) ((Slice_Byte){ mem, size_of(mem) })
#define slice_to_bytes(slice) ((Slice_Byte){cast(Byte*, (slice).ptr), (slice).len * size_of_slice_type(slice)})
void slice__copy(Slice_Byte dest, SSIZE dest_typewidth, Slice_Byte src, SSIZE src_typewidth);
void slice__zero(Slice_Byte mem, SSIZE typewidth);
internal inline void slice__zero(Slice_Byte mem, SSIZE typewidth) { slice_assert(mem); mem_zero(mem.ptr, mem.len); }
internal inline
void slice__copy(Slice_Byte dest, SSIZE dest_typewidth, Slice_Byte src, SSIZE src_typewidth) {
assert(dest.len >= src.len);
slice_assert(dest);
slice_assert(src);
mem_copy(dest.ptr, src.ptr, src.len);
}
#define slice_copy(dest, src) do { \
static_assert(typeof_same(dest, src)); \
slice__copy(slice_to_bytes(dest), size_of_slice_type(dest), slice_to_bytes(src), size_of_slice_type(src)); \
@@ -271,20 +296,20 @@ static_assert(size_of(AllocatorProc_Out) == size_of(AllocatorQueryInfo));
AllocatorQueryInfo allocator_query(AllocatorInfo ainfo);
void mem_free (AllocatorInfo ainfo, Slice_Byte mem);
void mem_reset (AllocatorInfo ainfo);
void mem_rewind (AllocatorInfo ainfo, AllocatorSP save_point);
AllocatorSP mem_save_point(AllocatorInfo ainfo);
internal void mem_free (AllocatorInfo ainfo, Slice_Byte mem);
internal void mem_reset (AllocatorInfo ainfo);
internal void mem_rewind (AllocatorInfo ainfo, AllocatorSP save_point);
internal AllocatorSP mem_save_point(AllocatorInfo ainfo);
typedef def_struct(Opts_mem_alloc) { SSIZE alignment; B32 no_zero; byte_pad(4); };
typedef def_struct(Opts_mem_grow) { SSIZE alignment; B32 no_zero; B32 give_actual; };
typedef def_struct(Opts_mem_shrink) { SSIZE alignment; };
typedef def_struct(Opts_mem_resize) { SSIZE alignment; B32 no_zero; B32 give_actual; };
Slice_Byte mem__alloc (AllocatorInfo ainfo, SSIZE size, Opts_mem_alloc* opts);
Slice_Byte mem__grow (AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_grow* opts);
Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_resize* opts);
Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_shrink* opts);
internal Slice_Byte mem__alloc (AllocatorInfo ainfo, SSIZE size, Opts_mem_alloc* opts);
internal Slice_Byte mem__grow (AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_grow* opts);
internal Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_resize* opts);
internal Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_shrink* opts);
#define mem_alloc(ainfo, size, ...) mem__alloc (ainfo, size, opt_args(Opts_mem_alloc, __VA_ARGS__))
#define mem_grow(ainfo, mem, size, ...) mem__grow (ainfo, mem, size, opt_args(Opts_mem_grow, __VA_ARGS__))
@@ -305,12 +330,12 @@ typedef def_struct(FArena) {
SSIZE capacity;
SSIZE used;
};
FArena farena_make (Slice_Byte mem);
void farena_init (FArena* arena, Slice_Byte byte);
Slice_Byte farena__push (FArena* arena, SSIZE amount, SSIZE type_width, Opts_farena* opts);
void farena_reset (FArena* arena);
void farena_rewind(FArena* arena, AllocatorSP save_point);
AllocatorSP farena_save (FArena arena);
internal FArena farena_make (Slice_Byte mem);
internal void farena_init (FArena* arena, Slice_Byte byte);
internal Slice_Byte farena__push (FArena* arena, SSIZE amount, SSIZE type_width, Opts_farena* opts);
internal void farena_reset (FArena* arena);
internal void farena_rewind(FArena* arena, AllocatorSP save_point);
internal AllocatorSP farena_save (FArena arena);
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
#define ainfo_farena(arena) (AllocatorInfo){ .proc = farena_allocator_proc, .data = & arena }
@@ -331,12 +356,12 @@ typedef def_struct(Opts_vmem) {
B32 no_large_pages;
byte_pad(4);
};
void os_init(void);
OS_SystemInfo* os_system_info(void);
internal void os_init (void);
internal OS_SystemInfo* os_system_info(void);
inline B32 os__vmem_commit (void* vm, SSIZE size, Opts_vmem* opts);
inline Byte* os__vmem_reserve( SSIZE size, Opts_vmem* opts);
inline void os_vmem_release (void* vm, SSIZE size);
internal inline B32 os__vmem_commit (void* vm, SSIZE size, Opts_vmem* opts);
internal inline Byte* os__vmem_reserve( SSIZE size, Opts_vmem* opts);
internal inline void os_vmem_release (void* vm, SSIZE size);
#define os_vmem_reserve(size, ...) os__vmem_reserve( size, opt_args(Opts_vmem, __VA_ARGS__))
#define os_vmem_commit(vm, size, ...) os__vmem_commit(vm, size, opt_args(Opts_vmem, __VA_ARGS__))
@@ -363,17 +388,17 @@ typedef def_struct(Opts_varena_make) {
VArenaFlags flags;
byte_pad(4);
};
VArena* varena__make(Opts_varena_make* opts);
internal VArena* varena__make(Opts_varena_make* opts);
#define varena_make(...) varena__make(opt_args(Opts_varena_make, __VA_ARGS__))
Slice_Byte varena__push (VArena* arena, SSIZE amount, SSIZE type_width, Opts_varena* opts);
void varena_release(VArena* arena);
void varena_rewind (VArena* arena, AllocatorSP save_point);
void varena_reset (VArena* arena);
Slice_Byte varena__shrink(VArena* arena, Slice_Byte old_allocation, SSIZE requested_size);
AllocatorSP varena_save (VArena* arena);
internal Slice_Byte varena__push (VArena* arena, SSIZE amount, SSIZE type_width, Opts_varena* opts);
internal void varena_release(VArena* arena);
internal void varena_rewind (VArena* arena, AllocatorSP save_point);
internal void varena_reset (VArena* arena);
internal Slice_Byte varena__shrink(VArena* arena, Slice_Byte old_allocation, SSIZE requested_size);
internal AllocatorSP varena_save (VArena* arena);
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
internal void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
#define ainfo_varena(varena) (AllocatorInfo) { .proc = & varena_allocator_proc, .data = varena }
#define varena_push(arena, type, ...) \
@@ -399,14 +424,14 @@ typedef def_struct(Arena) {
byte_pad(4);
};
typedef Opts_varena_make Opts_arena_make;
Arena* arena__make (Opts_arena_make* opts);
Slice_Byte arena__push (Arena* arena, SSIZE amount, SSIZE type_width, Opts_arena* opts);
void arena_release(Arena* arena);
void arena_reset (Arena* arena);
void arena_rewind (Arena* arena, AllocatorSP save_point);
AllocatorSP arena_save (Arena* arena);
internal Arena* arena__make (Opts_arena_make* opts);
internal Slice_Byte arena__push (Arena* arena, SSIZE amount, SSIZE type_width, Opts_arena* opts);
internal void arena_release(Arena* arena);
internal void arena_reset (Arena* arena);
internal void arena_rewind (Arena* arena, AllocatorSP save_point);
internal AllocatorSP arena_save (Arena* arena);
void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
internal void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
#define ainfo_arena(arena) (AllocatorInfo){ .proc = & arena_allocator_proc, .data = arena }
#define arena_make(...) arena__make(opt_args(Opts_arena_make, __VA_ARGS__))
@@ -420,7 +445,7 @@ cast(type*, arena__push(arena, 1, size_of(type), opt_args(Opts_arena, lit(string
#pragma region Hashing
typedef def_struct(Opts_hash64_fnv1a) { U64 seed; };
inline
internal inline
void hash64__fnv1a(U64* hash, Slice_Byte data, Opts_hash64_fnv1a* opts) {
local_persist U64 const default_seed = 0xcbf29ce484222325;
assert(opts != nullptr); if (opts->seed == 0) opts->seed = default_seed;
@@ -451,13 +476,13 @@ typedef def_farray(Str8, 2);
typedef def_Slice(A2_Str8);
typedef def_KTL_Slot(Str8);
typedef def_KTL(Str8);
inline
internal inline
void ktl_populate_slice_a2_str8(KTL_Str8* kt, AllocatorInfo backing, Slice_A2_Str8 values) {
assert(kt != nullptr);
if (values.len == 0) return;
* kt = alloc_slice(backing, KTL_Slot_Str8, values.len);
for span_iter(SSIZE, id, 0, <, values.len) {
memory_copy(& kt->ptr[id.cursor].value, & values.ptr[id.cursor][1], size_of(Str8));
mem_copy(& kt->ptr[id.cursor].value, & values.ptr[id.cursor][1], size_of(Str8));
hash64_fnv1a(& kt->ptr[id.cursor].key, slice_to_bytes(values.ptr[id.cursor][0]));
}
}
@@ -515,11 +540,11 @@ typedef def_struct(KT1CX_Info) {
AllocatorInfo backing_table;
AllocatorInfo backing_cells;
};
void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result);
void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta);
U64 kt1cx_slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta);
Byte* kt1cx_get (KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta);
Byte* kt1cx_set (KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta);
internal void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result);
internal void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta);
internal U64 kt1cx_slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta);
internal Byte* kt1cx_get (KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta);
internal Byte* kt1cx_set (KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta);
#define kt1cx_assert(kt) do { \
slice_assert(kt.table); \
@@ -528,20 +553,20 @@ Byte* kt1cx_set (KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo bac
#pragma endregion KT1CX
#pragma region String Operations
inline B32 char_is_upper(U8 c) { return('A' <= c && c <= 'Z'); }
inline U8 char_to_lower(U8 c) { if (char_is_upper(c)) { c += ('a' - 'A'); } return(c); }
inline U8 integer_symbols(U8 value) {
internal inline B32 char_is_upper(U8 c) { return('A' <= c && c <= 'Z'); }
internal inline U8 char_to_lower(U8 c) { if (char_is_upper(c)) { c += ('a' - 'A'); } return(c); }
internal inline U8 integer_symbols(U8 value) {
local_persist U8 lookup_table[16] = { '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F', }; return lookup_table[value];
}
char* str8_to_cstr_capped(Str8 content, Slice_Byte mem);
Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 digit_group_separator);
internal char* str8_to_cstr_capped(Str8 content, Slice_Byte mem);
internal Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 digit_group_separator);
Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries);
internal Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries);
#define str8_fmt_backed(tbl_backing, buf_backing, fmt_template, ...) \
str8__fmt_backed(tbl_backing, buf_backing, lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8* entries);
internal Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8* entries);
#define str8_fmt(fmt_template, ...) str8__fmt(lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
#define Str8Cache_CELL_DEPTH 4
@@ -564,17 +589,17 @@ typedef def_struct(Opts_str8cache_init) {
SSIZE cell_pool_size;
SSIZE table_size;
};
void str8cache__init(Str8Cache* cache, Opts_str8cache_init* opts);
Str8Cache str8cache__make( Opts_str8cache_init* opts);
internal void str8cache__init(Str8Cache* cache, Opts_str8cache_init* opts);
internal Str8Cache str8cache__make( Opts_str8cache_init* opts);
#define str8cache_init(cache, ...) str8cache__init(cache, opt_args(Opts_str8cache_init, __VA_ARGS__))
#define str8cache_make(...) str8cache__make( opt_args(Opts_str8cache_init, __VA_ARGS__))
void str8cache_clear(KT1CX_Str8 kt);
Str8* str8cache_get (KT1CX_Str8 kt, U64 key);
Str8* str8cache_set (KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserve, AllocatorInfo backing_cells);
internal void str8cache_clear(KT1CX_Str8 kt);
internal Str8* str8cache_get (KT1CX_Str8 kt, U64 key);
internal Str8* str8cache_set (KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserve, AllocatorInfo backing_cells);
Str8 cache_str8(Str8Cache* cache, Str8 str);
internal Str8 cache_str8(Str8Cache* cache, Str8 str);
typedef def_struct(Str8Gen) {
AllocatorInfo backing;
@@ -582,15 +607,15 @@ typedef def_struct(Str8Gen) {
SSIZE len;
SSIZE cap;
};
void str8gen_init(Str8Gen* gen, AllocatorInfo backing);
Str8Gen str8gen_make( AllocatorInfo backing);
internal void str8gen_init(Str8Gen* gen, AllocatorInfo backing);
internal Str8Gen str8gen_make( AllocatorInfo backing);
#define str8gen_slice_byte(gen) (Slice_Byte){ cast(Byte*, (gen).ptr), (gen).cap }
inline Str8 str8_from_str8gen(Str8Gen gen) { return (Str8){gen.ptr, gen.len}; }
internal inline Str8 str8_from_str8gen(Str8Gen gen) { return (Str8){gen.ptr, gen.len}; }
void str8gen_append_str8(Str8Gen* gen, Str8 str);
void str8gen__append_fmt(Str8Gen* gen, Str8 fmt_template, Slice_A2_Str8* tokens);
internal void str8gen_append_str8(Str8Gen* gen, Str8 str);
internal void str8gen__append_fmt(Str8Gen* gen, Str8 fmt_template, Slice_A2_Str8* tokens);
#define str8gen_append_fmt(gen, fmt_template, ...) str8gen__append_fmt(gen, lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
#pragma endregion String Operations
@@ -604,10 +629,10 @@ typedef def_struct(Opts_file_read_contents) {
B32 zero_backing;
byte_pad(4);
};
void api_file_read_contents(FileOpInfo* result, Str8 path, Opts_file_read_contents opts);
void file_write_str8 (Str8 path, Str8 content);
internal void api_file_read_contents(FileOpInfo* result, Str8 path, Opts_file_read_contents opts);
internal void file_write_str8 (Str8 path, Str8 content);
FileOpInfo file__read_contents(Str8 path, Opts_file_read_contents* opts);
internal FileOpInfo file__read_contents(Str8 path, Opts_file_read_contents* opts);
#define file_read_contents(path, ...) file__read_contents(path, opt_args(Opts_file_read_contents, __VA_ARGS__))
#pragma endregion File System
@@ -648,8 +673,8 @@ typedef def_struct(Opts_watl_lex) {
B8 failon_slice_constraint_fail;
byte_pad(5);
};
void api_watl_lex(WATL_LexInfo* info, Str8 source, Opts_watl_lex* opts);
WATL_LexInfo watl__lex ( Str8 source, Opts_watl_lex* opts);
internal void api_watl_lex(WATL_LexInfo* info, Str8 source, Opts_watl_lex* opts);
internal WATL_LexInfo watl__lex ( Str8 source, Opts_watl_lex* opts);
#define watl_lex(source, ...) watl__lex(source, opt_args(Opts_watl_lex, __VA_ARGS__))
typedef Str8 WATL_Node;
@@ -680,87 +705,47 @@ typedef def_struct(Opts_watl_parse) {
B32 failon_slice_constraint_fail;
byte_pad(4);
};
void api_watl_parse(WATL_ParseInfo* info, Slice_WATL_Tok tokens, Opts_watl_parse* opts);
WATL_ParseInfo watl__parse ( Slice_WATL_Tok tokens, Opts_watl_parse* opts);
internal void api_watl_parse(WATL_ParseInfo* info, Slice_WATL_Tok tokens, Opts_watl_parse* opts);
internal WATL_ParseInfo watl__parse ( Slice_WATL_Tok tokens, Opts_watl_parse* opts);
#define watl_parse(tokens, ...) watl__parse(tokens, opt_args(Opts_watl_parse, __VA_ARGS__))
Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
internal Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
#pragma endregion WATL
#pragma endregion Header
#pragma region Implementation
#pragma region Memory Operations
// #include <memory.h>
void* __cdecl memcpy (void* _Dst, void const* _Src, USIZE _Size);
void* __cdecl memmove(void* _Dst, void const* _Src, USIZE _Size);
void* __cdecl memset (void* _Dst, int _Val, USIZE _Size);
inline
SSIZE align_pow2(SSIZE x, SSIZE b) {
assert(b != 0);
assert((b & (b - 1)) == 0); // Check power of 2
return ((x + b - 1) & (~(b - 1)));
}
inline
void* memory_copy(void* restrict dest, void const* restrict src, USIZE length) {
if (dest == nullptr || src == nullptr) { return nullptr; }
memcpy(dest, src, length);
return dest;
}
inline
void* memory_copy_overlapping(void* restrict dest, void const* restrict src, USIZE length) {
if (dest == nullptr || src == nullptr) { return nullptr; }
memmove(dest, src, length);
return dest;
}
inline
B32 memory_zero(void* dest, USIZE length) {
if (dest == nullptr) return false;
memset((unsigned char*)dest, 0, length);
return true;
}
inline void slice__zero(Slice_Byte mem, SSIZE typewidth) { slice_assert(mem); memory_zero(mem.ptr, mem.len); }
inline
void slice__copy(Slice_Byte dest, SSIZE dest_typewidth, Slice_Byte src, SSIZE src_typewidth) {
assert(dest.len >= src.len);
slice_assert(dest);
slice_assert(src);
memory_copy(dest.ptr, src.ptr, src.len);
}
#pragma endregion Memory Operations
#pragma region Allocator Interface
inline
internal inline
AllocatorQueryInfo allocator_query(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
AllocatorQueryInfo out; ainfo.proc((AllocatorProc_In){ .data = ainfo.data, .op = AllocatorOp_Query}, (AllocatorProc_Out*)& out);
return out;
}
inline
internal inline
void mem_free(AllocatorInfo ainfo, Slice_Byte mem) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Free, .old_allocation = mem}, &(AllocatorProc_Out){});
}
inline
internal inline
void mem_reset(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Reset}, &(AllocatorProc_Out){});
}
inline
internal inline
void mem_rewind(AllocatorInfo ainfo, AllocatorSP save_point) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Rewind, .save_point = save_point}, &(AllocatorProc_Out){});
}
inline
internal inline
AllocatorSP mem_save_point(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
AllocatorProc_Out out;
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_SavePoint}, & out);
return out.save_point;
}
inline
internal inline
Slice_Byte mem__alloc(AllocatorInfo ainfo, SSIZE size, Opts_mem_alloc* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
@@ -774,7 +759,7 @@ Slice_Byte mem__alloc(AllocatorInfo ainfo, SSIZE size, Opts_mem_alloc* opts) {
ainfo.proc(in, & out);
return out.allocation;
}
inline
internal inline
Slice_Byte mem__grow(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_grow* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
@@ -789,7 +774,7 @@ Slice_Byte mem__grow(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_g
ainfo.proc(in, & out);
return (Slice_Byte){out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
}
inline
internal inline
Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_resize* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
@@ -804,7 +789,7 @@ Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem
ainfo.proc(in, & out);
return (Slice_Byte){out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
}
inline
internal inline
Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_shrink* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
@@ -822,15 +807,15 @@ Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem
#pragma endregion Allocator Interface
#pragma region FArena (Fixed-Sized Arena)
inline
internal inline
void farena_init(FArena* arena, Slice_Byte mem) {
assert(arena != nullptr);
arena->start = mem.ptr;
arena->capacity = mem.len;
arena->used = 0;
}
inline FArena farena_make(Slice_Byte mem) { FArena a; farena_init(& a, mem); return a; }
inline
internal inline FArena farena_make(Slice_Byte mem) { FArena a; farena_init(& a, mem); return a; }
internal inline
Slice_Byte farena__push(FArena* arena, SSIZE amount, SSIZE type_width, Opts_farena* opts) {
assert(opts != nullptr);
if (amount == 0) {
@@ -843,7 +828,7 @@ Slice_Byte farena__push(FArena* arena, SSIZE amount, SSIZE type_width, Opts_fare
arena->used += to_commit;
return (Slice_Byte){ptr, desired};
}
inline
internal inline
Slice_Byte farena__grow(FArena* arena, SSIZE requested_size, Slice_Byte old_allocation, SSIZE alignment, B32 should_zero) {
// Check if the allocation is at the end of the arena
Byte* alloc_end = old_allocation.ptr + old_allocation.len;
@@ -861,10 +846,10 @@ Slice_Byte farena__grow(FArena* arena, SSIZE requested_size, Slice_Byte old_allo
return (Slice_Byte){0};
}
arena->used += aligned_grow;
memory_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(SSIZE, should_zero));
mem_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(SSIZE, should_zero));
return (Slice_Byte){old_allocation.ptr, requested_size};
}
inline
internal inline
Slice_Byte farena__shrink(FArena* arena, Slice_Byte old_allocation, SSIZE requested_size, SSIZE alignment) {
// Check if the allocation is at the end of the arena
Byte* alloc_end = old_allocation.ptr + old_allocation.len;
@@ -880,17 +865,18 @@ Slice_Byte farena__shrink(FArena* arena, Slice_Byte old_allocation, SSIZE reques
arena->used -= (aligned_original - aligned_new);
return (Slice_Byte){old_allocation.ptr, requested_size};
}
inline void farena_reset(FArena* arena) { arena->used = 0; }
inline
internal inline void farena_reset(FArena* arena) { arena->used = 0; }
internal inline
void farena_rewind(FArena* arena, AllocatorSP save_point) {
assert(save_point.type_sig == & farena_allocator_proc);
Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used); assert_bounds(save_point.slot, arena->start, end);
arena->used -= save_point.slot - cast(SSIZE, arena->start);
}
inline
internal inline
AllocatorSP farena_save (FArena arena) {
return (AllocatorSP){ .type_sig = & farena_allocator_proc, .slot = cast(SSIZE, arena.used) };
}
internal
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
assert(out != nullptr);
@@ -901,7 +887,7 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = farena__push(arena, in.requested_size, 1, &(Opts_farena){.type_name = lit("Byte"), .alignment = in.alignment});
memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
mem_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
break;
case AllocatorOp_Free: break;
@@ -944,6 +930,7 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
#define MS_ANYSIZE_ARRAY 1
#define MS_MEM_COMMIT 0x00001000
#define MS_MEM_RESERVE 0x00002000
#define MS_MEM_RELEASE 0x00008000
#define MS_MEM_LARGE_PAGES 0x20000000
#define MS_PAGE_READWRITE 0x04
#define MS_TOKEN_ADJUST_PRIVILEGES (0x0020)
@@ -992,11 +979,11 @@ typedef def_struct(OS_Windows_State) {
};
global OS_Windows_State os__windows_info;
inline
internal inline
OS_SystemInfo* os_system_info(void) {
return & os__windows_info.system_info;
}
inline
internal inline
void os__enable_large_pages(void) {
MS_HANDLE token;
if (OpenProcessToken(GetCurrentProcess(), MS_TOKEN_ADJUST_PRIVILEGES | MS_TOKEN_QUERY, &token))
@@ -1013,14 +1000,15 @@ void os__enable_large_pages(void) {
CloseHandle(token);
}
}
inline
internal inline
void os_init(void) {
os__enable_large_pages();
OS_SystemInfo* info = & os__windows_info.system_info;
info->target_page_size = (SSIZE)GetLargePageMinimum();
}
// TODO(Ed): Large pages disabled for now... (not failing gracefully)
inline Byte* os__vmem_reserve(SSIZE size, Opts_vmem* opts) {
internal inline
Byte* os__vmem_reserve(SSIZE size, Opts_vmem* opts) {
assert(opts != nullptr);
void* result = VirtualAlloc(cast(void*, opts->base_addr), size
, MS_MEM_RESERVE
@@ -1029,18 +1017,18 @@ inline Byte* os__vmem_reserve(SSIZE size, Opts_vmem* opts) {
);
return result;
}
inline B32 os__vmem_commit(void* vm, SSIZE size, Opts_vmem* opts) {
internal inline B32 os__vmem_commit(void* vm, SSIZE size, Opts_vmem* opts) {
assert(opts != nullptr);
// if (opts->no_large_pages == false ) { return 1; }
B32 result = (VirtualAlloc(vm, size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != 0);
return result;
}
inline void os_vmem_release(void* vm, SSIZE size) { VirtualFree(vm, 0, MS_MEM_RESERVE); }
internal inline void os_vmem_release(void* vm, SSIZE size) { VirtualFree(vm, 0, MS_MEM_RELEASE); }
#pragma endregion OS
#pragma region VArena (Virtual Address Space Arena)
finline SSIZE varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
inline
internal finline SSIZE varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
internal inline
VArena* varena__make(Opts_varena_make* opts) {
assert(opts != nullptr);
if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
@@ -1062,7 +1050,7 @@ VArena* varena__make(Opts_varena_make* opts) {
};
return vm;
}
inline
internal inline
Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena* opts) {
assert(vm != nullptr);
assert(amount != 0);
@@ -1085,21 +1073,21 @@ Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena*
vm->committed += next_commit_size;
}
}
vm->commit_used = to_be_used;
SSIZE current_offset = vm->reserve_start + vm->commit_used;
vm->commit_used = to_be_used;
return (Slice_Byte){.ptr = cast(Byte*, current_offset), .len = requested_size};
}
inline
internal inline
Slice_Byte varena__grow(VArena* vm, SSIZE requested_size, Slice_Byte old_allocation, SSIZE alignment, B32 should_zero) {
assert(vm != nullptr);
SSIZE grow_amount = requested_size - old_allocation.len;
if (grow_amount == 0) { return old_allocation; } // Growing when not the last allocation is not allowed
SSIZE current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == cast(Byte*, current_offset));
Slice_Byte allocation = varena_push_array(vm, Byte, grow_amount, alignment); assert(allocation.ptr != nullptr);
memory_zero(allocation.ptr, allocation.len * should_zero);
mem_zero(allocation.ptr, allocation.len * should_zero);
return (Slice_Byte){ old_allocation.ptr, old_allocation.len + allocation.len };
}
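// Usage sketch (comments only; assumes the wrappers above): a VArena hands out
// memory linearly, so only the most recent (tail) allocation can be extended —
// the assert above enforces it.
//   Slice_Byte a = varena_push_array(vm, Byte, 64);               // `a` is the tail
//   a = varena__grow(vm, 128, a, MEMORY_ALIGNMENT_DEFAULT, true); // extends in place
//   Slice_Byte b = varena_push_array(vm, Byte, 16);               // new tail; growing `a` now would assert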
inline Slice_Byte varena__shrink(VArena* vm, Slice_Byte old_allocation, SSIZE requested_size) {
internal inline Slice_Byte varena__shrink(VArena* vm, Slice_Byte old_allocation, SSIZE requested_size) {
SSIZE current_offset = vm->reserve_start + vm->commit_used;
SSIZE shrink_amount = old_allocation.len - requested_size;
if (shrink_amount < 0) { return old_allocation; }
@@ -1107,14 +1095,15 @@ inline Slice_Byte varena__shrink(VArena* vm, Slice_Byte old_allocation, SSIZE re
vm->commit_used -= shrink_amount;
return (Slice_Byte){ old_allocation.ptr, requested_size };
}
inline void varena_release(VArena* arena) { os_vmem_release(arena, arena->reserve); }
inline
internal inline void varena_release(VArena* arena) { os_vmem_release(arena, arena->reserve); }
internal inline
void varena_rewind(VArena* vm, AllocatorSP sp) {
assert(vm != nullptr);
assert(sp.type_sig == & varena_allocator_proc);
vm->commit_used = max(sp.slot, sizeof(VArena));
}
inline AllocatorSP varena_save(VArena* vm) { return (AllocatorSP){varena_allocator_proc, vm->commit_used}; }
internal inline AllocatorSP varena_save(VArena* vm) { return (AllocatorSP){varena_allocator_proc, vm->commit_used}; }
internal
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
VArena* vm = cast(VArena*, in.data);
@@ -1123,7 +1112,7 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = varena_push_array(vm, Byte, in.requested_size, .alignment = in.alignment);
memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
mem_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
break;
case AllocatorOp_Free: break;
@@ -1157,7 +1146,7 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
#pragma endregion VArena
#pragma region Arena (Chained Arena)
inline
internal inline
Arena* arena__make(Opts_arena_make* opts) {
assert(opts != nullptr);
SSIZE header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
@@ -1172,6 +1161,7 @@ Arena* arena__make(Opts_arena_make* opts) {
};
return arena;
}
internal inline
Slice_Byte arena__push(Arena* arena, SSIZE amount, SSIZE type_width, Opts_arena* opts) {
assert(arena != nullptr);
assert(opts != nullptr);
@@ -1203,6 +1193,44 @@ Slice_Byte arena__push(Arena* arena, SSIZE amount, SSIZE type_width, Opts_arena*
active->pos = pos_pst;
return vresult;
}
internal inline
Slice_Byte arena__grow(Arena* arena, Slice_Byte old_allocation, SSIZE requested_size, SSIZE alignment, B32 should_zero) {
Arena* active = arena->current;
Byte* alloc_end = old_allocation.ptr + old_allocation.len;
Byte* arena_end = cast(Byte*, active) + active->pos;
if (alloc_end == arena_end)
{
SSIZE grow_amount = requested_size - old_allocation.len;
SSIZE aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
if (active->pos + aligned_grow <= active->backing->reserve) {
Slice_Byte vresult = varena_push_array(active->backing, Byte, aligned_grow, .alignment = alignment);
if (vresult.ptr != nullptr) {
active->pos += aligned_grow;
mem_zero(old_allocation.ptr + old_allocation.len, grow_amount * (SSIZE)should_zero);
return (Slice_Byte){old_allocation.ptr, old_allocation.len + vresult.len};
}
}
}
Slice_Byte new_alloc = arena__push(arena, requested_size, 1, &(Opts_arena){.alignment = alignment});
if (new_alloc.ptr == nullptr) { return (Slice_Byte){0}; }
mem_copy(new_alloc.ptr, old_allocation.ptr, old_allocation.len);
mem_zero(new_alloc.ptr + old_allocation.len, (new_alloc.len - old_allocation.len) * (SSIZE)should_zero);
return new_alloc;
}
internal inline
Slice_Byte arena__shrink(Arena* arena, Slice_Byte old_allocation, SSIZE requested_size, SSIZE alignment) {
Arena* active = arena->current;
Byte* alloc_end = old_allocation.ptr + old_allocation.len;
Byte* arena_end = cast(Byte*, active) + active->pos;
if (alloc_end != arena_end) {
return (Slice_Byte){old_allocation.ptr, requested_size};
}
SSIZE aligned_original = align_pow2(old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
SSIZE aligned_new = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
SSIZE pos_reduction = aligned_original - aligned_new;
active->pos -= pos_reduction;
return varena__shrink(active->backing, old_allocation, requested_size);
}
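// Behavior sketch (comments only): arena__grow tries two paths — extend in place
// when the old allocation is the tail of the active chained block and the backing
// VArena still has reserve, otherwise push a fresh block and copy. Callers must
// therefore treat the returned pointer as possibly relocated:
//   Slice_Byte s = arena_push_array(arena, Byte, 64);
//   s = arena__grow(arena, s, kilo(4), 0, true); // s.ptr may differ from before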
inline
void arena_release(Arena* arena) {
assert(arena != nullptr);
@@ -1213,7 +1241,8 @@ void arena_release(Arena* arena) {
varena_release(curr->backing);
}
}
inline void arena_reset(Arena* arena) { arena_rewind(arena, (AllocatorSP){.type_sig = arena_allocator_proc, .slot = 0}); }
internal inline void arena_reset(Arena* arena) { arena_rewind(arena, (AllocatorSP){.type_sig = arena_allocator_proc, .slot = 0}); }
internal inline
void arena_rewind(Arena* arena, AllocatorSP save_point) {
assert(arena != nullptr);
assert(save_point.type_sig == arena_allocator_proc);
@@ -1230,7 +1259,8 @@ void arena_rewind(Arena* arena, AllocatorSP save_point) {
curr->pos = new_pos;
varena_rewind(curr->backing, (AllocatorSP){varena_allocator_proc, curr->pos + sizeof(VArena)});
}
inline AllocatorSP arena_save(Arena* arena) { return (AllocatorSP){arena_allocator_proc, arena->base_pos + arena->current->pos}; };
internal inline AllocatorSP arena_save(Arena* arena) { return (AllocatorSP){arena_allocator_proc, arena->base_pos + arena->current->pos}; }
internal
void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
assert(out != nullptr);
@@ -1241,59 +1271,18 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = arena_push_array(arena, Byte, in.requested_size, .alignment = in.alignment);
memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
mem_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
break;
case AllocatorOp_Free: break;
case AllocatorOp_Reset: arena_reset(arena); break;
case AllocatorOp_Grow:
case AllocatorOp_Grow_NoZero: {
Arena* active = arena->current;
Byte* alloc_end = in.old_allocation.ptr + in.old_allocation.len;
Byte* arena_end = cast(Byte*, active) + active->pos;
if (alloc_end == arena_end)
{
SSIZE grow_amount = in.requested_size - in.old_allocation.len;
SSIZE aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
if (active->pos + aligned_grow <= active->backing->reserve)
{
Slice_Byte vresult = varena_push_array(active->backing, Byte, aligned_grow, .alignment = in.alignment);
if (vresult.ptr != nullptr)
{
active->pos += aligned_grow;
out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * (cast(SSIZE, in.op) - AllocatorOp_Grow_NoZero));
case AllocatorOp_Grow_NoZero:
out->allocation = arena__grow(arena, in.old_allocation, in.requested_size, in.alignment, (cast(SSIZE, in.op) - AllocatorOp_Grow_NoZero));
break;
}
}
}
Slice_Byte new_alloc = arena__push(arena, in.requested_size, 1, &(Opts_arena){.alignment = in.alignment});
if (new_alloc.ptr == nullptr) {
out->allocation = (Slice_Byte){0};
break;
}
memory_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
memory_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * (cast(SSIZE, in.op) - AllocatorOp_Grow_NoZero) );
out->allocation = new_alloc;
}
break;
case AllocatorOp_Shrink: {
Arena* active = arena->current;
Byte* alloc_end = in.old_allocation.ptr + in.old_allocation.len;
Byte* arena_end = cast(Byte*, active) + active->pos;
if (alloc_end != arena_end) {
out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
break;
}
//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
SSIZE aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
SSIZE aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
SSIZE pos_reduction = aligned_original - aligned_new;
active->pos -= pos_reduction;
out->allocation = varena__shrink(active->backing, in.old_allocation, in.requested_size);
}
case AllocatorOp_Shrink:
out->allocation = arena__shrink(arena, in.old_allocation, in.requested_size, in.alignment);
break;
case AllocatorOp_Rewind: arena_rewind(arena, in.save_point); break;
@@ -1316,7 +1305,7 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
#pragma endregion Arena
#pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX)
inline
internal inline
void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) {
assert(result != nullptr);
assert(info.backing_cells.proc != nullptr);
@@ -1328,6 +1317,7 @@ void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) {
result->table = mem_alloc(info.backing_table, m.table_size * m.cell_size); slice_assert(result->table);
result->table.len = m.table_size; // len holds the table's element count, not its byte length.
}
internal inline
void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
Byte* cell_cursor = kt.table.ptr;
SSIZE table_len = kt.table.len * m.cell_size;
@@ -1348,11 +1338,12 @@ void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
}
}
}
inline
internal inline
U64 kt1cx_slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) {
U64 hash_index = key % cast(U64, kt.table.len);
return hash_index;
}
internal inline
Byte* kt1cx__get(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) {
U64 hash_index = kt1cx_slot_id(kt, key, m);
SSIZE cell_offset = hash_index * m.cell_size;
@@ -1379,7 +1370,7 @@ Byte* kt1cx__get(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) {
}
}
}
inline
internal
Byte* kt1cx_set(KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta m) {
U64 hash_index = kt1cx_slot_id(kt, key, m);
SSIZE cell_offset = hash_index * m.cell_size;
@@ -1421,13 +1412,14 @@ Byte* kt1cx_set(KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_
#pragma endregion Key Table
#pragma region String Operations
inline
internal inline
char* str8_to_cstr_capped(Str8 content, Slice_Byte mem) {
SSIZE copy_len = min(content.len, mem.len - 1);
memory_copy(mem.ptr, content.ptr, copy_len);
mem_copy(mem.ptr, content.ptr, copy_len);
mem.ptr[copy_len] = '\0';
return cast(char*, mem.ptr);
}
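// Usage sketch: bridge a Str8 (pointer + length) to a NUL-terminated C string via
// caller-owned storage; content is truncated to leave room for the '\0'.
//   local_persist Byte cstr_mem[kilo(1)];
//   char* cpath = str8_to_cstr_capped(path, slice_fmem(cstr_mem));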
internal
Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 digit_group_separator)
{
Str8 result = {0};
@@ -1503,6 +1495,7 @@ Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 di
}
return result;
}
internal
Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str8 fmt_template)
{
assert(_buffer != nullptr);
@@ -1523,7 +1516,7 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str
while (cursor_fmt[copy_offset] != cast(UTF8, '<') && (cursor_fmt + copy_offset) < slice_end(fmt_template)) {
++ copy_offset;
}
memory_copy(cursor_buffer, cursor_fmt, copy_offset);
mem_copy(cursor_buffer, cursor_fmt, copy_offset);
buffer_remaining -= copy_offset;
left_fmt -= copy_offset;
cursor_buffer += copy_offset;
@@ -1560,7 +1553,7 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str
buffer_remaining += potential_token_len;
}
assert((buffer_remaining - potential_token_len) > 0);
memory_copy(cursor_buffer, value->ptr, value->len);
mem_copy(cursor_buffer, value->ptr, value->len);
// Sync cursor format to after the processed token
cursor_buffer += value->len;
buffer_remaining -= value->len;
@@ -1581,7 +1574,7 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str
Str8 result = {buffer.ptr, buffer.len - buffer_remaining};
return result;
}
inline
internal inline
Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries) {
KTL_Str8 kt; ktl_populate_slice_a2_str8(& kt, tbl_backing, *entries );
SSIZE buf_size = kilo(64);
@@ -1589,6 +1582,7 @@ Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8
Str8 result = str8__fmt_ktl(buf_backing, & buffer, kt, fmt_template);
return result;
}
internal inline
Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8* entries) {
local_persist Byte tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
local_persist Byte buf_mem[kilo(64)];
@@ -1596,7 +1590,7 @@ Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8* entries) {
Str8 result = str8__fmt_ktl((AllocatorInfo){0}, & slice_fmem(buf_mem), kt, fmt_template);
return result;
}
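// Usage sketch (hypothetical strings; the variadic wrapper over str8__fmt lives
// outside this hunk): tokens are spelled <key> in the template and substituted from
// the entry table, e.g. a template of "read <file> in <ms>ms" with entries
// {"file","watl.txt"} and {"ms","12"} yields "read watl.txt in 12ms".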
inline
internal inline
void str8cache__init(Str8Cache* cache, Opts_str8cache_init* opts) {
assert(cache != nullptr);
assert(opts != nullptr);
@@ -1626,8 +1620,8 @@ void str8cache__init(Str8Cache* cache, Opts_str8cache_init* opts) {
kt1cx_init(info, m, cast(KT1CX_Byte*, & cache->kt));
return;
}
inline Str8Cache str8cache__make(Opts_str8cache_init* opts) { Str8Cache cache; str8cache__init(& cache, opts); return cache; }
inline
internal inline Str8Cache str8cache__make(Opts_str8cache_init* opts) { Str8Cache cache; str8cache__init(& cache, opts); return cache; }
internal inline
void str8cache_clear(KT1CX_Str8 kt) {
kt1cx_assert(kt);
kt1cx_clear(kt1cx_byte(kt), (KT1CX_ByteMeta){
@@ -1640,7 +1634,7 @@ void str8cache_clear(KT1CX_Str8 kt) {
.type_name = lit(stringify(Str8))
});
}
inline
internal inline
Str8* str8cache_get(KT1CX_Str8 kt, U64 key) {
kt1cx_assert(kt);
Byte* result = kt1cx__get(kt1cx_byte(kt), key
@@ -1655,7 +1649,7 @@ Str8* str8cache_get(KT1CX_Str8 kt, U64 key) {
});
return cast(Str8*, result);
}
inline
internal inline
Str8* str8cache_set(KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserve, AllocatorInfo backing_cells) {
kt1cx_assert(kt);
slice_assert(value);
@@ -1679,14 +1673,14 @@ Str8* str8cache_set(KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserv
}
return result;
}
inline
internal inline
Str8 cache_str8(Str8Cache* cache, Str8 str) {
assert(cache != nullptr);
U64 key = 0; hash64_fnv1a(& key, slice_to_bytes(str));
Str8* result = str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve);
return * result;
}
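// Reference sketch (assumption: hash64_fnv1a is the standard 64-bit FNV-1a; its
// definition sits outside this diff). The well-known constants are the offset
// basis 0xcbf29ce484222325 and the prime 0x100000001b3:
//   internal inline void hash64_fnv1a(U64* hash, Slice_Byte bytes) {
//       *hash = 0xcbf29ce484222325ull;
//       for (SSIZE i = 0; i < bytes.len; ++i) { *hash ^= bytes.ptr[i]; *hash *= 0x100000001b3ull; }
//   }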
inline
internal inline
void str8gen_init(Str8Gen* gen, AllocatorInfo backing) {
assert(gen != nullptr);
gen->backing = backing;
@@ -1695,8 +1689,8 @@ void str8gen_init(Str8Gen* gen, AllocatorInfo backing) {
gen->len = 0;
gen->cap = kilo(4);
}
inline Str8Gen str8gen_make(AllocatorInfo backing) { Str8Gen gen; str8gen_init(& gen, backing); return gen; }
inline
internal inline Str8Gen str8gen_make(AllocatorInfo backing) { Str8Gen gen; str8gen_init(& gen, backing); return gen; }
internal inline
void str8gen_append_str8(Str8Gen* gen, Str8 str){
Slice_Byte result = mem_grow(gen->backing, str8gen_slice_byte(* gen), str.len + gen->len);
slice_assert(result);
@@ -1706,6 +1700,7 @@ void str8gen_append_str8(Str8Gen* gen, Str8 str){
gen->len += str.len;
gen->cap = result.len;
}
internal inline
void str8gen__append_fmt(Str8Gen* gen, Str8 fmt_template, Slice_A2_Str8* entries){
local_persist Byte tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
KTL_Str8 kt = {0}; ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena), *entries );
@@ -1755,12 +1750,13 @@ _declspec(dllimport) MS_BOOL __stdcall WriteFile(
__declspec(dllimport) MS_BOOL __stdcall GetFileSizeEx(MS_HANDLE hFile, MS_LARGE_INTEGER* lpFileSize);
__declspec(dllimport) MS_DWORD __stdcall GetLastError(void);
inline
internal inline
FileOpInfo file__read_contents(Str8 path, Opts_file_read_contents* opts) {
assert(opts != nullptr);
FileOpInfo result = {0}; api_file_read_contents(& result, path, * opts);
return result;
}
internal
void api_file_read_contents(FileOpInfo* result, Str8 path, Opts_file_read_contents opts)
{
assert(result != nullptr);
@@ -1820,6 +1816,7 @@ void api_file_read_contents(FileOpInfo* result, Str8 path, Opts_file_read_conten
result->content.len = file_size.QuadPart;
return;
}
internal inline
void file_write_str8(Str8 path, Str8 content)
{
slice_assert(path);
@@ -1885,7 +1882,7 @@ int __cdecl __stdio_common_vfprintf_s(
va_list _ArgList
);
void __cdecl __va_start(va_list* , ...);
inline
internal inline
int printf_err(char const* fmt, ...) {
int result;
va_list args;
@@ -1894,6 +1891,7 @@ int printf_err(char const* fmt, ...) {
va_end(args);
return result;
}
internal inline
void assert_handler( char const* condition, char const* file, char const* function, S32 line, char const* msg, ... ) {
printf_err( "%s - %s:(%d): Assert Failure: ", file, function, line );
if ( condition )
@@ -1910,6 +1908,7 @@ void assert_handler( char const* condition, char const* file, char const* functi
#pragma endregion Debug
#pragma region WATL
internal
void api_watl_lex(WATL_LexInfo* info, Str8 source, Opts_watl_lex* opts)
{
if (source.len == 0) { return; }
@@ -1998,8 +1997,8 @@ slice_constraint_fail:
assert(opts->failon_slice_constraint_fail == false);
return;
}
inline WATL_LexInfo watl__lex(Str8 source, Opts_watl_lex* opts) { WATL_LexInfo info = {0}; api_watl_lex(& info, source, opts); return info; }
internal inline WATL_LexInfo watl__lex(Str8 source, Opts_watl_lex* opts) { WATL_LexInfo info = {0}; api_watl_lex(& info, source, opts); return info; }
internal
void api_watl_parse(WATL_ParseInfo* info, Slice_WATL_Tok tokens, Opts_watl_parse* opts)
{
if (tokens.len == 0) { return; }
@@ -2060,8 +2059,8 @@ void api_watl_parse(WATL_ParseInfo* info, Slice_WATL_Tok tokens, Opts_watl_parse
}
return;
}
inline WATL_ParseInfo watl__parse(Slice_WATL_Tok tokens, Opts_watl_parse* opts) { WATL_ParseInfo info = {0}; api_watl_parse(& info, tokens, opts); return info; }
internal inline WATL_ParseInfo watl__parse(Slice_WATL_Tok tokens, Opts_watl_parse* opts) { WATL_ParseInfo info = {0}; api_watl_parse(& info, tokens, opts); return info; }
internal
Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)
{
local_persist Byte scratch[kilo(64)] = {0}; FArena sarena = farena_make(slice_fmem(scratch)); AllocatorInfo sinfo = ainfo_farena(sarena);
@@ -2081,8 +2080,7 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)
for slice_iter(* line, chunk)
{
Str8 id;
switch (* chunk->ptr)
{
switch (* chunk->ptr) {
case WATL_Tok_Space: id = lit("Space"); break;
case WATL_Tok_Tab: id = lit("Tab"); break;
default: id = lit("Visible"); break;

View File

@@ -0,0 +1,61 @@
package watl
import "core:os/os2"
import "core:mem/virtual"
import "core:mem"
main :: proc()
{
os_init()
// Note(Ed): Possible compiler bug, cannot resolve proc map with named arguments.
vm_file: virtual.Arena; virtual.arena_init_static(& vm_file, reserved = mem.Gigabytes * 4)
data, err := os2.read_entire_file_from_path("watl.v0.ideomatic.odin", virtual.arena_allocator(& vm_file))
assert(err == nil)
a_msgs := arena_make()
a_toks := arena_make()
// lex_res := watl_lex(transmute(string) data,
// ainfo_msgs = ainfo(a_msgs),
// ainfo_toks = ainfo(a_toks),
// )
lex_res := watl_lex(transmute(string) data,
ainfo(a_msgs),
ainfo(a_toks),
)
assert(lex_res.signal & { .MemFail_SliceConstraintFail } == {})
str8_cache_kt1_ainfo := arena_make()
str_cache := str8cache_make(
str_reserve = ainfo(arena_make()),
cell_reserve = ainfo(str8_cache_kt1_ainfo),
tbl_backing = ainfo(str8_cache_kt1_ainfo),
cell_pool_size = Kilo * 4,
table_size = Kilo * 32,
)
a_lines := arena_make()
// parse_res := watl_parse(lex_res.toks,
// ainfo_msgs = ainfo(a_msgs),
// ainfo_nodes = ainfo(a_toks),
// ainfo_lines = ainfo(a_lines),
// str_cache = & str_cache
// )
parse_res := watl_parse(lex_res.toks,
ainfo(a_msgs),
ainfo(a_toks),
ainfo(a_lines),
& str_cache
)
assert(parse_res.signal & { .MemFail_SliceConstraintFail } == {})
arena_reset(a_msgs)
arena_reset(a_toks)
listing := watl_dump_listing(ainfo(a_msgs), parse_res.lines)
file_write_str8("watl.v0.win32.odin.listing.txt", listing)
return
}

View File

@@ -100,23 +100,14 @@ align_pow2 :: #force_inline proc(x: int, b: int) -> int {
assert((b & (b - 1)) == 0) // Check power of 2
return ((x + b - 1) & ~(b - 1))
}
memory_zero :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr {
intrinsics.mem_zero(data, len)
return data
}
memory_zero :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.mem_zero(data, len); return data }
memory_zero_explicit :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr {
intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
return data
}
memory_copy_overlapping :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
intrinsics.mem_copy(dst, src, len)
return dst
}
memory_copy :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
intrinsics.mem_copy_non_overlapping(dst, src, len)
return dst
}
memory_copy_overlapping :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy(dst, src, len); return dst }
memory_copy :: #force_inline proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy_non_overlapping(dst, src, len); return dst }
sll_stack_push_n :: proc "contextless" (curr, n, n_link: ^^$Type) {
(n_link ^) = (curr ^)
@@ -136,22 +127,14 @@ sll_queue_push_nz :: proc "contextless" (first: ^$ParentType, last, n: ^^$Type,
}
sll_queue_push_n :: #force_inline proc "contextless" (first: $ParentType, last, n: ^^$Type) { sll_queue_push_nz(first, last, n, nil) }
SliceByte :: struct {
data: [^]byte,
len: int
}
SliceRaw :: struct ($Type: typeid) {
data: [^]Type,
len: int,
}
SliceByte :: struct { data: [^]byte, len: int }
SliceRaw :: struct($Type: typeid) { data: [^]Type, len: int, }
slice :: #force_inline proc "contextless" (s: [^] $Type, num: $Some_Integer) -> [ ]Type { return transmute([]Type) SliceRaw(Type) { s, cast(int) num } }
slice_cursor :: #force_inline proc "contextless" (s: []$Type) -> [^]Type { return transmute([^]Type) raw_data(s) }
slice_assert :: #force_inline proc (s: $SliceType / []$Type) {
assert(len(s) > 0)
assert(s != nil)
}
slice_end :: #force_inline proc "contextless" (s : $SliceType / []$Type) -> ^Type { return & cursor(s)[len(s)] }
slice_assert :: #force_inline proc (s: $SliceType / []$Type) { assert(len(s) > 0); assert(s != nil) }
@(require_results) slice_to_bytes :: proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }
@@ -270,8 +253,7 @@ mem_alloc :: proc(ainfo: AllocatorInfo, size: int, alignment: int = MEMORY_ALIGN
requested_size = size,
alignment = alignment,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return output.allocation
}
mem_grow :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, give_actual: b32 = false) -> []byte {
@@ -283,8 +265,7 @@ mem_grow :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int =
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return slice(cursor(output.allocation), give_actual ? len(output.allocation) : size)
}
mem_resize :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, give_actual: b32 = false) -> []byte {
@@ -296,8 +277,7 @@ mem_resize :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return slice(cursor(output.allocation), give_actual ? len(output.allocation) : size)
}
mem_shrink :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false) -> []byte {
@@ -309,8 +289,7 @@ mem_shrink :: proc(ainfo: AllocatorInfo, mem: []byte, size: int, alignment: int
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return output.allocation
}
@@ -322,8 +301,7 @@ alloc_type :: proc(ainfo: AllocatorInfo, $Type: typeid, alignment: int = MEMORY
requested_size = size_of(Type),
alignment = alignment,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return transmute(^Type) raw_data(output.allocation)
}
alloc_slice :: proc(ainfo: AllocatorInfo, $SliceType: typeid / []$Type, num : int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false) -> []Type {
@@ -334,17 +312,13 @@ alloc_slice :: proc(ainfo: AllocatorInfo, $SliceType: typeid / []$Type, num : in
requested_size = size_of(Type) * num,
alignment = alignment,
}
output: AllocatorProc_Out
ainfo.procedure(input, & output)
output: AllocatorProc_Out; ainfo.procedure(input, & output)
return transmute([]Type) slice(raw_data(output.allocation), num)
}
//endregion Allocator Interface
//region Strings
Raw_String :: struct {
data: [^]byte,
len: int,
}
Raw_String :: struct { data: [^]byte, len: int, }
string_cursor :: proc(s: string) -> [^]u8 { return slice_cursor(transmute([]byte) s) }
string_copy :: proc(dst, src: string) { slice_copy (transmute([]byte) dst, transmute([]byte) src) }
string_end :: proc(s: string) -> ^u8 { return slice_end (transmute([]byte) s) }
@@ -356,10 +330,7 @@ FArena :: struct {
mem: []byte,
used: int,
}
farena_make :: proc(backing: []byte) -> FArena {
arena := FArena {mem = backing}
return arena
}
farena_make :: proc(backing: []byte) -> FArena { return {mem = backing} }
farena_init :: proc(arena: ^FArena, backing: []byte) {
assert(arena != nil)
arena.mem = backing
@@ -367,20 +338,15 @@ farena_init :: proc(arena: ^FArena, backing: []byte) {
}
farena_push :: proc(arena: ^FArena, $Type: typeid, amount: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []Type {
assert(arena != nil)
if amount == 0 {
return {}
}
if amount == 0 { return {} }
desired := size_of(Type) * amount
to_commit := align_pow2(desired, alignment)
unused := len(arena.mem) - arena.used
assert(to_commit <= unused)
unused := len(arena.mem) - arena.used; assert(to_commit <= unused)
ptr := cursor(arena.mem[arena.used:])
arena.used += to_commit
return slice(ptr, amount)
}
farena_reset :: proc(arena: ^FArena) {
arena.used = 0
}
farena_reset :: #force_inline proc(arena: ^FArena) { arena.used = 0 }
farena_rewind :: proc(arena: ^FArena, save_point: AllocatorSP) {
assert(save_point.type_sig == farena_allocator_proc)
assert(save_point.slot >= 0 && save_point.slot <= arena.used)
@@ -391,7 +357,6 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou
assert(output != nil)
assert(input.data != nil)
arena := transmute(^FArena) input.data
switch input.op
{
case .Alloc, .Alloc_NoZero:
@@ -400,11 +365,8 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou
zero(output.allocation)
}
case .Free:
// No-op for arena
case .Reset:
farena_reset(arena)
case .Free: // No-op for arena
case .Reset: farena_reset(arena)
case .Grow, .Grow_NoZero:
// Check if the allocation is at the end of the arena
@@ -453,11 +415,8 @@ farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou
arena.used -= (aligned_original - aligned_new)
output.allocation = input.old_allocation[:input.requested_size]
case .Rewind:
farena_rewind(arena, input.save_point)
case .SavePoint:
output.save_point = farena_save(arena^)
case .Rewind: farena_rewind(arena, input.save_point)
case .SavePoint: output.save_point = farena_save(arena^)
case .Query:
output.features = {.Alloc, .Reset, .Grow, .Shrink, .Rewind}
@@ -471,14 +430,9 @@ farena_ainfo :: #force_inline proc "contextless" (arena : ^FArena) -> AllocatorI
//endregion FArena
//region OS
OS_SystemInfo :: struct {
target_page_size: int,
}
OS_Windows_State :: struct {
system_info: OS_SystemInfo,
}
@(private)
os_windows_info: OS_Windows_State
OS_SystemInfo :: struct { target_page_size: int }
OS_Windows_State :: struct { system_info: OS_SystemInfo }
@(private) os_windows_info: OS_Windows_State
// Windows API constants
MS_INVALID_HANDLE_VALUE :: ~uintptr(0)
@@ -537,12 +491,7 @@ os_enable_large_pages :: proc() {
{
priv := MS_TOKEN_PRIVILEGES {
privilege_count = 1,
privileges = {
{
luid = luid,
attributes = MS_SE_PRIVILEGE_ENABLED,
},
},
privileges = { { luid = luid, attributes = MS_SE_PRIVILEGE_ENABLED, }, },
}
AdjustTokenPrivileges(token, 0, &priv, size_of(MS_TOKEN_PRIVILEGES), nil, nil)
}
@@ -554,25 +503,19 @@ os_init :: proc() {
info := &os_windows_info.system_info
info.target_page_size = int(GetLargePageMinimum())
}
os_system_info :: proc() -> ^OS_SystemInfo {
return &os_windows_info.system_info
}
os_vmem_commit :: proc(vm: rawptr, size: int, no_large_pages: b32 = false) -> b32 {
os_system_info :: #force_inline proc "contextless" () -> ^OS_SystemInfo { return & os_windows_info.system_info }
os_vmem_commit :: #force_inline proc "contextless" (vm: rawptr, size: int, no_large_pages: b32 = false) -> b32 {
// Large pages disabled for now (not failing gracefully in original C)
result := VirtualAlloc(vm, uintptr(size), MS_MEM_COMMIT, MS_PAGE_READWRITE) != nil
return b32(result)
return b32(VirtualAlloc(vm, uintptr(size), MS_MEM_COMMIT, MS_PAGE_READWRITE) != nil)
}
os_vmem_reserve :: proc(size: int, base_addr: int = 0, no_large_pages: b32 = false) -> rawptr {
result := VirtualAlloc(rawptr(uintptr(base_addr)), uintptr(size),
os_vmem_reserve :: #force_inline proc "contextless" (size: int, base_addr: int = 0, no_large_pages: b32 = false) -> rawptr {
return VirtualAlloc(rawptr(uintptr(base_addr)), uintptr(size),
MS_MEM_RESERVE,
// MS_MEM_COMMIT
// | (no_large_pages ? 0 : MS_MEM_LARGE_PAGES), // Large pages disabled
MS_PAGE_READWRITE)
return result
}
os_vmem_release :: proc(vm: rawptr, size: int) {
VirtualFree(vm, 0, MS_MEM_RELEASE)
}
os_vmem_release :: #force_inline proc "contextless" (vm: rawptr, size: int) { VirtualFree(vm, 0, MS_MEM_RELEASE) }
//endregion OS
//region VArena
@@ -646,17 +589,6 @@ varena_push :: proc(va: ^VArena, $Type: typeid, amount: int, alignment: int = ME
va.commit_used = to_be_used
return slice(transmute([^]Type) uintptr(current_offset), amount)
}
varena_release :: proc(va: ^VArena) {
os_vmem_release(va, va.reserve)
}
varena_rewind :: proc(va: ^VArena, save_point: AllocatorSP) {
assert(va != nil)
assert(save_point.type_sig == varena_allocator_proc)
va.commit_used = max(save_point.slot, size_of(VArena))
}
varena_reset :: proc(va: ^VArena) {
va.commit_used = size_of(VArena)
}
varena_shrink :: proc(va: ^VArena, old_allocation: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []byte {
assert(va != nil)
current_offset := va.reserve_start + va.commit_used
@@ -668,6 +600,13 @@ varena_shrink :: proc(va: ^VArena, old_allocation: []byte, requested_size: int,
va.commit_used -= shrink_amount
return old_allocation[:requested_size]
}
varena_release :: #force_inline proc(va: ^VArena) { os_vmem_release(va, va.reserve) }
varena_reset :: #force_inline proc(va: ^VArena) { va.commit_used = size_of(VArena) }
varena_rewind :: #force_inline proc(va: ^VArena, save_point: AllocatorSP) {
assert(va != nil)
assert(save_point.type_sig == varena_allocator_proc)
va.commit_used = max(save_point.slot, size_of(VArena))
}
varena_save :: #force_inline proc "contextless" (va: ^VArena) -> AllocatorSP { return AllocatorSP { type_sig = varena_allocator_proc, slot = va.commit_used } }
varena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out) {
assert(output != nil)
@@ -785,7 +724,7 @@ arena_push :: proc(arena: ^Arena, $Type: typeid, amount: int, alignment: int = M
active.pos = pos_pst
return slice(result_ptr, amount)
}
arena_release :: proc(arena: ^Arena) {
arena_release :: #force_inline proc(arena: ^Arena) {
assert(arena != nil)
curr := arena.current
for curr != nil {
@@ -794,9 +733,7 @@ arena_release :: proc(arena: ^Arena) {
curr = prev
}
}
arena_reset :: proc(arena: ^Arena) {
arena_rewind(arena, AllocatorSP { type_sig = arena_allocator_proc, slot = 0 })
}
arena_reset :: #force_inline proc(arena: ^Arena) { arena_rewind(arena, AllocatorSP { type_sig = arena_allocator_proc, slot = 0 }) }
arena_rewind :: proc(arena: ^Arena, save_point: AllocatorSP) {
assert(arena != nil)
assert(save_point.type_sig == arena_allocator_proc)
@@ -1025,11 +962,7 @@ kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) {
}
}
}
kt1cx_slot_id :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 {
cell_size := m.cell_size // dummy value
hash_index := key % u64(len(kt.table))
return hash_index
}
kt1cx_slot_id :: #force_inline proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 { return key % u64(len(kt.table)) }
kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte {
hash_index := kt1cx_slot_id(kt, key, m)
cell_offset := uintptr(hash_index) * uintptr(m.cell_size)
@@ -1100,28 +1033,22 @@ kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: Alloca
return nil
}
}
kt1cx_assert :: proc(kt: $type / KT1CX) {
slice_assert(kt.table)
}
kt1cx_byte :: proc(kt: $type / KT1CX) -> KT1CX_Byte { return {
slice( transmute([^]byte) cursor(kt.table), len(kt.table))
} }
kt1cx_assert :: #force_inline proc(kt: $type / KT1CX) { slice_assert(kt.table) }
kt1cx_byte :: #force_inline proc(kt: $type / KT1CX) -> KT1CX_Byte { return { slice( transmute([^]byte) cursor(kt.table), len(kt.table)) } }
//endregion Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
//region String Operations
char_is_upper :: proc(c: u8) -> b32 { return('A' <= c && c <= 'Z') }
char_to_lower :: proc(c: u8) -> u8 { c:=c; if (char_is_upper(c)) { c += ('a' - 'A') }; return (c) }
char_is_upper :: #force_inline proc(c: u8) -> b32 { return('A' <= c && c <= 'Z') }
char_to_lower :: #force_inline proc(c: u8) -> u8 { c:=c; if (char_is_upper(c)) { c += ('a' - 'A') }; return (c) }
integer_symbols :: proc(value: u8) -> u8 {
integer_symbols :: #force_inline proc(value: u8) -> u8 {
@static lookup_table: [16]u8 = { '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F', };
return lookup_table[value];
}
str8_to_cstr_capped :: proc(content: string, mem: []byte) -> cstring {
str8_to_cstr_capped :: #force_inline proc(content: string, mem: []byte) -> cstring {
copy_len := min(len(content), len(mem) - 1)
if copy_len > 0 {
copy(mem[:copy_len], transmute([]byte) content)
}
if copy_len > 0 { copy(mem[:copy_len], transmute([]byte) content) }
mem[copy_len] = 0
return transmute(cstring) raw_data(mem)
}
@@ -1184,7 +1111,6 @@ str8_from_u32 :: proc(ainfo: AllocatorInfo, num: u32, radix: u32 = 10, min_digit
}
return result
}
str8_fmt_kt1l :: proc(ainfo: AllocatorInfo, _buffer: ^[]byte, table: []KTL_Slot(string), fmt_template: string) -> string {
buffer := _buffer^
slice_assert(buffer)
@@ -1264,15 +1190,14 @@ str8_fmt_kt1l :: proc(ainfo: AllocatorInfo, _buffer: ^[]byte, table: []KTL_Slot(
result := transmute(string) slice(cursor(buffer), len(buffer) - buffer_remaining)
return result
}
str8_fmt_backed :: proc(tbl_ainfo, buf_ainfo: AllocatorInfo, fmt_template: string, entries: [][2]string) -> string {
str8_fmt_backed :: #force_inline proc(tbl_ainfo, buf_ainfo: AllocatorInfo, fmt_template: string, entries: [][2]string) -> string {
kt: []KTL_Slot(string); ktl_populate_slice_a2_str(& kt, tbl_ainfo, entries)
buf_size := Kilo * 64
buffer := mem_alloc(buf_ainfo, buf_size)
result := str8_fmt_kt1l(buf_ainfo, & buffer, kt, fmt_template)
return result
}
str8_fmt_tmp :: proc(fmt_template: string, entries: [][2]string) -> string {
str8_fmt_tmp :: #force_inline proc(fmt_template: string, entries: [][2]string) -> string {
@static tbl_mem: [Kilo * 32]byte; tbl_arena := farena_make(tbl_mem[:])
@static buf_mem: [Kilo * 64]byte; buffer := buf_mem[:]
kt: []KTL_Slot(string); ktl_populate_slice_a2_str(& kt, ainfo(& tbl_arena), entries)
@@ -1317,7 +1242,7 @@ str8cache_init :: proc(cache: ^Str8Cache, str_reserve, cell_reserve, tbl_backing
kt1cx_init(info, m, transmute(^KT1CX_Byte) & cache.kt)
return
}
str8cache_make :: proc(str_reserve, cell_reserve, tbl_backing: AllocatorInfo, cell_pool_size, table_size: int) -> Str8Cache {
str8cache_make :: #force_inline proc(str_reserve, cell_reserve, tbl_backing: AllocatorInfo, cell_pool_size, table_size: int) -> Str8Cache {
cache : Str8Cache; str8cache_init(& cache, str_reserve, cell_reserve, tbl_backing, cell_pool_size, table_size); return cache
}
str8cache_clear :: proc(kt: KT1CX_Str8) {
@@ -1368,11 +1293,10 @@ str8cache_set :: proc(kt: KT1CX_Str8, key: u64, value: string, str_reserve, cell
}
return result
}
cache_str8 :: proc(cache: ^Str8Cache, str: string) -> string {
cache_str8 :: #force_inline proc(cache: ^Str8Cache, str: string) -> string {
assert(cache != nil)
key: u64 = 0; hash64_fnv1a(& key, transmute([]byte) str)
result := str8cache_set(cache.kt, key, str, cache.str_reserve, cache.cell_reserve)
return result ^
return str8cache_set(cache.kt, key, str, cache.str_reserve, cache.cell_reserve) ^
}
Str8Gen :: struct {
@@ -1389,9 +1313,9 @@ str8gen_init :: proc(gen: ^Str8Gen, ainfo: AllocatorInfo) {
gen.len = 0
gen.cap = Kilo * 4
}
str8gen_make :: proc(ainfo: AllocatorInfo) -> Str8Gen { gen: Str8Gen; str8gen_init(& gen, ainfo); return gen }
str8gen_to_bytes :: proc(gen: Str8Gen) -> []byte { return transmute([]byte) SliceByte {data = gen.ptr, len = gen.cap} }
str8_from_str8gen :: proc(gen: Str8Gen) -> string { return transmute(string) SliceByte {data = gen.ptr, len = gen.len} }
str8gen_make :: #force_inline proc(ainfo: AllocatorInfo) -> Str8Gen { gen: Str8Gen; str8gen_init(& gen, ainfo); return gen }
str8gen_to_bytes :: #force_inline proc(gen: Str8Gen) -> []byte { return transmute([]byte) SliceByte {data = gen.ptr, len = gen.cap} }
str8_from_str8gen :: #force_inline proc(gen: Str8Gen) -> string { return transmute(string) SliceByte {data = gen.ptr, len = gen.len} }
str8gen_append_str8 :: proc(gen: ^Str8Gen, str: string) {
result := mem_grow(gen.backing, str8gen_to_bytes(gen ^), len(str) + gen.len)
@@ -1515,9 +1439,8 @@ api_file_read_contents :: proc(result: ^FileOpInfo, path: string, backing: Alloc
result.content = slice(cursor(buffer), cast(int) file_size.QuadPart)
return
}
file_read_contents_stack :: proc(path: string, backing: AllocatorInfo, zero_backing: b32 = false) -> FileOpInfo {
result : FileOpInfo; api_file_read_contents(& result, path, backing, zero_backing)
return result
file_read_contents_stack :: #force_inline proc(path: string, backing: AllocatorInfo, zero_backing: b32 = false) -> FileOpInfo {
result: FileOpInfo; api_file_read_contents(& result, path, backing, zero_backing); return result
}
file_write_str8 :: proc(path, content: string) {
string_assert(path)
@@ -1604,8 +1527,7 @@ api_watl_lex :: proc(info: ^WATL_LexInfo, source: string,
alloc_tok :: #force_inline proc(ainfo: AllocatorInfo) -> ^Raw_String {
return alloc_type(ainfo, Raw_String, align_of(Raw_String), true)
}
#partial switch cast(WATL_TokKind) code
{
#partial switch cast(WATL_TokKind) code {
case .Space: fallthrough
case .Tab:
if prev[0] != src_cursor[0] {
@@ -1729,8 +1651,7 @@ api_watl_parse :: proc(info: ^WATL_ParseInfo, tokens: []WATL_Tok,
info_lines ^ = { transmute([^]WATL_Node) line, 0 }
for & token in tokens
{
#partial switch cast(WATL_TokKind) token[0]
{
#partial switch cast(WATL_TokKind) token[0] {
case .Carriage_Return: fallthrough
case .Line_Feed:
new_line := alloc_type(ainfo_lines, WATL_Line); if cursor(new_line)[-1:] != transmute(^[]string)line {
@@ -1748,9 +1669,7 @@ api_watl_parse :: proc(info: ^WATL_ParseInfo, tokens: []WATL_Tok,
line.data = curr
info_lines.len += 1
continue
case:
break;
case: break;
}
curr ^ = cache_str8(str_cache, token)
new_node := alloc_type(ainfo_nodes, WATL_Node); if cursor(new_node)[-1:] != curr {
@@ -1799,8 +1718,7 @@ watl_dump_listing :: proc(buffer: AllocatorInfo, lines: []WATL_Line) -> string {
for chunk in line
{
id : string
#partial switch cast(WATL_TokKind) chunk[0]
{
#partial switch cast(WATL_TokKind) chunk[0] {
case .Space: id = "Space"
case .Tab: id = "Tab"
case: id = "Visible"

View File

@@ -118,8 +118,8 @@ $compiler_args += $flag_full_src_path
# $compiler_args += $flag_optimize_speed_max
# $compiler_args += $flag_optimize_fast
# $compiler_args += $flag_optimize_size
# $compiler_args += $flag_optimize_intrinsics
$compiler_args += $flag_no_optimization
$compiler_args += $flag_optimize_intrinsics
# $compiler_args += $flag_no_optimization
# Debug setup
$compiler_args += ($flag_define + 'BUILD_DEBUG')

View File

@@ -118,8 +118,8 @@ $compiler_args += $flag_full_src_path
# $compiler_args += $flag_optimize_speed_max
# $compiler_args += $flag_optimize_fast
# $compiler_args += $flag_optimize_size
# $compiler_args += $flag_optimize_intrinsics
$compiler_args += $flag_no_optimization
$compiler_args += $flag_optimize_intrinsics
# $compiler_args += $flag_no_optimization
# Debug setup
$compiler_args += ($flag_define + 'BUILD_DEBUG')

View File

@@ -117,8 +117,8 @@ $compiler_args += $flag_full_src_path
# $compiler_args += $flag_optimize_speed_max
# $compiler_args += $flag_optimize_fast
# $compiler_args += $flag_optimize_size
# $compiler_args += $flag_optimize_intrinsics
$compiler_args += $flag_no_optimization
$compiler_args += $flag_optimize_intrinsics
# $compiler_args += $flag_no_optimization
# Debug setup
$compiler_args += ($flag_define + 'BUILD_DEBUG')
@@ -139,7 +139,10 @@ $compiler_args += $flag_compile, $unit
$compiler_args | ForEach-Object { Write-Host $_ }
# Compile the unit
$compilation_time = Measure-Command {
& $compiler $compiler_args
}
write-host "Compilation took $($compilation_time.TotalMilliseconds)ms"
write-host
$binary = join-path $path_build "$unit_name.exe"
@@ -168,8 +171,9 @@ if ($true) {
# Diagnostic print for the args
$linker_args | ForEach-Object { Write-Host $_ }
& $linker $linker_args
$linking_time = Measure-Command { & $linker $linker_args }
# & $radlink $linker_args
write-host "Linking took $($linking_time.TotalMilliseconds)ms"
write-host
}