WIP(untested, not-compiled): Curating varena code for pure lottes c-- variant, misc changes

2025-11-04 14:21:13 -05:00
parent 816ed5debd
commit 9179f77f05
3 changed files with 205 additions and 255 deletions

==== Changed file 1 of 3 ====

@@ -440,7 +440,7 @@ typedef def_struct(FArena) {
 };
 I_ void farena_init__u  (U8 arena, U8 mem_ptr, U8 mem_len);
-   void farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_mem);
+   void farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
 I_ void farena_reset__u (U8 arena);
 I_ void farena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot);
 I_ void farena_save__u  (U8 arena, U8 sp);
@@ -520,15 +520,15 @@ typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
 global OS_Windows_State os__windows_info;
 I_ OS_SystemInfo* os_system_info(void);
-I_ void os_init(void);
-I_ U8   os__vmem_reserve__u(U8 size, U8 opts_addr);
+I_ void os_init             (void);
+I_ U8   os__vmem_reserve__u ( U8 size, U8 opts_addr);
 I_ B4   os__vmem_commit__u  (U8 vm, U8 size, U8 opts_addr);
-I_ void os_vmem_release__u(U8 vm, U8 size);
-I_ U8   os__vmem_reserve(U8 size, Opts_vmem_R opts);
+I_ void os_vmem_release__u  (U8 vm, U8 size);
+I_ U8   os__vmem_reserve    ( U8 size, Opts_vmem_R opts);
 I_ B4   os__vmem_commit     (U8 vm, U8 size, Opts_vmem_R opts);
-I_ void os_vmem_release(U8 vm, U8 size);
+I_ void os_vmem_release     (U8 vm, U8 size);
 #define os_vmem_reserve(size, ...)    os__vmem_reserve(size, opt_args(Opts_vmem, __VA_ARGS__))
 #define os_vmem_commit(vm, size, ...) os__vmem_commit (vm, size, opt_args(Opts_vmem, __VA_ARGS__))
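
A minimal sketch of the optional-arguments pattern these wrappers rely on, assuming opt_args expands a trailing designated-initializer list into a pointer to an options struct. All names below are illustrative stand-ins, not the repo's definitions:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t base_addr; bool no_large_pages; } OptsVmemDemo;

static uint64_t vmem_reserve_impl(uint64_t size, OptsVmemDemo* opts) {
    (void)size; (void)opts;
    return 0x10000; // pretend base address
}
// The leading .base_addr = 0 keeps the compound literal valid when the caller
// passes no options; later designated initializers override earlier ones.
#define vmem_reserve(size, ...) \
    vmem_reserve_impl(size, &(OptsVmemDemo){ .base_addr = 0, __VA_ARGS__ })

int main(void) {
    vmem_reserve(1 << 20);                         // defaults
    vmem_reserve(1 << 20, .no_large_pages = true); // named option at the call site
    return 0;
}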
@@ -554,17 +554,17 @@ typedef def_struct(Opts_varena_make) {
     VArenaFlags flags;
 };
-I_ U8   varena__make__u  (Opts_varena_make_R opts);
+I_ U8   varena__make__u  (U8 reserve_size, U8 commit_size, U8 flags, U8 base_addr);
 I_ void varena_release__u(U8 arena);
 I_ void varena_reset__u  (U8 arena);
 I_ void varena_rewind__u (U8 arena, U8 sp_type_sig, U8 sp_slot);
 I_ void varena_save__u   (U8 arena, U8 sp_addr);
    void varena__push__u  (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
    void varena__grow__u  (U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero);
    void varena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment);
 VArena*   varena__make  (Opts_varena_make*R_ opts);
 Slice_Mem varena__push  (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
 void      varena_release(VArena_R arena);
 void      varena_reset  (VArena_R arena);
 void      varena_rewind (VArena_R arena, AllocatorSP save_point);
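
Worth spelling out the `__u` convention visible in these declarations: every parameter is a plain machine word (pointers, sizes, and flags alike), and composite results like Slice_Mem are written through an out address (`slice_addr`, `result`) instead of being returned. A minimal sketch, assuming U8 is a 64-bit unsigned word; the names are stand-ins, not the repo's:

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t ptr; uint64_t len; } SliceMemDemo;

// Typed API...
static SliceMemDemo slice_make(uint64_t ptr, uint64_t len) { return (SliceMemDemo){ptr, len}; }

// ...and its "__u" twin: arguments are plain words, and the slice result
// lands at out_addr (memcpy standing in for the repo's struct_copy).
static void slice_make__u(uint64_t ptr, uint64_t len, uint64_t out_addr) {
    SliceMemDemo s = slice_make(ptr, len);
    memcpy((void*)(uintptr_t)out_addr, &s, sizeof s);
}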
@@ -821,76 +821,45 @@ I_ void os_vmem_release (U8 vm, U8 size) { os_vmem_release__u(
 #pragma endregion OS
 #pragma region VArena (Virtual Address Space Arena)
-I_ U8 varena__header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
-I_ U8 varena__make__u(Opts_varena_make_R opts) {
-    assert(opts != nullptr);
-    if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
-    if (opts->commit_size  == 0) { opts->commit_size  = mega(64); }
+I_ U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
+I_ U8 varena__make__u(U8 reserve_size, U8 commit_size, U8 flags, U8 base_addr) {
+    if (reserve_size == 0) { reserve_size = mega(64); }
+    if (commit_size  == 0) { commit_size  = mega(64); }
     U8 reg page       = os_system_info()->target_page_size;
-    U8 reg reserve_sz = align_pow2(opts->reserve_size, page);
-    U8 reg commit_sz  = align_pow2(opts->commit_size,  page);
-    U8 base = os_vmem_reserve(reserve_sz, .base_addr = opts->base_addr, .no_large_pages = no_large);
-    assert(base != 0);
-    B4 reg no_large = (opts->flags & VArenaFlag_NoLargePages) != 0;
-    B4 ok = os_vmem_commit(base, commit_sz, .no_large_pages = no_large);
-    assert(ok != 0);
-    U8 header     = varena__header_size();
+    U8 reg reserve_sz = align_pow2(reserve_size, page);
+    U8 reg commit_sz  = align_pow2(commit_size,  page);
+    B4 reg no_large   = (flags & VArenaFlag_NoLargePages) != 0;
+    U8 base = os_vmem_reserve__u(reserve_sz, base_addr, no_large); assert(base != 0);
+    B4 ok   = os_vmem_commit__u (base, commit_sz, no_large);       assert(ok   != 0);
+    U8 header     = varena_header_size();
     U8 data_start = base + header;
     u8_r(base + offset_of(VArena, reserve_start))[0] = data_start;
     u8_r(base + offset_of(VArena, reserve      ))[0] = reserve_sz;
     u8_r(base + offset_of(VArena, commit_size  ))[0] = commit_sz;
     u8_r(base + offset_of(VArena, committed    ))[0] = commit_sz;
     u8_r(base + offset_of(VArena, commit_used  ))[0] = header;
-    u4_r(base + offset_of(VArena, flags        ))[0] = opts->flags;
+    u4_r(base + offset_of(VArena, flags        ))[0] = flags;
     return base;
 }
-I_ void varena_release__u(U8 arena) {
-    if (arena == null) { return; }
-    os_vmem_release__u(arena, u8_r(arena + offset_of(VArena, reserve))[0]);
-}
-I_ void varena_reset__u(U8 arena) {
-    if (arena == null) { return; }
-    u8_r(arena + offset_of(VArena, commit_used))[0] = 0;
-}
-I_ void varena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot) {
-    if (arena == null) { return; }
-    assert(sp_type_sig == (U8) varena_allocator_proc);
-    U8 header = varena__header_size();
-    if (sp_slot < header) { sp_slot = header; }
-    u8_r(arena + offset_of(VArena, commit_used))[0] = sp_slot;
-}
-I_ void varena_save__u(U8 arena, U8 sp_addr) {
-    if (sp_addr == null) { return; }
-    u8_r(sp_addr + offset_of(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
-    u8_r(sp_addr + offset_of(AllocatorSP, slot    ))[0] = u8_r(arena + offset_of(VArena, commit_used))[0];
-}
 void varena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 result) {
     if (result == null || arena == null) { return; }
     if (amount == 0) { struct_zero(Slice_Mem, result); return; }
     U8 reg align          = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
     U8 reg requested_size = amount * type_width;
     U8 reg aligned_size   = align_pow2(requested_size, align);
-    U8 reg reserve_start  = u8_r(arena + offset_of(VArena, reserve_start))[0];
-    U8_R reg commit_used  = u8_r(arena + offset_of(VArena, commit_used));
-    U8 reg current_offset = reserve_start + commit_used[0];
-    U8 reg reserve_total  = u8_r(arena + offset_of(VArena, reserve))[0];
-    U8 reg reserve_left   = reserve_total - commit_used[0];
+    U8_R reg commit_used  = u8_r(arena + offset_of(VArena, commit_used));
+    U8 reg reserve_left   = u8_r(arena + offset_of(VArena, reserve))[0] - commit_used[0];
     if (aligned_size > reserve_left) { struct_zero(Slice_Mem, result); return; }
     U8 reg committed   = u8_r(arena + offset_of(VArena, committed))[0];
-    U8 reg commit_size = u8_r(arena + offset_of(VArena, commit_size))[0];
     U8 reg commit_left = committed - commit_used[0];
     if (commit_left < aligned_size) {
+        U8 reg commit_size = u8_r(arena + offset_of(VArena, commit_size))[0];
         U8 reg next_commit = reserve_left > aligned_size ? max(commit_size, aligned_size) : reserve_left;
         if (next_commit != 0) {
             B4 no_large = (u4_r(arena + offset_of(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0;
             U8 reg next_commit_start = arena + committed;
-            if (! os_vmem_commit(next_commit_start, next_commit, .no_large_pages = no_large)) {
+            if (os_vmem_commit(next_commit_start, next_commit, .no_large_pages = no_large) == false) {
                 struct_zero(Slice_Mem, result);
                 return;
             }
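
A quick sketch of the u8_r + offset_of store pattern varena__make__u uses above to fill the header without a typed struct pointer: compute the field's byte offset and write through a word pointer. Plain C with stand-in types:

#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint64_t reserve_start, reserve, commit_size, committed, commit_used;
    uint32_t flags;
} VHdrDemo;

static void vhdr_set_reserve(uintptr_t base, uint64_t reserve_sz) {
    // equivalent of: u8_r(base + offset_of(VArena, reserve))[0] = reserve_sz;
    ((uint64_t*)(base + offsetof(VHdrDemo, reserve)))[0] = reserve_sz;
}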
@@ -898,10 +867,11 @@ void varena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 result
             u8_r(arena + offset_of(VArena, committed))[0] = committed;
         }
     }
-    commit_used[0] += aligned_size;
-    struct_copy(Slice_Mem, result, (U8)& slice_mem(current_offset, requested_size));
+    {
+        U8 reg current_offset = u8_r(arena + offset_of(VArena, reserve_start))[0] + commit_used[0];
+        commit_used[0] += aligned_size;
+        struct_copy(Slice_Mem, result, (U8)& slice_mem(current_offset, requested_size));
+    }
 }
 void varena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
     if (result == null || arena == null) { return; }
     if (old_ptr == 0 || requested_size <= old_len) {
@@ -931,6 +901,27 @@ void varena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_s
     }
 }
+I_ void varena_release__u(U8 arena) {
+    if (arena == null) { return; }
+    os_vmem_release__u(arena, u8_r(arena + offset_of(VArena, reserve))[0]);
+}
+I_ void varena_reset__u(U8 arena) {
+    if (arena == null) { return; }
+    u8_r(arena + offset_of(VArena, commit_used))[0] = 0;
+}
+I_ void varena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot) {
+    if (arena == null) { return; }
+    assert(sp_type_sig == (U8) varena_allocator_proc);
+    U8 header = varena_header_size();
+    if (sp_slot < header) { sp_slot = header; }
+    u8_r(arena + offset_of(VArena, commit_used))[0] = sp_slot;
+}
+I_ void varena_save__u(U8 arena, U8 sp_addr) {
+    if (sp_addr == null) { return; }
+    u8_r(sp_addr + offset_of(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
+    u8_r(sp_addr + offset_of(AllocatorSP, slot    ))[0] = u8_r(arena + offset_of(VArena, commit_used))[0];
+}
 void varena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) {
     if (result == null || arena == null) { return; }
     if (old_ptr == 0 || requested_size >= old_len) {
@@ -951,8 +942,9 @@ void varena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested
     struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
 }
-VArena* varena__make(Opts_varena_make* opts) {
-    return cast(VArena*, varena__make__u(opts));
+I_ VArena* varena__make(Opts_varena_make* opts) {
+    assert(opts != nullptr);
+    return cast(VArena*, varena__make__u(opts->reserve_size, opts->commit_size, opts->flags, opts->base_addr));
 }
 Slice_Mem varena__push(VArena_R arena, U8 amount, U8 type_width, Opts_varena* opts) {
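
For reference, the reserve/commit bookkeeping varena__push__u performs, with the OS calls stubbed out: address space is reserved up front and pages are committed on demand, growing by at least the preferred granule. A sketch with placeholder names:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t reserve;     // total reserved address space
    uint64_t committed;   // bytes committed so far
    uint64_t commit_size; // preferred commit granularity
    uint64_t commit_used; // bytes handed out
} VPoolDemo;

static bool os_commit_stub(VPoolDemo* p, uint64_t bytes) { p->committed += bytes; return true; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

// Returns the offset of the new allocation, or UINT64_MAX on exhaustion.
static uint64_t vpool_push(VPoolDemo* p, uint64_t aligned_size) {
    uint64_t reserve_left = p->reserve - p->commit_used;
    if (aligned_size > reserve_left) { return UINT64_MAX; } // address space exhausted
    uint64_t commit_left = p->committed - p->commit_used;
    if (commit_left < aligned_size) {                       // commit more pages first
        uint64_t next = reserve_left > aligned_size
            ? max_u64(p->commit_size, aligned_size)         // at least one full granule
            : reserve_left;                                 // final partial granule
        if (!os_commit_stub(p, next)) { return UINT64_MAX; }
    }
    uint64_t offset = p->commit_used;
    p->commit_used += aligned_size;
    return offset;
}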

==== Changed file 2 of 3 ====

@@ -117,6 +117,8 @@ enum { false = 0, true = 1, true_overflow, };
 #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
 #define v_(ptr) cast(typeof_ptr(ptr)*V_, ptr)
+#define tr_(type, ptr) cast(type*R_, ptr)
+#define tv_(type, ptr) cast(type*V_, ptr)
 #define kilo(n) (cast(U8, n) << 10)
 #define mega(n) (cast(U8, n) << 20)
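
What the new tr_/tv_ helpers boil down to: casting an address to a typed pointer with a restrict or volatile qualifier attached. A plain-C equivalent without the repo's cast macro (demo names only):

#include <stdint.h>

#define tr_demo(type, ptr) ((type * restrict)(ptr))
#define tv_demo(type, ptr) ((type * volatile)(ptr))

static void store_word(uintptr_t addr, uint64_t value) {
    tr_demo(uint64_t, addr)[0] = value; // typed, restrict-qualified store
}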
@@ -197,7 +199,17 @@ typedef def_farray(B1, 2);
 typedef def_farray(B1, 4);
 typedef def_farray(B1, 8);
-finline U8 align_pow2(U8 x, U8 b);
+finline U8 mem_copy            (U8 dest, U8 src,   U8 len) { return (U8)(__builtin_memcpy ((void*)dest, (void const*)src, len)); }
+finline U8 mem_copy_overlapping(U8 dest, U8 src,   U8 len) { return (U8)(__builtin_memmove((void*)dest, (void const*)src, len)); }
+finline U8 mem_fill            (U8 dest, U8 value, U8 len) { return (U8)(__builtin_memset ((void*)dest, (int) value,     len)); }
+finline B4 mem_zero            (U8 dest, U8 len)           { if (dest == 0) return false; mem_fill(dest, 0, len); return true;  }
+finline
+U8 align_pow2(U8 x, U8 b) {
+    assert(b != 0);
+    assert((b & (b - 1)) == 0); // Check power of 2
+    return ((x + b - 1) & (~(b - 1)));
+}
 #define align_struct(type_width) ((U8)(((type_width) + 7) / 8 * 8))
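
A quick check of the align_pow2 bit trick now defined inline above: adding b - 1 and masking rounds up to the next multiple of a power of two, leaving already-aligned values unchanged. A self-contained demo (stand-in name):

#include <assert.h>
#include <stdint.h>

static uint64_t align_pow2_demo(uint64_t x, uint64_t b) {
    assert(b != 0 && (b & (b - 1)) == 0); // b must be a power of two
    return (x + b - 1) & ~(b - 1);
}

int main(void) {
    assert(align_pow2_demo(13, 8)   == 16);   // rounds up
    assert(align_pow2_demo(16, 8)   == 16);   // already aligned: unchanged
    assert(align_pow2_demo(1, 4096) == 4096); // page-size alignment
    return 0;
}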
@@ -206,10 +218,6 @@ finline U8 align_pow2(U8 x, U8 b);
     assert(point <= end); \
 } while(0)
-U8 mem_copy            (U8 dest, U8 src, U8 length);
-U8 mem_copy_overlapping(U8 dest, U8 src, U8 length);
-B4 mem_zero            (U8 dest, U8 length);
 #define check_nil(nil, p) ((p) == 0 || (p) == nil)
 #define set_nil(nil, p)   ((p) = nil)
@@ -243,13 +251,20 @@ typedef def_Slice(B1);
 #define slice_to_bytes(slice) ((Slice_B1){cast(B1*, (slice).ptr), (slice).len * size_of_slice_type(slice)})
 #define slice_fmem(mem)       slice_mem(u8_(mem), size_of(mem))
-finline void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth);
-finline void slice__zero(Slice_B1 mem, U8 typewidth);
+finline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero(u8_(mem.ptr), mem.len); }
+#define slice_zero(slice) slice__zero(slice_mem_s(slice), size_of_slice_type(slice))
+finline
+void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
+    assert(dest.len >= src.len);
+    slice_assert(dest);
+    slice_assert(src);
+    memory_copy(u8_(dest.ptr), u8_(src.ptr), src.len);
+}
 #define slice_copy(dest, src) do { \
     static_assert(typeof_same(dest, src)); \
     slice__copy(slice_to_bytes(dest), size_of_slice_type(dest), slice_to_bytes(src), size_of_slice_type(src)); \
 } while (0)
-#define slice_zero(slice) slice__zero(slice_mem_s(slice), size_of_slice_type(slice))
 #define slice_iter(container, iter)     (typeof((container).ptr) iter = (container).ptr; iter != slice_end(container); ++ iter)
 #define slice_arg_from_array(type, ...) & (tmpl(Slice,type)) { .ptr = farray_init(type, __VA_ARGS__), .len = farray_len( farray_init(type, __VA_ARGS__)) }
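
The contract slice__copy enforces, restated in plain C: a bounds-checked byte copy between slice views (the macro layer above additionally static_asserts that dest and src share an element type). Stand-in types only:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t* ptr; uint64_t len; } SliceB1Demo;

static void slice_copy_demo(SliceB1Demo dest, SliceB1Demo src) {
    assert(dest.ptr != NULL && src.ptr != NULL); // slice_assert equivalents
    assert(dest.len >= src.len);                 // destination must fit the source
    memcpy(dest.ptr, src.ptr, src.len);
}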
@@ -783,34 +798,6 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
 #pragma region Implementation
-#pragma region Memory Operations
-void* __cdecl memcpy (void*R_ _Dst, void const*R_ _Src, U8 _Size);
-void* __cdecl memmove(void* _Dst, void const* _Src, U8 _Size);
-void* __cdecl memset (void*R_ _Dst, int _Val, U8 _Size);
-finline
-U8 align_pow2(U8 x, U8 b) {
-    assert(b != 0);
-    assert((b & (b - 1)) == 0); // Check power of 2
-    return ((x + b - 1) & (~(b - 1)));
-}
-U8 memory_copy            (U8 dest, U8 src, U8 len) __asm__("memcpy");
-U8 memory_copy_overlapping(U8 dest, U8 src, U8 len) __asm__("memmove");
-finline
-B4 memory_zero(U8 dest, U8 length) {
-    if (dest == 0) return false;
-    memset((void*R_)dest, 0, length);
-    return true;
-}
-finline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero(u8_(mem.ptr), mem.len); }
-finline
-void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
-    assert(dest.len >= src.len);
-    slice_assert(dest);
-    slice_assert(src);
-    memory_copy(u8_(dest.ptr), u8_(src.ptr), src.len);
-}
-#pragma endregion Memory Operations
 #pragma region Allocator Interface
 finline
 AllocatorQueryInfo allocator_query(AllocatorInfo ainfo) {
@@ -911,29 +898,67 @@ void farena_init(FArena_R arena, Slice_Mem mem) {
 }
 finline FArena farena_make(Slice_Mem mem) { FArena a; farena_init(& a, mem); return a; }
 inline
-Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts) {
+Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts__farena*R_ opts) {
     assert(opts != nullptr);
     if (amount == 0) { return (Slice_Mem){}; }
     U8 desired   = type_width * amount;
     U8 to_commit = align_pow2(desired, opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT);
-    U8 unused    = arena->capacity - arena->used;
-    assert(to_commit <= unused);
+    U8 unused    = arena->capacity - arena->used; assert(to_commit <= unused);
     U8 ptr       = arena->start + arena->used;
     arena->used += to_commit;
     return (Slice_Mem){ptr, desired};
 }
+inline
+Slice_Mem farena__grow(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
+    // Check if the allocation is at the end of the arena
+    U8 alloc_end = old_allocation.ptr + old_allocation.len;
+    U8 arena_end = arena->start + arena->used;
+    if (alloc_end != arena_end) {
+        // Not at the end, can't grow in place
+        return (Slice_Mem){0};
+    }
+    // Calculate growth
+    U8 grow_amount  = requested_size - old_allocation.len;
+    U8 aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+    U8 unused       = arena->capacity - arena->used;
+    if (aligned_grow > unused) {
+        // Not enough space
+        return (Slice_Mem){0};
+    }
+    arena->used += aligned_grow;
+    Slice_Mem result = (Slice_Mem){ old_allocation.ptr, aligned_grow + requested_size };
+    memory_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(U8, should_zero));
+    return result;
+}
+inline
+Slice_Mem farena__shrink(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment)
+{
+    // Check if the allocation is at the end of the arena
+    U8 alloc_end = old_allocation.ptr + old_allocation.len;
+    U8 arena_end = arena->start + arena->used;
+    if (alloc_end != arena_end) {
+        // Not at the end, can't shrink but return adjusted size
+        return (Slice_Mem){old_allocation.ptr, requested_size};
+    }
+    U8 aligned_original = align_pow2(old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
+    U8 aligned_new      = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+    arena->used -= (aligned_original - aligned_new);
+    return (Slice_Mem){old_allocation.ptr, requested_size};
+}
 finline void farena_reset(FArena_R arena) { arena->used = 0; }
 finline
 void farena_rewind(FArena_R arena, AllocatorSP save_point) {
     assert(save_point.type_sig == & farena_allocator_proc);
-    U8 end = arena->start + arena->used;
-    assert_bounds(save_point.slot, arena->start, end);
+    U8 end = arena->start + arena->used; assert_bounds(save_point.slot, arena->start, end);
     arena->used -= save_point.slot - arena->start;
 }
 finline
 AllocatorSP farena_save (FArena arena) {
-    AllocatorSP sp = { .type_sig = & farena_allocator_proc, .slot = arena.used };
-    return sp;
+    return (AllocatorSP){ .type_sig = & farena_allocator_proc, .slot = arena.used };
 }
 void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
 {
@@ -948,61 +973,19 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
         memory_zero(out->allocation.ptr, out->allocation.len * in.op);
     break;
-    case AllocatorOp_Free:
-    break;
-    case AllocatorOp_Reset:
-        farena_reset(arena);
-    break;
+    case AllocatorOp_Free:  break;
+    case AllocatorOp_Reset: farena_reset(arena); break;
     case AllocatorOp_Grow:
-    case AllocatorOp_Grow_NoZero: {
-        // Check if the allocation is at the end of the arena
-        U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-        U8 arena_end = arena->start + arena->used;
-        if (alloc_end != arena_end) {
-            // Not at the end, can't grow in place
-            out->allocation = (Slice_Mem){0};
-            break;
-        }
-        // Calculate growth
-        U8 grow_amount  = in.requested_size - in.old_allocation.len;
-        U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-        U8 unused       = arena->capacity - arena->used;
-        if (aligned_grow > unused) {
-            // Not enough space
-            out->allocation = (Slice_Mem){0};
-            break;
-        }
-        arena->used += aligned_grow;
-        out->allocation = (Slice_Mem){ in.old_allocation.ptr, aligned_grow + in.requested_size };
-        memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * in.op - AllocatorOp_Grow_NoZero);
-    }
-    break;
-    case AllocatorOp_Shrink: {
-        // Check if the allocation is at the end of the arena
-        U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-        U8 arena_end = arena->start + arena->used;
-        if (alloc_end != arena_end) {
-            // Not at the end, can't shrink but return adjusted size
-            out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
-            break;
-        }
-        // Calculate shrinkage
-        //SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
-        U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
-        U8 aligned_new      = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-        arena->used -= (aligned_original - aligned_new);
-        out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
-    }
-    break;
-    case AllocatorOp_Rewind:
-        farena_rewind(arena, in.save_point);
-    break;
-    case AllocatorOp_SavePoint:
-        out->save_point = farena_save(arena[0]);
-    break;
+    case AllocatorOp_Grow_NoZero:
+        out->allocation = farena__grow(arena, in.old_allocation, in.requested_size, in.alignment, in.op - AllocatorOp_Grow_NoZero);
+    break;
+    case AllocatorOp_Shrink:
+        out->allocation = farena__shrink(arena, in.old_allocation, in.requested_size, in.alignment);
+    break;
+    case AllocatorOp_Rewind:    farena_rewind(arena, in.save_point);     break;
+    case AllocatorOp_SavePoint: out->save_point = farena_save(arena[0]); break;
     case AllocatorOp_Query:
         out->features =
@@ -1073,15 +1056,10 @@ W_ MS_LPVOID VirtualAlloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationT
 W_ MS_BOOL   VirtualFree (MS_LPVOID lpAddress, U8 dwSize, MS_DWORD dwFreeType);
 #pragma warning(pop)
-typedef def_struct(OS_Windows_State) {
-    OS_SystemInfo system_info;
-};
+typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
 global OS_Windows_State os__windows_info;
-finline
-OS_SystemInfo* os_system_info(void) {
-    return & os__windows_info.system_info;
-}
+finline OS_SystemInfo* os_system_info(void) { return & os__windows_info.system_info; }
 inline
 void os__enable_large_pages(void) {
     MS_HANDLE token;
@@ -1125,6 +1103,8 @@ inline void os_vmem_release(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm),
 #pragma endregion OS
 #pragma region VArena (Virtual Address Space Arena)
+finline U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
 inline
 VArena* varena__make(Opts_varena_make*R_ opts) {
     assert(opts != nullptr);
@@ -1133,12 +1113,11 @@ VArena* varena__make(Opts_varena_make*R_ opts) {
     U8 reserve_size = align_pow2(opts->reserve_size, os_system_info()->target_page_size);
     U8 commit_size  = align_pow2(opts->commit_size,  os_system_info()->target_page_size);
     B4 no_large_pages = (opts->flags & VArenaFlag_NoLargePages) != 0;
     U8 base = os_vmem_reserve(reserve_size, .base_addr = opts->base_addr, .no_large_pages = no_large_pages);
     assert(base != 0);
     os_vmem_commit(base, commit_size, .no_large_pages = no_large_pages);
-    U8 header_size = align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT);
-    VArena* vm = cast(VArena*, base);
-    r_(vm)[0] = (VArena){
+    U8 header_size = varena_header_size();
+    VArena* vm = cast(VArena*, base); r_(vm)[0] = (VArena){
         .reserve_start = base + header_size,
         .reserve       = reserve_size,
         .commit_size   = commit_size,
@@ -1150,6 +1129,7 @@ VArena* varena__make(Opts_varena_make*R_ opts) {
 }
 inline
 Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opts) {
+    assert(vm != nullptr);
     assert(amount != 0);
     U8 alignment      = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
     U8 requested_size = amount * type_width;
@@ -1158,8 +1138,7 @@ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opt
     U8 to_be_used   = vm->commit_used + aligned_size;
     U8 reserve_left = vm->reserve   - vm->commit_used;
     U8 commit_left  = vm->committed - vm->commit_used;
-    B4 exhausted    = commit_left < to_be_used;
-    assert(to_be_used < reserve_left);
+    B4 exhausted    = commit_left < to_be_used; assert(to_be_used < reserve_left);
     if (exhausted)
     {
         U8 next_commit_size = reserve_left > 0 ?
@@ -1169,30 +1148,32 @@ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opt
             U8 next_commit_start = u8_(vm) + vm->committed;
             B4 no_large_pages    = (vm->flags & VArenaFlag_NoLargePages) != 0;
             B4 commit_result     = os_vmem_commit(next_commit_start, next_commit_size, .no_large_pages = no_large_pages);
-            if (commit_result == false) {
-                return (Slice_Mem){0};
-            }
+            if (commit_result == false) { return (Slice_Mem){0}; }
             vm->committed += next_commit_size;
         }
     }
     vm->commit_used = to_be_used;
     return (Slice_Mem){.ptr = current_offset, .len = requested_size};
 }
+inline
+Slice_Mem varena__grow(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
+    U8 grow_amount = requested_size - old_allocation.len;
+    if (grow_amount == 0) { return old_allocation; }
+    // Growing when not the last allocation is not allowed
+    U8 current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == current_offset);
+    Slice_Mem allocation = varena_push_mem(vm, grow_amount, alignment); assert(allocation.ptr != 0);
+    memory_zero(allocation.ptr, allocation.len * should_zero);
+    return (Slice_Mem){ old_allocation.ptr, requested_size + allocation.len };
+}
 finline void varena_release(VArena_R arena) { os_vmem_release(u8_(arena), arena->reserve); }
 inline
 Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts) {
     assert(opts != nullptr);
     Slice_Mem result = {0};
-    U8 current_offset = vm->reserve_start + vm->commit_used;
     U8 shrink_amount  = old_allocation.len - requested_size;
-    if (lt_s(shrink_amount, 0)) {
-        result = old_allocation;
-        return result;
-    }
-    assert(old_allocation.ptr == current_offset);
+    if (lt_s(shrink_amount, 0)) { result = old_allocation; return result; }
+    U8 current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == current_offset);
     vm->commit_used -= shrink_amount;
-    result = (Slice_Mem){ old_allocation.ptr, requested_size };
-    return result;
+    result = (Slice_Mem){ old_allocation.ptr, requested_size }; return result;
 }
 finline
 void varena_rewind(VArena_R vm, AllocatorSP sp) {
@@ -1212,11 +1193,8 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
         memory_zero(out->allocation.ptr, out->allocation.len * in.op);
     break;
-    case AllocatorOp_Free:
-    break;
-    case AllocatorOp_Reset:
-        vm->commit_used = 0;
-    break;
+    case AllocatorOp_Free:  break;
+    case AllocatorOp_Reset: vm->commit_used = 0; break;
     case AllocatorOp_Grow_NoZero:
     case AllocatorOp_Grow: {
@@ -1247,12 +1225,8 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
     }
     break;
-    case AllocatorOp_Rewind:
-        vm->commit_used = in.save_point.slot;
-    break;
-    case AllocatorOp_SavePoint:
-        out->save_point = varena_save(vm);
-    break;
+    case AllocatorOp_Rewind:    vm->commit_used = in.save_point.slot; break;
+    case AllocatorOp_SavePoint: out->save_point = varena_save(vm);    break;
     case AllocatorOp_Query:
         out->features =
@@ -1277,8 +1251,7 @@ Arena* arena__make(Opts_arena_make*R_ opts) {
     U8 header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
     VArena_R current = varena__make(opts);
     assert(current != nullptr);
-    Arena* arena = varena_push(current, Arena);
-    r_(arena)[0] = (Arena){
+    Arena* arena = varena_push(current, Arena); r_(arena)[0] = (Arena){
         .backing = current,
         .prev    = nullptr,
         .current = arena,
@@ -1359,11 +1332,9 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
         out->allocation = arena_push_mem(arena, in.requested_size, .alignment = in.alignment);
         memory_zero(out->allocation.ptr, out->allocation.len * in.op);
     break;
-    case AllocatorOp_Free:
-    break;
-    case AllocatorOp_Reset:
-        arena_reset(arena);
-    break;
+    case AllocatorOp_Free:  break;
+    case AllocatorOp_Reset: arena_reset(arena); break;
     case AllocatorOp_Grow:
     case AllocatorOp_Grow_NoZero: {
@@ -1415,13 +1386,9 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
     }
     break;
-    case AllocatorOp_Rewind:
-        arena_rewind(arena, in.save_point);
-    break;
-    case AllocatorOp_SavePoint:
-        out->save_point = arena_save(arena);
-    break;
+    case AllocatorOp_Rewind:    arena_rewind(arena, in.save_point);  break;
+    case AllocatorOp_SavePoint: out->save_point = arena_save(arena); break;
     case AllocatorOp_Query:
         out->features =
             AllocatorQuery_Alloc
@@ -1822,7 +1789,7 @@ Str8* str8cache_set(KT1CX_Str8 kt, U8 key, Str8 value, AllocatorInfo str_reserve
 finline
 Str8 cache_str8(Str8Cache_R cache, Str8 str) {
     assert(cache != nullptr);
     U8 key = 0; hash64_fnv1a(& key, slice_mem_s(str));
     Str8_R result = str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve);
     return result[0];
 }
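
The invariant behind the new farena__grow/farena__shrink helpers above, restated as a sketch: only the allocation that ends exactly at start + used can be resized in place; anything else reports failure (grow) or just the adjusted size (shrink). Stand-in types, and the real helpers also handle alignment:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t start, used, capacity; } FArenaDemo;

static bool fdemo_grow_in_place(FArenaDemo* a, uint64_t alloc_ptr, uint64_t alloc_len, uint64_t new_len) {
    bool is_last = (alloc_ptr + alloc_len) == (a->start + a->used);
    if (!is_last) { return false; }                      // can't grow in place
    uint64_t extra = new_len - alloc_len;
    if (extra > a->capacity - a->used) { return false; } // arena exhausted
    a->used += extra;                                    // bump-extend the tail
    return true;
}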

==== Changed file 3 of 3 ====

@@ -25,6 +25,7 @@ Toolchain: MSVC 19.43, C-Standard: 11
 #define local_persist static
 #define global        static
 #define internal      static
+#define finline       __forceinline
 #define static_assert _Static_assert
 #define typeof        __typeof__
@@ -57,6 +58,12 @@ enum { false = 0, true = 1, true_overflow, };
 #define offset_of(type, member) cast(SSIZE, & (((type*) 0)->member))
 #define size_of(data)           cast(SSIZE, sizeof(data))
+#define R_ __restrict
+#define V_ volatile
+#define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
+#define v_(ptr) cast(typeof_ptr(ptr)*V_, ptr)
+#define ssize(value) cast(SSIZE, value)
 #define kilo(n) (cast(SSIZE, n) << 10)
 #define mega(n) (cast(SSIZE, n) << 20)
 #define giga(n) (cast(SSIZE, n) << 30)
@@ -106,11 +113,8 @@ inline SSIZE align_pow2(SSIZE x, SSIZE b);
 #define align_struct(type_width) ((SSIZE)(((type_width) + 7) / 8 * 8))
 #define assert_bounds(point, start, end) do { \
-    SSIZE pos_point = cast(SSIZE, point); \
-    SSIZE pos_start = cast(SSIZE, start); \
-    SSIZE pos_end   = cast(SSIZE, end);   \
-    assert(pos_start <= pos_point);       \
-    assert(pos_point <= pos_end);         \
+    assert(ssize(start) <= ssize(point)); \
+    assert(ssize(point) <= ssize(end));   \
 } while(0)
 void* memory_copy (void* restrict dest, void const* restrict src, USIZE length);
@@ -833,18 +837,16 @@ Slice_Byte farena__push(FArena* arena, SSIZE amount, SSIZE type_width, Opts_fare
     }
     SSIZE desired   = type_width * amount;
     SSIZE to_commit = align_pow2(desired, opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT);
-    SSIZE unused    = arena->capacity - arena->used;
-    assert(to_commit <= unused);
-    Byte* ptr = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
-    arena->used += to_commit;
+    SSIZE unused    = arena->capacity - arena->used; assert(to_commit <= unused);
+    Byte* ptr       = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
+    arena->used    += to_commit;
     return (Slice_Byte){ptr, desired};
 }
 inline void farena_reset(FArena* arena) { arena->used = 0; }
 inline
 void farena_rewind(FArena* arena, AllocatorSP save_point) {
     assert(save_point.type_sig == & farena_allocator_proc);
-    Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
-    assert_bounds(save_point.slot, arena->start, end);
+    Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used); assert_bounds(save_point.slot, arena->start, end);
     arena->used -= save_point.slot - cast(SSIZE, arena->start);
 }
 inline
@@ -1042,20 +1044,20 @@ inline void os_vmem_release(void* vm, SSIZE size) { VirtualFree(vm, 0, MS_MEM_R
 #pragma endregion OS
 #pragma region VArena (Virtual Address Space Arena)
+finline U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
 inline
 VArena* varena__make(Opts_varena_make* opts) {
     assert(opts != nullptr);
     if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
     if (opts->commit_size  == 0) { opts->commit_size  = mega(64); }
     SSIZE reserve_size = align_pow2(opts->reserve_size, os_system_info()->target_page_size);
     SSIZE commit_size  = align_pow2(opts->commit_size,  os_system_info()->target_page_size);
     B32 no_large_pages = (opts->flags & VArenaFlag_NoLargePages) != 0;
-    Byte* base = os__vmem_reserve(reserve_size, &(Opts_vmem){.base_addr = opts->base_addr, .no_large_pages = no_large_pages});
+    Byte* base = os_vmem_reserve(reserve_size, .base_addr = opts->base_addr, .no_large_pages = no_large_pages);
     assert(base != nullptr);
     os_vmem_commit(base, commit_size, .no_large_pages = no_large_pages);
     SSIZE header_size = align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT);
-    VArena* vm = cast(VArena*, base);
-    * vm = (VArena){
+    VArena* vm = cast(VArena*, base); * vm = (VArena){
         .reserve_start = cast(SSIZE, base) + header_size,
         .reserve       = reserve_size,
         .commit_size   = commit_size,
@@ -1067,6 +1069,7 @@ VArena* varena__make(Opts_varena_make* opts) {
 }
 inline
 Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena* opts) {
+    assert(vm != nullptr);
     assert(amount != 0);
     SSIZE alignment      = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
     SSIZE requested_size = amount * type_width;
@@ -1075,10 +1078,8 @@ Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena*
     SSIZE to_be_used   = vm->commit_used + aligned_size;
     SSIZE reserve_left = vm->reserve   - vm->commit_used;
     SSIZE commit_left  = vm->committed - vm->commit_used;
-    B32 exhausted = commit_left < to_be_used;
-    assert(to_be_used < reserve_left);
-    if (exhausted)
-    {
+    B32 exhausted = commit_left < to_be_used; assert(to_be_used < reserve_left);
+    if (exhausted) {
         SSIZE next_commit_size = reserve_left > 0 ?
             max(vm->commit_size, to_be_used)
             : cast(SSIZE, align_pow2( reserve_left, os_system_info()->target_page_size));
@@ -1086,9 +1087,7 @@ Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena*
         Byte* next_commit_start = cast(Byte*, cast(SSIZE, vm) + vm->committed);
         B32 no_large_pages      = (vm->flags & VArenaFlag_NoLargePages) != 0;
         B32 commit_result       = os_vmem_commit(next_commit_start, next_commit_size, .no_large_pages = no_large_pages);
-        if (commit_result == false) {
-            return (Slice_Byte){0};
-        }
+        if (commit_result == false) { return (Slice_Byte){0}; }
         vm->committed += next_commit_size;
     }
 }
@@ -1191,10 +1190,8 @@ inline
 Arena* arena__make(Opts_arena_make* opts) {
     assert(opts != nullptr);
     SSIZE header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
-    VArena* current = varena__make(opts);
-    assert(current != nullptr);
-    Arena* arena = varena_push(current, Arena);
-    * arena = (Arena){
+    VArena* current = varena__make(opts); assert(current != nullptr);
+    Arena* arena = varena_push(current, Arena); * arena = (Arena){
         .backing = current,
         .prev    = nullptr,
         .current = arena,
@@ -1213,7 +1210,7 @@ Slice_Byte arena__push(Arena* arena, SSIZE amount, SSIZE type_width, Opts_arena*
     SSIZE size_aligned = align_pow2(size_requested, alignment);
     SSIZE pos_pre      = active->pos;
     SSIZE pos_pst      = pos_pre + size_aligned;
     B32 should_chain =
         ((arena->flags & ArenaFlag_NoChain) == 0)
         && active->backing->reserve < pos_pst;
     if (should_chain)
@@ -1275,11 +1272,9 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
         out->allocation = arena_push_array(arena, Byte, in.requested_size, .alignment = in.alignment);
         memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
     break;
-    case AllocatorOp_Free:
-    break;
-    case AllocatorOp_Reset:
-        arena_reset(arena);
-    break;
+    case AllocatorOp_Free:  break;
+    case AllocatorOp_Reset: arena_reset(arena); break;
     case AllocatorOp_Grow:
     case AllocatorOp_Grow_NoZero: {
@@ -1331,13 +1326,9 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
     }
     break;
-    case AllocatorOp_Rewind:
-        arena_rewind(arena, in.save_point);
-    break;
-    case AllocatorOp_SavePoint:
-        out->save_point = arena_save(arena);
-    break;
+    case AllocatorOp_Rewind:    arena_rewind(arena, in.save_point);  break;
+    case AllocatorOp_SavePoint: out->save_point = arena_save(arena); break;
     case AllocatorOp_Query:
         out->features =
             AllocatorQuery_Alloc
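
The condensed shape the allocator-proc switches settle on across all three files: trivial ops collapse to one line per case. A stand-in sketch of that dispatch style (demo enum and state only):

#include <stdint.h>

typedef enum { OpDemo_Free, OpDemo_Reset, OpDemo_Rewind, OpDemo_SavePoint } OpDemo;
typedef struct { uint64_t used; } ArenaProcDemo;

static void arena_proc_demo(ArenaProcDemo* a, OpDemo op, uint64_t save_slot, uint64_t* out_slot) {
    switch (op) {
    case OpDemo_Free:      break;                       // arena frees nothing per-allocation
    case OpDemo_Reset:     a->used = 0;         break;  // drop every allocation
    case OpDemo_Rewind:    a->used = save_slot; break;  // restore a save point
    case OpDemo_SavePoint: *out_slot = a->used; break;  // record the current position
    }
}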