code2: initial curation of virtual arena

This commit is contained in:
2025-10-18 20:46:06 -04:00
parent 0f621b4e1b
commit 58ba273dd1
7 changed files with 287 additions and 22 deletions

View File

@@ -115,7 +115,7 @@ AllocatorInfo :: struct {
// Listing of every single allocator (used on hot-reloadable builds)
AllocatorProcID :: enum uintptr {
FArena,
// VArena,
VArena,
// CArena,
// Pool,
// Slab,
@@ -127,7 +127,7 @@ resolve_allocator_proc :: #force_inline proc "contextless" (procedure: $Allocato
when ODIN_DEBUG {
switch (transmute(AllocatorProcID)procedure) {
case .FArena: return farena_allocator_proc
// case .VArena: return varena_allocaotr_proc
case .VArena: return varena_allocator_proc
// case .CArena: return carena_allocator_proc
// case .Pool: return pool_allocator_proc
// case .Slab: return slab_allocator_proc
@@ -145,7 +145,7 @@ resolve_odin_allocator :: #force_inline proc "contextless" (allocator: Odin_Allo
when ODIN_DEBUG {
switch (transmute(AllocatorProcID)allocator.procedure) {
case .FArena: return { farena_odin_allocator_proc, allocator.data }
// case .VArena: return { varena_odin_allocaotr_proc, allocator.data }
case .VArena: return { varena_odin_allocator_proc, allocator.data }
// case .CArena: return { carena_odin_allocator_proc, allocator.data }
// case .Pool: return nil // pool_allocator_proc
// case .Slab: return nil // slab_allocator_proc
@@ -177,6 +177,7 @@ odin_allocator_mode_to_allocator_op :: #force_inline proc "contextless" (mode: O
panic_contextless("Impossible path")
}
// TODO(Ed): Change to DEFAULT_ALIGNMENT
MEMORY_ALIGNMENT_DEFAULT :: 2 * size_of(rawptr)
allocatorinfo :: #force_inline proc(ainfo := context.allocator) -> AllocatorInfo { return transmute(AllocatorInfo) ainfo }

View File

@@ -172,6 +172,6 @@ when ODIN_DEBUG {
farena_allocator :: #force_inline proc "contextless" (arena: ^FArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{proc_id = .FArena, data = arena} }
}
else {
farena_ainfo :: #force_inline proc "contextless" (arena: ^FArena) -> AllocatorInfo { return AllocatorInfo{procedure = farena_allocator_proc, data = arena} }
farena_allocator :: #force_inline proc "contextless" (arena: ^FArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = farena_allocator_proc, data = arena} }
farena_ainfo :: #force_inline proc "contextless" (arena: ^FArena) -> AllocatorInfo { return AllocatorInfo{procedure = farena_allocator_proc, data = arena} }
farena_allocator :: #force_inline proc "contextless" (arena: ^FArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = farena_odin_allocator_proc, data = arena} }
}

View File

@@ -6,6 +6,7 @@ import "base:builtin"
import "base:intrinsics"
atomic_thread_fence :: intrinsics.atomic_thread_fence
mem_zero_volatile :: intrinsics.mem_zero_volatile
add_overflow :: intrinsics.overflow_add
// mem_zero :: intrinsics.mem_zero
// mem_copy :: intrinsics.mem_copy_non_overlapping
// mem_copy_overlapping :: intrinsics.mem_copy
@@ -140,7 +141,7 @@ copy :: proc {
mem_copy,
slice_copy,
}
copy_non_overlaping :: proc {
copy_non_overlapping :: proc {
mem_copy_non_overlapping,
slice_copy_overlapping,
}

View File

@@ -1,4 +1,7 @@
package grime
import "base:runtime"
/*
So this is a virtual memory backed arena allocator designed
to take advantage of one large contiguous reserve of memory.
@@ -11,7 +14,6 @@ No other part of the program will directly touch the vitual memory interface dir
Thus, for the scope of this prototype, the Virtual Arenas are the only interfaces to dynamic address spaces for the runtime of the client app.
The host application as well ideally (although this may not be the case for a while)
*/
VArena_GrowthPolicyProc :: #type proc( commit_used, committed, reserved, requested_size : uint ) -> uint
VArena :: struct {
using vmem: VirtualMemoryRegion,
@@ -19,7 +21,277 @@ VArena :: struct {
dbg_name: string,
commit_used: uint,
growth_policy: VArena_GrowthPolicyProc,
allow_any_resize: b32,
allow_any_resize: b64,
mutex: Mutex,
}
// Signature for a policy deciding the next TOTAL commit size (bytes) when the arena must grow.
// All inputs are byte counts; the returned value is passed to virtual_commit.
VArena_GrowthPolicyProc :: #type proc(commit_used, committed, reserved, requested_size: uint) -> uint
// Default growth policy: grows by a persistent increment (starting at 16 KiB) that doubles
// each time total committed memory crosses a doubling threshold (starting at 1 MiB),
// clamped to [1 MiB, 1 GiB].
// NOTE(review): the @static state is shared across ALL arenas using this policy, so growth
// pacing is global rather than per-arena — confirm this is intended.
varena_default_growth_policy :: proc(commit_used, committed, reserved, requested_size: uint) -> uint {
@static commit_limit := uint(1 * Mega)
@static increment := uint(16 * Kilo)
page_size := uint(virtual_get_page_size())
// Escalate the increment once committed memory exceeds the current threshold.
if increment < Giga && committed > commit_limit {
commit_limit *= 2
increment *= 2
increment = clamp(increment, Mega, Giga)
}
remaining_reserve := reserved - committed
// Grow by at least the requested size, at least one page, never past the remaining reserve.
growth_increment := max( increment, requested_size )
growth_increment = clamp( growth_increment, page_size, remaining_reserve )
// Round the new total commit size up to page granularity.
next_commit_size := memory_align_formula( committed + growth_increment, page_size )
return next_commit_size
}
// Initializes a virtual-memory-backed arena.
// Reserves `to_reserve` bytes of address space at `base_address` (0 lets the OS choose) and
// commits the first `to_commit` bytes. Both must be at least one page, and the reserve must
// cover the commit. Default growth_policy is varena_default_growth_policy.
// Returns the arena by value plus any allocation error from the virtual memory layer.
varena_init :: proc(base_address: uintptr, to_reserve, to_commit: uint,
growth_policy: VArena_GrowthPolicyProc = varena_default_growth_policy,
allow_any_resize: bool = false,
dbg_name: string = "",
enable_mem_tracking: bool = false,
) -> (arena: VArena, alloc_error: AllocatorError)
{
page_size := uint(virtual_get_page_size())
verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
verify( to_commit >= page_size, "Attempted to commit less than a page size")
verify( to_reserve >= to_commit, "Attempted to commit more than there is to reserve" )
vmem : VirtualMemoryRegion
vmem, alloc_error = virtual_reserve_and_commit( base_address, to_reserve, to_commit )
if ensure(vmem.base_address == nil || alloc_error != .None, "Failed to allocate requested virtual memory for virtual arena") {
return
}
arena.vmem = vmem
arena.commit_used = 0
// growth_policy can only be nil if the caller passed nil explicitly; restore the default.
if growth_policy == nil do arena.growth_policy = varena_default_growth_policy
else do arena.growth_policy = growth_policy
arena.allow_any_resize = b64(allow_any_resize)
// Optional allocation tracking (debug aid), backed by the heap allocator.
if Track_Memory && enable_mem_tracking {
memtracker_init( & arena.tracker, runtime.heap_allocator(), Kilo * 128, dbg_name)
}
return
}
// Bump-allocates `size` bytes from the arena with the given power-of-two alignment,
// committing more pages via the arena's growth policy when needed.
// Returns the allocated slice (optionally zeroed) or an AllocatorError.
varena_alloc :: proc(using self: ^VArena,
size: int,
alignment: int = MEMORY_ALIGNMENT_DEFAULT,
zero_memory := true,
location := #caller_location
) -> (data: []byte, alloc_error: AllocatorError)
{
verify( alignment & (alignment - 1) == 0, "Non-power of two alignment", location = location )
page_size := uint(virtual_get_page_size())
requested_size := uint(size)
if ensure(requested_size == 0, "Requested 0 size") do return nil, .Invalid_Argument
// ensure( requested_size > page_size, "Requested less than a page size, going to allocate a page size")
// requested_size = max(requested_size, page_size)
// TODO(Ed): Prevent multiple threads from entering here exclusively?
// sync.mutex_guard( & mutex )
// Padding needed to bring the current bump cursor up to the requested alignment.
alignment_offset := uint(0)
current_offset := uintptr(self.reserve_start) + uintptr(commit_used)
mask := uintptr(alignment - 1)
if (current_offset & mask != 0) do alignment_offset = uint(alignment) - uint(current_offset & mask)
// Overflow-checked: total bytes consumed by this allocation (padding + payload) ...
size_to_allocate, overflow_signal := add_overflow( requested_size, alignment_offset )
if overflow_signal do return {}, .Out_Of_Memory
// ... and the resulting cursor position, which must stay within the reserve.
to_be_used : uint
to_be_used, overflow_signal = add_overflow( commit_used, size_to_allocate )
if (overflow_signal || to_be_used > reserved) do return {}, .Out_Of_Memory
// Commit more pages if the committed region cannot hold this allocation.
// header_offset accounts for the region header living between base_address and reserve_start.
header_offset := uint( uintptr(reserve_start) - uintptr(base_address) )
commit_left := committed - commit_used - header_offset
needs_more_committed := commit_left < size_to_allocate
if needs_more_committed {
profile("VArena Growing")
next_commit_size := growth_policy( commit_used, committed, reserved, size_to_allocate )
alloc_error = virtual_commit( vmem, next_commit_size )
if alloc_error != .None do return
}
data_ptr := ([^]byte)(current_offset + uintptr(alignment_offset))
data = slice( data_ptr, requested_size )
commit_used += size_to_allocate
alloc_error = .None
// log_backing: [Kilobyte * 16]byte; backing_slice := log_backing[:]
// log( str_pfmt_buffer( backing_slice, "varena alloc - BASE: %p PTR: %X, SIZE: %d", cast(rawptr) self.base_address, & data[0], requested_size) )
if zero_memory {
// log( str_pfmt_buffer( backing_slice, "Zeroring data (Range: %p to %p)", raw_data(data), cast(rawptr) (uintptr(raw_data(data)) + uintptr(requested_size))))
// zero( data )
mem_zero( data_ptr, int(requested_size) )
}
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & tracker, & data[0], & data[len(data) - 1] )
}
return
}
// Grows an existing allocation to `requested_size` bytes.
// - nil old_memory: behaves as a fresh allocation.
// - old_memory is the most recent allocation: extended in place by allocating the delta.
// - otherwise (requires allow_any_resize): a new region is allocated and the contents copied;
//   the old region is unrecoverable until the arena is reset.
// Fixes vs. previous revision: removed unused log_backing/backing_slice locals (only referenced
// by commented-out logging), corrected "vitual" typo in the verify message, and cast
// allow_any_resize to bool for consistency with the verify condition above it.
varena_grow :: #force_inline proc(self: ^VArena, old_memory: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero := true, loc := #caller_location) -> (data: []byte, error: AllocatorError)
{
if ensure(old_memory == nil, "Growing without old_memory?") {
data, error = varena_alloc(self, requested_size, alignment, should_zero, loc)
return
}
if ensure(requested_size == len(old_memory), "Requested grow when none needed") {
data = old_memory
return
}
alignment_offset := uintptr(cursor(old_memory)) & uintptr(alignment - 1)
if ensure(alignment_offset == 0 && requested_size < len(old_memory), "Requested a shrink from varena_grow") {
data = old_memory
return
}
// End of the old allocation vs. the arena's current bump cursor:
// equal means old_memory was the last allocation and can be grown in place.
old_memory_offset := cursor(old_memory)[len(old_memory):]
current_offset := self.reserve_start[self.commit_used:]
when false {
if old_size < page_size {
// We're dealing with an allocation that requested less than the minimum allocated on vmem.
// Provide them more of their actual memory
data = slice(transmute([^]byte)old_memory, size )
return
}
}
verify( old_memory_offset == current_offset || bool(self.allow_any_resize),
"Cannot grow existing allocation in virtual arena to a larger size unless it was the last allocated" )
if old_memory_offset != current_offset && bool(self.allow_any_resize)
{
// Not the last allocation: give it new memory and copy the old over.
// Old memory is unrecoverable until clear.
new_region : []byte
new_region, error = varena_alloc( self, requested_size, alignment, should_zero, loc )
if ensure(new_region == nil || error != .None, "Failed to grab new region") {
data = old_memory
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
copy_non_overlapping( cursor(new_region), cursor(old_memory), len(old_memory) )
data = new_region
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
// Last allocation: allocate only the delta and widen the existing slice in place.
new_region : []byte
new_region, error = varena_alloc( self, requested_size - len(old_memory), alignment, should_zero, loc)
if ensure(new_region == nil || error != .None, "Failed to grab new region") {
data = old_memory
return
}
data = slice(cursor(old_memory), requested_size )
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
// Shrinks an allocation to `requested_size` bytes, returning the committed space
// to the arena's bump cursor.
// NOTE(review): only the most recent allocation can be shrunk (asserted below);
// a requested_size larger than the allocation is silently a no-op (.None) — confirm intended.
varena_shrink :: proc(self: ^VArena, memory: []byte, requested_size: int, loc := #caller_location) -> (data: []byte, error: AllocatorError)
{
if requested_size == len(memory) { return memory, .None }
if ensure(memory == nil, "Shrinking without old_memory?") do return memory, .Invalid_Argument
current_offset := self.reserve_start[self.commit_used:]
shrink_amount := len(memory) - requested_size
if shrink_amount < 0 do return memory, .None
// Must be the last allocation: its cursor must match the arena's bump cursor.
assert(cursor(memory) == current_offset)
self.commit_used -= uint(shrink_amount)
return memory[:requested_size], .None
}
// Invalidates every allocation by rewinding the arena's bump cursor to zero.
// Committed pages stay committed and are reused by subsequent allocations.
varena_reset :: #force_inline proc(self: ^VArena) {
// TODO(Ed): Prevent multiple threads from entering here exclusively?
// sync.mutex_guard( & mutex )
if Track_Memory && self.tracker.entries.header != nil do array_clear(self.tracker.entries)
self.commit_used = 0
}
// Releases the arena's entire virtual memory region back to the OS.
// The arena must not be used afterwards; commit_used is cleared defensively.
varena_release :: #force_inline proc(self: ^VArena) {
// TODO(Ed): Prevent multiple threads from entering here exclusively?
// sync.mutex_guard( & mutex )
virtual_release( self.vmem )
self.commit_used = 0
}
// Rewinds the bump cursor to a save point previously produced by varena_save.
// The save point must belong to this allocator (type_sig check) and lie within
// the currently used range.
varena_rewind :: #force_inline proc(arena: ^VArena, save_point: AllocatorSP, loc := #caller_location) {
assert_contextless(save_point.type_sig == varena_allocator_proc)
assert_contextless(save_point.slot >= 0 && save_point.slot <= int(arena.commit_used))
arena.commit_used = cast(uint) save_point.slot
}
// Captures the current bump cursor as a save point for later varena_rewind.
varena_save :: #force_inline proc(arena: ^VArena) -> AllocatorSP { return AllocatorSP { type_sig = varena_allocator_proc, slot = cast(int) arena.commit_used }}
// Dispatch procedure adapting VArena to the project's AllocatorProc interface.
// .Free is unsupported (bump allocator); .Rewind/.Reset/.SavePoint leave output.error
// at its zero value.
varena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out) {
assert(output != nil)
assert(input.data != nil)
arena := transmute(^VArena) input.data
switch input.op {
case .Alloc, .Alloc_NoZero:
output.allocation, output.error = varena_alloc(arena, input.requested_size, input.alignment, input.op == .Alloc, input.loc)
return
case .Free:
// Individual frees are not supported by a bump arena.
output.error = .Mode_Not_Implemented
case .Reset:
varena_reset(arena)
case .Grow, .Grow_NoZero:
output.allocation, output.error = varena_grow(arena, input.old_allocation, input.requested_size, input.alignment, input.op == .Grow, input.loc)
case .Shrink:
output.allocation, output.error = varena_shrink(arena, input.old_allocation, input.requested_size)
case .Rewind:
varena_rewind(arena, input.save_point)
case .SavePoint:
output.save_point = varena_save(arena)
case .Query:
output.features = {.Alloc, .Reset, .Grow, .Shrink, .Rewind}
// Max single allocation is whatever remains of the reserve.
output.max_alloc = int(arena.reserved - arena.commit_used)
output.min_alloc = 0
output.left = output.max_alloc
output.save_point = varena_save(arena)
}
}
// Adapter exposing VArena through Odin's native allocator interface.
// Fixes vs. previous revision:
// - .Resize results were discarded (resize always returned nil data, .None error);
//   they are now assigned to the named results.
// - The zeroing flag compared mode against .Alloc inside the Resize case, which can
//   never match there; it now tests .Resize.
// - Removed unused page_size local.
varena_odin_allocator_proc :: proc(
allocator_data : rawptr,
mode : Odin_AllocatorMode,
size : int,
alignment : int,
old_memory : rawptr,
old_size : int,
location : SourceCodeLocation = #caller_location
) -> (data: []byte, alloc_error: AllocatorError)
{
arena := transmute( ^VArena) allocator_data
switch mode {
case .Alloc, .Alloc_Non_Zeroed:
data, alloc_error = varena_alloc( arena, size, alignment, (mode == .Alloc), location )
return
case .Free:
// Individual frees are not supported by a bump arena.
alloc_error = .Mode_Not_Implemented
case .Free_All:
varena_reset( arena )
case .Resize, .Resize_Non_Zeroed:
if size > old_size do data, alloc_error = varena_grow (arena, slice(cursor(old_memory), old_size), size, alignment, (mode == .Resize), location)
else do data, alloc_error = varena_shrink(arena, slice(cursor(old_memory), old_size), size, location)
case .Query_Features:
set := cast( ^Odin_AllocatorModeSet) old_memory
if set != nil do (set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
case .Query_Info:
info := (^Odin_AllocatorQueryInfo)(old_memory)
info.pointer = transmute(rawptr) varena_save(arena).slot
info.size = cast(int) arena.reserved
info.alignment = MEMORY_ALIGNMENT_DEFAULT
return to_bytes(info), nil
}
return
}
// Wraps a VArena as an Odin_Allocator value usable with core APIs.
varena_odin_allocator :: proc(arena: ^VArena) -> (allocator: Odin_Allocator) {
allocator = Odin_Allocator { procedure = varena_odin_allocator_proc, data = arena }
return
}
// Debug builds encode the allocator as a stable enum id (hot-reload safe);
// release builds store the procedure pointers directly.
when ODIN_DEBUG {
varena_ainfo :: #force_inline proc "contextless" (arena: ^VArena) -> AllocatorInfo { return AllocatorInfo{proc_id = .VArena, data = arena} }
varena_allocator :: #force_inline proc "contextless" (arena: ^VArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{proc_id = .VArena, data = arena} }
}
else {
varena_ainfo :: #force_inline proc "contextless" (arena: ^VArena) -> AllocatorInfo { return AllocatorInfo{procedure = varena_allocator_proc, data = arena} }
varena_allocator :: #force_inline proc "contextless" (arena: ^VArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = varena_odin_allocator_proc, data = arena} }
}

View File

@@ -151,6 +151,8 @@ main :: proc()
if thread_memory.id == .Master_Prepper {
thread_join_multiple(.. host_memory.threads[1:THREAD_TICK_LANES + THREAD_JOB_WORKERS])
}
host_memory.client_api.shutdown();
unload_client_api( & host_memory.client_api )

View File

@@ -100,7 +100,8 @@ startup :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
log_print_fmt("Startup time: %v ms", startup_ms)
}
// For some reason odin's symbols conflict with native foreign symbols...
// NOTE(Ed): For some reason odin's symbols conflict with native foreign symbols...
// Called in host.main after all tick lane or job worker threads have joined.
@export
sectr_shutdown :: proc()
{

View File

@@ -183,11 +183,9 @@ poll_input_events :: proc( input, prev_input : ^InputState, input_events : Input
for prev_key, id in prev_input.keyboard.keys {
input.keyboard.keys[id].ended_down = prev_key.ended_down
}
for prev_btn, id in prev_input.mouse.btns {
input.mouse.btns[id].ended_down = prev_btn.ended_down
}
input.mouse.raw_pos = prev_input.mouse.raw_pos
input.mouse.pos = prev_input.mouse.pos
@@ -200,7 +198,6 @@ poll_input_events :: proc( input, prev_input : ^InputState, input_events : Input
if events.num > 0 {
last_frame = peek_back( events).frame_id
}
// No new events, don't update
if last_frame == prev_frame do return
@@ -232,7 +229,6 @@ poll_input_events :: proc( input, prev_input : ^InputState, input_events : Input
}
}
}
Iterate_Mouse_Events:
{
iter_obj := iterator( & mouse_events ); iter := & iter_obj
@@ -241,17 +237,13 @@ poll_input_events :: proc( input, prev_input : ^InputState, input_events : Input
if last_frame > event.frame_id {
break
}
process_digital_btn :: proc( btn : ^DigitalBtn, prev_btn : DigitalBtn, ended_down : b32 )
{
first_transition := btn.half_transitions == 0
btn.half_transitions += 1
btn.ended_down = ended_down
}
// logf("mouse event: %v", event)
// log_print_fmt("mouse event: %v", event)
#partial switch event.type {
case .Mouse_Pressed:
btn := & input.mouse.btns[event.btn]
@@ -277,22 +269,18 @@ poll_input_events :: proc( input, prev_input : ^InputState, input_events : Input
input.mouse.delta = event.delta * { 1, -1 }
}
}
prev_frame = last_frame
}
input_event_iter :: #force_inline proc () -> FRingBufferIterator(InputEvent) {
return iterator_ringbuf_fixed( & memory.client_memory.input_events.events )
}
input_key_event_iter :: #force_inline proc() -> FRingBufferIterator(InputKeyEvent) {
return iterator_ringbuf_fixed( & memory.client_memory.input_events.key_events )
}
input_mouse_event_iter :: #force_inline proc() -> FRingBufferIterator(InputMouseEvent) {
return iterator_ringbuf_fixed( & memory.client_memory.input_events.mouse_events )
}
input_codes_pressed_slice :: #force_inline proc() -> []rune {
return to_slice( memory.client_memory.input_events.codes_pressed )
}