Bug fixes to allocators and containers

Still haven't found the main bug with resizing the zpl_hashmap used for string interning
Edward R. Gonzalez 2024-05-06 23:29:47 -04:00
parent 0e27635a9b
commit 1e6e9581c3
16 changed files with 276 additions and 52 deletions
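
For context on the bug still being hunted: the str_cache_init change further down shrinks the string cache's table reserve from 4 * Kilobyte down to 8 entries, so zpl_hmap_rehash fires after only a handful of interns. Below is a minimal sketch (not part of the commit) of the kind of loop that exercises that resize path; str_intern is assumed to be the cache's interning entry point and its name is hypothetical here, while str_fmt_tmp appears elsewhere in this commit.

// Repeatedly intern unique strings until cache.table grows past its small reserve,
// which drives zpl_hmap_rehash and the resize path under investigation.
force_string_cache_rehash :: proc() {
	for idx in 0 ..< 64 {
		str_intern( str_fmt_tmp("unique_string_%v", idx) )   // str_intern is an assumed helper name
	}
}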

.vscode/launch.json

@ -4,6 +4,18 @@
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Debug gen_odin_src",
"type": "cppvsdbg",
"request": "launch",
"program": "${workspaceFolder}/toolchain/odin/codegen/build/gen_src.exe",
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}/toolchain/odin",
"environment": [],
"console": "externalTerminal",
"visualizerFile": "${workspaceFolder}/toolchain/odin/scripts/gencpp.natvis"
},
{
"type": "lldb",
"request": "launch",


@ -10,6 +10,8 @@
"files.associations": {
"*.rmd": "markdown",
"type_traits": "cpp",
"utf8proc.c": "cpp"
}
"utf8proc.c": "cpp",
"xtr1common": "cpp"
},
"C_Cpp.intelliSenseEngineFallback": "disabled"
}


@ -15,9 +15,9 @@ import rl "vendor:raylib"
Path_Assets :: "../assets/"
Path_Input_Replay :: "scratch.sectr_replay"
Persistent_Slab_DBG_Name :: "Peristent Slab"
Frame_Slab_DBG_Name :: "Frame Slab"
Transient_Slab_DBG_Name :: "Transient Slab"
Persistent_Slab_DBG_Name := "Peristent Slab"
Frame_Slab_DBG_Name := "Frame Slab"
Transient_Slab_DBG_Name := "Transient Slab"
ModuleAPI :: struct {
lib : dynlib.Library,
@ -56,6 +56,7 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
}
state := new( State, persistent_allocator() )
Memory_App.state = state
using state
// Setup Persistent Slab
@ -118,16 +119,14 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
string_cache = str_cache_init()
context.user_ptr = state
input = & input_data[1]
input_prev = & input_data[0]
// Configuration Load
{
using config
resolution_width = 1280
resolution_height = 900
resolution_width = 1000
resolution_height = 600
refresh_rate = 0
cam_min_zoom = 0.25
@ -153,7 +152,7 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
rl.SetConfigFlags( {
rl.ConfigFlag.WINDOW_RESIZABLE,
rl.ConfigFlag.WINDOW_TOPMOST,
// rl.ConfigFlag.WINDOW_TOPMOST,
})
window_width : i32 = cast(i32) config.resolution_width
@ -275,7 +274,7 @@ reload :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem,
context.allocator = persistent_allocator()
context.temp_allocator = transient_allocator()
state := get_state(); using state
using state
// Procedure Addresses are not preserved on hot-reload. They must be restored for persistent data.
// The only way to alleviate this is to either do custom handles to allocators
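
A small illustration (not from the commit) of the restoration that comment describes: any Allocator captured inside persistently allocated containers still points at procedures in the old DLL image after a hot-reload, so reload must re-point them at the freshly loaded procedures. The exact fields shown are illustrative; persistent_slab_allocator and the cache's backing field appear elsewhere in this commit.

// Re-point stale Allocator procedure pointers captured before the reload.
string_cache.table.hashes.backing  = persistent_slab_allocator()
string_cache.table.entries.backing = persistent_slab_allocator()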


@ -8,6 +8,8 @@ import "core:os"
import rl "vendor:raylib"
Str_App_State := "App State"
Memory_App : Memory
Memory_Base_Address_Persistent :: Terabyte * 1
@ -40,6 +42,8 @@ Memory :: struct {
transient : ^VArena,
files_buffer : ^VArena,
state : ^State,
// Should only be used for small memory allocation iterations
// Not for large memory env states
snapshot : MemorySnapshot,
@ -199,7 +203,8 @@ State :: struct {
}
get_state :: proc "contextless" () -> ^ State {
return cast( ^ State ) Memory_App.persistent.reserve_start
// return cast( ^ State ) Memory_App.persistent.reserve_start
return Memory_App.state
}
AppWindow :: struct {


@ -16,6 +16,8 @@ import "core:slice"
ArrayHeader :: struct ( $ Type : typeid ) {
backing : Allocator,
dbg_name : string,
fixed_cap : b32,
capacity : u64,
num : u64,
data : [^]Type,
@ -54,7 +56,7 @@ array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), A
}
array_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, capacity : u64 ) -> ( result : Array(Type), alloc_error : AllocatorError )
( $ Type : typeid, allocator : Allocator, capacity : u64, fixed_cap : b32 = false, dbg_name : string = "" ) -> ( result : Array(Type), alloc_error : AllocatorError )
{
header_size := size_of(ArrayHeader(Type))
array_size := header_size + int(capacity) * size_of(Type)
@ -66,6 +68,8 @@ array_init_reserve :: proc
result.header = cast( ^ArrayHeader(Type)) raw_mem;
result.backing = allocator
// result.dbg_name = dbg_name
result.fixed_cap = fixed_cap
result.capacity = capacity
result.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) result.header)[ 1:]
return
@ -127,14 +131,8 @@ array_append_at :: proc( using self : ^Array( $ Type ), item : Type, id : u64 )
}
target := & data[id]
libc.memmove( ptr_offset(target, 1), target, uint(num - id) * size_of(Type) )
// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE.
dst = slice_ptr( ptr_offset(target) + 1, num - id - 1 )
src = slice_ptr( target, num - id )
copy( dst, src )
// Note(Ed) : Original code from gencpp
// libc.memmove( ptr_offset(target, 1), target, (num - idx) * size_of(Type) )
data[id] = item
num += 1
return AllocatorError.None
@ -183,11 +181,11 @@ array_push_back :: proc( using self : Array( $ Type)) -> b32 {
return true
}
array_clear :: proc( using self : Array( $ Type ), zero_data : b32 ) {
array_clear :: proc "contextless" ( using self : Array( $ Type ), zero_data : b32 = false ) {
if zero_data {
mem.set( raw_data( data ), 0, num )
mem.set( data, 0, int(num * size_of(Type)) )
}
num = 0
header.num = 0
}
array_fill :: proc( using self : Array( $ Type ), begin, end : u64, value : Type ) -> b32
@ -206,7 +204,7 @@ array_fill :: proc( using self : Array( $ Type ), begin, end : u64, value : Type
}
array_free :: proc( using self : Array( $ Type ) ) {
free( data, backing )
free( self.header, backing )
self.data = nil
}
@ -228,13 +226,13 @@ array_pop :: proc( using self : Array( $ Type ) ) {
array_remove_at :: proc( using self : Array( $ Type ), id : u64 )
{
verify( id >= num, "Attempted to remove from an index larger than the array" )
verify( id < header.num, "Attempted to remove from an index larger than the array" )
left = slice_ptr( data, id )
right = slice_ptr( ptr_offset( memory_after(left), 1), num - len(left) - 1 )
copy( left, right )
left := & data[id]
right := & data[id + 1]
libc.memmove( left, right, uint(num - id) * size_of(Type) )
num -= 1
header.num -= 1
}
array_reserve :: proc( using self : ^Array( $ Type ), new_capacity : u64 ) -> AllocatorError
@ -291,3 +289,20 @@ array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> All
self.header.num = self.num
return result_code
}
array_block_size :: proc "contextless" ( self : Array( $Type ) ) -> u64 {
header_size :: size_of(ArrayHeader(Type))
block_size := cast(u64) (header_size + self.capacity * size_of(Type))
return block_size
}
array_memtracker_entry :: proc( self : Array( $Type ), name : string ) -> MemoryTrackerEntry {
header_size :: size_of(ArrayHeader(Type))
block_size := cast(uintptr) (header_size + (cast(uintptr) self.capacity) * size_of(Type))
block_start := transmute(^u8) self.header
block_end := ptr_offset( block_start, block_size )
tracker_entry := MemoryTrackerEntry { name, block_start, block_end }
return tracker_entry
}
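
These two helpers pair with the memory tracker added in this commit: array_block_size reports the full backing block (header plus capacity worth of elements), and array_memtracker_entry turns that block into a start/end range. A usage sketch, where the array and tracker names are placeholders:

entry := array_memtracker_entry( tokens, "tokens" )   // covers the ArrayHeader and its capacity of elements
memtracker_register( & tracker, entry )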


@ -1,5 +1,12 @@
package sectr
// GrimeContextExt :: struct {
// dbg_name : string
// }
// Global_Transient_Context : GrimeContextExt
context_ext :: proc( $ Type : typeid ) -> (^Type) {
return cast(^Type) context.user_ptr
}
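
context_ext just reinterprets context.user_ptr; since startup stores the State pointer there (context.user_ptr = state), a typical call site might look like:

state := context_ext( State )   // recovers the ^State stashed in context.user_ptr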


@ -36,8 +36,8 @@ HMapZPL_Entry :: struct ( $ Type : typeid) {
}
HMapZPL :: struct ( $ Type : typeid ) {
hashes : Array( i64 ),
entries : Array( HMapZPL_Entry(Type) ),
hashes : Array( i64 ),
entries : Array( HMapZPL_Entry(Type) ),
}
zpl_hmap_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HMapZPL( Type), AllocatorError ) {
@ -45,12 +45,12 @@ zpl_hmap_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HMapZPL( Ty
}
zpl_hmap_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, num : u64 ) -> ( HMapZPL( Type), AllocatorError )
( $ Type : typeid, allocator : Allocator, num : u64, dbg_name : string = "" ) -> ( HMapZPL( Type), AllocatorError )
{
result : HMapZPL(Type)
hashes_result, entries_result : AllocatorError
result.hashes, hashes_result = array_init_reserve( i64, allocator, num )
result.hashes, hashes_result = array_init_reserve( i64, allocator, num, dbg_name = dbg_name )
if hashes_result != AllocatorError.None {
ensure( false, "Failed to allocate hashes array" )
return result, hashes_result
@ -58,7 +58,7 @@ zpl_hmap_init_reserve :: proc
array_resize( & result.hashes, num )
slice.fill( slice_ptr( result.hashes.data, cast(int) result.hashes.num), -1 )
result.entries, entries_result = array_init_reserve( HMapZPL_Entry(Type), allocator, num )
result.entries, entries_result = array_init_reserve( HMapZPL_Entry(Type), allocator, num, dbg_name = dbg_name )
if entries_result != AllocatorError.None {
ensure( false, "Failed to allocate entries array" )
return result, entries_result
@ -119,7 +119,7 @@ zpl_hmap_rehash :: proc( ht : ^ HMapZPL( $ Type ), new_num : u64 ) -> AllocatorE
// ensure( false, "ZPL HMAP IS REHASHING" )
last_added_index : i64
new_ht, init_result := zpl_hmap_init_reserve( Type, ht.hashes.backing, new_num )
new_ht, init_result := zpl_hmap_init_reserve( Type, ht.hashes.backing, new_num, ht.hashes.dbg_name )
if init_result != AllocatorError.None {
ensure( false, "New zpl_hmap failed to allocate" )
return init_result


@ -0,0 +1,117 @@
package sectr
MemoryTrackerEntry :: struct {
start, end : rawptr,
// owner : string,
}
MemoryTracker :: struct {
name : string,
entries : Array(MemoryTrackerEntry),
}
memtracker_clear :: proc ( tracker : MemoryTracker ) {
// logf("Clearing tracker: %v", tracker.name)
memtracker_dump_entries(tracker);
array_clear(tracker.entries)
}
memtracker_init :: proc ( tracker : ^MemoryTracker, allocator : Allocator, num_entries : u64, name : string )
{
tracker.name = name
error : AllocatorError
tracker.entries, error = array_init_reserve( MemoryTrackerEntry, allocator, num_entries, dbg_name = name )
if error != AllocatorError.None {
fatal("Failed to allocate memory tracker's hashmap");
}
}
memtracker_register :: proc( tracker : ^MemoryTracker, new_entry : MemoryTrackerEntry )
{
if tracker.entries.num == tracker.entries.capacity {
ensure(false, "Memory tracker entries array full, can no longer register any more allocations")
return
}
for idx in 0..< tracker.entries.num
{
entry := & tracker.entries.data[idx]
if new_entry.start > entry.start {
continue
}
if (entry.end < new_entry.start)
{
msg := str_fmt_tmp("Memory tracker(%v) detected a collision:\nold_entry: %v\nnew_entry: %v", tracker.name, entry, new_entry)
ensure( false, msg )
memtracker_dump_entries(tracker ^)
}
array_append_at( & tracker.entries, new_entry, idx )
// log(str_fmt_tmp("%v : Registered: %v", tracker.name, new_entry) )
return
}
array_append( & tracker.entries, new_entry )
// log(str_fmt_tmp("%v : Registered: %v", tracker.name, new_entry) )
}
memtracker_register_auto_name :: proc( tracker : ^MemoryTracker, start, end : rawptr )
{
memtracker_register( tracker, {start, end})
}
memtracker_register_auto_name_slice :: proc( tracker : ^MemoryTracker, slice : []byte )
{
start := raw_data(slice)
end := & slice[ len(slice) - 1 ]
memtracker_register( tracker, {start, end})
}
memtracker_unregister :: proc( tracker : MemoryTracker, to_remove : MemoryTrackerEntry )
{
entries := array_to_slice_num(tracker.entries)
for idx in 0..< tracker.entries.num
{
entry := & entries[idx]
if entry.start == to_remove.start {
if (entry.end == to_remove.end || to_remove.end == nil) {
// log(str_fmt_tmp("%v: Unregistered: %v", tracker.name, to_remove));
array_remove_at(tracker.entries, idx)
return
}
ensure(false, str_fmt_tmp("%v: Found an entry with the same start address but end address was different:\nentry : %v\nto_remove: %v", tracker.name, entry, to_remove))
memtracker_dump_entries(tracker)
}
}
ensure(false, str_fmt_tmp("%v: Attempted to unregister an entry that was not tracked: %v", tracker.name, to_remove))
memtracker_dump_entries(tracker)
}
memtracker_check_for_collisions :: proc ( tracker : MemoryTracker )
{
entries := array_to_slice_num(tracker.entries)
for idx in 1 ..< tracker.entries.num {
// Check to make sure each allocations adjacent entries do not intersect
left := & entries[idx - 1]
right := & entries[idx]
collided := left.start > right.start || left.end > right.end
if collided {
msg := str_fmt_tmp("%v: Memory tracker detected a collision:\nleft: %v\nright: %v", tracker.name, left, right)
memtracker_dump_entries(tracker)
}
}
}
memtracker_dump_entries :: proc( tracker : MemoryTracker )
{
log( "Dumping Memory Tracker:")
for idx in 0 ..< tracker.entries.num {
entry := & tracker.entries.data[idx]
log( str_fmt_tmp("%v", entry) )
}
}
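
Taken together, the tracker brackets every block an allocator hands out: register on grab, unregister on release, and the collision check verifies that adjacent tracked ranges do not overlap. A standalone usage sketch of the procedures above; the backing allocator, entry count, and the example block are placeholders, and runtime.heap_allocator is used the same way the virtual arena does later in this commit.

memtracker_usage_example :: proc()
{
	example_tracker : MemoryTracker
	memtracker_init( & example_tracker, runtime.heap_allocator(), 64, "example tracker" )

	backing_bytes : [256]byte                  // stands in for a block handed out by a pool or slab
	block := backing_bytes[:]
	memtracker_register_auto_name_slice( & example_tracker, block )

	memtracker_check_for_collisions( example_tracker )   // adjacent tracked ranges must not overlap

	start := raw_data(block)
	end   := & block[ len(block) - 1 ]
	memtracker_unregister( example_tracker, { start, end } )
}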


@ -23,7 +23,9 @@ Pool :: struct {
}
PoolHeader :: struct {
backing : Allocator,
backing : Allocator,
dbg_name : string,
tracker : MemoryTracker,
zero_bucket : b32,
block_size : uint,
@ -53,7 +55,8 @@ pool_init :: proc (
bucket_capacity : uint,
bucket_reserve_num : uint = 0,
alignment : uint = mem.DEFAULT_ALIGNMENT,
allocator : Allocator = context.allocator
allocator : Allocator = context.allocator,
dbg_name : string,
) -> ( pool : Pool, alloc_error : AllocatorError )
{
header_size := align_forward_int( size_of(PoolHeader), int(alignment) )
@ -65,13 +68,17 @@ pool_init :: proc (
pool.header = cast( ^PoolHeader) raw_mem
pool.zero_bucket = should_zero_buckets
pool.backing = allocator
pool.dbg_name = dbg_name
pool.block_size = align_forward_uint(block_size, alignment)
pool.bucket_capacity = bucket_capacity
pool.alignment = alignment
memtracker_init( & pool.tracker, allocator, Kilobyte * 96, dbg_name )
if bucket_reserve_num > 0 {
alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
}
pool.current_bucket = pool.bucket_list.first
return
}
@ -91,6 +98,8 @@ pool_destroy :: proc ( using self : Pool )
}
free( self.header, backing )
memtracker_clear( self.tracker )
}
pool_allocate_buckets :: proc( pool : Pool, num_buckets : uint ) -> AllocatorError
@ -175,6 +184,8 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
if zero_memory {
slice.zero(block)
}
memtracker_register_auto_name_slice( & pool.tracker, block)
return
}
@ -234,6 +245,8 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
slice.zero(block)
// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", block_ptr, cast(rawptr) (uintptr(block_ptr) + uintptr(pool.block_size))))
}
memtracker_register_auto_name_slice( & pool.tracker, block)
return
}
@ -257,8 +270,12 @@ pool_release :: proc( self : Pool, block : []byte, loc := #caller_location )
new_free_block.next = self.free_list_head
self.free_list_head = new_free_block
new_free_block = new_free_block
// new_free_block = new_free_block
// log( str_fmt_tmp("Released block: %p %d", new_free_block, self.block_size))
start := new_free_block
end := transmute(rawptr) (uintptr(new_free_block) + uintptr(self.block_size) - 1)
memtracker_unregister( self.tracker, { start, end } )
}
pool_reset :: proc( using pool : Pool )
@ -298,6 +315,15 @@ pool_validate_ownership :: proc( using self : Pool, block : [] byte ) -> b32
block_address := uintptr(raw_data(block))
if start <= block_address && block_address < end {
misalignment := (block_address - start) % uintptr(block_size)
if misalignment != 0 {
ensure(false, "pool_validate_ownership: This data is within this pool's buckets, however its not aligned to the start of a block")
log(str_fmt_tmp("Block address: %p Misalignment: %p closest: %p",
transmute(rawptr)block_address,
transmute(rawptr)misalignment,
rawptr(block_address - misalignment)))
}
within_bucket = true
break
}
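
A quick worked example of the misalignment check above, with made-up numbers: if a bucket's block run starts at 0x1000 and block_size is 64 (0x40), then a pointer at 0x1050 gives misalignment = (0x1050 - 0x1000) % 0x40 = 0x10, and the closest valid block start reported by the log is 0x1050 - 0x10 = 0x1040.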


@ -44,6 +44,7 @@ SlabPolicy :: StackFixed(SlabSizeClass, Slab_Max_Size_Classes)
SlabHeader :: struct {
dbg_name : string,
tracker : MemoryTracker,
backing : Allocator,
pools : StackFixed(Pool, Slab_Max_Size_Classes),
}
@ -69,6 +70,7 @@ slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocato
slab.header = cast( ^SlabHeader) raw_mem
slab.backing = allocator
slab.dbg_name = dbg_name
memtracker_init( & slab.tracker, allocator, Kilobyte * 256, dbg_name )
alloc_error = slab_init_pools( slab, policy, bucket_reserve_num, should_zero_buckets )
return
}
@ -76,10 +78,12 @@ slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocato
slab_init_pools :: proc ( using self : Slab, policy : ^SlabPolicy, bucket_reserve_num : uint = 0, should_zero_buckets : b32 ) -> AllocatorError
{
profile(#procedure)
for id in 0 ..< policy.idx {
using size_class := policy.items[id]
pool, alloc_error := pool_init( should_zero_buckets, block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing )
pool_dbg_name := str_fmt_alloc("%v pool[%v]", dbg_name, block_size, allocator = backing)
pool, alloc_error := pool_init( should_zero_buckets, block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing, pool_dbg_name )
if alloc_error != .None do return alloc_error
push( & self.pools, pool )
@ -105,6 +109,7 @@ slab_destroy :: proc( using self : Slab )
}
free( self.header, backing )
memtracker_clear(tracker)
}
slab_alloc :: proc( self : Slab,
@ -142,6 +147,8 @@ slab_alloc :: proc( self : Slab,
if zero_memory {
slice.zero(data)
}
memtracker_register_auto_name( & self.tracker, raw_data(block), & block[ len(block) - 1 ] )
return
}
@ -153,6 +160,9 @@ slab_free :: proc( using self : Slab, data : []byte, loc := #caller_location )
{
pool = pools.items[id]
if pool_validate_ownership( pool, data ) {
start := raw_data(data)
end := ptr_offset(start, pool.block_size - 1)
memtracker_unregister( self.tracker, { start, end } )
pool_release( pool, data, loc )
return
}
@ -235,9 +245,14 @@ slab_resize :: proc( using self : Slab,
// log( str_fmt_tmp("%v: Resize via new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
pool_release( pool_old, data )
start := raw_data( data )
end := rawptr(uintptr(start) + uintptr(pool_old.block_size) - 1)
memtracker_unregister( self.tracker, { start, end } )
}
new_data = new_block[ :new_size]
memtracker_register_auto_name( & self.tracker, raw_data(new_block), & new_block[ len(new_block) - 1 ] )
return
}
@ -247,6 +262,7 @@ slab_reset :: proc( slab : Slab )
pool := slab.pools.items[id]
pool_reset( pool )
}
memtracker_clear(slab.tracker)
}
slab_validate_pools :: proc( slab : Slab )


@ -61,8 +61,8 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator(), dbg_name = dbg_name )
verify(alloc_error == .None, "Failed to initialize the string cache" )
cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 4 * Kilobyte )
// cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 8 )
// cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 4 * Kilobyte )
cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 8, dbg_name )
return
}


@ -25,6 +25,8 @@ VArena_GrowthPolicyProc :: #type proc( commit_used, committed, reserved, request
VArena :: struct {
using vmem : VirtualMemoryRegion,
dbg_name : string,
tracker : MemoryTracker,
commit_used : uint,
growth_policy : VArena_GrowthPolicyProc,
allow_any_reize : b32,
@ -59,7 +61,7 @@ varena_allocator :: proc( arena : ^VArena ) -> ( allocator : Allocator ) {
// Default growth_policy is nil
varena_init :: proc( base_address : uintptr, to_reserve, to_commit : uint,
growth_policy : VArena_GrowthPolicyProc, allow_any_reize : b32 = false
growth_policy : VArena_GrowthPolicyProc, allow_any_reize : b32 = false, dbg_name : string
) -> ( arena : VArena, alloc_error : AllocatorError)
{
page_size := uint(virtual_get_page_size())
@ -85,6 +87,9 @@ varena_init :: proc( base_address : uintptr, to_reserve, to_commit : uint,
arena.growth_policy = growth_policy
}
arena.allow_any_reize = allow_any_reize
// Setup the tracker
memtracker_init( & arena.tracker, runtime.heap_allocator(), Kilobyte * 128, dbg_name )
return
}
@ -96,7 +101,6 @@ varena_alloc :: proc( using self : ^VArena,
) -> ( data : []byte, alloc_error : AllocatorError )
{
verify( alignment & (alignment - 1) == 0, "Non-power of two alignment", location = location )
context.user_ptr = self
page_size := uint(virtual_get_page_size())
requested_size := size
@ -160,6 +164,9 @@ varena_alloc :: proc( using self : ^VArena,
mem_zero( data_ptr, int(requested_size) )
}
when ODIN_DEBUG {
memtracker_register_auto_name( & tracker, & data[0], & data[len(data) - 1] )
}
return
}
@ -167,6 +174,10 @@ varena_free_all :: proc( using self : ^VArena )
{
sync.mutex_guard( & mutex )
commit_used = 0
when ODIN_DEBUG {
array_clear(tracker.entries)
}
}
varena_release :: proc( using self : ^VArena )
@ -198,7 +209,8 @@ varena_allocator_proc :: proc(
switch mode
{
case .Alloc, .Alloc_Non_Zeroed:
return varena_alloc( arena, size, alignment, (mode != .Alloc_Non_Zeroed), location )
data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Alloc_Non_Zeroed), location )
return
case .Free:
alloc_error = .Mode_Not_Implemented
@ -209,7 +221,8 @@ varena_allocator_proc :: proc(
case .Resize, .Resize_Non_Zeroed:
if old_memory == nil {
ensure(false, "Resizing without old_memory?")
return varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
return
}
if size == old_size {
@ -249,12 +262,20 @@ varena_allocator_proc :: proc(
if new_region == nil || alloc_error != .None {
ensure(false, "Failed to grab new region")
data = byte_slice( old_memory, old_size )
when ODIN_DEBUG {
memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
}
return
}
copy_non_overlapping( raw_data(new_region), old_memory, int(old_size) )
data = new_region
// log( str_fmt_tmp("varena resize (new): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))
when ODIN_DEBUG {
memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
}
return
}
@ -268,6 +289,10 @@ varena_allocator_proc :: proc(
data = byte_slice( old_memory, size )
// log( str_fmt_tmp("varena resize (expanded): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))
when ODIN_DEBUG {
memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
}
return
case .Query_Features:


@ -120,16 +120,16 @@ setup_memory :: proc( profiler : ^SpallProfiler ) -> ClientMemory
// Setup the static arena for the entire application
{
alloc_error : AllocatorError
persistent, alloc_error = varena_init( sectr.Memory_Base_Address_Persistent, sectr.Memory_Reserve_Persistent, sectr.Memory_Commit_Initial_Persistent, nil )
persistent, alloc_error = varena_init( sectr.Memory_Base_Address_Persistent, sectr.Memory_Reserve_Persistent, sectr.Memory_Commit_Initial_Persistent, nil, dbg_name = "persistent" )
verify( alloc_error == .None, "Failed to allocate persistent virtual arena for the sectr module")
frame, alloc_error = varena_init( sectr.Memory_Base_Address_Frame, sectr.Memory_Reserve_Frame, sectr.Memory_Commit_Initial_Frame, nil, allow_any_reize = true )
frame, alloc_error = varena_init( sectr.Memory_Base_Address_Frame, sectr.Memory_Reserve_Frame, sectr.Memory_Commit_Initial_Frame, nil, allow_any_reize = true, dbg_name = "frame" )
verify( alloc_error == .None, "Failed to allocate frame virtual arena for the sectr module")
transient, alloc_error = varena_init( sectr.Memory_Base_Address_Transient, sectr.Memory_Reserve_Transient, sectr.Memory_Commit_Initial_Transient, nil, allow_any_reize = true )
transient, alloc_error = varena_init( sectr.Memory_Base_Address_Transient, sectr.Memory_Reserve_Transient, sectr.Memory_Commit_Initial_Transient, nil, allow_any_reize = true, dbg_name = "transient" )
verify( alloc_error == .None, "Failed to allocate transient virtual arena for the sectr module")
files_buffer, alloc_error = varena_init( sectr.Memory_Base_Address_Files_Buffer, sectr.Memory_Reserve_FilesBuffer, sectr.Memory_Commit_Initial_Filebuffer, nil )
files_buffer, alloc_error = varena_init( sectr.Memory_Base_Address_Files_Buffer, sectr.Memory_Reserve_FilesBuffer, sectr.Memory_Commit_Initial_Filebuffer, nil, dbg_name = "files_buffer" )
verify( alloc_error == .None, "Failed to allocate files buffer virtual arena for the sectr module")
}


@ -8,7 +8,7 @@ import str "core:strings"
import "core:time"
import core_log "core:log"
Max_Logger_Message_Width :: 120
Max_Logger_Message_Width :: 300
LogLevel :: core_log.Level


@ -165,7 +165,7 @@ pws_parser_lex :: proc ( text : string, allocator : Allocator ) -> ( PWS_LexResu
}
alloc_error : AllocatorError
tokens, alloc_error = array_init_reserve( PWS_Token, allocator, Kilobyte * 4 )
tokens, alloc_error = array_init_reserve( PWS_Token, allocator, Kilobyte * 4 )
if alloc_error != AllocatorError.None {
ensure(false, "Failed to allocate token's array")
return result, alloc_error

@ -1 +1 @@
Subproject commit fa825477057fc060debd44027181e2c31c1976a1
Subproject commit 647d7ed9e3e07b8b248d3b56eaa8fa60b451e1c9