Finally fixed memory allocations for slabs/pools/virtual arenas

Edward R. Gonzalez 2024-03-20 22:34:18 -04:00
parent ca66f517e9
commit e3ff1ce70f
13 changed files with 73 additions and 56 deletions

View File

@@ -50,31 +50,39 @@ Memory :: struct {
}
persistent_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.persistent )
result := varena_allocator( Memory_App.persistent )
return result
}
frame_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.frame )
result := varena_allocator( Memory_App.frame )
return result
}
transient_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.transient )
result := varena_allocator( Memory_App.transient )
return result
}
files_buffer_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.files_buffer )
result := varena_allocator( Memory_App.files_buffer )
return result
}
persistent_slab_allocator :: proc() -> Allocator {
return slab_allocator( get_state().persistent_slab )
state := get_state()
result := slab_allocator( state.persistent_slab )
return result
}
frame_slab_allocator :: proc() -> Allocator {
return slab_allocator( get_state().frame_slab )
result := slab_allocator( get_state().frame_slab )
return result
}
transient_slab_allocator :: proc() -> Allocator {
return slab_allocator( get_state().transient_slab )
result := slab_allocator( get_state().transient_slab )
return result
}
// TODO(Ed) : Implement host memory mapping api
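
Not part of the diff — a standalone sketch of the wrapper pattern above, using core:mem's fixed-buffer Arena as a stand-in for the project's virtual arenas and slabs; all demo_* names are hypothetical.

package allocator_wrapper_demo

import "core:fmt"
import "core:mem"

// Stand-in for the app's global memory state; the real code backs these
// wrappers with virtual arenas (varena) and slab allocators instead.
Demo_Memory :: struct {
	persistent_backing : [4 * 1024]byte,
	persistent         : mem.Arena,
}

demo_memory : Demo_Memory

// Same shape as the wrappers above: bind the allocator to a named result,
// then return it, rather than returning the constructor call directly.
demo_persistent_allocator :: proc() -> mem.Allocator {
	result := mem.arena_allocator( & demo_memory.persistent )
	return result
}

main :: proc() {
	mem.arena_init( & demo_memory.persistent, demo_memory.persistent_backing[:] )

	context.allocator = demo_persistent_allocator()
	numbers, error := make( []int, 8 ) // served by the arena through context.allocator
	fmt.println( len(numbers), error )
}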

View File

@@ -7,7 +7,7 @@ package sectr
import "core:mem"
// Initialize a sub-section of our virtual memory as a sub-arena
sub_arena_init :: proc( address : ^ byte, size : int ) -> ( ^ Arena) {
sub_arena_init :: proc( address : ^byte, size : int ) -> ( ^ Arena) {
Arena :: mem.Arena
arena_size :: size_of( Arena)

View File

@@ -45,7 +45,8 @@ array_to_slice :: proc( using self : Array($ Type) ) -> []Type {
}
array_grow_formula :: proc( value : u64 ) -> u64 {
return 2 * value + 8
result := (2 * value) + 8
return result
}
array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), AllocatorError ) {
@@ -273,7 +274,6 @@ array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> All
new_size := header_size + (cast(int) new_capacity ) * size_of(Type)
old_size := header_size + (cast(int) self.capacity) * size_of(Type)
// new_mem, result_code := resize( self.header, old_size, new_size, allocator = self.backing )
new_mem, result_code := resize_non_zeroed( self.header, old_size, new_size, mem.DEFAULT_ALIGNMENT, allocator = self.backing )
if result_code != AllocatorError.None {
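
For reference on the grow formula above: it doubles the current capacity and adds a fixed pad of 8, so capacities progress 0 → 8 → 24 → 56 → 120. A minimal standalone sketch (assumes nothing beyond core:fmt):

package array_grow_demo

import "core:fmt"

// Same formula as array_grow_formula above: double the capacity, plus a pad of 8.
grow_formula :: proc( value : u64 ) -> u64 {
	result := (2 * value) + 8
	return result
}

main :: proc() {
	capacity : u64 = 0
	for _ in 0 ..< 4 {
		capacity = grow_formula( capacity )
		fmt.println( capacity ) // prints 8, 24, 56, 120
	}
}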

View File

@@ -6,13 +6,13 @@ LL_Node :: struct ( $ Type : typeid ) {
// ll_push :: proc( list_ptr : ^(^ ($ Type)), node : ^Type ) {
ll_push :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)), node : ^Type ) {
list := (list_ptr^)
list : ^Type = (list_ptr^)
node.next = list
(list_ptr^) = node
}
ll_pop :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)) ) -> ( node : ^Type ) {
list := (list_ptr^)
list : ^Type = (list_ptr^)
(list_ptr^) = list.next
return list
}
@@ -37,7 +37,7 @@ DLL_Node :: struct ( $ Type : typeid ) #raw_union {
DLL_NodeFull :: struct ( $ Type : typeid ) {
// using _ : DLL_NodeFL(Type),
first, last : ^Type,
prev, next : ^Type,
prev, next : ^Type,
}
DLL_NodePN :: struct ( $ Type : typeid ) {
@@ -80,7 +80,7 @@ dll_fl_append :: proc ( list : ^( $TypeList), node : ^( $TypeNode) )
dll_push_back :: proc "contextless" ( current_ptr : ^(^ ($ TypeCurr)), node : ^$TypeNode )
{
current := (current_ptr ^)
current : ^TypeCurr = (current_ptr ^)
if current == nil
{
@@ -99,7 +99,7 @@ dll_push_back :: proc "contextless" ( current_ptr : ^(^ ($ TypeCurr)), node : ^$
dll_pop_back :: #force_inline proc "contextless" ( current_ptr : ^(^ ($ Type)) )
{
to_remove := (current_ptr ^)
to_remove : ^Type = (current_ptr ^)
if to_remove == nil {
return
}
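
Not part of the diff — a self-contained sketch of the intrusive push/pop pattern these helpers implement, with the explicit ^Type annotations mirroring the change above; Node, push, and pop are hypothetical stand-ins:

package ll_demo

import "core:fmt"

Node :: struct {
	next  : ^Node,
	value : int,
}

// Push a node onto the head of an intrusive singly-linked list.
push :: proc( head : ^(^Node), node : ^Node ) {
	list : ^Node = (head^)
	node.next = list
	(head^)   = node
}

// Pop the head node; returns nil when the list is empty.
pop :: proc( head : ^(^Node) ) -> ^Node {
	list : ^Node = (head^)
	if list == nil do return nil
	(head^) = list.next
	return list
}

main :: proc() {
	a, b : Node
	a.value = 1; b.value = 2

	head : ^Node
	push( & head, & a )
	push( & head, & b )
	fmt.println( pop( & head ).value ) // 2
	fmt.println( pop( & head ).value ) // 1
}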

View File

@@ -56,8 +56,8 @@ memory_after :: #force_inline proc "contextless" ( slice : []byte ) -> ( ^ byte)
}
memory_after_header :: #force_inline proc "contextless" ( header : ^($ Type) ) -> ( [^]byte) {
// return cast( [^]byte) (cast( [^]Type) header)[ 1:]
result := cast( [^]byte) ptr_offset( header, size_of(Type) )
result := cast( [^]byte) ptr_offset( header, 1 )
// result := cast( [^]byte) (cast( [^]Type) header)[ 1:]
return result
}
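
Context for the fix above: ptr_offset advances a typed pointer in whole elements, so passing size_of(Type) stepped size_of(Type) elements past the header instead of one element. A tiny sketch of the arithmetic, with a hypothetical Header standing in for the generic Type:

package offset_demo

import "core:fmt"

// Hypothetical header type standing in for the generic Type parameter.
Header :: struct {
	capacity : u64,
	count    : u64,
}

main :: proc() {
	// Offsetting a ^Header moves in whole Header elements, so the element
	// count must be 1 to land directly after the header:
	fmt.println( size_of(Header) * 1 )               // intended byte offset: 16
	fmt.println( size_of(Header) * size_of(Header) ) // old argument's effective offset: 256
}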

View File

@@ -100,9 +100,14 @@ pool_allocate_buckets :: proc( pool : Pool, num_buckets : uint ) -> AllocatorErr
bucket_size := header_size + pool.bucket_capacity
to_allocate := cast(int) (bucket_size * num_buckets)
log(str_fmt_tmp("Allocating %d bytes for %d buckets with header_size %d bytes & bucket_size %d", to_allocate, num_buckets, header_size, bucket_size ))
pool_validate( pool )
bucket_memory, alloc_error := alloc_bytes_non_zeroed( to_allocate, int(pool.alignment), pool.backing )
pool_validate( pool )
log(str_fmt_tmp("Bucket memory size: %d bytes, without header: %d", len(bucket_memory), len(bucket_memory) - int(header_size)))
if alloc_error != .None {
return alloc_error
}
@@ -114,16 +119,14 @@ pool_allocate_buckets :: proc( pool : Pool, num_buckets : uint ) -> AllocatorErr
bucket := cast( ^PoolBucket) next_bucket_ptr
bucket.blocks = memory_after_header(bucket)
bucket.next_block = 0
log( str_fmt_tmp("Pool (%d) allocated bucket: %p capacity: %d",
log( str_fmt_tmp("\tPool (%d) allocated bucket: %p start %p capacity: %d (raw: %d)",
pool.block_size,
raw_data(bucket_memory),
pool.bucket_capacity / pool.block_size
bucket.blocks,
pool.bucket_capacity / pool.block_size,
pool.bucket_capacity
))
if bucket == cast(rawptr) uintptr(0x100017740D0) {
runtime.debug_trap()
}
if pool.bucket_list.first == nil {
pool.bucket_list.first = bucket
pool.bucket_list.last = bucket
@@ -142,26 +145,25 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
{
pool := pool
if pool.current_bucket != nil {
verify( pool.current_bucket.blocks != nil, str_fmt_tmp("current_bucket was wiped %p", pool.current_bucket) )
verify( pool.current_bucket.blocks != nil, str_fmt_tmp("(corruption) current_bucket was wiped %p", pool.current_bucket) )
}
// profile(#procedure)
alloc_error = .None
// Check the free-list first for a block
// if pool.free_list_head != nil && false
// if pool.free_list_head != nil
if false
{
head := & pool.free_list_head
// Compiler Bug? Fails to compile
// last_free := ll_pop( & pool.free_list_head )
last_free : ^Pool_FreeBlock = pool.free_list_head
pool.free_list_head = pool.free_list_head.next
block = byte_slice( cast([^]byte) last_free, int(pool.block_size) )
log( str_fmt_tmp("Returning free block: %p %d", raw_data(block), pool.block_size))
log( str_fmt_tmp("\tReturning free block: %p %d", raw_data(block), pool.block_size))
if zero_memory {
slice.zero(block)
}
@@ -169,8 +171,6 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
return
}
// Compiler Fail Bug ? using current_bucket directly instead of with pool..
// if current_bucket == nil
if pool.current_bucket == nil
{
alloc_error = pool_allocate_buckets( pool, 1 )
@@ -182,9 +182,6 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
// log( "First bucket allocation")
}
// Compiler Bug ? (Won't work without "pool."")
// next := uintptr(current_bucket.blocks) + uintptr(current_bucket.next_block)
// end := uintptr(current_bucket.blocks) + uintptr(bucket_capacity)
next := uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
end := uintptr(pool.current_bucket.blocks) + uintptr(pool.bucket_capacity)
@@ -195,20 +192,20 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
// if current_bucket.next != nil {
if pool.current_bucket.next != nil {
// current_bucket = current_bucket.next
// log( str_fmt_tmp("Bucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
log( str_fmt_tmp("\tBucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
pool.current_bucket = pool.current_bucket.next
verify( pool.current_bucket.blocks != nil, "Next's blocks are null?" )
verify( pool.current_bucket.blocks != nil, "New current_bucket's blocks are null (new current_bucket is corrupted)" )
}
else
{
log( "All previous buckets exhausted, allocating new bucket")
log( "\tAll previous buckets exhausted, allocating new bucket")
alloc_error := pool_allocate_buckets( pool, 1 )
if alloc_error != .None {
ensure(false, "Failed to allocate bucket")
return
}
pool.current_bucket = pool.current_bucket.next
verify( pool.current_bucket.blocks != nil, "Next's blocks are null (Post new bucket alloc)?" )
verify( pool.current_bucket.blocks != nil, "Next's blocks are null (Post new bucket alloc)" )
}
}
@@ -224,10 +221,11 @@ pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, allo
pool.current_bucket.next_block += pool.block_size
next = uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
// log( str_fmt_tmp("grabbing block: %p blocks left: %d", raw_data(block), (end - next) / uintptr(pool.block_size) ))
log( str_fmt_tmp("\tgrabbing block: %p from %p blocks left: %d", raw_data(block), pool.current_bucket.blocks, (end - next) / uintptr(pool.block_size) ))
if zero_memory {
slice.zero(block)
log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", block_ptr, cast(rawptr) (uintptr(block_ptr) + uintptr(pool.block_size))))
}
return
}
@@ -274,7 +272,7 @@ pool_validate :: proc( pool : Pool )
// Compiler bug ^^ same as pool_reset
for ; bucket != nil; bucket = bucket.next
{
verify( bucket.blocks != nil, "Found corrupted bucket" )
verify( bucket.blocks != nil, str_fmt_tmp("Found corrupted bucket %p", bucket) )
}
}
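
Not part of the diff — a reduced sketch of the bucket bump-allocation that pool_allocate_buckets and pool_grab implement above: each bucket is a header followed by block storage, and next_block advances by block_size until the bucket is exhausted. Bucket and bucket_grab are hypothetical stand-ins:

package pool_demo

import "core:fmt"

// Minimal stand-in for the pool bucket bump logic shown above.
// Assumes fixed-size blocks carved linearly out of one backing slice.
Bucket :: struct {
	blocks     : []byte,
	next_block : uint,
}

bucket_grab :: proc( bucket : ^Bucket, block_size : uint ) -> ( block : []byte, ok : bool ) {
	if bucket.next_block + block_size > uint(len(bucket.blocks)) {
		return nil, false // bucket exhausted; the real pool chains to the next bucket
	}
	block = bucket.blocks[ bucket.next_block : bucket.next_block + block_size ]
	bucket.next_block += block_size
	return block, true
}

main :: proc() {
	backing : [64]byte
	bucket  := Bucket{ blocks = backing[:], next_block = 0 }

	for {
		block, ok := bucket_grab( & bucket, 16 )
		if !ok do break
		fmt.println( "block offset:", bucket.next_block - 16, "size:", len(block) )
	}
}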

View File

@@ -28,6 +28,7 @@ which each contain the ratio of bucket to block size.
*/
package sectr
import "base:runtime"
import "core:mem"
import "core:slice"
@@ -123,14 +124,14 @@ slab_alloc :: proc( self : Slab,
break
}
}
verify( id < self.pools.idx, "There is not a size class in the slab's policy to satisfy the requested allocation" )
verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = loc )
verify( id < self.pools.idx, "There is not a size class in the slab's policy to satisfy the requested allocation", location = loc )
verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = loc )
block : []byte
slab_validate_pools( self )
block, alloc_error = pool_grab(pool)
slab_validate_pools( self )
if block == nil || alloc_error != .None {
ensure(false, "Bad block from pool")
return nil, alloc_error
@@ -198,9 +199,11 @@ slab_resize :: proc( using self : Slab,
if zero_memory && new_size > old_size {
to_zero := byte_slice( new_data_ptr, int(new_size - old_size) )
slab_validate_pools( self )
slice.zero( to_zero )
slab_validate_pools( self )
log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, cast(rawptr) (uintptr(new_data_ptr) + uintptr(new_size - old_size))))
}
return
@@ -208,23 +211,28 @@ slab_resize :: proc( using self : Slab,
// We'll need to provide an entirely new block, so the data will need to be copied over.
new_block : []byte
slab_validate_pools( self )
new_block, alloc_error = pool_grab( pool_resize )
slab_validate_pools( self )
if new_block == nil {
ensure(false, "Retreived a null block")
return
}
if alloc_error != .None do return
// if zero_memory {
// slice.zero( new_block )
// }
// TODO(Ed): Reapply this when safe.
if zero_memory {
slice.zero( new_block )
log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", raw_data(new_block), cast(rawptr) (uintptr(raw_data(new_block)) + uintptr(new_size))))
}
// log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))
if raw_data(data) != raw_data(new_block) {
log( str_fmt_tmp("%v: Resize view new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
log( str_fmt_tmp("%v: Resize via new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
pool_release( pool_old, data )
}
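
Not part of the diff — a standalone sketch of the size-class selection that slab_alloc and slab_resize rely on above: the slab keeps one pool per size class and picks the first class large enough, falling back to a grab-and-copy when a resize crosses classes. The Size_Classes values here are hypothetical:

package slab_demo

import "core:fmt"

// Hypothetical size classes; the real slab derives these from its policy.
Size_Classes := [?]uint{ 64, 256, 1 * 1024, 4 * 1024, 64 * 1024 }

select_size_class :: proc( size : uint ) -> ( block_size : uint, ok : bool ) {
	for class in Size_Classes {
		if size <= class do return class, true
	}
	return 0, false // no class can satisfy the request (the slab verifies this)
}

main :: proc() {
	class, ok := select_size_class( 100 )
	fmt.println( class, ok ) // 256 true

	class, ok = select_size_class( 70000 )
	fmt.println( class, ok ) // 0 false (no fitting size class)
}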

View File

@@ -11,6 +11,7 @@ If open addressing, we just keep the open addressed array of node slots in the g
*/
package sectr
import "base:runtime"
import "core:mem"
import "core:slice"
import "core:strings"
@@ -54,6 +55,8 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
@static dbg_name := "StringCache slab"
state := get_state()
alloc_error : AllocatorError
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator(), dbg_name = dbg_name )
verify(alloc_error == .None, "Failed to initialize the string cache" )

View File

@@ -15,6 +15,7 @@ The host application as well ideally (although this may not be the case for a wh
package sectr
import "base:intrinsics"
import "base:runtime"
import "core:mem"
import "core:os"
import "core:slice"
@@ -148,9 +149,8 @@ varena_alloc :: proc( using self : ^VArena,
self.commit_used += size_to_allocate
alloc_error = .None
log_backing : [Kilobyte * 16]byte
backing_slice := byte_slice( & log_backing[0], len(log_backing))
// log_backing : [Kilobyte * 16]byte
// backing_slice := byte_slice( & log_backing[0], len(log_backing))
// log( str_fmt_buffer( backing_slice, "varena alloc - BASE: %p PTR: %X, SIZE: %d", cast(rawptr) self.base_address, & data[0], requested_size) )
if zero_memory

View File

@@ -99,7 +99,7 @@ when ODIN_OS != OS_Type.Windows {
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
header_size := size_of(VirtualMemoryRegionHeader)
header_size := memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)
// Ignoring the base address, add an os specific impl if you want it.
data : []byte
@@ -107,7 +107,7 @@ virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -
alloc_error := core_virtual.commit( header_size )
vmem.base_address = cast( ^VirtualMemoryRegionHeader ) raw_data(data)
vmem.reserve_start = memory_after_header(vmem.base_address)
vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
vmem.reserved = len(data)
vmem.committed = header_size
return
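
Context for the header_size change above (both the Unix and Windows reserve paths): the header is now rounded up to mem.DEFAULT_ALIGNMENT before reserve_start is computed. memory_align_formula itself is not shown in this diff; the sketch below assumes the usual round-up-to-multiple behavior:

package align_demo

import "core:fmt"
import "core:mem"

// Presumed behavior of memory_align_formula: round size up to the next
// multiple of the alignment.
align_formula :: proc( size, alignment : uint ) -> uint {
	result := ((size + alignment - 1) / alignment) * alignment
	return result
}

main :: proc() {
	fmt.println( align_formula( 40, uint(mem.DEFAULT_ALIGNMENT) ) ) // 48 when DEFAULT_ALIGNMENT is 16
	fmt.println( align_formula( 48, uint(mem.DEFAULT_ALIGNMENT) ) ) // 48
}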

View File

@@ -3,6 +3,7 @@ package sectr
import "core:c"
import "core:c/libc"
import "core:fmt"
import "core:mem"
import core_virtual "core:mem/virtual"
import "core:strings"
import win32 "core:sys/windows"
@@ -73,10 +74,9 @@ WIN32_ERROR_INVALID_ADDRESS :: 487
WIN32_ERROR_COMMITMENT_LIMIT :: 1455
@(require_results)
virtual__reserve ::
proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
header_size :: cast(uint) size_of(VirtualMemoryRegion)
header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)
result := win32.VirtualAlloc( rawptr(base_address), header_size + size, win32.MEM_RESERVE, win32.PAGE_READWRITE )
if result == nil {
@@ -102,7 +102,7 @@ proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMe
}
vmem.base_address = cast(^VirtualMemoryRegionHeader) result
vmem.reserve_start = memory_after_header(vmem.base_address)
vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
vmem.reserved = size
vmem.committed = header_size
alloc_error = .None

View File

@@ -8,7 +8,7 @@ import str "core:strings"
import "core:time"
import core_log "core:log"
Max_Logger_Message_Width :: 180
Max_Logger_Message_Width :: 120
LogLevel :: core_log.Level

View File

@@ -274,7 +274,7 @@ UI_Box :: struct {
UI_Layout_Stack_Size :: 512
UI_Style_Stack_Size :: 512
UI_Parent_Stack_Size :: 512
UI_Built_Boxes_Array_Size :: 8 * Kilobyte
UI_Built_Boxes_Array_Size :: 8
UI_State :: struct {
// TODO(Ed) : Use these