Slab allocator compiler bug fixes

Edward R. Gonzalez 2024-03-07 18:29:49 -05:00
parent f17aead3ea
commit aa47f0ead0
6 changed files with 91 additions and 75 deletions

View File

@ -197,8 +197,9 @@ reload :: proc( persistent_mem, frame_mem, transient_mem, files_buffer_mem : ^VA
// Or as done below, correct containers using allocators on reload.
// Thankfully persistent dynamic allocations are rare, and thus we know exactly which ones they are.
// font_provider_data := & get_state().font_provider_data
// font_provider_data.font_cache.allocator = arena_allocator( & font_provider_data.font_arena )
font_provider_data := & get_state().font_provider_data
// font_provider_data.font_cache.hashes.allocator = slab_allocator()
// font_provider_data.font_cache.entries.allocator = slab_allocator()
ui_reload( & get_state().project.workspace.ui, persistent_allocator() )
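A minimal sketch of the fix-up pattern those comments describe, assuming a hot reload that swaps the module out from under containers holding custom allocators (names here are illustrative, not the project's types):

import "core:mem"

Example_Cache :: struct {
    entries : [dynamic]int,
}

// After a reload, any allocator procedure pointer a container captured may reference
// the unloaded module, so the allocator is simply re-assigned to a live one.
example_reload_fixup :: proc( cache : ^Example_Cache, alloc : mem.Allocator ) {
    cache.entries.allocator = alloc
}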

View File

@ -65,8 +65,7 @@ files_buffer_allocator :: proc() -> Allocator {
}
general_slab_allocator :: proc() -> Allocator {
using state := get_state()
return slab_allocator( general_slab )
return slab_allocator( get_state().general_slab )
}
// TODO(Ed) : Implement host memory mapping api

View File

@ -8,7 +8,6 @@ import "core:os"
import rl "vendor:raylib"
Font_Arena_Size :: 32 * Megabyte
Font_Largest_Px_Size :: 96
// Font_Default :: ""
@ -54,22 +53,17 @@ FontDef :: struct {
}
FontProviderData :: struct {
font_arena : Arena,
font_cache : HMapZPL(FontDef),
}
font_provider_startup :: proc()
{
state := get_state()
font_provider_data := & get_state().font_provider_data; using font_provider_data
data, alloc_result := alloc_bytes( Font_Arena_Size, allocator = persistent_allocator() )
verify( alloc_result == AllocatorError.None, "Failed to allocate memory for font_arena from persistent" )
log("font_arena allocated from persistent memory")
arena_init( & font_arena, data )
font_cache_alloc_error : AllocatorError
font_cache, font_cache_alloc_error = zpl_hmap_init_reserve( FontDef, persistent_allocator(), 8 )
font_cache, font_cache_alloc_error = zpl_hmap_init_reserve( FontDef, general_slab_allocator(), 8 )
verify( font_cache_alloc_error == AllocatorError.None, "Failed to allocate font_cache" )
log("font_cache created")
@ -100,7 +94,7 @@ font_load :: proc( path_file : string,
{
font_provider_data := & get_state().font_provider_data; using font_provider_data
font_data, read_succeded : = os.read_entire_file( path_file )
font_data, read_succeded : = os.read_entire_file( path_file, general_slab_allocator() )
verify( b32(read_succeded), str_fmt_tmp("Failed to read font file for: %v", path_file) )
font_data_size := cast(i32) len(font_data)
@ -126,7 +120,9 @@ font_load :: proc( path_file : string,
def.data = font_data
def.default_size = i32(points_to_pixels(default_size))
// TODO(Ed): this is extremely slow
// TODO(Ed): this is slow & eats quite a bit of memory early on. Set up a more on-demand load for a specific size.
// Also, we need to eventually switch to a SDF shader for rendering
// Render all sizes at once
// Note(Ed) : We only generate textures for even pixel sizes of the font.
for font_size : i32 = 2; font_size <= Font_Largest_Px_Size; font_size += 2

View File

@ -1,6 +1,6 @@
/*
This is a pool allocator setup to grow incrementally via buckets.
Buckets are stored in singly-linked lists so that allocations aren't necessrily congigous.
Buckets are stored in singly-linked lists so that allocations aren't necessarily contiguous.
The pool is set up with the intention of only grabbing single entries from the bucket,
not a contiguous array of them.
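The pool reclaims released blocks through an intrusive free list (the Pool_FreeBlock / free_list_head machinery below); a minimal sketch of the push and pop that implies, with illustrative names rather than the project's ll_push / ll_pop helpers:

Example_FreeBlock :: struct {
    next : ^Example_FreeBlock,
}

// Push a released block onto the head of the intrusive free list.
example_free_push :: proc( head : ^^Example_FreeBlock, block : ^Example_FreeBlock ) {
    block.next = head^
    head^      = block
}

// Pop the head of the free list, returning nil when the list is empty.
example_free_pop :: proc( head : ^^Example_FreeBlock ) -> ^Example_FreeBlock {
    result := head^
    if result != nil {
        head^ = result.next
    }
    return result
}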
@ -27,20 +27,20 @@ PoolHeader :: struct {
alignment : uint,
free_list_head : ^Pool_FreeBlock,
bucket_list : DLL_NodeFL( PoolBucket),
current_bucket : ^PoolBucket,
bucket_list : DLL_NodeFL( PoolBucket)
}
PoolBucket :: struct {
blocks : [^]byte,
next_block : uint,
using nodes : DLL_NodePN( PoolBucket),
}
Pool_FreeBlock :: struct {
next : ^Pool_FreeBlock,
}
PoolBucket :: struct {
using links : DLL_NodePN( PoolBucket),
next_block : uint,
blocks : [^]byte,
}
Pool_Check_Release_Object_Validity :: true
pool_allocator :: proc ( using self : Pool ) -> (allocator : Allocator) {
@ -114,77 +114,106 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
return alloc_error
}
pool_grab :: proc( using self : Pool ) -> ( block : []byte, alloc_error : AllocatorError )
pool_grab :: proc( using pool : Pool ) -> ( block : []byte, alloc_error : AllocatorError )
{
alloc_error = .None
// Check the free-list first for a block
if free_list_head != nil {
block = slice_ptr( cast([^]byte) ll_pop( & self.free_list_head ), int(block_size) )
head := & pool.free_list_head
// Compiler Bug? Fails to compile
// ll_pop( head )
last_free : ^Pool_FreeBlock = pool.free_list_head
// last_free := ll_pop( & pool.free_list_head )
pool.free_list_head = pool.free_list_head.next // ll_pop
block = slice_ptr( cast([^]byte) last_free, int(pool.block_size) )
return
}
if current_bucket == nil
// Compiler bug? Using current_bucket directly fails to compile; it must be accessed through pool.
// if current_bucket == nil
if pool.current_bucket == nil
{
alloc_error := pool_allocate_buckets( self, 1 )
alloc_error = pool_allocate_buckets( pool, 1 )
if alloc_error != .None {
return
}
self.current_bucket = bucket_list.first
pool.current_bucket = bucket_list.first
}
next := uintptr(current_bucket.blocks) + uintptr(current_bucket.next_block)
end := uintptr(current_bucket.blocks) + uintptr(bucket_capacity)
// Compiler Bug? (Won't work without "pool.")
// next := uintptr(current_bucket.blocks) + uintptr(current_bucket.next_block)
// end := uintptr(current_bucket.blocks) + uintptr(bucket_capacity)
next := uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
end := uintptr(pool.current_bucket.blocks) + uintptr(pool.bucket_capacity)
blocks_left := end - next
if blocks_left == 0
{
if current_bucket.next != nil {
self.current_bucket = current_bucket.next
// Compiler Bug
// if current_bucket.next != nil {
if pool.current_bucket.next != nil {
// current_bucket = current_bucket.next
pool.current_bucket = pool.current_bucket.next
}
else
{
alloc_error := pool_allocate_buckets( self, 1 )
alloc_error := pool_allocate_buckets( pool, 1 )
if alloc_error != .None {
return
}
self.current_bucket = current_bucket.next
pool.current_bucket = pool.current_bucket.next
}
}
block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
self.current_bucket.next_block += block_size
// Compiler Bug
// block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
// self.current_bucket.next_block += block_size
block = slice_ptr( pool.current_bucket.blocks[ pool.current_bucket.next_block:], int(block_size) )
pool.current_bucket.next_block += block_size
return
}
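The "Compiler Bug" workarounds above share one shape: Pool appears to be a small value wrapper whose header fields are normally reached through `using`, and when the implicit access fails to build, the same field is spelled out through the wrapper instead. A minimal sketch of that shape, with illustrative types rather than the project's definitions:

Example_Header :: struct {
    next_block : uint,
}

Example_Pool :: struct {
    using header : ^Example_Header, // header fields are reachable directly through the wrapper
}

example_advance :: proc( using pool : Example_Pool, amount : uint ) {
    // Fully-qualified spelling of what `next_block += amount` expresses via `using`.
    pool.header.next_block += amount
}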
pool_release :: proc( using self : Pool, block : []byte )
pool_release :: proc( using self : Pool, block : []byte, loc := #caller_location )
{
when Pool_Check_Release_Object_Validity
{
within_bucket := pool_validate_ownership( self, block )
verify( within_bucket, "Attempted to release data that is not within a bucket of this pool" )
verify( within_bucket, "Attempted to release data that is not within a bucket of this pool", location = loc )
return
}
ll_push( & self.free_list_head, cast(^Pool_FreeBlock) raw_data(block) )
// Compiler bug
// ll_push( & self.free_list_head, cast(^Pool_FreeBlock) raw_data(block) )
// ll_push:
new_free_block := cast(^Pool_FreeBlock) raw_data(block)
new_free_block.next = self.free_list_head
self.free_list_head = new_free_block
}
pool_reset :: proc( using self : Pool )
pool_reset :: proc( using pool : Pool )
{
bucket := bucket_list.first
bucket : ^PoolBucket = bucket_list.first // TODO(Ed): Compiler bug? Build fails unless ^PoolBucket is explicitly specified.
for ; bucket != nil; bucket = bucket.next {
bucket.next_block = 0
}
self.free_list_head = nil
self.current_bucket = bucket_list.first
pool.free_list_head = nil
pool.current_bucket = bucket_list.first
}
pool_validate_ownership :: proc( using self : Pool, block : [] byte ) -> b32
{
within_bucket := b32(false)
bucket := bucket_list.first
// Compiler Bug : Same as pool_reset
bucket : ^PoolBucket = bucket_list.first
for ; bucket != nil; bucket = bucket.next
{
start := uintptr( bucket.blocks )

View File

@ -25,17 +25,6 @@ A freelist is tracked for free-blocks for each pool (provided by the underlying
A slab starts out with pools initialized with no buckets and grows as needed.
When a slab is initialized, the slab policy is provided so it knows how many size-classes to set up;
each size-class entry gives the ratio of bucket size to block size.
Strings Slab pool size-classes (bucket:block ratio) are as follows:
16 mb : 64 b = 262,144 blocks
8 mb : 128 b = 8192 blocks
8 mb : 256 b = 8192 blocks
8 mb : 1 kb = 8192 blocks
16 mb : 4 kb = 4096 blocks
32 mb : 16 kb = 4096 blocks
32 mb : 32 kb = 4096 blocks
256 mb : 64 kb =
512 mb : 128 kb =
*/
package sectr
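As a quick aside on the size-class table above: the block count of a class is just bucket size divided by block size (16 MB / 64 B = 262,144, for example). A minimal sketch of what such a policy table could look like, with illustrative types and names since the actual policy structures are not part of this diff:

Example_Megabyte :: 1024 * 1024

Example_SizeClass :: struct {
    bucket_size : uint, // bytes reserved per bucket of this class
    block_size  : uint, // size of each block carved out of the bucket
}

// Mirrors the first few bucket : block ratios listed in the comment above.
example_string_policy := [?]Example_SizeClass {
    { 16 * Example_Megabyte, 64  },
    {  8 * Example_Megabyte, 128 },
    {  8 * Example_Megabyte, 256 },
}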
@ -111,7 +100,7 @@ slab_alloc :: proc( using self : Slab,
size : uint,
alignment : uint,
zero_memory := true,
location := #caller_location
loc := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
pool : Pool
@ -123,7 +112,7 @@ slab_alloc :: proc( using self : Slab,
}
}
verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = location )
verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = loc )
block : []byte
block, alloc_error = pool_grab(pool)
@ -139,26 +128,26 @@ slab_alloc :: proc( using self : Slab,
return
}
slab_free :: proc( using self : Slab, data : []byte, location := #caller_location )
slab_free :: proc( using self : Slab, data : []byte, loc := #caller_location )
{
pool : Pool
for id in 0 ..< pools.idx
{
pool = pools.items[id]
if pool_validate_ownership( pool, data ) {
pool_release( pool, data )
pool_release( pool, data, loc )
return
}
}
verify(false, "Attempted to free a block not within a pool of this slab", location = location)
verify(false, "Attempted to free a block not within a pool of this slab", location = loc)
}
slab_resize :: proc( using self : Slab,
data : []byte,
new_size : uint,
alignment : uint,
data : []byte,
new_size : uint,
alignment : uint,
zero_memory := true,
location := #caller_location
loc := #caller_location
) -> ( new_data : []byte, alloc_error : AllocatorError )
{
old_size := uint( len(data))
@ -179,7 +168,7 @@ slab_resize :: proc( using self : Slab,
}
}
verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator" )
verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator", location = loc )
// Resize will keep block in the same size_class, just give it more of its already allocated block
if pool_old == pool_resize
@ -208,10 +197,10 @@ slab_resize :: proc( using self : Slab,
return
}
slab_reset :: proc( using self : Slab )
slab_reset :: proc( slab : Slab )
{
for id in 0 ..< pools.idx {
pool := pools.items[id]
for id in 0 ..< slab.pools.idx {
pool := slab.pools.items[id]
pool_reset( pool )
}
}
@ -223,28 +212,30 @@ slab_allocator_proc :: proc(
alignment : int,
old_memory : rawptr,
old_size : int,
location := #caller_location
loc := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
slab := Slab { cast( ^SlabHeader) allocator_data }
slab : Slab
slab.header = cast( ^SlabHeader) allocator_data
size := uint(size)
alignment := uint(alignment)
old_size := uint(old_size)
// TODO(Ed) : Compiler bug - Some of these are commented out until I finish resolving issues with the pool allocator
switch mode
{
case .Alloc, .Alloc_Non_Zeroed:
return slab_alloc( slab, size, alignment, (mode != .Alloc_Non_Zeroed), location)
return slab_alloc( slab, size, alignment, (mode != .Alloc_Non_Zeroed), loc)
case .Free:
slab_free( slab, byte_slice( old_memory, int(old_size)) )
slab_free( slab, byte_slice( old_memory, int(old_size)), loc )
case .Free_All:
slab_reset( slab )
case .Resize, .Resize_Non_Zeroed:
return slab_resize( slab, byte_slice(old_memory, int(old_size)), size, alignment, (mode != .Resize_Non_Zeroed), location)
return slab_resize( slab, byte_slice(old_memory, int(old_size)), size, alignment, (mode != .Resize_Non_Zeroed), loc)
case .Query_Features:
set := cast( ^AllocatorModeSet) old_memory

thirdparty/Odin vendored

@ -1 +1 @@
Subproject commit 57712343be3b471202b3f0c5ac77430766abb92b
Subproject commit 242d5b8c5c92cab6c76757af2a4acc763fed41e3