Completed first draft of slab allocator

Edward R. Gonzalez 2024-03-07 03:10:21 -05:00
parent d63c58008e
commit 6836459a1d
6 changed files with 319 additions and 51 deletions

View File

@@ -28,6 +28,7 @@ import fmt_io "core:fmt"
str_to_file_ln :: fmt_io.fprintln
str_tmp_from_any :: fmt_io.tprint
import "core:mem"
align_forward_int :: mem.align_forward_int
Allocator :: mem.Allocator
AllocatorError :: mem.Allocator_Error
AllocatorMode :: mem.Allocator_Mode
@@ -38,7 +39,9 @@ import "core:mem"
arena_allocator :: mem.arena_allocator
arena_init :: mem.arena_init
byte_slice :: mem.byte_slice
copy_non_overlapping :: mem.copy_non_overlapping
free :: mem.free
is_power_of_two_uintptr :: mem.is_power_of_two
ptr_offset :: mem.ptr_offset
resize :: mem.resize
slice_ptr :: mem.slice_ptr
@@ -90,6 +93,7 @@ get_bounds :: proc {
is_power_of_two :: proc {
is_power_of_two_u32,
is_power_of_two_uintptr,
}
pop :: proc {

View File

@@ -65,6 +65,27 @@ memory_align_formula :: #force_inline proc "contextless" ( size, align : uint) -
return result - result % align
}
// This is here just for docs
memory_misalignment :: #force_inline proc ( address, alignment : uintptr) -> uint {
// address % alignment
assert(is_power_of_two(alignment))
return uint( address & (alignment - 1) )
}
// This is here just for docs
@(require_results)
memory_align_forward :: #force_inline proc( address, alignment : uintptr) -> uintptr
{
assert(is_power_of_two(alignment))
aligned_address := address
misalignment := cast(uintptr) memory_misalignment( address, alignment )
if misalignment != 0 {
aligned_address += alignment - misalignment
}
return aligned_address
}
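
// Editor's illustrative sketch (not part of the commit): sanity-checking the two
// helpers above with a concrete address. Assumes they are in scope as defined here.
memory_align_example :: proc()
{
	addr := uintptr(0x1003)
	// 0x1003 & (16 - 1) == 3: the address is 3 bytes past a 16-byte boundary.
	assert( memory_misalignment( addr, 16 ) == 3 )
	// Align forward adds (16 - 3), landing on the next boundary: 0x1010.
	assert( memory_align_forward( addr, 16 ) == uintptr(0x1010) )
	// An already-aligned address is returned unchanged.
	assert( memory_align_forward( uintptr(0x1010), 16 ) == uintptr(0x1010) )
}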
//endregion Memory Math
// Since this is a prototype, all memory is always tracked. No arena is interfaced directly.

View File

@@ -26,9 +26,9 @@ PoolHeader :: struct {
bucket_capacity : uint,
alignment : uint,
free_list_head : ^Pool_FreeBlock,
bucket_list_head : ^PoolBucket,
current_bucket : ^PoolBucket,
free_list_head : ^Pool_FreeBlock,
bucket_list : DLL_NodeFL( PoolBucket),
current_bucket : ^PoolBucket,
}
Pool_FreeBlock :: struct {
@@ -36,7 +36,7 @@ Pool_FreeBlock :: struct {
}
PoolBucket :: struct {
next : ^PoolBucket,
using links : DLL_NodePN( PoolBucket),
next_block : uint,
blocks : [^]byte,
}
@@ -49,35 +49,50 @@ pool_allocator :: proc ( using self : Pool ) -> (allocator : Allocator) {
return
}
pool_init :: proc ( block_size, block_alignment, bucket_capacity : uint,
pool_init :: proc (
block_size : uint,
bucket_capacity : uint,
bucket_reserve_num : uint = 0,
alignment : uint = mem.DEFAULT_ALIGNMENT,
allocator : Allocator = context.allocator
) -> ( pool : Pool, alloc_error : AllocatorError )
{
header_size := size_of(PoolHeader)
header_size := align_forward_int( size_of(PoolHeader), int(alignment) )
raw_mem : rawptr
raw_mem, alloc_error = alloc( header_size, mem.DEFAULT_ALIGNMENT, allocator )
raw_mem, alloc_error = alloc( header_size, int(alignment), allocator )
if alloc_error != .None do return
pool.header = cast( ^PoolHeader) raw_mem
pool.block_size = block_size
pool.alignment = block_alignment
pool.free_list_head = nil
pool.bucket_list_head = nil
pool.alignment = alignment
alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
pool.current_bucket = pool.bucket_list_head
alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
pool.current_bucket = pool.bucket_list.first
return
}
pool_destroy :: proc ( using self : Pool )
{
if bucket_list.first != nil
{
bucket := bucket_list.first
for ; bucket != nil; bucket = bucket.next {
free( bucket, backing )
}
}
free( self.header, backing )
}
pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> AllocatorError
{
if num_buckets == 0 {
return .Invalid_Argument
}
header_size := cast(uint) size_of(PoolBucket)
to_allocate := cast(int) (header_size + block_size * bucket_capacity * num_buckets)
header_size := cast(uint) align_forward_int( size_of(PoolBucket), int(alignment))
bucket_size := block_size * bucket_capacity
to_allocate := cast(int) ((header_size + bucket_size) * num_buckets)
bucket_memory, alloc_error := alloc( to_allocate, int(alignment), backing )
if alloc_error != .None {
@@ -90,7 +105,9 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
bucket := cast( ^PoolBucket) next_bucket_ptr
bucket.blocks = memory_after_header(bucket)
bucket.next_block = 0
ll_push( & self.bucket_list_head, bucket )
dll_push_back( & self.bucket_list.last, bucket )
next_bucket_ptr = next_bucket_ptr[ header_size + bucket_size: ]
}
return alloc_error
}
@@ -105,11 +122,24 @@ pool_grab :: proc( using self : Pool ) -> ( block : []byte, alloc_error : Alloca
return
}
blocks_left := bucket_capacity - block_size
if current_bucket == nil
{
alloc_error = pool_allocate_buckets( self, 1 )
if alloc_error != .None {
return
}
self.current_bucket = bucket_list.first
}
next := uintptr(current_bucket.blocks) + uintptr(current_bucket.next_block)
end := uintptr(current_bucket.blocks) + uintptr(bucket_capacity)
blocks_left := end - next
if blocks_left == 0
{
if bucket_list_head.next != nil {
self.current_bucket = ll_pop( & self.bucket_list_head )
if current_bucket.next != nil {
self.current_bucket = current_bucket.next
}
else
{
@@ -117,10 +147,12 @@ pool_grab :: proc( using self : Pool ) -> ( block : []byte, alloc_error : Alloca
if alloc_error != .None {
return
}
self.current_bucket = current_bucket.next
}
}
block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
self.current_bucket.next_block += block_size
return
}
@@ -128,20 +160,7 @@ pool_release :: proc( using self : Pool, block : []byte )
{
when Pool_Check_Release_Object_Validity
{
within_bucket := b32(false)
bucket := bucket_list_head
for ; bucket != nil; bucket = bucket.next
{
start := uintptr( bucket.blocks )
end := start + uintptr(bucket_capacity)
block_address := uintptr(raw_data(block))
if start <= block_address && block_address < end {
within_bucket = true
break
}
}
within_bucket := pool_validate_ownership( self, block )
verify( within_bucket, "Attempted to release data that is not within a bucket of this pool" )
return
}
@@ -149,6 +168,36 @@ pool_release :: proc( using self : Pool, block : []byte )
ll_push( & self.free_list_head, cast(^Pool_FreeBlock) raw_data(block) )
}
pool_reset :: proc( using self : Pool )
{
bucket := bucket_list.first
for ; bucket != nil; bucket = bucket.next {
bucket.next_block = 0
}
self.free_list_head = nil
self.current_bucket = bucket_list.first
}
pool_validate_ownership :: proc( using self : Pool, block : [] byte ) -> b32
{
within_bucket := b32(false)
bucket := bucket_list.first
for ; bucket != nil; bucket = bucket.next
{
start := uintptr( bucket.blocks )
end := start + uintptr(bucket_capacity)
block_address := uintptr(raw_data(block))
if start <= block_address && block_address < end {
within_bucket = true
break
}
}
return within_bucket
}
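
// Editor's illustrative usage sketch (not part of the commit): the expected
// lifecycle of a pool. The block size and bucket capacity are made-up values;
// pool_init's alignment and allocator parameters use their defaults.
pool_usage_example :: proc() -> AllocatorError
{
	pool, alloc_error := pool_init(
		block_size         = 64,
		bucket_capacity    = 4 * mem.Kilobyte,
		bucket_reserve_num = 1,
	)
	if alloc_error != .None do return alloc_error

	// Grab hands out a block_size slice from the current bucket (or the free-list).
	block, grab_error := pool_grab( pool )
	if grab_error != .None do return grab_error

	// Release pushes the block onto the pool's free-list for reuse.
	pool_release( pool, block )
	pool_destroy( pool )
	return .None
}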
// This interface should really not be used for a pool allocator... But fk it, it's here.
// TODO(Ed): Implement this eventually..
pool_allocator_proc :: proc(

View File

@@ -39,25 +39,215 @@ Strings Slab pool size-classes (bucket:block ratio) are as follows:
*/
package sectr
import "core:mem"
import "core:slice"
SlabSizeClass :: struct {
bucket : uint,
block : uint,
bucket_capacity : uint,
block_size : uint,
block_alignment : uint,
}
Slab_Max_Size_Classes :: 32
Slab_Max_Size_Classes :: 64
SlabPolicy :: [Slab_Max_Size_Classes]SlabSizeClass
SlabPolicy :: StackFixed(SlabSizeClass, Slab_Max_Size_Classes)
SlabHeader :: struct {
backing : Allocator,
policy : SlabPolicy,
pools : [Slab_Max_Size_Classes]Pool,
pools : StackFixed(Pool, Slab_Max_Size_Classes),
}
Slab :: struct {
using header : SlabHeader,
using header : ^SlabHeader,
}
slab_init_reserve :: proc( ) -> ( Slab )
slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator ) -> ( slab : Slab, alloc_error : AllocatorError )
{
return {}
header_size :: size_of( SlabHeader )
raw_mem : rawptr
raw_mem, alloc_error = alloc( header_size, mem.DEFAULT_ALIGNMENT, allocator )
if alloc_error != .None do return
slab.header = cast( ^SlabHeader) raw_mem
slab.backing = allocator
slab.policy = (policy^)
alloc_error = slab_init_pools( slab, bucket_reserve_num )
return
}
slab_init_pools :: proc ( using self : Slab, bucket_reserve_num : uint = 0 ) -> AllocatorError
{
for id in 0 ..< policy.idx {
using size_class := policy.items[id]
pool, alloc_error := pool_init( block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing )
if alloc_error != .None do return alloc_error
push( & self.pools, pool )
}
return .None
}
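
// Editor's illustrative sketch (not part of the commit): building a policy and
// initializing a slab. The size-class values are made up for the example; the
// app's real policy is defined elsewhere.
slab_init_example :: proc( allocator : Allocator ) -> ( slab : Slab, alloc_error : AllocatorError )
{
	policy : SlabPolicy
	push( & policy, SlabSizeClass { bucket_capacity = 4 * mem.Kilobyte, block_size = 64,  block_alignment = 16 })
	push( & policy, SlabSizeClass { bucket_capacity = 4 * mem.Kilobyte, block_size = 128, block_alignment = 16 })
	return slab_init( & policy, bucket_reserve_num = 1, allocator = allocator )
}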
slab_destroy :: proc( using self : Slab )
{
for id in 0 ..< policy.idx {
pool := pools.items[id]
pool_destroy( pool )
}
free( self.header, backing )
}
slab_alloc :: proc( using self : Slab,
size : uint,
alignment : uint,
zero_memory := true,
location := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
pool : Pool
for id in 0 ..< pools.idx {
if pools.items[id].block_size >= size && pools.items[id].alignment >= alignment {
pool = pools.items[id]
break
}
}
verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = location )
block : []byte
block, alloc_error = pool_grab(pool)
if alloc_error != .None {
return nil, alloc_error
}
if zero_memory {
slice.zero(block)
}
data = byte_slice(raw_data(block), size)
return
}
slab_free :: proc( using self : Slab, data : []byte, location := #caller_location )
{
pool : Pool
for id in 0 ..< pools.idx
{
pool = pools.items[id]
if pool_validate_ownership( pool, data ) {
pool_release( pool, data )
return
}
}
verify(false, "Attempted to free a block not within a pool of this slab", location = location)
}
slab_resize :: proc( using self : Slab,
data : []byte,
new_size : uint,
alignment : uint,
zero_memory := true,
location := #caller_location
) -> ( new_data : []byte, alloc_error : AllocatorError )
{
old_size := uint( len(data))
pool_resize, pool_old : Pool
for id in 0 ..< pools.idx
{
pool := pools.items[id]
if pool_resize.header == nil && pool.block_size >= new_size && pool.alignment >= alignment {
pool_resize = pool
}
if pool_validate_ownership( pool, data ) {
pool_old = pool
}
if pool_resize.header != nil && pool_old.header != nil {
break
}
}
verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator", location = location )
// Resize keeps the block in the same size-class; it just hands back more of the block's already-allocated capacity.
if pool_old == pool_resize
{
new_data = byte_slice( raw_data(data), new_size )
if zero_memory && new_size > old_size {
to_zero := slice_ptr( memory_after(data), int(new_size - old_size) )
slice.zero( to_zero )
}
return
}
// We'll need to provide an entirely new block, so the data will need to be copied over.
new_block : []byte
new_block, alloc_error = pool_grab( pool_resize )
if alloc_error != .None do return
copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
pool_release( pool_old, data )
new_data = byte_slice( raw_data(new_block), int(new_size) )
if zero_memory && new_size > old_size {
slice.zero( new_data[ old_size: ] )
}
return
}
slab_reset :: proc( using self : Slab )
{
for id in 0 ..< pools.idx {
pool := pools.items[id]
pool_reset( pool )
}
}
slab_allocator_proc :: proc(
allocator_data : rawptr,
mode : AllocatorMode,
size : int,
alignment : int,
old_memory : rawptr,
old_size : int,
location := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
slab := Slab { cast( ^SlabHeader) allocator_data }
size := uint(size)
alignment := uint(alignment)
old_size := uint(old_size)
switch mode
{
case .Alloc, .Alloc_Non_Zeroed:
return slab_alloc( slab, size, alignment, (mode != .Alloc_Non_Zeroed), location)
case .Free:
slab_free( slab, byte_slice( old_memory, int(old_size)) )
case .Free_All:
slab_reset( slab )
case .Resize, .Resize_Non_Zeroed:
return slab_resize( slab, byte_slice(old_memory, int(old_size)), size, alignment, (mode != .Resize_Non_Zeroed), location)
case .Query_Features:
set := cast( ^AllocatorModeSet) old_memory
if set != nil {
(set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
}
case .Query_Info:
alloc_error = .Mode_Not_Implemented
}
return
}
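
// Editor's illustrative sketch (not part of the commit): wiring the proc above
// into Odin's allocator interface. Assumes `slab` was produced by slab_init; no
// slab_allocator helper is shown in this commit, so the Allocator is assembled
// by hand.
slab_allocator_example :: proc( slab : Slab )
{
	slab_allocator := Allocator {
		procedure = slab_allocator_proc,
		data      = slab.header,
	}
	// Any core container can now route its memory through the slab.
	numbers := make( [dynamic]int, 0, 32, slab_allocator )
	append( & numbers, 42 )
	delete( numbers )
}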

View File

@@ -27,12 +27,12 @@ stack_pop :: proc( using stack : ^StackFixed( $ Type, $ Size ) ) {
}
stack_peek_ref :: proc( using stack : ^StackFixed( $ Type, $ Size ) ) -> ( ^Type) {
last := max( 0, idx - 1 )
last := max( 0, idx - 1 ) if idx > 0 else 0
return & items[last]
}
stack_peek :: proc ( using stack : ^StackFixed( $ Type, $ Size ) ) -> Type {
last := max( 0, idx - 1 )
last := max( 0, idx - 1 ) if idx > 0 else 0
return items[last]
}
@@ -86,10 +86,15 @@ stack_allocator_init :: proc( size : int, allocator := context.allocator ) -> (
stack.data = cast( [^]byte) (cast( [^]StackAllocatorBase) stack.base)[ 1:]
stack.top = cast(^StackAllocatorHeader) stack.data
stack.bottom = stack.first
stack.bottom = stack.top
return
}
stack_allocator_destroy :: proc( using self : StackAllocator )
{
free( self.base, backing )
}
stack_allocator_init_via_memory :: proc( memory : []byte ) -> ( stack : StackAllocator )
{
header_size := size_of(StackAllocatorBase)
@@ -104,7 +109,7 @@ stack_allocator_init_via_memory :: proc( memory : []byte ) -> ( stack : StackAll
stack.data = cast( [^]byte ) (cast( [^]StackAllocatorBase) stack.base)[ 1:]
stack.top = cast( ^StackAllocatorHeader) stack.data
stack.bottom = stack.first
stack.bottom = stack.top
return
}
@@ -213,12 +218,11 @@ stack_allocator_proc :: proc(
dll_pop_back( & stack.last, stack.last )
}
case .Free_All:
{
// TODO(Ed) : Review that we don't have any header issues with the reset.
stack.last = stack.first
stack.first.next = nil
stack.first.block_size = 0
}
stack.bottom = stack.top
stack.top.next = nil
stack.top.block_size = 0
case .Resize, .Resize_Non_Zeroed:
{
// Check if old_memory is the top block of the stack; if it is, just grow its size in place

View File

@@ -325,7 +325,7 @@ ui_box_equal :: proc( a, b : ^ UI_Box ) -> b32 {
ui_box_make :: proc( flags : UI_BoxFlags, label : string ) -> (^ UI_Box)
{
using get_state().ui_context
using ui := get_state().ui_context
key := ui_key_from_string( label )