Progress on lifting the 'grime' module to its own package
@ -79,7 +79,7 @@ font_provider_startup :: proc()
	font_provider_data := & get_state().font_provider_data; using font_provider_data

	font_cache_alloc_error : AllocatorError
-	font_cache, font_cache_alloc_error = make( HMapChained(FontDef), hmap_closest_prime(1 * Kilo), persistent_allocator(), dbg_name = "font_cache" )
+	font_cache, font_cache_alloc_error = make( HMapChained(FontDef), hmap_closest_prime(1 * Kilo), persistent_allocator() /*dbg_name = "font_cache"*/ )
	verify( font_cache_alloc_error == AllocatorError.None, "Failed to allocate font_cache" )

	log("font_cache created")
@ -1,225 +0,0 @@
/*
Separate chaining hashtable with tombstones (vacancy aware)

This is an alternative to Odin's map and the zpl hashtable I first used for this codebase.

This is a hashtable loosely based on what I saw in the raddbg codebase.
It uses a fixed-size lookup table for the base layer of entries that can be chained.
Each slot keeps track of its vacancy (tombstone: whether it is occupied).
If it's occupied, a new slot is chained using the fixed bucket-size pool allocator,
which will have its blocks sized to the type of the table.

This is ideal for tables that have an indeterminate scope for how entries are added,
and where direct pointers are kept across the codebase instead of a key to the slot.
*/
package sectr

import "core:mem"

HTable_Minimum_Capacity :: 4 * Kilobyte

HMapChainedSlot :: struct( $Type : typeid ) {
	using links : DLL_NodePN(HMapChainedSlot(Type)),
	value    : Type,
	key      : u64,
	occupied : b32,
}

HMapChainedHeader :: struct( $Type : typeid ) {
	pool   : Pool,
	lookup : [] ^HMapChainedSlot(Type),
}

HMapChained :: struct( $Type : typeid ) {
	using header : ^HMapChainedHeader(Type),
}

// Provides the nearest prime number value for the given capacity
hmap_closest_prime :: proc( capacity : uint ) -> uint
{
	prime_table : []uint = {
		53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593,
		49157, 98317, 196613, 393241, 786433, 1572869, 3145739,
		6291469, 12582917, 25165843, 50331653, 100663319,
		201326611, 402653189, 805306457, 1610612741, 3221225473, 6442450941
	};
	for slot in prime_table {
		if slot >= capacity {
			return slot
		}
	}
	return prime_table[len(prime_table) - 1]
}

hmap_chained_init :: proc( $HMapChainedType : typeid/HMapChained($Type), lookup_capacity : uint,
	allocator               := context.allocator,
	pool_bucket_cap         : uint   = 1 * Kilo,
	pool_bucket_reserve_num : uint   = 0,
	pool_alignment          : uint   = mem.DEFAULT_ALIGNMENT,
	dbg_name                : string = ""
) -> (table : HMapChained(Type), error : AllocatorError)
{
	header_size := size_of(HMapChainedHeader(Type))
	size        := header_size + int(lookup_capacity) * size_of( ^HMapChainedSlot(Type)) + size_of(int)

	raw_mem : rawptr
	raw_mem, error = alloc( size, allocator = allocator )
	if error != AllocatorError.None do return

	table.header = cast( ^HMapChainedHeader(Type)) raw_mem
	table.pool, error = pool_init(
		should_zero_buckets = false,
		block_size          = size_of(HMapChainedSlot(Type)),
		bucket_capacity     = pool_bucket_cap,
		bucket_reserve_num  = pool_bucket_reserve_num,
		alignment           = pool_alignment,
		allocator           = allocator,
		dbg_name            = str_intern(str_fmt("%v: pool", dbg_name)).str
	)
	data := transmute([^] ^HMapChainedSlot(Type)) (transmute( [^]HMapChainedHeader(Type)) table.header)[1:]
	table.lookup = slice_ptr( data, int(lookup_capacity) )
	return
}

hmap_chained_clear :: proc( using self : HMapChained($Type))
{
	for slot in lookup
	{
		if slot == nil {
			continue
		}
		for probe_slot := slot.next; probe_slot != nil; probe_slot = probe_slot.next {
			probe_slot.occupied = false
		}
		slot.occupied = false
	}
}

hmap_chained_destroy :: proc( using self : ^HMapChained($Type)) {
	backing := pool.backing
	pool_destroy( pool )
	free( self.header, backing )
	self.header = nil
}

hmap_chained_lookup_id :: #force_inline proc( using self : HMapChained($Type), key : u64 ) -> u64
{
	hash_index := key % u64( len(lookup) )
	return hash_index
}

hmap_chained_get :: proc( using self : HMapChained($Type), key : u64) -> ^Type
{
	// profile(#procedure)
	surface_slot := lookup[hmap_chained_lookup_id(self, key)]

	if surface_slot == nil {
		return nil
	}

	if surface_slot.occupied && surface_slot.key == key {
		return & surface_slot.value
	}

	for slot := surface_slot.next; slot != nil; slot = slot.next {
		if slot.occupied && slot.key == key {
			return & slot.value
		}
	}

	return nil
}

hmap_chained_reload :: proc( self : HMapChained($Type), allocator : Allocator )
{
	pool_reload(self.pool, allocator)
}

// Returns true if a slot was actually found and marked as vacant
// Entries already found to be vacant will not return true
hmap_chained_remove :: proc( using self : HMapChained($Type), key : u64 ) -> b32
{
	surface_slot := lookup[hmap_chained_lookup_id(self, key)]

	if surface_slot == nil {
		return false
	}

	if surface_slot.occupied && surface_slot.key == key {
		surface_slot.occupied = false
		return true
	}

	for slot := surface_slot.next; slot != nil; slot = slot.next
	{
		if slot.occupied && slot.key == key {
			slot.occupied = false
			return true
		}
	}

	return false
}

// Sets the value to a vacant slot
// Will preemptively allocate the next slot in the hashtable if it's null for the slot.
hmap_chained_set :: proc( using self : HMapChained($Type), key : u64, value : Type ) -> (^ Type, AllocatorError)
{
	// profile(#procedure)
	hash_index   := hmap_chained_lookup_id(self, key)
	surface_slot := lookup[hash_index]
	set_slot :: #force_inline proc( using self : HMapChained(Type),
		slot  : ^HMapChainedSlot(Type),
		key   : u64,
		value : Type
	) -> (^ Type, AllocatorError )
	{
		error := AllocatorError.None
		if slot.next == nil {
			block : []byte
			block, error = pool_grab(pool)
			next := transmute( ^HMapChainedSlot(Type)) & block[0]
			slot.next = next
			next.prev = slot
		}
		slot.key      = key
		slot.value    = value
		slot.occupied = true
		return & slot.value, error
	}

	if surface_slot == nil {
		block, error := pool_grab(pool)
		if error != AllocatorError.None {
			ensure(false, "Allocation failure for chained slot in hash table")
			return nil, error
		}
		surface_slot := transmute( ^HMapChainedSlot(Type)) & block[0]
		surface_slot.key      = key
		surface_slot.value    = value
		surface_slot.occupied = true
		lookup[hash_index] = surface_slot

		block, error = pool_grab(pool)
		if error != AllocatorError.None {
			ensure(false, "Allocation failure for chained slot in hash table")
			return & surface_slot.value, error
		}
		next := transmute( ^HMapChainedSlot(Type)) & block[0]
		surface_slot.next = next
		next.prev = surface_slot
		return & surface_slot.value, error
	}

	if ! surface_slot.occupied
	{
		result, error := set_slot( self, surface_slot, key, value)
		return result, error
	}

	slot := surface_slot.next
	for ; slot != nil; slot = slot.next
	{
		if ! slot.occupied
		{
			result, error := set_slot( self, slot, key, value)
			return result, error
		}
	}
	ensure(false, "Somehow got to a null slot that wasn't preemptively allocated from a previous set")
	return nil, AllocatorError.None
}
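
Below is a minimal usage sketch of the table API above. It is hedged: it assumes the codebase's `Kilo`, `verify`, and `AllocatorError` helpers are in scope, and the key is a hypothetical pre-hashed value, since this table takes `u64` keys directly rather than hashing for you.

example_hmap_chained_usage :: proc()
{
	// A prime lookup capacity reduces clustering from the modulo in hmap_chained_lookup_id.
	table, init_error := hmap_chained_init( HMapChained(int), hmap_closest_prime(1 * Kilo) )
	verify( init_error == AllocatorError.None, "Failed to allocate example table" )

	key : u64 = 0xDEAD_BEEF // hypothetical pre-hashed key
	stored, set_error := hmap_chained_set( table, key, 42 )
	verify( set_error == AllocatorError.None, "Failed to insert into example table" )

	// Slots never move once chained, so this pointer stays valid for the table's lifetime.
	found := hmap_chained_get( table, key )
	assert( found == stored && (found ^) == 42 )

	hmap_chained_remove( table, key ) // tombstones the slot; its memory stays chained for reuse
	assert( hmap_chained_get( table, key ) == nil )

	hmap_chained_destroy( & table )
}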
@ -165,6 +165,27 @@ import "codebase:grime"
array_remove_at :: grime.array_remove_at
array_resize :: grime.array_resize

HMapChained :: grime.HMapChained

hmap_closest_prime :: grime.hmap_closest_prime

hmap_chained_get :: grime.hmap_chained_get
hmap_chained_init :: grime.hmap_chained_init
hmap_chained_set :: grime.hmap_chained_set
hmap_chained_reload :: grime.hmap_chained_reload

Pool :: grime.Pool

Slab :: grime.Slab
SlabPolicy :: grime.SlabPolicy
SlabSizeClass :: grime.SlabSizeClass

slab_allocator :: grime.slab_allocator
slab_alloc :: grime.slab_alloc
slab_init :: grime.slab_init
slab_reload :: grime.slab_reload
slab_validate_pools :: grime.slab_validate_pools

StackFixed :: grime.StackFixed

stack_clear :: grime.stack_clear
@ -1,361 +0,0 @@
/*
This is a pool allocator set up to grow incrementally via buckets.
Buckets are stored in singly-linked lists so that allocations aren't necessarily contiguous.

The pool is set up with the intention to only grab single entries from a bucket,
not a contiguous array of them.
Thus the free-list only tracks the last free entries thrown out by the user,
irrespective of the bucket they originated from.
This means that if there is heavy recycling of entries in a pool,
there can be a large discrepancy in memory locality if buckets are small.

The pool doesn't allocate any buckets on initialization unless the user specifies.
*/
package sectr

import "base:intrinsics"
import "base:runtime"
import "core:mem"
import "core:slice"

Pool :: struct {
	using header : ^PoolHeader,
}

PoolHeader :: struct {
	backing  : Allocator,
	dbg_name : string,
	tracker  : MemoryTracker,

	zero_bucket     : b32,
	block_size      : uint,
	bucket_capacity : uint,
	alignment       : uint,

	free_list_head : ^Pool_FreeBlock,
	current_bucket : ^PoolBucket,
	bucket_list    : DLL_NodeFL( PoolBucket ),
}

PoolBucket :: struct {
	using nodes : DLL_NodePN( PoolBucket ),
	next_block  : uint,
	blocks      : [^]byte,
}

Pool_FreeBlock :: struct {
	next : ^Pool_FreeBlock,
}

Pool_Check_Release_Object_Validity :: true

pool_init :: proc (
	should_zero_buckets : b32,
	block_size          : uint,
	bucket_capacity     : uint,
	bucket_reserve_num  : uint      = 0,
	alignment           : uint      = mem.DEFAULT_ALIGNMENT,
	allocator           : Allocator = context.allocator,
	dbg_name            : string,
) -> ( pool : Pool, alloc_error : AllocatorError )
{
	header_size := align_forward_int( size_of(PoolHeader), int(alignment) )

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( header_size, int(alignment), allocator )
	if alloc_error != .None do return

	ensure(block_size > 0, "Bad block size provided")
	ensure(bucket_capacity > 0, "Bad bucket capacity provided")

	pool.header          = cast( ^PoolHeader) raw_mem
	pool.zero_bucket     = should_zero_buckets
	pool.backing         = allocator
	pool.dbg_name        = dbg_name
	pool.block_size      = align_forward_uint(block_size, alignment)
	pool.bucket_capacity = bucket_capacity
	pool.alignment       = alignment

	when ODIN_DEBUG {
		memtracker_init( & pool.tracker, allocator, Kilobyte * 96, dbg_name )
	}

	if bucket_reserve_num > 0 {
		alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
	}

	pool.current_bucket = pool.bucket_list.first
	return
}

pool_reload :: proc( pool : Pool, allocator : Allocator ) {
	pool.backing = allocator
}

pool_destroy :: proc ( using self : Pool )
{
	if bucket_list.first != nil
	{
		bucket := bucket_list.first
		for bucket != nil {
			next := bucket.next // Grab next before the free; reading a freed bucket is invalid.
			free( bucket, backing )
			bucket = next
		}
	}

	free( self.header, backing )

	when ODIN_DEBUG {
		memtracker_clear( self.tracker )
	}
}

pool_allocate_buckets :: proc( pool : Pool, num_buckets : uint ) -> AllocatorError
{
	profile(#procedure)
	if num_buckets == 0 {
		return .Invalid_Argument
	}
	header_size := cast(uint) align_forward_int( size_of(PoolBucket), int(pool.alignment))
	bucket_size := header_size + pool.bucket_capacity
	to_allocate := cast(int) (bucket_size * num_buckets)

	// log(str_fmt_tmp("Allocating %d bytes for %d buckets with header_size %d bytes & bucket_size %d", to_allocate, num_buckets, header_size, bucket_size ))

	bucket_memory : []byte
	alloc_error   : AllocatorError

	pool_validate( pool )
	if pool.zero_bucket {
		bucket_memory, alloc_error = alloc_bytes( to_allocate, int(pool.alignment), pool.backing )
	}
	else {
		bucket_memory, alloc_error = alloc_bytes_non_zeroed( to_allocate, int(pool.alignment), pool.backing )
	}
	pool_validate( pool )

	// log(str_fmt_tmp("Bucket memory size: %d bytes, without header: %d", len(bucket_memory), len(bucket_memory) - int(header_size)))

	if alloc_error != .None {
		return alloc_error
	}
	verify( bucket_memory != nil, "Bucket memory is null")

	next_bucket_ptr := cast( [^]byte) raw_data(bucket_memory)
	for _ in 0 ..< num_buckets
	{
		bucket := cast( ^PoolBucket) next_bucket_ptr
		bucket.blocks     = memory_after_header(bucket)
		bucket.next_block = 0
		// log( str_fmt_tmp("\tPool (%d) allocated bucket: %p start %p capacity: %d (raw: %d)",
		// 	pool.block_size,
		// 	raw_data(bucket_memory),
		// 	bucket.blocks,
		// 	pool.bucket_capacity / pool.block_size,
		// 	pool.bucket_capacity ))

		if pool.bucket_list.first == nil {
			pool.bucket_list.first = bucket
			pool.bucket_list.last  = bucket
		}
		else {
			dll_push_back( & pool.bucket_list.last, bucket )
		}
		// log( str_fmt_tmp("Bucket List First: %p", self.bucket_list.first))

		next_bucket_ptr = next_bucket_ptr[ bucket_size: ]
	}
	return alloc_error
}

pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, alloc_error : AllocatorError )
{
	pool := pool
	if pool.current_bucket != nil {
		if ( pool.current_bucket.blocks == nil ) {
			ensure( false, str_fmt("(corruption) current_bucket was wiped %p", pool.current_bucket) )
		}
		// verify( pool.current_bucket.blocks != nil, str_fmt_tmp("(corruption) current_bucket was wiped %p", pool.current_bucket) )
	}
	// profile(#procedure)
	alloc_error = .None

	// Check the free-list first for a block
	if pool.free_list_head != nil
	{
		head := & pool.free_list_head

		// Compiler Bug? Fails to compile
		// last_free := ll_pop( & pool.free_list_head )
		last_free : ^Pool_FreeBlock = pool.free_list_head
		pool.free_list_head = pool.free_list_head.next

		block = byte_slice( cast([^]byte) last_free, int(pool.block_size) )
		// log( str_fmt_tmp("\tReturning free block: %p %d", raw_data(block), pool.block_size))
		if zero_memory {
			slice.zero(block)
		}

		when ODIN_DEBUG {
			memtracker_register_auto_name_slice( & pool.tracker, block)
		}
		return
	}

	if pool.current_bucket == nil
	{
		alloc_error = pool_allocate_buckets( pool, 1 )
		if alloc_error != .None {
			ensure(false, "Failed to allocate bucket")
			return
		}
		pool.current_bucket = pool.bucket_list.first
		// log( "First bucket allocation")
	}

	next := uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
	end  := uintptr(pool.current_bucket.blocks) + uintptr(pool.bucket_capacity)

	blocks_left, overflow_signal := intrinsics.overflow_sub( end, next )
	if blocks_left == 0 || overflow_signal
	{
		// Compiler Bug
		// if current_bucket.next != nil {
		if pool.current_bucket.next != nil {
			// current_bucket = current_bucket.next
			// log( str_fmt_tmp("\tBucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
			pool.current_bucket = pool.current_bucket.next
			verify( pool.current_bucket.blocks != nil, "New current_bucket's blocks are null (new current_bucket is corrupted)" )
		}
		else
		{
			// log( "\tAll previous buckets exhausted, allocating new bucket")
			alloc_error = pool_allocate_buckets( pool, 1 )
			if alloc_error != .None {
				ensure(false, "Failed to allocate bucket")
				return
			}
			pool.current_bucket = pool.current_bucket.next
			verify( pool.current_bucket.blocks != nil, "Next's blocks are null (Post new bucket alloc)" )
		}
	}

	verify( pool.current_bucket != nil, "Attempted to grab a block from a null bucket reference" )

	// Compiler Bug
	// block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
	// self.current_bucket.next_block += block_size

	block_ptr := cast(rawptr) (uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block))

	block = byte_slice( block_ptr, int(pool.block_size) )
	pool.current_bucket.next_block += pool.block_size

	next = uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
	// log( str_fmt_tmp("\tgrabbing block: %p from %p blocks left: %d", raw_data(block), pool.current_bucket.blocks, (end - next) / uintptr(pool.block_size) ))

	if zero_memory {
		slice.zero(block)
		// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", block_ptr, cast(rawptr) (uintptr(block_ptr) + uintptr(pool.block_size))))
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name_slice( & pool.tracker, block)
	}
	return
}

pool_release :: proc( self : Pool, block : []byte, loc := #caller_location )
{
	// profile(#procedure)
	if Pool_Check_Release_Object_Validity {
		within_bucket := pool_validate_ownership( self, block )
		verify( within_bucket, "Attempted to release data that is not within a bucket of this pool", location = loc )
	}

	// Compiler bug
	// ll_push( & self.free_list_head, cast(^Pool_FreeBlock) raw_data(block) )

	pool_watch := self
	head_watch := & self.free_list_head

	// ll_push:
	new_free_block := cast(^Pool_FreeBlock) raw_data(block)
	(new_free_block ^) = {}
	new_free_block.next = self.free_list_head
	self.free_list_head = new_free_block

	// new_free_block = new_free_block
	// log( str_fmt_tmp("Released block: %p %d", new_free_block, self.block_size))

	start := new_free_block
	end   := transmute(rawptr) (uintptr(new_free_block) + uintptr(self.block_size) - 1)
	when ODIN_DEBUG {
		memtracker_unregister( self.tracker, { start, end } )
	}
}

pool_reset :: proc( using pool : Pool )
{
	bucket : ^PoolBucket = bucket_list.first // TODO(Ed): Compiler bug? Build fails unless ^PoolBucket is explicitly specified.
	for ; bucket != nil; bucket = bucket.next {
		bucket.next_block = 0
	}

	pool.free_list_head = nil
	pool.current_bucket = bucket_list.first
}

pool_validate :: proc( pool : Pool )
{
	when !ODIN_DEBUG do return
	pool := pool
	// Make sure all buckets don't show any indication of corruption
	bucket : ^PoolBucket = pool.bucket_list.first

	if bucket != nil && uintptr(bucket) < 0x10000000000 {
		ensure(false, str_fmt("Found a corrupted bucket %p", bucket ))
	}
	// Compiler bug ^^ same as pool_reset
	for ; bucket != nil; bucket = bucket.next
	{
		if bucket != nil && uintptr(bucket) < 0x10000000000 {
			ensure(false, str_fmt("Found a corrupted bucket %p", bucket ))
		}

		if ( bucket.blocks == nil ) {
			ensure(false, str_fmt("Found a corrupted bucket %p", bucket ))
		}
	}
}

pool_validate_ownership :: proc( using self : Pool, block : [] byte ) -> b32
{
	profile(#procedure)
	within_bucket := b32(false)

	// Compiler Bug : Same as pool_reset
	bucket : ^PoolBucket = bucket_list.first
	for ; bucket != nil; bucket = bucket.next
	{
		start         := uintptr( bucket.blocks )
		end           := start + uintptr(bucket_capacity)
		block_address := uintptr(raw_data(block))

		if start <= block_address && block_address < end
		{
			misalignment := (block_address - start) % uintptr(block_size)
			if misalignment != 0 {
				ensure(false, "pool_validate_ownership: This data is within this pool's buckets, however it's not aligned to the start of a block")
				log(str_fmt("Block address: %p Misalignment: %p closest: %p",
					transmute(rawptr) block_address,
					transmute(rawptr) misalignment,
					rawptr(block_address - misalignment)))
			}

			within_bucket = true
			break
		}
	}

	return within_bucket
}
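
A minimal sketch of the grab/release cycle the header comment describes, assuming the codebase's `Kilobyte`, `verify`, and `AllocatorError` helpers are in scope; the block and bucket sizes are illustrative only.

example_pool_usage :: proc()
{
	// 64-byte blocks; nothing is reserved up front since bucket_reserve_num defaults to 0.
	pool, init_error := pool_init(
		should_zero_buckets = false,
		block_size          = 64,
		bucket_capacity     = 64 * Kilobyte,
		dbg_name            = "example pool",
	)
	verify( init_error == AllocatorError.None, "Failed to init example pool" )

	block_a, _ := pool_grab( pool ) // first grab lazily allocates bucket 1
	block_b, _ := pool_grab( pool ) // bump-allocated right after block_a within the bucket
	_ = block_b

	pool_release( pool, block_a )   // block_a becomes the free-list head
	block_c, _ := pool_grab( pool ) // the free-list is checked first, so block_a is recycled
	assert( raw_data(block_c) == raw_data(block_a) )

	pool_destroy( pool )
}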
@ -1,335 +0,0 @@
/* Slab Allocator
These are a collection of pool allocators serving as a general way
to allocate a large amount of dynamically sized data.

The usual use case for this is when an arena, stack,
or dedicated pool allocator fails to be enough to handle a data structure
that either is too random with its size (ex: strings)
or is intended to grow an arbitrary degree with an unknown upper bound (dynamic arrays and hashtables).

The prototype will use slab allocators for two purposes:
* String interning
* General purpose set for handling large arrays & hash tables within some underlying arena or stack.

Technically speaking the general-purpose situations can instead be grown on demand
with a dedicated segment of vmem; however, this might be overkill
if the worst-case buckets allocated are < 500 MB for most app usage.

The slab allocators are expected to hold growable pool allocators,
where each pool stores a 'bucket' of fixed-size blocks of memory.
When a pool's bucket is full it will request another bucket from its arena
for permanent usage within the arena's lifetime.

A free-list of free blocks is tracked for each pool (provided by the underlying pool allocator).

A slab starts out with pools initialized with no buckets and grows as needed.
When a slab is initialized, the slab policy is provided so it knows how many size-classes there should be,
each of which contains the ratio of bucket to block size.
*/
package sectr

import "base:runtime"
import "core:mem"
import "core:slice"

SlabSizeClass :: struct {
	bucket_capacity : uint,
	block_size      : uint,
	block_alignment : uint,
}

Slab_Max_Size_Classes :: 64

SlabPolicy :: StackFixed(SlabSizeClass, Slab_Max_Size_Classes)

SlabHeader :: struct {
	dbg_name : string,
	tracker  : MemoryTracker,
	backing  : Allocator,
	pools    : StackFixed(Pool, Slab_Max_Size_Classes),
}

Slab :: struct {
	using header : ^SlabHeader,
}

slab_allocator :: proc( slab : Slab ) -> ( allocator : Allocator ) {
	allocator.procedure = slab_allocator_proc
	allocator.data      = slab.header
	return
}

slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator = context.allocator, dbg_name : string = "", should_zero_buckets : b32 = false ) -> ( slab : Slab, alloc_error : AllocatorError )
{
	header_size :: size_of( SlabHeader )

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( header_size, mem.DEFAULT_ALIGNMENT, allocator )
	if alloc_error != .None do return

	slab.header   = cast( ^SlabHeader) raw_mem
	slab.backing  = allocator
	slab.dbg_name = dbg_name
	when ODIN_DEBUG {
		memtracker_init( & slab.tracker, allocator, Kilobyte * 256, dbg_name )
	}
	alloc_error = slab_init_pools( slab, policy, bucket_reserve_num, should_zero_buckets )
	return
}

slab_init_pools :: proc ( using self : Slab, policy : ^SlabPolicy, bucket_reserve_num : uint = 0, should_zero_buckets : b32 ) -> AllocatorError
{
	profile(#procedure)

	for id in 0 ..< policy.idx {
		using size_class := policy.items[id]

		pool_dbg_name := str_fmt("%v pool[%v]", dbg_name, block_size, allocator = backing)
		pool, alloc_error := pool_init( should_zero_buckets, block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing, pool_dbg_name )
		if alloc_error != .None do return alloc_error

		push( & self.pools, pool )
	}
	return .None
}

slab_reload :: proc ( slab : Slab, allocator : Allocator )
{
	slab.backing = allocator

	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_reload( pool, slab.backing )
	}
}

slab_destroy :: proc( using self : Slab )
{
	for id in 0 ..< pools.idx {
		pool := pools.items[id]
		pool_destroy( pool )
	}

	free( self.header, backing )
	when ODIN_DEBUG {
		memtracker_clear(tracker)
	}
}

slab_alloc :: proc( self : Slab,
	size        : uint,
	alignment   : uint,
	zero_memory := true,
	loc         := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
	// profile(#procedure)
	pool : Pool
	id   : u32 = 0
	for ; id < self.pools.idx; id += 1 {
		pool = self.pools.items[id]

		if pool.block_size >= size && pool.alignment >= alignment {
			break
		}
	}
	verify( id < self.pools.idx, "There is not a size class in the slab's policy to satisfy the requested allocation", location = loc )
	verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = loc )

	block : []byte
	slab_validate_pools( self )
	block, alloc_error = pool_grab(pool)
	slab_validate_pools( self )

	if block == nil || alloc_error != .None {
		ensure(false, "Bad block from pool")
		return nil, alloc_error
	}
	// log( str_fmt_tmp("%v: Retrieved block: %p %d", self.dbg_name, raw_data(block), len(block) ))

	data = byte_slice(raw_data(block), size)
	if zero_memory {
		slice.zero(data)
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name( & self.tracker, raw_data(block), & block[ len(block) - 1 ] )
	}
	return
}

slab_free :: proc( using self : Slab, data : []byte, loc := #caller_location )
{
	// profile(#procedure)
	pool : Pool
	for id in 0 ..< pools.idx
	{
		pool = pools.items[id]
		if pool_validate_ownership( pool, data ) {
			start := raw_data(data)
			end   := ptr_offset(start, pool.block_size - 1)

			when ODIN_DEBUG {
				memtracker_unregister( self.tracker, { start, end } )
			}

			pool_release( pool, data, loc )
			return
		}
	}
	verify(false, "Attempted to free a block not within a pool of this slab", location = loc)
}

slab_resize :: proc( using self : Slab,
	data        : []byte,
	new_size    : uint,
	alignment   : uint,
	zero_memory := true,
	loc         := #caller_location
) -> ( new_data : []byte, alloc_error : AllocatorError )
{
	// profile(#procedure)
	old_size := uint( len(data))

	pool_resize, pool_old : Pool
	for id in 0 ..< pools.idx
	{
		pool := pools.items[id]

		if pool.block_size >= new_size && pool.alignment >= alignment {
			pool_resize = pool
		}
		if pool_validate_ownership( pool, data ) {
			pool_old = pool
		}
		if pool_resize.header != nil && pool_old.header != nil {
			break
		}
	}

	verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator", location = loc )

	// Resize will keep the block in the same size_class, just give it more of its already allocated block
	if pool_old.block_size == pool_resize.block_size
	{
		new_data_ptr := memory_after(data)
		new_data      = byte_slice( raw_data(data), new_size )
		// log( dump_stacktrace() )
		// log( str_fmt_tmp("%v: Resize via expanding block space allocation %p %d", dbg_name, new_data_ptr, int(new_size - old_size)))

		if zero_memory && new_size > old_size {
			to_zero := byte_slice( new_data_ptr, int(new_size - old_size) )

			slab_validate_pools( self )
			slice.zero( to_zero )
			slab_validate_pools( self )

			// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, cast(rawptr) (uintptr(new_data_ptr) + uintptr(new_size - old_size))))
		}
		return
	}

	// We'll need to provide an entirely new block, so the data will need to be copied over.
	new_block : []byte

	slab_validate_pools( self )
	new_block, alloc_error = pool_grab( pool_resize )
	slab_validate_pools( self )

	if new_block == nil {
		ensure(false, "Retrieved a null block")
		return
	}

	if alloc_error != .None do return

	// TODO(Ed): Reapply this when safe.
	if zero_memory {
		slice.zero( new_block )
		// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", raw_data(new_block), cast(rawptr) (uintptr(raw_data(new_block)) + uintptr(new_size))))
	}

	// log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))

	if raw_data(data) != raw_data(new_block) {
		// log( str_fmt_tmp("%v: Resize via new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
		copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
		pool_release( pool_old, data )

		start := raw_data( data )
		end   := rawptr(uintptr(start) + uintptr(pool_old.block_size) - 1)

		when ODIN_DEBUG {
			memtracker_unregister( self.tracker, { start, end } )
		}
	}

	new_data = new_block[ :new_size]
	when ODIN_DEBUG {
		memtracker_register_auto_name( & self.tracker, raw_data(new_block), & new_block[ len(new_block) - 1 ] )
	}
	return
}

slab_reset :: proc( slab : Slab )
{
	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_reset( pool )
	}
	when ODIN_DEBUG {
		memtracker_clear(slab.tracker)
	}
}

slab_validate_pools :: proc( slab : Slab )
{
	slab := slab
	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_validate( pool )
	}
}

slab_allocator_proc :: proc(
	allocator_data : rawptr,
	mode           : AllocatorMode,
	size           : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	loc            := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
	slab : Slab
	slab.header = cast( ^SlabHeader) allocator_data

	size      := uint(size)
	alignment := uint(alignment)
	old_size  := uint(old_size)

	switch mode
	{
		case .Alloc, .Alloc_Non_Zeroed:
			return slab_alloc( slab, size, alignment, (mode != .Alloc_Non_Zeroed), loc)

		case .Free:
			slab_free( slab, byte_slice( old_memory, int(old_size)), loc )

		case .Free_All:
			slab_reset( slab )

		case .Resize, .Resize_Non_Zeroed:
			return slab_resize( slab, byte_slice(old_memory, int(old_size)), size, alignment, (mode != .Resize_Non_Zeroed), loc)

		case .Query_Features:
			set := cast( ^AllocatorModeSet) old_memory
			if set != nil {
				(set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
			}

		case .Query_Info:
			alloc_error = .Mode_Not_Implemented
	}
	return
}
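
A minimal sketch of wiring a policy into the slab and routing a growable container through it. Hedged assumptions: `push` works on a bare SlabPolicy the same way slab_init_pools uses it on the pool stack, `Kilobyte` and `verify` are in scope, and the size-classes are illustrative only.

example_slab_usage :: proc()
{
	// Each size-class pairs a bucket capacity with the fixed block size carved from it.
	// Pushed smallest-first so slab_alloc's linear scan lands on the tightest fit.
	policy : SlabPolicy
	push( & policy, SlabSizeClass { 64 * Kilobyte, 64, mem.DEFAULT_ALIGNMENT } )
	push( & policy, SlabSizeClass { 256 * Kilobyte, 256, mem.DEFAULT_ALIGNMENT } )
	push( & policy, SlabSizeClass { 1024 * Kilobyte, 1 * Kilobyte, mem.DEFAULT_ALIGNMENT } )

	slab, init_error := slab_init( & policy, dbg_name = "example slab" )
	verify( init_error == AllocatorError.None, "Failed to init example slab" )

	// slab_allocator adapts the slab to Odin's Allocator interface,
	// so container growth is serviced by slab_allocator_proc above.
	values, make_error := make( [dynamic]u64, 0, 128, slab_allocator(slab) )
	verify( make_error == AllocatorError.None, "Failed to allocate dynamic array" )
	append( & values, 1 )

	slab_destroy( slab )
}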
@ -29,8 +29,8 @@ ui_floating_startup :: proc( self : ^UI_FloatingManager, build_queue_cap, tracke
		return error
	}

-	tracked_dbg_name := str_intern(str_fmt("%s: tracked", dbg_name))
-	self.tracked, error = make( HMapChained(UI_Floating), uint(tracked_cap), allocator, dbg_name = tracked_dbg_name.str )
+	// tracked_dbg_name := str_intern(str_fmt("%s: tracked", dbg_name))
+	self.tracked, error = make( HMapChained(UI_Floating), uint(tracked_cap), allocator, /*dbg_name = tracked_dbg_name.str*/ )
	if error != AllocatorError.None
	{
		ensure(false, "Failed to allocate tracking table")