Updates and fixes to memory

Edward R. Gonzalez 2024-03-14 00:00:44 -04:00
parent 2f574ef760
commit 180c296556
5 changed files with 55 additions and 26 deletions

View File

@@ -145,7 +145,7 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
rl.SetConfigFlags( {
rl.ConfigFlag.WINDOW_RESIZABLE,
// rl.ConfigFlag.WINDOW_TOPMOST,
rl.ConfigFlag.WINDOW_TOPMOST,
})
// Rough setup of window with rl stuff
@@ -266,17 +266,20 @@ reload :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem,
context.allocator = persistent_allocator()
context.temp_allocator = transient_allocator()
state := get_state()
state := get_state(); using state
// Procedure Addresses are not preserved on hot-reload. They must be restored for persistent data.
// The only way to alleviate this is to either do custom handles to allocators
// Or as done below, correct containers using allocators on reload.
// Thankfully persistent dynamic allocations are rare, and thus we know exactly which ones they are.
font_provider_data := & state.font_provider_data
font_provider_data.font_cache.hashes.backing = persistent_slab_allocator()
font_provider_data.font_cache.entries.backing = persistent_slab_allocator()
slab_reload( string_cache.slab, persistent_slab_allocator() )
string_cache.table.hashes.backing = persistent_slab_allocator()
string_cache.table.entries.backing = persistent_slab_allocator()
ui_reload( & get_state().project.workspace.ui, cache_allocator = persistent_slab_allocator() )
log("Module reloaded")

View File

@@ -71,6 +71,10 @@ pool_init :: proc (
return
}
pool_reload :: proc( using self : Pool, allocator : Allocator ) {
self.backing = allocator
}
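
Odin procedure parameters are immutable, so `self.backing = allocator` can only compile and take effect if Pool is a thin handle whose fields are reached through a pointer; the write then lands in the pool's header rather than in a local copy. A sketch of that assumed shape (PoolHeader's fields here are illustrative, not the full definition):

package example

import "core:mem"

// Assumed shape: Pool carries only a pointer to a header stored in the pool's
// own memory, so writes through a by-value `using self : Pool` still persist.
PoolHeader :: struct {
	backing    : mem.Allocator,
	block_size : uint,
}

Pool :: struct {
	using header : ^PoolHeader,
}

pool_reload_sketch :: proc( using self : Pool, allocator : mem.Allocator ) {
	self.backing = allocator // writes through self.header, not into a copy
}

main :: proc()
{
	header : PoolHeader
	pool   := Pool { & header }
	pool_reload_sketch( pool, context.allocator )
}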
pool_destroy :: proc ( using self : Pool )
{
if bucket_list.first != nil
@@ -105,7 +109,7 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
bucket := cast( ^PoolBucket) next_bucket_ptr
bucket.blocks = memory_after_header(bucket)
bucket.next_block = 0
log( str_fmt_tmp("Pool allocated block: %p capacity: %d", raw_data(bucket_memory), bucket_capacity))
// log( str_fmt_tmp("Pool allocated block: %p capacity: %d", raw_data(bucket_memory), bucket_capacity))
if self.bucket_list.first == nil {
@@ -115,7 +119,7 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
else {
dll_push_back( & self.bucket_list.last, bucket )
}
log( str_fmt_tmp("Bucket List First: %p", self.bucket_list.first))
// log( str_fmt_tmp("Bucket List First: %p", self.bucket_list.first))
next_bucket_ptr = next_bucket_ptr[ bucket_capacity: ]
}
@@ -142,7 +146,7 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
pool.free_list_head = pool.free_list_head.next // ll_pop
block = byte_slice( cast([^]byte) last_free, int(pool.block_size) )
log( str_fmt_tmp("Returning free block: %p %d", raw_data(block), pool.block_size))
// log( str_fmt_tmp("Returning free block: %p %d", raw_data(block), pool.block_size))
return
}
@@ -171,12 +175,12 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
// if current_bucket.next != nil {
if pool.current_bucket.next != nil {
// current_bucket = current_bucket.next
log( str_fmt_tmp("Bucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
// log( str_fmt_tmp("Bucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
pool.current_bucket = pool.current_bucket.next
}
else
{
log( "All previous buckets exhausted, allocating new bucket")
// log( "All previous buckets exhausted, allocating new bucket")
alloc_error := pool_allocate_buckets( pool, 1 )
if alloc_error != .None {
ensure(false, "Failed to allocate bucket")

View File

@@ -87,6 +87,14 @@ slab_init_pools :: proc ( using self : Slab, bucket_reserve_num : uint = 0 ) ->
return .None
}
slab_reload :: proc ( using self : Slab, allocator : Allocator )
{
for id in 0 ..< pools.idx {
pool := pools.items[id]
pool_reload( pool, allocator )
}
}
slab_destroy :: proc( using self : Slab )
{
for id in 0 ..< policy.idx {
@@ -124,7 +132,7 @@ slab_alloc :: proc( using self : Slab,
ensure(false, "Bad block from pool")
return nil, alloc_error
}
log( str_fmt_tmp("Retrieved block: %p %d", raw_data(block), len(block) ))
// log( str_fmt_tmp("Retrieved block: %p %d", raw_data(block), len(block) ))
// if zero_memory {
// slice.zero(block)
@@ -186,12 +194,12 @@ slab_resize :: proc( using self : Slab,
{
new_data_ptr := memory_after(data)
new_data = byte_slice( raw_data(data), new_size )
log( str_fmt_tmp("Resize via expanding block space allocation %p %d", new_data_ptr, int(new_size - old_size)))
// log( str_fmt_tmp("Resize via expanding block space allocation %p %d", new_data_ptr, int(new_size - old_size)))
if zero_memory && new_size > old_size {
to_zero := byte_slice( memory_after(data), int(new_size - old_size) )
slice.zero( to_zero )
log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, int(new_size - old_size)))
// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, int(new_size - old_size)))
}
return
}
@@ -209,7 +217,7 @@ slab_resize :: proc( using self : Slab,
// slice.zero( new_block )
// }
log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))
// log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))
if raw_data(data) != raw_data(new_block) {
copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )

View File

@@ -15,6 +15,7 @@ import "core:mem"
import "core:slice"
import "core:strings"
// Should this just store the key instead?
StringCached :: struct {
str : string,
runes : []rune,
@@ -52,7 +53,7 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator() )
verify(alloc_error == .None, "Failed to initialize the string cache" )
cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 64 * Kilobyte )
cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 2 * Kilobyte )
return
}

View File

@@ -28,14 +28,15 @@ import fmt_io "core:fmt"
str_fmt :: fmt_io.printf
str_fmt_alloc :: fmt_io.aprintf
str_fmt_tmp :: fmt_io.tprintf
str_fmt_buffer :: fmt_io.bprintf
str_fmt_builder :: fmt_io.sbprintf
import "core:log"
import "core:mem"
Allocator :: mem.Allocator
AllocatorError :: mem.Allocator_Error
TrackingAllocator :: mem.Tracking_Allocator
Arena :: mem.Arena
arena_allocator :: mem.arena_allocator
import "core:mem/virtual"
Arena :: virtual.Arena
MapFileError :: virtual.Map_File_Error
MapFileFlag :: virtual.Map_File_Flag
MapFileFlags :: virtual.Map_File_Flags
@@ -99,6 +100,9 @@ when ODIN_OS == runtime.Odin_OS_Type.Windows
// TODO(Ed): Disable the default allocators for the host, we'll be handling it instead.
RuntimeState :: struct {
persistent : Arena,
transient : Arena,
running : b32,
client_memory : ClientMemory,
sectr_api : sectr.ModuleAPI,
@@ -242,11 +246,23 @@ sync_sectr_api :: proc( sectr_api : ^sectr.ModuleAPI, memory : ^ClientMemory, lo
}
}
fmt_backing : [16 * Kilobyte] u8
persistent_backing : [2 * Megabyte] byte
transient_backing : [2 * Megabyte] byte
main :: proc()
{
state : RuntimeState
using state
mem.arena_init( & state.persistent, persistent_backing[:] )
mem.arena_init( & state.transient, transient_backing[:] )
context.allocator = arena_allocator( & state.persistent)
context.temp_allocator = arena_allocator( & state.transient)
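
For reference, the new host-side memory setup boils down to: fixed global buffers, two core:mem arenas, and the context allocators routed through them. A compressed, self-contained sketch of that wiring (sizes mirror the hunk above; RuntimeState is omitted):

package example

import "core:fmt"
import "core:mem"

// Fixed global buffers back both arenas; nothing below touches the OS heap.
persistent_backing : [2 * mem.Megabyte] byte
transient_backing  : [2 * mem.Megabyte] byte

main :: proc()
{
	persistent, transient : mem.Arena
	mem.arena_init( & persistent, persistent_backing[:] )
	mem.arena_init( & transient,  transient_backing[:] )

	// Route default and temporary allocations through the arenas.
	context.allocator      = mem.arena_allocator( & persistent )
	context.temp_allocator = mem.arena_allocator( & transient )

	block, err := mem.alloc_bytes( 512 ) // served out of persistent_backing
	fmt.println( len(block), err )
}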
// Setup profiling
profiler : SpallProfiler
{
@@ -267,16 +283,16 @@ main :: proc()
os.make_directory( Path_Logs )
}
timestamp := str_fmt_tmp("%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
path_logger_finalized = str_fmt_alloc( "%s/sectr_%v.log", Path_Logs, timestamp)
timestamp := str_fmt_buffer( fmt_backing[:], "%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
path_logger_finalized = str_fmt_buffer( fmt_backing[:], "%s/sectr_%v.log", Path_Logs, timestamp)
}
logger : sectr.Logger
logger_init( & logger, "Sectr Host", str_fmt_alloc( "%s/sectr.log", Path_Logs ) )
logger_init( & logger, "Sectr Host", str_fmt_buffer( fmt_backing[:], "%s/sectr.log", Path_Logs ) )
context.logger = to_odin_logger( & logger )
{
// Log System Context
backing_builder : [16 * Kilobyte] u8
backing_builder : [1 * Kilobyte] u8
builder := str_builder_from_bytes( backing_builder[:] )
str_fmt_builder( & builder, "Core Count: %v, ", os.processor_core_count() )
str_fmt_builder( & builder, "Page Size: %v", os.get_page_size() )
@@ -286,18 +302,13 @@ main :: proc()
memory := setup_memory( & profiler )
// TODO(Ed): Cannot use the manually created allocators for the host. Not sure why
// Something is wrong with the tracked_allocator init
// context.allocator = tracked_allocator( & memory.host_persistent )
// context.temp_allocator = tracked_allocator( & memory.host_transient )
// Load the Environment API for the first time
{
sectr_api = load_sectr_api( 1 )
verify( sectr_api.lib_version != 0, "Failed to initially load the sectr module" )
}
free_all( context.temp_allocator )
// free_all( context.temp_allocator )
running = true;
sectr_api = sectr_api
@@ -326,6 +337,8 @@ main :: proc()
delta_ns = time.tick_lap_time( & host_tick )
host_tick = time.tick_now()
free_all( arena_allocator( & state.transient))
}
// Determine how the run_cycle completed, if it failed due to an error,
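
The free_all on the transient arena added to the frame loop is the payoff of the arena setup earlier in main: everything allocated from the transient/temp allocator during a frame is released in a single rewind. A small sketch of that per-frame reset, under the same assumptions as the arena sketch above:

package example

import "core:mem"

transient_backing : [2 * mem.Megabyte] byte

main :: proc()
{
	transient : mem.Arena
	mem.arena_init( & transient, transient_backing[:] )
	transient_allocator := mem.arena_allocator( & transient )

	for _ in 0 ..< 3 {
		// Per-frame scratch allocations come from the transient arena...
		scratch, _ := mem.alloc_bytes( 4 * mem.Kilobyte, allocator = transient_allocator )
		_ = scratch

		// ...and the whole arena rewinds in one call at frame end.
		free_all( transient_allocator )
	}
}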
@@ -344,5 +357,5 @@ main :: proc()
file_close( logger.file )
// TODO(Ed) : Add string interning!!!!!!!!!
// file_rename( logger.file_path, path_logger_finalized )
file_rename( str_fmt_tmp( "%s/sectr.log", Path_Logs), path_logger_finalized )
file_rename( str_fmt_buffer( fmt_backing[:], "%s/sectr.log", Path_Logs), path_logger_finalized )
}