Started to overhaul memory setup + runtime fixes

Edward R. Gonzalez 2024-03-07 15:57:05 -05:00
parent 6836459a1d
commit f2237e7211
13 changed files with 256 additions and 358 deletions

View File

@@ -18,54 +18,63 @@ ModuleAPI :: struct {
write_time : FileTime,
lib_version : i32,
startup : type_of( startup ),
shutdown : type_of( sectr_shutdown ),
reload : type_of( reload ),
tick : type_of( tick ),
clean_temp : type_of( clean_temp ),
startup : type_of( startup ),
shutdown : type_of( sectr_shutdown ),
reload : type_of( reload ),
tick : type_of( tick ),
clean_frame : type_of( clean_frame ),
}
@export
startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ Logger )
startup :: proc( persistent_mem, frame_mem, transient_mem, files_buffer_mem : ^VArena, host_logger : ^ Logger )
{
logger_init( & Memory_App.logger, "Sectr", host_logger.file_path, host_logger.file )
context.logger = to_odin_logger( & Memory_App.logger )
// Setup memory for the first time
{
arena_size :: size_of( Arena)
internals_size :: 4 * Megabyte
using Memory_App;
block := live_mem.curr_block
persistent = persistent_mem
frame = frame_mem
transient = transient_mem
files_buffer = files_buffer_mem
live = live_mem
snapshot = snapshot_mem
persistent_slice := slice_ptr( block.base, Memory_Persistent_Size )
transient_slice := slice_ptr( memory_after( persistent_slice), Memory_Trans_Temp_Szie )
temp_slice := slice_ptr( memory_after( transient_slice), Memory_Trans_Temp_Szie )
when Use_TrackingAllocator {
// We assign the beginning of the block to be the host's persistent memory's arena.
// Then we offset past the arena and take the slice after it, sized to the host's persistent memory.
persistent = tracked_allocator_init_vmem( persistent_slice, internals_size )
transient = tracked_allocator_init_vmem( transient_slice, internals_size )
temp = tracked_allocator_init_vmem( temp_slice , internals_size )
}
else {
persistent = arena_allocator_init_vmem( persistent_slice )
transient = arena_allocator_init_vmem( transient_slice )
temp = arena_allocator_init_vmem( temp_slice )
}
context.allocator = transient_allocator()
context.temp_allocator = temp_allocator()
context.allocator = persistent_allocator()
context.temp_allocator = transient_allocator()
}
state := new( State, persistent_allocator() )
using state
// Setup General Slab
{
alignment := uint(mem.DEFAULT_ALIGNMENT)
policy : SlabPolicy
policy_ptr := & policy
push( policy_ptr, SlabSizeClass { 16 * Megabyte, 4 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 32 * Megabyte, 16 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 32 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 64 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 128 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 256 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 512 * Kilobyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 1 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 2 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 4 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 8 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 16 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 64 * Megabyte, 32 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 256 * Megabyte, 64 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 256 * Megabyte, 128 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 512 * Megabyte, 256 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 512 * Megabyte, 512 * Megabyte, alignment })
alloc_error : AllocatorError
general_slab, alloc_error = slab_init( policy_ptr, allocator = persistent_allocator() )
verify( alloc_error == .None, "Failed to allocate the general slab allocator" )
}
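As a sketch of how to read one of the entries above: the field order { bucket_capacity, block_size, block_alignment } is inferred from the pool_init call in slab_init_pools further down in this commit, so treat it as an assumption.
example_class := SlabSizeClass { 16 * Megabyte, 4 * Kilobyte, alignment }
// Read as: this size class reserves 16 MiB buckets and serves fixed 4 KiB blocks
// from them at the given alignment; later classes trade larger buckets for larger blocks.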
context.user_ptr = state
input = & input_data[1]
@@ -108,7 +117,7 @@ startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^
// path_squidgy_slimes := strings.concatenate( { Path_Assets, "Squidgy Slimes.ttf" } )
// font_squidgy_slimes = font_load( path_squidgy_slimes, 24.0, "Squidgy_Slime" )
path_firacode := strings.concatenate( { Path_Assets, "FiraCode-Regular.ttf" }, temp_allocator() )
path_firacode := strings.concatenate( { Path_Assets, "FiraCode-Regular.ttf" }, frame_allocator() )
font_firacode = font_load( path_firacode, 24.0, "FiraCode" )
// font_data, read_succeded : = os.read_entire_file( path_rec_mono_semicasual_reg )
@@ -162,7 +171,7 @@ sectr_shutdown :: proc()
// Replay
{
os.close( Memory_App.replay.active_file )
file_close( Memory_App.replay.active_file )
}
font_provider_shutdown()
@@ -171,34 +180,17 @@ sectr_shutdown :: proc()
}
@export
reload :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ Logger )
reload :: proc( persistent_mem, frame_mem, transient_mem, files_buffer_mem : ^VArena, host_logger : ^ Logger )
{
using Memory_App;
block := live_mem.curr_block
live = live_mem
snapshot = snapshot_mem
persistent = persistent_mem
frame = frame_mem
transient = transient_mem
files_buffer = files_buffer_mem
// This is no longer necessary as we have proper base address setting
when true
{
persistent_slice := slice_ptr( block.base, Memory_Persistent_Size )
transient_slice := slice_ptr( memory_after( persistent_slice), Memory_Trans_Temp_Szie )
temp_slice := slice_ptr( memory_after( transient_slice), Memory_Trans_Temp_Szie )
when Use_TrackingAllocator {
persistent = cast( ^ TrackedAllocator ) & persistent_slice[0]
transient = cast( ^ TrackedAllocator ) & transient_slice[0]
temp = cast( ^ TrackedAllocator ) & temp_slice[0]
}
else {
persistent = cast( ^ Arena ) & persistent_slice[0]
transient = cast( ^ Arena ) & transient_slice[0]
temp = cast( ^ Arena ) & temp_slice[0]
}
}
context.allocator = transient_allocator()
context.temp_allocator = temp_allocator()
context.allocator = persistent_allocator()
context.temp_allocator = transient_allocator()
// Procedure Addresses are not preserved on hot-reload. They must be restored for persistent data.
// The only way to alleviate this is to either do custom handles to allocators
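A minimal, self-contained sketch of why that fix-up is needed (not part of the commit; the names Cached and refresh_backing are made up):
package example

import "core:mem"

// An Allocator is a {procedure, data} pair. The data (arena state) lives in
// persistent memory and survives a hot-reload, but the procedure pointer targets
// code in the DLL image that was just replaced, so it must be re-assigned from
// the freshly loaded module.
Cached :: struct {
	backing : mem.Allocator, // captured at startup, stale after reload
}

refresh_backing :: proc( cached : ^Cached, fresh : mem.Allocator ) {
	cached.backing = fresh
}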
@@ -221,8 +213,8 @@ swap :: proc( a, b : ^ $Type ) -> ( ^ Type, ^ Type ) {
@export
tick :: proc( delta_time : f64, delta_ns : Duration ) -> b32
{
context.allocator = transient_allocator()
context.temp_allocator = temp_allocator()
context.allocator = frame_allocator()
context.temp_allocator = transient_allocator()
get_state().frametime_delta_ns = delta_ns
@@ -232,11 +224,6 @@ tick :: proc( delta_time : f64, delta_ns : Duration ) -> b32
}
@export
clean_temp :: proc() {
when Use_TrackingAllocator {
mem.tracking_allocator_clear( & Memory_App.temp.tracker )
}
else {
free_all( temp_allocator() )
}
clean_frame :: proc() {
free_all( frame_allocator() )
}

View File

@@ -16,91 +16,97 @@ Memory_Chunk_Size :: 2 * Gigabyte
Memory_Persistent_Size :: 256 * Megabyte
Memory_Trans_Temp_Szie :: (Memory_Chunk_Size - Memory_Persistent_Size ) / 2
Memory_Base_Address_Persistent :: Terabyte * 1
Memory_Base_Address_Frame :: Memory_Base_Address_Persistent + Memory_Reserve_Persistent
Memory_Base_Address_Persistent :: Terabyte * 1
Memory_Base_Address_Frame :: Memory_Base_Address_Persistent + Memory_Reserve_Persistent * 2
Memory_Base_Address_Transient :: Memory_Base_Address_Frame + Memory_Reserve_Frame * 2
Memory_Base_Address_Files_Buffer :: Memory_Base_Address_Transient + Memory_Reserve_Transient * 2
// TODO(Ed) : This is based on using 32 gigs of my (Ed's) ram as a maximum.
// Later on this has to be adjusted to be ratios based on user's system memory.
Memory_Reserve_Persistent :: 8 * Gigabyte
Memory_Reserve_Frame :: 4 * Gigabyte
Memory_Reserve_Transient :: 4 * Gigabyte
Memory_Reserve_FilesBuffer :: 16 * Gigabyte
// This reserve goes beyond the typical amount of ram the user has.
// TODO(Ed): Set up warnings when the amount is heading toward half the ram size
Memory_Reserve_Persistent :: 32 * Gigabyte
Memory_Reserve_Frame :: 16 * Gigabyte
Memory_Reserve_Transient :: 16 * Gigabyte
Memory_Reserve_FilesBuffer :: 64 * Gigabyte
// TODO(Ed) : These are high for ease of use, they eventually need to be drastically minimized.
Memory_Commit_Initial_Persistent :: 256 * Megabyte
Memory_Commit_Initial_Frame :: 1 * Gigabyte
Memory_Commit_Initial_Transient :: 1 * Gigabyte
Memory_Commit_Initial_Filebuffer :: 2 * Gigabyte
Memory_Commit_Initial_Persistent :: 4 * Kilobyte
Memory_Commit_Initial_Frame :: 4 * Kilobyte
Memory_Commit_Initial_Transient :: 4 * Kilobyte
Memory_Commit_Initial_Filebuffer :: 4 * Kilobyte
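Worked out as a sketch (assuming the usual power-of-two Kilobyte/Gigabyte/Terabyte definitions), the bases above land at:
// Persistent   base = 1 TiB
// Frame        base = 1 TiB + 2 * 32 GiB             = 1 TiB +  64 GiB
// Transient    base = 1 TiB +  64 GiB + 2 * 16 GiB   = 1 TiB +  96 GiB
// Files buffer base = 1 TiB +  96 GiB + 2 * 16 GiB   = 1 TiB + 128 GiB
// The 2x spacing appears to leave a reserve-sized gap after each arena so it can
// grow without colliding with the next one, while only a few pages are committed up front.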
// TODO(Ed): There is an issue with mutex locks on the tracking allocator..
Use_TrackingAllocator :: false
when Use_TrackingAllocator
{
Memory :: struct {
live : virtual.Arena,
snapshot : []u8,
persistent : ^ TrackedAllocator,
transient : ^ TrackedAllocator,
temp : ^ TrackedAllocator,
replay : ReplayState,
logger : Logger,
}
MemorySnapshot :: struct {
persistent : []u8,
frame : []u8,
transient : []u8,
// files_buffer cannot be restored from snapshot
}
else
{
Memory :: struct {
live : virtual.Arena,
snapshot : []u8,
persistent : ^ Arena,
transient : ^ Arena,
temp : ^ Arena,
Memory :: struct {
persistent : ^VArena,
frame : ^VArena,
transient : ^VArena,
files_buffer : ^VArena,
replay : ReplayState,
logger : Logger,
}
// Should only be used for small memory allocation iterations
// Not for large memory env states
snapshot : MemorySnapshot,
replay : ReplayState,
logger : Logger,
}
persistent_allocator :: proc() -> Allocator {
when Use_TrackingAllocator {
return tracked_allocator( Memory_App.persistent )
}
else {
return arena_allocator( Memory_App.persistent )
}
return varena_allocator( Memory_App.persistent )
}
frame_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.frame )
}
transient_allocator :: proc() -> Allocator {
when Use_TrackingAllocator {
return tracked_allocator( Memory_App.transient )
}
else {
return arena_allocator( Memory_App.transient )
}
return varena_allocator( Memory_App.transient )
}
temp_allocator :: proc() -> Allocator {
when Use_TrackingAllocator {
return tracked_allocator( Memory_App.temp )
}
else {
return arena_allocator( Memory_App.temp )
files_buffer_allocator :: proc() -> Allocator {
return varena_allocator( Memory_App.files_buffer )
}
general_slab_allocator :: proc() -> Allocator {
return slab_allocator( get_state().general_slab )
}
// TODO(Ed) : Implement host memory mapping api
save_snapshot :: proc( snapshot : ^MemorySnapshot )
{
// Make sure the snapshot size is able to hold the current size of the arenas
// Grow the files & mapping otherwise
{
// TODO(Ed) : Implement eventually
}
persistent := Memory_App.persistent
mem.copy_non_overlapping( & snapshot.persistent[0], persistent.reserve_start, int(persistent.commit_used) )
frame := Memory_App.frame
mem.copy_non_overlapping( & snapshot.frame[0], frame.reserve_start, int(frame.commit_used) )
transient := Memory_App.transient
mem.copy_non_overlapping( & snapshot.transient[0], transient.reserve_start, int(transient.commit_used) )
}
save_snapshot :: proc( snapshot : [^]u8 ) {
live_ptr := cast( ^ rawptr ) Memory_App.live.curr_block.base
mem.copy_non_overlapping( & snapshot[0], live_ptr, Memory_Chunk_Size )
}
load_snapshot :: proc( snapshot : [^]u8 ) {
live_ptr := cast( ^ rawptr ) Memory_App.live.curr_block.base
mem.copy_non_overlapping( live_ptr, snapshot, Memory_Chunk_Size )
// TODO(Ed) : Implement host memory mapping api
load_snapshot :: proc( snapshot : ^MemorySnapshot ) {
persistent := Memory_App.persistent
mem.copy_non_overlapping( persistent.reserve_start, & snapshot.persistent[0], int(persistent.commit_used) )
frame := Memory_App.frame
mem.copy_non_overlapping( frame.reserve_start, & snapshot.frame[0], int(frame.commit_used) )
transient := Memory_App.transient
mem.copy_non_overlapping( transient.reserve_start, & snapshot.transient[0], int(transient.commit_used) )
}
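A hedged sketch of sizing the MemorySnapshot slices before the first save_snapshot, in terms of the types above; snapshot_prepare is not an existing procedure here, and allocating from the files buffer arena is an assumption:
snapshot_prepare :: proc( snapshot : ^MemorySnapshot ) -> AllocatorError {
	error : AllocatorError
	// Each slice must hold at least the arena's currently committed usage,
	// since save_snapshot copies commit_used bytes out of each arena.
	snapshot.persistent, error = make( []u8, int(Memory_App.persistent.commit_used), files_buffer_allocator() )
	if error != .None do return error
	snapshot.frame, error = make( []u8, int(Memory_App.frame.commit_used), files_buffer_allocator() )
	if error != .None do return error
	snapshot.transient, error = make( []u8, int(Memory_App.transient.commit_used), files_buffer_allocator() )
	return error
}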
// TODO(Ed) : Implement usage of this
MemoryConfig :: struct {
reserve_persistent : uint,
reserve_frame : uint,
@@ -124,6 +130,8 @@ AppConfig :: struct {
}
State :: struct {
general_slab : Slab,
font_provider_data : FontProviderData,
input_data : [2]InputState,
@@ -157,12 +165,7 @@ State :: struct {
}
get_state :: proc "contextless" () -> ^ State {
when Use_TrackingAllocator {
return cast( ^ State ) raw_data( Memory_App.persistent.backing.data )
}
else {
return cast( ^ State ) raw_data( Memory_App.persistent. data )
}
return cast( ^ State ) Memory_App.persistent.reserve_start
}
AppWindow :: struct {

View File

@@ -56,7 +56,9 @@ memory_after :: #force_inline proc "contextless" ( slice : []byte ) -> ( ^ byte)
}
memory_after_header :: #force_inline proc "contextless" ( header : ^($ Type) ) -> ( [^]byte) {
return cast( [^]byte) (cast( [^]Type) header)[ 1:]
// return cast( [^]byte) (cast( [^]Type) header)[ 1:]
result := cast( [^]byte) ptr_offset( header, size_of(Type) )
return result
}
@(require_results)

View File

@@ -67,7 +67,9 @@ pool_init :: proc (
pool.block_size = block_size
pool.alignment = alignment
alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
if bucket_reserve_num > 0 {
alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
}
pool.current_bucket = pool.bucket_list.first
return
}

View File

@@ -63,6 +63,12 @@ Slab :: struct {
using header : ^SlabHeader,
}
slab_allocator :: proc( slab : Slab ) -> ( allocator : Allocator ) {
allocator.procedure = slab_allocator_proc
allocator.data = slab.header
return
}
slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator ) -> ( slab : Slab, alloc_error : AllocatorError )
{
header_size :: size_of( SlabHeader )
@@ -83,7 +89,7 @@ slab_init_pools :: proc ( using self : Slab, bucket_reserve_num : uint = 0 ) ->
for id in 0 ..< policy.idx {
using size_class := policy.items[id]
pool, alloc_error := pool_init( block_size, block_alignment, bucket_capacity, bucket_reserve_num, backing )
pool, alloc_error := pool_init( block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing )
if alloc_error != .None do return alloc_error
push( & self.pools, pool )

View File

@@ -41,8 +41,8 @@ VArena :: struct {
varena_default_growth_policy :: proc( commit_used, committed, reserved, requested_size : uint ) -> uint
{
@static commit_limit := uint(2 * Megabyte)
@static increment := uint(Megabyte)
@static commit_limit := uint(1 * Megabyte)
@static increment := uint(16 * Kilobyte)
page_size := uint(virtual_get_page_size())
if increment < Gigabyte && committed > commit_limit {
@@ -72,8 +72,8 @@ varena_init :: proc( base_address : uintptr, to_reserve, to_commit : uint,
{
page_size := uint(virtual_get_page_size())
verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
verify( to_reserve > page_size, "Attempted to reserve less than a page size" )
verify( to_commit > page_size, "Attempted to commit less than a page size")
verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
verify( to_commit >= page_size, "Attempted to commit less than a page size")
verify( to_reserve >= to_commit, "Attempted to commit more than there is to reserve" )
vmem : VirtualMemoryRegion
@@ -114,7 +114,7 @@ varena_alloc :: proc( using self : ^VArena,
sync.mutex_guard( & mutex )
alignment_offset := uint(0)
current_offset := cast(uintptr) reserve_start[ committed:]
current_offset := cast(uintptr) reserve_start[ commit_used:]
mask := uintptr(alignment - 1)
if current_offset & mask != 0 {
@@ -128,24 +128,30 @@ varena_alloc :: proc( using self : ^VArena,
}
to_be_used : uint
to_be_used, overflow_signal = intrinsics.overflow_add( commit_used, requested_size )
to_be_used, overflow_signal = intrinsics.overflow_add( commit_used, size_to_allocate )
if overflow_signal || to_be_used > reserved {
alloc_error = .Out_Of_Memory
return
}
if needs_more_committed := committed - commit_used < size; needs_more_committed
header_offset := uint( uintptr(reserve_start) - uintptr(base_address) )
commit_left := committed - commit_used - header_offset
needs_more_committed := commit_left < size_to_allocate
if needs_more_committed
{
next_commit_size := growth_policy( commit_used, committed, reserved, requested_size )
next_commit_size := growth_policy( commit_used, committed, reserved, size_to_allocate )
alloc_error = virtual_commit( vmem, next_commit_size )
if alloc_error != .None {
return
}
}
data = slice_ptr( reserve_start[ commit_used:], int(requested_size) )
commit_used += requested_size
alloc_error = .None
aligned_start := uintptr(self.commit_used + alignment_offset)
data_ptr := rawptr(uintptr( reserve_start ) + aligned_start)
data = byte_slice( data_ptr, int(requested_size) )
commit_used += size_to_allocate
alloc_error = .None
if zero_memory {
slice.zero( data )
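A worked example of the commit check above, as a sketch with made-up numbers (the real header size depends on VirtualMemoryRegionHeader):
// Say the arena committed one 4 KiB page up front (matching Memory_Commit_Initial_* above),
// the region header occupies the first 64 bytes of the reservation, and nothing has
// been allocated yet:
//   header_offset = reserve_start - base_address            = 64
//   commit_left   = committed - commit_used - header_offset = 4096 - 0 - 64 = 4032
// A request with size_to_allocate = 16 KiB then trips `commit_left < size_to_allocate`,
// so growth_policy picks the next commit size and virtual_commit extends the range.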

View File

@@ -36,8 +36,9 @@ virtual_commit :: proc "contextless" ( using vmem : VirtualMemoryRegion, size :
return .None
}
page_size := uint(virtual_get_page_size())
to_commit := memory_align_formula( size, page_size )
header_size := size_of(VirtualMemoryRegionHeader)
page_size := uint(virtual_get_page_size())
to_commit := memory_align_formula( size, page_size )
alloc_error = core_virtual.commit( base_address, to_commit )
if alloc_error != .None {
@@ -98,14 +99,17 @@ when ODIN_OS != OS_Type.Windows {
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
header_size := size_of(VirtualMemoryRegionHeader)
// The base address is ignored here; add an OS-specific impl if you want it.
data : []byte
data, alloc_error := core_virtual.reserve( size ) or_return
data, alloc_error := core_virtual.reserve( header_size + size ) or_return
alloc_error := core_virtual.commit( header_size )
vmem.base_address := cast( ^VirtualMemoryRegionHeader ) raw_data(data)
vmem.reserve_start = memory_after_header(vmem.base_address)
vmem.reserved = len(data)
vmem.committed = 0
vmem.committed = header_size
return
}

View File

@@ -4,11 +4,14 @@ since I want full control of it for debug purposes.
*/
package sectr
import "core:mem/virtual"
import core_virtual "core:mem/virtual"
import win32 "core:sys/windows"
when ODIN_OS == OS_Type.Windows {
WIN32_ERROR_INVALID_ADDRESS :: 487
WIN32_ERROR_COMMITMENT_LIMIT :: 1455
@(require_results)
virtual__reserve ::
proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
@@ -20,11 +23,28 @@ proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMe
alloc_error = .Out_Of_Memory
return
}
result = win32.VirtualAlloc( rawptr(base_address), header_size, win32.MEM_COMMIT, win32.PAGE_READWRITE )
if result == nil
{
switch err := win32.GetLastError(); err
{
case 0:
alloc_error = .Invalid_Argument
return
case WIN32_ERROR_INVALID_ADDRESS, WIN32_ERROR_COMMITMENT_LIMIT:
alloc_error = .Out_Of_Memory
return
}
alloc_error = .Out_Of_Memory
return
}
vmem.base_address = cast(^VirtualMemoryRegionHeader) result
vmem.reserve_start = memory_after_header(vmem.base_address)
vmem.reserved = size
vmem.committed = 0
vmem.committed = header_size
alloc_error = .None
return
}

View File

@@ -31,6 +31,7 @@ import fmt_io "core:fmt"
import "core:log"
import "core:mem"
Allocator :: mem.Allocator
AllocatorError :: mem.Allocator_Error
TrackingAllocator :: mem.Tracking_Allocator
import "core:mem/virtual"
Arena :: virtual.Arena
@@ -59,6 +60,8 @@ import "core:time"
thread_sleep :: time.sleep
import rl "vendor:raylib"
import sectr "../."
VArena :: sectr.VArena
varena_init :: sectr.varena_init
fatal :: sectr.fatal
file_is_locked :: sectr.file_is_locked
file_copy_sync :: sectr.file_copy_sync
@@ -92,39 +95,41 @@ when ODIN_OS == runtime.Odin_OS_Type.Windows
}
RuntimeState :: struct {
running : b32,
memory : VMemChunk,
sectr_api : sectr.ModuleAPI,
running : b32,
client_memory : ClientMemory,
sectr_api : sectr.ModuleAPI,
}
VMemChunk :: struct {
og_allocator : Allocator,
og_temp_allocator : Allocator,
host_persistent : TrackedAllocator,
host_transient : TrackedAllocator,
sectr_live : Arena,
sectr_snapshot : []u8
ClientMemory :: struct {
persistent : VArena,
frame : VArena,
transient : VArena,
files_buffer : VArena,
}
setup_memory :: proc() -> VMemChunk
setup_memory :: proc() -> ClientMemory
{
memory : VMemChunk; using memory
Host_Persistent_Size :: 32 * Megabyte
Host_Transient_Size :: 96 * Megabyte
Internals_Size :: 4 * Megabyte
host_persistent = tracked_allocator_init( Host_Persistent_Size, Internals_Size )
host_transient = tracked_allocator_init( Host_Transient_Size, Internals_Size )
memory : ClientMemory; using memory
// Setup the static arena for the entire application
{
base_address : rawptr = transmute( rawptr) u64(sectr.Memory_Base_Address)
result := arena_init_static( & sectr_live, base_address, sectr.Memory_Chunk_Size, sectr.Memory_Chunk_Size )
verify( result == runtime.Allocator_Error.None, "Failed to allocate live memory for the sectr module" )
alloc_error : AllocatorError
persistent, alloc_error = varena_init( sectr.Memory_Base_Address_Persistent, sectr.Memory_Reserve_Persistent, sectr.Memory_Commit_Initial_Persistent, nil )
verify( alloc_error == .None, "Failed to allocate persistent virtual arena for the sectr module")
frame, alloc_error = varena_init( sectr.Memory_Base_Address_Frame, sectr.Memory_Reserve_Frame, sectr.Memory_Commit_Initial_Frame, nil )
verify( alloc_error == .None, "Failed to allocate frame virtual arena for the sectr module")
transient, alloc_error = varena_init( sectr.Memory_Base_Address_Transient, sectr.Memory_Reserve_Transient, sectr.Memory_Commit_Initial_Transient, nil )
verify( alloc_error == .None, "Failed to allocate transient virtual arena for the sectr module")
files_buffer, alloc_error = varena_init( sectr.Memory_Base_Address_Files_Buffer, sectr.Memory_Reserve_FilesBuffer, sectr.Memory_Commit_Initial_Filebuffer, nil )
verify( alloc_error == .None, "Failed to allocate files buffer virtual arena for the sectr module")
}
// Setup memory mapped io for snapshots
// TODO(Ed) : We cannot do this with our growing arenas. Instead we need to map on demand for saving and loading
when false
{
snapshot_file, open_error := file_open( Path_Snapshot, FileFlag_ReadWrite | FileFlag_Create )
verify( open_error == os.ERROR_NONE, "Failed to open snapshot file for the sectr module" )
@@ -142,9 +147,6 @@ setup_memory :: proc() -> VMemChunk
file_close(snapshot_file)
}
// Reassign default allocators for host
memory.og_allocator = context.allocator
memory.og_temp_allocator = context.temp_allocator
log("Memory setup")
return memory;
}
@@ -168,18 +170,18 @@ load_sectr_api :: proc( version_id : i32 ) -> (loaded_module : sectr.ModuleAPI)
return
}
startup := cast( type_of( sectr.startup )) os_lib_get_proc( lib, "startup" )
shutdown := cast( type_of( sectr.sectr_shutdown )) os_lib_get_proc( lib, "sectr_shutdown" )
reload := cast( type_of( sectr.reload )) os_lib_get_proc( lib, "reload" )
tick := cast( type_of( sectr.tick )) os_lib_get_proc( lib, "tick" )
clean_temp := cast( type_of( sectr.clean_temp )) os_lib_get_proc( lib, "clean_temp" )
startup := cast( type_of( sectr.startup )) os_lib_get_proc( lib, "startup" )
shutdown := cast( type_of( sectr.sectr_shutdown )) os_lib_get_proc( lib, "sectr_shutdown" )
reload := cast( type_of( sectr.reload )) os_lib_get_proc( lib, "reload" )
tick := cast( type_of( sectr.tick )) os_lib_get_proc( lib, "tick" )
clean_frame := cast( type_of( sectr.clean_frame )) os_lib_get_proc( lib, "clean_frame" )
missing_symbol : b32 = false
if startup == nil do log("Failed to load sectr.startup symbol", LogLevel.Warning )
if shutdown == nil do log("Failed to load sectr.shutdown symbol", LogLevel.Warning )
if reload == nil do log("Failed to load sectr.reload symbol", LogLevel.Warning )
if tick == nil do log("Failed to load sectr.tick symbol", LogLevel.Warning )
if clean_temp == nil do log("Failed to load sectr.clean_temp symbol", LogLevel.Warning )
if startup == nil do log("Failed to load sectr.startup symbol", LogLevel.Warning )
if shutdown == nil do log("Failed to load sectr.shutdown symbol", LogLevel.Warning )
if reload == nil do log("Failed to load sectr.reload symbol", LogLevel.Warning )
if tick == nil do log("Failed to load sectr.tick symbol", LogLevel.Warning )
if clean_frame == nil do log("Failed to load sectr.clean_frame symbol", LogLevel.Warning )
if missing_symbol {
runtime.debug_trap()
return
@@ -191,11 +193,11 @@ load_sectr_api :: proc( version_id : i32 ) -> (loaded_module : sectr.ModuleAPI)
write_time = write_time,
lib_version = version_id,
startup = startup,
shutdown = shutdown,
reload = reload,
tick = tick,
clean_temp = clean_temp,
startup = startup,
shutdown = shutdown,
reload = reload,
tick = tick,
clean_frame = clean_frame,
}
return
}
@@ -208,7 +210,7 @@ unload_sectr_api :: proc( module : ^ sectr.ModuleAPI )
log("Unloaded sectr API")
}
sync_sectr_api :: proc( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, logger : ^ Logger )
sync_sectr_api :: proc( sectr_api : ^sectr.ModuleAPI, memory : ^ClientMemory, logger : ^Logger )
{
if write_time, result := os.last_write_time_by_name( Path_Sectr_Module );
result == os.ERROR_NONE && sectr_api.write_time != write_time
@@ -223,7 +225,12 @@ sync_sectr_api :: proc( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, log
sectr_api ^ = load_sectr_api( version_id )
verify( sectr_api.lib_version != 0, "Failed to hot-reload the sectr module" )
sectr_api.reload( memory.sectr_live, memory.sectr_snapshot, logger )
sectr_api.reload(
& memory.persistent,
& memory.frame,
& memory.transient,
& memory.files_buffer,
logger )
}
}
@@ -260,16 +267,10 @@ main :: proc()
log( to_str(builder) )
}
// Basic Giant VMem Block
{
// By default odin uses a growing arena for the runtime context
// We're going to make it static for the prototype and separate it from the 'project' memory.
// Then point the engine's context allocator at it.
// The project's context will use its own subsection arena allocator.
memory = setup_memory()
}
memory := setup_memory()
// TODO(Ed): Cannot use the manually created allocators for the host. Not sure why
// Something is wrong with the tracked_allocator init
// context.allocator = tracked_allocator( & memory.host_persistent )
// context.temp_allocator = tracked_allocator( & memory.host_transient )
@@ -281,7 +282,12 @@ main :: proc()
running = true;
sectr_api = sectr_api
sectr_api.startup( memory.sectr_live, memory.sectr_snapshot, & logger )
sectr_api.startup(
& memory.persistent,
& memory.frame,
& memory.transient,
& memory.files_buffer,
& logger )
delta_ns : Duration
@@ -294,7 +300,7 @@ main :: proc()
sync_sectr_api( & sectr_api, & memory, & logger )
running = sectr_api.tick( duration_seconds( delta_ns ), delta_ns )
sectr_api.clean_temp()
sectr_api.clean_frame()
delta_ns = time.tick_lap_time( & start_tick )
}

View File

@@ -1,140 +0,0 @@
// TODO(Ed): Move this to the grime module when it's made
// This was made because odin didn't expose the base_address param that VirtualAlloc allows.
package host
import "base:runtime"
import "core:mem"
import "core:mem/virtual"
import win32 "core:sys/windows"
when ODIN_OS == runtime.Odin_OS_Type.Windows {
@(private="file")
virtual_Platform_Memory_Block :: struct {
block: virtual.Memory_Block,
committed: uint,
reserved: uint,
}
@(private="file", require_results)
memory_align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint {
result := size + align - 1
return result - result % align
}
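A couple of worked values for the rounding formula above, as a sketch with page_size = 4096:
// memory_align_formula(5000, 4096): result = 5000 + 4095 = 9095; 9095 - 9095 % 4096 = 8192
// memory_align_formula(4096, 4096): result = 8191;              8191 - 8191 % 4096 = 4096
// memory_align_formula(   0, 4096): result = 4095;              4095 - 4095 % 4096 = 0
// i.e. size is rounded up to the next multiple of align; already-aligned sizes are unchanged.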
@(private="file")
win32_reserve_with_base_address :: proc "contextless" (base_address : rawptr, size: uint) -> (data: []byte, err: virtual.Allocator_Error) {
result := win32.VirtualAlloc(base_address, size, win32.MEM_RESERVE, win32.PAGE_READWRITE)
if result == nil {
err = .Out_Of_Memory
return
}
data = ([^]byte)(result)[:size]
return
}
@(private="file")
platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint, base_address : rawptr) ->
(block: ^virtual_Platform_Memory_Block, err: virtual.Allocator_Error)
{
to_commit, to_reserve := to_commit, to_reserve
to_reserve = max(to_commit, to_reserve)
total_to_reserved := max(to_reserve, size_of( virtual_Platform_Memory_Block))
to_commit = clamp(to_commit, size_of( virtual_Platform_Memory_Block), total_to_reserved)
data := win32_reserve_with_base_address(base_address, total_to_reserved) or_return
virtual.commit(raw_data(data), to_commit)
block = (^virtual_Platform_Memory_Block)(raw_data(data))
block.committed = to_commit
block.reserved = to_reserve
return
}
@(private="file")
platform_memory_commit :: proc "contextless" (block: ^virtual_Platform_Memory_Block, to_commit: uint) -> (err: virtual.Allocator_Error) {
if to_commit < block.committed {
return nil
}
if to_commit > block.reserved {
return .Out_Of_Memory
}
virtual.commit(block, to_commit) or_return
block.committed = to_commit
return nil
}
@(private="file", require_results)
memory_block_alloc :: proc(committed, reserved: uint, base_address : rawptr,
alignment : uint = 0,
flags : virtual.Memory_Block_Flags = {}
) -> (block: ^virtual.Memory_Block, err: virtual.Allocator_Error)
{
page_size := virtual.DEFAULT_PAGE_SIZE
assert(mem.is_power_of_two(uintptr(page_size)))
committed := committed
reserved := reserved
committed = memory_align_formula(committed, page_size)
reserved = memory_align_formula(reserved, page_size)
committed = clamp(committed, 0, reserved)
total_size := uint(reserved + max(alignment, size_of( virtual_Platform_Memory_Block)))
base_offset := uintptr(max(alignment, size_of( virtual_Platform_Memory_Block)))
protect_offset := uintptr(0)
do_protection := false
if .Overflow_Protection in flags { // overflow protection
rounded_size := reserved
total_size = uint(rounded_size + 2*page_size)
base_offset = uintptr(page_size + rounded_size - uint(reserved))
protect_offset = uintptr(page_size + rounded_size)
do_protection = true
}
pmblock := platform_memory_alloc(0, total_size, base_address) or_return
pmblock.block.base = ([^]byte)(pmblock)[base_offset:]
platform_memory_commit(pmblock, uint(base_offset) + committed) or_return
// Should be zeroed
assert(pmblock.block.used == 0)
assert(pmblock.block.prev == nil)
if do_protection {
virtual.protect(([^]byte)(pmblock)[protect_offset:], page_size, virtual.Protect_No_Access)
}
pmblock.block.committed = committed
pmblock.block.reserved = reserved
return &pmblock.block, nil
}
// This is the same as odin's virtual library, except I use my own allocation implementation to set the address space base.
@(require_results)
arena_init_static :: proc(arena: ^virtual.Arena, base_address : rawptr,
reserved : uint = virtual.DEFAULT_ARENA_STATIC_RESERVE_SIZE,
commit_size : uint = virtual.DEFAULT_ARENA_STATIC_COMMIT_SIZE
) -> (err: virtual.Allocator_Error)
{
arena.kind = .Static
arena.curr_block = memory_block_alloc(commit_size, reserved, base_address, {}) or_return
arena.total_used = 0
arena.total_reserved = arena.curr_block.reserved
return
}
/* END OF: when ODIN_OS == runtime.Odin_OS_Type.Windows */ }
else
{
// Fallback to regular init_static impl for other platforms for now.
arena_init_static :: proc(arena: ^virtual.Arena, base_address : rawptr,
reserved : uint = virtual.DEFAULT_ARENA_STATIC_RESERVE_SIZE,
commit_size : uint = virtual.DEFAULT_ARENA_STATIC_COMMIT_SIZE
) -> (err: virtual.Allocator_Error) {
return virtual.arena_init_static( arena, reserved, commit_size )
}
}

View File

@@ -354,7 +354,7 @@ play_input :: proc( replay_file : os.Handle, input : ^ InputState ) {
total_read, result_code := file_read( replay_file, raw_data )
if result_code == os.ERROR_HANDLE_EOF {
file_rewind( replay_file )
load_snapshot( & Memory_App.snapshot[0] )
load_snapshot( & Memory_App.snapshot )
}
}

View File

@@ -11,7 +11,7 @@ debug_draw_text :: proc( content : string, pos : Vec2, size : f32, color : rl.Co
if len( content ) == 0 {
return
}
runes, alloc_error := to_runes( content, context.temp_allocator )
runes, alloc_error := to_runes( content, frame_allocator() )
// runes, alloc_error := to_runes( content, context.temp_allocator )
// verify( alloc_error == AllocatorError.None, "Failed to temp allocate runes" )
@@ -40,7 +40,7 @@ debug_draw_text_world :: proc( content : string, pos : Vec2, size : f32, color :
if len( content ) == 0 {
return
}
runes, alloc_error := to_runes( content, context.temp_allocator )
runes, alloc_error := to_runes( content, frame_allocator() )
verify( alloc_error == AllocatorError.None, "Failed to temp allocate runes" )
font := font

View File

@@ -86,11 +86,13 @@ update :: proc( delta_time : f64 ) -> b32
}
//region Input Replay
// TODO(Ed) : Implement host memory mapping api
when false
{
if debug_actions.record_replay { #partial switch replay.mode
{
case ReplayMode.Off : {
save_snapshot( & Memory_App.snapshot[0] )
save_snapshot( & Memory_App.snapshot )
replay_recording_begin( Path_Input_Replay )
}
case ReplayMode.Record : {
@@ -102,21 +104,21 @@ update :: proc( delta_time : f64 ) -> b32
{
case ReplayMode.Off : {
if ! file_exists( Path_Input_Replay ) {
save_snapshot( & Memory_App.snapshot[0] )
save_snapshot( & Memory_App.snapshot )
replay_recording_begin( Path_Input_Replay )
}
else {
load_snapshot( & Memory_App.snapshot[0] )
load_snapshot( & Memory_App.snapshot )
replay_playback_begin( Path_Input_Replay )
}
}
case ReplayMode.Playback : {
replay_playback_end()
load_snapshot( & Memory_App.snapshot[0] )
load_snapshot( & Memory_App.snapshot )
}
case ReplayMode.Record : {
replay_recording_end()
load_snapshot( & Memory_App.snapshot[0] )
load_snapshot( & Memory_App.snapshot )
replay_playback_begin( Path_Input_Replay )
}
}}