More edge case testing on the multi-threading; prepared to start moving heavy code back
@@ -10,6 +10,7 @@ package host
//region HOST RUNTIME

load_client_api :: proc(version_id: int) -> (loaded_module: Client_API) {
profile(#procedure)
using loaded_module
// Make sure we have a dll to work with
file_io_err: OS_Error; write_time, file_io_err = file_last_write_time_by_name("sectr.dll")
@@ -17,12 +18,13 @@ load_client_api :: proc(version_id: int) -> (loaded_module: Client_API) {
panic_contextless( "Could not resolve the last write time for sectr")
}
//TODO(Ed): Let's try to minimize this...
thread_sleep( Millisecond * 50 )
thread_sleep( Millisecond * 25 )
// Get the live dll loaded up
file_copy_sync( Path_Sectr_Module, Path_Sectr_Live_Module, allocator = context.temp_allocator )
did_load: bool; lib, did_load = os_lib_load( Path_Sectr_Live_Module )
if ! did_load do panic( "Failed to load the sectr module.")
startup = cast( type_of( host_memory.client_api.startup)) os_lib_get_proc(lib, "startup")
shutdown = cast( type_of( host_memory.client_api.shutdown)) os_lib_get_proc(lib, "sectr_shutdown")
tick_lane_startup = cast( type_of( host_memory.client_api.tick_lane_startup)) os_lib_get_proc(lib, "tick_lane_startup")
job_worker_startup = cast( type_of( host_memory.client_api.job_worker_startup)) os_lib_get_proc(lib, "job_worker_startup")
hot_reload = cast( type_of( host_memory.client_api.hot_reload)) os_lib_get_proc(lib, "hot_reload")
@@ -30,6 +32,7 @@ load_client_api :: proc(version_id: int) -> (loaded_module: Client_API) {
clean_frame = cast( type_of( host_memory.client_api.clean_frame)) os_lib_get_proc(lib, "clean_frame")
jobsys_worker_tick = cast( type_of( host_memory.client_api.jobsys_worker_tick)) os_lib_get_proc(lib, "jobsys_worker_tick")
if startup == nil do panic("Failed to load sectr.startup symbol" )
if shutdown == nil do panic("Failed to load sectr.shutdown symbol" )
if tick_lane_startup == nil do panic("Failed to load sectr.tick_lane_startup symbol" )
if job_worker_startup == nil do panic("Failed to load sectr.job_worker_startup symbol" )
if hot_reload == nil do panic("Failed to load sectr.hot_reload symbol" )
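The cast + nil-check pairs above repeat the same pattern per symbol. A minimal sketch of a helper that condenses it, assuming only what the hunk shows (os_lib_get_proc returning nil for a missing export); the helper name, the Lib_Handle placeholder type, and the $Proc_Type parameter are hypothetical, not part of this commit:

load_symbol :: proc(lib: Lib_Handle, name: string, $Proc_Type: typeid) -> Proc_Type {
	// Resolve the export and cast it to the expected proc signature.
	symbol := cast(Proc_Type) os_lib_get_proc(lib, name)
	if symbol == nil do panic("Failed to load sectr symbol")
	return symbol
}

// Hypothetical usage: startup = load_symbol(lib, "startup", type_of(host_memory.client_api.startup))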
@@ -69,6 +72,7 @@ main :: proc()
host_memory.spall_context = spall_context_create(Path_Sectr_Spall_Record)
grime_set_profiler_module_context(& host_memory.spall_context)
thread_memory.spall_buffer = spall_buffer_create(thread_memory.spall_buffer_backing[:], cast(u32) thread_memory.system_ctx.id)
grime_set_profiler_thread_buffer(& thread_memory.spall_buffer)
}
// Setup the logger
path_logger_finalized: string
@@ -88,8 +92,8 @@ main :: proc()
timestamp := str_pfmt_buffer( fmt_backing, "%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
host_memory.path_logger_finalized = str_pfmt_buffer( fmt_backing, "%s/sectr_%v.log", Path_Logs, timestamp)
}
logger_init( & host_memory.logger, "Sectr Host", str_pfmt_buffer( fmt_backing, "%s/sectr.log", Path_Logs ) )
context.logger = to_odin_logger( & host_memory.logger )
logger_init( & host_memory.host_logger, "Sectr Host", str_pfmt_buffer( fmt_backing, "%s/sectr.log", Path_Logs ) )
context.logger = to_odin_logger( & host_memory.host_logger )
{
// Log System Context
builder := strbuilder_from_bytes( fmt_backing )
@@ -99,7 +103,7 @@ main :: proc()
}
free_all(context.temp_allocator)
}
context.logger = to_odin_logger( & host_memory.logger )
context.logger = to_odin_logger( & host_memory.host_logger )
// Load the Environment API for the first time
{
host_memory.client_api = load_client_api( 1 )
@@ -107,22 +111,23 @@ main :: proc()
}
// Client API Startup
host_memory.client_api.startup(& host_memory, & thread_memory)
// Start the tick lanes
{
profile("thread_wide_startup")
assert(thread_memory.id == .Master_Prepper)
host_memory.tick_running = true
host_memory.tick_lanes = THREAD_TICK_LANES
barrier_init(& host_memory.lane_sync, THREAD_TICK_LANES)
when THREAD_TICK_LANES > 1 {
for id in 1 ..= (THREAD_TICK_LANES - 1) {
launch_tick_lane_thread(cast(WorkerID) id)
{
profile("Tick Lanes")
host_memory.tick_running = true
host_memory.tick_lanes = THREAD_TICK_LANES
barrier_init(& host_memory.lane_sync, THREAD_TICK_LANES)
when THREAD_TICK_LANES > 1 {
for id in 1 ..= (THREAD_TICK_LANES - 1) {
launch_tick_lane_thread(cast(WorkerID) id)
}
}
}
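Both the old and new blocks above size lane_sync to THREAD_TICK_LANES while spawning only THREAD_TICK_LANES - 1 threads: the calling Master_Prepper thread acts as lane 0 and meets the spawned lanes at the barrier inside host_tick_lane. A minimal sketch of that relationship, reusing the diff's own names (the wrapper proc and its comments are illustrative, not this commit's code):

start_tick_lanes :: proc() {
	// The barrier counts every lane, including the thread calling this proc.
	barrier_init(& host_memory.lane_sync, THREAD_TICK_LANES)
	when THREAD_TICK_LANES > 1 {
		// Lane 0 is this thread, so only lanes 1 .. THREAD_TICK_LANES-1 need new threads.
		for id in 1 ..= (THREAD_TICK_LANES - 1) {
			launch_tick_lane_thread(cast(WorkerID) id)
		}
	}
	host_tick_lane() // lane 0 joins the spawned lanes at barrier_wait(& host_memory.lane_sync)
}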
grime_set_profiler_module_context(& host_memory.spall_context)
grime_set_profiler_thread_buffer(& thread_memory.spall_buffer)
// Job System Setup
{
profile("Job System")
host_memory.job_system.running = true
host_memory.job_system.worker_num = THREAD_JOB_WORKERS
for & list in host_memory.job_system.job_lists {
@@ -131,6 +136,7 @@ main :: proc()
// Determine number of physical cores
barrier_init(& host_memory.job_hot_reload_sync, THREAD_JOB_WORKERS + 1)
for id in THREAD_JOB_WORKER_ID_START ..< THREAD_JOB_WORKER_ID_END {
log_print_fmt("Spawned job worker: %v", cast(WorkerID) id)
worker_thread := thread_create(host_job_worker_entrypoint, .Normal)
worker_thread.user_index = int(id)
host_memory.threads[worker_thread.user_index] = worker_thread
@@ -141,18 +147,21 @@ main :: proc()
}
host_tick_lane()

profile_begin("Host Shutdown")
if thread_memory.id == .Master_Prepper {
thread_join_multiple(.. host_memory.threads[1:THREAD_TICK_LANES + THREAD_JOB_WORKERS])
}

unload_client_api( & host_memory.client_api )

// End profiling
spall_context_destroy( & host_memory.spall_context )

log_print("Successfully closed")
file_close( host_memory.logger.file )
file_close( host_memory.host_logger.file )
file_rename( str_pfmt_tmp( "%s/sectr.log", Path_Logs), host_memory.path_logger_finalized )
profile_end()

// End profiling
spall_buffer_destroy(& host_memory.spall_context, & thread_memory.spall_buffer)
spall_context_destroy( & host_memory.spall_context )
}
launch_tick_lane_thread :: proc(id : WorkerID) {
assert_contextless(thread_memory.id == .Master_Prepper)
@@ -183,12 +192,13 @@ host_tick_lane :: proc()
for ; sync_load(& host_memory.tick_running, .Relaxed);
{
profile("Host Tick")
leader := barrier_wait(& host_memory.lane_sync)

running: b64 = host_memory.client_api.tick_lane(duration_seconds(delta_ns), delta_ns) == false
if thread_memory.id == .Master_Prepper {
sync_store(& host_memory.tick_running, running, .Release)
}
// host_memory.client_api.clean_frame()
host_memory.client_api.clean_frame()

delta_ns = time_tick_lap_time( & host_tick )
host_tick = time_tick_now()
@@ -202,10 +212,25 @@ host_lane_shutdown :: proc()
{
profile(#procedure)
if thread_memory.id == .Master_Prepper {
jobs_enqueued := true
if jobs_enqueued == false {
// debug_trap()
}
for ; jobs_enqueued; {
jobs_enqueued = false
jobs_enqueued |= host_memory.job_system.job_lists[.Normal].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.Low].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.High].head != nil
if jobs_enqueued == false {
// debug_trap()
}
}
sync_store(& host_memory.job_system.running, false, .Release)
}
if thread_memory.id != .Master_Prepper {
spall_buffer_destroy( & host_memory.spall_context, & thread_memory.spall_buffer )
}
leader := barrier_wait(& host_memory.lane_job_sync)
// spall_buffer_destroy( & host_memory.spall_context, & thread_memory.spall_buffer )
}
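The Master_Prepper branch above spins until all three priority queues are empty before clearing job_system.running, so workers are never told to stop while work is still queued; only then does every lane meet at lane_job_sync. A condensed sketch of that drain-then-stop ordering, using the diff's own names (the wrapper proc is illustrative, not this commit's code):

drain_jobs_then_stop :: proc() {
	jobs_enqueued := true
	for jobs_enqueued {
		// Re-sample every queue; the loop only exits once all heads are nil.
		jobs_enqueued  = host_memory.job_system.job_lists[.Normal].head != nil
		jobs_enqueued |= host_memory.job_system.job_lists[.Low].head    != nil
		jobs_enqueued |= host_memory.job_system.job_lists[.High].head   != nil
	}
	// Publish the stop flag only after the queues have drained.
	sync_store(& host_memory.job_system.running, false, .Release)
}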

host_job_worker_entrypoint :: proc(worker_thread: ^SysThread)
@@ -218,9 +243,13 @@ host_job_worker_entrypoint :: proc(worker_thread: ^SysThread)
host_memory.client_api.tick_lane_startup(& thread_memory)
grime_set_profiler_thread_buffer(& thread_memory.spall_buffer)
}
jobs_enqueued := false
jobs_enqueued |= host_memory.job_system.job_lists[.Normal].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.Low].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.High].head != nil
delta_ns: Duration
host_tick := time_tick_now()
for ; sync_load(& host_memory.job_system.running, .Relaxed);
for ; jobs_enqueued || sync_load(& host_memory.job_system.running, .Relaxed);
{
profile("Host Job Tick")

@@ -229,7 +258,12 @@ host_job_worker_entrypoint :: proc(worker_thread: ^SysThread)
delta_ns = time_tick_lap_time( & host_tick )
host_tick = time_tick_now()

if sync_load(& host_memory.client_api_hot_reloaded, .Acquire) {
jobs_enqueued = false
jobs_enqueued |= host_memory.job_system.job_lists[.Normal].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.Low].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.High].head != nil

if jobs_enqueued == false && sync_load(& host_memory.client_api_hot_reloaded, .Acquire) {
// Signals to main thread when all jobs have drained.
leader := barrier_wait(& host_memory.job_hot_reload_sync)
// Job threads wait here until client module is back
@@ -237,6 +271,7 @@ host_job_worker_entrypoint :: proc(worker_thread: ^SysThread)
host_memory.client_api.hot_reload(& host_memory, & thread_memory)
}
}
spall_buffer_destroy( & host_memory.spall_context, & thread_memory.spall_buffer )
// We're exiting; wait for the tick lanes.
leader := barrier_wait(& host_memory.lane_job_sync)
}
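Taken together, the worker's hot-reload path is a drain-and-resume handshake: once a worker sees client_api_hot_reloaded with empty queues, it signals the main thread through job_hot_reload_sync (sized THREAD_JOB_WORKERS + 1 above, so the main thread is counted), parks until the module is back, then calls hot_reload to re-bind its thread-local state. A hypothetical condensation of that flow; the resume-side wait sits in lines this hunk elides, so the second barrier_wait here is an assumption, not the commit's code:

worker_hot_reload_handshake :: proc() {
	// 1) Tell the main thread this worker has drained its queues.
	_ = barrier_wait(& host_memory.job_hot_reload_sync)
	// 2) Park until the main thread has reloaded the client module (assumed second wait).
	_ = barrier_wait(& host_memory.job_hot_reload_sync)
	// 3) Re-bind this worker's references against the freshly loaded module.
	host_memory.client_api.hot_reload(& host_memory, & thread_memory)
}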
@@ -278,6 +313,7 @@ sync_client_api :: proc()
}
unload_client_api :: proc( module : ^Client_API )
{
profile(#procedure)
os_lib_unload( module.lib )
file_remove( Path_Sectr_Live_Module )
module^ = {}

@@ -16,11 +16,12 @@ ModuleAPI :: struct {
write_time: FileTime,
lib_version : int,

startup: type_of( startup ),
startup: type_of( startup),
shutdown: type_of( sectr_shutdown),
tick_lane_startup: type_of( tick_lane_startup),
job_worker_startup: type_of( job_worker_startup),
hot_reload: type_of( hot_reload ),
tick_lane: type_of( tick_lane ),
hot_reload: type_of( hot_reload),
tick_lane: type_of( tick_lane),
clean_frame: type_of( clean_frame),
jobsys_worker_tick: type_of( jobsys_worker_tick)
}
@@ -35,13 +36,18 @@ then prepare for multi-threaded "laned" tick: thread_wide_startup.
startup :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
{
// Rad Debugger driving me crazy..
// NOTE(Ed): This is probably not necessary, they're just loops for my sanity.
// NOTE(Ed): This is not necessary, they're just loops for my sanity.
for ; memory == nil; { memory = host_mem }
for ; thread == nil; { thread = thread_mem }
grime_set_profiler_module_context(& memory.spall_context)
grime_set_profiler_thread_buffer(& thread.spall_buffer)
profile(#procedure)

startup_tick := tick_now()

logger_init(& memory.client_memory.logger, "Sectr", memory.host_logger.file_path, memory.host_logger.file)
context.logger = to_odin_logger(& memory.client_memory.logger)

using memory.client_memory

// Configuration Load
@@ -78,7 +84,34 @@ startup :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
Desired_OS_Scheduler_MS :: 1
sleep_is_granular = set__scheduler_granularity( Desired_OS_Scheduler_MS )

// TODO(Ed): String Cache (Not backed by slab!)

// TODO(Ed): Setup input system

// TODO(Ed): Setup sokol_app
// TODO(Ed): Setup sokol_gfx
// TODO(Ed): Setup sokol_gp

// TODO(Ed): Use job system to load fonts!!!

// TODO(Ed): Setup screen ui state
// TODO(Ed): Setup proper workspace scaffold

startup_ms := duration_ms( tick_lap_time( & startup_tick))
log_print_fmt("Startup time: %v ms", startup_ms)
}

// For some reason Odin's symbols conflict with native foreign symbols...
@export
sectr_shutdown :: proc()
{
context.logger = to_odin_logger(& memory.client_memory.logger)

// TODO(Ed): Shut down font system

// TODO(Ed): Shutdown sokol gp, gfx, and app.

log_print("Client module shutdown complete")
}

/*
@@ -105,12 +138,21 @@ hot_reload :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
profile(#procedure)
// Do hot-reload stuff...
{
// Test dispatching 64 jobs during hot_reload loop (when the above store is uncommented)
for job_id := 1; job_id < 64; job_id += 1 {
memory.job_info_reload[job_id].id = job_id
memory.job_reload[job_id] = make_job_raw(& memory.job_group_reload, & memory.job_info_reload[job_id], test_job, {}, "Job Test (Hot-Reload)")
job_dispatch_single(& memory.job_reload[job_id], .Normal)
context.logger = to_odin_logger(& memory.client_memory.logger)

// TODO(Ed): Setup context allocators


// TODO(Ed): Patch Sokol contexts

// We hopefully don't have to patch third-party allocators anymore per-hot-reload.
{

}

// TODO(Ed): Reload the font system

log_print("Module reloaded")
}
// Critical reference synchronization
{
@@ -158,37 +200,68 @@ Host handles the loop.
tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_close: b64 = false)
{
profile(#procedure)
@thread_local dummy: int = 0
dummy += 1

EXIT_TIME :: 1

// profile_begin("sokol_app: pre_client_tick")
// should_close |= cast(b64) sokol_app.pre_client_frame()
@static timer: f64
if thread.id == .Master_Prepper {
timer += host_delta_time_ms
// sync_store(& should_close, timer > EXIT_TIME, .Release)

}
// profile_end()

profile_begin("Client Tick")

if thread.id == .Master_Prepper && timer > EXIT_TIME {
// Test dispatching 64 jobs during the last iteration before exiting.
for job_id := 1; job_id < 64; job_id += 1 {
memory.job_info_exit[job_id].id = job_id
memory.job_exit[job_id] = make_job_raw(& memory.job_group_exit, & memory.job_info_exit[job_id], test_job, {}, "Job Test (Exit)")
job_dispatch_single(& memory.job_exit[job_id], .Normal)
}
}
client_tick := tick_now()

profile_begin("sokol_app: pre_client_tick")
// should_close |= cast(b64) sokol_app.pre_client_frame() // TODO(Ed): SOKOL!
profile_end()

// profile_begin("sokol_app: post_client_tick")
// profile_end()
profile_begin("Client Tick")
{
profile("Work frame")
context.logger = to_odin_logger( & memory.client_memory.logger )

// TODO(Ed): Setup frame allocator

if thread.id == .Master_Prepper
{
// config := & memory.client_memory.config
// debug := & memory.client_memory.debug

// debug.draw_ui_box_bounds_points = false
// debug.draw_ui_padding_bounds = false
// debug.draw_ui_content_bounds = false

// config.engine_refresh_hz = 165

// config.color_theme = App_Thm_Light
// config.color_theme = App_Thm_Dusk
// config.color_theme = App_Thm_Dark

// sokol_width := sokol_app.widthf()
// sokol_height := sokol_app.heightf()

// window := & get_state().app_window
// if int(window.extent.x) != int(sokol_width) || int(window.extent.y) != int(sokol_height) {
// window.resized = true
// window.extent.x = sokol_width * 0.5
// window.extent.y = sokol_height * 0.5
// log("sokol_app: Event-based frame callback triggered (detected a resize)")
// }
}

// Test dispatching 64 jobs during hot_reload loop (when the above store is uncommented)
if true
{
if thread.id == .Master_Prepper {
profile("dispatching")
for job_id := 1; job_id < JOB_TEST_NUM; job_id += 1 {
memory.job_info_reload[job_id].id = job_id
memory.job_reload[job_id] = make_job_raw(& memory.job_group_reload, & memory.job_info_reload[job_id], test_job, {}, "Job Test (Hot-Reload)")
job_dispatch_single(& memory.job_reload[job_id], .Normal)
}
}
should_close = true
}
// should_close |= update( host_delta_time_ms )
// render()
}
client_tick := tick_now()
profile_end()

profile_begin("sokol_app: post_client_tick")
// sokol_app.post_client_frame() // TODO(Ed): SOKOL!
profile_end()

tick_lane_frametime(& client_tick, host_delta_time_ms, host_delta_ns)
return sync_load(& should_close, .Acquire)
@@ -197,7 +270,8 @@ tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_c
@export
jobsys_worker_tick :: proc(host_delta_time_ms: f64, host_delta_ns: Duration)
{
// profile("Worker Tick")
profile("Worker Tick")
context.logger = to_odin_logger(& memory.client_memory.logger)

ORDERED_PRIORITIES :: [len(JobPriority)]JobPriority{.High, .Normal, .Low}
block: for priority in ORDERED_PRIORITIES
@@ -225,11 +299,9 @@ jobsys_worker_tick :: proc(host_delta_time_ms: f64, host_delta_ns: Duration)
sync_mutex_unlock(& memory.job_system.job_lists[priority].mutex)
}
}

// Updating worker frametime
// Updating worker timing
{
// TODO(Ed): Setup

// TODO(Ed): Setup timing
}
}
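ORDERED_PRIORITIES above fixes the scan order High, Normal, Low, and each list is only mutated under its own mutex (the unlock is visible in this hunk; the matching lock and pop are elided). A hypothetical sketch of one worker-tick pass under those assumptions; the sync_mutex_lock call, the job's next link, and the wrapper proc are illustrative, not the commit's code:

jobsys_worker_pass :: proc() {
	for priority in ORDERED_PRIORITIES {
		list := & memory.job_system.job_lists[priority]
		if list.head == nil do continue // cheap pre-check before taking the mutex
		sync_mutex_lock(& list.mutex)   // assumed counterpart of the unlock shown above
		job := list.head
		if job != nil do list.head = job.next // assumed intrusive-list link
		sync_mutex_unlock(& list.mutex)
		if job != nil {
			// run the popped job here (the job's fields are elided in this hunk)
			return
		}
	}
}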
@@ -240,7 +312,7 @@ test_job :: proc(data: rawptr)
{
profile(#procedure)
info := cast(^TestJobInfo) data
// log_print_fmt("Test job succeeded: %v", info.id)
log_print_fmt("Test job succeeded: %v", info.id)
}

Frametime_High_Perf_Threshold_MS :: 1 / 240.0
@@ -248,14 +320,13 @@ Frametime_High_Perf_Threshold_MS :: 1 / 240.0
tick_lane_frametime :: proc(client_tick: ^Tick, host_delta_time_ms: f64, host_delta_ns: Duration, can_sleep := true)
{
profile(#procedure)
config := app_config()
frametime := & memory.client_memory.frametime
// context.allocator = frame_slab_allocator()
// context.temp_allocator = transient_allocator()
config := app_config()

if thread.id == .Master_Prepper
{
frametime.target_ms = 1.0 / f64(config.engine_refresh_hz) * S_To_MS
frametime := & memory.client_memory.frametime

frametime.target_ms = 1.0 / f64(config.engine_refresh_hz)
sub_ms_granularity_required := frametime.target_ms <= Frametime_High_Perf_Threshold_MS

frametime.delta_ns = tick_lap_time( client_tick )
@@ -309,9 +380,11 @@ tick_lane_frametime :: proc(client_tick: ^Tick, host_delta_time_ms: f64, host_de
clean_frame :: proc()
{
profile(#procedure)
context.logger = to_odin_logger(& memory.client_memory.logger)

if thread.id == .Master_Prepper
{

// mem_reset( frame_allocator() )
}
return
}

@@ -22,7 +22,7 @@ ProcessMemory :: struct {
host_api: Host_API, // Client -> Host Interface

// Textual Logging
logger: Logger,
host_logger: Logger,
path_logger_finalized: string,

// Profiling
@@ -45,12 +45,10 @@ ProcessMemory :: struct {

// Testing
job_group_reload: JobGroup,
job_info_reload: [64]TestJobInfo,
job_reload: [64]Job,
job_group_exit: JobGroup,
job_info_exit: [64]TestJobInfo,
job_exit: [64]Job,
job_info_reload: [JOB_TEST_NUM]TestJobInfo,
job_reload: [JOB_TEST_NUM]Job,
}
JOB_TEST_NUM :: 64

Host_API :: struct {
request_virtual_memory: #type proc(), // All dynamic allocations will utilize vmem interfaces
@@ -61,7 +59,7 @@ ThreadMemory :: struct {
using _: ThreadWorkerContext,

// Per-thread profiling
spall_buffer_backing: [SPALL_BUFFER_DEFAULT_SIZE * 4]byte,
spall_buffer_backing: [SPALL_BUFFER_DEFAULT_SIZE]byte,
spall_buffer: Spall_Buffer,

client_memory: ThreadState,

@@ -57,13 +57,15 @@ import "core:time"
tick_now :: time.tick_now

import "codebase:grime"
Logger :: grime.Logger
Logger :: grime.Logger
logger_init :: grime.logger_init
to_odin_logger :: grime.to_odin_logger

set__scheduler_granularity :: grime.set__scheduler_granularity

grime_set_profiler_module_context :: grime.set_profiler_module_context
grime_set_profiler_thread_buffer :: grime.set_profiler_thread_buffer

set__scheduler_granularity :: grime.set__scheduler_granularity

Kilo :: 1024
Mega :: Kilo * 1024
Giga :: Mega * 1024

@@ -67,6 +67,8 @@ State :: struct {

// Overall frametime of the tick frame (currently main thread's)
using frametime : FrameTime,

logger: Logger,
}

ThreadState :: struct {