got multi-laned hot-reload
.gitignore (vendored): 14 changed lines
@@ -8,13 +8,17 @@ build/**
# folders
assets/TX-02-1WN9N6Q8
thirdparty/backtrace
thirdparty/harfbuzz
thirdparty/ini
thirdparty/sokol
thirdparty/sokol-tools
thirdparty/harfbuzz/**
thirdparty/ini/**
thirdparty/sokol/**
thirdparty/sokol-tools/**
!**/sokol/app/**
!**/sokol/gfx/**

toolchain/**
!**/Odin/base/**
!**/Odin/core/**
!**/Odin/vendor/**

# logs
logs
code2/grime/assert.odin (new file): 33 lines added
@@ -0,0 +1,33 @@
package grime

import "core:os"

// Below should be defined per-package

ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location )
{
    if condition {
        return
    }
    log_print( msg, LoggerLevel.Warning, location )
    debug_trap()
}

// TODO(Ed) : Setup exit codes!
fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location )
{
    log_print( msg, LoggerLevel.Fatal, location )
    debug_trap()
    process_exit( exit_code )
}

// TODO(Ed) : Setup exit codes!
verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location )
{
    if condition {
        return
    }
    log_print( msg, LoggerLevel.Fatal, location )
    debug_trap()
    process_exit( exit_code )
}
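A minimal usage sketch of the helpers above, assuming a hypothetical call site inside the grime package; the procedure name, size constant, and messages are illustrative only and are not part of the commit. `ensure` logs a warning and traps but lets execution continue, while `verify` and `fatal` log at Fatal and exit the process.

```odin
package grime

// Hypothetical call site showing the intended split:
// ensure = recoverable complaint, verify = hard invariant, fatal = unconditional abort.
example_reserve :: proc( bytes : int ) -> b32
{
    verify( bytes > 0, "example_reserve: non-positive size requested" )                  // logs Fatal and exits on failure
    ensure( bytes <= 64 * Kilo, "example_reserve: unusually large request, continuing" ) // warns and traps, then keeps going
    if bytes > max(int) / 2 {
        fatal( "example_reserve: request cannot be satisfied" )
    }
    return true
}
```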
@@ -8,7 +8,7 @@ file_copy_sync :: proc( path_src, path_dst: string, allocator := context.allocat
{
    path_info, result := file_status( path_src, allocator )
    if result != OS_ERROR_NONE {
        log_fmt("Could not get file info: %v", result, LoggerLevel.Error )
        log_print_fmt("Could not get file info: %v", result, LoggerLevel.Error )
        return false
    }
    file_size = path_info.size

@@ -16,14 +16,14 @@ file_copy_sync :: proc( path_src, path_dst: string, allocator := context.allocat

    src_content, result := file_read_entire( path_src, allocator )
    if ! result {
        log_fmt( "Failed to read file to copy: %v", path_src, LoggerLevel.Error )
        log_print_fmt( "Failed to read file to copy: %v", path_src, LoggerLevel.Error )
        debug_trap()
        return false
    }

    result = file_write_entire( path_dst, src_content, false )
    if ! result {
        log_fmt( "Failed to copy file: %v", path_dst, LoggerLevel.Error )
        log_print_fmt( "Failed to copy file: %v", path_dst, LoggerLevel.Error )
        debug_trap()
        return false
    }
@@ -17,4 +17,3 @@ sll_queue_push_nz :: proc "contextless" (first: ^$ParentType, last, n: ^^$Type,
    }
}
sll_queue_push_n :: #force_inline proc "contextless" (first: $ParentType, last, n: ^^$Type) { sll_queue_push_nz(first, last, n, nil) }
@@ -42,26 +42,10 @@ logger_init :: proc( logger : ^ Logger, id : string, file_path : string, file :
    LOGGER_VARENA_BASE_ADDRESS : uintptr = 2 * Tera
    @static vmem_init_counter : uintptr = 0

    // alloc_error : AllocatorError
    // logger.varena, alloc_error = varena_init(
    //     LOGGER_VARENA_BASE_ADDRESS + vmem_init_counter * 250 * Megabyte,
    //     1 * Megabyte,
    //     128 * Kilobyte,
    //     growth_policy = nil,
    //     allow_any_resize = true,
    //     dbg_name = "logger varena",
    //     enable_mem_tracking = false )
    // verify( alloc_error == .None, "Failed to allocate logger's virtual arena")
    vmem_init_counter += 1

    // TODO(Ed): Figure out another solution here...
    // logger.entries, alloc_error = array_init(Array(LoggerEntry), 8192, runtime.heap_allocator())
    // verify( alloc_error == .None, "Failed to allocate logger's entries array")

    context.logger = { logger_interface, logger, LoggerLevel.Debug, Default_File_Logger_Opts }
    log("Initialized Logger")
    log_print("Initialized Logger")
    when false {
        log("This sentence is over 80 characters long on purpose to test the ability of this logger to properly wrap long logs with a new line and then at the end of that pad it with the appropriate signature.")
        log_print("This sentence is over 80 characters long on purpose to test the ability of this logger to properly wrap long logs with a new line and then at the end of that pad it with the appropriate signature.")
    }
}
@@ -137,24 +121,13 @@ logger_interface :: proc(
    str_pfmt_file_ln( logger.file, to_string(builder) )
}

// This buffer is used below exclusively to prevent any allocator recursion when verbose logging from allocators.
// This means a single line is limited to a 32k buffer (increase naturally if this SOMEHOW becomes a bottleneck...)
Logger_Allocator_Buffer : [32 * Kilo]u8
// Below are made on demand per-package.
// They should strictly only use a scratch allocator...

log :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
    // TODO(Ed): Finish this
    // temp_arena : Arena; arena_init(& temp_arena, Logger_Allocator_Buffer[:])
    // context.allocator      = arena_allocator(& temp_arena)
    // context.temp_allocator = arena_allocator(& temp_arena)

    // core_log.log( level, msg, location = loc )
log_print :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
    core_log.log( level, msg, location = loc )
}

log_fmt :: proc( fmt : string, args : ..any, level := LoggerLevel.Info, loc := #caller_location ) {
    // TODO(Ed): Finish this
    // temp_arena : Arena; arena_init(& temp_arena, Logger_Allocator_Buffer[:])
    // context.allocator      = arena_allocator(& temp_arena)
    // context.temp_allocator = arena_allocator(& temp_arena)

    // core_log.logf( level, fmt, ..args, location = loc )
log_print_fmt :: proc( fmt : string, args : ..any, level := LoggerLevel.Info, loc := #caller_location ) {
    core_log.logf( level, fmt, ..args, location = loc )
}
@@ -32,7 +32,7 @@ import fmt_io "core:fmt"
str_pfmt_builder :: fmt_io.sbprintf
str_pfmt_buffer  :: fmt_io.bprintf
str_pfmt_file_ln :: fmt_io.fprintln
str_tmp_from_any :: fmt_io.tprint
str_tmp_from_any :: #force_inline proc(args: ..any, sep := " ") -> string { context.temp_allocator = resolve_odin_allocator(context.temp_allocator); return fmt_io.tprint(..args, sep = sep) }

import "core:log"
Default_File_Logger_Opts :: log.Default_File_Logger_Opts

@@ -53,31 +53,33 @@ import "core:mem"
import "core:mem/virtual"
VirtualProtectFlags :: virtual.Protect_Flags

import core_os "core:os"
FS_Open_Readonly  :: core_os.O_RDONLY
FS_Open_Writeonly :: core_os.O_WRONLY
FS_Open_Create    :: core_os.O_CREATE
FS_Open_Trunc     :: core_os.O_TRUNC
import "core:os"
FS_Open_Readonly  :: os.O_RDONLY
FS_Open_Writeonly :: os.O_WRONLY
FS_Open_Create    :: os.O_CREATE
FS_Open_Trunc     :: os.O_TRUNC

OS_ERROR_NONE       :: core_os.ERROR_NONE
OS_Handle           :: core_os.Handle
OS_ERROR_HANDLE_EOF :: core_os.ERROR_HANDLE_EOF
OS_INVALID_HANDLE   :: core_os.INVALID_HANDLE
OS_ERROR_NONE       :: os.ERROR_NONE
OS_Handle           :: os.Handle
OS_ERROR_HANDLE_EOF :: os.ERROR_HANDLE_EOF
OS_INVALID_HANDLE   :: os.INVALID_HANDLE

FileFlag_Create    :: core_os.O_CREATE
FileFlag_ReadWrite :: core_os.O_RDWR
FileTime           :: core_os.File_Time
file_close    :: core_os.close
file_open     :: core_os.open
file_read     :: core_os.read
file_remove   :: core_os.remove
file_seek     :: core_os.seek
file_status   :: core_os.stat
file_truncate :: core_os.truncate
file_write    :: core_os.write
process_exit  :: os.exit

file_read_entire  :: core_os.read_entire_file
file_write_entire :: core_os.write_entire_file
FileFlag_Create    :: os.O_CREATE
FileFlag_ReadWrite :: os.O_RDWR
FileTime           :: os.File_Time
file_close    :: os.close
file_open     :: os.open
file_read     :: os.read
file_remove   :: os.remove
file_seek     :: os.seek
file_status   :: os.stat
file_truncate :: os.truncate
file_write    :: os.write

file_read_entire  :: os.read_entire_file
file_write_entire :: os.write_entire_file

import "core:strings"
StrBuilder :: strings.Builder
code2/grime/profiler.odin (new file): 37 lines added
@@ -0,0 +1,37 @@
package grime

/*
This is just a snippet file, do not use directly.
*/

import "base:runtime"
import "core:prof/spall"

SpallProfiler :: struct {
    ctx    : spall.Context,
    buffer : spall.Buffer,
}

// @(private)
// Module_Context : ^SpallProfiler

// set_profiler_module_context :: #force_inline proc "contextless" ( ctx : ^SpallProfiler ) {
//     Module_Context = ctx
// }

DISABLE_PROFILING :: true

@(deferred_none = profile_end, disabled = DISABLE_PROFILING)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
    // spall._buffer_begin( & Module_Context.ctx, & Module_Context.buffer, name, "", loc )
}

@(disabled = DISABLE_PROFILING)
profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
    // spall._buffer_begin( & Module_Context.ctx, & Module_Context.buffer, name, "", loc )
}

@(disabled = DISABLE_PROFILING)
profile_end :: #force_inline proc "contextless" () {
    // spall._buffer_end( & Module_Context.ctx, & Module_Context.buffer)
}
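Since the snippet file above ships with everything commented out, here is a hedged sketch of how a consuming package might instantiate it, mirroring what the host module does further down with `host_memory.spall_profiler`. The package name is hypothetical and `DISABLE_PROFILING` is flipped to false purely for illustration.

```odin
package profiled_example // hypothetical consumer of the snippet above

import "core:prof/spall"
import grime "codebase:grime"

@(private) Module_Context : ^grime.SpallProfiler

set_profiler_module_context :: #force_inline proc "contextless" ( ctx : ^grime.SpallProfiler ) {
    Module_Context = ctx
}

DISABLE_PROFILING :: false

@(deferred_none = profile_end, disabled = DISABLE_PROFILING)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
    spall._buffer_begin( & Module_Context.ctx, & Module_Context.buffer, name, "", loc )
}

@(disabled = DISABLE_PROFILING)
profile_end :: #force_inline proc "contextless" () {
    spall._buffer_end( & Module_Context.ctx, & Module_Context.buffer )
}
```

A thread would then call `set_profiler_module_context` once with a pointer to the process-wide SpallProfiler before entering its first `profile(...)` scope.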
@@ -19,7 +19,7 @@ thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
    timer := win32.CreateWaitableTimerExW( nil, nil, win32.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, win32.TIMER_ALL_ACCESS )
    if timer == nil {
        msg := str_pfmt("Failed to create win32 timer - ErrorCode: %v", win32.GetLastError() )
        log( msg, LoggerLevel.Warning, loc)
        log_print( msg, LoggerLevel.Warning, loc)
        return false
    }

@@ -27,7 +27,7 @@ thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
    result := win32.SetWaitableTimerEx( timer, & due_time, 0, nil, nil, nil, 0 )
    if ! result {
        msg := str_pfmt("Failed to set win32 timer - ErrorCode: %v", win32.GetLastError() )
        log( msg, LoggerLevel.Warning, loc)
        log_print( msg, LoggerLevel.Warning, loc)
        return false
    }

@@ -42,22 +42,22 @@ thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
    {
    case WAIT_ABANDONED:
        msg := str_pfmt("Failed to wait for win32 timer - Error: WAIT_ABANDONED" )
        log( msg, LoggerLevel.Error, loc)
        log_print( msg, LoggerLevel.Error, loc)
        return false

    case WAIT_IO_COMPLETION:
        msg := str_pfmt("Waited for win32 timer: Ended by APC queued to the thread" )
        log( msg, LoggerLevel.Error, loc)
        log_print( msg, LoggerLevel.Error, loc)
        return false

    case WAIT_OBJECT_0:
        msg := str_pfmt("Waited for win32 timer - Reason : WAIT_OBJECT_0" )
        log( msg, loc = loc)
        log_print( msg, loc = loc)
        return false

    case WAIT_FAILED:
        msg := str_pfmt("Waited for win32 timer failed - ErrorCode: %v", win32.GetLastError() )
        log( msg, LoggerLevel.Error, loc)
        log_print( msg, LoggerLevel.Error, loc)
        return false
    }
@@ -1,3 +1,5 @@
# Host Module

The sole job of this module is to provide a bare launch pad and runtime hot-reload support for the client module (sectr). To achieve this, the host tracks the client module's static memory and provides an API the client uses to reload itself when a change is detected. The client is responsible for populating the static memory reference and for doing anything else it needs through the host API that it cannot do on its own.

Uses the core's Arena allocator.
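To make that contract concrete, below is a condensed, hypothetical sketch of the reload cycle the README describes: watch the DLL's write time, shadow-copy it so the toolchain can keep writing the original, load the copy with `core:dynlib`, and re-resolve the exported symbols. Names such as `ClientAPI` and `check_and_reload` are illustrative; the actual implementation is in the host.odin hunk further down.

```odin
package host_sketch // illustrative only; the real logic lives in host.odin below

import "core:dynlib"
import "core:os"

ClientAPI :: struct {
    lib        : dynlib.Library,
    write_time : os.File_Time,
    tick_lane  : proc(delta_ms: f64) -> (running: b64),
}

// Detect a newer DLL, unload the stale copy, shadow-copy the fresh one, re-resolve symbols.
check_and_reload :: proc(api: ^ClientAPI, dll_path, live_path: string) {
    write_time, err := os.last_write_time_by_name(dll_path)
    if err != os.ERROR_NONE || write_time == api.write_time do return

    dynlib.unload_library(api.lib)
    data, _ := os.read_entire_file(dll_path)   // shadow copy keeps the original writable
    os.write_entire_file(live_path, data)

    lib, ok := dynlib.load_library(live_path)
    if !ok do panic("failed to reload the client module")
    api.lib        = lib
    api.write_time = write_time
    api.tick_lane  = cast(type_of(api.tick_lane)) dynlib.symbol_address(lib, "tick_lane")
}
```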
@@ -1,27 +1,77 @@
package host

import "core:thread"
import "core:sync"

Path_Logs :: "../logs"
when ODIN_OS == .Windows
{
    Path_Sectr_Module        :: "sectr.dll"
    Path_Sectr_Live_Module   :: "sectr_live.dll"
    Path_Sectr_Debug_Symbols :: "sectr.pdb"
    Path_Sectr_Spall_Record  :: "sectr.spall"
}

// Only static memory host has.
host_memory: HostMemory
host_memory: ProcessMemory

@(thread_local)
thread_memory: ThreadMemory

load_client_api :: proc(version_id: int) -> (loaded_module: Client_API)
{
    write_time, result := file_last_write_time_by_name("sectr.dll")
    if result != OS_ERROR_NONE {
        panic_contextless( "Could not resolve the last write time for sectr")
    }

    thread_sleep( Millisecond * 100 )

    live_file := Path_Sectr_Live_Module
    file_copy_sync( Path_Sectr_Module, live_file, allocator = context.temp_allocator )

    lib, load_result := os_lib_load( live_file )
    if ! load_result {
        panic( "Failed to load the sectr module." )
    }

    startup           := cast( type_of( host_memory.client_api.startup))           os_lib_get_proc(lib, "startup")
    tick_lane_startup := cast( type_of( host_memory.client_api.tick_lane_startup)) os_lib_get_proc(lib, "tick_lane_startup")
    hot_reload        := cast( type_of( host_memory.client_api.hot_reload))        os_lib_get_proc(lib, "hot_reload")
    tick_lane         := cast( type_of( host_memory.client_api.tick_lane))         os_lib_get_proc(lib, "tick_lane")
    clean_frame       := cast( type_of( host_memory.client_api.clean_frame))       os_lib_get_proc(lib, "clean_frame")
    if startup           == nil do panic("Failed to load sectr.startup symbol" )
    if tick_lane_startup == nil do panic("Failed to load sectr.tick_lane_startup symbol" )
    if hot_reload        == nil do panic("Failed to load sectr.hot_reload symbol" )
    if tick_lane         == nil do panic("Failed to load sectr.tick_lane symbol" )
    if clean_frame       == nil do panic("Failed to load sectr.clean_frame symbol" )

    loaded_module.lib               = lib
    loaded_module.write_time        = write_time
    loaded_module.lib_version       = version_id
    loaded_module.startup           = startup
    loaded_module.tick_lane_startup = tick_lane_startup
    loaded_module.hot_reload        = hot_reload
    loaded_module.tick_lane         = tick_lane
    loaded_module.clean_frame       = clean_frame
    return
}

master_prepper_proc :: proc(thread: ^SysThread) {}
main :: proc()
{
    // TODO(Ed): Change this
    host_scratch: Arena; arena_init(& host_scratch, host_memory.host_scratch[:])
    context.allocator = arena_allocator(& host_scratch)
    context.temp_allocator = context.allocator

    // Setup host arenas
    arena_init(& host_memory.host_persist, host_memory.host_persist_buf[:])
    arena_init(& host_memory.host_scratch, host_memory.host_scratch_buf[:])
    context.allocator      = arena_allocator(& host_memory.host_persist)
    context.temp_allocator = arena_allocator(& host_memory.host_scratch)
    // Setup the profiler
    {
        buffer_backing := make([]u8, SPALL_BUFFER_DEFAULT_SIZE * 4)
        host_memory.spall_profiler.ctx    = spall_context_create(Path_Sectr_Spall_Record)
        host_memory.spall_profiler.buffer = spall_buffer_create(buffer_backing)
    }
    // Setup the "Master Prepper" thread
    thread_memory.id = .Master_Prepper
    thread_id := thread_current_id()
    {

@@ -34,62 +84,137 @@ main :: proc()
        // system_ctx.win32_thread_id = w32_get_current_thread_id()
        system_ctx.id = cast(int) system_ctx.win32_thread_id
    }
    free_all(context.temp_allocator)
}

    write_time, result := file_last_write_time_by_name("sectr.dll")
    if result != OS_ERROR_NONE {
        panic_contextless( "Could not resolve the last write time for sectr")
    }

    thread_sleep( Millisecond * 100 )

    live_file := Path_Sectr_Live_Module
    file_copy_sync( Path_Sectr_Module, live_file, allocator = context.temp_allocator )
    // Setup the logger
    {
        lib, load_result := os_lib_load( live_file )
        if ! load_result {
            panic( "Failed to load the sectr module." )
        fmt_backing := make([]byte, 32 * Kilo)
        defer free_all(context.temp_allocator)

        // Generating the logger's name, it will be used when the app is shutting down.
        path_logger_finalized : string
        {
            startup_time := time_now()
            year, month, day := time_date( startup_time)
            hour, min, sec   := time_clock_from_time( startup_time)

            if ! os_is_directory( Path_Logs ) {
                os_make_directory( Path_Logs )
            }
            timestamp := str_pfmt_buffer( fmt_backing, "%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
            path_logger_finalized = str_pfmt_buffer( fmt_backing, "%s/sectr_%v.log", Path_Logs, timestamp)
        }
        logger_init( & host_memory.logger, "Sectr Host", str_pfmt_buffer( fmt_backing, "%s/sectr.log", Path_Logs ) )
        context.logger = to_odin_logger( & host_memory.logger )
        {
            // Log System Context
            builder := strbuilder_from_bytes( fmt_backing )
            str_pfmt_builder( & builder, "Core Count: %v, ", os_core_count() )
            str_pfmt_builder( & builder, "Page Size: %v", os_page_size() )
            log_print( to_str(builder) )
        }

        startup           := cast( type_of( host_memory.client_api.startup))           os_lib_get_proc(lib, "startup")
        hot_reload        := cast( type_of( host_memory.client_api.hot_reload))        os_lib_get_proc(lib, "hot_reload")
        tick_lane_startup := cast( type_of( host_memory.client_api.tick_lane_startup)) os_lib_get_proc(lib, "tick_lane_startup")
        if startup           == nil do panic("Failed to load sectr.startup symbol" )
        if hot_reload        == nil do panic("Failed to load sectr.hot_reload symbol" )
        if tick_lane_startup == nil do panic("Failed to load sectr.tick_lane_startup symbol" )

        host_memory.client_api.lib               = lib
        host_memory.client_api.startup           = startup
        host_memory.client_api.hot_reload        = hot_reload
        host_memory.client_api.tick_lane_startup = tick_lane_startup
    }
    context.logger = to_odin_logger( & host_memory.logger )
    // Load the Environment API for the first time
    {
        host_memory.client_api = load_client_api( 1 )
        verify( host_memory.client_api.lib_version != 0, "Failed to initially load the sectr module" )
    }

    // Client API Startup
    host_memory.host_api.sync_client_module      = sync_client_api
    host_memory.host_api.launch_tick_lane_thread = launch_tick_lane_thread
    host_memory.client_api.startup(& host_memory, & thread_memory)

    // Start the tick lanes
    thread_wide_startup()
}

@export
sync_client_api :: proc() {
    assert_contextless(thread_memory.id == .Master_Prepper)
    // Fill out detection and reloading of client api.

    // Needs a flag and an atomic to spin-lock live helper threads when reloading
thread_wide_startup :: proc()
{
    assert(thread_memory.id == .Master_Prepper)
    if THREAD_TICK_LANES > 1 {
        launch_tick_lane_thread(.Atomic_Accountant)
        sync.barrier_init(& host_memory.client_api_sync_lock, THREAD_TICK_LANES)
    }
    host_tick_lane_startup(thread_memory.system_ctx)
}

import "core:thread"

@export
launch_tick_lane_thread :: proc(id : WorkerID) {
    assert_contextless(thread_memory.id == .Master_Prepper)
    // TODO(Ed): We need to make our own version of this that doesn't allocate memory.
    lane_thread := thread.create(host_tick_lane_startup, .High)
    lane_thread := thread.create(host_tick_lane_startup, .High)
    lane_thread.user_index = int(id)
    thread.start(lane_thread)
}

host_tick_lane_startup :: proc(lane_thread: ^SysThread) {
    thread_memory.system_ctx = lane_thread
    thread_memory.id = cast(WorkerID) lane_thread.user_index
    thread_memory.id = cast(WorkerID) lane_thread.user_index
    host_memory.client_api.tick_lane_startup(& thread_memory)

    host_tick_lane()
}

host_tick_lane :: proc()
{
    delta_ns: Duration

    host_tick := time_tick_now()

    running : b64 = true
    for ; running ;
    {
        profile("Host Tick")
        sync_client_api()

        running = host_memory.client_api.tick_lane( duration_seconds(delta_ns), delta_ns )
        // host_memory.client_api.clean_frame()

        delta_ns  = time_tick_lap_time( & host_tick )
        host_tick = time_tick_now()
    }
}

@export
sync_client_api :: proc()
{
    leader := sync.barrier_wait(& host_memory.client_api_sync_lock)
    free_all(context.temp_allocator)
    profile(#procedure)
    if thread_memory.id == .Master_Prepper
    {
        write_time, result := file_last_write_time_by_name( Path_Sectr_Module );
        if result == OS_ERROR_NONE && host_memory.client_api.write_time != write_time
        {
            thread_coherent_store(& host_memory.client_api_hot_reloaded, true)

            version_id := host_memory.client_api.lib_version + 1
            unload_client_api( & host_memory.client_api )

            // Wait for the pdb to unlock (the linker may still be writing)
            for ; file_is_locked( Path_Sectr_Debug_Symbols ) && file_is_locked( Path_Sectr_Live_Module ); {}
            thread_sleep( Millisecond * 100 )

            host_memory.client_api = load_client_api( version_id )
            verify( host_memory.client_api.lib_version != 0, "Failed to hot-reload the sectr module" )
        }
    }
    leader = sync.barrier_wait(& host_memory.client_api_sync_lock)
    if thread_coherent_load(& host_memory.client_api_hot_reloaded)
    {
        host_memory.client_api.hot_reload(& host_memory, & thread_memory)
        if thread_memory.id == .Master_Prepper {
            thread_coherent_store(& host_memory.client_api_hot_reloaded, false)
        }
    }
}

unload_client_api :: proc( module : ^Client_API )
{
    os_lib_unload( module.lib )
    file_remove( Path_Sectr_Live_Module )
    module^ = {}
    log_print("Unloaded sectr API")
}
@@ -1,9 +1,9 @@
package host

import "base:builtin"
// import "base:builtin"
// Odin_OS_Type :: type_of(ODIN_OS)

import "base:intrinsics"
// import "base:intrinsics"
// atomic_thread_fence :: intrinsics.atomic_thread_fence
// mem_zero :: intrinsics.mem_zero
// mem_zero_volatile :: intrinsics.mem_zero_volatile

@@ -11,47 +11,142 @@ import "base:intrinsics"
// mem_copy_overlapping :: intrinsics.mem_copy

import "base:runtime"
// Assertion_Failure_Proc :: runtime.Assertion_Failure_Proc
// Logger :: runtime.Logger
debug_trap :: runtime.debug_trap

import "core:dynlib"
os_lib_load     :: dynlib.load_library
os_lib_unload   :: dynlib.unload_library
os_lib_get_proc :: dynlib.symbol_address

import "core:fmt"
str_pfmt_builder :: fmt.sbprintf
str_pfmt_buffer  :: fmt.bprintf

import "core:log"
LoggerLevel :: log.Level

import "core:mem"
Arena           :: mem.Arena
arena_allocator :: mem.arena_allocator
arena_init      :: mem.arena_init

import core_os "core:os"
file_last_write_time_by_name :: core_os.last_write_time_by_name
OS_ERROR_NONE :: core_os.ERROR_NONE
import "core:os"
FileTime :: os.File_Time
file_last_write_time_by_name :: os.last_write_time_by_name
file_remove       :: os.remove
OS_ERROR_NONE     :: os.ERROR_NONE
os_is_directory   :: os.is_dir
os_make_directory :: os.make_directory
os_core_count     :: os.processor_core_count
os_page_size      :: os.get_page_size
process_exit      :: os.exit

import "core:prof/spall"
SPALL_BUFFER_DEFAULT_SIZE :: spall.BUFFER_DEFAULT_SIZE
spall_context_create :: spall.context_create
spall_buffer_create  :: spall.buffer_create

import "core:strings"
strbuilder_from_bytes :: strings.builder_from_bytes
builder_to_str        :: strings.to_string

import "core:sync"
thread_current_id :: sync.current_thread_id
thread_current_id :: sync.current_thread_id
thread_coherent_load  :: sync.atomic_load
thread_coherent_store :: sync.atomic_store

import "core:time"
Millisecond :: time.Millisecond
Second      :: time.Second
Duration    :: time.Duration
duration_seconds :: time.duration_seconds
thread_sleep     :: time.sleep
Millisecond :: time.Millisecond
Second      :: time.Second
Duration    :: time.Duration
time_clock_from_time :: time.clock_from_time
duration_seconds     :: time.duration_seconds
time_date            :: time.date
time_now             :: time.now
thread_sleep         :: time.sleep
time_tick_now        :: time.tick_now
time_tick_lap_time   :: time.tick_lap_time

import "core:thread"
SysThread :: thread.Thread

import grime "codebase:grime"
file_copy_sync :: grime.file_copy_sync
DISABLE_PROFILING :: grime.DISABLE_PROFILING
file_copy_sync :: grime.file_copy_sync
file_is_locked :: grime.file_is_locked
logger_init    :: grime.logger_init
to_odin_logger :: grime.to_odin_logger

import "codebase:sectr"
MAX_THREADS  :: sectr.MAX_THREADS
Client_API   :: sectr.ModuleAPI
HostMemory   :: sectr.HostMemory
ThreadMemory :: sectr.ThreadMemory
WorkerID     :: sectr.WorkerID
MAX_THREADS       :: sectr.MAX_THREADS
THREAD_TICK_LANES :: sectr.THREAD_TICK_LANES
Client_API    :: sectr.ModuleAPI
ProcessMemory :: sectr.ProcessMemory
ThreadMemory  :: sectr.ThreadMemory
WorkerID      :: sectr.WorkerID
SpallProfiler :: sectr.SpallProfiler

ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location )
{
    if condition {
        return
    }
    log_print( msg, LoggerLevel.Warning, location )
    debug_trap()
}

// TODO(Ed) : Setup exit codes!
fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location )
{
    log_print( msg, LoggerLevel.Fatal, location )
    debug_trap()
    process_exit( exit_code )
}

// TODO(Ed) : Setup exit codes!
verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location )
{
    if condition {
        return
    }
    log_print( msg, LoggerLevel.Fatal, location )
    debug_trap()
    process_exit( exit_code )
}

log_print :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
    context.allocator      = arena_allocator(& host_memory.host_scratch)
    context.temp_allocator = arena_allocator(& host_memory.host_scratch)
    log.log( level, msg, location = loc )
}

log_print_fmt :: proc( fmt : string, args : ..any, level := LoggerLevel.Info, loc := #caller_location ) {
    context.allocator      = arena_allocator(& host_memory.host_scratch)
    context.temp_allocator = arena_allocator(& host_memory.host_scratch)
    log.logf( level, fmt, ..args, location = loc )
}

@(deferred_none = profile_end, disabled = DISABLE_PROFILING)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
    spall._buffer_begin( & host_memory.spall_profiler.ctx, & host_memory.spall_profiler.buffer, name, "", loc )
}

@(disabled = DISABLE_PROFILING)
profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
    spall._buffer_begin( & host_memory.spall_profiler.ctx, & host_memory.spall_profiler.buffer, name, "", loc )
}

@(disabled = DISABLE_PROFILING)
profile_end :: #force_inline proc "contextless" () {
    spall._buffer_end( & host_memory.spall_profiler.ctx, & host_memory.spall_profiler.buffer)
}

Kilo :: 1024
Mega :: Kilo * 1024
Giga :: Mega * 1024
Tera :: Giga * 1024

to_str :: proc {
    builder_to_str,
}
@@ -1,3 +1,15 @@
# Sectr Package

This is the monolithic package representing the prototype itself. Relative to the host package, it defines the client module API, the process memory, and the thread memory.

Many definitions that are considered independent of the prototype have been lifted into the grime package, vefontcache, or, in the future, other packages within this codebase collection.

All allocators and containers within Sectr are derived from Grime.

The memory heuristics for sectr are categorized for now into the groups below (see the sketch after the list):

* Persistent Static: Never released for process lifetime.
* Persistent Conservative: Can be wiped
* Frame
* File Mappings
* Codebase DB
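A hedged sketch of those categories as an Odin enum; the commit does not define such a type, this is only to make the grouping concrete.

```odin
package sectr

// Hypothetical tag for the memory categories listed above; not part of the commit.
MemoryCategory :: enum {
    Persistent_Static,       // never released for the lifetime of the process
    Persistent_Conservative, // long-lived, but can be wiped and rebuilt
    Frame,                   // reset every tick
    File_Mappings,           // backing storage for mapped files
    Codebase_DB,             // storage for the code database
}
```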
@@ -1,8 +1,6 @@
package sectr

// import "base:runtime"
// import c "core:c/libc"
import "core:dynlib"
import "core:dynlib"
import "core:sync"

Path_Assets :: "../assets/"

@@ -10,60 +8,70 @@ Path_Shaders :: "../shaders/"
Path_Input_Replay :: "input.sectr_replay"

ModuleAPI :: struct {
    lib: dynlib.Library,
    // write-time: FileTime,
    lib: dynlib.Library,
    write_time: FileTime,
    lib_version : int,

    startup: type_of( startup ),
    hot_reload: type_of( hot_reload ),
    tick_lane_startup: type_of( tick_lane_startup),
    hot_reload: type_of( hot_reload ),
    tick_lane: type_of( tick_lane ),
    clean_frame: type_of( clean_frame),
}

StartupContext :: struct {}

/*
Called by host.main when it completes its setup.

The goal of startup is to first prepare persistent state,
then prepare for the multi-threaded "laned" tick: thread_wide_startup.
*/
@export
startup :: proc(host_mem: ^HostMemory, thread_mem: ^ThreadMemory)
startup :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
{
    dummy : int = 0
    dummy += 1

    memory = host_mem

    thread_wide_startup(thread_mem)
}

thread_wide_startup :: proc(thread_mem: ^ThreadMemory)
/*
Called by sync_client_api when the client module has been reloaded.
Threads will eventually return to their tick_lane upon completion.
*/
@export
hot_reload :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
{
    if thread_mem.id == .Master_Prepper {
        sync.barrier_init(& memory.client_api_sync_lock, THREAD_TICK_LANES)
    thread_ctx = thread_mem
    if thread_ctx.id == .Master_Prepper {
        thread_coherent_store(& memory, host_mem)
    }
    memory.host_api.launch_tick_lane_thread(.Atomic_Accountant)
    tick_lane_startup(thread_mem)
}

/*
Called by host_tick_lane_startup
Used for lane-specific startup operations

The lane tick cannot be handled here; its call must be done by the host module.
(We need threads to not be within a client callstack in the event of a hot-reload)
*/
@export
tick_lane_startup :: proc(thread_mem: ^ThreadMemory)
{
    thread_memory = thread_mem
    thread_memory.live_lanes = THREAD_TICK_LANES
    tick_lane()
}

tick_lane :: proc()
{
    dummy : int = 0
    for ;;
    {
        dummy += 1
        if thread_memory.id == .Master_Prepper
        {
            memory.host_api.sync_client_module()
        }
        leader := sync.barrier_wait(& memory.client_api_sync_lock)
    }
    thread_ctx = thread_mem
    thread_ctx.live_lanes = THREAD_TICK_LANES
}

@export
hot_reload :: proc(host_mem: ^HostMemory, thread_mem: ^ThreadMemory)
tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_close: b64)
{

    @thread_local dummy: int = 0;
    dummy += 2
    return true
}

@export
clean_frame :: proc()
{
    @thread_local dummy: int = 0;
    dummy += 1
    return
}
@@ -2,16 +2,35 @@ package sectr
import "core:sync"

HostMemory :: struct {
    host_scratch: [256 * Kilo]byte,
/*
Everything defined for the host module within the client module,
so that the client module has full awareness of the relevant host definitions.
*/

ProcessMemory :: struct {
    // Host
    host_persist_buf: [64 * Mega]byte,
    host_scratch_buf: [32 * Mega]byte,
    host_persist: Odin_Arena,
    host_scratch: Odin_Arena,
    host_api: Host_API,

    // Textual Logging
    logger: Logger,

    // Profiling
    spall_profiler: SpallProfiler,
    // TODO(Ed): Try Superluminal!

    // Multi-threading
    threads: [MAX_THREADS](SysThread),

    client_api_sync_lock: sync.Barrier,
    client_api_hot_reloaded: b64,
    client_api_sync_lock: sync.Barrier,

    // Client Module
    client_api: ModuleAPI,
    client_memory: State,
    host_api: Host_API,
}

Host_API :: struct {
@@ -1,11 +1,25 @@
package sectr

import "core:mem"
Odin_Arena :: mem.Arena

import "core:os"
FileTime :: os.File_Time

import "core:sync"
AtomicMutex :: sync.Atomic_Mutex
thread_coherent_store :: sync.atomic_store

import "core:thread"
SysThread :: thread.Thread

import "core:time"
Duration :: time.Duration

import "codebase:grime"
Logger        :: grime.Logger
SpallProfiler :: grime.SpallProfiler

Kilo :: 1024
Mega :: Kilo * 1024
Giga :: Mega * 1024
@@ -1,10 +1,10 @@
package sectr

// This should be the only global on the client module side.
memory: ^HostMemory
memory: ^ProcessMemory

@(thread_local)
thread_memory: ^ThreadMemory
thread_ctx: ^ThreadMemory

THREAD_TICK_LANES :: 2