more progress on code 2

This commit is contained in:
2025-10-11 21:24:46 -04:00
parent 05e979907a
commit d25757da61
13 changed files with 630 additions and 35 deletions

View File

@@ -0,0 +1,62 @@
package grime
// TODO(Ed): Review when os2 is done.
// TODO(Ed): Make an async option...
// Copies the file at path_src to path_dst synchronously.
// Returns false (after logging at Error level) when the source cannot be
// stat'd or read, or the destination cannot be written.
file_copy_sync :: proc( path_src, path_dst: string, allocator := context.allocator ) -> b32
{
	// Stat the source first so a missing/inaccessible file is reported
	// distinctly from a read failure.
	{
		_, status_result := file_status( path_src, allocator )
		if status_result != OS_ERROR_NONE {
			// `level` must be passed by name; a positional value would be
			// swallowed by the variadic format args.
			log_fmt("Could not get file info: %v", status_result, level = LoggerLevel.Error )
			return false
		}
	}
	src_content, result := file_read_entire( path_src, allocator )
	if ! result {
		log_fmt( "Failed to read file to copy: %v", path_src, level = LoggerLevel.Error )
		debug_trap()
		return false
	}
	// Release the file contents allocated by file_read_entire on every exit path.
	defer delete( src_content, allocator )
	result = file_write_entire( path_dst, src_content, false )
	if ! result {
		log_fmt( "Failed to copy file: %v", path_dst, level = LoggerLevel.Error )
		debug_trap()
		return false
	}
	return true
}
// Reports whether file_path can be stat'd (i.e. exists and is accessible).
file_exists :: proc( file_path : string, allocator := context.allocator ) -> b32 {
	// Only the error code matters here; discard the file info to avoid an
	// unused-variable error.
	_, result := file_status( file_path, allocator )
	return result == OS_ERROR_NONE
}
// Heuristically reports whether file_path is held open by another process:
// any failure to open the file read-only is treated as "locked".
file_is_locked :: proc( file_path : string ) -> b32 {
	handle, err := file_open(file_path, FS_Open_Readonly)
	if err == OS_ERROR_NONE {
		// Opened fine: not locked. Release the probe handle before reporting.
		file_close(handle)
		return false
	}
	// If the error indicates the file is in use, return true.
	return true
}
// Moves the file cursor back to the start of the file
// (offset 0, whence 0 == seek from beginning).
file_rewind :: proc( file : OS_Handle ) {
	file_seek( file, 0, 0 )
}
// Reads into data; when the read reports end-of-file, rewinds the handle so
// the next call starts over from the beginning of the file.
file_read_looped :: proc( file : OS_Handle, data : []byte ) {
	// The bytes-read count is intentionally discarded; callers rely only on
	// the buffer fill plus the loop-on-EOF behavior.
	_, result_code := file_read( file, data )
	if result_code == OS_ERROR_HANDLE_EOF {
		file_rewind( file )
	}
}

160
code2/grime/logger.odin Normal file
View File

@@ -0,0 +1,160 @@
package grime
import core_log "core:log"
// Maximum message characters per emitted log line; longer messages are
// wrapped onto continuation lines (see logger_interface).
Max_Logger_Message_Width :: 160

// A single structured log record. Not yet populated anywhere in this file —
// retained for the planned entry-retention path (see the commented-out
// `entries` field on Logger).
LoggerEntry :: struct {
	text      : string,
	timestamp : string,
	level     : string,
	location  : string,
}

// File-backed logger state. `id`, when non-empty, is prefixed to each
// message's signature by logger_interface.
Logger :: struct {
	file_path : string,
	file      : OS_Handle,
	id        : string,
	// varena : VArena,
	// entries : Array(LoggerEntry),
}
// Adapts a grime Logger to Odin's runtime logger interface, routing through
// logger_interface at Debug verbosity with the default file-logger options.
to_odin_logger :: proc( logger : ^ Logger ) -> Odin_Logger {
	odin_logger : Odin_Logger
	odin_logger.procedure    = logger_interface
	odin_logger.data         = logger
	odin_logger.lowest_level = LoggerLevel.Debug
	odin_logger.options      = Default_File_Logger_Opts
	return odin_logger
}
// Initializes the logger: opens (or adopts) the backing file, truncates any
// previous contents, and installs this logger as the context's logger.
// Passing an explicit `file` handle skips the open/truncate of `file_path`.
logger_init :: proc( logger : ^ Logger, id : string, file_path : string, file := OS_INVALID_HANDLE )
{
	if file == OS_INVALID_HANDLE
	{
		// logger_interface writes to this handle (str_pfmt_file_ln), so the
		// file must be opened writable — a read-only handle would make every
		// log write fail.
		logger_file, result_code := file_open( file_path, FS_Open_Writeonly | FS_Open_Create )
		assert( result_code == OS_ERROR_NONE, "Log failures are fatal and must never occur at runtime (there is no logging)" )
		logger.file = logger_file
		// Fresh log per run.
		file_truncate( file_path, 0 )
	}
	else {
		logger.file = file
	}
	logger.file_path = file_path
	logger.id        = id

	LOGGER_VARENA_BASE_ADDRESS : uintptr = 2 * Tera
	// Referenced only by the commented-out varena_init below; silenced until
	// that path is re-enabled.
	_ = LOGGER_VARENA_BASE_ADDRESS
	@static vmem_init_counter : uintptr = 0
	// alloc_error : AllocatorError
	// logger.varena, alloc_error = varena_init(
	// 	LOGGER_VARENA_BASE_ADDRESS + vmem_init_counter * 250 * Megabyte,
	// 	1 * Megabyte,
	// 	128 * Kilobyte,
	// 	growth_policy = nil,
	// 	allow_any_resize = true,
	// 	dbg_name = "logger varena",
	// 	enable_mem_tracking = false )
	// verify( alloc_error == .None, "Failed to allocate logger's virtual arena")
	vmem_init_counter += 1

	// TODO(Ed): Figure out another solution here...
	// logger.entries, alloc_error = array_init(Array(LoggerEntry), 8192, runtime.heap_allocator())
	// verify( alloc_error == .None, "Failed to allocate logger's entries array")

	// Reuse the canonical adapter instead of duplicating the literal.
	context.logger = to_odin_logger( logger )
	log("Initialized Logger")

	when false {
		log("This sentence is over 80 characters long on purpose to test the ability of this logger to properfly wrap long as logs with a new line and then at the end of that pad it with the appropraite signature.")
	}
}
// Logger sink conforming to Odin's runtime logger procedure signature.
// Emits: "<first message line> [timestamp] <level header> [id] <location>"
// followed by wrapped continuation lines for oversized messages, and writes
// the whole record to the logger's file.
logger_interface :: proc(
	logger_data : rawptr,
	level : LoggerLevel,
	text : string,
	options : LoggerOptions,
	location := #caller_location )
{
	logger := cast(^ Logger) logger_data

	// Static scratch for the formatted record; zeroed on every call so a
	// previous, longer message cannot leak trailing bytes into this one.
	@static builder_backing : [16 * Kilo] byte; {
		mem_zero( cursor(builder_backing[:]), len(builder_backing) )
	}
	builder := strbuilder_from_bytes( builder_backing[:] )

	// At most Max_Logger_Message_Width characters go on the first line; any
	// remainder is appended after the signature (see oversized handling below).
	first_line_length := len(text) > Max_Logger_Message_Width ? Max_Logger_Message_Width : len(text)
	first_line := transmute(string) text[ 0 : first_line_length ]
	str_pfmt_builder( & builder, "%s ", first_line )
	// str_pfmt_builder( & builder, "%-s ", first_line )

	// Signature
	{
		when TIME_IS_SUPPORTED
		{
			// Only build the "[date time]" prefix when a timestamp option is set.
			if Logger_Full_Timestamp_Opts & options != nil {
				str_pfmt_builder( & builder, "[")
				t := time_now()
				year, month, day := time_date(t)
				hour, minute, second := time_clock(t)
				if .Date in options {
					str_pfmt_builder( & builder, "%d-%02d-%02d ", year, month, day )
				}
				if .Time in options {
					str_pfmt_builder( & builder, "%02d:%02d:%02d", hour, minute, second)
				}
				str_pfmt_builder( & builder, "] ")
			}
		}
		// Level header (e.g. "[ERROR] ") via core:log's shared helper.
		core_log.do_level_header( options, & builder, level )
		if logger.id != "" {
			str_pfmt_builder( & builder, "[%s] ", logger.id )
		}
		core_log.do_location_header( options, & builder, location )
	}

	// Oversized message handling: append the rest of the message in
	// width-limited chunks, one per line, after the signature.
	if len(text) > Max_Logger_Message_Width
	{
		offset := Max_Logger_Message_Width
		bytes := transmute( []u8 ) text
		for left := len(bytes) - Max_Logger_Message_Width; left > 0; left -= Max_Logger_Message_Width
		{
			str_pfmt_builder( & builder, "\n" )
			// Clamp the final chunk to whatever remains past `offset`.
			subset_length := len(text) - offset
			if subset_length > Max_Logger_Message_Width {
				subset_length = Max_Logger_Message_Width
			}
			subset := slice( cursor(bytes)[offset:], subset_length )
			str_pfmt_builder( & builder, "%s", transmute(string) subset )
			offset += Max_Logger_Message_Width
		}
	}
	str_pfmt_file_ln( logger.file, to_string(builder) )
}
// This buffer is used below exclusively to prevent any allocator recursion when verbose logging from allocators.
// This means a single line is limited to a 32k buffer (increase naturally if this SOMEHOW becomes a bottleneck...)
Logger_Allocator_Buffer : [32 * Kilo]u8
// Logs msg at the given level. Currently a stub — the arena-backed forwarding
// to core:log (using Logger_Allocator_Buffer to avoid allocator recursion) is
// not yet enabled.
log :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
	// TODO(Ed): Finish this
	// temp_arena : Arena; arena_init(& temp_arena, Logger_Allocator_Buffer[:])
	// context.allocator = arena_allocator(& temp_arena)
	// context.temp_allocator = arena_allocator(& temp_arena)
	// core_log.log( level, msg, location = loc )
}
// Formatted variant of `log`. Note `level` must be passed by name at call
// sites since it follows the variadic args. Currently a stub — the
// arena-backed forwarding to core:log is not yet enabled.
log_fmt :: proc( fmt : string, args : ..any, level := LoggerLevel.Info, loc := #caller_location ) {
	// TODO(Ed): Finish this
	// temp_arena : Arena; arena_init(& temp_arena, Logger_Allocator_Buffer[:])
	// context.allocator = arena_allocator(& temp_arena)
	// context.temp_allocator = arena_allocator(& temp_arena)
	// core_log.logf( level, fmt, ..args, location = loc )
}

View File

@@ -81,6 +81,13 @@ AllocatorQueryFlag :: enum u64 {
Hint_Per_Frame_Temporary,
Hint_Debug_Support,
}
// Result code reported back by allocator procedures (see the `error` field
// on AllocatorProc_Out). Explicit values keep the wire/ABI encoding stable.
AllocatorError :: enum byte {
	None = 0,
	Out_Of_Memory = 1,
	Invalid_Pointer = 2,
	Invalid_Argument = 3,
	Mode_Not_Implemented = 4,
}
AllocatorQueryFlags :: bit_set[AllocatorQueryFlag; u64]
AllocatorSP :: struct {
type_sig: AllocatorProc,
@@ -106,7 +113,7 @@ AllocatorProc_Out :: struct {
left: int,
max_alloc: int,
min_alloc: int,
continuity_break: b32,
error: AllocatorError,
}
AllocatorQueryInfo :: struct {
save_point: AllocatorSP,

View File

@@ -12,12 +12,69 @@ import "base:intrinsics"
import "base:runtime"
Assertion_Failure_Proc :: runtime.Assertion_Failure_Proc
Logger :: runtime.Logger
debug_trap :: runtime.debug_trap
Odin_Logger :: runtime.Logger
LoggerLevel :: runtime.Logger_Level
LoggerOptions :: runtime.Logger_Options
Random_Generator :: runtime.Random_Generator
slice_copy_overlapping :: runtime.copy_slice
import fmt_io "core:fmt"
str_pfmt_out :: fmt_io.printf
str_pfmt_tmp :: fmt_io.tprintf
str_pfmt :: fmt_io.aprintf // Decided to make aprintf the default. (It will always be the default allocator)
str_pfmt_builder :: fmt_io.sbprintf
str_pfmt_buffer :: fmt_io.bprintf
str_pfmt_file_ln :: fmt_io.fprintln
str_tmp_from_any :: fmt_io.tprint
import "core:log"
Default_File_Logger_Opts :: log.Default_File_Logger_Opts
Logger_Full_Timestamp_Opts :: log.Full_Timestamp_Opts
import core_os "core:os"
// ODIN_OS :: core_os.ODIN_OS
FS_Open_Readonly :: core_os.O_RDONLY
FS_Open_Writeonly :: core_os.O_WRONLY
FS_Open_Create :: core_os.O_CREATE
FS_Open_Trunc :: core_os.O_TRUNC
OS_ERROR_NONE :: core_os.ERROR_NONE
OS_Handle :: core_os.Handle
OS_ERROR_HANDLE_EOF :: core_os.ERROR_HANDLE_EOF
OS_INVALID_HANDLE :: core_os.INVALID_HANDLE
FileFlag_Create :: core_os.O_CREATE
FileFlag_ReadWrite :: core_os.O_RDWR
FileTime :: core_os.File_Time
file_close :: core_os.close
file_open :: core_os.open
file_read :: core_os.read
file_remove :: core_os.remove
file_seek :: core_os.seek
file_status :: core_os.stat
file_truncate :: core_os.truncate
file_write :: core_os.write
file_read_entire :: core_os.read_entire_file
file_write_entire :: core_os.write_entire_file
import "core:strings"
StrBuilder :: strings.Builder
strbuilder_from_bytes :: strings.builder_from_bytes
import "core:slice"
slice_zero :: slice.zero
import "core:time"
TIME_IS_SUPPORTED :: time.IS_SUPPORTED
time_clock :: time.clock
time_date :: time.date
time_now :: time.now
cursor :: proc {
slice_cursor,
}
to_string :: proc {
strings.to_string,
}

73
code2/host/host.odin Normal file
View File

@@ -0,0 +1,73 @@
package host
// TODO(Ed): Remove this
import "core:mem"
Path_Logs :: "../logs"
when ODIN_OS == .Windows
{
Path_Sectr_Module :: "sectr.dll"
Path_Sectr_Live_Module :: "sectr_live.dll"
Path_Sectr_Debug_Symbols :: "sectr.pdb"
}
// Only static memory host has.
host_memory: HostMemory
@(thread_local)
thread_memory: ThreadMemory
// Placeholder entry proc for the main thread's system context (assigned to
// system_ctx.procedure in `main`); the OS created this thread, so the body is
// intentionally empty.
master_prepper_proc :: proc(thread: ^SysThread) {}
// Host entry point: sets up scratch allocation, stamps this thread's worker
// identity, copies the client module to a "live" path, loads it, resolves its
// entry symbols, and hands control to the client's startup.
main :: proc()
{
	// TODO(Ed): Change this
	host_scratch: mem.Arena; mem.arena_init(& host_scratch, host_memory.host_scratch[:])
	context.allocator      = mem.arena_allocator(& host_scratch)
	context.temp_allocator = context.allocator

	thread_memory.index = .Master_Prepper
	thread_id := thread_current_id()
	_ = thread_id // TODO(Ed): Stash on thread_memory once the system context carries it.
	{
		using thread_memory
		system_ctx.creation_allocator = {}
		system_ctx.procedure          = master_prepper_proc
		when ODIN_OS == .Windows {
			// system_ctx.win32_thread = w32_get_current_thread()
			// system_ctx.win32_thread_id = w32_get_current_thread_id()
			system_ctx.id = cast(int) system_ctx.win32_thread_id
		}
	}

	// Use the path constant so the platform `when` block above remains the
	// single source of truth (was a hard-coded "sectr.dll").
	write_time, result := file_last_write_time_by_name(Path_Sectr_Module)
	if result != OS_ERROR_NONE {
		panic_contextless( "Could not resolve the last write time for sectr")
	}
	_ = write_time // TODO(Ed): Compare against this in sync_client_api for hot-reload detection.

	// Give the build a moment to finish writing the module before copying.
	thread_sleep( Millisecond * 100 )
	live_file := Path_Sectr_Live_Module
	// Load from a copy so the original module stays unlocked for rebuilds.
	file_copy_sync( Path_Sectr_Module, live_file, allocator = context.temp_allocator )
	{
		lib, load_result := os_lib_load( live_file )
		if ! load_result {
			panic( "Failed to load the sectr module." )
		}
		startup    := cast( type_of( host_memory.client_api.startup   )) os_lib_get_proc(lib, "startup")
		hot_reload := cast( type_of( host_memory.client_api.hot_reload)) os_lib_get_proc(lib, "hot_reload")
		if startup    == nil do panic("Failed to load sectr.startup symbol" )
		if hot_reload == nil do panic("Failed to load sectr.hot_reload symbol" )
		host_memory.client_api.lib        = lib
		host_memory.client_api.startup    = startup
		host_memory.client_api.hot_reload = hot_reload
	}
	host_memory.host_api.sync_client_module = sync_client_api
	host_memory.client_api.startup(& host_memory, & thread_memory)
}
// Exported to the client module via host_memory.host_api.sync_client_module;
// intended to detect a rebuilt client module and reload it.
@export
sync_client_api :: proc()
{
	// Fill out detection and reloading of client api.
	// TODO(Ed): Compare the module's last write time and re-resolve
	// startup/hot_reload when it changes.
}

View File

@@ -1,24 +0,0 @@
package host
Path_Logs :: "../logs"
when ODIN_OS == .Windows
{
Path_Sectr_Module :: "sectr.dll"
Path_Sectr_Live_Module :: "sectr_live.dll"
Path_Sectr_Debug_Symbols :: "sectr.pdb"
}
// Only static memory host has.
host_memory: HostMemory
main :: proc()
{
host_memory.host_api.sync_client_module = sync_client_api
host_memory.client_api.startup(& host_memory)
}
@export
sync_client_api :: proc()
{
// Fill out detection and reloading of client api.
}

View File

@@ -14,11 +14,39 @@ import "base:runtime"
// Assertion_Failure_Proc :: runtime.Assertion_Failure_Proc
// Logger :: runtime.Logger
import "core:dynlib"
os_lib_load :: dynlib.load_library
os_lib_unload :: dynlib.unload_library
os_lib_get_proc :: dynlib.symbol_address
import core_os "core:os"
file_last_write_time_by_name :: core_os.last_write_time_by_name
OS_ERROR_NONE :: core_os.ERROR_NONE
import "core:sync"
thread_current_id :: sync.current_thread_id
import "core:time"
Millisecond :: time.Millisecond
Second :: time.Second
Duration :: time.Duration
duration_seconds :: time.duration_seconds
thread_sleep :: time.sleep
import "core:thread"
SysThread :: thread.Thread
import grime "codebase:grime"
file_copy_sync :: grime.file_copy_sync
import "codebase:sectr"
Client_API :: sectr.ModuleAPI
HostMemory :: sectr.HostMemory
Client_API :: sectr.ModuleAPI
HostMemory :: sectr.HostMemory
ThreadMemory :: sectr.ThreadMemory
Kilo :: 1024
Mega :: Kilo * 1024
Giga :: Mega * 1024
Tera :: Giga * 1024

View File

@@ -19,9 +19,10 @@ ModuleAPI :: struct {
StartupContext :: struct {}
@export
startup :: proc(host_mem: ^HostMemory)
startup :: proc(host_mem: ^HostMemory, thread_mem: ^ThreadMemory)
{
dummy : int = 0
dummy += 1
thread_wide_startup()
}
@@ -32,7 +33,7 @@ thread_wide_startup :: proc()
}
@export
hot_reload :: proc()
hot_reload :: proc(host_mem: ^HostMemory, thread_mem: ^ThreadMemory)
{
}

View File

@@ -1,6 +1,8 @@
package sectr
HostMemory :: struct {
host_scratch: [256 * Kilo]byte,
client_api: ModuleAPI,
client_memory: ^State,
host_api: Host_API,
@@ -14,3 +16,7 @@ Host_API :: struct {
sync_client_module : #type proc(),
}
ThreadMemory :: struct {
using _: ThreadWorkerContext,
}

View File

@@ -0,0 +1,217 @@
package sectr
// Entry signature for a worker thread.
ThreadProc :: #type proc(data: rawptr)
// One bit per worker index a job may opt out of; sized to the hard 64-thread
// cap (JOB_SYSTEM_MAX_WORKER_THREADS).
IgnoredThreads :: bit_set[ 0 ..< 64 ]
// Signature of a job's callback.
JobProc :: #type proc(data: rawptr)
// Tracks a batch of related jobs. NOTE(review): presumably incremented on
// submit / decremented on completion — confirm once the scheduler lands.
JobGroup :: struct {
	counter: u64,
}
// Medium is the zero value and therefore the default priority.
JobPriority :: enum {
	Medium = 0,
	Low,
	High,
}
// A single queued unit of work (intrusive singly-linked via `next`).
Job :: struct {
	next: ^Job,
	cb: JobProc,
	data: rawptr,
	// scratch: ^CArena,
	group: ^JobGroup,
	ignored: IgnoredThreads,  // workers that must not pick this job up
	dbg_lbl: string,          // debug-only label for tracing
}
// Mutex-guarded intrusive list head, one per priority level.
JobList :: struct {
	head: ^Job,
	mutex: AtomicMutex,
}
// Shared state for the job system: one queue per priority, the worker entry
// callback + its data, and the set of worker contexts.
JobSystemContext :: struct {
	job_lists: [JobPriority]JobList,
	worker_cb: ThreadProc,
	worker_data: rawptr,
	counter: int,  // see jobsys_startup: starts at 1 (the main thread)
	workers: [] ^ThreadWorkerContext,
	running: b32,
}
// Per-thread bookkeeping: the OS thread handle/context plus its WorkerID slot.
ThreadWorkerContext :: struct {
	system_ctx: Thread,
	index: WorkerID,
}
// Whimsical identities for worker threads. Only the first explicitly-numbered
// entries (0..15) are pinned; the rest continue implicitly. Note there are far
// more names than JOB_SYSTEM_MAX_WORKER_THREADS (64) — the surplus are spares.
WorkerID :: enum int {
	Master_Prepper = 0,  // the main thread (see thread_memory.index in the host)
	Atomic_Accountant = 1,
	Branch_Mispredictor = 2,
	Callstack_Canopy = 3,
	Deadlock_Daemon = 4,
	Fencepost_Fiddler = 5,
	Goto_Goon = 6,
	Hot_Path_Hitchhiker = 7,
	Lock_Free_Liar = 8,
	Panic_As_A_Service = 9,
	Race_Condition_Gambler = 10,
	Spinlock_Spelunker = 11,
	Thread_Local_Tourist = 12,
	Unattended_Child = 13,
	Volatile_Vandal = 14,
	While_True_Wanderer = 15,
	API_Apologist,
	Artifical_Sweetener,
	Assertion_Avenger,
	Async_Antagonist,
	Black_Box_Provider,
	Bit_Rot_Repacker,
	Big_O_Admirer,
	Blitting_Bandit,
	Blockchain_Believer,
	Blue_Caller,
	Blue_Screen_Shopper,
	Breakpoint_Bandit,
	Buffer_Baron,
	Cafecito_Barista,
	Callback_Operator,
	Cache_Concierge,
	Carpe_Datum,
	Chief_Synergy_Officer,
	Cipher_Clerk,
	Conscripted_Camper,
	Dean_Of_Misplaced_Delegation,
	Dereference_Doctorate,
	Checkbox_Validator,
	Credible_Threat,
	Dead_Drop_Delegate,
	Deadline_Denialist,
	DMA_Desperado,
	Dump_Curator,
	Edge_Case_Evangelist,
	Exception_Excavator,
	Feature_Creeper,
	Fitness_Unpacker,
	Flop_Flipper,
	Floating_Point_Propoganda,
	Forgets_To_Check,
	Global_Guardian,
	Ghost_Protocols,
	Halting_Solver,
	Handshake_Hypeman,
	Headcount_Hoarder,
	Heisenbug_Hunter,
	Heuristic_Hypnotist,
	Hotfix_Hooligan,
	Idle_Malware,
	Implementation_Detailer,
	Interrupt_Ignorer,
	Interrupt_Insurgent,
	Jank_Jockey,
	Jefe_De_Errores,
	Kickoff_Holiday,
	Kilobyte_Kingpin,
	Latency_Lover,
	Leeroy_Jenkins,
	Legacy_Liaison,
	Loop_Lobbyist,
	Linter_Lamenter,
	Low_Hanging_Fruit_Picker,
	Malloc_Maverick,
	Malpractice_Mitigator,
	Merge_Conflict_Mediator,
	Memory_Mangler,
	Mañana_Manager,
	Minimum_Wage_Multiplexer,
	Monad_Masquerader,
	NaN_Propagator,
	NDA_Negotiator,
	Null_Pointer_Enthusiast,
	Off_By_One_Offender,
	On_Call_Intern,
	Onboarding_Overlord,
	Overflow_Investor,
	Out_Of_Bounds_Outlaw,
	Page_Fault_Pioneer,
	Patient_Zero_Pollinator,
	Payload_Plunderer,
	Perpetual_Peon,
	Phishing_Pharmacist,
	Pipeline_Plumber,
	Pointer_Pilgrim,
	Production_Pusher,
	Query_Gremlin,
	Red_Tape_Renderer,
	Resting_Receptionist,
	Quantum_Quibbler,
	Regex_Rancher,
	Register_Riveter,
	Register_Spill_Rancher,
	Roadmap_Revisionist,
	Runtime_Ruffian,
	Sabbatical_Scheduler,
	Scope_Creep_Shepherd,
	Shift_Manager,
	Segfault_Stretcher,
	Siesta_Scheduler,
	Singleton_Sinner,
	Sleeper_Cell_Spammer,
	Spaghetti_Chef,
	Speculative_Skeptic,
	Stack_Smuggler,
	Techdebt_Treasurer,
	Tenured_Trapper,
	Triage_Technician,
	Tunnel_Fisherman,
	Undefined_Behavior_Brokerage,
	Unreachable_Utopian,
	Unicode_Usurper,
	Unsafe_Advocate,
	Unwind_Understudy,
	Voltage_Vampire,
	Vibe_Checker,
	Virtual_Vagrant,
	Void_Voyager,
	Waiting_Room_Warden,
	Weltschmerz_Worker,
	Write_Barrier_Warden,
	XORcist,
	Yellowpage_Dialer,
	Zeroring_Comissioner,
	Zero_Cost_Commando,
	Zero_Day_Dreamer,
	Zombie_Zookeeper,
	Zombo_Vistor,
}
// Hard constraint for Windows
// (also matches the width of the IgnoredThreads bit_set above).
JOB_SYSTEM_MAX_WORKER_THREADS :: 64
/*
Threads are setup upfront during the client API's startup.
*/
// Initializes the job system context. Worker threads are not spawned here yet
// (see the TODO sketch below); only the worker callback/data pair and the
// counter — starting at 1 for the main thread — are recorded.
jobsys_startup :: proc(ctx: ^JobSystemContext, num_workers : int, worker_exec: ThreadProc, worker_data: rawptr) {
	ctx^ = {
		worker_cb = worker_exec,
		worker_data = worker_data,
		counter = 1, // main thread counts as the first participant
	}
	// Determine number of physical cores
	// Allocate worker contextes based on number of physical cores - 1 (main thread managed by host included assumed to be index 0)
	//
	// num_hw_threads = min(JOB_SYSTEM_MAX_WORKER_THREADS, )
	// jobsys_worker_make :
}
// Worker thread main loop. Stub — will pull jobs from the priority lists once
// the scheduler is implemented.
thread_worker_exec :: proc(_: rawptr) {
}
// Tears down the job system. Stub — should signal `running = false` and join
// workers once they exist.
jobsys_shutdown :: proc(ctx: ^JobSystemContext) {
}

View File

@@ -1,3 +1,12 @@
package sectr
import "core:sync"
AtomicMutex :: sync.Atomic_Mutex
import "core:thread"
Thread :: thread.Thread
Kilo :: 1024
Mega :: Kilo * 1024
Giga :: Mega * 1024
Tera :: Giga * 1024

View File

@@ -3,7 +3,6 @@ package sectr
// This should be the only global on client module side.
host_memory: ^HostMemory
State :: struct {
job_system: JobSystemContext,
}