WIP: tick lanes were working; currently bootstrapping the job system.
@@ -17,5 +17,17 @@ when ODIN_OS == .Windows
DISABLE_CLIENT_PROFILING :: false
DISABLE_HOST_PROFILING :: false

// Hard constraint for Windows
MAX_THREADS :: 64

// TODO(Ed): We can technically hot-reload this (spin up or down lanes on reloads)
THREAD_TICK_LANES :: 2
THREAD_TICK_LANES :: 2 // Must be at least one for main thread.
THREAD_JOB_WORKERS :: 2 // Must be at least one for latent IO operations.

/*
Job workers are spawned in after tick lanes.
Even if the user adjusts them at runtime in the future,
we'd have all threads drain and respawn them from scratch.
*/
THREAD_JOB_WORKER_ID_START :: THREAD_TICK_LANES
THREAD_JOB_WORKER_ID_END :: (THREAD_TICK_LANES + THREAD_JOB_WORKERS)

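For illustration only (not part of this commit): with the values above, tick lanes take thread IDs 0..1 and job workers take IDs 2..3. A minimal sketch, assuming thread IDs are assigned sequentially:

// Hypothetical helper, not in this diff: tick lanes occupy [0, THREAD_TICK_LANES),
// job workers occupy [THREAD_JOB_WORKER_ID_START, THREAD_JOB_WORKER_ID_END).
id_is_job_worker :: proc(id: int) -> bool {
	return id >= THREAD_JOB_WORKER_ID_START && id < THREAD_JOB_WORKER_ID_END
}
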
@@ -16,11 +16,12 @@ ModuleAPI :: struct {
	write_time: FileTime,
	lib_version : int,

	startup: type_of( startup ),
	tick_lane_startup: type_of( tick_lane_startup),
	hot_reload: type_of( hot_reload ),
	tick_lane: type_of( tick_lane ),
	clean_frame: type_of( clean_frame),
	startup: type_of( startup ),
	tick_lane_startup: type_of( tick_lane_startup),
	hot_reload: type_of( hot_reload ),
	tick_lane: type_of( tick_lane ),
	clean_frame: type_of( clean_frame),
	jobsys_worker_tick: type_of( jobsys_worker_tick)
}

/*
@@ -33,6 +34,10 @@ then prepare for multi-threaded "laned" tick: thread_wide_startup.
startup :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
{
	memory = host_mem
	thread = thread_mem
	grime_set_profiler_module_context(& memory.spall_context)
	grime_set_profiler_thread_buffer(& thread.spall_buffer)
	profile(#procedure)
}

/*
@@ -42,10 +47,13 @@ Threads will eventually return to their tick_lane upon completion.
@export
hot_reload :: proc(host_mem: ^ProcessMemory, thread_mem: ^ThreadMemory)
{
	profile(#procedure)
	thread = thread_mem
	if thread.id == .Master_Prepper {
		grime_set_profiler_module_context(& memory.spall_context)
		sync_store(& memory, host_mem, .Release)
	}
	grime_set_profiler_thread_buffer(& thread.spall_buffer)
}

/*
@@ -58,38 +66,75 @@ The lane tick cannot be handled it, its call must be done by the host module.
@export
tick_lane_startup :: proc(thread_mem: ^ThreadMemory)
{
	thread = thread_mem
	thread.live_lanes = THREAD_TICK_LANES
	if thread_mem.id != .Master_Prepper {
		thread = thread_mem
		grime_set_profiler_thread_buffer(& thread.spall_buffer)
	}
	profile(#procedure)
}

/*

*/
@export
tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_close: b64)
tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_close: b64 = false)
{
	profile_begin("sokol_app: pre_client_tick")
	profile(#procedure)
	@thread_local dummy: int = 0
	dummy += 1

	// profile_begin("sokol_app: pre_client_tick")
	// should_close |= cast(b64) sokol_app.pre_client_frame()
	profile_end()
	@static timer: f64
	if thread.id == .Master_Prepper {
		timer += host_delta_time_ms
		sync_store(& should_close, timer > 5, .Release)
	}
	// profile_end()

	profile_begin("Client Tick")
	profile_end()
	// profile_begin("Client Tick")

	profile_begin("sokol_app: post_client_tick")
	profile_end()
	// @thread_local test_job: TestJobInfo
	// for job_id := 1; job_id < 64; job_id += 1 {
	// 	job_dispatch(test_job, & test_job, .Medium, "Job Test")
	// }

	// profile_end()

	// profile_begin("sokol_app: post_client_tick")
	// profile_end()

	tick_lane_frametime()
	return true
	return sync_load(& should_close, .Acquire)
}

@export
jobsys_worker_tick :: proc() {
	profile("Worker Tick")

	@thread_local dummy: int = 0;
	dummy += 1
}

TestJobInfo :: struct {
	id: int,
}
test_job :: proc(data: rawptr)
{
	profile(#procedure)
	info := cast(^TestJobInfo) data
	// log_print_fmt("Test job succeeded: %v", info.id)
}

tick_lane_frametime :: proc()
{

	profile(#procedure)
}

@export
clean_frame :: proc()
{
	profile(#procedure)
	if thread.id == .Master_Prepper
	{

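For orientation only (not part of this diff): a rough sketch of how a host-side lane thread might drive these exports through ModuleAPI. host_lane_loop and host_frame_delta are hypothetical names; the real host may order these calls differently.

// Hypothetical host-side driver, for illustration.
// Each lane runs tick_lane_startup once, then tick_lane every frame until
// a lane publishes should_close, and finally clean_frame on the way out.
host_lane_loop :: proc(api: ^ModuleAPI, thread_mem: ^ThreadMemory) {
	api.tick_lane_startup(thread_mem)
	for {
		delta_ms, delta_ns := host_frame_delta() // placeholder for the host's frame timer
		if api.tick_lane(delta_ms, delta_ns) do break
	}
	api.clean_frame()
}
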
@@ -15,35 +15,40 @@ client_memory however.

ProcessMemory :: struct {
	// Host
	host_persist_buf: [64 * Mega]byte,
	host_scratch_buf: [32 * Mega]byte,
	host_persist_buf: [32 * Mega]byte,
	host_scratch_buf: [64 * Mega]byte,
	host_persist: Odin_Arena,
	host_scratch: Odin_Arena,
	host_api: Host_API,

	// Textual Logging
	logger: Logger,
	path_logger_finalized: string,

	// Profiling
	spall_profiler: ^SpallProfiler,
	spall_context: Spall_Context,

	// Multi-threading
	threads: [MAX_THREADS](SysThread),

	client_api_hot_reloaded: b64,
	client_api_sync_lock: sync.Barrier,
	threads: [MAX_THREADS](^SysThread),
	job_system: JobSystemContext,
	tick_lanes: int,
	lane_sync: sync.Barrier,
	job_hot_reload_sync: sync.Barrier, // Used to sync jobs with main thread during hot-reload junction.

	// Client Module
	client_api_hot_reloaded: b64,
	client_api: ModuleAPI,
	client_memory: State,
}

Host_API :: struct {
	request_virtual_memory: #type proc(),
	request_virtual_memory: #type proc(),
	request_virtual_mapped_io: #type proc(),
}

ThreadMemory :: struct {
	using _: ThreadWorkerContext,
	live_lanes: int,
	using _: ThreadWorkerContext,

	spall_buffer_backing: [SPALL_BUFFER_DEFAULT_SIZE * 2]byte,
	spall_buffer: Spall_Buffer,
}

@@ -1,8 +1,6 @@
package sectr

ThreadProc :: #type proc(data: rawptr)

IgnoredThreads :: bit_set[ 0 ..< 64 ]
JobIgnoredThreads :: bit_set[ 0 ..< 64 ]

JobProc :: #type proc(data: rawptr)

@@ -11,7 +9,7 @@ JobGroup :: struct {
}

JobPriority :: enum {
	Medium = 0,
	Normal = 0,
	Low,
	High,
}
@@ -22,7 +20,7 @@ Job :: struct {
	data: rawptr,
	// scratch: ^CArena,
	group: ^JobGroup,
	ignored: IgnoredThreads,
	ignored: JobIgnoredThreads,
	dbg_lbl: string,
}

@@ -33,11 +31,11 @@ JobList :: struct {

JobSystemContext :: struct {
	job_lists: [JobPriority]JobList,
	worker_cb: ThreadProc,
	worker_data: rawptr,
	counter: int,
	workers: [] ^ThreadWorkerContext,
	running: b32,
	// worker_cb: ThreadProc,
	// worker_data: rawptr,
	worker_num: int,
	workers: [THREAD_JOB_WORKERS]^ThreadWorkerContext,
	running: b32,
}

ThreadWorkerContext :: struct {
@@ -188,30 +186,58 @@ WorkerID :: enum int {
	Zombo_Vistor,
}

// Hard constraint for Windows
MAX_THREADS :: 64
@(private) div_ceil :: #force_inline proc(a, b: int) -> int { return (a + b - 1) / b }

/*
Threads are set up upfront during the client API's startup.
*/

jobsys_startup :: proc(ctx: ^JobSystemContext, num_workers : int, worker_exec: ThreadProc, worker_data: rawptr) {
	ctx^ = {
		worker_cb = worker_exec,
		worker_data = worker_data,
		counter = 1,
	}
	// Determine number of physical cores
	// Allocate worker contexts based on number of physical cores - 1 (the main thread, managed by the host, is included and assumed to be index 0)
	//
	// num_hw_threads = min(JOB_SYSTEM_MAX_WORKER_THREADS, )
	// jobsys_worker_make :
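	// (Editor sketch, not in this commit) One way the clamp sketched above could look,
	// assuming a core-count query exposed by the host (core_count is a hypothetical name),
	// reserving the tick lanes and respecting the MAX_THREADS hard limit:
	// num_workers = max(1, min(num_workers, core_count() - THREAD_TICK_LANES, MAX_THREADS - THREAD_TICK_LANES))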
make_job_raw :: proc(group: ^JobGroup, data: rawptr, cb: JobProc, ignored_threads: JobIgnoredThreads = {}) -> Job {
	assert(group != nil)
	assert(cb != nil)
	return {cb = cb, data = data, group = group}
}

thread_worker_exec :: proc(_: rawptr) {

job_dispatch :: proc(job: Job, priority: JobPriority = .Normal) {
	assert(job.group != nil)
	// sync_add(& job.group.atomic_counter, 1)
	// if

	// sync_mutex_lock
}

jobsys_shutdown :: proc(ctx: ^JobSystemContext) {
	// Note: it's on you to clean up the memory after the jobs if you use a custom allocator.
// dispatch :: proc(priority: Priority = .Medium, jobs: ..Job, allocator := context.temp_allocator) -> []Job {
// 	_jobs := make([]Job, len(jobs), allocator)
// 	copy(_jobs, jobs)
// 	dispatch_jobs(priority, _jobs)
// 	return _jobs
// }

// Push jobs to the queue for the given priority.
// dispatch_jobs :: proc(priority: Priority, jobs: []Job) {
// 	for &job, i in jobs {
// 		assert(job.group != nil)
// 		intrinsics.atomic_add(&job.group.atomic_counter, 1)
// 		if i < len(jobs) - 1 {
// 			job._next = &jobs[i + 1]
// 		}
// 	}

// 	sync.atomic_mutex_lock(&_state.job_lists[priority].mutex)
// 	jobs[len(jobs) - 1]._next = _state.job_lists[priority].head
// 	_state.job_lists[priority].head = &jobs[0]
// 	sync.atomic_mutex_unlock(&_state.job_lists[priority].mutex)
// }

// Block the current thread until all jobs in the group are finished.
// Other queued jobs are executed while waiting.
// wait :: proc(group: ^Group) {
// 	for !group_is_finished(group) {
// 		try_execute_queued_job()
// 	}
// 	group^ = {}
// }

// Check if all jobs in the group are finished.
// @(require_results)
// group_is_finished :: #force_inline proc(group: ^Group) -> bool {
// 	return intrinsics.atomic_load(&group.atomic_counter) <= 0
// }

}

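For orientation only (not from this commit): the commented-out dispatch/wait code above implies a worker-side step that pops a queued job, runs it, and decrements its group counter so wait()/group_is_finished() can complete. A hedged sketch of that step, where try_execute_queued_job, _state, and the Job._next link are assumptions taken from those comments:

// Hypothetical, for illustration; a real version would scan High priority first.
// try_execute_queued_job :: proc() -> (ran: bool) {
// 	for priority in JobPriority {
// 		list := &_state.job_lists[priority]
// 		sync.atomic_mutex_lock(&list.mutex)
// 		job := list.head
// 		if job != nil { list.head = job._next }
// 		sync.atomic_mutex_unlock(&list.mutex)
// 		if job != nil {
// 			job.cb(job.data)
// 			intrinsics.atomic_sub(&job.group.atomic_counter, 1)
// 			return true
// 		}
// 	}
// 	return false
// }
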
@@ -27,20 +27,29 @@ import "core:os"
process_exit :: os.exit

import "core:prof/spall"
SPALL_BUFFER_DEFAULT_SIZE :: spall.BUFFER_DEFAULT_SIZE
Spall_Context :: spall.Context
Spall_Buffer :: spall.Buffer

import "core:sync"
AtomicMutex :: sync.Atomic_Mutex
sync_store :: sync.atomic_store_explicit
sync_load :: sync.atomic_load_explicit

import "core:thread"
SysThread :: thread.Thread
import threading "core:thread"
SysThread :: threading.Thread
ThreadProc :: threading.Thread_Proc
thread_create :: threading.create
thread_start :: threading.start

import "core:time"
Duration :: time.Duration

import "codebase:grime"
Logger :: grime.Logger
SpallProfiler :: grime.SpallProfiler
Logger :: grime.Logger

grime_set_profiler_module_context :: grime.set_profiler_module_context
grime_set_profiler_thread_buffer :: grime.set_profiler_thread_buffer

Kilo :: 1024
Mega :: Kilo * 1024
@@ -77,6 +86,15 @@ log_print_fmt :: proc( fmt : string, args : ..any, level := LoggerLevel.Info, l
	log.logf( level, fmt, ..args, location = loc )
}

@(deferred_none = profile_end, disabled = DISABLE_CLIENT_PROFILING) profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) { spall._buffer_begin( & memory.spall_profiler.ctx, & memory.spall_profiler.buffer, name, "", loc ) }
@( disabled = DISABLE_CLIENT_PROFILING) profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) { spall._buffer_begin( & memory.spall_profiler.ctx, & memory.spall_profiler.buffer, name, "", loc ) }
@( disabled = DISABLE_CLIENT_PROFILING) profile_end :: #force_inline proc "contextless" () { spall._buffer_end ( & memory.spall_profiler.ctx, & memory.spall_profiler.buffer) }
@(deferred_none = profile_end, disabled = DISABLE_CLIENT_PROFILING)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & memory.spall_context, & thread.spall_buffer, name, "", loc )
}
@(disabled = DISABLE_CLIENT_PROFILING)
profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & memory.spall_context, & thread.spall_buffer, name, "", loc )
}
@(disabled = DISABLE_CLIENT_PROFILING)
profile_end :: #force_inline proc "contextless" () {
	spall._buffer_end( & memory.spall_context, & thread.spall_buffer)
}

@@ -7,5 +7,4 @@ package sectr
//endregion STATIC MEMORY

State :: struct {
	job_system: JobSystemContext,
}