WIP: More progress on setting grime back up.

2025-10-16 14:15:26 -04:00
parent 3958fac3e0
commit b4f0806d1b
14 changed files with 358 additions and 61 deletions

.gitignore vendored
View File

@@ -8,17 +8,21 @@ build/**
# folders
assets/TX-02-1WN9N6Q8
thirdparty/harfbuzz/**
thirdparty/ini/**
thirdparty/sokol/**
thirdparty/sokol-tools/**
!**/sokol/app/**
!**/sokol/gfx/**
thirdparty/harfbuzz/*
!thirdparty/harfbuzz/harfbuzz.odin
thirdparty/ini/*
thirdparty/sokol/*
!thirdparty/sokol/app/
!thirdparty/sokol/gfx/
!thirdparty/sokol/gp/
thirdparty/sokol-tools/*
thirdparty/stb/*
!thirdparty/stb/truetype/stb_truetype.odin
toolchain/**
!**/Odin/base/**
!**/Odin/core/**
!**/Odin/vendor/**
toolchain/Odin/*
!toolchain/Odin/base
!toolchain/Odin/core
!toolchain/Odin/vendor
# logs
logs
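# Note: the switch from `dir/**` excludes to `dir/*` plus trailing-slash negations is presumably
# because git cannot re-include files whose parent directory is itself excluded; ignoring only the
# direct children keeps the negated subdirectories trackable. A hypothetical illustration:
#   thirdparty/example/*        ignore direct children only
#   !thirdparty/example/keep/   this subdirectory (and the files inside it) stays tracked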

View File

@@ -187,7 +187,7 @@ allocator_query :: proc(ainfo := context.allocator, loc := #caller_location) ->
out: AllocatorQueryInfo; resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Query, loc = loc}, transmute(^AllocatorProc_Out) & out)
return out
}
mem_free_ainfo :: proc(mem: []byte, ainfo: AllocatorInfo, loc := #caller_location) {
mem_free_ainfo :: proc(mem: []byte, ainfo := context.allocator, loc := #caller_location) {
assert(ainfo.procedure != nil)
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Free, old_allocation = mem, loc = loc}, & {})
}
@@ -205,7 +205,7 @@ mem_save_point :: proc(ainfo := context.allocator, loc := #caller_location) -> A
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .SavePoint, loc = loc}, & out)
return out.save_point
}
mem_alloc :: proc(size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo : $Type = context.allocator, loc := #caller_location) -> []byte {
mem_alloc :: proc(size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo : $Type = context.allocator, loc := #caller_location) -> ([]byte, AllocatorError) {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
@@ -216,7 +216,7 @@ mem_alloc :: proc(size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero:
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return output.allocation
return output.allocation, output.error
}
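// Minimal caller-side sketch of the updated contract (hypothetical procedure and size; Kilo is the
// package's existing constant), assuming mem_alloc now returns the allocation alongside its error:
example_mem_alloc_usage :: proc() {
block, error := mem_alloc(64 * Kilo) // ainfo defaults to context.allocator
if error != .None do return // the AllocatorError must now be handled explicitly
defer mem_free_ainfo(block) // release through the same allocator interface
// ... use block ...
}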
mem_grow :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator, loc := #caller_location) -> []byte {
assert(ainfo.procedure != nil)

View File

@@ -2,6 +2,7 @@ package grime
// Below should be defined per-package
@(disabled = ODIN_DEBUG == false)
ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location ) {
if condition do return
log_print( msg, LoggerLevel.Warning, location )

View File

@@ -8,21 +8,20 @@ I didn't want to make the HMapZPL impl with the [dynamic] array for now to isola
Update 2024-5-26:
TODO(Ed): Raw_Dynamic_Array is defined within base:runtime/core.odin and exposes what we need for worst case hot-reloads.
So it's best to go back to regular dynamic arrays at some point.
Update 2025-5-12:
I can use either... so I'll just keep both
*/
ArrayHeader :: struct ( $ Type : typeid ) {
backing : AllocatorInfo,
dbg_name : string,
capacity : int,
num : int,
data : [^]Type,
ArrayHeader :: struct ($Type: typeid) {
backing: Odin_Allocator,
dbg_name: string,
fixed_cap: b64,
capacity: int,
num: int,
data: [^]Type,
}
Array :: struct ( $ Type : typeid ) {
using header : ^ArrayHeader(Type),
Array :: struct ($Type: typeid) {
using header: ^ArrayHeader(Type),
}
array_underlying_slice :: proc(s: []($ Type)) -> Array(Type) {
@@ -31,28 +30,26 @@ array_underlying_slice :: proc(s: []($ Type)) -> Array(Type) {
array := cursor(to_bytes(s))[ - header_size]
return
}
array_to_slice :: #force_inline proc "contextless" ( using self : Array($ Type) ) -> []Type { return slice( data, int(num)) }
array_to_slice_capacity :: #force_inline proc "contextless" ( using self : Array($ Type) ) -> []Type { return slice( data, int(capacity)) }
array_grow_formula :: proc( value : u64 ) -> u64 {
result := (2 * value) + 8
return result
}
array_grow_formula :: #force_inline proc "contextless" (value: int) -> int { return (2 * value) + 8 }
array_init :: proc( $Array_Type : typeid/Array($Type), capacity : u64,
allocator := context.allocator, fixed_cap : b32 = false, dbg_name : string = ""
) -> ( result : Array(Type), alloc_error : AllocatorError )
//region Lifetime & Memory Resize Operations
array_init :: proc( $Array_Type : typeid / Array($Type), capacity: int,
allocator := context.allocator, fixed_cap: b64 = false, dbg_name: string = ""
) -> (result: Array(Type), alloc_error: AllocatorError)
{
header_size := size_of(ArrayHeader(Type))
array_size := header_size + int(capacity) * size_of(Type)
raw_mem : rawptr
raw_mem, alloc_error = alloc( array_size, allocator = allocator )
raw_mem: []byte
raw_mem, alloc_error = mem_alloc(array_size, ainfo = allocator)
// log( str_fmt_tmp("array reserved: %d", header_size + int(capacity) * size_of(Type) ))
if alloc_error != AllocatorError.None do return
result.header = cast( ^ArrayHeader(Type)) raw_mem
result.header = cast( ^ArrayHeader(Type)) cursor(raw_mem)
result.backing = allocator
result.dbg_name = dbg_name
result.fixed_cap = fixed_cap
@@ -60,3 +57,137 @@ array_init :: proc( $Array_Type : typeid/Array($Type), capacity : u64,
result.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) result.header)[ 1:]
return
}
array_free :: proc(self: Array($Type)) {
self.data = nil
free(self.header, self.backing)
}
array_grow :: proc(self: ^Array($Type), min_capacity: int) -> AllocatorError {
new_capacity := array_grow_formula(self.capacity)
if new_capacity < min_capacity do new_capacity = min_capacity
return array_set_capacity( self, new_capacity )
}
array_resize :: proc(self: ^Array($Type), num: int) -> AllocatorError {
if self.capacity < num {
grow_result := array_grow( self, num )
if grow_result != AllocatorError.None do return grow_result
}
self.num = num
return AllocatorError.None
}
array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity: int) -> AllocatorError
{
if new_capacity == self.capacity do return AllocatorError.None
if new_capacity < self.num { self.num = new_capacity; return AllocatorError.None }
header_size :: size_of(ArrayHeader(Type))
new_size := header_size + new_capacity * size_of(Type)
old_size := header_size + self.capacity * size_of(Type)
// TODO(Ed): You were here..
new_mem, result_code := resize_non_zeroed( self.header, old_size, new_size, mem.DEFAULT_ALIGNMENT, allocator = self.backing )
if result_code != AllocatorError.None {
ensure( false, "Failed to allocate for new array capacity" )
log_print( "Failed to allocate for new array capacity", level = LogLevel.Warning )
return result_code
}
if new_mem == nil { ensure(false, "new_mem is nil but no allocation error"); return result_code }
self.header = cast( ^ArrayHeader(Type)) raw_data(new_mem);
self.header.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) self.header)[ 1:]
self.header.capacity = new_capacity
self.header.num = self.num
return result_code
}
//endregion Lifetime & Memory Resize Operations
// Assumes non-overlapping memory for items and appendee
array_append_array :: proc(self: ^Array($Type), other : Array(Type)) -> AllocatorError {
if self.num + other.num > self.capacity {
grow_result := array_grow( self, self.num + other.num )
if grow_result != AllocatorError.None do return grow_result
}
copy_non_overlaping(self.data[self.num:], other.data, other.num)
self.num += other.num
return AllocatorError.None
}
// Assume non-overlapping memory for items and appendee
array_append_slice :: proc(self : ^Array($Type), items: []Type) -> AllocatorError {
if self.num + len(items) > self.capacity {
grow_result := array_grow(self, self.num + len(items))
if grow_result != AllocatorError.None do return grow_result
}
copy_non_overlaping(self.data[self.num:], cursor(items), len(items))
self.num += len(items)
return AllocatorError.None
}
array_append_value :: proc(self: ^Array($Type), value: Type) -> AllocatorError {
if self.header.num == self.header.capacity {
grow_result := array_grow( self, self.header.capacity )
if grow_result != AllocatorError.None do return grow_result
}
self.header.data[ self.header.num ] = value
self.header.num += 1
return AllocatorError.None
}
array_append_at_value :: proc(self : ^Array($Type), item: Type, id: int) -> AllocatorError {
ensure(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
id := id; {
// TODO(Ed): Not sure I want this...
if id >= self.num do id = self.num
if id < 0 do id = 0
}
if self.capacity < self.num + 1 {
grow_result := array_grow( self, self.capacity )
if grow_result != AllocatorError.None do return grow_result
}
// libc.memmove( ptr_offset(target, 1), target, uint(num - id) * size_of(Type) )
copy(self.data[id + 1:], self.data[id:], (self.num - id) * size_of(Type))
self.data[id] = item
self.num += 1
return AllocatorError.None
}
// Assumes non-overlapping memory for items.
array_append_at_slice :: proc(self : ^Array($Type ), items: []Type, id: int) -> AllocatorError {
ensure(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
id := id
if id >= self.num { return array_append_slice(self, items) }
if self.num + len(items) > self.capacity {
grow_result := array_grow( self, self.capacity )
if grow_result != AllocatorError.None do return grow_result
}
// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE
ensure(false, "time to check....")
mem_copy (self.data[id + len(items):], self.data[id:], (self.num - id) * size_of(Type))
mem_copy_non_overlaping(self.data[id:], cursor(items), len(items) * size_of(Type) )
self.num += len(items)
return AllocatorError.None
}
array_back :: #force_inline proc "contextless" ( self : Array($Type) ) -> Type { assert(self.num > 0); return self.data[self.num - 1] }
array_clear :: #force_inline proc "contextless" (self: Array($Type), zero_data: b32 = false) {
if zero_data do zero(self.data, int(self.num) * size_of(Type))
self.num = 0
}
array_fill :: proc(self: Array($Type), begin, end: int, value: Type) -> b32 {
ensure(end - begin <= self.num, "Fill range is larger than the array's element count")
ensure(end <= self.num, "Fill range ends beyond the array's element count")
if (end - begin > self.num) || (end > self.num) do return false
fill(slice(self.data[begin:], end - begin), value)
return true
}
// Pushes a value onto the array (will not grow if at capacity; use append when growth is needed)
array_push_back :: #force_inline proc "contextless" (self: Array($Type), value: Type) -> b32 {
if self.num == self.capacity { return false }
self.data[self.num] = value
self.num += 1
return true
}
array_remove_at :: proc(self: Array($Type), id: int) {
verify( id < self.num, "Attempted to remove from an index larger than the array" )
mem_copy(self.data[id:], self.data[id + 1:], (self.num - id - 1) * size_of(Type))
self.num -= 1
}
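// Minimal usage sketch of the Array container above (hypothetical procedure, element type, and
// capacity), assuming the init/append/free procedures behave as defined in this file:
example_array_usage :: proc() {
arr, error := array_init(Array(int), 64)
if error != AllocatorError.None do return
defer array_free(arr)
array_append_value(& arr, 42) // grows via array_grow_formula once capacity is hit
for value in array_to_slice(arr) { _ = value }
}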

View File

@@ -56,10 +56,10 @@ kt1cx_init :: proc(info: KT1CX_Info, m: KT1CX_InfoMeta, result: ^KT1CX_Byte) {
assert(m.cell_depth > 0)
assert(m.table_size >= 4 * Kilo)
assert(m.type_width > 0)
table_raw := transmute(SliceByte) mem_alloc(m.table_size * m.cell_size, ainfo = allocator(info.backing_table))
slice_assert(transmute([]byte) table_raw)
table_raw.len = m.table_size
result.table = transmute([]byte) table_raw
table_raw, error := mem_alloc(m.table_size * m.cell_size, ainfo = allocator(info.backing_table))
assert(error == .None); slice_assert(table_raw)
(transmute(^SliceByte) & table_raw).len = m.table_size
result.table = table_raw
}
kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) {
cell_cursor := cursor(kt.table)
@@ -118,7 +118,7 @@ kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte {
}
}
}
kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: AllocatorInfo, m: KT1CX_ByteMeta) -> ^byte {
kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: Odin_Allocator, m: KT1CX_ByteMeta) -> ^byte {
hash_index := kt1cx_slot_id(kt, key, m)
cell_offset := uintptr(hash_index) * uintptr(m.cell_size)
cell_cursor := cursor(kt.table)[cell_offset:] // KT1CX_Cell(Type) cell = kt.table[hash_index]
@@ -145,7 +145,7 @@ kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: Alloca
continue
}
else {
new_cell := mem_alloc(m.cell_size, ainfo = allocator(backing_cells))
new_cell, _ := mem_alloc(m.cell_size, ainfo = backing_cells)
curr_cell.next = raw_data(new_cell)
slot = transmute(^KT1CX_Byte_Slot) cursor(new_cell)[m.slot_key_offset:]
slot.occupied = true

View File

@@ -18,7 +18,7 @@ kt1l_populate_slice_a2_Slice_Byte :: proc(kt: ^[]byte, backing: AllocatorInfo, v
assert(kt != nil)
if num_values == 0 { return }
table_size_bytes := num_values * int(m.slot_size)
kt^ = mem_alloc(table_size_bytes, ainfo = transmute(Odin_Allocator) backing)
kt^, _ = mem_alloc(table_size_bytes, ainfo = transmute(Odin_Allocator) backing)
slice_assert(kt ^)
kt_raw : SliceByte = transmute(SliceByte) kt^
for id in 0 ..< cast(uintptr) num_values {

View File

@@ -2,6 +2,9 @@ package grime
import core_log "core:log"
// TODO(Ed): This logger doesn't support multi-threading.
// TODO(Ed): Look into Lottes's wait-free logger.
Max_Logger_Message_Width :: 160
LoggerEntry :: struct {

View File

@@ -46,6 +46,8 @@ slice_copy :: #force_inline proc "contextless" (dst, src: $SliceType / []$Type)
return n
}
slice_fill :: #force_inline proc "contextless" (s: $SliceType / []$Type, value: Type) { memory_fill(cursor(s), value, len(s)) }
@(require_results) slice_to_bytes :: #force_inline proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: #force_inline proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }

View File

@@ -0,0 +1,112 @@
/*
This was a tracking allocator made to kill off various bugs left in grime's pool & slab allocators.
It doesn't perform that well on a per-frame basis and should be avoided for general memory debugging.
It only verifies that allocations within the allocator don't collide and that deallocations only occur for memory that was actually allocated.
I'm keeping it around as an artifact & for future allocators I may make.
*/
package grime
MemoryTrackerEntry :: struct {
start, end : rawptr,
}
MemoryTracker :: struct {
parent : ^MemoryTracker,
name : string,
entries : Array(MemoryTrackerEntry),
}
Track_Memory :: true
@(disabled = Track_Memory == false)
memtracker_clear :: proc (tracker: MemoryTracker) {
log_print_fmt("Clearing tracker: %v", tracker.name)
memtracker_dump_entries(tracker);
array_clear(tracker.entries)
}
@(disabled = Track_Memory == false)
memtracker_init :: proc (tracker: ^MemoryTracker, allocator: Odin_Allocator, num_entries: int, name: string) {
tracker.name = name
error: AllocatorError
tracker.entries, error = make( Array(MemoryTrackerEntry), num_entries, dbg_name = name, allocator = allocator )
if error != AllocatorError.None do fatal("Failed to allocate memory tracker's hashmap");
}
@(disabled = Track_Memory == false)
memtracker_register :: proc(tracker: ^MemoryTracker, new_entry: MemoryTrackerEntry )
{
profile(#procedure)
if tracker.entries.num == tracker.entries.capacity {
ensure(false, "Memory tracker entries array full, can no longer register any more allocations")
return
}
for idx in 0..< tracker.entries.num
{
entry := & tracker.entries.data[idx]
if new_entry.start > entry.start do continue
if (entry.end < new_entry.start) {
msg := str_pfmt("Detected a collision:\nold_entry: %v -> %v\nnew_entry: %v -> %v | %v", entry.start, entry.end, new_entry.start, new_entry.end, tracker.name )
ensure( false, msg )
memtracker_dump_entries(tracker ^)
}
array_append_at(& tracker.entries, new_entry, idx)
log_print_fmt("Registered: %v -> %v | %v", new_entry.start, new_entry.end, tracker.name)
return
}
array_append( & tracker.entries, new_entry )
log_print_fmt("Registered: %v -> %v | %v", new_entry.start, new_entry.end, tracker.name )
}
@(disabled = Track_Memory == false)
memtracker_register_auto_name :: #force_inline proc(tracker: ^MemoryTracker, start, end: rawptr) {
memtracker_register( tracker, {start, end})
}
@(disabled = Track_Memory == false)
memtracker_register_auto_name_slice :: #force_inline proc( tracker : ^MemoryTracker, slice : []byte ) {
memtracker_register( tracker, { raw_data(slice), transmute(rawptr) & cursor(slice)[len(slice) - 1] })
}
@(disabled = Track_Memory == false)
memtracker_unregister :: proc( tracker : MemoryTracker, to_remove : MemoryTrackerEntry )
{
profile(#procedure)
entries := array_to_slice(tracker.entries)
for idx in 0..< tracker.entries.num
{
entry := & entries[idx]
if entry.start == to_remove.start {
if (entry.end == to_remove.end || to_remove.end == nil) {
log_print_fmt("Unregistered: %v -> %v | %v", to_remove.start, to_remove.end, tracker.name );
array_remove_at(tracker.entries, idx)
return
}
ensure(false, str_pfmt_tmp("Found an entry with the same start address but end address was different:\nentry : %v -> %v\nto_remove: %v -> %v | %v", entry.start, entry.end, to_remove.start, to_remove.end, tracker.name ))
memtracker_dump_entries(tracker)
}
}
ensure(false, str_pfmt_tmp("Attempted to unregister an entry that was not tracked: %v -> %v | %v", to_remove.start, to_remove.end, tracker.name))
memtracker_dump_entries(tracker)
}
@(disabled = Track_Memory == false)
memtracker_check_for_collisions :: proc ( tracker : MemoryTracker )
{
profile(#procedure)
// entries := array_to_slice(tracker.entries)
for idx in 1 ..< tracker.entries.num {
// Check to make sure each allocations adjacent entries do not intersect
left := & tracker.entries.data[idx - 1]
right := & tracker.entries.data[idx]
collided := left.start > right.start || left.end > right.end
if collided {
msg := str_pfmt_tmp("Memory tracker detected a collision:\nleft: %v\nright: %v | %v", left, right, tracker.name )
memtracker_dump_entries(tracker)
}
}
}
@(disabled = Track_Memory == false)
memtracker_dump_entries :: proc( tracker : MemoryTracker ) {
log_print( "Dumping Memory Tracker:")
for idx in 0 ..< tracker.entries.num {
entry := & tracker.entries.data[idx]
log_print_fmt("%v -> %v", entry.start, entry.end)
}
}
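// Minimal usage sketch (hypothetical procedure, sizes, and tracker name), assuming Track_Memory is
// enabled and that context.allocator is compatible with the Odin_Allocator alias:
example_memtracker_usage :: proc() {
tracker: MemoryTracker
memtracker_init(& tracker, context.allocator, 1024, "example tracker")
block, error := mem_alloc(4 * Kilo)
if error != .None do return
memtracker_register_auto_name_slice(& tracker, block)
// ... use block ...
memtracker_unregister(tracker, { raw_data(block), nil }) // end == nil matches on the start address alone
mem_free_ainfo(block)
}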

View File

@@ -50,6 +50,8 @@ import "core:mem"
align_forward_uintptr :: mem.align_forward_uintptr
align_forward_raw :: mem.align_forward
mem_fill :: mem.set
import "core:mem/virtual"
VirtualProtectFlags :: virtual.Protect_Flags
@@ -97,6 +99,7 @@ import "core:prof/spall"
Spall_Buffer :: spall.Buffer
import "core:sync"
Mutex :: sync.Mutex
sync_load :: sync.atomic_load_explicit
sync_store :: sync.atomic_store_explicit
@@ -114,6 +117,18 @@ import "core:unicode/utf8"
runes_to_string :: utf8.runes_to_string
// string_to_runes :: utf8.string_to_runes
array_append :: proc {
array_append_value,
array_append_array,
array_append_slice,
}
array_append_at :: proc {
array_append_at_array,
array_append_at_slice,
array_append_at_value,
}
cursor :: proc {
raw_cursor,
ptr_cursor,
@@ -127,10 +142,6 @@ end :: proc {
string_end,
}
to_string :: proc {
strings.to_string,
}
copy :: proc {
mem_copy,
slice_copy,
@@ -141,11 +152,24 @@ copy_non_overlaping :: proc {
slice_copy_overlapping,
}
fill :: proc {
mem_fill,
slice_fill,
}
make :: proc {
array_init,
}
to_bytes :: proc {
slice_to_bytes,
type_to_bytes,
}
to_string :: proc {
strings.to_string,
}
zero :: proc {
mem_zero,
slice_zero,

View File

@@ -1 +1,25 @@
package grime
/*
So this is a virtual-memory-backed arena allocator designed
to take advantage of one large contiguous reserve of memory,
with the expectation that resizes through its interface will only occur on the last allocated block.
Note(Ed): Odin's mem allocator now has that feature.
All virtual address space memory for this application is managed by a virtual arena.
No other part of the program will touch the virtual memory interface directly other than it.
Thus, for the scope of this prototype, the virtual arenas are the only interface to dynamic address space for the runtime of the client app.
Ideally the host application as well (although this may not be the case for a while).
*/
VArena_GrowthPolicyProc :: #type proc( commit_used, committed, reserved, requested_size : uint ) -> uint
VArena :: struct {
using vmem: VirtualMemoryRegion,
tracker: MemoryTracker,
dbg_name: string,
commit_used: uint,
growth_policy: VArena_GrowthPolicyProc,
allow_any_resize: b32,
mutex: Mutex,
}
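// Minimal sketch of a growth policy matching the proc type above (hypothetical name and doubling
// heuristic): it returns how many bytes to commit next, doubling the current commit while always
// covering the request and never exceeding the reserve.
varena_growth_policy_double :: proc(commit_used, committed, reserved, requested_size: uint) -> uint {
next := max(committed * 2, commit_used + requested_size)
return min(next, reserved)
}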

View File

@@ -47,6 +47,7 @@ master_prepper_proc :: proc(thread: ^SysThread) {}
main :: proc()
{
// Setup host arenas
// TODO(Ed): Preferably I want to eliminate usage of this. We should be able to do almost everything here with fixed allocations.
arena_init(& host_memory.host_persist, host_memory.host_persist_buf[:])
arena_init(& host_memory.host_scratch, host_memory.host_scratch_buf[:])
context.allocator = arena_allocator(& host_memory.host_persist)
@@ -78,8 +79,6 @@ main :: proc()
path_logger_finalized: string
{
profile("Setup the logger")
fmt_backing := make([]byte, 32 * Kilo, allocator = context.temp_allocator);
// Generating the logger's name, it will be used when the app is shutting down.
{
startup_time := time_now()
@@ -89,14 +88,14 @@ main :: proc()
if ! os_is_directory( Path_Logs ) {
os_make_directory( Path_Logs )
}
timestamp := str_pfmt_buffer( fmt_backing, "%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
host_memory.path_logger_finalized = str_pfmt_buffer( fmt_backing, "%s/sectr_%v.log", Path_Logs, timestamp)
timestamp := str_pfmt_tmp("%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec)
host_memory.path_logger_finalized = str_pfmt("%s/sectr_%v.log", Path_Logs, timestamp)
}
logger_init( & host_memory.host_logger, "Sectr Host", str_pfmt_buffer( fmt_backing, "%s/sectr.log", Path_Logs ) )
logger_init( & host_memory.host_logger, "Sectr Host", str_pfmt_tmp("%s/sectr.log", Path_Logs))
context.logger = to_odin_logger( & host_memory.host_logger )
{
// Log System Context
builder := strbuilder_from_bytes( fmt_backing )
builder := strbuilder_make_len(16 * Kilo, context.temp_allocator)
str_pfmt_builder( & builder, "Core Count: %v, ", os_core_count() )
str_pfmt_builder( & builder, "Page Size: %v", os_page_size() )
log_print( to_str(builder) )
@@ -145,6 +144,7 @@ main :: proc()
}
barrier_init(& host_memory.lane_job_sync, THREAD_TICK_LANES + THREAD_JOB_WORKERS)
}
free_all(context.temp_allocator)
host_tick_lane()
profile_begin("Host Shutdown")
@@ -156,7 +156,7 @@ main :: proc()
log_print("Succesfuly closed")
file_close( host_memory.host_logger.file )
file_rename( str_pfmt_tmp( "%s/sectr.log", Path_Logs), host_memory.path_logger_finalized )
file_rename( str_pfmt_tmp("%s/sectr.log", Path_Logs), host_memory.path_logger_finalized )
profile_end()
// End profiling
@@ -202,7 +202,6 @@ host_tick_lane :: proc()
delta_ns = time_tick_lap_time( & host_tick )
host_tick = time_tick_now()
// Lanes are synced before doing the running check.
sync_client_api()
}
@@ -213,17 +212,13 @@ host_lane_shutdown :: proc()
profile(#procedure)
if thread_memory.id == .Master_Prepper {
jobs_enqueued := true
if jobs_enqueued == false {
// debug_trap()
}
// if jobs_enqueued == false do debug_trap()
for ; jobs_enqueued; {
jobs_enqueued = false
jobs_enqueued |= host_memory.job_system.job_lists[.Normal].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.Low].head != nil
jobs_enqueued |= host_memory.job_system.job_lists[.High].head != nil
if jobs_enqueued == false {
// debug_trap()
}
// if jobs_enqueued == false do debug_trap()
}
sync_store(& host_memory.job_system.running, false, .Release)
}
@@ -297,7 +292,7 @@ sync_client_api :: proc()
// Wait for the pdb to unlock (the linker may still be writing)
for ; file_is_locked( Path_Sectr_Debug_Symbols ) && file_is_locked( Path_Sectr_Live_Module ); {}
thread_sleep( Millisecond * 50 )
thread_sleep( Millisecond * 25 )
host_memory.client_api = load_client_api( version_id )
verify( host_memory.client_api.lib_version != 0, "Failed to hot-reload the sectr module" )

View File

@@ -11,6 +11,7 @@ import "core:dynlib"
import "core:fmt"
str_pfmt_builder :: fmt.sbprintf
str_pfmt_buffer :: fmt.bprintf
str_pfmt :: fmt.aprintf
str_pfmt_tmp :: fmt.tprintf
import "core:log"
@@ -44,6 +45,7 @@ import "core:prof/spall"
import "core:strings"
strbuilder_from_bytes :: strings.builder_from_bytes
strbuilder_make_len :: strings.builder_make_len
builder_to_str :: strings.to_string
import "core:sync"

View File

@@ -238,7 +238,6 @@ tick_lane :: proc(host_delta_time_ms: f64, host_delta_ns: Duration) -> (should_c
// log("sokol_app: Event-based frame callback triggered (detected a resize")
// }
}
// Test dispatching 64 jobs during hot_reload loop (when the above store is uncommented)
if true