Mostly reviewing progress so far

Prepping in the background for swapping raylib with sokol.
This commit is contained in:
2024-05-21 23:35:36 -04:00
parent 31bc207c75
commit 405716e52b
19 changed files with 98 additions and 89 deletions

View File

@@ -2,7 +2,7 @@
The default arena allocator Odin provides does fragmented resizes, even when resizing the most recently allocated block.
This is an alternative that alleviates that.
TODO(Ed): Implement?
TODO(Ed): Implement? Maybe we should trash this; I haven't seen a need to step away from using Odin's.
*/
package sectr
@@ -19,8 +19,6 @@ sub_arena_init :: proc( address : ^byte, size : int ) -> ( ^ Arena) {
return sub_arena
}
// TODO(Ed) : Once this is done (ArenaFixed), rename to just Arena as we're not going to use the core implementation
ArenaFixedHeader :: struct {
data : []byte,
offset : uint,
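
A minimal sketch of the in-place tail resize described at the top of this file; this is not code from the commit. The last_offset field and the proc name are hypothetical, added only to illustrate the idea:

ArenaFixedSketch :: struct {
data : []byte,
offset : uint,
last_offset : uint, // hypothetical: start of the most recent allocation
}
// Sketch: grow the most recent allocation in place rather than fragmenting.
arena_fixed_resize_tail :: proc( arena : ^ArenaFixedSketch, old_block : rawptr, new_size : uint ) -> rawptr
{
base := uintptr( raw_data( arena.data ) )
if uintptr( old_block ) == base + uintptr( arena.last_offset )
{
if arena.last_offset + new_size > uint(len(arena.data)) {
return nil // fixed arena is out of space
}
arena.offset = arena.last_offset + new_size
return old_block
}
return nil // not the tail allocation; caller falls back to alloc + copy
}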

View File

@@ -7,13 +7,6 @@ import "core:c/libc"
import "core:mem"
import "core:slice"
// Array :: struct ( $ Type : typeid ) {
// backing : Allocator,
// capacity : u64,
// num : u64,
// data : [^]Type,
// }
ArrayHeader :: struct ( $ Type : typeid ) {
backing : Allocator,
dbg_name : string,
@@ -105,6 +98,7 @@ array_append_array :: proc( using self: ^Array( $ Type), other : Array(Type)) ->
// libc.memcpy( ptr_offset(data, num), raw_data(items), len(items) * size_of(Type) )
// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE.
ensure(false, "time to check....")
target := ptr_offset( data, num )
copy( slice_ptr(target, int(capacity - num)), array_to_slice(other) )
@@ -177,6 +171,7 @@ array_append_at_slice :: proc( using self : ^Array( $ Type ), items : []Type, id
// libc.memcpy ( src, raw_data(items), len(items) * size_of(Type) )
// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE
ensure(false, "time to check....")
target := & data[id + len(items)]
dst := slice_ptr( target, num - id - len(items) )
src := slice_ptr( & data[id], num - id )
@@ -189,15 +184,15 @@ array_append_at_slice :: proc( using self : ^Array( $ Type ), items : []Type, id
// array_back :: proc( )
array_push_back :: proc( using self : Array( $ Type)) -> b32 {
if num == capacity {
return false
}
data[ num ] = value
num += 1
return true
}
// array_push_back :: proc( using self : Array( $ Type)) -> b32 {
// if num == capacity {
// return false
// }
// data[ num ] = value
// num += 1
// return true
// }
array_clear :: proc "contextless" ( using self : Array( $ Type ), zero_data : b32 = false ) {
if zero_data {

View File

@@ -1,12 +1,11 @@
// TODO(Ed) : Move this to a grime package
package sectr
// TODO(Ed): Review these when os2 is done.
import "core:fmt"
import "core:os"
import "base:runtime"
// Test
file_copy_sync :: proc( path_src, path_dst: string, allocator := context.temp_allocator ) -> b32
{
file_size : i64

View File

@@ -2,6 +2,11 @@
This is an alternative to Odin's default map type.
The only reason I may need this is due to issues with allocator callbacks or something else going on
with hot-reloads...
---------------------------------------------------------------------------------------------------------
5-21-2024 Update: Still haven't taken the time to see why, but for the record: I believe the original case for the above
occurred exclusively when I didn't set the base address of vmem,
OR when I was attempting to use Casey's brute-force replay feature with memory.
---------------------------------------------------------------------------------------------------------
This implementation uses two ZPL-based arrays: one holds the entries, the other the actual hash table.
Instead of using separate chains, it maintains linked entries within the entries array.
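
Roughly, that layout might look like the following sketch. This is not the commit's actual code: it assumes the package's Array type exposes data and num as seen in the array file above, and every _sketch name is hypothetical.

HashTableSlotSketch :: struct ( $ Type : typeid ) {
key : u64,
next : i64, // index of the next entry in this chain, -1 when none
value : Type,
}
HashTableSketch :: struct ( $ Type : typeid ) {
table : Array( i64 ), // bucket -> first entry index, -1 when empty
entries : Array( HashTableSlotSketch(Type) ), // entries linked via next, no separate chain nodes
}
hashtable_get_sketch :: proc( using self : ^HashTableSketch( $ Type ), key : u64 ) -> ^Type
{
if table.num == 0 {
return nil
}
for id := table.data[ key % u64(table.num) ]; id >= 0; id = entries.data[ id ].next {
if entries.data[ id ].key == key {
return & entries.data[ id ].value
}
}
return nil
}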

View File

@@ -0,0 +1,131 @@
package sectr
import "base:runtime"
import "core:fmt"
import "core:mem"
import "core:os"
import str "core:strings"
import "core:time"
import core_log "core:log"
Max_Logger_Message_Width :: 180
LogLevel :: core_log.Level
Logger :: struct {
file_path : string,
file : os.Handle,
id : string,
}
to_odin_logger :: proc( logger : ^ Logger ) -> core_log.Logger {
return { logger_interface, logger, core_log.Level.Debug, core_log.Default_File_Logger_Opts }
}
logger_init :: proc( logger : ^ Logger, id : string, file_path : string, file := os.INVALID_HANDLE )
{
if file == os.INVALID_HANDLE
{
logger_file, result_code := file_open( file_path, os.O_RDWR | os.O_CREATE )
if result_code != os.ERROR_NONE {
// Failures to acquire the log file are fatal and must never occur at runtime (we would have no way to log them)
runtime.debug_trap()
os.exit( -1 )
// TODO(Ed) : Figure out the error code enums..
}
logger.file = logger_file
}
else {
logger.file = file
}
logger.file_path = file_path
logger.id = id
context.logger = { logger_interface, logger, core_log.Level.Debug, core_log.Default_File_Logger_Opts }
log("Initialized Logger")
when false {
log("This sentence is over 80 characters long on purpose to test the ability of this logger to properfly wrap long as logs with a new line and then at the end of that pad it with the appropraite signature.")
}
}
logger_interface :: proc(
logger_data : rawptr,
level : core_log.Level,
text : string,
options : core_log.Options,
location := #caller_location )
{
logger := cast(^ Logger) logger_data
@static builder_backing : [16 * Kilobyte] byte; {
mem.set( raw_data( builder_backing[:] ), 0, len(builder_backing) )
}
builder := str.builder_from_bytes( builder_backing[:] )
first_line_length := len(text) > Max_Logger_Message_Width ? Max_Logger_Message_Width : len(text)
first_line := transmute(string) text[ 0 : first_line_length ]
str_fmt_builder( & builder, "%-*s ", Max_Logger_Message_Width, first_line )
// Signature
{
when time.IS_SUPPORTED
{
if core_log.Full_Timestamp_Opts & options != nil {
str_fmt_builder( & builder, "[")
t := time.now()
year, month, day := time.date(t)
hour, minute, second := time.clock(t)
if .Date in options {
str_fmt_builder( & builder, "%d-%02d-%02d ", year, month, day )
}
if .Time in options {
str_fmt_builder( & builder, "%02d:%02d:%02d", hour, minute, second)
}
str_fmt_builder( & builder, "] ")
}
}
core_log.do_level_header( options, level, & builder )
if logger.id != "" {
str_fmt_builder( & builder, "[%s] ", logger.id )
}
core_log.do_location_header( options, & builder, location )
}
// Oversized message handling
if len(text) > Max_Logger_Message_Width
{
offset := Max_Logger_Message_Width
bytes := transmute( []u8 ) text
for left := len(bytes) - Max_Logger_Message_Width; left > 0; left -= Max_Logger_Message_Width
{
str_fmt_builder( & builder, "\n" )
subset_length := len(text) - offset
if subset_length > Max_Logger_Message_Width {
subset_length = Max_Logger_Message_Width
}
subset := slice_ptr( ptr_offset( raw_data(bytes), offset), subset_length )
str_fmt_builder( & builder, "%s", transmute(string) subset )
offset += Max_Logger_Message_Width
}
}
str_to_file_ln( logger.file, to_string(builder) )
}
// TODO(Ed): Use a fixed-size block allocation for the message formatting used by core_log.
// This will prevent stack overflows with the virtual arena debug logs in the worst case, and avoids
// needing inline arena allocation on-site, as with the memory tracker.
log :: proc( msg : string, level := LogLevel.Info, loc := #caller_location ) {
core_log.log( level, msg, location = loc )
}
logf :: proc( fmt : string, args : ..any, level := LogLevel.Info, loc := #caller_location ) {
// context.allocator = transient_allocator()
core_log.logf( level, fmt, ..args, location = loc )
}
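
For context, a hedged usage sketch of wiring this logger up at startup; the id and file path are made up. The caller sets context.logger again via to_odin_logger because Odin context changes made inside logger_init do not propagate back out to the caller:

startup_logging_sketch :: proc()
{
logger : Logger
logger_init( & logger, "sectr", "logs/sectr.log" )
context.logger = to_odin_logger( & logger )
// Short messages get padded to Max_Logger_Message_Width and signed;
// longer ones go through the oversized-message wrapping path above.
log( "Initialized from the usage sketch" )
logf( "Frame time: %v ms", 16.6, level = LogLevel.Debug )
}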

View File

@@ -39,8 +39,8 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
policy : SlabPolicy
policy_ptr := & policy
// push( policy_ptr, SlabSizeClass { 64 * Kilobyte, 8, alignment })
// push( policy_ptr, SlabSizeClass { 64 * Kilobyte, 16, alignment })
push( policy_ptr, SlabSizeClass { 64 * Kilobyte, 8, alignment })
push( policy_ptr, SlabSizeClass { 64 * Kilobyte, 16, alignment })
push( policy_ptr, SlabSizeClass { 128 * Kilobyte, 32, alignment })
push( policy_ptr, SlabSizeClass { 128 * Kilobyte, 64, alignment })
push( policy_ptr, SlabSizeClass { 64 * Kilobyte, 128, alignment })