Changed codebase to be foldered (breaking the compiler's conventions)

I now generate the layout the compiler wants; eventually I'll just have a custom policy so that the compiler can accept the non-idiomatic layout. See scripts/build.ps1 & gen_staged_compiler_codebase.ps1 for how this is handled.
code/grime/arena.odin (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
The default arena allocator Odin provides performs fragmented resizes, even when the block
being resized is the most recently allocated one. This is an alternative that alleviates that.

TODO(Ed): Implement?
*/
package sectr

import "core:mem"

// Initialize a sub-section of our virtual memory as a sub-arena.
sub_arena_init :: proc( address : ^byte, size : int ) -> (^Arena) {
	Arena :: mem.Arena

	arena_size :: size_of( Arena)
	sub_arena  := cast( ^Arena ) address
	mem_slice  := slice_ptr( ptr_offset( address, arena_size), size )
	arena_init( sub_arena, mem_slice )
	return sub_arena
}

// TODO(Ed) : Once this is done (ArenaFixed), rename to just Arena as we're not going to use the core implementation.

ArenaFixedHeader :: struct {
	data      : []byte,
	offset    : uint,
	peak_used : uint,
}

ArenaFixed :: struct {
	using header : ^ArenaFixedHeader,
}

arena_fixed_init :: proc( backing : []byte ) -> (arena : ArenaFixed) {
	header_size := size_of(ArenaFixedHeader)

	verify( len(backing) >= (header_size + Kilobyte), "Attempted to init an arena with less than a kilobyte of memory..." )

	arena.header = cast(^ArenaFixedHeader) raw_data(backing)
	using arena.header
	data_ptr := cast([^]byte) (cast( [^]ArenaFixedHeader) arena.header)[ 1:]
	data      = slice_ptr( data_ptr, len(backing) - header_size )
	offset    = 0
	peak_used = 0
	return
}

arena_fixed_allocator_proc :: proc(
	allocator_data : rawptr,
	mode           : AllocatorMode,
	size           : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	location       := #caller_location
) -> ([]byte, AllocatorError)
{
	return nil, .Out_Of_Memory
}
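A minimal usage sketch of the fixed arena path above (hypothetical driver code, not part of this commit; the proc name and the 4 KB backing size are illustrative):

example_arena_usage :: proc()
{
	backing : [4 * Kilobyte]byte

	// The header is carved out of the front of the backing slice; the remainder becomes arena data.
	arena := arena_fixed_init( backing[:] )
	assert( len(arena.data) == len(backing) - size_of(ArenaFixedHeader) )
	assert( arena.offset == 0 )
}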
code/grime/array.odin (new file, 308 lines)
@@ -0,0 +1,308 @@
// Based on gencpp's and thus zpl's Array implementation.
// Made because of the map issue with fonts during hot-reload.
// I didn't want to make the HMapZPL impl with the [dynamic] array for now, to isolate the hot-reload issue (when I was diagnosing).
package sectr

import "core:c/libc"
import "core:mem"
import "core:slice"

// Array :: struct ( $ Type : typeid ) {
// 	backing  : Allocator,
// 	capacity : u64,
// 	num      : u64,
// 	data     : [^]Type,
// }

ArrayHeader :: struct ( $ Type : typeid ) {
	backing   : Allocator,
	dbg_name  : string,
	fixed_cap : b32,
	capacity  : u64,
	num       : u64,
	data      : [^]Type,
}

Array :: struct ( $ Type : typeid ) {
	using header : ^ArrayHeader(Type),
}

array_underlying_slice :: proc( slice : []($ Type) ) -> Array(Type)
{
	if len(slice) == 0 {
		return {}
	}
	header_size       := size_of( ArrayHeader(Type))
	first_element_ptr := & slice[0]
	header_ptr        := cast( ^ArrayHeader(Type)) ( uintptr(first_element_ptr) - uintptr(header_size) )
	return Array(Type) { header = header_ptr }
}

array_to_slice :: proc( using self : Array($ Type) ) -> []Type {
	return slice_ptr( data, int(num) )
}

array_to_slice_capacity :: proc( using self : Array($ Type) ) -> []Type {
	return slice_ptr( data, int(capacity) )
}

array_grow_formula :: proc( value : u64 ) -> u64 {
	result := (2 * value) + 8
	return result
}

array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), AllocatorError ) {
	return array_init_reserve( Type, allocator, array_grow_formula(0) )
}

array_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, capacity : u64, fixed_cap : b32 = false, dbg_name : string = "" ) -> ( result : Array(Type), alloc_error : AllocatorError )
{
	header_size := size_of(ArrayHeader(Type))
	array_size  := header_size + int(capacity) * size_of(Type)

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( array_size, allocator = allocator )
	// log( str_fmt_tmp("array reserved: %d", header_size + int(capacity) * size_of(Type) ))
	if alloc_error != AllocatorError.None do return

	result.header    = cast( ^ArrayHeader(Type)) raw_mem
	result.backing   = allocator
	// result.dbg_name = dbg_name
	result.fixed_cap = fixed_cap
	result.capacity  = capacity
	result.data      = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) result.header)[ 1:]
	return
}

array_append :: proc( self : ^Array( $ Type), value : Type ) -> AllocatorError
{
	// profile(#procedure)
	if self.header.num == self.header.capacity
	{
		grow_result := array_grow( self, self.header.capacity )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	self.header.data[ self.header.num ] = value
	self.header.num += 1
	return AllocatorError.None
}

array_append_slice :: proc( using self : ^Array( $ Type ), items : []Type ) -> AllocatorError
{
	if num + u64(len(items)) > capacity
	{
		grow_result := array_grow( self, num + u64(len(items)) )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	// Note(Ed) : Original code from gencpp
	// libc.memcpy( ptr_offset(data, num), raw_data(items), len(items) * size_of(Type) )

	// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE.
	target := ptr_offset( data, int(num) )
	copy( slice_ptr(target, int(capacity - num)), items )

	num += u64(len(items))
	return AllocatorError.None
}

array_append_at :: proc( using self : ^Array( $ Type ), item : Type, id : u64 ) -> AllocatorError
{
	id := id
	if id >= num {
		id = num - 1
	}
	if id < 0 {
		id = 0
	}

	if capacity < num + 1
	{
		grow_result := array_grow( self, capacity )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	target := & data[id]
	libc.memmove( ptr_offset(target, 1), target, uint(num - id) * size_of(Type) )

	data[id] = item
	num     += 1
	return AllocatorError.None
}

array_append_at_slice :: proc( using self : ^Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError
{
	id := id
	if id >= num {
		return array_append_slice( self, items )
	}
	if num + u64(len(items)) > capacity
	{
		grow_result := array_grow( self, num + u64(len(items)) )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	// Note(Ed) : Original code from gencpp
	// target := ptr_offset( data, id + len(items) )
	// src    := ptr_offset( data, id )
	// libc.memmove( target, src, num - id * size_of(Type) )
	// libc.memcpy ( src, raw_data(items), len(items) * size_of(Type) )

	// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE
	target := & data[id + u64(len(items))]
	dst    := slice_ptr( target, int(num - id - u64(len(items))) )
	src    := slice_ptr( & data[id], int(num - id) )
	copy( dst, src )
	copy( src, items )

	num += u64(len(items))
	return AllocatorError.None
}

// array_back :: proc( )

array_push_back :: proc( using self : Array( $ Type), value : Type ) -> b32 {
	if num == capacity {
		return false
	}

	data[ num ] = value
	num += 1
	return true
}

array_clear :: proc "contextless" ( using self : Array( $ Type ), zero_data : b32 = false ) {
	if zero_data {
		mem.set( data, 0, int(num * size_of(Type)) )
	}
	header.num = 0
}

array_fill :: proc( using self : Array( $ Type ), begin, end : u64, value : Type ) -> b32
{
	if begin < 0 || end >= num {
		return false
	}

	// data_slice := slice_ptr( ptr_offset( data, begin ), end - begin )
	// slice.fill( data_slice, cast(int) value )

	for id := begin; id < end; id += 1 {
		data[ id ] = value
	}
	return true
}

array_free :: proc( using self : Array( $ Type ) ) {
	free( self.header, backing )
	self.data = nil
}

array_grow :: proc( using self : ^Array( $ Type ), min_capacity : u64 ) -> AllocatorError
{
	// profile(#procedure)
	new_capacity := array_grow_formula( capacity )

	if new_capacity < min_capacity {
		new_capacity = min_capacity
	}
	return array_set_capacity( self, new_capacity )
}

array_pop :: proc( using self : Array( $ Type ) ) {
	verify( num != 0, "Attempted to pop an array with no elements" )
	num -= 1
}

array_remove_at :: proc( using self : Array( $ Type ), id : u64 )
{
	verify( id < header.num, "Attempted to remove from an index larger than the array" )

	left  := & data[id]
	right := & data[id + 1]
	libc.memmove( left, right, uint(num - id - 1) * size_of(Type) )

	header.num -= 1
}

array_reserve :: proc( using self : ^Array( $ Type ), new_capacity : u64 ) -> AllocatorError
{
	if capacity < new_capacity {
		return array_set_capacity( self, new_capacity )
	}
	return AllocatorError.None
}

array_resize :: proc( array : ^Array( $ Type ), num : u64 ) -> AllocatorError
{
	if array.capacity < num
	{
		grow_result := array_grow( array, num )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	array.num = num
	return AllocatorError.None
}

array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> AllocatorError
{
	if new_capacity == self.capacity {
		return AllocatorError.None
	}
	if new_capacity < self.num {
		self.num = new_capacity
		return AllocatorError.None
	}

	header_size :: size_of(ArrayHeader(Type))

	new_size := header_size + (cast(int) new_capacity ) * size_of(Type)
	old_size := header_size + (cast(int) self.capacity) * size_of(Type)

	new_mem, result_code := resize_non_zeroed( self.header, old_size, new_size, mem.DEFAULT_ALIGNMENT, allocator = self.backing )

	if result_code != AllocatorError.None {
		ensure( false, "Failed to allocate for new array capacity" )
		return result_code
	}
	if new_mem == nil {
		ensure( false, "new_mem is nil but no allocation error" )
		return result_code
	}

	self.header          = cast( ^ArrayHeader(Type)) raw_data(new_mem)
	self.header.data     = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) self.header)[ 1:]
	self.header.capacity = new_capacity
	self.header.num      = self.num
	return result_code
}

array_block_size :: proc "contextless" ( self : Array( $Type ) ) -> u64 {
	header_size :: size_of(ArrayHeader(Type))
	block_size  := cast(u64) (header_size + self.capacity * size_of(Type))
	return block_size
}

array_memtracker_entry :: proc( self : Array( $Type ), name : string ) -> MemoryTrackerEntry {
	header_size :: size_of(ArrayHeader(Type))
	block_size  := cast(uintptr) (header_size + (cast(uintptr) self.capacity) * size_of(Type))

	block_start := transmute(^u8) self.header
	block_end   := ptr_offset( block_start, int(block_size) )

	tracker_entry := MemoryTrackerEntry { block_start, block_end }
	return tracker_entry
}
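A quick usage sketch of this header-backed Array (hypothetical, not part of the commit; the proc name is illustrative and context.allocator stands in for whatever backing the caller picks):

example_array_usage :: proc() -> AllocatorError
{
	arr, error := array_init( int, context.allocator )
	if error != AllocatorError.None do return error

	// Appends grow via array_grow_formula (2n + 8) once capacity is hit.
	for value in 0 ..< 3 {
		error = array_append( & arr, value )
		if error != AllocatorError.None do return error
	}

	values := array_to_slice( arr )
	assert( len(values) == 3 && values[2] == 2 )

	array_free( arr )
	return AllocatorError.None
}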
code/grime/assert.odin (new file, 62 lines)
@@ -0,0 +1,62 @@
package sectr

import "base:runtime"
import "core:io"
import "core:os"
import "core:text/table"

dump_stacktrace :: proc( allocator := context.temp_allocator ) -> string
{
	trace_result := stacktrace()
	lines, _     := stacktrace_lines( trace_result )

	padding := " "

	log_table := table.init( & table.Table{}, context.temp_allocator, context.temp_allocator )
	for line in lines {
		table.row( log_table, padding, line.symbol, " - ", line.location )
	}
	table.build(log_table)

	// writer_builder_backing : [Kilobyte * 16] u8
	// writer_builder := from_bytes( writer_builder_backing[:] )
	writer_builder : StringBuilder
	str_builder_init( & writer_builder, allocator = allocator )

	writer := to_writer( & writer_builder )
	for row in 2 ..< log_table.nr_rows {
		for col in 0 ..< log_table.nr_cols {
			table.write_table_cell( writer, log_table, row, col )
		}
		io.write_byte( writer, '\n' )
	}

	return to_string( writer_builder )
}

ensure :: proc( condition : b32, msg : string, location := #caller_location )
{
	if condition {
		return
	}
	log( msg, LogLevel.Warning, location )
	runtime.debug_trap()
}

// TODO(Ed) : Setup exit codes!
fatal :: proc( msg : string, exit_code : int = -1, location := #caller_location )
{
	log( msg, LogLevel.Fatal, location )
	runtime.debug_trap()
	os.exit( exit_code )
}

verify :: proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location )
{
	if condition {
		return
	}
	log( msg, LogLevel.Fatal, location )
	runtime.debug_trap()
	os.exit( exit_code )
}
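Hypothetical call sites for the two assertion levels (not in the commit): ensure warns and traps but keeps running, verify traps and exits.

example_assert_usage :: proc( bytes_free : int )
{
	// Recoverable invariant: log a warning and trap under a debugger.
	ensure( bytes_free >= 0, "Free-byte accounting went negative" )

	// Unrecoverable invariant: log fatal, trap, then exit the process.
	verify( bytes_free < Gigabyte, "Accounting exceeded the reserve size" )
}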
code/grime/context.odin (new file, 6 lines)
@@ -0,0 +1,6 @@
package sectr

context_ext :: proc( $ Type : typeid ) -> (^Type) {
	return cast(^Type) context.user_ptr
}
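Sketch of stashing and retrieving app state through context.user_ptr (hypothetical ExampleState type, not in the commit):

ExampleState :: struct { frame : u64 }

example_context_ext :: proc()
{
	state := ExampleState{}
	context.user_ptr = & state

	// Any callee sharing this context can recover the typed pointer.
	retrieved := context_ext( ExampleState )
	assert( retrieved == & state )
}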
code/grime/filesystem.odin (new file, 67 lines)
@@ -0,0 +1,67 @@
// TODO(Ed) : Move this to a grime package
package sectr

import "core:fmt"
import "core:os"
import "base:runtime"

// Test

file_copy_sync :: proc( path_src, path_dst : string, allocator := context.temp_allocator ) -> b32
{
	file_size : i64
	{
		path_info, result := file_status( path_src, allocator )
		if result != os.ERROR_NONE {
			logf("Could not get file info: %v", result, LogLevel.Error )
			return false
		}
		file_size = path_info.size
	}

	src_content, result := os.read_entire_file( path_src, allocator )
	if ! result {
		logf( "Failed to read file to copy: %v", path_src, LogLevel.Error )
		runtime.debug_trap()
		return false
	}

	result = os.write_entire_file( path_dst, src_content, false )
	if ! result {
		logf( "Failed to copy file: %v", path_dst, LogLevel.Error )
		runtime.debug_trap()
		return false
	}
	return true
}

file_exists :: proc( file_path : string ) -> b32 {
	_, result := file_status( file_path, frame_allocator() )
	if result != os.ERROR_NONE {
		return false
	}
	return true
}

file_is_locked :: proc( file_path : string ) -> b32 {
	handle, err := file_open(file_path, os.O_RDONLY)
	if err != os.ERROR_NONE {
		// If the error indicates the file is in use, return true.
		return true
	}

	// If the file opens successfully, close it and return false.
	file_close(handle)
	return false
}

file_rewind :: proc( file : os.Handle ) {
	file_seek( file, 0, 0 )
}

file_read_looped :: proc( file : os.Handle, data : []byte ) {
	_, result_code := file_read( file, data )
	if result_code == os.ERROR_HANDLE_EOF {
		file_rewind( file )
	}
}
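These helpers exist for the hot-reload staging flow; a hypothetical sketch of how they compose (the dll names are illustrative, not from this commit):

example_stage_module :: proc()
{
	// Only copy the module once the compiler/linker has released its lock on it.
	if file_exists( "sectr.dll" ) && ! file_is_locked( "sectr.dll" ) {
		file_copy_sync( "sectr.dll", "sectr_staged.dll" )
	}
}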
code/grime/grime.odin (new file, 324 lines)
@@ -0,0 +1,324 @@
package sectr

#region("Import Aliases")

import "base:builtin"
copy :: builtin.copy
import "base:intrinsics"
mem_zero       :: intrinsics.mem_zero
ptr_sub        :: intrinsics.ptr_sub
type_has_field :: intrinsics.type_has_field
type_elem_type :: intrinsics.type_elem_type
import "base:runtime"
Byte     :: runtime.Byte
Kilobyte :: runtime.Kilobyte
Megabyte :: runtime.Megabyte
Gigabyte :: runtime.Gigabyte
Terabyte :: runtime.Terabyte
Petabyte :: runtime.Petabyte
Exabyte  :: runtime.Exabyte
resize_non_zeroed  :: runtime.non_zero_mem_resize
SourceCodeLocation :: runtime.Source_Code_Location
import c "core:c/libc"
import "core:dynlib"
import "core:hash"
crc32 :: hash.crc32
import "core:hash/xxhash"
xxh32 :: xxhash.XXH32
import fmt_io "core:fmt"
str_fmt          :: fmt_io.printf
str_fmt_tmp      :: fmt_io.tprintf
str_fmt_alloc    :: fmt_io.aprintf
str_fmt_builder  :: fmt_io.sbprintf
str_fmt_buffer   :: fmt_io.bprintf
str_to_file_ln   :: fmt_io.fprintln
str_tmp_from_any :: fmt_io.tprint
import "core:math"
import "core:mem"
align_forward_int       :: mem.align_forward_int
align_forward_uint      :: mem.align_forward_uint
align_forward_uintptr   :: mem.align_forward_uintptr
Allocator               :: mem.Allocator
AllocatorError          :: mem.Allocator_Error
AllocatorMode           :: mem.Allocator_Mode
AllocatorModeSet        :: mem.Allocator_Mode_Set
alloc                   :: mem.alloc
alloc_bytes             :: mem.alloc_bytes
alloc_bytes_non_zeroed  :: mem.alloc_bytes_non_zeroed
Arena                   :: mem.Arena
arena_allocator         :: mem.arena_allocator
arena_init              :: mem.arena_init
byte_slice              :: mem.byte_slice
copy_non_overlapping    :: mem.copy_non_overlapping
free                    :: mem.free
is_power_of_two_uintptr :: mem.is_power_of_two
ptr_offset              :: mem.ptr_offset
resize                  :: mem.resize
slice_ptr               :: mem.slice_ptr
TrackingAllocator       :: mem.Tracking_Allocator
tracking_allocator      :: mem.tracking_allocator
tracking_allocator_init :: mem.tracking_allocator_init
import "core:mem/virtual"
VirtualProtectFlags :: virtual.Protect_Flags
// import "core:odin"
import "core:os"
FileFlag_Create    :: os.O_CREATE
FileFlag_ReadWrite :: os.O_RDWR
FileTime           :: os.File_Time
file_close         :: os.close
file_open          :: os.open
file_read          :: os.read
file_remove        :: os.remove
file_seek          :: os.seek
file_status        :: os.stat
file_write         :: os.write
import "core:path/filepath"
file_name_from_path :: filepath.short_stem
import str "core:strings"
StringBuilder          :: str.Builder
str_builder_from_bytes :: str.builder_from_bytes
str_builder_init       :: str.builder_init
str_builder_to_writer  :: str.to_writer
str_builder_to_string  :: str.to_string
import "core:time"
Duration         :: time.Duration
duration_seconds :: time.duration_seconds
duration_ms      :: time.duration_milliseconds
thread_sleep     :: time.sleep
import "core:unicode"
is_white_space :: unicode.is_white_space
import "core:unicode/utf8"
str_rune_count  :: utf8.rune_count_in_string
runes_to_string :: utf8.runes_to_string
// string_to_runes :: utf8.string_to_runes
import "thirdparty:backtrace"
StackTraceData   :: backtrace.Trace_Const
stacktrace       :: backtrace.trace
stacktrace_lines :: backtrace.lines

#endregion("Import Aliases")

#region("Proc overload mappings")

// This has to be done on a per-module basis.

add :: proc {
	add_range2,
}

bivec3 :: proc {
	bivec3_via_f32s,
	vec3_to_bivec3,
}

cm_to_pixels :: proc {
	f32_cm_to_pixels,
	vec2_cm_to_pixels,
	range2_cm_to_pixels,
}

regress :: proc {
	regress_bivec3,
}

cross :: proc {
	cross_vec3,
}

dot :: proc {
	dot_vec2,
	dot_vec3,
	dot_v3_unitv3,
	dot_unitv3_vs,
}

ws_view_draw_text :: proc {
	ws_view_draw_text_string,
	ws_view_draw_text_StrRunesPair,
}

from_bytes :: proc {
	str_builder_from_bytes,
}

get_bounds :: proc {
	view_get_bounds,
}

inverse_mag :: proc {
	inverse_mag_vec3,
	// inverse_mag_rotor3,
}

is_power_of_two :: proc {
	is_power_of_two_u32,
	is_power_of_two_uintptr,
}

measure_text_size :: proc {
	measure_text_size_raylib,
}

mov_avg_exp :: proc {
	mov_avg_exp_f32,
	mov_avg_exp_f64,
}

pixels_to_cm :: proc {
	f32_pixels_to_cm,
	vec2_pixels_to_cm,
	range2_pixels_to_cm,
}

points_to_pixels :: proc {
	f32_points_to_pixels,
	vec2_points_to_pixels,
}

pop :: proc {
	stack_pop,
	stack_allocator_pop,
}

pow :: proc {
	math.pow_f16,
	math.pow_f16le,
	math.pow_f16be,
	math.pow_f32,
	math.pow_f32le,
	math.pow_f32be,
	math.pow_f64,
	math.pow_f64le,
	math.pow_f64be,
}

pow2 :: proc {
	pow2_vec3,
}

pressed :: proc {
	btn_pressed,
}

push :: proc {
	stack_push,
	stack_allocator_push,
}

rotor3 :: proc {
	rotor3_via_comps,
	rotor3_via_bv_s,
	rotor3_via_from_to,
}

released :: proc {
	btn_released,
}

sqrt :: proc {
	math.sqrt_f16,
	math.sqrt_f16le,
	math.sqrt_f16be,
	math.sqrt_f32,
	math.sqrt_f32le,
	math.sqrt_f32be,
	math.sqrt_f64,
	math.sqrt_f64le,
	math.sqrt_f64be,
}

inverse_sqrt :: proc {
	inverse_sqrt_f32,
}

sub :: proc {
	sub_point3,
	sub_range2,
	sub_bivec3,
}

to_quat128 :: proc {
	rotor3_to_quat128,
}

to_rl_rect :: proc {
	range2_to_rl_rect,
}

to_runes :: proc {
	string_to_runes,
}

to_string :: proc {
	runes_to_string,
	str_builder_to_string,
}

vec3 :: proc {
	vec3_via_f32s,
	bivec3_to_vec3,
	point3_to_vec3,
	pointflat3_to_vec3,
	unitvec3_to_vec3,
}

vec4 :: proc {
	unitvec4_to_vec4,
}

to_writer :: proc {
	str_builder_to_writer,
}

to_ui_layout_side :: proc {
	to_ui_layout_side_f32,
	to_ui_layout_side_vec2,
}

ui_compute_layout :: proc {
	ui_core_compute_layout,
	ui_box_compute_layout,
}

ui_floating :: proc {
	ui_floating_just_builder,
	ui_floating_with_capture,
}

ui_layout_push :: proc {
	ui_layout_push_layout,
	ui_layout_push_theme,
}

ui_layout :: proc {
	ui_layout_via_layout,
	ui_layout_via_combo,
}

ui_style_push :: proc {
	ui_style_push_style,
	ui_style_push_combo,
}

ui_style :: proc {
	ui_style_via_style,
	ui_style_via_combo,
}

ui_theme :: proc {
	ui_theme_via_layout_style,
	ui_theme_via_combos,
	ui_theme_via_theme,
}

wedge :: proc {
	wedge_vec3,
	wedge_bivec3,
}

#endregion("Proc overload mappings")

OS_Type :: type_of(ODIN_OS)

swap :: #force_inline proc( a, b : ^ $Type ) -> ( ^ Type, ^ Type ) { return b, a }
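The overload tables above are Odin's explicit procedure groups: the compiler picks the member whose signature matches the call site. A minimal illustration of the mechanism (hypothetical scale_* procs, not from this codebase):

scale_f32  :: proc( v : f32,    by : f32 ) -> f32    { return v * by }
scale_vec2 :: proc( v : [2]f32, by : f32 ) -> [2]f32 { return { v[0] * by, v[1] * by } }

// Call sites resolve by argument type at compile time: scale(2.0, 3.0) vs scale([2]f32{1, 2}, 3.0).
scale :: proc {
	scale_f32,
	scale_vec2,
}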
code/grime/hashmap_chained.odin (new file, 225 lines)
@@ -0,0 +1,225 @@
/*
Separate chaining hashtable with tombstone (vacancy aware)

This is an alternative to Odin's map and the zpl hashtable I first used for this codebase.
I haven't felt the need to go back to dealing with Odin's map for my edge case hot reload/memory replay failure.

So this is a hashtable loosely based on what I saw in the raddbg codebase.
It uses a fixed-size lookup table for the base layer of entries that can be chained.
Each slot keeps track of its vacancy (tombstone, is occupied).
If it's occupied, a new slot is chained using the fixed bucket-size pool allocator, which will have its blocks sized to the type of the table.

This is ideal for tables that have an indeterminate scope for how entries are added,
and where direct pointers are kept across the codebase instead of a key to the slot.
*/
package sectr

import "core:mem"

HTable_Minimum_Capacity :: 4 * Kilobyte

HMapChainedSlot :: struct( $Type : typeid ) {
	using links : DLL_NodePN(HMapChainedSlot(Type)),
	value    : Type,
	key      : u64,
	occupied : b32,
}

HMapChained :: struct( $ Type : typeid ) {
	pool   : Pool,
	lookup : [] ^HMapChainedSlot(Type),
}

HMapChainedPtr :: struct( $ Type : typeid) {
	using header : ^HMapChained(Type),
}

// Provides the nearest prime number value for the given capacity.
hmap_closest_prime :: proc( capacity : uint ) -> uint
{
	prime_table : []uint = {
		53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593,
		49157, 98317, 196613, 393241, 786433, 1572869, 3145739,
		6291469, 12582917, 25165843, 50331653, 100663319,
		201326611, 402653189, 805306457, 1610612741, 3221225473, 6442450941,
	}
	for slot in prime_table {
		if slot >= capacity {
			return slot
		}
	}
	return prime_table[len(prime_table) - 1]
}

hmap_chained_init :: proc( $Type : typeid, lookup_capacity : uint, allocator : Allocator,
	pool_bucket_cap         : uint   = 1 * Kilobyte,
	pool_bucket_reserve_num : uint   = 0,
	pool_alignment          : uint   = mem.DEFAULT_ALIGNMENT,
	dbg_name                : string = ""
) -> (table : HMapChainedPtr(Type), error : AllocatorError)
{
	header_size := size_of(HMapChained(Type))
	size        := header_size + int(lookup_capacity) * size_of( ^HMapChainedSlot(Type)) + size_of(int)

	raw_mem : rawptr
	raw_mem, error = alloc( size, allocator = allocator )
	if error != AllocatorError.None do return

	table.header = cast( ^HMapChained(Type)) raw_mem
	table.pool, error = pool_init(
		should_zero_buckets = false,
		block_size          = size_of(HMapChainedSlot(Type)),
		bucket_capacity     = pool_bucket_cap,
		bucket_reserve_num  = pool_bucket_reserve_num,
		alignment           = pool_alignment,
		allocator           = allocator,
		dbg_name            = str_intern(str_fmt_tmp("%v: pool", dbg_name)).str
	)
	data := transmute([^] ^HMapChainedSlot(Type)) (transmute( [^]HMapChained(Type)) table.header)[1:]
	table.lookup = slice_ptr( data, int(lookup_capacity) )
	return
}

hmap_chained_clear :: proc( using self : HMapChainedPtr($Type))
{
	for slot in lookup
	{
		if slot == nil {
			continue
		}
		for probe_slot := slot.next; probe_slot != nil; probe_slot = probe_slot.next {
			probe_slot.occupied = false
		}
		slot.occupied = false
	}
}

hmap_chained_destroy :: proc( using self : ^HMapChainedPtr($Type)) {
	backing := pool.backing
	pool_destroy( pool )
	free( self.header, backing )
	header = nil
}

hmap_chained_lookup_id :: #force_inline proc( using self : HMapChainedPtr($Type), key : u64 ) -> u64
{
	hash_index := key % u64( len(lookup) )
	return hash_index
}

hmap_chained_get :: proc( using self : HMapChainedPtr($Type), key : u64) -> ^Type
{
	// profile(#procedure)
	surface_slot := lookup[hmap_chained_lookup_id(self, key)]

	if surface_slot == nil {
		return nil
	}

	if surface_slot.occupied && surface_slot.key == key {
		return & surface_slot.value
	}

	for slot := surface_slot.next; slot != nil; slot = slot.next {
		if slot.occupied && slot.key == key {
			return & slot.value
		}
	}

	return nil
}

hmap_chained_reload :: proc( self : HMapChainedPtr($Type), allocator : Allocator )
{
	pool_reload(self.pool, allocator)
}

// Returns true if a slot was actually found and marked as vacant.
// Entries already found to be vacant will not return true.
hmap_chained_remove :: proc( using self : HMapChainedPtr($Type), key : u64 ) -> b32
{
	surface_slot := lookup[hmap_chained_lookup_id(self, key)]

	if surface_slot == nil {
		return false
	}

	if surface_slot.occupied && surface_slot.key == key {
		surface_slot.occupied = false
		return true
	}

	for slot := surface_slot.next; slot != nil; slot = slot.next
	{
		if slot.occupied && slot.key == key {
			slot.occupied = false
			return true
		}
	}

	return false
}

// Sets the value in a vacant slot.
// Will preemptively allocate the next slot in the hashtable if it's null for the slot.
hmap_chained_set :: proc( using self : HMapChainedPtr($Type), key : u64, value : Type ) -> (^ Type, AllocatorError)
{
	// profile(#procedure)
	hash_index   := hmap_chained_lookup_id(self, key)
	surface_slot := lookup[hash_index]
	set_slot :: #force_inline proc( using self : HMapChainedPtr(Type),
		slot  : ^HMapChainedSlot(Type),
		key   : u64,
		value : Type
	) -> (^ Type, AllocatorError )
	{
		error := AllocatorError.None
		if slot.next == nil {
			block : []byte
			block, error = pool_grab(pool)
			next        := transmute( ^HMapChainedSlot(Type)) & block[0]
			slot.next    = next
			next.prev    = slot
		}
		slot.key      = key
		slot.value    = value
		slot.occupied = true
		return & slot.value, error
	}

	if surface_slot == nil {
		block, error := pool_grab(pool)
		surface_slot := transmute( ^HMapChainedSlot(Type)) & block[0]
		surface_slot.key      = key
		surface_slot.value    = value
		surface_slot.occupied = true
		if error != AllocatorError.None {
			ensure(error == AllocatorError.None, "Allocation failure for chained slot in hash table")
			return nil, error
		}
		lookup[hash_index] = surface_slot

		block, error = pool_grab(pool)
		next := transmute( ^HMapChainedSlot(Type)) & block[0]
		surface_slot.next = next
		next.prev         = surface_slot
		return & surface_slot.value, error
	}

	if ! surface_slot.occupied
	{
		result, error := set_slot( self, surface_slot, key, value)
		return result, error
	}

	slot := surface_slot.next
	for ; slot != nil; slot = slot.next
	{
		if ! slot.occupied
		{
			result, error := set_slot( self, slot, key, value)
			return result, error
		}
	}
	ensure(false, "Somehow got to a null slot that wasn't preemptively allocated from a previous set")
	return nil, AllocatorError.None
}
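End-to-end sketch of the chained table (hypothetical; assumes the caller supplies a persistent allocator to back the lookup array and pool):

example_hmap_chained :: proc( allocator : Allocator )
{
	table, error := hmap_chained_init( int, hmap_closest_prime(512), allocator )
	if error != AllocatorError.None do return

	// Slots live in pool buckets rather than a flat array, so the returned
	// pointers stay stable across later inserts.
	entry, set_error := hmap_chained_set( table, 0xC0FFEE, 42 )
	if set_error == AllocatorError.None {
		assert( hmap_chained_get( table, 0xC0FFEE ) == entry )
	}
}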
code/grime/hashmap_zpl.odin (new file, 274 lines)
@@ -0,0 +1,274 @@
/*
This is an alternative to Odin's default map type.
The only reason I may need this is due to issues with allocator callbacks or something else going on
with hot-reloads...

This implementation uses two ZPL-based Arrays to hold entries and the actual hash table.
Instead of using separate chains, it maintains linked entries within the entries array.
Each entry contains a next field, which is an index pointing to the next entry in the same array.

Growing this hashtable is destructive, so it should usually be kept to a fixed size unless
the populating operations only occur in one place and from then on it's read-only.
*/
package sectr

import "core:slice"

// Note(Ed) : See core:hash for hashing procs.

HMapZPL_MapProc    :: #type proc( $ Type : typeid, key : u64, value : Type )
HMapZPL_MapMutProc :: #type proc( $ Type : typeid, key : u64, value : ^ Type )

HMapZPL_CriticalLoadScale :: 0.70
HMapZPL_HashToEntryRatio  :: 1.50

HMapZPL_FindResult :: struct {
	hash_index  : i64,
	prev_index  : i64,
	entry_index : i64,
}

HMapZPL_Entry :: struct ( $ Type : typeid) {
	key   : u64,
	next  : i64,
	value : Type,
}

HMapZPL :: struct ( $ Type : typeid ) {
	table   : Array( i64 ),
	entries : Array( HMapZPL_Entry(Type) ),
}

zpl_hmap_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HMapZPL( Type), AllocatorError ) {
	return zpl_hmap_init_reserve( Type, allocator, array_grow_formula(0) )
}

zpl_hmap_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, num : u64, dbg_name : string = "" ) -> ( HMapZPL( Type), AllocatorError )
{
	result : HMapZPL(Type)
	table_result, entries_result : AllocatorError

	result.table, table_result = array_init_reserve( i64, allocator, num, dbg_name = dbg_name )
	if table_result != AllocatorError.None {
		ensure( false, "Failed to allocate table array" )
		return result, table_result
	}
	array_resize( & result.table, num )
	slice.fill( slice_ptr( result.table.data, cast(int) result.table.num), -1 )

	result.entries, entries_result = array_init_reserve( HMapZPL_Entry(Type), allocator, num, dbg_name = dbg_name )
	if entries_result != AllocatorError.None {
		ensure( false, "Failed to allocate entries array" )
		return result, entries_result
	}
	return result, AllocatorError.None
}

zpl_hmap_clear :: proc( using self : ^ HMapZPL( $ Type ) ) {
	for id : u64 = 0; id < table.num; id += 1 {
		table.data[id] = -1
	}

	array_clear( table )
	array_clear( entries )
}

zpl_hmap_destroy :: proc( using self : ^ HMapZPL( $ Type ) ) {
	if table.data != nil && table.capacity > 0 {
		array_free( table )
		array_free( entries )
	}
}

zpl_hmap_get :: proc ( using self : ^ HMapZPL( $ Type ), key : u64 ) -> ^ Type
{
	// profile(#procedure)
	id := zpl_hmap_find( self, key ).entry_index
	if id >= 0 {
		return & entries.data[id].value
	}

	return nil
}

zpl_hmap_map :: proc( using self : ^ HMapZPL( $ Type), map_proc : HMapZPL_MapProc ) {
	ensure( map_proc != nil, "Mapping procedure must not be null" )
	for id : u64 = 0; id < entries.num; id += 1 {
		map_proc( Type, entries.data[id].key, entries.data[id].value )
	}
}

zpl_hmap_map_mut :: proc( using self : ^ HMapZPL( $ Type), map_proc : HMapZPL_MapMutProc ) {
	ensure( map_proc != nil, "Mapping procedure must not be null" )
	for id : u64 = 0; id < entries.num; id += 1 {
		map_proc( Type, entries.data[id].key, & entries.data[id].value )
	}
}

zpl_hmap_grow :: proc( using self : ^ HMapZPL( $ Type ) ) -> AllocatorError {
	new_num := array_grow_formula( entries.num )
	return zpl_hmap_rehash( self, new_num )
}

zpl_hmap_rehash :: proc( ht : ^ HMapZPL( $ Type ), new_num : u64 ) -> AllocatorError
{
	profile(#procedure)
	// For now the prototype should never allow this to happen.
	ensure( false, "ZPL HMAP IS REHASHING" )
	last_added_index : i64

	new_ht, init_result := zpl_hmap_init_reserve( Type, ht.table.backing, new_num, ht.table.dbg_name )
	if init_result != AllocatorError.None {
		ensure( false, "New zpl_hmap failed to allocate" )
		return init_result
	}

	for id : u64 = 0; id < ht.entries.num; id += 1 {
		find_result : HMapZPL_FindResult

		entry           := & ht.entries.data[id]
		find_result      = zpl_hmap_find( & new_ht, entry.key )
		last_added_index = zpl_hmap_add_entry( & new_ht, entry.key )

		if find_result.prev_index < 0 {
			new_ht.table.data[ find_result.hash_index ] = last_added_index
		}
		else {
			new_ht.entries.data[ find_result.prev_index ].next = last_added_index
		}

		new_ht.entries.data[ last_added_index ].next  = find_result.entry_index
		new_ht.entries.data[ last_added_index ].value = entry.value
	}

	zpl_hmap_destroy( ht )

	(ht ^) = new_ht
	return AllocatorError.None
}

zpl_hmap_rehash_fast :: proc( using self : ^ HMapZPL( $ Type ) )
{
	for id : u64 = 0; id < entries.num; id += 1 {
		entries.data[id].next = -1
	}
	for id : u64 = 0; id < table.num; id += 1 {
		table.data[id] = -1
	}
	for id : u64 = 0; id < entries.num; id += 1 {
		entry       := & entries.data[id]
		find_result := zpl_hmap_find( self, entry.key )

		if find_result.prev_index < 0 {
			table.data[ find_result.hash_index ] = cast(i64) id
		}
		else {
			entries.data[ find_result.prev_index ].next = cast(i64) id
		}
	}
}

// Used when the address space of the allocator changes and the backing reference must be updated.
zpl_hmap_reload :: proc( using self : ^HMapZPL($Type), new_backing : Allocator ) {
	table.backing   = new_backing
	entries.backing = new_backing
}

zpl_hmap_remove :: proc( using self : ^ HMapZPL( $ Type ), key : u64 ) {
	find_result := zpl_hmap_find( self, key )

	if find_result.entry_index >= 0 {
		array_remove_at( entries, cast(u64) find_result.entry_index )
		zpl_hmap_rehash_fast( self )
	}
}

zpl_hmap_remove_entry :: proc( using self : ^ HMapZPL( $ Type ), id : i64 ) {
	array_remove_at( entries, cast(u64) id )
}

zpl_hmap_set :: proc( using self : ^ HMapZPL( $ Type), key : u64, value : Type ) -> (^ Type, AllocatorError)
{
	// profile(#procedure)
	id : i64 = 0
	find_result : HMapZPL_FindResult

	if zpl_hmap_full( self )
	{
		grow_result := zpl_hmap_grow( self )
		if grow_result != AllocatorError.None {
			return nil, grow_result
		}
	}

	find_result = zpl_hmap_find( self, key )
	if find_result.entry_index >= 0 {
		id = find_result.entry_index
	}
	else
	{
		id = zpl_hmap_add_entry( self, key )
		if find_result.prev_index >= 0 {
			entries.data[ find_result.prev_index ].next = id
		}
		else {
			table.data[ find_result.hash_index ] = id
		}
	}

	entries.data[id].value = value

	if zpl_hmap_full( self ) {
		alloc_error := zpl_hmap_grow( self )
		return & entries.data[id].value, alloc_error
	}

	return & entries.data[id].value, AllocatorError.None
}

zpl_hmap_slot :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> i64 {
	for id : i64 = 0; id < i64(table.num); id += 1 {
		if table.data[id] == cast(i64) key {
			return id
		}
	}
	return -1
}

zpl_hmap_add_entry :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> i64 {
	entry : HMapZPL_Entry(Type) = { key, -1, {} }
	id    := cast(i64) entries.num
	array_append( & entries, entry )
	return id
}

zpl_hmap_find :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> HMapZPL_FindResult
{
	// profile(#procedure)
	result : HMapZPL_FindResult = { -1, -1, -1 }

	if table.num > 0 {
		result.hash_index  = cast(i64)( key % table.num )
		result.entry_index = table.data[ result.hash_index ]

		verify( result.entry_index < i64(entries.num), "Entry index is larger than the number of entries" )

		for ; result.entry_index >= 0; {
			entry := & entries.data[ result.entry_index ]
			if entry.key == key {
				break
			}

			result.prev_index  = result.entry_index
			result.entry_index = entry.next
		}
	}
	return result
}

zpl_hmap_full :: proc( using self : ^ HMapZPL( $ Type) ) -> b32 {
	critical_load := u64(HMapZPL_CriticalLoadScale * cast(f64) table.num)
	result : b32  = entries.num > critical_load
	return result
}
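Sketch of the index indirection at work (hypothetical; a small prime capacity keeps the table/entries ratio visible): table.data holds the head entry-index per hash bucket, and collisions chain through each entry's next index rather than through pointers.

example_hmap_zpl :: proc( allocator : Allocator )
{
	table, error := zpl_hmap_init_reserve( f32, allocator, 53 )
	if error != AllocatorError.None do return

	zpl_hmap_set( & table, 101, 3.14 )
	value := zpl_hmap_get( & table, 101 )
	assert( value != nil && value^ == 3.14 )
}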
code/grime/linked_list.odin (new file, 190 lines)
@@ -0,0 +1,190 @@
package sectr

LL_Node :: struct ( $ Type : typeid ) {
	next : ^Type,
}

// ll_push :: proc( list_ptr : ^(^ ($ Type)), node : ^Type ) {
ll_push :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)), node : ^Type ) {
	list : ^Type = (list_ptr^)
	node.next    = list
	(list_ptr^)  = node
}

ll_pop :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)) ) -> ( node : ^Type ) {
	list : ^Type = (list_ptr^)
	(list_ptr^)  = list.next
	return list
}

//region Intrusive Doubly-Linked-List

DLL_Node :: struct ( $ Type : typeid ) #raw_union {
	using _ : struct {
		left, right : ^Type,
	},
	using _ : struct {
		prev, next : ^Type,
	},
	using _ : struct {
		first, last : ^Type,
	},
	using _ : struct {
		bottom, top : ^Type,
	},
}

DLL_NodeFull :: struct ( $ Type : typeid ) {
	// using _ : DLL_NodeFL(Type),
	first, last : ^Type,
	prev, next  : ^Type,
}

DLL_NodePN :: struct ( $ Type : typeid ) {
	// using _ : struct {
	prev, next : ^Type,
	// },
	// using _ : struct {
	// 	left, right : ^Type,
	// },
}

DLL_NodeFL :: struct ( $ Type : typeid ) {
	// using _ : struct {
	first, last : ^Type,
	// },

	// TODO(Ed): Review this
	// using _ : struct {
	// 	bottom, top: ^Type,
	// },
}

type_is_node :: #force_inline proc "contextless" ( $ Type : typeid ) -> bool
{
	// elem_type := type_elem_type(Type)
	return type_has_field( type_elem_type(Type), "prev" ) && type_has_field( type_elem_type(Type), "next" )
}

// First/Last append
dll_fl_append :: proc ( list : ^( $TypeList), node : ^( $TypeNode) )
{
	if list.first == nil {
		list.first = node
		list.last  = node
	}
	else {
		list.last = node
	}
}

dll_push_back :: proc "contextless" ( current_ptr : ^(^ ($ TypeCurr)), node : ^$TypeNode )
{
	current : ^TypeCurr = (current_ptr ^)

	if current == nil
	{
		(current_ptr ^) = node
		node.prev       = nil
	}
	else
	{
		node.prev      = current
		(current_ptr^) = node
		current.next   = node
	}

	node.next = nil
}

dll_pn_pop :: proc "contextless" ( node : ^$Type )
{
	if node == nil {
		return
	}
	if node.prev != nil {
		node.prev.next = nil
		node.prev      = nil
	}
	if node.next != nil {
		node.next.prev = nil
		node.next      = nil
	}
}

dll_pop_back :: #force_inline proc "contextless" ( current_ptr : ^(^ ($ Type)) )
{
	to_remove : ^Type = (current_ptr ^)
	if to_remove == nil {
		return
	}

	if to_remove.prev == nil {
		(current_ptr ^) = nil
	}
	else {
		(current_ptr ^)      = to_remove.prev
		(current_ptr ^).next = nil
	}
}

dll_full_insert_raw :: proc "contextless" ( null : ^($ Type), parent : ^$ParentType, pos, node : ^Type )
{
	if parent.first == null {
		parent.first = node
		parent.last  = node
		node.next    = null
		node.prev    = null
	}
	else if pos == null {
		// Position is not set, insert at beginning.
		node.next         = parent.first
		parent.first.prev = node
		parent.first      = node
		node.prev         = null
	}
	else if pos == parent.last {
		// Position is set to last, insert at end.
		parent.last.next = node
		node.prev        = parent.last
		parent.last      = node
		node.next        = null
	}
	else
	{
		if pos.next != null {
			pos.next.prev = node
		}
		node.next = pos.next
		pos.next  = node
		node.prev = pos
	}
}

dll_full_pop :: proc "contextless" ( node : ^$NodeType, parent : ^$ParentType ) {
	if node == nil {
		return
	}
	if parent.first == node {
		parent.first = node.next
	}
	if parent.last == node {
		parent.last = node.prev
	}
	prev := node.prev
	next := node.next
	if prev != nil {
		prev.next = next
		node.prev = nil
	}
	if next != nil {
		next.prev = prev
		node.next = nil
	}
}

dll_full_push_back :: proc "contextless" ( parent : ^$ParentType, node : ^$Type, null : ^Type ) {
	dll_full_insert_raw( null, parent, parent.last, node )
}

//endregion Intrusive Doubly-Linked-List
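Intrusive usage sketch: the links live inside the element type itself via the using'd node struct (hypothetical Job type, not in the commit):

Job :: struct {
	using links : DLL_NodePN(Job),
	work        : proc(),
}

example_intrusive_list :: proc()
{
	a, b : Job
	tail : ^Job

	dll_push_back( & tail, & a )
	dll_push_back( & tail, & b ) // a.next == &b, b.prev == &a
	assert( tail == & b && b.prev == & a )
}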
code/grime/memory.odin (new file, 91 lines)
@@ -0,0 +1,91 @@
// TODO(Ed) : Move this to a grime package probably
package sectr

import "core:fmt"
import "core:mem"
import "core:mem/virtual"
import "base:runtime"
import "core:os"

kilobytes :: #force_inline proc "contextless" ( kb : $ integer_type ) -> integer_type {
	return kb * Kilobyte
}
megabytes :: #force_inline proc "contextless" ( mb : $ integer_type ) -> integer_type {
	return mb * Megabyte
}
gigabytes :: #force_inline proc "contextless" ( gb : $ integer_type ) -> integer_type {
	return gb * Gigabyte
}
terabytes :: #force_inline proc "contextless" ( tb : $ integer_type ) -> integer_type {
	return tb * Terabyte
}

//region Memory Math

// See: core/mem.odin, I wanted to study it and didn't like the naming.
@(require_results)
calc_padding_with_header :: proc "contextless" (pointer: uintptr, alignment: uintptr, header_size: int) -> int
{
	alignment_offset := pointer & (alignment - 1)

	initial_padding := uintptr(0)
	if alignment_offset != 0 {
		initial_padding = alignment - alignment_offset
	}

	header_space_adjustment := uintptr(header_size)
	if initial_padding < header_space_adjustment
	{
		additional_space_needed := header_space_adjustment - initial_padding
		unaligned_extra_space   := additional_space_needed & (alignment - 1)

		if unaligned_extra_space > 0 {
			initial_padding += alignment * (1 + (additional_space_needed / alignment))
		}
		else {
			initial_padding += alignment * (additional_space_needed / alignment)
		}
	}

	return int(initial_padding)
}

// Helper to get the beginning of memory after a slice.
memory_after :: #force_inline proc "contextless" ( slice : []byte ) -> ( ^ byte) {
	return ptr_offset( & slice[0], len(slice) )
}

memory_after_header :: #force_inline proc "contextless" ( header : ^($ Type) ) -> ( [^]byte) {
	result := cast( [^]byte) ptr_offset( header, 1 )
	// result := cast( [^]byte) (cast( [^]Type) header)[ 1:]
	return result
}

@(require_results)
memory_align_formula :: #force_inline proc "contextless" ( size, align : uint) -> uint {
	result := size + align - 1
	return result - result % align
}

// This is here just for docs
memory_misalignment :: #force_inline proc ( address, alignment : uintptr) -> uint {
	// address % alignment
	assert(is_power_of_two(alignment))
	return uint( address & (alignment - 1) )
}

// This is here just for docs
@(require_results)
memory_align_forward :: #force_inline proc( address, alignment : uintptr) -> uintptr
{
	assert(is_power_of_two(alignment))

	aligned_address := address
	misalignment    := cast(uintptr) memory_misalignment( address, alignment )
	if misalignment != 0 {
		aligned_address += alignment - misalignment
	}
	return aligned_address
}

//endregion Memory Math
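Worked example of memory_align_formula: for size 13 and align 8, result = 13 + 8 - 1 = 20, then 20 - (20 % 8) = 16, the next multiple of 8. A small check of the helpers (hypothetical, not in the commit):

example_alignment :: proc()
{
	assert( memory_align_formula( 13, 8 ) == 16 ) // rounds up to the next multiple of 8
	assert( memory_align_formula( 16, 8 ) == 16 ) // already aligned, unchanged
	assert( memory_misalignment( 0x1003, 8 ) == 3 )
	assert( memory_align_forward( 0x1003, 8 ) == 0x1008 )
}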
code/grime/memory_tracker.odin (new file, 172 lines)
@@ -0,0 +1,172 @@
/*
This was a tracking allocator made to kill off various bugs left with grime's pool & slab allocators.
It doesn't perform that well on a per-frame basis and should be avoided for general memory debugging.

It only makes sure that memory allocations don't collide in the allocator and that deallocations don't occur for memory never allocated.

I'm keeping it around as an artifact & for future allocators I may make.
*/
package sectr

MemoryTrackerEntry :: struct {
	start, end : rawptr,
}

MemoryTracker :: struct {
	name    : string,
	entries : Array(MemoryTrackerEntry),
}

Track_Memory :: false

tracker_msg_buffer : [Kilobyte * 16]u8

memtracker_clear :: proc ( tracker : MemoryTracker ) {
	when ! Track_Memory {
		return
	}
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	logf("Clearing tracker: %v", tracker.name)
	memtracker_dump_entries(tracker)
	array_clear(tracker.entries)
}

memtracker_init :: proc ( tracker : ^MemoryTracker, allocator : Allocator, num_entries : u64, name : string )
{
	when ! Track_Memory {
		return
	}
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	tracker.name = name

	error : AllocatorError
	tracker.entries, error = array_init_reserve( MemoryTrackerEntry, allocator, num_entries, dbg_name = name )
	if error != AllocatorError.None {
		fatal("Failed to allocate memory tracker's entries array")
	}
}

memtracker_register :: proc( tracker : ^MemoryTracker, new_entry : MemoryTrackerEntry )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	if tracker.entries.num == tracker.entries.capacity {
		ensure(false, "Memory tracker entries array full, can no longer register any more allocations")
		return
	}

	for idx in 0 ..< tracker.entries.num
	{
		entry := & tracker.entries.data[idx]
		if new_entry.start > entry.start {
			continue
		}

		if new_entry.end >= entry.start
		{
			msg := str_fmt_tmp("Memory tracker(%v) detected a collision:\nold_entry: %v\nnew_entry: %v", tracker.name, entry, new_entry)
			ensure( false, msg )
			memtracker_dump_entries(tracker ^)
		}
		array_append_at( & tracker.entries, new_entry, idx )
		log(str_fmt_tmp("%v : Registered: %v", tracker.name, new_entry) )
		return
	}

	array_append( & tracker.entries, new_entry )
	log(str_fmt_tmp("%v : Registered: %v", tracker.name, new_entry) )
}

memtracker_register_auto_name :: proc( tracker : ^MemoryTracker, start, end : rawptr )
{
	when ! Track_Memory {
		return
	}
	memtracker_register( tracker, {start, end})
}

memtracker_register_auto_name_slice :: proc( tracker : ^MemoryTracker, slice : []byte )
{
	when ! Track_Memory {
		return
	}
	start := raw_data(slice)
	end   := & slice[ len(slice) - 1 ]
	memtracker_register( tracker, {start, end})
}

memtracker_unregister :: proc( tracker : MemoryTracker, to_remove : MemoryTrackerEntry )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	entries := array_to_slice(tracker.entries)
	for idx in 0 ..< tracker.entries.num
	{
		entry := & entries[idx]
		if entry.start == to_remove.start {
			if (entry.end == to_remove.end || to_remove.end == nil) {
				log(str_fmt_tmp("%v: Unregistered: %v", tracker.name, to_remove))
				array_remove_at(tracker.entries, idx)
				return
			}

			ensure(false, str_fmt_tmp("%v: Found an entry with the same start address but end address was different:\nentry : %v\nto_remove: %v", tracker.name, entry, to_remove))
			memtracker_dump_entries(tracker)
		}
	}

	ensure(false, str_fmt_tmp("%v: Attempted to unregister an entry that was not tracked: %v", tracker.name, to_remove))
	memtracker_dump_entries(tracker)
}

memtracker_check_for_collisions :: proc ( tracker : MemoryTracker )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	entries := array_to_slice(tracker.entries)
	for idx in 1 ..< tracker.entries.num {
		// Check to make sure each allocation's adjacent entries do not intersect.
		left  := & entries[idx - 1]
		right := & entries[idx]

		collided := left.start > right.start || left.end > right.end
		if collided {
			msg := str_fmt_tmp("%v: Memory tracker detected a collision:\nleft: %v\nright: %v", tracker.name, left, right)
			ensure( false, msg )
			memtracker_dump_entries(tracker)
		}
	}
}

memtracker_dump_entries :: proc( tracker : MemoryTracker )
{
	when ! Track_Memory {
		return
	}
	temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	context.temp_allocator = arena_allocator(& temp_arena)

	log( "Dumping Memory Tracker:")
	for idx in 0 ..< tracker.entries.num {
		entry := & tracker.entries.data[idx]
		log( str_fmt_tmp("%v", entry) )
	}
}
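Typical pairing around an allocation (hypothetical; the register/unregister calls are no-ops unless Track_Memory is toggled on):

example_memtracker_usage :: proc( tracker : ^MemoryTracker, allocator : Allocator )
{
	block, error := alloc_bytes( 256, allocator = allocator )
	if error != AllocatorError.None do return

	memtracker_register_auto_name_slice( tracker, block )
	// ... use the block ...
	memtracker_unregister( tracker ^, { raw_data(block), nil } ) // a nil end matches on start alone
}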
code/grime/pool_allocator.odin (new file, 361 lines; listing truncated below)
@@ -0,0 +1,361 @@
/*
This is a pool allocator set up to grow incrementally via buckets.
Buckets are stored in singly-linked lists so that allocations aren't necessarily contiguous.

The pool is set up with the intention to only grab single entries from a bucket,
not a contiguous array of them.
Thus the free-list only tracks the last free entries thrown out by the user,
irrespective of the bucket they originated from.
This means that if entries are heavily recycled in a pool,
there can be a large discrepancy in memory locality if buckets are small.

The pool doesn't allocate any buckets on initialization unless the user specifies.
*/
package sectr

import "base:intrinsics"
import "base:runtime"
import "core:mem"
import "core:slice"

Pool :: struct {
	using header : ^PoolHeader,
}

PoolHeader :: struct {
	backing  : Allocator,
	dbg_name : string,
	tracker  : MemoryTracker,

	zero_bucket     : b32,
	block_size      : uint,
	bucket_capacity : uint,
	alignment       : uint,

	free_list_head : ^Pool_FreeBlock,
	current_bucket : ^PoolBucket,
	bucket_list    : DLL_NodeFL( PoolBucket),
}

PoolBucket :: struct {
	using nodes : DLL_NodePN( PoolBucket),
	next_block : uint,
	blocks     : [^]byte,
}

Pool_FreeBlock :: struct {
	next : ^Pool_FreeBlock,
}

Pool_Check_Release_Object_Validity :: true

pool_init :: proc (
	should_zero_buckets : b32,
	block_size          : uint,
	bucket_capacity     : uint,
	bucket_reserve_num  : uint = 0,
	alignment           : uint = mem.DEFAULT_ALIGNMENT,
	allocator           : Allocator = context.allocator,
	dbg_name            : string,
) -> ( pool : Pool, alloc_error : AllocatorError )
{
	header_size := align_forward_int( size_of(PoolHeader), int(alignment) )

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( header_size, int(alignment), allocator )
	if alloc_error != .None do return

	ensure(block_size > 0, "Bad block size provided")
	ensure(bucket_capacity > 0, "Bad bucket capacity provided")

	pool.header          = cast( ^PoolHeader) raw_mem
	pool.zero_bucket     = should_zero_buckets
	pool.backing         = allocator
	pool.dbg_name        = dbg_name
	pool.block_size      = align_forward_uint(block_size, alignment)
	pool.bucket_capacity = bucket_capacity
	pool.alignment       = alignment

	when ODIN_DEBUG {
		memtracker_init( & pool.tracker, allocator, Kilobyte * 96, dbg_name )
	}

	if bucket_reserve_num > 0 {
		alloc_error = pool_allocate_buckets( pool, bucket_reserve_num )
	}

	pool.current_bucket = pool.bucket_list.first
	return
}
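
A minimal end-to-end sketch of the interface above (block and bucket sizes are arbitrary picks for illustration; `pool_grab`, `pool_release`, and `pool_destroy` are defined further down):

pool_example :: proc()
{
	pool, error := pool_init(
		should_zero_buckets = false,
		block_size          = 64,
		bucket_capacity     = 4 * Kilobyte,
		dbg_name            = "example pool",
	)
	verify( error == .None, "Failed to init example pool" )

	block, grab_error := pool_grab( pool, zero_memory = true )
	assert( grab_error == .None && len(block) == 64 )

	pool_release( pool, block ) // goes to the free-list; recycled by the next grab
	pool_destroy( pool )
}
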

pool_reload :: proc( pool : Pool, allocator : Allocator ) {
	pool.backing = allocator
}

pool_destroy :: proc ( using self : Pool )
{
	if bucket_list.first != nil
	{
		bucket := bucket_list.first
		for bucket != nil {
			// Capture next before freeing; the previous loop advanced through freed memory.
			next := bucket.next
			free( bucket, backing )
			bucket = next
		}
	}

	free( self.header, backing )

	when ODIN_DEBUG {
		memtracker_clear( self.tracker )
	}
}

pool_allocate_buckets :: proc( pool : Pool, num_buckets : uint ) -> AllocatorError
{
	profile(#procedure)
	if num_buckets == 0 {
		return .Invalid_Argument
	}
	header_size := cast(uint) align_forward_int( size_of(PoolBucket), int(pool.alignment))
	bucket_size := header_size + pool.bucket_capacity
	to_allocate := cast(int) (bucket_size * num_buckets)

	// log(str_fmt_tmp("Allocating %d bytes for %d buckets with header_size %d bytes & bucket_size %d", to_allocate, num_buckets, header_size, bucket_size ))

	bucket_memory : []byte
	alloc_error   : AllocatorError

	pool_validate( pool )
	if pool.zero_bucket {
		bucket_memory, alloc_error = alloc_bytes( to_allocate, int(pool.alignment), pool.backing )
	}
	else {
		bucket_memory, alloc_error = alloc_bytes_non_zeroed( to_allocate, int(pool.alignment), pool.backing )
	}
	pool_validate( pool )

	// log(str_fmt_tmp("Bucket memory size: %d bytes, without header: %d", len(bucket_memory), len(bucket_memory) - int(header_size)))

	if alloc_error != .None {
		return alloc_error
	}
	verify( bucket_memory != nil, "Bucket memory is null")

	next_bucket_ptr := cast( [^]byte) raw_data(bucket_memory)
	for index in 0 ..< num_buckets
	{
		bucket           := cast( ^PoolBucket) next_bucket_ptr
		bucket.blocks     = memory_after_header(bucket)
		bucket.next_block = 0
		// log( str_fmt_tmp("\tPool (%d) allocated bucket: %p start %p capacity: %d (raw: %d)",
		// 	pool.block_size,
		// 	raw_data(bucket_memory),
		// 	bucket.blocks,
		// 	pool.bucket_capacity / pool.block_size,
		// 	pool.bucket_capacity ))

		if pool.bucket_list.first == nil {
			pool.bucket_list.first = bucket
			pool.bucket_list.last  = bucket
		}
		else {
			dll_push_back( & pool.bucket_list.last, bucket )
		}
		// log( str_fmt_tmp("Bucket List First: %p", self.bucket_list.first))

		next_bucket_ptr = next_bucket_ptr[ bucket_size: ]
	}
	return alloc_error
}
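
The bucket stride math above is worth spelling out: each multi-bucket request is one allocation holding, per bucket, an aligned `PoolBucket` header followed by `bucket_capacity` bytes of blocks, so consecutive headers are `header_size + bucket_capacity` apart. A layout sketch (the header size shown is illustrative; it depends on the alignment in use):

// For bucket_capacity = 4096 and 16-byte alignment:
//
//   [ PoolBucket header, aligned ][ block 0 ][ block 1 ] ... [ last block ]
//   ^ next_bucket_ptr             ^ bucket.blocks = memory_after_header(bucket)
//
// next_bucket_ptr = next_bucket_ptr[ bucket_size: ] // stride = header_size + bucket_capacity
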

pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, alloc_error : AllocatorError )
{
	pool := pool
	if pool.current_bucket != nil {
		if ( pool.current_bucket.blocks == nil ) {
			ensure( false, str_fmt_tmp("(corruption) current_bucket was wiped %p", pool.current_bucket) )
		}
		// verify( pool.current_bucket.blocks != nil, str_fmt_tmp("(corruption) current_bucket was wiped %p", pool.current_bucket) )
	}
	// profile(#procedure)
	alloc_error = .None

	// Check the free-list first for a block
	if pool.free_list_head != nil
	{
		// Compiler Bug? Fails to compile
		// last_free := ll_pop( & pool.free_list_head )
		last_free : ^Pool_FreeBlock = pool.free_list_head
		pool.free_list_head = pool.free_list_head.next

		block = byte_slice( cast([^]byte) last_free, int(pool.block_size) )
		// log( str_fmt_tmp("\tReturning free block: %p %d", raw_data(block), pool.block_size))
		if zero_memory {
			slice.zero(block)
		}

		when ODIN_DEBUG {
			memtracker_register_auto_name_slice( & pool.tracker, block)
		}
		return
	}

	if pool.current_bucket == nil
	{
		alloc_error = pool_allocate_buckets( pool, 1 )
		if alloc_error != .None {
			ensure(false, "Failed to allocate bucket")
			return
		}
		pool.current_bucket = pool.bucket_list.first
		// log( "First bucket allocation")
	}

	next := uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
	end  := uintptr(pool.current_bucket.blocks) + uintptr(pool.bucket_capacity)

	blocks_left, overflow_signal := intrinsics.overflow_sub( end, next )
	if blocks_left == 0 || overflow_signal
	{
		// Compiler Bug
		// if current_bucket.next != nil {
		if pool.current_bucket.next != nil {
			// current_bucket = current_bucket.next
			// log( str_fmt_tmp("\tBucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
			pool.current_bucket = pool.current_bucket.next
			verify( pool.current_bucket.blocks != nil, "New current_bucket's blocks are null (new current_bucket is corrupted)" )
		}
		else
		{
			// log( "\tAll previous buckets exhausted, allocating new bucket")
			// Assign, don't shadow: a ':=' here would hide the failure from the named return.
			alloc_error = pool_allocate_buckets( pool, 1 )
			if alloc_error != .None {
				ensure(false, "Failed to allocate bucket")
				return
			}
			pool.current_bucket = pool.current_bucket.next
			verify( pool.current_bucket.blocks != nil, "Next's blocks are null (Post new bucket alloc)" )
		}
	}

	verify( pool.current_bucket != nil, "Attempted to grab a block from a null bucket reference" )

	// Compiler Bug
	// block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
	// self.current_bucket.next_block += block_size

	block_ptr := cast(rawptr) (uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block))

	block = byte_slice( block_ptr, int(pool.block_size) )
	pool.current_bucket.next_block += pool.block_size

	next = uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
	// log( str_fmt_tmp("\tgrabbing block: %p from %p blocks left: %d", raw_data(block), pool.current_bucket.blocks, (end - next) / uintptr(pool.block_size) ))

	if zero_memory {
		slice.zero(block)
		// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", block_ptr, cast(rawptr) (uintptr(block_ptr) + uintptr(pool.block_size))))
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name_slice( & pool.tracker, block)
	}
	return
}

pool_release :: proc( self : Pool, block : []byte, loc := #caller_location )
{
	// profile(#procedure)
	if Pool_Check_Release_Object_Validity {
		within_bucket := pool_validate_ownership( self, block )
		verify( within_bucket, "Attempted to release data that is not within a bucket of this pool", location = loc )
	}

	// Compiler bug
	// ll_push( & self.free_list_head, cast(^Pool_FreeBlock) raw_data(block) )

	// ll_push:
	new_free_block := cast(^Pool_FreeBlock) raw_data(block)
	(new_free_block ^) = {}
	new_free_block.next = self.free_list_head
	self.free_list_head = new_free_block

	// log( str_fmt_tmp("Released block: %p %d", new_free_block, self.block_size))

	when ODIN_DEBUG {
		start := new_free_block
		end   := transmute(rawptr) (uintptr(new_free_block) + uintptr(self.block_size) - 1)
		memtracker_unregister( self.tracker, { start, end } )
	}
}

pool_reset :: proc( using pool : Pool )
{
	bucket : ^PoolBucket = bucket_list.first // TODO(Ed): Compiler bug? Build fails unless ^PoolBucket is explicitly specified.
	for ; bucket != nil; bucket = bucket.next {
		bucket.next_block = 0
	}

	pool.free_list_head = nil
	pool.current_bucket = bucket_list.first
}

pool_validate :: proc( pool : Pool )
{
	when !ODIN_DEBUG do return
	pool := pool
	// Make sure all buckets don't show any indication of corruption
	bucket : ^PoolBucket = pool.bucket_list.first

	if bucket != nil && uintptr(bucket) < 0x10000000000 {
		ensure(false, str_fmt_tmp("Found a corrupted bucket %p", bucket ))
	}
	// Compiler bug ^^ same as pool_reset
	for ; bucket != nil; bucket = bucket.next
	{
		if bucket != nil && uintptr(bucket) < 0x10000000000 {
			ensure(false, str_fmt_tmp("Found a corrupted bucket %p", bucket ))
		}

		if ( bucket.blocks == nil ) {
			ensure(false, str_fmt_tmp("Found a corrupted bucket %p", bucket ))
		}
	}
}

pool_validate_ownership :: proc( using self : Pool, block : [] byte ) -> b32
{
	profile(#procedure)
	within_bucket := b32(false)

	// Compiler Bug : Same as pool_reset
	bucket : ^PoolBucket = bucket_list.first
	for ; bucket != nil; bucket = bucket.next
	{
		start         := uintptr( bucket.blocks )
		end           := start + uintptr(bucket_capacity)
		block_address := uintptr(raw_data(block))

		if start <= block_address && block_address < end
		{
			misalignment := (block_address - start) % uintptr(block_size)
			if misalignment != 0 {
				ensure(false, "pool_validate_ownership: This data is within this pool's buckets, but it's not aligned to the start of a block")
				log(str_fmt_tmp("Block address: %p Misalignment: %p closest: %p",
					transmute(rawptr)block_address,
					transmute(rawptr)misalignment,
					rawptr(block_address - misalignment)))
			}

			within_bucket = true
			break
		}
	}

	return within_bucket
}
22
code/grime/profiler.odin
Normal file
@ -0,0 +1,22 @@
package sectr

import "base:runtime"
import "core:prof/spall"

SpallProfiler :: struct {
	ctx    : spall.Context,
	buffer : spall.Buffer,
}

@(deferred_none=profile_end)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer, name, "", loc )
}

profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer, name, "", loc )
}

profile_end :: #force_inline proc "contextless" () {
	spall._buffer_end( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer)
}
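
Since `profile` is tagged `@(deferred_none=profile_end)`, a single call at the top of a procedure brackets the whole scope; the explicit begin/end pair is for narrower regions. A usage sketch:

example_profiled_proc :: proc()
{
	profile(#procedure) // emits a begin event now, and the matching end when the scope exits

	profile_begin("hot loop")
	for _ in 0 ..< 1000 { /* work */ }
	profile_end()
}
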
30
code/grime/ptr.odin
Normal file
@ -0,0 +1,30 @@
package sectr

// Provides an alternative syntax for pointers

Ptr :: struct( $ Type : typeid ) {
	v : Type,
}

example_ptr :: proc()
{
	a, b : int
	var  : ^Ptr(int)
	reg  : ^int

	a = 1
	b = 1

	var = &{a}
	var.v = 2
	var = &{b}
	var.v = 3

	a = 1
	b = 1

	reg = (& a)
	(reg^) = 2
	reg = (& b)
	(reg^) = 3
}
335
code/grime/slab_allocator.odin
Normal file
@ -0,0 +1,335 @@
/* Slab Allocator
These are a collection of pool allocators serving as a general way
to allocate a large amount of dynamically sized data.

The usual use case for this is when an arena, stack,
or dedicated pool allocator fails to be enough to handle a data structure
that either is too random with its size (ex: strings)
or is intended to grow an arbitrary degree with an unknown upper bound (dynamic arrays, and hashtables).

The prototype will use slab allocators for two purposes:
* String interning
* General purpose set for handling large arrays & hash tables within some underlying arena or stack.

Technically speaking the general purpose situations can instead be grown on demand
with a dedicated segment of vmem, however this might be overkill
if the worst case buckets allocated are < 500 mb for most app usage.

The slab allocators are expected to hold growable pool allocators,
where each pool stores a 'bucket' of fixed-sized blocks of memory.
When a pool's bucket is full it will request another bucket from its arena
for permanent usage within the arena's lifetime.

A free-list of free blocks is tracked for each pool (provided by the underlying pool allocator).

A slab starts out with pools initialized with no buckets and grows as needed.
When a slab is initialized the slab policy is provided to know how many size-classes there should be,
with each entry specifying the bucket and block size for its size-class.
*/
package sectr

import "base:runtime"
import "core:mem"
import "core:slice"

SlabSizeClass :: struct {
	bucket_capacity : uint,
	block_size      : uint,
	block_alignment : uint,
}

Slab_Max_Size_Classes :: 64

SlabPolicy :: StackFixed(SlabSizeClass, Slab_Max_Size_Classes)

SlabHeader :: struct {
	dbg_name : string,
	tracker  : MemoryTracker,
	backing  : Allocator,
	pools    : StackFixed(Pool, Slab_Max_Size_Classes),
}

Slab :: struct {
	using header : ^SlabHeader,
}

slab_allocator :: proc( slab : Slab ) -> ( allocator : Allocator ) {
	allocator.procedure = slab_allocator_proc
	allocator.data      = slab.header
	return
}

slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator, dbg_name : string = "", should_zero_buckets : b32 = false ) -> ( slab : Slab, alloc_error : AllocatorError )
{
	header_size :: size_of( SlabHeader )

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( header_size, mem.DEFAULT_ALIGNMENT, allocator )
	if alloc_error != .None do return

	slab.header   = cast( ^SlabHeader) raw_mem
	slab.backing  = allocator
	slab.dbg_name = dbg_name
	when ODIN_DEBUG {
		memtracker_init( & slab.tracker, allocator, Kilobyte * 256, dbg_name )
	}
	alloc_error = slab_init_pools( slab, policy, bucket_reserve_num, should_zero_buckets )
	return
}
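
A sketch of wiring a policy into a slab (the size classes are illustrative picks; `backing` stands in for whatever allocator the caller owns):

slab_example :: proc( backing : Allocator )
{
	policy : SlabPolicy
	push( & policy, SlabSizeClass { 64 * Kilobyte,  32, mem.DEFAULT_ALIGNMENT })
	push( & policy, SlabSizeClass { 64 * Kilobyte, 256, mem.DEFAULT_ALIGNMENT })

	slab, error := slab_init( & policy, allocator = backing, dbg_name = "example slab" )
	verify( error == .None, "Failed to init example slab" )

	data, alloc_error := slab_alloc( slab, 100, mem.DEFAULT_ALIGNMENT )
	assert( alloc_error == .None && len(data) == 100 ) // served by the 256-byte class

	slab_free( slab, data )
	slab_destroy( slab )
}
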

slab_init_pools :: proc ( using self : Slab, policy : ^SlabPolicy, bucket_reserve_num : uint = 0, should_zero_buckets : b32 ) -> AllocatorError
{
	profile(#procedure)

	for id in 0 ..< policy.idx {
		using size_class := policy.items[id]

		pool_dbg_name := str_fmt_alloc("%v pool[%v]", dbg_name, block_size, allocator = backing)
		pool, alloc_error := pool_init( should_zero_buckets, block_size, bucket_capacity, bucket_reserve_num, block_alignment, backing, pool_dbg_name )
		if alloc_error != .None do return alloc_error

		push( & self.pools, pool )
	}
	return .None
}

slab_reload :: proc ( slab : Slab, allocator : Allocator )
{
	slab.backing = allocator

	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_reload( pool, slab.backing )
	}
}

slab_destroy :: proc( using self : Slab )
{
	for id in 0 ..< pools.idx {
		pool := pools.items[id]
		pool_destroy( pool )
	}

	free( self.header, backing )
	when ODIN_DEBUG {
		memtracker_clear(tracker)
	}
}

slab_alloc :: proc( self : Slab,
	size        : uint,
	alignment   : uint,
	zero_memory := true,
	loc         := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
	// profile(#procedure)
	pool : Pool
	id   : u32 = 0
	for ; id < self.pools.idx; id += 1 {
		pool = self.pools.items[id]

		if pool.block_size >= size && pool.alignment >= alignment {
			break
		}
	}
	verify( id < self.pools.idx, "There is not a size class in the slab's policy to satisfy the requested allocation", location = loc )
	verify( pool.header != nil, "Requested alloc not supported by the slab allocator", location = loc )

	block : []byte
	slab_validate_pools( self )
	block, alloc_error = pool_grab(pool)
	slab_validate_pools( self )

	if block == nil || alloc_error != .None {
		ensure(false, "Bad block from pool")
		return nil, alloc_error
	}
	// log( str_fmt_tmp("%v: Retrieved block: %p %d", self.dbg_name, raw_data(block), len(block) ))

	data = byte_slice(raw_data(block), size)
	if zero_memory {
		slice.zero(data)
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name( & self.tracker, raw_data(block), & block[ len(block) - 1 ] )
	}
	return
}
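
The selection loop above takes the first pool whose block size and alignment satisfy the request, so policies should be pushed in ascending block-size order. With the two ascending classes from the sketch above:

// block_size 32  : a 24-byte request  -> 32-byte class (first fit)
// block_size 256 : a 100-byte request -> 256-byte class (32 is too small)
// a 300-byte request -> no class fits; the verify below the loop fires
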

slab_free :: proc( using self : Slab, data : []byte, loc := #caller_location )
{
	// profile(#procedure)
	pool : Pool
	for id in 0 ..< pools.idx
	{
		pool = pools.items[id]
		if pool_validate_ownership( pool, data ) {
			when ODIN_DEBUG {
				start := raw_data(data)
				end   := ptr_offset(start, pool.block_size - 1)
				memtracker_unregister( self.tracker, { start, end } )
			}

			pool_release( pool, data, loc )
			return
		}
	}
	verify(false, "Attempted to free a block not within a pool of this slab", location = loc)
}

slab_resize :: proc( using self : Slab,
	data        : []byte,
	new_size    : uint,
	alignment   : uint,
	zero_memory := true,
	loc         := #caller_location
) -> ( new_data : []byte, alloc_error : AllocatorError )
{
	// profile(#procedure)
	old_size := uint( len(data))

	pool_resize, pool_old : Pool
	for id in 0 ..< pools.idx
	{
		pool := pools.items[id]

		if pool.block_size >= new_size && pool.alignment >= alignment {
			pool_resize = pool
		}
		if pool_validate_ownership( pool, data ) {
			pool_old = pool
		}
		if pool_resize.header != nil && pool_old.header != nil {
			break
		}
	}

	verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator", location = loc )

	// Resize keeps the block in the same size class; just give it more of its already allocated block.
	if pool_old.block_size == pool_resize.block_size
	{
		new_data_ptr := memory_after(data)
		new_data      = byte_slice( raw_data(data), new_size )
		// log( dump_stacktrace() )
		// log( str_fmt_tmp("%v: Resize via expanding block space allocation %p %d", dbg_name, new_data_ptr, int(new_size - old_size)))

		if zero_memory && new_size > old_size {
			to_zero := byte_slice( new_data_ptr, int(new_size - old_size) )

			slab_validate_pools( self )
			slice.zero( to_zero )
			slab_validate_pools( self )

			// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, cast(rawptr) (uintptr(new_data_ptr) + uintptr(new_size - old_size))))
		}
		return
	}

	// We'll need to provide an entirely new block, so the data will need to be copied over.
	new_block : []byte

	slab_validate_pools( self )
	new_block, alloc_error = pool_grab( pool_resize )
	slab_validate_pools( self )

	if new_block == nil {
		ensure(false, "Retrieved a null block")
		return
	}

	if alloc_error != .None do return

	// TODO(Ed): Reapply this when safe.
	if zero_memory {
		slice.zero( new_block )
		// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", raw_data(new_block), cast(rawptr) (uintptr(raw_data(new_block)) + uintptr(new_size))))
	}

	// log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))

	if raw_data(data) != raw_data(new_block) {
		// log( str_fmt_tmp("%v: Resize via new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
		copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
		pool_release( pool_old, data )

		when ODIN_DEBUG {
			start := raw_data( data )
			end   := rawptr(uintptr(start) + uintptr(pool_old.block_size) - 1)
			memtracker_unregister( self.tracker, { start, end } )
		}
	}

	new_data = new_block[ :new_size]
	when ODIN_DEBUG {
		memtracker_register_auto_name( & self.tracker, raw_data(new_block), & new_block[ len(new_block) - 1 ] )
	}
	return
}

slab_reset :: proc( slab : Slab )
{
	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_reset( pool )
	}
	when ODIN_DEBUG {
		memtracker_clear(slab.tracker)
	}
}

slab_validate_pools :: proc( slab : Slab )
{
	slab := slab
	for id in 0 ..< slab.pools.idx {
		pool := slab.pools.items[id]
		pool_validate( pool )
	}
}

slab_allocator_proc :: proc(
	allocator_data : rawptr,
	mode           : AllocatorMode,
	size           : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	loc            := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
	slab : Slab
	slab.header = cast( ^SlabHeader) allocator_data

	size      := uint(size)
	alignment := uint(alignment)
	old_size  := uint(old_size)

	switch mode
	{
		case .Alloc, .Alloc_Non_Zeroed:
			return slab_alloc( slab, size, alignment, (mode != .Alloc_Non_Zeroed), loc)

		case .Free:
			slab_free( slab, byte_slice( old_memory, int(old_size)), loc )

		case .Free_All:
			slab_reset( slab )

		case .Resize, .Resize_Non_Zeroed:
			return slab_resize( slab, byte_slice(old_memory, int(old_size)), size, alignment, (mode != .Resize_Non_Zeroed), loc)

		case .Query_Features:
			set := cast( ^AllocatorModeSet) old_memory
			if set != nil {
				(set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
			}

		case .Query_Info:
			alloc_error = .Mode_Not_Implemented
	}
	return
}
280
code/grime/stack.odin
Normal file
@ -0,0 +1,280 @@
package sectr

import "core:mem"
import "core:slice"

//region Fixed Stack

StackFixed :: struct ( $ Type : typeid, $ Size : u32 ) {
	idx   : u32,
	items : [ Size ] Type,
}

stack_clear :: #force_inline proc ( using stack : ^StackFixed( $Type, $Size)) {
	idx = 0
}

stack_push :: #force_inline proc( using stack : ^ StackFixed( $ Type, $ Size ), value : Type ) {
	verify( idx < len( items ), "Attempted to push on a full stack" )

	items[ idx ] = value
	idx += 1
}

stack_pop :: #force_inline proc( using stack : ^StackFixed( $ Type, $ Size ) ) {
	verify( idx > 0, "Attempted to pop an empty stack" )

	idx -= 1
	if idx == 0 {
		items[idx] = {}
	}
}

stack_peek_ref :: #force_inline proc "contextless" ( using stack : ^StackFixed( $ Type, $ Size ) ) -> ( ^Type) {
	last_idx := max( 0, idx - 1 ) if idx > 0 else 0
	last     := & items[last_idx]
	return last
}

stack_peek :: #force_inline proc "contextless" ( using stack : ^StackFixed( $ Type, $ Size ) ) -> Type {
	last := max( 0, idx - 1 ) if idx > 0 else 0
	return items[last]
}
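
A quick sketch of the fixed stack in use:

stack_fixed_example :: proc()
{
	stack : StackFixed(int, 8)

	stack_push( & stack, 1 )
	stack_push( & stack, 2 )
	assert( stack_peek( & stack ) == 2 )

	stack_pop( & stack )
	assert( stack_peek( & stack ) == 1 )
	stack_clear( & stack )
}
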

//endregion Fixed Stack

//region Stack Allocator

// TODO(Ed) : This is untested and probably filled with bugs.
/* Growing Stack allocator
This implementation can support growing if the backing allocator supports
it without fragmenting the backing allocator.

Each block in the stack is tracked with a doubly-linked list to have debug stats.
(It could be removed for non-debug builds)
*/

StackAllocatorBase :: struct {
	backing : Allocator,

	using links : DLL_NodeFL(StackAllocatorHeader),
	peak_used : int,
	size      : int,
	data      : [^]byte,
}

StackAllocator :: struct {
	using base : ^StackAllocatorBase,
}

StackAllocatorHeader :: struct {
	using links : DLL_NodePN(StackAllocatorHeader),
	block_size : int,
	padding    : int,
}

stack_allocator :: proc( using self : StackAllocator ) -> ( allocator : Allocator ) {
	allocator.procedure = stack_allocator_proc
	allocator.data      = self.base
	return
}

stack_allocator_init :: proc( size : int, allocator := context.allocator ) -> ( stack : StackAllocator, alloc_error : AllocatorError )
{
	header_size := size_of(StackAllocatorBase)

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( header_size + size, mem.DEFAULT_ALIGNMENT, allocator )
	if alloc_error != AllocatorError.None do return

	stack.base    = cast( ^StackAllocatorBase) raw_mem
	stack.backing = allocator // Record the backing allocator so destroy can free with it.
	stack.size    = size
	stack.data    = cast( [^]byte) (cast( [^]StackAllocatorBase) stack.base)[ 1:]

	stack.last  = cast(^StackAllocatorHeader) stack.data
	stack.first = stack.last
	return
}

stack_allocator_destroy :: proc( using self : StackAllocator )
{
	free( self.base, backing )
}

stack_allocator_init_via_memory :: proc( memory : []byte ) -> ( stack : StackAllocator )
{
	header_size := size_of(StackAllocatorBase)

	if len(memory) < (header_size + Kilobyte) {
		verify(false, "Assigning a stack allocator less than a kilobyte of space")
		return
	}

	stack.base = cast( ^StackAllocatorBase) & memory[0]
	stack.size = len(memory) - header_size
	stack.data = cast( [^]byte ) (cast( [^]StackAllocatorBase) stack.base)[ 1:]

	stack.last  = cast( ^StackAllocatorHeader) stack.data
	stack.first = stack.last
	return
}

stack_allocator_push :: proc( using self : StackAllocator, block_size, alignment : int, zero_memory : bool ) -> ( []byte, AllocatorError )
{
	// TODO(Ed): Make sure first push is fine.
	verify( block_size > Kilobyte, "Attempted to push onto the stack less than a Kilobyte")
	top_block_ptr := memory_after_header( last )

	theoretical_size := cast(int) (uintptr(top_block_ptr) + uintptr(block_size) - uintptr(first))
	if theoretical_size > size {
		// TODO(Ed) : Check if backing allocator supports resize, if it does attempt to grow.
		return nil, .Out_Of_Memory
	}

	next_spot := uintptr( top_block_ptr) + uintptr(last.block_size)

	header_offset_pad := calc_padding_with_header( uintptr(next_spot), uintptr(alignment), size_of(StackAllocatorHeader) )
	header            := cast( ^StackAllocatorHeader) (next_spot + uintptr(header_offset_pad) - uintptr(size_of( StackAllocatorHeader)))
	header.padding     = header_offset_pad
	header.prev        = last
	header.block_size  = block_size

	curr_block_ptr := memory_after_header( header )
	curr_block     := slice_ptr( curr_block_ptr, block_size )

	curr_used     := cast(int) (uintptr(curr_block_ptr) + uintptr(block_size) - uintptr(self.last))
	self.peak_used = max( peak_used, curr_used ) // Track the high-water mark; '+=' would inflate it.

	dll_push_back( & base.links.last, header )

	if zero_memory {
		slice.zero( curr_block )
	}

	return curr_block, .None
}

stack_allocator_resize_top :: proc( using self : StackAllocator, new_block_size, alignment : int, zero_memory : bool ) -> AllocatorError
{
	verify( new_block_size > Kilobyte, "Attempted to resize the last block pushed on the stack to less than a Kilobyte")
	top_block_ptr := memory_after_header( last )

	// Bounds-check against the requested size, not the current one.
	theoretical_size := cast(int) (uintptr(top_block_ptr) + uintptr(new_block_size) - uintptr(first))
	if theoretical_size > size {
		// TODO(Ed) : Check if backing allocator supports resize, if it does attempt to grow.
		return .Out_Of_Memory
	}

	if zero_memory && new_block_size > last.block_size {
		added_ptr   := top_block_ptr[ last.block_size:]
		added_slice := slice_ptr( added_ptr, new_block_size - last.block_size )
		slice.zero( added_slice )
	}

	last.block_size = new_block_size
	return .None
}

stack_allocator_pop :: proc( using self : StackAllocator ) {
	base.links.last      = last.prev
	base.links.last.next = nil
}
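
A sketch of driving the growing stack through the Odin allocator interface (note the kilobyte minimum enforced by `stack_allocator_push`; the `alloc`/`free` helpers here are the project-level wrappers used elsewhere in these files):

stack_allocator_example :: proc()
{
	stack, error := stack_allocator_init( 4 * Megabyte )
	verify( error == .None, "Failed to init example stack allocator" )
	defer stack_allocator_destroy( stack )

	context.allocator = stack_allocator( stack )

	block, alloc_error := alloc( 2 * Kilobyte ) // must exceed the Kilobyte minimum
	assert( alloc_error == .None )
	free( block )                               // LIFO: only the top block may be freed
}
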


stack_allocator_proc :: proc(
	allocator_data : rawptr,
	mode           : AllocatorMode,
	block_size     : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	location       : SourceCodeLocation = #caller_location
) -> ([]byte, AllocatorError)
{
	stack := StackAllocator { cast( ^StackAllocatorBase) allocator_data }

	if stack.data == nil {
		return nil, AllocatorError.Invalid_Argument
	}

	switch mode
	{
		case .Alloc, .Alloc_Non_Zeroed:
		{
			return stack_allocator_push( stack, block_size, alignment, mode == .Alloc )
		}
		case .Free:
		{
			if old_memory == nil {
				return nil, .None
			}

			start     := uintptr(stack.data)
			end       := start + uintptr(stack.size)
			curr_addr := uintptr(old_memory)

			verify( start <= curr_addr && curr_addr < end, "Out of bounds memory address passed to stack allocator (free)" )

			block_ptr := memory_after_header( stack.last )

			// Only the top-most block may be freed (LIFO order).
			if curr_addr != uintptr(block_ptr) {
				return nil, .None
			}

			dll_pop_back( & stack.last )
		}
		case .Free_All:
			// TODO(Ed) : Review that we don't have any header issues with the reset.
			stack.first           = stack.last
			stack.last.next       = nil
			stack.last.block_size = 0

		case .Resize, .Resize_Non_Zeroed:
		{
			// Check if old_memory is at the top of the stack; if it is, just grow its size.
			// Otherwise, log that the user cannot resize stack items that are not at the top of the stack.
			if old_memory == nil {
				return stack_allocator_push(stack, block_size, alignment, mode == .Resize )
			}
			if block_size == 0 {
				return nil, .None
			}

			start     := uintptr(stack.data)
			end       := start + uintptr(stack.size)
			curr_addr := uintptr(old_memory)

			verify( start <= curr_addr && curr_addr < end, "Out of bounds memory address passed to stack allocator (resize)" )

			block_ptr := memory_after_header( stack.last )
			if block_ptr != old_memory {
				ensure( false, "Attempted to resize a block of memory on the stack other than top most" )
				return nil, .None
			}

			if old_size == block_size {
				return byte_slice( old_memory, block_size ), .None
			}

			stack_allocator_resize_top( stack, block_size, alignment, mode == .Resize )
			return byte_slice( block_ptr, block_size ), .None
		}
		case .Query_Features:
		{
			feature_flags := ( ^AllocatorModeSet)(old_memory)
			if feature_flags != nil {
				(feature_flags ^) = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
			}
			return nil, .None
		}
		case .Query_Info:
		{
			return nil, .Mode_Not_Implemented
		}
	}

	return nil, .None
}

//endregion Stack Allocator
11
code/grime/string_format.odin
Normal file
@ -0,0 +1,11 @@
// This provides a string generator using a token replacement approach instead of a %<id> verb-syntax to parse.
// This was done just for preference as I personally don't like the c-printf-like syntax.
package sectr

// str_format :: proc ( format : string, tokens : ..args ) {

// }
117
code/grime/string_interning.odin
Normal file
@ -0,0 +1,117 @@
/*
This is a quick and dirty string table.
It uses the HMapZPL for the hashtable of strings, and the string's content is stored in a dedicated slab.

Future Plans (IF needed for performance):
The goal is to eventually swap out the slab with possibly a dedicated growing vmem arena for the strings.
The table would be swapped with a table stored in the general slab that uses either linear probing or open addressing.

If linear probing, the hash node list per table bucket is stored with the strings in the same arena.
If open addressing, we just keep the open addressed array of node slots in the general slab (but hopefully better perf)
*/
package sectr

import "base:runtime"
import "core:mem"
import "core:slice"
import "core:strings"

StringKey   :: distinct u64
RunesCached :: []rune

StrRunesPair :: struct {
	str   : string,
	runes : []rune,
}
to_str_runes_pair :: proc ( content : string ) -> StrRunesPair {
	return { content, to_runes(content) }
}

StringCache :: struct {
	slab  : Slab,
	table : HMapZPL(StrRunesPair),
}

str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
	alignment := uint(mem.DEFAULT_ALIGNMENT)

	policy     : SlabPolicy
	policy_ptr := & policy
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,           16, alignment })
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,           32, alignment })
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,           64, alignment })
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,          128, alignment })
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,          256, alignment })
	push( policy_ptr, SlabSizeClass { 64 * Kilobyte,          512, alignment })
	push( policy_ptr, SlabSizeClass {  1 * Megabyte,  1 * Kilobyte, alignment })
	push( policy_ptr, SlabSizeClass {  4 * Megabyte,  4 * Kilobyte, alignment })
	push( policy_ptr, SlabSizeClass { 16 * Megabyte, 16 * Kilobyte, alignment })
	push( policy_ptr, SlabSizeClass { 32 * Megabyte, 32 * Kilobyte, alignment })
	// push( policy_ptr, SlabSizeClass { 64 * Megabyte,  64 * Kilobyte, alignment })
	// push( policy_ptr, SlabSizeClass { 64 * Megabyte, 128 * Kilobyte, alignment })
	// push( policy_ptr, SlabSizeClass { 64 * Megabyte, 256 * Kilobyte, alignment })
	// push( policy_ptr, SlabSizeClass { 64 * Megabyte, 512 * Kilobyte, alignment })
	// push( policy_ptr, SlabSizeClass { 64 * Megabyte,   1 * Megabyte, alignment })

	header_size :: size_of( Slab )

	@static dbg_name := "StringCache slab"

	alloc_error : AllocatorError
	cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator(), dbg_name = dbg_name )
	verify(alloc_error == .None, "Failed to initialize the string cache" )

	cache.table, alloc_error = zpl_hmap_init_reserve( StrRunesPair, persistent_allocator(), 4 * Megabyte, dbg_name )
	return
}

str_intern_key    :: #force_inline proc( content : string ) -> StringKey { return cast(StringKey) crc32( transmute([]byte) content ) }
str_intern_lookup :: #force_inline proc( key : StringKey ) -> (^StrRunesPair) { return zpl_hmap_get( & get_state().string_cache.table, transmute(u64) key ) }

str_intern :: proc(
	content : string
) -> StrRunesPair
{
	// profile(#procedure)
	cache := & get_state().string_cache

	key    := str_intern_key(content)
	result := zpl_hmap_get( & cache.table, transmute(u64) key )
	if result != nil {
		return (result ^)
	}

	// profile_begin("new entry")
	{
		length := len(content)
		// str_mem, alloc_error := alloc( length, mem.DEFAULT_ALIGNMENT )
		str_mem, alloc_error := slab_alloc( cache.slab, uint(length), uint(mem.DEFAULT_ALIGNMENT), zero_memory = false )
		verify( alloc_error == .None, "String cache had a backing allocator error" )

		// copy_non_overlapping( str_mem, raw_data(content), length )
		copy_non_overlapping( raw_data(str_mem), raw_data(content), length )

		runes : []rune
		// runes, alloc_error = to_runes( content, persistent_allocator() )
		runes, alloc_error = to_runes( content, slab_allocator(cache.slab) )
		verify( alloc_error == .None, "String cache had a backing allocator error" )

		slab_validate_pools( get_state().persistent_slab )

		// result, alloc_error = zpl_hmap_set( & cache.table, key, StrRunesPair { transmute(string) byte_slice(str_mem, length), runes } )
		result, alloc_error = zpl_hmap_set( & cache.table, transmute(u64) key, StrRunesPair { transmute(string) str_mem, runes } )
		verify( alloc_error == .None, "String cache had a backing allocator error" )

		slab_validate_pools( get_state().persistent_slab )
	}
	// profile_end()

	return (result ^)
}
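
Interning in practice, assuming the cache has been installed in the module state that `get_state` exposes — repeated calls with equal content return the same backing memory:

str_intern_example :: proc()
{
	a := str_intern( "hello" )
	b := str_intern( "hello" )

	// Same key, same cached entry: the string data is stored once in the slab.
	assert( raw_data(a.str) == raw_data(b.str) )
	assert( len(a.runes)    == len(b.runes) )
}
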

// runes_intern :: proc( content : []rune ) -> StrRunesPair
// {
// 	cache := get_state().string_cache
// }
43
code/grime/unicode.odin
Normal file
@ -0,0 +1,43 @@
package sectr

rune16 :: distinct u16

// Exposing the alloc_error
@(require_results)
string_to_runes :: proc ( content : string, allocator := context.allocator) -> (runes : []rune, alloc_error : AllocatorError) #optional_allocator_error {
	num := str_rune_count(content)

	runes, alloc_error = make([]rune, num, allocator)
	if runes == nil || alloc_error != AllocatorError.None {
		return
	}

	idx := 0
	for codepoint in content {
		runes[idx] = codepoint
		idx += 1
	}
	return
}

string_to_runes_array :: proc( content : string, allocator := context.allocator ) -> ( []rune, AllocatorError )
{
	num := cast(u64) str_rune_count(content)

	runes_array, alloc_error := array_init_reserve( rune, allocator, num )
	if alloc_error != AllocatorError.None {
		return nil, alloc_error
	}

	runes := array_to_slice_capacity(runes_array)

	idx := 0
	for codepoint in content {
		runes[idx] = codepoint
		idx += 1
	}
	return runes, alloc_error
}
312
code/grime/virtual_arena.odin
Normal file
@ -0,0 +1,312 @@
/*
Odin's virtual arena allocator doesn't do what I ideally want for allocation resizing.
(It was also a nice exercise along with making the other allocators)

So this is a virtual memory backed arena allocator designed
to take advantage of one large contiguous reserve of memory.
With the expectation that resizes with its interface will only occur using the last allocated block.

All virtual address space memory for this application is managed by a virtual arena.
No other part of the program will touch the virtual memory interface directly other than it.

Thus for the scope of this prototype the Virtual Arenas are the only interfaces to dynamic address spaces for the runtime of the client app.
The host application as well ideally (although this may not be the case for a while)
*/
package sectr

import "base:intrinsics"
import "base:runtime"
import "core:mem"
import "core:os"
import "core:slice"
import "core:sync"

VArena_GrowthPolicyProc :: #type proc( commit_used, committed, reserved, requested_size : uint ) -> uint

VArena :: struct {
	using vmem : VirtualMemoryRegion,
	dbg_name        : string,
	tracker         : MemoryTracker,
	commit_used     : uint,
	growth_policy   : VArena_GrowthPolicyProc,
	allow_any_reize : b32,
	mutex           : sync.Mutex,
}

varena_default_growth_policy :: proc( commit_used, committed, reserved, requested_size : uint ) -> uint
{
	@static commit_limit := uint(1 * Megabyte)
	@static increment    := uint(16 * Kilobyte)
	page_size := uint(virtual_get_page_size())

	if increment < Gigabyte && committed > commit_limit {
		commit_limit *= 2
		increment    *= 2

		increment = clamp( increment, Megabyte, Gigabyte )
	}

	remaining_reserve := reserved - committed
	growth_increment  := max( increment, requested_size )
	growth_increment   = clamp( growth_increment, page_size, remaining_reserve )
	next_commit_size  := memory_align_formula( committed + growth_increment, page_size )
	return next_commit_size
}
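
Concretely, the default policy doubles its increment each time committed memory crosses the current limit, then rounds the new commit size up to a page multiple. A trace of how the statics evolve (assuming the initial values above):

// committed =  512 KB, limit = 1 MB : increment stays 16 KB -> commit grows by ~16 KB
// committed =  1.5 MB, limit = 1 MB : limit -> 2 MB, increment -> 32 KB,
//                                     then clamp(32 KB, 1 MB, 1 GB) raises it to 1 MB
// large requests win out either way: growth_increment = max( increment, requested_size )
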

varena_allocator :: proc( arena : ^VArena ) -> ( allocator : Allocator ) {
	allocator.procedure = varena_allocator_proc
	allocator.data      = arena
	return
}

// Default growth_policy is nil
varena_init :: proc( base_address : uintptr, to_reserve, to_commit : uint,
	growth_policy : VArena_GrowthPolicyProc, allow_any_reize : b32 = false, dbg_name : string
) -> ( arena : VArena, alloc_error : AllocatorError)
{
	page_size := uint(virtual_get_page_size())
	verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
	verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
	verify( to_commit  >= page_size, "Attempted to commit less than a page size")
	verify( to_reserve >= to_commit, "Attempted to commit more than there is to reserve" )

	vmem : VirtualMemoryRegion
	vmem, alloc_error = virtual_reserve_and_commit( base_address, to_reserve, to_commit )
	if vmem.base_address == nil || alloc_error != .None {
		ensure(false, "Failed to allocate requested virtual memory for virtual arena")
		return
	}

	arena.vmem        = vmem
	arena.commit_used = 0

	if growth_policy == nil {
		arena.growth_policy = varena_default_growth_policy
	}
	else {
		arena.growth_policy = growth_policy
	}
	arena.allow_any_reize = allow_any_reize

	when ODIN_DEBUG {
		memtracker_init( & arena.tracker, runtime.heap_allocator(), Kilobyte * 128, dbg_name )
	}
	return
}
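
A reserve/commit sketch (base address and sizes are arbitrary picks; a nil growth policy selects the default above):

varena_example :: proc()
{
	arena, error := varena_init(
		base_address  = 0,
		to_reserve    = 64 * Megabyte,
		to_commit     = 1 * Megabyte,
		growth_policy = nil,
		dbg_name      = "example varena",
	)
	verify( error == .None, "Failed to init example varena" )

	data, alloc_error := varena_alloc( & arena, 4 * Kilobyte )
	assert( alloc_error == .None && len(data) == 4 * Kilobyte )

	varena_release( & arena )
}
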

varena_alloc :: proc( using self : ^VArena,
	size        : uint,
	alignment   : uint = mem.DEFAULT_ALIGNMENT,
	zero_memory := true,
	location    := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
	verify( alignment & (alignment - 1) == 0, "Non-power of two alignment", location = location )
	page_size := uint(virtual_get_page_size())

	requested_size := size
	if requested_size == 0 {
		ensure(false, "Requested 0 size")
		return nil, .Invalid_Argument
	}
	// ensure( requested_size > page_size, "Requested less than a page size, going to allocate a page size")
	// requested_size = max(requested_size, page_size)

	sync.mutex_guard( & mutex )

	alignment_offset := uint(0)
	current_offset   := uintptr(self.reserve_start) + uintptr(commit_used)
	mask             := uintptr(alignment - 1)

	if current_offset & mask != 0 {
		alignment_offset = alignment - uint(current_offset & mask)
	}

	size_to_allocate, overflow_signal := intrinsics.overflow_add( requested_size, alignment_offset )
	if overflow_signal {
		alloc_error = .Out_Of_Memory
		return
	}

	to_be_used : uint
	to_be_used, overflow_signal = intrinsics.overflow_add( commit_used, size_to_allocate )
	if overflow_signal || to_be_used > reserved {
		alloc_error = .Out_Of_Memory
		return
	}

	header_offset := uint( uintptr(reserve_start) - uintptr(base_address) )

	commit_left          := committed - commit_used - header_offset
	needs_more_committed := commit_left < size_to_allocate
	if needs_more_committed
	{
		profile("VArena Growing")
		next_commit_size := growth_policy( commit_used, committed, reserved, size_to_allocate )
		alloc_error       = virtual_commit( vmem, next_commit_size )
		if alloc_error != .None {
			return
		}
	}

	data_ptr := rawptr(current_offset + uintptr(alignment_offset))
	data      = byte_slice( data_ptr, int(requested_size) )
	self.commit_used += size_to_allocate
	alloc_error = .None

	// log_backing : [Kilobyte * 16]byte
	// backing_slice := byte_slice( & log_backing[0], len(log_backing))
	// log( str_fmt_buffer( backing_slice, "varena alloc - BASE: %p PTR: %X, SIZE: %d", cast(rawptr) self.base_address, & data[0], requested_size) )

	if zero_memory
	{
		// log( str_fmt_buffer( backing_slice, "Zeroing data (Range: %p to %p)", raw_data(data), cast(rawptr) (uintptr(raw_data(data)) + uintptr(requested_size))))
		// slice.zero( data )
		mem_zero( data_ptr, int(requested_size) )
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name( & tracker, & data[0], & data[len(data) - 1] )
	}
	return
}

varena_free_all :: proc( using self : ^VArena )
{
	sync.mutex_guard( & mutex )
	commit_used = 0

	when ODIN_DEBUG && Track_Memory {
		array_clear(tracker.entries)
	}
}

varena_release :: proc( using self : ^VArena )
{
	sync.mutex_guard( & mutex )

	virtual_release( vmem )
	commit_used = 0
}

varena_allocator_proc :: proc(
	allocator_data : rawptr,
	mode           : AllocatorMode,
	size           : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	location       : SourceCodeLocation = #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
	arena := cast( ^VArena) allocator_data

	size      := uint(size)
	alignment := uint(alignment)
	old_size  := uint(old_size)

	// page_size := uint(virtual_get_page_size())

	switch mode
	{
		case .Alloc, .Alloc_Non_Zeroed:
			data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Alloc_Non_Zeroed), location )
			return

		case .Free:
			alloc_error = .Mode_Not_Implemented

		case .Free_All:
			varena_free_all( arena )

		case .Resize, .Resize_Non_Zeroed:
			if old_memory == nil {
				ensure(false, "Resizing without old_memory?")
				data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
				return
			}

			if size == old_size {
				ensure(false, "Requested resize when none needed")
				data = byte_slice( old_memory, old_size )
				return
			}

			alignment_offset := uintptr(old_memory) & uintptr(alignment - 1)
			if alignment_offset == 0 && size < old_size {
				ensure(false, "Requested a shrink from a virtual arena")
				data = byte_slice( old_memory, size )
				return
			}

			old_memory_offset := uintptr(old_memory)          + uintptr(old_size)
			current_offset    := uintptr(arena.reserve_start) + uintptr(arena.commit_used)

			// if old_size < page_size {
			// 	// We're dealing with an allocation that requested less than the minimum allocated on vmem.
			// 	// Provide them more of their actual memory
			// 	data = byte_slice( old_memory, size )
			// 	return
			// }

			verify( old_memory_offset == current_offset || arena.allow_any_reize,
				"Cannot resize existing allocation in virtual arena to a larger size unless it was the last allocated" )

			// log_backing : [Kilobyte * 16]byte
			// backing_slice := byte_slice( & log_backing[0], len(log_backing))

			if old_memory_offset != current_offset && arena.allow_any_reize
			{
				// Give it new memory and copy the old over. Old memory is unrecoverable until clear.
				new_region : []byte
				new_region, alloc_error = varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
				if new_region == nil || alloc_error != .None {
					ensure(false, "Failed to grab new region")
					data = byte_slice( old_memory, old_size )

					when ODIN_DEBUG {
						memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
					}
					return
				}

				copy_non_overlapping( raw_data(new_region), old_memory, int(old_size) )
				data = new_region
				// log( str_fmt_tmp("varena resize (new): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))

				when ODIN_DEBUG {
					memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
				}
				return
			}

			new_region : []byte
			new_region, alloc_error = varena_alloc( arena, size - old_size, alignment, (mode != .Resize_Non_Zeroed), location )
			if new_region == nil || alloc_error != .None {
				ensure(false, "Failed to grab new region")
				data = byte_slice( old_memory, old_size )
				return
			}

			data = byte_slice( old_memory, size )
			// log( str_fmt_tmp("varena resize (expanded): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))

			when ODIN_DEBUG {
				memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
			}
			return

		case .Query_Features:
		{
			set := cast( ^AllocatorModeSet) old_memory
			if set != nil {
				(set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
			}
		}
		case .Query_Info:
		{
			alloc_error = .Mode_Not_Implemented
		}
	}
	return
}
116
code/grime/virtual_memory.odin
Normal file
@ -0,0 +1,116 @@
/* Virtual Memory OS Interface
This is an alternative to the virtual core library provided by Odin; it supports setting the base address among other things.
*/
package sectr

import "core:mem"
import core_virtual "core:mem/virtual"
import "core:os"

VirtualMemoryRegionHeader :: struct {
	committed : uint,
	reserved  : uint,
	reserve_start : [^]byte,
}

VirtualMemoryRegion :: struct {
	using base_address : ^VirtualMemoryRegionHeader,
}

virtual_get_page_size :: proc "contextless" () -> int {
	@static page_size := 0
	if page_size == 0 {
		page_size = os.get_page_size()
	}
	return page_size
}

virtual_reserve_remaining :: proc "contextless" ( using vmem : VirtualMemoryRegion ) -> uint {
	header_offset := cast(uint) (uintptr(reserve_start) - uintptr(vmem.base_address))
	return reserved - header_offset
}

@(require_results)
virtual_commit :: proc "contextless" ( using vmem : VirtualMemoryRegion, size : uint ) -> ( alloc_error : AllocatorError )
{
	if size < committed {
		return .None
	}

	page_size := uint(virtual_get_page_size())
	to_commit := memory_align_formula( size, page_size )

	alloc_error = core_virtual.commit( base_address, to_commit )
	if alloc_error != .None {
		return alloc_error
	}

	base_address.committed = size
	return alloc_error
}
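
A quick illustration of the page rounding (this assumes `memory_align_formula` rounds its first argument up to the nearest multiple of the second, which matches how it's used throughout these files):

// page_size = 4096 (typical):
//   memory_align_formula( 5000, 4096 ) -> 8192  (one extra page commits)
//   memory_align_formula( 4096, 4096 ) -> 4096  (already page-aligned)
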
|
||||
|
||||
virtual_decommit :: proc "contextless" ( vmem : VirtualMemoryRegion, size : uint ) {
|
||||
core_virtual.decommit( vmem.base_address, size )
|
||||
}
|
||||
|
||||
virtual_protect :: proc "contextless" ( vmem : VirtualMemoryRegion, region : []byte, flags : VirtualProtectFlags ) -> b32
|
||||
{
|
||||
page_size := virtual_get_page_size()
|
||||
|
||||
if len(region) % page_size != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return cast(b32) core_virtual.protect( raw_data(region), len(region), flags )
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
virtual_reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( VirtualMemoryRegion, AllocatorError ) {
|
||||
page_size := uint(virtual_get_page_size())
|
||||
to_reserve := memory_align_formula( size, page_size )
|
||||
return virtual__reserve( base_address, to_reserve )
|
||||
}
@(require_results)
virtual_reserve_and_commit :: proc "contextless" (
	base_address : uintptr, reserve_size, commit_size : uint
) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	if reserve_size < commit_size {
		alloc_error = .Invalid_Argument
		return
	}

	vmem, alloc_error = virtual_reserve( base_address, reserve_size )
	if alloc_error != .None {
		return
	}

	alloc_error = virtual_commit( vmem, commit_size )
	return
}

virtual_release :: proc "contextless" ( vmem : VirtualMemoryRegion ) {
	core_virtual.release( vmem.base_address, vmem.reserved )
}
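A minimal end-to-end sketch of the lifecycle these procs are meant for: reserve a large range up front, commit a slice of it, grow the commit later, release everything on exit.

example_vmem_lifecycle :: proc() {
	// 0 for the base address lets the OS choose where to place the reservation.
	vmem, error := virtual_reserve_and_commit( 0, uint(64 * Megabyte), uint(1 * Megabyte) )
	if error != .None do return
	defer virtual_release( vmem )

	// Grow the committed window later; sizes at or below what is already committed are a no-op.
	error = virtual_commit( vmem, uint(8 * Megabyte) )
	if error != .None do return
}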
// If the OS is not Windows, we just use the core library's interface, which does not support a base address.
when ODIN_OS != OS_Type.Windows {

virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)

	// Ignoring the base address; add an OS-specific impl if you want it.
	data : []byte
	data, alloc_error = core_virtual.reserve( header_size + size )
	if alloc_error != .None {
		return
	}
	alloc_error = core_virtual.commit( raw_data(data), header_size )
	if alloc_error != .None {
		return
	}

	vmem.base_address  = cast( ^VirtualMemoryRegionHeader ) raw_data(data)
	vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
	vmem.reserved      = uint(len(data))
	vmem.committed     = header_size
	return
}

} // END: ODIN_OS != runtime.Odin_OS_Type.Windows
112
code/grime/windows.odin
Normal file
@ -0,0 +1,112 @@
package sectr

import "core:c"
import "core:c/libc"
import "core:fmt"
import "core:mem"
import core_virtual "core:mem/virtual"
import "core:strings"
import win32 "core:sys/windows"

when ODIN_OS == OS_Type.Windows {

thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
{
	// label_backing : [1 * Megabyte]u8
	// label_arena : Arena
	// arena_init( & label_arena, slice_ptr( & label_backing[0], len(label_backing)) )
	// label_u8 := str_fmt_tmp( "SECTR: WAIT TIMER")//, allocator = arena_allocator( &label_arena) )
	// label_u16 := win32.utf8_to_utf16( label_u8, context.temp_allocator) //arena_allocator( & label_arena) )

	timer := win32.CreateWaitableTimerExW( nil, nil, win32.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, win32.TIMER_ALL_ACCESS )
	if timer == nil {
		msg := str_fmt_tmp("Failed to create win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}

	due_time := win32.LARGE_INTEGER(desired_ms * MS_To_NS)
	result := win32.SetWaitableTimerEx( timer, & due_time, 0, nil, nil, nil, 0 )
	if ! result {
		msg := str_fmt_tmp("Failed to set win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}

	WAIT_ABANDONED     : win32.DWORD : 0x00000080
	WAIT_IO_COMPLETION : win32.DWORD : 0x000000C0
	WAIT_OBJECT_0      : win32.DWORD : 0x00000000
	WAIT_TIMEOUT       : win32.DWORD : 0x00000102
	WAIT_FAILED        : win32.DWORD : 0xFFFFFFFF

	wait_result := win32.WaitForSingleObjectEx( timer, win32.INFINITE, win32.BOOL(true) )
	switch wait_result
	{
		case WAIT_ABANDONED:
			msg := str_fmt_tmp("Failed to wait for win32 timer - Error: WAIT_ABANDONED" )
			log( msg, LogLevel.Error, loc)
			return false

		case WAIT_IO_COMPLETION:
			msg := str_fmt_tmp("Waited for win32 timer: Ended by APC queued to the thread" )
			log( msg, LogLevel.Error, loc)
			return false

		case WAIT_OBJECT_0:
			// Timer signaled normally; the wait succeeded.
			msg := str_fmt_tmp("Waited for win32 timer - Reason: WAIT_OBJECT_0" )
			log( msg, loc = loc)
			return true

		case WAIT_FAILED:
			msg := str_fmt_tmp("Waited for win32 timer failed - ErrorCode: %v", win32.GetLastError() )
			log( msg, LogLevel.Error, loc)
			return false
	}

	return true
}
set__scheduler_granularity :: proc "contextless" ( desired_ms : u32 ) -> b32 {
	return win32.timeBeginPeriod( desired_ms ) == win32.TIMERR_NOERROR
}
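Sketch of how these two procs combine into a frame limiter: request 1 ms scheduler granularity once at startup, then sleep off the remaining frame budget with the high-resolution timer.

example_frame_wait :: proc( frame_budget_ms, elapsed_ms : f64 ) {
	@static granularity_set := false
	if ! granularity_set {
		granularity_set = bool(set__scheduler_granularity( 1 ))
	}

	remaining := frame_budget_ms - elapsed_ms
	if remaining > 0 && ! thread__highres_wait( remaining ) {
		// Timer path failed; a caller could fall back to a coarse sleep or busy spin here.
	}
}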
WIN32_ERROR_INVALID_ADDRESS  :: 487
WIN32_ERROR_COMMITMENT_LIMIT :: 1455

@(require_results)
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)

	result := win32.VirtualAlloc( rawptr(base_address), header_size + size, win32.MEM_RESERVE, win32.PAGE_READWRITE )
	if result == nil {
		alloc_error = .Out_Of_Memory
		return
	}
	// Commit at the address VirtualAlloc actually reserved; base_address may have been nil.
	result = win32.VirtualAlloc( result, header_size, win32.MEM_COMMIT, win32.PAGE_READWRITE )
	if result == nil
	{
		switch err := win32.GetLastError(); err
		{
			case 0:
				alloc_error = .Invalid_Argument
				return

			case WIN32_ERROR_INVALID_ADDRESS, WIN32_ERROR_COMMITMENT_LIMIT:
				alloc_error = .Out_Of_Memory
				return
		}

		alloc_error = .Out_Of_Memory
		return
	}

	vmem.base_address  = cast(^VirtualMemoryRegionHeader) result
	vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
	vmem.reserved      = size
	vmem.committed     = header_size
	alloc_error = .None
	return
}

} // END: ODIN_OS == runtime.Odin_OS_Type.Windows