Beginning to lift the "grime" files to their own package
This commit is contained in:
@ -1,327 +0,0 @@
|
||||
/*
|
||||
Based on gencpp's and thus zpl's Array implementation
|
||||
Made because of the map issue with fonts during hot-reload.
|
||||
I didn't want to make the HMapZPL impl with the [dynamic] array for now, to isolate the hot-reload issue (while I was diagnosing it).
|
||||
|
||||
Update 5-26-2024:
|
||||
TODO(Ed): Raw_Dynamic_Array is defined within base:runtime/core.odin and exposes what we need for worst case hot-reloads.
|
||||
So its best to go back to regular dynamic arrays at some point.
|
||||
*/
|
||||
package sectr
|
||||
|
||||
import "core:c/libc"
|
||||
import "core:mem"
|
||||
import "core:slice"
|
||||
|
||||
ArrayHeader :: struct ( $ Type : typeid ) {
|
||||
backing : Allocator,
|
||||
dbg_name : string,
|
||||
fixed_cap : b32,
|
||||
capacity : u64,
|
||||
num : u64,
|
||||
data : [^]Type,
|
||||
}
|
||||
|
||||
Array :: struct ( $ Type : typeid ) {
|
||||
using header : ^ArrayHeader(Type),
|
||||
}
|
||||
|
||||
// Recovers the Array container from a slice produced by array_to_slice.
// The data region allocated in array_init_reserve sits immediately after its
// ArrayHeader, so we step back by the header size from the first element.
// Fixes: the original referenced an undefined `first_element_ptr`, subtracted
// size_of(Array) (just a pointer's size, not the header), and returned `nil`
// for a struct type.
array_underlying_slice :: proc(slice: []($ Type)) -> Array(Type)
{
	if len(slice) == 0 {
		return {}
	}
	header_size       := size_of( ArrayHeader(Type) )
	first_element_ptr := & slice[0]
	header_ptr        := cast( ^ArrayHeader(Type)) ( uintptr(first_element_ptr) - uintptr(header_size) )
	return Array(Type) { header_ptr }
}
|
||||
|
||||
// View over the array's live elements [0, num).
array_to_slice :: proc( using self : Array($ Type) ) -> []Type {
	live_count := int(num)
	return slice_ptr( data, live_count )
}
|
||||
|
||||
// View over the array's entire reserved storage [0, capacity),
// including slots past the live count.
array_to_slice_capacity :: proc( using self : Array($ Type) ) -> []Type {
	reserved := int(capacity)
	return slice_ptr( data, reserved )
}
|
||||
|
||||
// Growth policy: double the current value and pad by 8 so an empty
// array still reserves a useful minimum (array_grow_formula(0) == 8).
array_grow_formula :: proc( value : u64 ) -> u64 {
	return value * 2 + 8
}
|
||||
|
||||
// Convenience constructor: reserves the default minimum capacity
// given by array_grow_formula(0).
array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), AllocatorError ) {
	default_cap := array_grow_formula(0)
	return array_init_reserve( Type, allocator, default_cap )
}
|
||||
|
||||
// Allocates one contiguous block: an ArrayHeader(Type) immediately followed by
// storage for `capacity` elements. Returns the Array and the allocator's error.
//   fixed_cap : stored on the header as a hint; not enforced here.
//   dbg_name  : debug label stored on the header (used by memory tracking).
array_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, capacity : u64, fixed_cap : b32 = false, dbg_name : string = "" ) -> ( result : Array(Type), alloc_error : AllocatorError )
{
	header_size := size_of(ArrayHeader(Type))
	array_size := header_size + int(capacity) * size_of(Type)

	raw_mem : rawptr
	raw_mem, alloc_error = alloc( array_size, allocator = allocator )
	// log( str_fmt_tmp("array reserved: %d", header_size + int(capacity) * size_of(Type) ))
	if alloc_error != AllocatorError.None do return

	result.header = cast( ^ArrayHeader(Type)) raw_mem
	result.backing = allocator
	result.dbg_name = dbg_name
	result.fixed_cap = fixed_cap
	result.capacity = capacity
	// Element storage begins right after the header within the same allocation.
	result.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) result.header)[ 1:]
	return
}
|
||||
|
||||
// Appends one element to the array, growing the backing store when full.
array_append_value :: proc( self : ^Array( $ Type), value : Type ) -> AllocatorError
{
	// profile(#procedure)
	if self.header.num == self.header.capacity {
		if error := array_grow( self, self.header.capacity ); error != AllocatorError.None {
			return error
		}
	}

	// Re-read the header after a potential grow (the allocation may have moved).
	self.header.data[ self.header.num ] = value
	self.header.num += 1
	return AllocatorError.None
}
|
||||
|
||||
// Appends every element of `other` onto `self`, growing first when needed.
// NOTE(review): the ensure(false, ...) below makes every call trap — this path
// is deliberately flagged as unverified (see the TODO); confirm before relying on it.
array_append_array :: proc( using self: ^Array( $ Type), other : Array(Type)) -> AllocatorError
{
	if num + other.num > capacity
	{
		grow_result := array_grow( self, num + other.num )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	// Note(Ed) : Original code from gencpp
	// libc.memcpy( ptr_offset(data, num), raw_data(items), len(items) * size_of(Type) )

	// TODO(Ed) : VERIFY VIA DEBUG THIS COPY IS FINE.
	ensure(false, "time to check....")
	// Destination is the free tail [num, capacity); copy() bounds by the shorter side.
	target := ptr_offset( data, num )
	copy( slice_ptr(target, int(capacity - num)), array_to_slice(other) )

	num += other.num
	return AllocatorError.None
}
|
||||
|
||||
// Appends every element of `items`, growing once up-front if needed.
array_append_slice :: proc( using self : ^Array( $ Type ), items : []Type ) -> AllocatorError
{
	items_num := u64(len(items))
	needed    := num + items_num
	if needed > capacity {
		if error := array_grow( self, needed ); error != AllocatorError.None {
			return error
		}
	}

	// Copy into the free tail; destination is sized to the remaining capacity.
	free_tail := slice_ptr( ptr_offset( data, num ), int(capacity - num) )
	copy( free_tail, items )

	num += items_num
	return AllocatorError.None
}
|
||||
|
||||
// Inserts `item` at index `id`, shifting later elements one slot right.
// `id` is clamped to the live range; inserting into an empty array uses slot 0.
array_append_at :: proc( using self : ^Array( $ Type ), item : Type, id : u64 ) -> AllocatorError
{
	id := id
	// Clamp: guard num == 0 first — the original computed `num - 1`, which
	// underflows the unsigned count on an empty array. (The original's
	// `id < 0` check was dropped: id is u64 and can never be negative.)
	if num == 0 {
		id = 0
	}
	else if id >= num {
		id = num - 1
	}

	if capacity < num + 1
	{
		grow_result := array_grow( self, capacity )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	// Shift the tail [id, num) one slot right, then write the new element.
	target := & data[id]
	libc.memmove( ptr_offset(target, 1), target, uint(num - id) * size_of(Type) )

	data[id] = item
	num += 1
	return AllocatorError.None
}
|
||||
|
||||
// Inserts all of `items` at index `id`, shifting existing elements right.
// When id >= num this degenerates to a plain append at the end.
// Fixes vs. original: array_append_slice was called without `self`; the grow
// condition compared len(items) against capacity instead of the total needed;
// the move destination was sized num - id - len(items), truncating the shifted
// tail; and u64/int arithmetic was mixed without casts (a compile error).
array_append_at_slice :: proc( using self : ^Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError
{
	items_num := u64(len(items))
	if id >= num {
		return array_append_slice( self, items )
	}
	if num + items_num > capacity
	{
		grow_result := array_grow( self, num + items_num )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	// Shift the tail [id, num) right by items_num (copy handles the overlap),
	// then place the new items into the opened gap.
	target := & data[id + items_num]
	dst := slice_ptr( target, int(num - id) )
	src := slice_ptr( & data[id], int(num - id) )
	copy( dst, src )
	copy( slice_ptr( & data[id], int(items_num) ), items )

	num += items_num
	return AllocatorError.None
}
|
||||
|
||||
// array_back :: proc( )
|
||||
|
||||
// array_push_back :: proc( using self : Array( $ Type)) -> b32 {
|
||||
// if num == capacity {
|
||||
// return false
|
||||
// }
|
||||
|
||||
// data[ num ] = value
|
||||
// num += 1
|
||||
// return true
|
||||
// }
|
||||
|
||||
// Resets the element count to zero; optionally zeroes the live element memory.
array_clear :: proc "contextless" ( using self : Array( $ Type ), zero_data : b32 = false ) {
	if zero_data {
		live_bytes := int(num * size_of(Type))
		mem.set( data, 0, live_bytes )
	}
	header.num = 0
}
|
||||
|
||||
// Fills the half-open range [begin, end) with `value`.
// Returns false when the range is malformed or extends past the element count.
array_fill :: proc( using self : Array( $ Type ), begin, end : u64, value : Type ) -> b32
{
	// `end` is exclusive (see the loop below), so end == num is valid; the
	// original rejected it (`end >= num`), making it impossible to fill through
	// the final element. The `begin < 0` check was dropped (u64 is never
	// negative) and an explicit begin > end guard added.
	if begin > end || end > num {
		return false
	}

	// data_slice := slice_ptr( ptr_offset( data, begin ), end - begin )
	// slice.fill( data_slice, cast(int) value )

	for id := begin; id < end; id += 1 {
		data[ id ] = value
	}
	return true
}
|
||||
|
||||
// Releases the array's single backing allocation (header + data).
// The caller must not use this Array afterward.
array_free :: proc( using self : Array( $ Type ) ) {
	free( self.header, backing )
	// Fix: the original wrote `self.data = nil` here, which stores through the
	// just-freed header (use-after-free) — and the write was lost anyway since
	// `self` is a by-value copy.
}
|
||||
|
||||
// Grows the backing store to max(array_grow_formula(capacity), min_capacity).
array_grow :: proc( using self : ^Array( $ Type ), min_capacity : u64 ) -> AllocatorError
{
	// profile(#procedure)
	requested := max( array_grow_formula( capacity ), min_capacity )
	return array_set_capacity( self, requested )
}
|
||||
|
||||
// Drops the last element; hard-errors on an empty array.
array_pop :: proc( using self : Array( $ Type ) ) {
	verify( num > 0, "Attempted to pop an array with no elements" )
	num -= 1
}
|
||||
|
||||
// Removes the element at `id`, shifting the tail left by one (order-preserving).
array_remove_at :: proc( using self : Array( $ Type ), id : u64 )
{
	verify( id < header.num, "Attempted to remove from an index larger than the array" )

	left  := & data[id]
	right := & data[id + 1]
	// Move the (num - id - 1) elements after `id` down one slot. The original
	// moved (num - id) elements, reading one element past the live range.
	libc.memmove( left, right, uint(num - id - 1) * size_of(Type) )

	header.num -= 1
}
|
||||
|
||||
// Ensures capacity of at least `new_capacity`; never shrinks.
array_reserve :: proc( using self : ^Array( $ Type ), new_capacity : u64 ) -> AllocatorError
{
	if new_capacity <= capacity {
		return AllocatorError.None
	}
	return array_set_capacity( self, new_capacity )
}
|
||||
|
||||
// Sets the element count to `num`, growing the backing store first if needed.
// New slots between the old and new count are left uninitialized.
array_resize :: proc( array : ^Array( $ Type ), num : u64 ) -> AllocatorError
{
	if array.capacity < num
	{
		// Grow to at least `num`: the original passed array.capacity, so one
		// growth step (2*cap+8) could still leave capacity < num for large requests.
		grow_result := array_grow( array, num )
		if grow_result != AllocatorError.None {
			return grow_result
		}
	}

	array.num = num
	return AllocatorError.None
}
|
||||
|
||||
// Reallocates the array's single block (header + elements) to hold exactly
// `new_capacity` elements. Shrinking below `num` only truncates the count
// without reallocating. Returns the allocator's result code.
array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> AllocatorError
{
	if new_capacity == self.capacity {
		return AllocatorError.None
	}
	if new_capacity < self.num {
		// Shrink below the live count: truncate instead of reallocating.
		self.num = new_capacity
		return AllocatorError.None
	}

	header_size :: size_of(ArrayHeader(Type))

	new_size := header_size + (cast(int) new_capacity ) * size_of(Type)
	old_size := header_size + (cast(int) self.capacity) * size_of(Type)

	// Non-zeroed resize: the tail past the live elements is left uninitialized.
	new_mem, result_code := resize_non_zeroed( self.header, old_size, new_size, mem.DEFAULT_ALIGNMENT, allocator = self.backing )

	if result_code != AllocatorError.None {
		ensure( false, "Failed to allocate for new array capacity" )
		return result_code
	}
	if new_mem == nil {
		ensure(false, "new_mem is nil but no allocation error")
		return result_code
	}

	// Rebind the header and data pointers into the (possibly moved) block.
	self.header = cast( ^ArrayHeader(Type)) raw_data(new_mem);
	self.header.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) self.header)[ 1:]
	self.header.capacity = new_capacity
	// NOTE(review): self.num reads through the just-assigned header (a
	// self-assignment); presumably harmless since resize preserved the
	// contents — confirm.
	self.header.num = self.num
	return result_code
}
|
||||
|
||||
// Total byte size of the array's single allocation (header + reserved elements).
array_block_size :: proc "contextless" ( self : Array( $Type ) ) -> u64 {
	header_size :: size_of(ArrayHeader(Type))
	return cast(u64) (header_size + self.capacity * size_of(Type))
}
|
||||
|
||||
// Builds a MemoryTrackerEntry spanning this array's whole allocation
// (header through the end of reserved element storage), labelled `name`.
// NOTE(review): the 3-field literal below does not match the 2-field
// MemoryTrackerEntry (start, end) defined in this package's tracker file —
// confirm which definition is current.
array_memtracker_entry :: proc( self : Array( $Type ), name : string ) -> MemoryTrackerEntry {
	header_size :: size_of(ArrayHeader(Type))
	block_size := cast(uintptr) (header_size + (cast(uintptr) self.capacity) * size_of(Type))

	block_start := transmute(^u8) self.header
	block_end := ptr_offset( block_start, block_size )

	tracker_entry := MemoryTrackerEntry { name, block_start, block_end }
	return tracker_entry
}
|
@ -1,62 +0,0 @@
|
||||
package sectr
|
||||
|
||||
import "base:runtime"
|
||||
import "core:io"
|
||||
import "core:os"
|
||||
import "core:text/table"
|
||||
|
||||
// Renders the current call stack as "symbol - location" lines, one per frame,
// laid out via core:text/table and returned as a string from `allocator`.
dump_stacktrace :: proc( allocator := context.temp_allocator ) -> string
{
	trace_result := stacktrace()
	// Discard the error explicitly: the original bound it to `error` without
	// ever reading it, which Odin rejects as an unused variable.
	lines, _ := stacktrace_lines( trace_result )

	padding := " "

	log_table := table.init( & table.Table{}, context.temp_allocator, context.temp_allocator )
	for line in lines {
		table.row( log_table, padding, line.symbol, " - ", line.location )
	}
	table.build(log_table)

	// writer_builder_backing : [Kilobyte * 16] u8
	// writer_builder := from_bytes( writer_builder_backing[:] )
	writer_builder : StringBuilder
	str_builder_init( & writer_builder, allocator = allocator )

	writer := to_writer( & writer_builder )
	// NOTE(review): iteration starts at row 2, skipping the first two rows —
	// confirm this intentionally drops this proc's own frames.
	for row in 2 ..< log_table.nr_rows {
		for col in 0 ..< log_table.nr_cols {
			table.write_table_cell( writer, log_table, row, col )
		}
		io.write_byte( writer, '\n' )
	}

	return to_string( writer_builder )
}
|
||||
|
||||
// Soft assert: on failure, logs a warning at the caller's location and breaks
// into the debugger, then execution continues.
ensure :: proc( condition : b32, msg : string, location := #caller_location )
{
	if ! condition {
		log( msg, LogLevel.Warning, location )
		runtime.debug_trap()
	}
}
|
||||
|
||||
// TODO(Ed) : Setup exit codes!
|
||||
// Unconditional hard failure: log at Fatal, break into the debugger,
// then terminate the process with `exit_code`.
fatal :: proc( msg : string, exit_code : int = -1, location := #caller_location )
{
	log( msg, LogLevel.Fatal, location )
	runtime.debug_trap()
	os.exit( exit_code )
}
|
||||
|
||||
// Hard assert: on failure, logs at Fatal, breaks into the debugger, and
// terminates the process with `exit_code`.
verify :: proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location )
{
	if ! condition {
		log( msg, LogLevel.Fatal, location )
		runtime.debug_trap()
		os.exit( exit_code )
	}
}
|
@ -1,66 +0,0 @@
|
||||
package sectr
|
||||
|
||||
// TODO(Ed): Review these when os2 is done.
|
||||
|
||||
import "core:fmt"
|
||||
import "core:os"
|
||||
import "base:runtime"
|
||||
|
||||
// Copies path_src to path_dst by reading the entire source into memory and
// writing it out in one call. Returns false (after logging and, on I/O
// failure, a debug trap) when any step fails.
file_copy_sync :: proc( path_src, path_dst: string, allocator := context.temp_allocator ) -> b32
{
	file_size : i64
	{
		// Stat first so a missing/unreadable source fails early with a log.
		// NOTE(review): file_size is captured but never read afterward —
		// confirm whether it was meant to pre-size a buffer or can be removed.
		path_info, result := file_status( path_src, allocator )
		if result != os.ERROR_NONE {
			logf("Could not get file info: %v", result, LogLevel.Error )
			return false
		}
		file_size = path_info.size
	}

	src_content, result := os.read_entire_file( path_src, allocator )
	if ! result {
		logf( "Failed to read file to copy: %v", path_src, LogLevel.Error )
		runtime.debug_trap()
		return false
	}

	// NOTE(review): the third argument (truncate = false) is passed explicitly —
	// verify this is the intended behavior when dst already exists.
	result = os.write_entire_file( path_dst, src_content, false )
	if ! result {
		logf( "Failed to copy file: %v", path_dst, LogLevel.Error )
		runtime.debug_trap()
		return false
	}
	return true
}
|
||||
|
||||
// Reports whether `file_path` can be stat'ed successfully.
file_exists :: proc( file_path : string ) -> b32 {
	_, status := file_status( file_path, frame_allocator() )
	return cast(b32) (status == os.ERROR_NONE)
}
|
||||
|
||||
// Heuristic lock check: if the file cannot even be opened read-only,
// treat it as locked / in use by another process.
file_is_locked :: proc( file_path : string ) -> b32 {
	handle, open_error := file_open(file_path, os.O_RDONLY)
	if open_error != os.ERROR_NONE {
		// If the error indicates the file is in use, return true.
		return true
	}

	// Opened fine: release the handle and report unlocked.
	file_close(handle)
	return false
}
|
||||
|
||||
// Seeks the file handle back to its beginning.
file_rewind :: proc( file : os.Handle ) {
	beginning : i64 = 0
	file_seek( file, beginning, 0 )
}
|
||||
|
||||
// Reads into `data`; when the read hits end-of-file, rewinds the handle so
// subsequent reads wrap back to the start.
file_read_looped :: proc( file : os.Handle, data : []byte ) {
	// Fix: the original bound the byte count to `total_read` without using it,
	// which Odin rejects as an unused variable; discard it explicitly.
	_, result_code := file_read( file, data )
	if result_code == os.ERROR_HANDLE_EOF {
		file_rewind( file )
	}
}
|
@ -47,11 +47,7 @@ HMapZPL :: struct ( $ Type : typeid ) {
|
||||
entries : Array( HMapZPL_Entry(Type) ),
|
||||
}
|
||||
|
||||
hamp_zpl_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HMapZPL( Type), AllocatorError ) {
|
||||
return hamp_zpl_init_reserve( Type, allocator )
|
||||
}
|
||||
|
||||
hamp_zpl_init_reserve :: proc
|
||||
hamp_zpl_init :: proc
|
||||
( $ Type : typeid, allocator : Allocator, num : u64, dbg_name : string = "" ) -> ( HMapZPL( Type), AllocatorError )
|
||||
{
|
||||
result : HMapZPL(Type)
|
||||
@ -126,7 +122,7 @@ hamp_zpl_rehash :: proc( ht : ^ HMapZPL( $ Type ), new_num : u64 ) -> AllocatorE
|
||||
ensure( false, "ZPL HMAP IS REHASHING" )
|
||||
last_added_index : i64
|
||||
|
||||
new_ht, init_result := hamp_zpl_init_reserve( Type, ht.table.backing, new_num, ht.table.dbg_name )
|
||||
new_ht, init_result := hamp_zpl_init( Type, ht.table.backing, new_num, ht.table.dbg_name )
|
||||
if init_result != AllocatorError.None {
|
||||
ensure( false, "New hamp_zpl failed to allocate" )
|
||||
return init_result
|
||||
|
@ -1,190 +0,0 @@
|
||||
package sectr
|
||||
|
||||
// Intrusive singly-linked node: carries only a `next` pointer of the owner type.
LL_Node :: struct ( $ Type : typeid ) {
	next : ^Type,
}
|
||||
|
||||
// ll_push :: proc( list_ptr : ^(^ ($ Type)), node : ^Type ) {
|
||||
// Pushes `node` onto the front of a singly-linked list (stack discipline).
ll_push :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)), node : ^Type ) {
	node.next = list_ptr^
	list_ptr^ = node
}
|
||||
|
||||
// Pops and returns the head of a singly-linked list.
// Assumes the list is non-empty (dereferences the head).
ll_pop :: #force_inline proc "contextless" ( list_ptr : ^(^ ($ Type)) ) -> ( node : ^Type ) {
	head := list_ptr^
	list_ptr^ = head.next
	return head
}
|
||||
|
||||
//region Intrusive Doubly-Linked-List
|
||||
|
||||
// Intrusive doubly-linked node as a raw union: the same two pointers can be
// read under whichever naming fits the use site (left/right, prev/next,
// first/last, bottom/top). All four anonymous structs alias identical storage.
DLL_Node :: struct ( $ Type : typeid ) #raw_union {
	using _ : struct {
		left, right : ^Type,
	},
	using _ : struct {
		prev, next : ^Type,
	},
	using _ : struct {
		first, last : ^Type,
	},
	using _ : struct {
		bottom, top : ^Type,
	}
}

// Node that is both a list head (first/last) and a list member (prev/next);
// for nodes that own a child list while sitting in a sibling list.
DLL_NodeFull :: struct ( $ Type : typeid ) {
	// using _ : DLL_NodeFL(Type),
	first, last : ^Type,
	prev, next : ^Type,
}

// Member-only links: previous/next within a list.
DLL_NodePN :: struct ( $ Type : typeid ) {
	// using _ : struct {
	prev, next : ^Type,
	// },
	// using _ : struct {
	// left, right : ^Type,
	// },
}

// Head-only links: first/last endpoints of a list.
DLL_NodeFL :: struct ( $ Type : typeid ) {
	// using _ : struct {
	first, last : ^Type,
	// },

	// TODO(Ed): Review this
	// using _ : struct {
	// bottom, top: ^Type,
	// },
}
|
||||
|
||||
// True when Type's element type carries both `prev` and `next` fields,
// i.e. it can participate in the doubly-linked-list helpers.
type_is_node :: #force_inline proc "contextless" ( $ Type : typeid ) -> bool
{
	elem := type_elem_type(Type)
	return type_has_field( elem, "prev" ) && type_has_field( elem, "next" )
}
|
||||
|
||||
// First/Last append
// Appends `node` to a list that tracks first/last endpoints.
// NOTE(review): the non-empty branch only retargets list.last — it never links
// the previous last node to `node`. If TypeNode carries next/prev links the
// chain is broken here; confirm this is intentional for FL-only lists.
dll_fl_append :: proc ( list : ^( $TypeList), node : ^( $TypeNode) )
{
	if list.first == nil {
		list.first = node
		list.last = node
	}
	else {
		list.last = node
	}
}
|
||||
|
||||
// Pushes `node` behind the node `current_ptr` tracks and makes `node` the new
// tracked node — `current_ptr` acts as a "last" cursor for the list.
dll_push_back :: proc "contextless" ( current_ptr : ^(^ ($ TypeCurr)), node : ^$TypeNode )
{
	current : ^TypeCurr = (current_ptr ^)

	if current == nil
	{
		// Empty list: node becomes the sole element.
		(current_ptr ^) = node
		node.prev = nil
	}
	else
	{
		// Link node behind the current tail and advance the cursor.
		node.prev = current
		(current_ptr^) = node
		current.next = node
	}

	// A new tail never has a successor.
	node.next = nil
}
|
||||
|
||||
// Detaches `node` from its neighbors, clearing all four involved links.
dll_pn_pop :: proc "contextless" ( node : ^$Type )
{
	if node == nil {
		return
	}

	if predecessor := node.prev; predecessor != nil {
		predecessor.next = nil
		node.prev        = nil
	}
	if successor := node.next; successor != nil {
		successor.prev = nil
		node.next      = nil
	}
}
|
||||
|
||||
// Removes the node `current_ptr` points at, retargeting the cursor to its
// predecessor (or nil when the removed node was the only one).
dll_pop_back :: #force_inline proc "contextless" ( current_ptr : ^(^ ($ Type)) )
{
	to_remove := current_ptr^
	if to_remove == nil {
		return
	}

	predecessor := to_remove.prev
	if predecessor == nil {
		// Removed the only node; list is now empty.
		(current_ptr ^) = nil
	}
	else {
		(current_ptr ^) = predecessor
		predecessor.next = nil
	}
}
|
||||
|
||||
// Inserts `node` after `pos` in parent's first/last list. `null` is the
// sentinel value meaning "no node" (lets callers use a non-nil sentinel).
// pos == null inserts at the front; pos == parent.last appends at the end.
dll_full_insert_raw :: proc "contextless" ( null : ^($ Type), parent : ^$ParentType, pos, node : ^Type )
{
	if parent.first == null {
		// Empty list: node becomes both endpoints.
		parent.first = node
		parent.last = node
		node.next = null
		node.prev = null
	}
	else if pos == null {
		// Position is not set, insert at beginning
		node.next = parent.first
		parent.first.prev = node
		parent.first = node
		node.prev = null
	}
	else if pos == parent.last {
		// Position is set to last, insert at end
		parent.last.next = node
		node.prev = parent.last
		parent.last = node
		node.next = null
	}
	else
	{
		// General case: splice node in between pos and pos.next.
		if pos.next != null {
			pos.next.prev = node
		}
		node.next = pos.next
		pos.next = node
		node.prev = pos
	}
}
|
||||
|
||||
// Unlinks `node` from a list tracked by parent.first / parent.last,
// fixing up both neighbors and clearing the node's own links.
dll_full_pop :: proc "contextless" ( node : ^$NodeType, parent : ^$ParentType ) {
	if node == nil {
		return
	}

	// Retarget the parent's endpoints when they referenced this node.
	if parent.first == node do parent.first = node.next
	if parent.last  == node do parent.last  = node.prev

	left  := node.prev
	right := node.next
	if left != nil {
		left.next = right
		node.prev = nil
	}
	if right != nil {
		right.prev = left
		node.next  = nil
	}
}
|
||||
|
||||
// Appends `node` at the tail of parent's first/last list.
dll_full_push_back :: proc "contextless" ( parent : ^$ParentType, node : ^$Type, null : ^Type ) {
	tail := parent.last
	dll_full_insert_raw( null, parent, tail, node )
}
|
||||
|
||||
//endregion Intrusive Doubly-Linked-List
|
@ -1,15 +1,17 @@
|
||||
|
||||
package sectr
|
||||
|
||||
#region("Import Aliases")
|
||||
#region("base")
|
||||
|
||||
import "base:builtin"
|
||||
copy :: builtin.copy
|
||||
|
||||
import "base:intrinsics"
|
||||
mem_zero :: intrinsics.mem_zero
|
||||
ptr_sub :: intrinsics.ptr_sub
|
||||
type_has_field :: intrinsics.type_has_field
|
||||
type_elem_type :: intrinsics.type_elem_type
|
||||
|
||||
import "base:runtime"
|
||||
Byte :: runtime.Byte
|
||||
Kilobyte :: runtime.Kilobyte
|
||||
@ -20,12 +22,22 @@ import "base:runtime"
|
||||
Exabyte :: runtime.Exabyte
|
||||
resize_non_zeroed :: runtime.non_zero_mem_resize
|
||||
SourceCodeLocation :: runtime.Source_Code_Location
|
||||
debug_trap :: runtime.debug_trap
|
||||
|
||||
#endregion("base")
|
||||
|
||||
#region("core")
|
||||
|
||||
import c "core:c/libc"
|
||||
import "core:dynlib"
|
||||
|
||||
// import "core:dynlib"
|
||||
|
||||
import "core:hash"
|
||||
crc32 :: hash.crc32
|
||||
|
||||
import "core:hash/xxhash"
|
||||
xxh32 :: xxhash.XXH32
|
||||
|
||||
import fmt_io "core:fmt"
|
||||
str_fmt_out :: fmt_io.printf
|
||||
str_fmt_tmp :: fmt_io.tprintf
|
||||
@ -34,7 +46,9 @@ import fmt_io "core:fmt"
|
||||
str_fmt_buffer :: fmt_io.bprintf
|
||||
str_to_file_ln :: fmt_io.fprintln
|
||||
str_tmp_from_any :: fmt_io.tprint
|
||||
|
||||
import "core:math"
|
||||
|
||||
import "core:mem"
|
||||
align_forward_int :: mem.align_forward_int
|
||||
align_forward_uint :: mem.align_forward_uint
|
||||
@ -59,9 +73,12 @@ import "core:mem"
|
||||
TrackingAllocator :: mem.Tracking_Allocator
|
||||
tracking_allocator :: mem.tracking_allocator
|
||||
tracking_allocator_init :: mem.tracking_allocator_init
|
||||
|
||||
import "core:mem/virtual"
|
||||
VirtualProtectFlags :: virtual.Protect_Flags
|
||||
|
||||
// import "core:odin"
|
||||
|
||||
import "core:os"
|
||||
FileFlag_Create :: os.O_CREATE
|
||||
FileFlag_ReadWrite :: os.O_RDWR
|
||||
@ -73,37 +90,152 @@ import "core:os"
|
||||
file_seek :: os.seek
|
||||
file_status :: os.stat
|
||||
file_write :: os.write
|
||||
|
||||
import "core:path/filepath"
|
||||
file_name_from_path :: filepath.short_stem
|
||||
|
||||
import "core:strconv"
|
||||
parse_f32 :: strconv.parse_f32
|
||||
parse_u64 :: strconv.parse_u64
|
||||
parse_uint :: strconv.parse_uint
|
||||
|
||||
import str "core:strings"
|
||||
StringBuilder :: str.Builder
|
||||
str_builder_from_bytes :: str.builder_from_bytes
|
||||
str_builder_init :: str.builder_init
|
||||
str_builder_to_writer :: str.to_writer
|
||||
str_builder_to_string :: str.to_string
|
||||
|
||||
import "core:time"
|
||||
Duration :: time.Duration
|
||||
duration_seconds :: time.duration_seconds
|
||||
duration_ms :: time.duration_milliseconds
|
||||
thread_sleep :: time.sleep
|
||||
|
||||
import "core:unicode"
|
||||
is_white_space :: unicode.is_white_space
|
||||
|
||||
import "core:unicode/utf8"
|
||||
str_rune_count :: utf8.rune_count_in_string
|
||||
runes_to_string :: utf8.runes_to_string
|
||||
// string_to_runes :: utf8.string_to_runes
|
||||
|
||||
#endregion("core")
|
||||
|
||||
import "thirdparty:backtrace"
|
||||
StackTraceData :: backtrace.Trace_Const
|
||||
stacktrace :: backtrace.trace
|
||||
stacktrace_lines :: backtrace.lines
|
||||
|
||||
#endregion("Import Aliases")
|
||||
import "codebase:grime"
|
||||
// asserts
|
||||
ensure :: grime.ensure
|
||||
fatal :: grime.fatal
|
||||
verify :: grime.verify
|
||||
|
||||
#region("Proc overload mappings")
|
||||
// chrono
|
||||
NS_To_MS :: grime.NS_To_MS
|
||||
NS_To_US :: grime.NS_To_US
|
||||
NS_To_S :: grime.NS_To_S
|
||||
|
||||
US_To_NS :: grime.US_To_NS
|
||||
US_To_MS :: grime.US_To_MS
|
||||
US_To_S :: grime.US_To_S
|
||||
|
||||
MS_To_NS :: grime.MS_To_NS
|
||||
MS_To_US :: grime.MS_To_US
|
||||
MS_To_S :: grime.MS_To_S
|
||||
|
||||
S_To_NS :: grime.S_To_NS
|
||||
S_To_US :: grime.S_To_US
|
||||
S_To_MS :: grime.S_To_MS
|
||||
|
||||
// container
|
||||
Array :: grime.Array
|
||||
|
||||
array_to_slice :: grime.array_to_slice
|
||||
array_init_reserve :: grime.array_init_reserve
|
||||
array_append :: grime.array_append
|
||||
array_append_at :: grime.array_append_at
|
||||
array_clear :: grime.array_clear
|
||||
array_free :: grime.array_free
|
||||
array_grow_formula :: grime.array_grow_formula
|
||||
array_remove_at :: grime.array_remove_at
|
||||
array_resize :: grime.array_resize
|
||||
|
||||
// filesystem
|
||||
file_exists :: grime.file_exists
|
||||
file_rewind :: grime.file_rewind
|
||||
|
||||
// linked lists
|
||||
LL_Node :: grime.LL_Node
|
||||
|
||||
ll_push :: grime.ll_push
|
||||
ll_pop :: grime.ll_pop
|
||||
|
||||
DLL_Node :: grime.DLL_Node
|
||||
DLL_NodeFull :: grime.DLL_NodeFull
|
||||
DLL_NodePN :: grime.DLL_NodePN
|
||||
DLL_NodeFL :: grime.DLL_NodeFL
|
||||
|
||||
dll_full_push_back :: grime.dll_full_push_back
|
||||
dll_full_pop :: grime.dll_full_pop
|
||||
dll_push_back :: grime.dll_push_back
|
||||
dll_pop_back :: grime.dll_pop_back
|
||||
|
||||
// logger
|
||||
Logger :: grime.Logger
|
||||
LogLevel :: grime.LogLevel
|
||||
|
||||
to_odin_logger :: grime.to_odin_logger
|
||||
logger_init :: grime.logger_init
|
||||
log :: grime.log
|
||||
logf :: grime.logf
|
||||
|
||||
// memory
|
||||
MemoryTracker :: grime.MemoryTracker
|
||||
MemoryTrackerEntry :: grime.MemoryTrackerEntry
|
||||
|
||||
memtracker_clear :: grime.memtracker_clear
|
||||
memtracker_init :: grime.memtracker_init
|
||||
memtracker_register_auto_name :: grime.memtracker_register_auto_name
|
||||
memtracker_register_auto_name_slice :: grime.memtracker_register_auto_name_slice
|
||||
memtracker_unregister :: grime.memtracker_unregister
|
||||
|
||||
|
||||
calc_padding_with_header :: grime.calc_padding_with_header
|
||||
memory_after_header :: grime.memory_after_header
|
||||
memory_after :: grime.memory_after
|
||||
swap :: grime.swap
|
||||
|
||||
// profiler
|
||||
SpallProfiler :: grime.SpallProfiler
|
||||
|
||||
set_profiler_module_context :: grime.set_profiler_module_context
|
||||
|
||||
profile :: grime.profile
|
||||
profile_begin :: grime.profile_begin
|
||||
profile_end :: grime.profile_end
|
||||
|
||||
// os
|
||||
OS_Type :: grime.OS_Type
|
||||
|
||||
// timing
|
||||
when ODIN_OS == OS_Type.Windows {
|
||||
set__scheduler_granularity :: grime.set__scheduler_granularity
|
||||
}
|
||||
|
||||
// unicode
|
||||
string_to_runes :: grime.string_to_runes
|
||||
string_to_runes_array :: grime.string_to_runes_array
|
||||
|
||||
// virtual memory
|
||||
VArena :: grime.VArena
|
||||
VirtualMemoryRegion :: grime.VirtualMemoryRegion
|
||||
|
||||
varena_allocator :: grime.varena_allocator
|
||||
|
||||
#region("Procedure overload mappings")
|
||||
|
||||
// This has to be done on a per-module basis.
|
||||
|
||||
@ -111,12 +243,6 @@ add :: proc {
|
||||
add_range2,
|
||||
}
|
||||
|
||||
array_append :: proc {
|
||||
array_append_value,
|
||||
array_append_array,
|
||||
array_append_slice,
|
||||
}
|
||||
|
||||
bivec3 :: proc {
|
||||
bivec3_via_f32s,
|
||||
vec3_to_bivec3,
|
||||
@ -359,7 +485,3 @@ wedge :: proc {
|
||||
}
|
||||
|
||||
#endregion("Proc overload mappings")
|
||||
|
||||
OS_Type :: type_of(ODIN_OS)
|
||||
|
||||
// Returns its pointer arguments exchanged: (a, b) -> (b, a).
swap :: #force_inline proc( a, b : ^ $Type ) -> ( ^ Type, ^ Type ) { return b, a }
|
@ -1,91 +0,0 @@
|
||||
// TODO(Ed) : Move this to a grime package probably
|
||||
package sectr
|
||||
|
||||
import "core:fmt"
|
||||
import "core:mem"
|
||||
import "core:mem/virtual"
|
||||
import "base:runtime"
|
||||
import "core:os"
|
||||
|
||||
// Scales a count of kilobytes to bytes.
kilobytes :: #force_inline proc "contextless" ( kb : $ integer_type ) -> integer_type {
	return Kilobyte * kb
}
|
||||
// Scales a count of megabytes to bytes.
megabytes :: #force_inline proc "contextless" ( mb : $ integer_type ) -> integer_type {
	return Megabyte * mb
}
|
||||
// Scales a count of gigabytes to bytes.
gigabytes :: #force_inline proc "contextless" ( gb : $ integer_type ) -> integer_type {
	return Gigabyte * gb
}
|
||||
// Scales a count of terabytes to bytes.
terabytes :: #force_inline proc "contextless" ( tb : $ integer_type ) -> integer_type {
	return Terabyte * tb
}
|
||||
|
||||
//region Memory Math
|
||||
|
||||
// See: core/mem.odin — I wanted to study it and didn't like the naming.
|
||||
// Computes how many bytes of padding to place at `pointer` so that a header of
// `header_size` bytes fits before the next `alignment` boundary.
// `alignment` must be a power of two (the mask arithmetic below relies on it).
@(require_results)
calc_padding_with_header :: proc "contextless" (pointer: uintptr, alignment: uintptr, header_size: int) -> int
{
	// Distance past the previous alignment boundary (pointer % alignment).
	alignment_offset := pointer & (alignment - 1)

	initial_padding := uintptr(0)
	if alignment_offset != 0 {
		initial_padding = alignment - alignment_offset
	}

	header_space_adjustment := uintptr(header_size)
	if initial_padding < header_space_adjustment
	{
		// Header doesn't fit in the natural padding: extend by whole
		// alignment steps until it does.
		additional_space_needed := header_space_adjustment - initial_padding
		unaligned_extra_space := additional_space_needed & (alignment - 1)

		if unaligned_extra_space > 0 {
			initial_padding += alignment * (1 + (additional_space_needed / alignment))
		}
		else {
			initial_padding += alignment * (additional_space_needed / alignment)
		}
	}

	return int(initial_padding)
}
|
||||
|
||||
// Helper to get the first byte just past the end of a slice.
memory_after :: #force_inline proc "contextless" ( slice : []byte ) -> ( ^ byte) {
	slice_len := len(slice)
	return ptr_offset( & slice[0], slice_len )
}
|
||||
|
||||
// Pointer to the bytes immediately following a header struct.
memory_after_header :: #force_inline proc "contextless" ( header : ^($ Type) ) -> ( [^]byte) {
	one_past_header := ptr_offset( header, 1 )
	// result := cast( [^]byte) (cast( [^]Type) header)[ 1:]
	return cast( [^]byte) one_past_header
}
|
||||
|
||||
// Rounds `size` up to the next multiple of `align`.
@(require_results)
memory_align_formula :: #force_inline proc "contextless" ( size, align : uint) -> uint {
	bumped := size + align - 1
	return bumped - bumped % align
}
|
||||
|
||||
// This is here just for docs
// Byte offset of `address` from the previous `alignment` boundary —
// equivalent to address % alignment; alignment must be a power of two.
memory_misalignment :: #force_inline proc ( address, alignment : uintptr) -> uint {
	// address % alignment
	assert(is_power_of_two(alignment))
	masked := address & (alignment - 1)
	return uint( masked )
}
|
||||
|
||||
// This is here just for docs
// Rounds `address` up to the next `alignment` boundary (power of two).
// NOTE(review): the name keeps the original "aign" spelling so callers don't break.
@(require_results)
memory_aign_forward :: #force_inline proc( address, alignment : uintptr) -> uintptr
{
	assert(is_power_of_two(alignment))

	offset := cast(uintptr) memory_misalignment( address, alignment )
	if offset == 0 {
		return address
	}
	return address + (alignment - offset)
}
|
||||
|
||||
//endregion Memory Math
|
@ -1,172 +0,0 @@
|
||||
/*
|
||||
This was a tracking allocator made to kill off various bugs left with grime's pool & slab allocators
|
||||
It doesn't perform that well on a per-frame basis and should be avoided for general memory debugging
|
||||
|
||||
It only makes sure that memory allocations don't collide in the allocator and deallocations don't occur for memory never allocated.
|
||||
|
||||
I'm keeping it around as an artifact & for future allocators I may make.
|
||||
*/
|
||||
package sectr
|
||||
|
||||
// One tracked allocation: the start and end addresses of its block.
MemoryTrackerEntry :: struct {
	start, end : rawptr,
}

// Debug-only ledger of live allocations. `name` labels the tracker in log
// output; `entries` appears to be kept ordered by start address — see
// memtracker_register (confirm).
MemoryTracker :: struct {
	name : string,
	entries : Array(MemoryTrackerEntry),
}
|
||||
|
||||
Track_Memory :: false
|
||||
|
||||
// tracker_msg_buffer : [Kilobyte * 16]u8
|
||||
|
||||
// Dumps then removes every entry tracked by `tracker`. No-op unless Track_Memory.
memtracker_clear :: proc ( tracker : MemoryTracker ) {
	when ! Track_Memory {
		return
	}
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	logf("Clearing tracker: %v", tracker.name)
	memtracker_dump_entries(tracker);
	array_clear(tracker.entries)
}
|
||||
|
||||
// Initializes `tracker`, reserving capacity for `num_entries` entries out of `allocator`.
// Fatal on allocation failure. No-op unless Track_Memory.
memtracker_init :: proc ( tracker : ^MemoryTracker, allocator : Allocator, num_entries : u64, name : string )
{
	when ! Track_Memory {
		return
	}
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	tracker.name = name

	error : AllocatorError
	tracker.entries, error = array_init_reserve( MemoryTrackerEntry, allocator, num_entries, dbg_name = name )
	if error != AllocatorError.None {
		fatal("Failed to allocate memory tracker's hashmap");
	}
}
|
||||
|
||||
// Inserts `new_entry` into the tracker's start-address-sorted entry list,
// reporting an apparent collision with an existing allocation.
// No-op unless Track_Memory.
memtracker_register :: proc( tracker : ^MemoryTracker, new_entry : MemoryTrackerEntry )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	if tracker.entries.num == tracker.entries.capacity {
		ensure(false, "Memory tracker entries array full, can no longer register any more allocations")
		return
	}

	// Scan for the first entry whose start is >= new_entry.start; insert before it.
	for idx in 0..< tracker.entries.num
	{
		entry := & tracker.entries.data[idx]
		if new_entry.start > entry.start {
			continue
		}

		// NOTE(review): this condition looks inverted/dead — at this point
		// entry.start >= new_entry.start <= new_entry.end, so `entry.end < new_entry.start`
		// can seemingly never hold; an overlap test would be `new_entry.end >= entry.start`.
		// TODO confirm intent.
		if (entry.end < new_entry.start)
		{
			msg := str_fmt("Memory tracker(%v) detected a collision:\nold_entry: %v\nnew_entry: %v", tracker.name, entry, new_entry)
			ensure( false, msg )
			memtracker_dump_entries(tracker ^)
		}
		array_append_at( & tracker.entries, new_entry, idx )
		log(str_fmt("%v : Registered: %v", tracker.name, new_entry) )
		return
	}

	// new_entry starts after every tracked entry: append at the tail.
	array_append( & tracker.entries, new_entry )
	log(str_fmt("%v : Registered: %v", tracker.name, new_entry) )
}
|
||||
|
||||
// Convenience wrapper: tracks the inclusive [start, end] byte range.
memtracker_register_auto_name :: proc( tracker : ^MemoryTracker, start, end : rawptr )
{
	when ! Track_Memory {
		return
	}
	entry := MemoryTrackerEntry { start, end }
	memtracker_register( tracker, entry )
}
|
||||
|
||||
// Tracks a byte slice, using the addresses of its first and last bytes.
memtracker_register_auto_name_slice :: proc( tracker : ^MemoryTracker, slice : []byte )
{
	when ! Track_Memory {
		return
	}
	first_byte := raw_data(slice)
	last_byte  := & slice[ len(slice) - 1 ]
	memtracker_register( tracker, MemoryTrackerEntry { first_byte, last_byte } )
}
|
||||
|
||||
// Removes the entry whose start matches `to_remove.start`.
// A nil `to_remove.end` acts as a wildcard for the end address; a non-nil mismatch
// is reported as an error. Also reports attempts to unregister untracked memory.
// No-op unless Track_Memory.
memtracker_unregister :: proc( tracker : MemoryTracker, to_remove : MemoryTrackerEntry )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	entries := array_to_slice(tracker.entries)
	for idx in 0..< tracker.entries.num
	{
		entry := & entries[idx]
		if entry.start == to_remove.start {
			if (entry.end == to_remove.end || to_remove.end == nil) {
				log(str_fmt("%v: Unregistered: %v", tracker.name, to_remove));
				array_remove_at(tracker.entries, idx)
				return
			}

			// Same start, different end: likely a mismatched register/unregister pair.
			ensure(false, str_fmt("%v: Found an entry with the same start address but end address was different:\nentry : %v\nto_remove: %v", tracker.name, entry, to_remove))
			memtracker_dump_entries(tracker)
		}
	}

	ensure(false, str_fmt("%v: Attempted to unregister an entry that was not tracked: %v", tracker.name, to_remove))
	memtracker_dump_entries(tracker)
}
|
||||
|
||||
// Verifies the tracker's (start-sorted) entries do not intersect one another.
// Reports any ordering violation or overlap between adjacent entries.
// No-op unless Track_Memory.
memtracker_check_for_collisions :: proc ( tracker : MemoryTracker )
{
	when ! Track_Memory {
		return
	}
	profile(#procedure)
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	entries := array_to_slice(tracker.entries)
	for idx in 1 ..< tracker.entries.num {
		// Check to make sure each allocation's adjacent entries do not intersect.
		// Entries record an INCLUSIVE end byte, so the left entry reaching the right
		// entry's start is already an overlap. The previous check (left.end > right.end)
		// only caught containment and missed partial overlaps; the built message was
		// also never reported.
		left  := & entries[idx - 1]
		right := & entries[idx]

		collided := left.start > right.start || left.end >= right.start
		if collided {
			msg := str_fmt("%v: Memory tracker detected a collision:\nleft: %v\nright: %v", tracker.name, left, right)
			ensure( false, msg )
			memtracker_dump_entries(tracker)
		}
	}
}
|
||||
|
||||
// Logs every tracked entry, one line each. No-op unless Track_Memory.
memtracker_dump_entries :: proc( tracker : MemoryTracker )
{
	when ! Track_Memory {
		return
	}
	// temp_arena : Arena; arena_init(& temp_arena, tracker_msg_buffer[:])
	// context.temp_allocator = arena_allocator(& temp_arena)

	log( "Dumping Memory Tracker:")
	for idx in 0 ..< tracker.entries.num {
		entry := & tracker.entries.data[idx]
		log( str_fmt("%v", entry) )
	}
}
|
@ -1,22 +0,0 @@
|
||||
package sectr
|
||||
|
||||
import "base:runtime"
|
||||
import "core:prof/spall"
|
||||
|
||||
// Bundles the spall tracing context with the buffer its events are written into.
SpallProfiler :: struct {
	ctx : spall.Context,
	buffer : spall.Buffer,
}
|
||||
|
||||
// Scoped profile zone: begins a spall trace event named `name` and — via
// @(deferred_none) — automatically calls profile_end when the caller's scope exits.
@(deferred_none=profile_end)
profile :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer, name, "", loc )
}
|
||||
|
||||
// Manually begins a spall trace event; must be paired with profile_end.
profile_begin :: #force_inline proc "contextless" ( name : string, loc := #caller_location ) {
	spall._buffer_begin( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer, name, "", loc )
}
|
||||
|
||||
// Ends the most recent spall trace event (also the deferred half of `profile`).
profile_end :: #force_inline proc "contextless" () {
	spall._buffer_end( & Memory_App.profiler.ctx, & Memory_App.profiler.buffer)
}
|
@ -1,30 +0,0 @@
|
||||
package sectr
|
||||
|
||||
// Provides an alternative syntax for pointers
|
||||
|
||||
// Wraps a value so that pointer access reads as `ptr.v` instead of `(ptr ^)`.
Ptr :: struct( $ Type : typeid ) {
	v : Type,
}
|
||||
|
||||
// Demonstrates the Ptr wrapper syntax next to an equivalent plain-pointer sequence.
// NOTE(review): name has a typo ("exmaple"); kept to avoid breaking references.
exmaple_ptr :: proc()
{
	a, b : int
	var : ^Ptr(int)
	reg : ^int

	a = 1
	b = 1

	// NOTE(review): `&{a}` appears to address a temporary Ptr(int) compound literal
	// holding a copy of `a`, so writes through `var` would not modify `a` — confirm.
	var = &{a}
	var.v = 2
	var = &{b}
	var.v = 3

	a = 1
	b = 1

	// Plain-pointer equivalent of the sequence above.
	reg = (& a)
	(reg^) = 2
	reg = (& b)
	(reg^) = 3
}
|
@ -1,11 +0,0 @@
|
||||
// This provides a string generator using a token replacement approach instead of a %<id> verb-syntax to parse.
|
||||
// This was done just for preference as I personally don't like the c-printf-like syntax.
|
||||
package sectr
|
||||
|
||||
|
||||
|
||||
// str_format :: proc ( format : string, tokens : ..args ) {
|
||||
|
||||
// }
|
||||
|
||||
|
@ -66,7 +66,7 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
|
||||
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator(), dbg_name = dbg_name )
|
||||
verify(alloc_error == .None, "Failed to initialize the string cache" )
|
||||
|
||||
cache.table, alloc_error = hamp_zpl_init_reserve( StrRunesPair, persistent_allocator(), 4 * Megabyte, dbg_name )
|
||||
cache.table, alloc_error = hamp_zpl_init( StrRunesPair, persistent_allocator(), 4 * Megabyte, dbg_name )
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1,43 +0,0 @@
|
||||
package sectr
|
||||
|
||||
rune16 :: distinct u16 // presumably a UTF-16 code unit, distinct from plain u16 — TODO confirm usage
|
||||
|
||||
|
||||
|
||||
|
||||
// Exposing the alloc_error
// Decodes `content` into a freshly allocated []rune.
// Unlike core's variant, the allocation error is surfaced to the caller.
@(require_results)
string_to_runes :: proc ( content : string, allocator := context.allocator) -> (runes : []rune, alloc_error : AllocatorError) #optional_allocator_error {
	rune_count := str_rune_count(content)

	runes, alloc_error = make([]rune, rune_count, allocator)
	if runes == nil || alloc_error != AllocatorError.None {
		return
	}

	cursor := 0
	for codepoint in content {
		runes[cursor] = codepoint
		cursor += 1
	}
	return
}
|
||||
|
||||
// Decodes `content` into a []rune backed by an Array(rune) allocation.
// The returned slice spans the array's full capacity (== the rune count).
string_to_runes_array :: proc( content : string, allocator := context.allocator ) -> ( []rune, AllocatorError )
{
	rune_count := cast(u64) str_rune_count(content)

	backing, alloc_error := array_init_reserve( rune, allocator, rune_count )
	if alloc_error != AllocatorError.None {
		return nil, alloc_error
	}

	runes := array_to_slice_capacity(backing)

	cursor := 0
	for codepoint in content {
		runes[cursor] = codepoint
		cursor += 1
	}
	return runes, alloc_error
}
|
@ -1,312 +0,0 @@
|
||||
/*
|
||||
Odin's virtual arena allocator doesn't do what I ideally want for allocation resizing.
|
||||
(It was also a nice exercise along with making the other allocators)
|
||||
|
||||
So this is a virtual memory backed arena allocator designed
|
||||
to take advantage of one large contiguous reserve of memory.
|
||||
With the expectation that resizes with its interface will only occur using the last allocated block.
|
||||
|
||||
All virtual address space memory for this application is managed by a virtual arena.
|
||||
No other part of the program will touch the virtual memory interface directly other than it.
|
||||
|
||||
Thus for the scope of this prototype the Virtual Arena are the only interfaces to dynamic address spaces for the runtime of the client app.
|
||||
The host application as well ideally (although this may not be the case for a while)
|
||||
*/
|
||||
package sectr
|
||||
|
||||
import "base:intrinsics"
|
||||
import "base:runtime"
|
||||
import "core:mem"
|
||||
import "core:os"
|
||||
import "core:slice"
|
||||
import "core:sync"
|
||||
|
||||
// Policy callback deciding the next TOTAL committed size when the arena must grow.
VArena_GrowthPolicyProc :: #type proc( commit_used, committed, reserved, requested_size : uint ) -> uint

// Virtual-memory-backed bump arena. One large contiguous reserve is committed
// incrementally; allocations advance `commit_used` under `mutex`.
VArena :: struct {
	using vmem : VirtualMemoryRegion,
	dbg_name : string,
	tracker : MemoryTracker,                  // only initialized when ODIN_DEBUG (see varena_init)
	commit_used : uint,                       // bytes handed out, measured from reserve_start
	growth_policy : VArena_GrowthPolicyProc,
	allow_any_reize : b32,                    // NOTE(review): typo of "resize" — permits resizing non-tail allocations by relocating them
	mutex : sync.Mutex,
}
|
||||
|
||||
// Default growth policy: commit by an `increment` that starts at 16 KiB and doubles
// (clamped to [1 MiB, 1 GiB]) each time total committed crosses the doubling
// `commit_limit`; always at least `requested_size`, page-aligned, and clamped to
// the remaining reservation.
// NOTE(review): commit_limit/increment are @static, i.e. shared by ALL arenas and
// mutated without synchronization — confirm that is intended.
varena_default_growth_policy :: proc( commit_used, committed, reserved, requested_size : uint ) -> uint
{
	@static commit_limit := uint(1 * Megabyte)
	@static increment := uint(16 * Kilobyte)
	page_size := uint(virtual_get_page_size())

	if increment < Gigabyte && committed > commit_limit {
		commit_limit *= 2
		increment *= 2

		increment = clamp( increment, Megabyte, Gigabyte )
	}

	remaining_reserve := reserved - committed
	growth_increment := max( increment, requested_size )
	growth_increment = clamp( growth_increment, page_size, remaining_reserve )
	next_commit_size := memory_align_formula( committed + growth_increment, page_size )
	return next_commit_size
}
|
||||
|
||||
// Adapts a VArena to Odin's Allocator interface.
varena_allocator :: proc( arena : ^VArena ) -> ( allocator : Allocator ) {
	allocator = Allocator {
		procedure = varena_allocator_proc,
		data      = arena,
	}
	return
}
|
||||
|
||||
// Default growth_policy is nil (falls back to varena_default_growth_policy).
// Reserves `to_reserve` bytes of address space at `base_address`, commits `to_commit`
// up front, and returns the initialized arena. All sizes must be at least a page.
varena_init :: proc( base_address : uintptr, to_reserve, to_commit : uint,
	growth_policy : VArena_GrowthPolicyProc, allow_any_reize : b32 = false, dbg_name : string
) -> ( arena : VArena, alloc_error : AllocatorError)
{
	page_size := uint(virtual_get_page_size())
	verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
	verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
	verify( to_commit >= page_size, "Attempted to commit less than a page size")
	verify( to_reserve >= to_commit, "Attempted to commit more than there is to reserve" )

	vmem : VirtualMemoryRegion
	vmem, alloc_error = virtual_reserve_and_commit( base_address, to_reserve, to_commit )
	if vmem.base_address == nil || alloc_error != .None {
		ensure(false, "Failed to allocate requested virtual memory for virtual arena")
		return
	}

	arena.vmem = vmem
	arena.commit_used = 0

	if growth_policy == nil {
		arena.growth_policy = varena_default_growth_policy
	}
	else {
		arena.growth_policy = growth_policy
	}
	arena.allow_any_reize = allow_any_reize

	// Debug builds track every allocation made from this arena.
	when ODIN_DEBUG {
		memtracker_init( & arena.tracker, runtime.heap_allocator(), Kilobyte * 128, dbg_name )
	}
	return
}
|
||||
|
||||
// Bump-allocates `size` bytes from the arena at the requested power-of-two alignment,
// growing the committed range via `growth_policy` when needed. Thread-safe via `mutex`.
// Returns .Invalid_Argument for a zero size and .Out_Of_Memory when the reservation
// is exhausted (or arithmetic would overflow).
varena_alloc :: proc( using self : ^VArena,
	size : uint,
	alignment : uint = mem.DEFAULT_ALIGNMENT,
	zero_memory := true,
	location := #caller_location
) -> ( data : []byte, alloc_error : AllocatorError )
{
	verify( alignment & (alignment - 1) == 0, "Non-power of two alignment", location = location )
	page_size := uint(virtual_get_page_size())

	requested_size := size
	if requested_size == 0 {
		ensure(false, "Requested 0 size")
		return nil, .Invalid_Argument
	}
	// ensure( requested_size > page_size, "Requested less than a page size, going to allocate a page size")
	// requested_size = max(requested_size, page_size)

	sync.mutex_guard( & mutex )

	// Padding needed to bring the current bump position up to `alignment`.
	alignment_offset := uint(0)
	current_offset := uintptr(self.reserve_start) + uintptr(commit_used)
	mask := uintptr(alignment - 1)

	if current_offset & mask != 0 {
		alignment_offset = alignment - uint(current_offset & mask)
	}

	size_to_allocate, overflow_signal := intrinsics.overflow_add( requested_size, alignment_offset )
	if overflow_signal {
		alloc_error = .Out_Of_Memory
		return
	}

	to_be_used : uint
	to_be_used, overflow_signal = intrinsics.overflow_add( commit_used, size_to_allocate )
	if overflow_signal || to_be_used > reserved {
		alloc_error = .Out_Of_Memory
		return
	}

	// The region header occupies the front of the committed range.
	header_offset := uint( uintptr(reserve_start) - uintptr(base_address) )

	commit_left := committed - commit_used - header_offset
	needs_more_committed := commit_left < size_to_allocate
	if needs_more_committed
	{
		profile("VArena Growing")
		next_commit_size := growth_policy( commit_used, committed, reserved, size_to_allocate )
		alloc_error = virtual_commit( vmem, next_commit_size )
		if alloc_error != .None {
			return
		}
	}

	data_ptr := rawptr(current_offset + uintptr(alignment_offset))
	data = byte_slice( data_ptr, int(requested_size) )
	self.commit_used += size_to_allocate
	alloc_error = .None

	// log_backing : [Kilobyte * 16]byte
	// backing_slice := byte_slice( & log_backing[0], len(log_backing))
	// log( str_fmt_buffer( backing_slice, "varena alloc - BASE: %p PTR: %X, SIZE: %d", cast(rawptr) self.base_address, & data[0], requested_size) )

	if zero_memory
	{
		// log( str_fmt_buffer( backing_slice, "Zeroring data (Range: %p to %p)", raw_data(data), cast(rawptr) (uintptr(raw_data(data)) + uintptr(requested_size))))
		// slice.zero( data )
		mem_zero( data_ptr, int(requested_size) )
	}

	when ODIN_DEBUG {
		memtracker_register_auto_name( & tracker, & data[0], & data[len(data) - 1] )
	}
	return
}
|
||||
|
||||
// Logically frees every allocation by resetting the bump offset.
// Committed pages are kept (no decommit), so subsequent allocations are cheap.
varena_free_all :: proc( using self : ^VArena )
{
	sync.mutex_guard( & mutex )
	commit_used = 0

	when ODIN_DEBUG && Track_Memory {
		array_clear(tracker.entries)
	}
}
|
||||
|
||||
// Releases the arena's entire virtual memory reservation back to the OS.
// The arena must not be used afterwards.
varena_release :: proc( using self : ^VArena )
{
	sync.mutex_guard( & mutex )

	virtual_release( vmem )
	commit_used = 0
}
|
||||
|
||||
// Allocator-interface dispatch for VArena.
// Supported: Alloc, Alloc_Non_Zeroed, Free_All, Resize, Resize_Non_Zeroed, Query_Features.
// .Free and .Query_Info return Mode_Not_Implemented (individual frees are meaningless
// for a bump arena).
varena_allocator_proc :: proc(
	allocator_data : rawptr,
	mode : AllocatorMode,
	size : int,
	alignment : int,
	old_memory : rawptr,
	old_size : int,
	location : SourceCodeLocation = #caller_location
) -> ( data : []byte, alloc_error : AllocatorError)
{
	arena := cast( ^VArena) allocator_data

	size := uint(size)
	alignment := uint(alignment)
	old_size := uint(old_size)

	page_size := uint(virtual_get_page_size())

	switch mode
	{
		case .Alloc, .Alloc_Non_Zeroed:
			data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Alloc_Non_Zeroed), location )
			return

		case .Free:
			alloc_error = .Mode_Not_Implemented

		case .Free_All:
			varena_free_all( arena )

		case .Resize, .Resize_Non_Zeroed:
			// A nil old pointer degenerates to a fresh allocation.
			if old_memory == nil {
				ensure(false, "Resizing without old_memory?")
				data, alloc_error = varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
				return
			}

			if size == old_size {
				ensure(false, "Requested resize when none needed")
				data = byte_slice( old_memory, old_size )
				return
			}

			// Shrinks are serviced in place; the arena never reclaims the tail bytes.
			alignment_offset := uintptr(old_memory) & uintptr(alignment - 1)
			if alignment_offset == 0 && size < old_size {
				ensure(false, "Requested a shrink from a virtual arena")
				data = byte_slice( old_memory, size )
				return
			}

			old_memory_offset := uintptr(old_memory) + uintptr(old_size)
			current_offset := uintptr(arena.reserve_start) + uintptr(arena.commit_used)

			// if old_size < page_size {
			// 	// We're dealing with an allocation that requested less than the minimum allocated on vmem.
			// 	// Provide them more of their actual memory
			// 	data = byte_slice( old_memory, size )
			// 	return
			// }

			verify( old_memory_offset == current_offset || arena.allow_any_reize,
				"Cannot resize existing allocation in vitual arena to a larger size unless it was the last allocated" )

			log_backing : [Kilobyte * 16]byte
			backing_slice := byte_slice( & log_backing[0], len(log_backing))

			// Non-tail grow (only when allow_any_reize): relocate into a fresh block and
			// copy the old contents over. Old memory is unrecoverable until clear.
			if old_memory_offset != current_offset && arena.allow_any_reize
			{
				// Give it new memory and copy the old over. Old memory is unrecoverable until clear.
				new_region : []byte
				new_region, alloc_error = varena_alloc( arena, size, alignment, (mode != .Resize_Non_Zeroed), location )
				if new_region == nil || alloc_error != .None {
					ensure(false, "Failed to grab new region")
					data = byte_slice( old_memory, old_size )

					when ODIN_DEBUG {
						memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
					}
					return
				}

				copy_non_overlapping( raw_data(new_region), old_memory, int(old_size) )
				data = new_region
				// log( str_fmt_tmp("varena resize (new): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))

				when ODIN_DEBUG {
					memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
				}
				return
			}

			// Tail grow: allocate only the difference — it lands contiguously after the
			// old block, so the old pointer remains valid for the larger size.
			new_region : []byte
			new_region, alloc_error = varena_alloc( arena, size - old_size, alignment, (mode != .Resize_Non_Zeroed), location )
			if new_region == nil || alloc_error != .None {
				ensure(false, "Failed to grab new region")
				data = byte_slice( old_memory, old_size )
				return
			}

			data = byte_slice( old_memory, size )
			// log( str_fmt_tmp("varena resize (expanded): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size))

			when ODIN_DEBUG {
				memtracker_register_auto_name( & arena.tracker, & data[0], & data[len(data) - 1] )
			}
			return

		case .Query_Features:
		{
			set := cast( ^AllocatorModeSet) old_memory
			if set != nil {
				(set ^) = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
			}
		}
		case .Query_Info:
		{
			alloc_error = .Mode_Not_Implemented
		}
	}
	return
}
|
@ -1,116 +0,0 @@
|
||||
/* Virtual Memory OS Interface
|
||||
This is an alternative to the virtual core library provided by Odin; it supports setting the base address among other things.
|
||||
*/
|
||||
package sectr
|
||||
|
||||
import "core:mem"
import core_virtual "core:mem/virtual"
import "core:os"
|
||||
|
||||
// Bookkeeping stored at the very front of a reservation (see virtual__reserve):
// the committed/reserved byte counts and where user data begins past this header.
VirtualMemoryRegionHeader :: struct {
	committed : uint,
	reserved : uint,
	reserve_start : [^]byte,
}

// Handle to a reservation: just a pointer to its in-place header.
VirtualMemoryRegion :: struct {
	using base_address : ^VirtualMemoryRegionHeader
}
|
||||
|
||||
// Returns the OS page size, cached in a @static local after the first call.
// NOTE(review): the cached write is unsynchronized; benign for a constant value,
// but confirm concurrent first use is acceptable.
virtual_get_page_size :: proc "contextless" () -> int {
	@static page_size := 0
	if page_size == 0 {
		page_size = os.get_page_size()
	}
	return page_size
}
|
||||
|
||||
// Bytes of the reservation usable for data (total reserved minus the header prefix).
virtual_reserve_remaining :: proc "contextless" ( using vmem : VirtualMemoryRegion ) -> uint {
	header_bytes := cast(uint) (uintptr(reserve_start) - uintptr(vmem.base_address))
	return reserved - header_bytes
}
|
||||
|
||||
// Ensures at least `size` bytes of the region are committed, rounding the commit up
// to whole pages. No-op when the request is already satisfied.
@(require_results)
virtual_commit :: proc "contextless" ( using vmem : VirtualMemoryRegion, size : uint ) -> ( alloc_error : AllocatorError )
{
	// Fix: was `size < committed`, which redundantly re-committed when size == committed.
	if size <= committed {
		return .None
	}

	page_size := uint(virtual_get_page_size())
	to_commit := memory_align_formula( size, page_size )

	alloc_error = core_virtual.commit( base_address, to_commit )
	if alloc_error != .None {
		return alloc_error
	}

	// Fix: record the page-aligned amount actually committed. The original stored the
	// raw `size`, under-counting and triggering redundant commits on later calls.
	base_address.committed = to_commit
	return alloc_error
}
|
||||
|
||||
// Decommits `size` bytes from the start of the region (thin wrapper over core:mem/virtual).
virtual_decommit :: proc "contextless" ( vmem : VirtualMemoryRegion, size : uint ) {
	core_virtual.decommit( vmem.base_address, size )
}
|
||||
|
||||
// Changes the protection flags on `region`.
// Returns false without calling the OS when the region is not a whole number of pages.
virtual_protect :: proc "contextless" ( vmem : VirtualMemoryRegion, region : []byte, flags : VirtualProtectFlags ) -> b32
{
	page_size := virtual_get_page_size()
	if len(region) % page_size != 0 {
		return false
	}
	return cast(b32) core_virtual.protect( raw_data(region), len(region), flags )
}
|
||||
|
||||
// Reserves `size` bytes of address space (rounded up to whole pages),
// optionally anchored at `base_address`.
@(require_results)
virtual_reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( VirtualMemoryRegion, AllocatorError ) {
	granularity := uint(virtual_get_page_size())
	return virtual__reserve( base_address, memory_align_formula( size, granularity ) )
}
|
||||
|
||||
// Reserves `reserve_size` bytes and commits the first `commit_size` of them.
// Returns .Invalid_Argument when asked to commit more than is reserved.
@(require_results)
virtual_reserve_and_commit :: proc "contextless" (
	base_address : uintptr, reserve_size, commit_size : uint
) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	if reserve_size < commit_size {
		alloc_error = .Invalid_Argument
		return
	}

	vmem, alloc_error = virtual_reserve( base_address, reserve_size )
	if alloc_error == .None {
		alloc_error = virtual_commit( vmem, commit_size )
	}
	return
}
|
||||
|
||||
// Returns the whole reservation (header included) to the OS.
virtual_release :: proc "contextless" ( vmem : VirtualMemoryRegion ) {
	core_virtual.release( vmem.base_address, vmem.reserved )
}
|
||||
|
||||
// If the OS is not windows, we just use the library's interface which does not support base_address.
|
||||
when ODIN_OS != OS_Type.Windows {
|
||||
|
||||
// Non-Windows fallback: reserves header + size bytes via core:mem/virtual and commits
// just the header so the VirtualMemoryRegionHeader bookkeeping is writable.
// Fixes over the previous version: `:=` misused for plain assignments, `or_return`
// used alongside explicit result binding, `alloc_error` redeclared, `commit` called
// with the wrong arguments, and a missing uint cast on len(data).
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)

	// Ignoring the base address, add an os specific impl if you want it.
	data : []byte
	data, alloc_error = core_virtual.reserve( header_size + size )
	if alloc_error != .None {
		return
	}
	alloc_error = core_virtual.commit( raw_data(data), header_size )
	if alloc_error != .None {
		return
	}

	vmem.base_address = cast( ^VirtualMemoryRegionHeader ) raw_data(data)
	vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
	// NOTE(review): records the FULL reservation (header included), matching what
	// virtual_release passes back to the OS; the Windows path records only `size`.
	vmem.reserved = cast(uint) len(data)
	vmem.committed = header_size
	return
}
|
||||
|
||||
} // END: ODIN_OS != runtime.Odin_OS_Type.Windows
|
@ -1,112 +0,0 @@
|
||||
package sectr
|
||||
|
||||
import "core:c"
|
||||
import "core:c/libc"
|
||||
import "core:fmt"
|
||||
import "core:mem"
|
||||
import core_virtual "core:mem/virtual"
|
||||
import "core:strings"
|
||||
import win32 "core:sys/windows"
|
||||
|
||||
when ODIN_OS == OS_Type.Windows {
|
||||
|
||||
// Sleeps for ~`desired_ms` using a win32 high-resolution waitable timer.
// Returns true when the timer signaled normally; false on creation/set failure or an
// abnormal wait result.
// Fixes: the timer handle was leaked on every call (no CloseHandle); the success case
// (WAIT_OBJECT_0) incorrectly returned false; WAIT_FAILED used a "$v" format-verb typo
// so GetLastError was never printed.
thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
{
	// label_backing : [1 * Megabyte]u8
	// label_arena : Arena
	// arena_init( & label_arena, slice_ptr( & label_backing[0], len(label_backing)) )
	// label_u8 := str_fmt_tmp( "SECTR: WAIT TIMER")//, allocator = arena_allocator( &label_arena) )
	// label_u16 := win32.utf8_to_utf16( label_u8, context.temp_allocator) //arena_allocator( & label_arena) )

	timer := win32.CreateWaitableTimerExW( nil, nil, win32.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, win32.TIMER_ALL_ACCESS )
	if timer == nil {
		msg := str_fmt("Failed to create win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}
	defer win32.CloseHandle( timer ) // was leaked previously

	due_time := win32.LARGE_INTEGER(desired_ms * MS_To_NS)
	result := win32.SetWaitableTimerEx( timer, & due_time, 0, nil, nil, nil, 0 )
	if ! result {
		msg := str_fmt("Failed to set win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}

	WAIT_ABANDONED : win32.DWORD : 0x00000080
	WAIT_IO_COMPLETION : win32.DWORD : 0x000000C0
	WAIT_OBJECT_0 : win32.DWORD : 0x00000000
	WAIT_TIMEOUT : win32.DWORD : 0x00000102
	WAIT_FAILED : win32.DWORD : 0xFFFFFFFF

	wait_result := win32.WaitForSingleObjectEx( timer, win32.INFINITE, win32.BOOL(true) )
	switch wait_result
	{
		case WAIT_ABANDONED:
			msg := str_fmt("Failed to wait for win32 timer - Error: WAIT_ABANDONED" )
			log( msg, LogLevel.Error, loc)
			return false

		case WAIT_IO_COMPLETION:
			msg := str_fmt("Waited for win32 timer: Ended by APC queued to the thread" )
			log( msg, LogLevel.Error, loc)
			return false

		case WAIT_OBJECT_0:
			// Timer signaled — this is the success path (previously returned false).
			msg := str_fmt("Waited for win32 timer- Reason : WAIT_OBJECT_0" )
			log( msg, loc = loc)

		case WAIT_FAILED:
			msg := str_fmt("Waited for win32 timer failed - ErrorCode: %v", win32.GetLastError() )
			log( msg, LogLevel.Error, loc)
			return false
	}

	return true
}
|
||||
|
||||
// Requests a minimum OS scheduler timer resolution of `desired_ms` milliseconds.
// NOTE(review): win32 expects timeBeginPeriod to be paired with timeEndPeriod —
// confirm the caller balances this.
set__scheduler_granularity :: proc "contextless" ( desired_ms : u32 ) -> b32 {
	return win32.timeBeginPeriod( desired_ms ) == win32.TIMERR_NOERROR
}
|
||||
|
||||
// win32 system error codes checked after VirtualAlloc MEM_COMMIT failures (see virtual__reserve).
WIN32_ERROR_INVALID_ADDRESS :: 487
WIN32_ERROR_COMMITMENT_LIMIT :: 1455
|
||||
|
||||
// Windows implementation: reserves header + `size` bytes at `base_address` via
// VirtualAlloc and commits just the header page(s) so the bookkeeping is writable.
@(require_results)
virtual__reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)

	result := win32.VirtualAlloc( rawptr(base_address), header_size + size, win32.MEM_RESERVE, win32.PAGE_READWRITE )
	if result == nil {
		alloc_error = .Out_Of_Memory
		return
	}
	// Commit just the header region so the VirtualMemoryRegionHeader can be written.
	result = win32.VirtualAlloc( rawptr(base_address), header_size, win32.MEM_COMMIT, win32.PAGE_READWRITE )
	if result == nil
	{
		switch err := win32.GetLastError(); err
		{
			case 0:
				alloc_error = .Invalid_Argument
				return

			case WIN32_ERROR_INVALID_ADDRESS, WIN32_ERROR_COMMITMENT_LIMIT:
				alloc_error = .Out_Of_Memory
				return
		}

		alloc_error = .Out_Of_Memory
		return
	}

	vmem.base_address = cast(^VirtualMemoryRegionHeader) result
	vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
	// NOTE(review): `reserved` records only `size`, excluding header_size, whereas the
	// non-Windows path records the full reservation — confirm which virtual_release expects.
	vmem.reserved = size
	vmem.committed = header_size
	alloc_error = .None
	return
}
|
||||
|
||||
} // END: ODIN_OS == runtime.Odin_OS_Type.Windows
|
Reference in New Issue
Block a user