Compare commits

...

5 Commits

Author SHA1 Message Date
Ed_
a0ddc3c26e minor misc (end of day stuff) 2025-10-21 23:21:07 -04:00
Ed_
2303866c81 code2/grime progress 2025-10-21 22:57:23 -04:00
Ed_
96c6d58ea0 Progress on code2/grime allocators 2025-10-21 22:10:48 -04:00
Ed_
f63b52f910 curate fixed stack 2025-10-21 22:10:23 -04:00
Ed_
6d5215ac1e Make ensures/verifies in Array asserts 2025-10-21 22:08:29 -04:00
17 changed files with 321 additions and 128 deletions

View File

@@ -116,7 +116,7 @@ AllocatorInfo :: struct {
AllocatorProcID :: enum uintptr {
FArena,
VArena,
// CArena,
Arena,
// Pool,
// Slab,
// Odin_Arena,
@@ -128,7 +128,7 @@ resolve_allocator_proc :: #force_inline proc "contextless" (procedure: $Allocato
switch (transmute(AllocatorProcID)procedure) {
case .FArena: return farena_allocator_proc
case .VArena: return varena_allocator_proc
// case .CArena: return carena_allocator_proc
case .Arena: return arena_allocator_proc
// case .Pool: return pool_allocator_proc
// case .Slab: return slab_allocator_proc
// case .Odin_Arena: return odin_arena_allocator_proc
@@ -146,7 +146,7 @@ resolve_odin_allocator :: #force_inline proc "contextless" (allocator: Odin_Allo
switch (transmute(AllocatorProcID)allocator.procedure) {
case .FArena: return { farena_odin_allocator_proc, allocator.data }
case .VArena: return { varena_odin_allocator_proc, allocator.data }
// case .CArena: return { carena_odin_allocator_proc, allocator.data }
case .Arena: return { arena_odin_allocator_proc, allocator.data }
// case .Pool: return nil // pool_allocator_proc
// case .Slab: return nil // slab_allocator_proc
// case .Odin_Arena: return nil // odin_arena_allocator_proc
@@ -157,7 +157,7 @@ resolve_odin_allocator :: #force_inline proc "contextless" (allocator: Odin_Allo
switch (allocator.procedure) {
case farena_allocator_proc: return { farena_odin_allocator_proc, allocator.data }
case varena_allocator_proc: return { varena_odin_allocator_proc, allocator.data }
case carena_allocator_proc: return { carena_odin_allocator_proc, allocator.data }
case arena_allocator_proc: return { arena_odin_allocator_proc, allocator.data }
}
}
panic_contextless("Unresolvable procedure")

View File

@@ -1,6 +1,6 @@
package grime
// Below should be defined per-package
// TODO(Ed): Below should be defined per-package?
ensure :: #force_inline proc(condition: bool, msg: string, location := #caller_location) -> bool {
if condition do return true

View File

@@ -128,7 +128,7 @@ array_append_value :: proc(self: ^Array($Type), value: Type) -> AllocatorError {
// Assumes non-overlapping for items.
array_append_at_slice :: proc(self : ^Array($Type ), items: []Type, id: int) -> AllocatorError {
ensure(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
assert(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
id := id
if id >= self.num { return array_append_slice(items) }
if len(items) > self.capacity {
@@ -143,7 +143,7 @@ array_append_at_slice :: proc(self : ^Array($Type ), items: []Type, id: int) ->
return AllocatorError.None
}
array_append_at_value :: proc(self: ^Array($Type), item: Type, id: int) -> AllocatorError {
ensure(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
assert(id < self.num, "Why are we doing an append at beyond the bounds of the current element count")
id := id; {
// TODO(Ed): Not sure I want this...
if id >= self.num do id = self.num
@@ -167,8 +167,8 @@ array_clear :: #force_inline proc "contextless" (self: Array($Type), zero_data:
}
array_fill :: proc(self: Array($Type), begin, end: u64, value: Type) -> bool {
ensure(end - begin <= num)
ensure(end <= num)
assert(end - begin <= num)
assert(end <= num)
if (end - begin > num) || (end > num) do return false
mem_fill(data[begin:], value, end - begin)
return true
@@ -183,7 +183,7 @@ array_push_back :: #force_inline proc "contextless" (self: Array($Type)) -> bool
}
array_remove_at :: proc(self: Array($Type), id: int) {
verify( id < self.num, "Attempted to remove from an index larger than the array" )
assert( id < self.num, "Attempted to remove from an index larger than the array" )
mem_copy(self.data[id:], self.data[id + 1:], (self.num - id) * size_of(Type))
self.num -= 1
}

View File

@@ -1,7 +1,7 @@
package grime
// TODO(Ed): Review when os2 is done.
// TODO(Ed): Make an async option...
// TODO(Ed): Make an async option?
file_copy_sync :: proc( path_src, path_dst: string, allocator := context.allocator ) -> b32
{
file_size : i64

View File

@@ -0,0 +1,29 @@
package grime
// Fixed-capacity, inline-storage stack: holds up to Size items with no heap allocation.
FStack :: struct ($Type: typeid, $Size: u32) {
items: [Size]Type, // backing storage; only items[0:idx] are live
idx: u32,          // count of live items == index of the next free slot
}
// Empties the stack by resetting the index; item storage is left as-is.
stack_clear :: #force_inline proc "contextless" (stack: ^FStack($Type, $Size)) { stack.idx = 0 }
// Pushes value onto the top of the stack; asserts (contextless) when the stack is already full.
stack_push :: #force_inline proc "contextless" (stack: ^FStack($Type, $Size ), value: Type) {
assert_contextless(stack.idx < u32(len( stack.items )), "Attempted to push on a full stack")
stack.items[stack.idx] = value
stack.idx += 1
}
// Pops the top item; asserts (contextless) when the stack is empty.
// NOTE(review): only slot 0 is zeroed, and only when the stack becomes empty —
// popped slots above index 0 retain stale values; confirm that is intended.
stack_pop :: #force_inline proc "contextless" (stack: ^FStack($Type, $Size)) {
// Fix: was plain `assert`, which requires a context; this proc is declared
// "contextless", so use assert_contextless (consistent with stack_push).
assert_contextless(stack.idx > 0, "Attempted to pop an empty stack")
stack.idx -= 1
if stack.idx == 0 {
stack.items[stack.idx] = {}
}
}
// Returns a pointer to the top item (slot 0 when the stack is empty).
// Fix: `max(0, s.idx - 1)` underflowed — idx is u32, so when idx == 0 the
// subtraction wraps to 0xFFFFFFFF and max() keeps the wrapped value, indexing
// far out of bounds. Guard the subtraction instead.
stack_peek_ref :: #force_inline proc "contextless" (s: ^FStack($Type, $Size)) -> (^Type) {
last_idx := s.idx - 1 if s.idx > 0 else 0
return & s.items[last_idx]
}
// Returns a copy of the top item (slot 0 when the stack is empty).
// Fix: same u32 underflow as stack_peek_ref — `s.idx - 1` wraps when idx == 0,
// so max(0, …) did not clamp; guard the subtraction explicitly.
stack_peek :: #force_inline proc "contextless" (s: ^FStack($Type, $Size)) -> Type {
last_idx := s.idx - 1 if s.idx > 0 else 0
return s.items[last_idx]
}
// Pushes value onto the stack; asserts (contextless) when the stack is full.
// Robustness fix: previously had no capacity check, so pushing onto a full
// stack wrote past the end of items. Mirror stack_push's guard.
stack_push_contextless :: #force_inline proc "contextless" (s: ^FStack($Type, $Size), value: Type) {
assert_contextless(s.idx < u32(len(s.items)), "Attempted to push on a full stack")
s.items[s.idx] = value
s.idx += 1
}

View File

@@ -13,7 +13,7 @@ hash64_djb8 :: #force_inline proc "contextless" (hash: ^u64, bytes: []byte ) {
hash32_fnv1a :: #force_inline proc "contextless" (hash: ^u32, data: []byte, seed := u32(0x811c9dc5)) {
hash^ = seed; for b in data { hash^ = (hash^ ~ u32(b)) * 0x01000193 }
}
// Ripped from core:hash, fnv64a
@(optimization_mode="favor_size")
hash64_fnv1a :: #force_inline proc "contextless" (hash: ^u64, data: []byte, seed := u64(0xcbf29ce484222325)) {
hash^ = seed; for b in data { hash^ = (hash^ ~ u64(b)) * 0x100000001b3 }

View File

@@ -18,14 +18,14 @@ KTL_Meta :: struct {
type: typeid,
}
ktl_get :: #force_inline proc(kt: []KTL_Slot($Type), key: u64) -> ^Type {
ktl_get :: #force_inline proc "contextless" (kt: []KTL_Slot($Type), key: u64) -> ^Type {
for & slot in kt { if key == slot.key do return & slot.value; }
return nil
}
// Unique populator for key-value pair strings
ktl_populate_slice_a2_str :: #force_inline proc (kt: ^[]KTL_Slot(string), backing: Odin_Allocator, values: [][2]string) {
ktl_populate_slice_a2_str :: #force_inline proc(kt: ^[]KTL_Slot(string), backing: Odin_Allocator, values: [][2]string) {
assert(kt != nil)
if len(values) == 0 { return }
raw_bytes, error := mem_alloc(size_of(KTL_Slot(string)) * len(values), ainfo = backing); assert(error == .None);

View File

@@ -8,7 +8,7 @@ So it has been stripped and instead applied on procedure site,
the parent container or is responsible for tracking that.
TODO(Ed): Resolve appropriate Key-Table term for it.
TODO(Ed): Complete this later if we actually have issues with KT1CX or Odin's map.
TODO(Ed): Complete this later if we actually want something beyond KT1CX or Odin's map.
*/
KT_Slot :: struct(

View File

@@ -27,6 +27,11 @@ ptr_cursor :: #force_inline proc "contextless" (ptr: ^$Type) -> [^]Type { return
@(require_results) is_power_of_two :: #force_inline proc "contextless" (x: uintptr) -> bool { return (x > 0) && ((x & (x-1)) == 0) }
@(require_results)
align_pow2_uint :: #force_inline proc "contextless" (ptr, align: uint) -> uint {
assert_contextless(is_power_of_two(uintptr(align)))
return ptr & ~(align-1)
}
@(require_results)
align_pow2 :: #force_inline proc "contextless" (ptr, align: int) -> int {
assert_contextless(is_power_of_two(uintptr(align)))
return ptr & ~(align-1)
@@ -68,8 +73,8 @@ slice_copy :: #force_inline proc "contextless" (dst, src: $SliceType / []$Type)
slice_fill :: #force_inline proc "contextless" (s: $SliceType / []$Type, value: Type) { memory_fill(cursor(s), value, len(s)) }
@(require_results) slice_to_bytes :: #force_inline proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: #force_inline proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }
@(require_results) slice_to_bytes :: #force_inline proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: #force_inline proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }
@(require_results) type_to_bytes :: #force_inline proc "contextless" (obj: ^$Type) -> []byte { return ([^]byte)(obj)[:size_of(Type)] }

View File

@@ -5,6 +5,8 @@
It only makes sure that memory allocations don't collide in the allocator and deallocations don't occur for memory never allocated.
I'm keeping it around as an artifact & for future allocators I may make.
NOTE(Ed): Prefer sanitizers
*/
package grime

View File

@@ -1,7 +1,10 @@
package grime
// TODO(Ed): Review this
import "base:runtime"
// TODO(Ed): Support address sanitizer
/*
So this is a virtual memory backed arena allocator designed
to take advantage of one large contiguous reserve of memory.
@@ -15,62 +18,39 @@ Thus for the scope of this prototype the Virtual Arena are the only interfaces t
The host application as well ideally (although this may not be the case for a while)
*/
VArena :: struct {
using vmem: VirtualMemoryRegion,
tracker: MemoryTracker,
dbg_name: string,
commit_used: uint,
growth_policy: VArena_GrowthPolicyProc,
allow_any_resize: b64,
mutex: Mutex,
VArenaFlags :: bit_set[VArenaFlag; u32]
VArenaFlag :: enum u32 {
No_Large_Pages,
}
VArena_GrowthPolicyProc :: #type proc(commit_used, committed, reserved, requested_size: uint) -> uint
varena_default_growth_policy :: proc(commit_used, committed, reserved, requested_size: uint) -> uint {
@static commit_limit := uint(1 * Mega)
@static increment := uint(16 * Kilo)
page_size := uint(virtual_get_page_size())
if increment < Giga && committed > commit_limit {
commit_limit *= 2
increment *= 2
increment = clamp(increment, Mega, Giga)
}
remaining_reserve := reserved - committed
growth_increment := max( increment, requested_size )
growth_increment = clamp( growth_increment, page_size, remaining_reserve )
next_commit_size := memory_align_formula( committed + growth_increment, page_size )
return next_commit_size
VArena :: struct {
using vmem: VirtualMemoryRegion,
commit_size: int,
commit_used: int,
flags: VArenaFlags,
}
// Default growth_policy is varena_default_growth_policy
varena_init :: proc(base_address: uintptr, to_reserve, to_commit: uint,
growth_policy: VArena_GrowthPolicyProc = varena_default_growth_policy,
allow_any_resize: bool = false,
dbg_name: string = "",
enable_mem_tracking: bool = false,
) -> (arena: VArena, alloc_error: AllocatorError)
varena_make :: proc(to_reserve, commit_size: int, base_address: uintptr, flags: VArenaFlags = {}
) -> (arena: ^VArena, alloc_error: AllocatorError)
{
page_size := uint(virtual_get_page_size())
verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
verify( to_commit >= page_size, "Attempted to commit less than a page size")
verify( to_reserve >= to_commit, "Attempted to commit more than there is to reserve" )
page_size := virtual_get_page_size()
verify( page_size > size_of(VirtualMemoryRegion), "Make sure page size is not smaller than a VirtualMemoryRegion?")
verify( to_reserve >= page_size, "Attempted to reserve less than a page size" )
verify( commit_size >= page_size, "Attempted to commit less than a page size")
verify( to_reserve >= commit_size, "Attempted to commit more than there is to reserve" )
vmem : VirtualMemoryRegion
vmem, alloc_error = virtual_reserve_and_commit( base_address, to_reserve, to_commit )
vmem, alloc_error = virtual_reserve_and_commit( base_address, uint(to_reserve), uint(commit_size) )
if ensure(vmem.base_address == nil || alloc_error != .None, "Failed to allocate requested virtual memory for virtual arena") {
return
}
arena = transmute(^VArena) vmem.base_address;
arena.vmem = vmem
arena.commit_used = 0
if growth_policy == nil do arena.growth_policy = varena_default_growth_policy
else do arena.growth_policy = growth_policy
arena.allow_any_resize = b64(allow_any_resize)
if Track_Memory && enable_mem_tracking {
memtracker_init( & arena.tracker, runtime.heap_allocator(), Kilo * 128, dbg_name)
}
arena.commit_used = align_pow2(size_of(arena), MEMORY_ALIGNMENT_DEFAULT)
arena.flags = flags
return
}
varena_alloc :: proc(using self: ^VArena,
varena_alloc :: proc(self: ^VArena,
size: int,
alignment: int = MEMORY_ALIGNMENT_DEFAULT,
zero_memory := true,
@@ -78,7 +58,7 @@ varena_alloc :: proc(using self: ^VArena,
) -> (data: []byte, alloc_error: AllocatorError)
{
verify( alignment & (alignment - 1) == 0, "Non-power of two alignment", location = location )
page_size := uint(virtual_get_page_size())
page_size := uint(virtual_get_page_size())
requested_size := uint(size)
if ensure(requested_size == 0, "Requested 0 size") do return nil, .Invalid_Argument
// ensure( requested_size > page_size, "Requested less than a page size, going to allocate a page size")
@@ -87,8 +67,12 @@ varena_alloc :: proc(using self: ^VArena,
// TODO(Ed): Prevent multiple threads from entering here extrusively?
// sync.mutex_guard( & mutex )
commit_used := uint(self.commit_used)
reserved := uint(self.reserved)
commit_size := uint(self.commit_size)
alignment_offset := uint(0)
current_offset := uintptr(self.reserve_start) + uintptr(commit_used)
current_offset := uintptr(self.reserve_start) + uintptr(self.commit_used)
mask := uintptr(alignment - 1)
if (current_offset & mask != 0) do alignment_offset = uint(alignment) - uint(current_offset & mask)
@@ -98,13 +82,13 @@ varena_alloc :: proc(using self: ^VArena,
to_be_used, overflow_signal = add_overflow( commit_used, size_to_allocate )
if (overflow_signal || to_be_used > reserved) do return {}, .Out_Of_Memory
header_offset := uint( uintptr(reserve_start) - uintptr(base_address) )
commit_left := committed - commit_used - header_offset
header_offset := uint( uintptr(self.reserve_start) - uintptr(self.base_address) )
commit_left := self.committed - commit_used - header_offset
needs_more_committed := commit_left < size_to_allocate
if needs_more_committed {
profile("VArena Growing")
next_commit_size := growth_policy( commit_used, committed, reserved, size_to_allocate )
alloc_error = virtual_commit( vmem, next_commit_size )
next_commit_size := max(to_be_used, commit_size)
alloc_error = virtual_commit( self.vmem, next_commit_size )
if alloc_error != .None do return
}
data_ptr := ([^]byte)(current_offset + uintptr(alignment_offset))
@@ -118,12 +102,10 @@ varena_alloc :: proc(using self: ^VArena,
// zero( data )
mem_zero( data_ptr, int(requested_size) )
}
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & tracker, & data[0], & data[len(data) - 1] )
}
return
}
varena_grow :: #force_inline proc(self: ^VArena, old_memory: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero := true, loc := #caller_location) -> (data: []byte, error: AllocatorError)
varena_grow :: #force_inline proc(self: ^VArena, old_memory: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero := true, loc := #caller_location
) -> (data: []byte, error: AllocatorError)
{
if ensure(old_memory == nil, "Growing without old_memory?") {
data, error = varena_alloc(self, requested_size, alignment, should_zero, loc)
@@ -148,29 +130,21 @@ varena_grow :: #force_inline proc(self: ^VArena, old_memory: []byte, requested_s
return
}
}
verify( old_memory_offset == current_offset || bool(self.allow_any_resize),
verify( old_memory_offset == current_offset,
"Cannot grow existing allocation in vitual arena to a larger size unless it was the last allocated" )
log_backing: [Kilo * 16]byte
backing_slice := log_backing[:]
if old_memory_offset != current_offset && self.allow_any_resize
if old_memory_offset != current_offset
{
// Give it new memory and copy the old over. Old memory is unrecoverable until clear.
new_region : []byte
new_region, error = varena_alloc( self, requested_size, alignment, should_zero, loc )
if ensure(new_region == nil || error != .None, "Failed to grab new region") {
data = old_memory
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
copy_non_overlapping( cursor(new_region), cursor(old_memory), len(old_memory) )
data = new_region
// log_print_fmt("varena resize (new): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size)
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
new_region : []byte
@@ -181,29 +155,22 @@ varena_grow :: #force_inline proc(self: ^VArena, old_memory: []byte, requested_s
}
data = slice(cursor(old_memory), requested_size )
// log_print_fmt("varena resize (expanded): old: %p %v new: %p %v", old_memory, old_size, (& data[0]), size)
if Track_Memory && self.tracker.entries.header != nil {
memtracker_register_auto_name( & self.tracker, & data[0], & data[len(data) - 1] )
}
return
}
varena_shrink :: proc(self: ^VArena, memory: []byte, requested_size: int, loc := #caller_location) -> (data: []byte, error: AllocatorError)
{
varena_shrink :: proc(self: ^VArena, memory: []byte, requested_size: int, loc := #caller_location) -> (data: []byte, error: AllocatorError) {
if requested_size == len(memory) { return memory, .None }
if ensure(memory == nil, "Shrinking without old_memory?") do return memory, .Invalid_Argument
current_offset := self.reserve_start[self.commit_used:]
shrink_amount := len(memory) - requested_size
if shrink_amount < 0 do return memory, .None
if shrink_amount < 0 { return memory, .None }
assert(cursor(memory) == current_offset)
self.commit_used -= uint(shrink_amount)
self.commit_used -= shrink_amount
return memory[:requested_size], .None
}
varena_reset :: #force_inline proc(self: ^VArena) {
// TODO(Ed): Prevent multiple threads from entering here extrusively?
// sync.mutex_guard( & mutex )
self.commit_used = 0
if Track_Memory && self.tracker.entries.header != nil {
array_clear(self.tracker.entries)
}
}
varena_release :: #force_inline proc(self: ^VArena) {
// TODO(Ed): Prevent multiple threads from entering here extrusively?
@@ -214,7 +181,7 @@ varena_release :: #force_inline proc(self: ^VArena) {
varena_rewind :: #force_inline proc(arena: ^VArena, save_point: AllocatorSP, loc := #caller_location) {
assert_contextless(save_point.type_sig == varena_allocator_proc)
assert_contextless(save_point.slot >= 0 && save_point.slot <= int(arena.commit_used))
arena.commit_used = cast(uint) save_point.slot
arena.commit_used = save_point.slot
}
varena_save :: #force_inline proc(arena: ^VArena) -> AllocatorSP { return AllocatorSP { type_sig = varena_allocator_proc, slot = cast(int) arena.commit_used }}
@@ -240,7 +207,7 @@ varena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Ou
output.save_point = varena_save(arena)
case .Query:
output.features = {.Alloc, .Reset, .Grow, .Shrink, .Rewind}
output.max_alloc = int(arena.reserved - arena.commit_used)
output.max_alloc = int(arena.reserved) - arena.commit_used
output.min_alloc = 0
output.left = output.max_alloc
output.save_point = varena_save(arena)
@@ -295,3 +262,14 @@ else {
varena_ainfo :: #force_inline proc "contextless" (arena: ^VArena) -> AllocatorInfo { return AllocatorInfo{procedure = varena_allocator_proc, data = arena} }
varena_allocator :: #force_inline proc "contextless" (arena: ^VArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = varena_allocator_proc, data = arena} }
}
varena_push_item :: #force_inline proc(va: ^VArena, $Type: typeid, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero := true, location := #caller_location
) -> (^Type, AllocatorError) {
raw, error := varena_alloc(va, size_of(Type), alignment, should_zero, location)
return transmute(^Type) cursor(raw), error
}
varena_push_slice :: #force_inline proc(va: ^VArena, $Type: typeid, amount: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero := true, location := #caller_location
) -> ([]Type, AllocatorError) {
raw, error := varena_alloc(va, size_of(Type) * amount, alignment, should_zero, location)
return slice(transmute([^]Type) cursor(raw), len(raw) / size_of(Type)), error
}

View File

@@ -0,0 +1,126 @@
package grime
/*
Arena (Chained Virtual Arenas):
*/
// Flags controlling chained-arena behavior; u32 backing mirrors VArenaFlags
// so the set can be transmuted to the backing VArena's flags.
ArenaFlags :: bit_set[ArenaFlag; u32]
ArenaFlag :: enum u32 {
No_Large_Pages, // forwarded to the backing VArena
No_Chaining,    // when set, arena_alloc fails over-capacity requests instead of chaining
}
// Growable arena built from a chain of VArena blocks; the header lives at the
// start of its own backing block's committed memory.
Arena :: struct {
backing: ^VArena, // virtual-arena block this header lives in / allocates from
prev: ^Arena,     // previously active block in the chain (nil for the first)
current: ^Arena,  // most recently chained block (meaningful on the head arena)
base_pos: int,    // cumulative offset of this block within the logical arena
pos: int,         // bump offset within this block (starts just past the header)
flags: ArenaFlags,
}
// Creates a chained arena: reserves/commits a backing VArena and places the
// Arena header at the start of its usable space.
arena_make :: proc(reserve_size : int = Mega * 64, commit_size : int = Mega * 64, base_addr: uintptr = 0, flags: ArenaFlags = {}) -> ^Arena {
header_size := align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT)
current, error := varena_make(reserve_size, commit_size, base_addr, transmute(VArenaFlags) flags)
assert(error == .None)
assert(current != nil)
// NOTE(review): the positional `1` binds to varena_push_item's `alignment`
// parameter (that proc has no count parameter) — confirm an alignment of 1
// for the Arena header is intended rather than the default alignment.
arena: ^Arena; arena, error = varena_push_item(current, Arena, 1)
assert(error == .None)
assert(arena != nil)
arena^ = Arena {
backing = current,
prev = nil,
current = arena,   // the head block tracks itself as the active block
base_pos = 0,
pos = header_size, // bump pointer starts just past the header
flags = flags,
}
return arena
}
// Bump-allocates `size` bytes (aligned) from the active block, chaining a new
// VArena when the request exceeds the block's reserve (unless No_Chaining).
arena_alloc :: proc(arena: ^Arena, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []byte {
assert(arena != nil)
active := arena.current
size_requested := size
size_aligned := align_pow2(size_requested, alignment)
pos_pre := active.pos
pos_pst := pos_pre + size_aligned
reserved := int(active.backing.reserved)
should_chain := (.No_Chaining not_in arena.flags) && (reserved < pos_pst)
if should_chain {
// New block reserves as much as the current one and continues the logical
// address space at base_pos + reserved.
new_arena := arena_make(reserved, active.backing.commit_size, 0, transmute(ArenaFlags) active.backing.flags)
new_arena.base_pos = active.base_pos + reserved
sll_stack_push_n(& arena.current, & new_arena, & new_arena.prev)
new_arena.prev = active
active = arena.current
// NOTE(review): pos_pre/pos_pst were computed from the OLD active block and
// are not recomputed for the freshly chained block — the result pointer and
// the active.pos update below appear to use stale offsets; verify this path.
}
result_ptr := transmute([^]byte) (uintptr(active) + uintptr(pos_pre))
vresult, error := varena_alloc(active.backing, size_aligned, alignment)
assert(error == .None)
slice_assert(vresult)
// Cross-check: the bump-computed pointer must match what the backing VArena returned.
assert(raw_data(vresult) == result_ptr)
active.pos = pos_pst
return slice(result_ptr, size)
}
// Releases every VArena block in the chain, newest to oldest.
arena_release :: proc(arena: ^Arena) {
assert(arena != nil)
// Walk from the most recent block back to the first. The next link is
// captured before releasing, because each node lives inside the backing
// region being freed.
for node := arena.current; node != nil; {
older := node.prev
varena_release(node.backing)
node = older
}
}
// Rewinds the arena back to empty (slot 0 is clamped to the header size inside arena_rewind).
arena_reset :: proc(arena: ^Arena) {
arena_rewind(arena, AllocatorSP { type_sig = arena_allocator_proc, slot = 0 })
}
// Rewinds the arena to a previously captured save point: releases blocks
// chained past the point, then resets the surviving block's bump offset.
arena_rewind :: proc(arena: ^Arena, save_point: AllocatorSP) {
assert(arena != nil)
assert(save_point.type_sig == arena_allocator_proc)
header_size := align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT)
curr := arena.current
big_pos := max(header_size, save_point.slot) // never rewind into the header
// Release arenas that are beyond the save point
// NOTE(review): if every block's base_pos >= big_pos this loop walks off the
// end of the chain and dereferences nil — presumably the head block's
// base_pos of 0 (always < big_pos) prevents that; confirm.
for curr.base_pos >= big_pos {
prev := curr.prev
varena_release(curr.backing)
curr = prev
}
arena.current = curr
new_pos := big_pos - curr.base_pos // offset local to the surviving block
assert(new_pos <= curr.pos)
curr.pos = new_pos
// Keep the backing VArena in sync; its own header occupies size_of(VArena).
varena_rewind(curr.backing, { type_sig = varena_allocator_proc, slot = curr.pos + size_of(VArena) })
}
// Captures the current logical position (global across the whole chain) as a save point.
arena_save :: #force_inline proc(arena: ^Arena) -> AllocatorSP { return { type_sig = arena_allocator_proc, slot = arena.base_pos + arena.current.pos } }
// Generic allocator entry point for Arena — stub, not implemented yet
// (referenced by resolve_allocator_proc via AllocatorProcID.Arena).
arena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out) {
panic("not implemented")
}
// Odin core-allocator adapter for Arena — stub, not implemented yet.
arena_odin_allocator_proc :: proc(
allocator_data : rawptr,
mode : Odin_AllocatorMode,
size : int,
alignment : int,
old_memory : rawptr,
old_size : int,
location : SourceCodeLocation = #caller_location
) -> (data: []byte, alloc_error: AllocatorError)
{
panic("not implemented")
}
// Debug builds encode the allocator as an enum proc id (resolved at call sites);
// release builds embed the procedure pointer directly in AllocatorInfo.
when ODIN_DEBUG {
arena_ainfo :: #force_inline proc "contextless" (arena: ^Arena) -> AllocatorInfo { return AllocatorInfo{proc_id = .Arena, data = arena} }
arena_allocator :: #force_inline proc "contextless" (arena: ^Arena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{proc_id = .Arena, data = arena} }
}
else {
arena_ainfo :: #force_inline proc "contextless" (arena: ^Arena) -> AllocatorInfo { return AllocatorInfo{procedure = arena_allocator_proc, data = arena} }
arena_allocator :: #force_inline proc "contextless" (arena: ^Arena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = arena_allocator_proc, data = arena} }
}
// TODO: typed single-item push helper (to mirror varena_push_item) — empty stub.
arena_push_item :: proc()
{
}
// TODO: typed slice push helper (to mirror varena_push_slice) — empty stub.
arena_push_array :: proc()
{
}

View File

@@ -0,0 +1,28 @@
package grime
// TODO(Ed): Review this
import "base:runtime"
// TODO(Ed): Support address sanitizer
/*
Pool allocator backed by chained virtual arenas.
*/
// Intrusive free-list node — presumably stored in freed blocks themselves;
// TODO confirm once the pool allocation paths are implemented.
Pool_FreeBlock :: struct { next: ^Pool_FreeBlock }
// Fixed-block-size pool backed by a chained Arena (see pool_make stub below).
VPool :: struct {
arenas: ^Arena,
block_size: uint,
// alignment: uint,
free_list_head: ^Pool_FreeBlock,
}
// Constructs a VPool — stub, not implemented yet.
pool_make :: proc() -> (pool: VPool, error: AllocatorError)
{
panic("not implemented")
// return
}

View File

@@ -0,0 +1,15 @@
package grime
// Describes one slab size class: reserve amount plus block geometry.
VSlabSizeClass :: struct {
vmem_reserve: uint,
block_size: uint,
block_alignment: uint,
}
// Upper bound on size classes a slab (or its policy) can hold.
Slab_Max_Size_Classes :: 24
// A policy is a fixed stack of size classes supplied by the slab's configurer.
SlabPolicy :: FStack(VSlabSizeClass, Slab_Max_Size_Classes)
VSlab :: struct {
pools: FStack(VPool, Slab_Max_Size_Classes), // one VPool per configured size class
}

View File

@@ -83,6 +83,10 @@ import grime "codebase:grime"
grime_set_profiler_module_context :: grime.set_profiler_module_context
grime_set_profiler_thread_buffer :: grime.set_profiler_thread_buffer
ensure :: grime.ensure
fatal :: grime.fatal
verify :: grime.verify
file_is_locked :: grime.file_is_locked
logger_init :: grime.logger_init
to_odin_logger :: grime.to_odin_logger
@@ -137,24 +141,24 @@ import "codebase:sectr"
ThreadMemory :: sectr.ThreadMemory
WorkerID :: sectr.WorkerID
ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location ) {
if condition do return
log_print( msg, LoggerLevel.Warning, location )
debug_trap()
}
// TODO(Ed) : Setup exit codes!
fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location ) {
log_print( msg, LoggerLevel.Fatal, location )
debug_trap()
process_exit( exit_code )
}
// TODO(Ed) : Setup exit codes!
verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) {
if condition do return
log_print( msg, LoggerLevel.Fatal, location )
debug_trap()
process_exit( exit_code )
}
// ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location ) {
// if condition do return
// log_print( msg, LoggerLevel.Warning, location )
// debug_trap()
// }
// // TODO(Ed) : Setup exit codes!
// fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location ) {
// log_print( msg, LoggerLevel.Fatal, location )
// debug_trap()
// process_exit( exit_code )
// }
// // TODO(Ed) : Setup exit codes!
// verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) {
// if condition do return
// log_print( msg, LoggerLevel.Fatal, location )
// debug_trap()
// process_exit( exit_code )
// }
log_print :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
context.allocator = arena_allocator(& host_memory.host_scratch)

View File

@@ -61,6 +61,10 @@ import "core:time"
tick_now :: time.tick_now
import "codebase:grime"
ensure :: grime.ensure
fatal :: grime.fatal
verify :: grime.verify
Array :: grime.Array
array_to_slice :: grime.array_to_slice
array_append_array :: grime.array_append_array
@@ -117,24 +121,24 @@ Tera :: Giga * 1024
S_To_MS :: grime.S_To_MS
ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location ) {
if condition do return
log_print( msg, LoggerLevel.Warning, location )
debug_trap()
}
// TODO(Ed) : Setup exit codes!
fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location ) {
log_print( msg, LoggerLevel.Fatal, location )
debug_trap()
process_exit( exit_code )
}
// TODO(Ed) : Setup exit codes!
verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) {
if condition do return
log_print( msg, LoggerLevel.Fatal, location )
debug_trap()
process_exit( exit_code )
}
// ensure :: #force_inline proc( condition : b32, msg : string, location := #caller_location ) {
// if condition do return
// log_print( msg, LoggerLevel.Warning, location )
// debug_trap()
// }
// // TODO(Ed) : Setup exit codes!
// fatal :: #force_inline proc( msg : string, exit_code : int = -1, location := #caller_location ) {
// log_print( msg, LoggerLevel.Fatal, location )
// debug_trap()
// process_exit( exit_code )
// }
// // TODO(Ed) : Setup exit codes!
// verify :: #force_inline proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) {
// if condition do return
// log_print( msg, LoggerLevel.Fatal, location )
// debug_trap()
// process_exit( exit_code )
// }
log_print :: proc( msg : string, level := LoggerLevel.Info, loc := #caller_location ) {
context.allocator = odin_arena_allocator(& memory.host_scratch)

View File

@@ -12,6 +12,8 @@ $url_odin_repo = 'https://github.com/Ed94/Odin.git'
$url_sokol = 'https://github.com/Ed94/sokol-odin.git'
$url_sokol_tools = 'https://github.com/floooh/sokol-tools-bin.git'
# TODO(Ed): https://github.com/karl-zylinski/odin-handle-map
$path_harfbuzz = join-path $path_thirdparty 'harfbuzz'
$path_ini_parser = join-path $path_thirdparty 'ini'
$path_odin = join-path $path_toolchain 'Odin'