more progress on grime for code2

This commit is contained in:
2025-10-12 01:52:08 -04:00
parent 983cac0660
commit 54ff97b6c1
14 changed files with 1225 additions and 297 deletions

View File

@@ -0,0 +1,296 @@
package grime
/*
This is a non-idiomatic allocator interface inspired by Odin/Jai/gb/zpl-c.
By default the interface is still compatible with Odin's context system; however, the user is expected to wrap the allocator struct with odin_ainfo_giftwrap to obtain idiomatic procedures.
For details see: Idiomatic Compatibility Wrapper (just search for it)
*/
// Operation selector passed to an AllocatorProc via AllocatorProc_In.op.
AllocatorOp :: enum u32 {
	Alloc_NoZero = 0, // If Alloc exist, so must No_Zero
	Alloc,            // Allocate and zero (see farena_allocator_proc's .Alloc path)
	Free,
	Reset,            // Wipe the allocator's state
	Grow_NoZero,
	Grow,
	Shrink,
	Rewind,           // Roll back to a previously taken save point
	SavePoint,        // Produce an AllocatorSP in the output
	Query,            // Must always be implemented
}
// Capability flags an allocator reports in response to .Query (see AllocatorQueryFlags).
AllocatorQueryFlag :: enum u64 {
	Alloc,
	Free,
	Reset, // Wipe the allocator's state
	Shrink,
	Grow,
	Resize, // Supports both grow and shrink
	Rewind, // Ability to rewind to a save point (ex: arenas, stack), must also be able to save such a point
	// Actually_Resize,
	// Is_This_Yours,
	Hint_Fast_Bump,
	Hint_General_Heap,
	Hint_Per_Frame_Temporary,
	Hint_Debug_Support,
}
// Aliased to Odin's runtime allocator error so results interop with core APIs
// (the commented enum below documents the value set this code relies on).
AllocatorError :: Odin_AllocatorError
// AllocatorError :: enum i32 {
// None = 0,
// Out_Of_Memory = 1,
// Invalid_Pointer = 2,
// Invalid_Argument = 3,
// Mode_Not_Implemented = 4,
// }
AllocatorQueryFlags :: bit_set[AllocatorQueryFlag; u64]
// Save-point handle: tags the owning allocator proc plus an allocator-defined
// slot (FArena stores its `used` offset here — see farena_save/farena_rewind).
AllocatorSP :: struct {
	type_sig: AllocatorProc,
	slot:     int,
}
// Signature every allocator implements; op-specific payloads travel in the structs below.
AllocatorProc :: #type proc (input: AllocatorProc_In, out: ^AllocatorProc_Out)
AllocatorProc_In :: struct {
	data:           rawptr, // Allocator instance state (e.g. ^FArena)
	requested_size: int,
	alignment:      int,
	using _ : struct #raw_union {
		old_allocation: []byte,      // For Free / Grow / Shrink
		save_point    : AllocatorSP, // For Rewind
	},
	op: AllocatorOp,
}
AllocatorProc_Out :: struct {
	using _ : struct #raw_union {
		allocation: []byte,      // For Alloc / Grow / Shrink
		save_point: AllocatorSP, // For SavePoint (and Query)
	},
	features:  AllocatorQueryFlags, // Populated by .Query
	left:      int,                 // Remaining capacity reported by .Query
	max_alloc: int,
	min_alloc: int,
	error:     AllocatorError,
}
// .Query results viewed as a flat struct; intended to be bit-compatible with
// AllocatorProc_Out (see the commented #assert below).
AllocatorQueryInfo :: struct {
	save_point: AllocatorSP,
	features:   AllocatorQueryFlags,
	left:       int,
	max_alloc:  int,
	min_alloc:  int,
	alignment:  i32,
}
// Allocator handle: either a direct proc pointer, or — on debug/hot-reloadable
// builds — a stable AllocatorProcID resolved at call time (resolve_allocator_proc).
AllocatorInfo :: struct {
	using _ : struct #raw_union {
		procedure: AllocatorProc,
		proc_id:   AllocatorProcID,
	},
	data: rawptr,
}
// #assert(size_of(AllocatorQueryInfo) == size_of(AllocatorProc_Out))
// Listing of every single allocator (used on hot-reloadable builds)
AllocatorProcID :: enum uintptr {
	FArena,
	VArena,
	CArena,
	Pool,
	Slab,
	Odin_Arena,
	// Odin_VArena,
}
// Resolves an allocator handle to a callable proc.
// Debug builds treat `procedure` as an AllocatorProcID so allocator identity is
// stable across hot reloads; release builds pass the pointer straight through.
resolve_allocator_proc :: #force_inline proc(procedure: $AllocatorProcType) -> AllocatorProc {
	when ODIN_DEBUG {
		switch (transmute(AllocatorProcID)procedure) {
		case .FArena: return farena_allocator_proc
		case .VArena: return nil // varena_allocaotr_proc
		case .CArena: return nil // carena_allocator_proc
		case .Pool: return nil // pool_allocator_proc
		case .Slab: return nil // slab_allocator_proc
		case .Odin_Arena: return nil // odin_arena_allocator_proc
		// case .Odin_VArena: return odin_varena_allocator_proc
		}
	}
	else {
		return transmute(AllocatorProc) procedure
	}
	// NOTE(review): only FArena is wired up; the other IDs resolve to nil and
	// will fault at the call site in debug builds.
	return nil
}
// Default allocation alignment: two pointers wide (16 bytes on 64-bit targets).
MEMORY_ALIGNMENT_DEFAULT :: 2 * size_of(rawptr)
// Reinterpret the context's Odin allocator as this interface's AllocatorInfo (layouts match).
ainfo :: #force_inline proc(ainfo := context.allocator) -> AllocatorInfo { return transmute(AllocatorInfo) ainfo }
// Reinterpret an AllocatorInfo as an Odin allocator (layouts match).
odin_allocator :: #force_inline proc(ainfo: AllocatorInfo) -> Odin_Allocator { return transmute(Odin_Allocator) ainfo }
// Issues a .Query and returns the result viewed as AllocatorQueryInfo
// (bit-compatible with AllocatorProc_Out per the commented #assert above).
allocator_query :: proc(ainfo := context.allocator) -> AllocatorQueryInfo {
	assert(ainfo.procedure != nil)
	out: AllocatorQueryInfo; resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Query}, transmute(^AllocatorProc_Out) & out)
	return out
}
// Releases `mem` back to the allocator described by `ainfo` (op .Free).
mem_free_ainfo :: proc(mem: []byte, ainfo: AllocatorInfo) {
	assert(ainfo.procedure != nil)
	procedure := resolve_allocator_proc(ainfo.procedure)
	request   := AllocatorProc_In { data = ainfo.data, op = .Free, old_allocation = mem }
	discarded : AllocatorProc_Out
	procedure(request, & discarded)
}
// Wipes the allocator's state (op .Reset); existing allocations become invalid.
mem_reset :: proc(ainfo := context.allocator) {
	assert(ainfo.procedure != nil)
	procedure := resolve_allocator_proc(ainfo.procedure)
	discarded : AllocatorProc_Out
	procedure({data = ainfo.data, op = .Reset}, & discarded)
}
// Rolls the allocator back to `save_point` (op .Rewind); see mem_save_point.
mem_rewind :: proc(ainfo := context.allocator, save_point: AllocatorSP) {
	assert(ainfo.procedure != nil)
	procedure := resolve_allocator_proc(ainfo.procedure)
	discarded : AllocatorProc_Out
	procedure({data = ainfo.data, op = .Rewind, save_point = save_point}, & discarded)
}
// Captures the allocator's current position (op .SavePoint) for a later mem_rewind.
mem_save_point :: proc(ainfo := context.allocator) -> AllocatorSP {
	assert(ainfo.procedure != nil)
	procedure := resolve_allocator_proc(ainfo.procedure)
	result: AllocatorProc_Out
	procedure({data = ainfo.data, op = .SavePoint}, & result)
	return result.save_point
}
// Allocates `size` bytes (allocator zeroes them unless no_zero).
// NOTE(review): `ainfo : $Type = context.allocator` is polymorphic, unlike the
// sibling procs' plain `ainfo := context.allocator` — presumably to accept
// either Odin_Allocator or AllocatorInfo; confirm this is intentional.
mem_alloc :: proc(size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo : $Type = context.allocator) -> []byte {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		op             = no_zero ? .Alloc_NoZero : .Alloc,
		requested_size = size,
		alignment      = alignment,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return output.allocation
}
// Grows `mem` to `size` bytes (op .Grow; allocators may zero the new tail
// unless no_zero). Returns the resulting allocation, empty on failure.
mem_grow :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		op             = no_zero ? .Grow_NoZero : .Grow,
		requested_size = size,
		alignment      = alignment,
		old_allocation = mem,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return output.allocation
}
// Resizes `mem` to `size` bytes: dispatches .Shrink when the request is smaller
// than the current allocation, otherwise .Grow (or .Grow_NoZero when no_zero).
// Returns the resulting allocation, empty on failure.
mem_resize :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		// Fix: the original tested `len(mem) < size ? .Shrink : ...`, which sent
		// an *enlarge* request down the Shrink path (and vice versa). Shrink only
		// when the requested size is smaller than the current one.
		op             = size < len(mem) ? .Shrink : no_zero ? .Grow_NoZero : .Grow,
		requested_size = size,
		alignment      = alignment,
		old_allocation = mem,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return output.allocation
}
// Shrinks `mem` to `size` bytes (op .Shrink).
// NOTE: no_zero is accepted for signature symmetry with the other mem_* procs
// but is not consulted here — shrinking never zeroes.
mem_shrink :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		op             = .Shrink,
		requested_size = size,
		alignment      = alignment,
		old_allocation = mem,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return output.allocation
}
// Allocates storage for a single Type (zeroed unless no_zero) and returns a
// typed pointer — nil when the allocator returned an empty allocation.
alloc_type :: proc($Type: typeid, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> ^Type {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		op             = no_zero ? .Alloc_NoZero : .Alloc,
		requested_size = size_of(Type),
		alignment      = alignment,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return transmute(^Type) raw_data(output.allocation)
}
// Allocates a slice of `num` Type elements (zeroed unless no_zero) and returns
// it typed; the slot count comes from `num`, not the allocator's byte count.
alloc_slice :: proc($SliceType: typeid / []$Type, num : int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []Type {
	assert(ainfo.procedure != nil)
	input := AllocatorProc_In {
		data           = ainfo.data,
		op             = no_zero ? .Alloc_NoZero : .Alloc,
		requested_size = size_of(Type) * num,
		alignment      = alignment,
	}
	output: AllocatorProc_Out
	resolve_allocator_proc(ainfo.procedure)(input, & output)
	return transmute([]Type) slice(raw_data(output.allocation), num)
}
/*
Idiomatic Compatibility Wrapper
Ideally we wrap all procedures that go to idiomatic Odin with the following pattern.
Usually we do the following:
```
import "core:dynlib"
os_lib_load :: dynlib.load_library
```
Instead:
os_lib_load :: #force_inline proc "contextless" (... same signature as load_library, allocator := ...) { return dynlib.load_library(..., odin_ainfo_giftwrap(allocator)) }
*/
// Maps Odin's core allocator mode onto this interface's AllocatorOp.
// size_diff (new size - old size) disambiguates Resize into Grow vs Shrink.
odin_allocator_mode_to_allocator_op :: #force_inline proc "contextless" (mode: Odin_AllocatorMode, size_diff : int) -> AllocatorOp {
	switch mode {
	case .Alloc:             return .Alloc
	case .Alloc_Non_Zeroed:  return .Alloc_NoZero
	case .Free:              return .Free
	case .Free_All:          return .Reset
	case .Resize:            return size_diff > 0 ? .Grow        : .Shrink
	case .Resize_Non_Zeroed: return size_diff > 0 ? .Grow_NoZero : .Shrink
	// Both query modes funnel into .Query; the wrapper proc routes their results.
	case .Query_Features:    return .Query
	case .Query_Info:        return .Query
	}
	panic_contextless("Impossible path")
}
// Odin-allocator-shaped shim: forwards core allocator calls onto a wrapped
// AllocatorInfo. `allocator_data` points at that AllocatorInfo (Odin_Allocator
// shares its layout, so both transmutes below view the same two fields).
// Installed by odin_ainfo_giftwrap.
odin_allocator_wrap_proc :: proc(
	allocator_data : rawptr,
	mode           : Odin_AllocatorMode,
	size           : int,
	alignment      : int,
	old_memory     : rawptr,
	old_size       : int,
	loc := #caller_location
) -> ( data : []byte, alloc_error : Odin_AllocatorError)
{
	input := AllocatorProc_In {
		data           = (transmute(^AllocatorInfo)allocator_data).data,
		requested_size = size,
		alignment      = alignment,
		old_allocation = slice(transmute([^]byte)old_memory, old_size),
		op             = odin_allocator_mode_to_allocator_op(mode, size - old_size),
	}
	output: AllocatorProc_Out
	resolve_allocator_proc((transmute(^Odin_Allocator)allocator_data).procedure)(input, & output)
	#partial switch mode {
	case .Query_Features:
		debug_trap() // TODO(Ed): Finish this...
		return nil, nil
	case .Query_Info:
		// Odin convention: old_memory points at a query-info struct to be filled in.
		info := (^Odin_AllocatorQueryInfo)(old_memory)
		if info != nil && info.pointer != nil {
			info.size      = output.left
			// Out and QueryInfo are layout-compatible; pull alignment from the view.
			info.alignment = cast(int) (transmute(AllocatorQueryInfo)output).alignment
			return slice(transmute(^byte)info, size_of(info^) ), nil
		}
		return nil, nil
	}
	return output.allocation, cast(Odin_AllocatorError)output.error
}
// Wraps an AllocatorInfo as an Odin_Allocator usable with idiomatic core APIs.
// NOTE(review): the wrapped handle lives in a single thread-local that is
// overwritten on every call — an Odin_Allocator returned by an earlier giftwrap
// on the same thread is silently retargeted. Safe only for immediate use;
// hence "cursed".
odin_ainfo_giftwrap :: #force_inline proc(ainfo := context.allocator) -> Odin_Allocator {
	@(thread_local)
	cursed_allocator_wrap_ref : Odin_Allocator
	cursed_allocator_wrap_ref = {ainfo.procedure, ainfo.data}
	return {odin_allocator_wrap_proc, & cursed_allocator_wrap_ref}
}

View File

@@ -1,8 +1,19 @@
package grime
// Context :: struct {
// }
// Project-local stand-in for Odin's context payload: allocator slots use this
// package's AllocatorInfo instead of runtime.Allocator.
Context :: struct {
	allocator:              AllocatorInfo,
	temp_allocator:         AllocatorInfo,
	assertion_failure_proc: Assertion_Failure_Proc,
	logger:                 Logger,
	random_generator:       Random_Generator,
	// context_usr :: #force_inline proc( $ Type : typeid ) -> (^Type) {
	// return cast(^Type) context.user_ptr
	// }
	user_ptr:   rawptr, // Free-form user slot; see context_usr below
	user_index: int,
	// Internal use only
	_internal: rawptr,
}
// Views context.user_ptr as a pointer to the requested type.
context_usr :: #force_inline proc( $ Type : typeid ) -> (^Type) {
	return (^Type)(context.user_ptr)
}

125
code2/grime/farena.odin Normal file
View File

@@ -0,0 +1,125 @@
package grime
// Fixed-buffer arena: linear bump allocation over caller-provided memory.
FArena :: struct {
	mem:  []byte, // Backing buffer; never resized by the arena
	used: int,    // Bytes consumed from the front of mem
}
// Constructs a fixed-buffer arena over `backing`; performs no allocation.
@require_results
farena_make :: proc(backing: []byte) -> FArena {
	return FArena { mem = backing }
}
// (Re)initializes `arena` in place over `backing`, resetting usage to zero.
farena_init :: proc(arena: ^FArena, backing: []byte) {
	assert(arena != nil)
	arena^ = FArena { mem = backing }
}
// Pushes `amount` items of Type, advancing `used` by the alignment-rounded byte
// size. Asserts (rather than returning an error) when the buffer is exhausted.
@require_results
farena_push :: proc(arena: ^FArena, $Type: typeid, amount: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> []Type {
	assert(arena != nil)
	if amount == 0 {
		return {}
	}
	desired   := size_of(Type) * amount
	to_commit := align_pow2(desired, alignment)
	unused    := len(arena.mem) - arena.used
	assert(to_commit <= unused)
	// NOTE(review): only the *size* is rounded; the returned pointer is wherever
	// `used` currently ends. Pointer alignment therefore depends on the backing
	// buffer's base and prior pushes — confirm that is the intent.
	ptr := cursor(arena.mem[arena.used:])
	arena.used += to_commit
	return slice(ptr, amount)
}
// Grows `old_allocation` in place. Only possible when it is the most recent
// allocation (its end coincides with the arena's bump cursor); otherwise
// reports .Out_Of_Memory. Assumes requested_size >= len(old_allocation) —
// shrink requests are routed to farena_shirnk by farena_allocator_proc.
@require_results
farena_grow :: proc(arena: ^FArena, old_allocation: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, should_zero: bool = true) -> (allocation: []byte, err: AllocatorError) {
	if len(old_allocation) == 0 {
		return {}, .Invalid_Argument
	}
	alloc_end := end(old_allocation)
	arena_end := cursor(arena.mem)[arena.used:]
	if alloc_end != arena_end {
		// Not at the end, can't grow in place
		return {}, .Out_Of_Memory
	}
	// Calculate growth
	grow_amount  := requested_size - len(old_allocation)
	aligned_grow := align_pow2(grow_amount, alignment)
	unused       := len(arena.mem) - arena.used
	if aligned_grow > unused {
		// Not enough space
		return {}, .Out_Of_Memory
	}
	arena.used += aligned_grow
	allocation = slice(cursor(old_allocation), requested_size)
	if should_zero {
		// Zero only the newly exposed tail; the preserved prefix is untouched.
		mem_zero( cursor(allocation)[len(old_allocation):], grow_amount )
	}
	err = .None
	return
}
// Shrinks `old_allocation` to `requested_size`. Reclaims arena space only when
// the allocation is the most recent one; otherwise just narrows the slice.
// NOTE(review): name keeps the original "shirnk" spelling — it is referenced by
// farena_allocator_proc, so a rename must touch both sites.
@require_results
farena_shirnk :: proc(arena: ^FArena, old_allocation: []byte, requested_size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT) -> (allocation: []byte, err: AllocatorError) {
	if len(old_allocation) == 0 {
		return {}, .Invalid_Argument
	}
	alloc_end := end(old_allocation)
	arena_end := cursor(arena.mem)[arena.used:]
	if alloc_end != arena_end {
		// Not at the end, can't shrink but return adjusted size
		allocation = old_allocation[:requested_size]
		err = .None
		return
	}
	// Calculate shrinkage
	// NOTE(review): the old size is re-rounded with MEMORY_ALIGNMENT_DEFAULT while
	// the new size uses the `alignment` parameter — confirm the mismatch is intended.
	aligned_original := align_pow2(len(old_allocation), MEMORY_ALIGNMENT_DEFAULT)
	aligned_new      := align_pow2(requested_size, alignment)
	arena.used -= (aligned_original - aligned_new)
	allocation = old_allocation[:requested_size]
	return
}
// Discards every allocation by rewinding the bump cursor; memory is not zeroed.
farena_reset :: proc(arena: ^FArena) {
	arena.used = 0
}
// Rewinds the bump cursor to a save point taken by farena_save.
// Asserts the save point belongs to this allocator kind and is within bounds.
farena_rewind :: proc(arena: ^FArena, save_point: AllocatorSP) {
	assert(save_point.type_sig == farena_allocator_proc)
	assert(save_point.slot >= 0 && save_point.slot <= arena.used)
	arena.used = save_point.slot
}
// Captures the current bump offset as a save point for farena_rewind.
farena_save :: #force_inline proc(arena: FArena) -> AllocatorSP { return AllocatorSP { type_sig = farena_allocator_proc, slot = arena.used } }
// AllocatorProc implementation for FArena: dispatches each AllocatorOp onto the
// farena_* procs above. input.data must point at an FArena.
farena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out) {
	assert(output != nil)
	assert(input.data != nil)
	arena := transmute(^FArena) input.data
	switch input.op
	{
	case .Alloc, .Alloc_NoZero:
		output.allocation = to_bytes(farena_push(arena, byte, input.requested_size, input.alignment))
		if input.op == .Alloc {
			zero(output.allocation)
		}
	case .Free:
		// No-op for arena
	case .Reset:
		farena_reset(arena)
	case .Grow, .Grow_NoZero:
		output.allocation, output.error = farena_grow(arena, input.old_allocation, input.requested_size, input.alignment, input.op == .Grow)
	case .Shrink:
		output.allocation, output.error = farena_shirnk(arena, input.old_allocation, input.requested_size, input.alignment)
	case .Rewind:
		farena_rewind(arena, input.save_point)
	case .SavePoint:
		output.save_point = farena_save(arena^)
	case .Query:
		output.features  = {.Alloc, .Reset, .Grow, .Shrink, .Rewind}
		output.max_alloc = len(arena.mem) - arena.used
		output.min_alloc = 0
		output.left      = output.max_alloc
		output.save_point = farena_save(arena^)
	}
}
// Handle constructors. Debug builds hand out the stable proc ID (hot-reload
// safe, resolved by resolve_allocator_proc); release builds embed the proc
// pointer directly. Both transmute cleanly to Odin_Allocator (same layout).
when ODIN_DEBUG {
farena_ainfo     :: #force_inline proc "contextless" (arena: ^FArena) -> AllocatorInfo  { return AllocatorInfo{proc_id = .FArena, data = arena} }
farena_allocator :: #force_inline proc "contextless" (arena: ^FArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{proc_id = .FArena, data = arena} }
}
else {
farena_ainfo     :: #force_inline proc "contextless" (arena: ^FArena) -> AllocatorInfo  { return AllocatorInfo{procedure = farena_allocator_proc, data = arena} }
farena_allocator :: #force_inline proc "contextless" (arena: ^FArena) -> Odin_Allocator { return transmute(Odin_Allocator) AllocatorInfo{procedure = farena_allocator_proc, data = arena} }
}

9
code2/grime/hashing.odin Normal file
View File

@@ -0,0 +1,9 @@
package grime
// Streaming 32-bit hash folded into the caller's accumulator.
// Per byte: h = (h << 8) + h + b, i.e. h*257 + b — a djb-style multiply-add
// variant (classic djb2 shifts by 5); caller supplies the seed in hash^.
hash32_djb8 :: #force_inline proc "contextless" ( hash : ^u32, bytes : []byte ) {
	for value in bytes do (hash^) = (( (hash^) << 8) + (hash^) ) + u32(value)
}
// 64-bit sibling of hash32_djb8: h = h*257 + b per byte, seed in hash^.
hash64_djb8 :: #force_inline proc "contextless" ( hash : ^u64, bytes : []byte ) {
	for value in bytes do (hash^) = (( (hash^) << 8) + (hash^) ) + u64(value)
}

View File

@@ -0,0 +1,169 @@
package grime
import "base:intrinsics"
/*
Key Table 1-Layer Chained-Chunked-Cells
*/
// Typed slot: value + key; `occupied` distinguishes live entries from empty ones.
KT1CX_Slot :: struct($type: typeid) {
	value:    type,
	key:      u64,
	occupied: b32,
}
// Typed cell: a fixed run of slots plus an overflow link to the next cell.
KT1CX_Cell :: struct($type: typeid, $depth: int) {
	slots: [depth]KT1CX_Slot(type),
	next:  ^KT1CX_Cell(type, depth),
}
// Typed table: one cell per hash bucket.
KT1CX :: struct($cell: typeid) {
	table: []cell,
}
// Byte-erased views used by the untyped kt1cx_* procs; the field offsets and
// sizes needed to navigate them travel separately in the *Meta structs below.
KT1CX_Byte_Slot :: struct {
	key:      u64,
	occupied: b32,
}
KT1CX_Byte_Cell :: struct {
	next: ^byte,
}
KT1CX_Byte :: struct {
	table: []byte,
}
// Layout description for one erased table (offsets relative to cell/slot starts).
KT1CX_ByteMeta :: struct {
	slot_size:        int,
	slot_key_offset:  uintptr,
	cell_next_offset: uintptr,
	cell_depth:       int,
	cell_size:        int,
	type_width:       int,
	type:             typeid,
}
// ByteMeta plus the sizing inputs kt1cx_init needs.
KT1CX_InfoMeta :: struct {
	cell_pool_size:   int,
	table_size:       int,
	slot_size:        int,
	slot_key_offset:  uintptr,
	cell_next_offset: uintptr,
	cell_depth:       int,
	cell_size:        int,
	type_width:       int,
	type:             typeid,
}
// Backing allocators: one for the bucket table, one for overflow cells.
KT1CX_Info :: struct {
	backing_table: AllocatorInfo,
	backing_cells: AllocatorInfo,
}
// Allocates the bucket table (table_size cells of cell_size bytes each) into
// `result`. The slice's len is rewritten to the *cell count* — not the byte
// count — so the other kt1cx_* procs stride by m.cell_size over it.
kt1cx_init :: proc(info: KT1CX_Info, m: KT1CX_InfoMeta, result: ^KT1CX_Byte) {
	assert(result != nil)
	assert(info.backing_cells.procedure != nil)
	assert(info.backing_table.procedure != nil)
	assert(m.cell_depth > 0)
	assert(m.cell_pool_size >= 4 * Kilo)
	assert(m.table_size >= 4 * Kilo)
	assert(m.type_width > 0)
	table_raw := transmute(SliceByte) mem_alloc(m.table_size * m.cell_size, ainfo = odin_allocator(info.backing_table))
	slice_assert(transmute([]byte) table_raw)
	table_raw.len = m.table_size
	result.table = transmute([]byte) table_raw
}
// Intends to zero every slot of every cell, following overflow links.
// NOTE(review): table_len is computed but never used.
kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) {
	cell_cursor := cursor(kt.table)
	table_len   := len(kt.table) * m.cell_size
	for ; cell_cursor != end(kt.table); cell_cursor = cell_cursor[m.cell_size:] // for cell, cell_id in kt.table.cells
	{
		slots       := SliceByte { cell_cursor, m.cell_depth * m.slot_size } // slots = cell.slots
		slot_cursor := slots.data
		for;; {
			slot := slice(slot_cursor, m.slot_size) // slot = slots[slot_id]
			zero(slot) // slot = {}
			if slot_cursor == end(transmute([]byte) slots) { // if slot == end(slot)
				// NOTE(review): this yields the *address* of the next-pointer field,
				// never nil, and the stored link is never loaded — as written the
				// inner loop has no exit. Compare kt1cx_set, which transmutes to
				// ^KT1CX_Byte_Cell and reads `.next`. Needs fixing before use.
				next := slot_cursor[m.cell_next_offset:] // next = kt.table.cells[cell_id + 1]
				if next != nil { // if next != nil
					slots.data  = next // slots = next.slots
					slot_cursor = next
					continue
				}
			}
			slot_cursor = slot_cursor[m.slot_size:] // slot = slots[slot_id + 1]
		}
	}
}
// Maps a key to its home bucket: simple modulo over the cell count.
// `m` is unused here but kept so all kt1cx_* procs share a uniform signature.
kt1cx_slot_id :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 {
	// Fix: dropped the original's unused `cell_size := m.cell_size` dummy local.
	return key % u64(len(kt.table))
}
// Looks up `key`: scans the home cell's slots, then overflow cells; returns a
// pointer to the matching slot's base (value lives at offset 0), or nil.
kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte {
	hash_index  := kt1cx_slot_id(kt, key, m)
	cell_offset := uintptr(hash_index) * uintptr(m.cell_size)
	cell_cursor := cursor(kt.table)[cell_offset:] // cell_id = 0
	{
		slots       := slice(cell_cursor, m.cell_depth * m.slot_size) // slots = cell[cell_id].slots
		slot_cursor := cell_cursor // slot_id = 0
		for;;
		{
			slot := transmute(^KT1CX_Byte_Slot) slot_cursor[m.slot_key_offset:] // slot = cell[slot_id]
			if slot.occupied && slot.key == key {
				return cast(^byte) slot_cursor
			}
			if slot_cursor == end(transmute([]byte) slots)
			{
				// NOTE(review): `cell_next` is the *address* of the next-pointer field,
				// not the stored link — the nil check below is always true and the real
				// link is never dereferenced (kt1cx_set handles this correctly via
				// ^KT1CX_Byte_Cell). Needs fixing before use.
				cell_next := cell_cursor[m.cell_next_offset:] // cell.next
				if cell_next != nil {
					slots       = slice(cell_next, len(slots)) // slots = cell.next
					slot_cursor = cell_next
					cell_cursor = cell_next // cell = cell.next
					continue
				}
				else {
					return nil
				}
			}
			slot_cursor = slot_cursor[m.slot_size:]
		}
	}
}
// Inserts or finds `key`: returns a pointer to the slot's base (existing entry,
// first free slot, or a freshly allocated overflow cell's first slot).
// `value` is not copied here; callers write through the returned pointer.
kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: AllocatorInfo, m: KT1CX_ByteMeta) -> ^byte {
	hash_index  := kt1cx_slot_id(kt, key, m)
	cell_offset := uintptr(hash_index) * uintptr(m.cell_size)
	cell_cursor := cursor(kt.table)[cell_offset:] // KT1CX_Cell(Type) cell = kt.table[hash_index]
	{
		slots       := SliceByte {cell_cursor, m.cell_depth * m.slot_size} // cell.slots
		slot_cursor := slots.data
		for ;;
		{
			slot := transmute(^KT1CX_Byte_Slot) slot_cursor[m.slot_key_offset:]
			if slot.occupied == false {
				slot.occupied = true
				slot.key      = key
				return cast(^byte) slot_cursor
			}
			else if slot.key == key {
				return cast(^byte) slot_cursor
			}
			// NOTE(review): this end test runs *after* the slot at `slot_cursor` was
			// examined, so when the cursor equals one-past-the-last slot a read has
			// already occurred out of range — verify slot layout covers this.
			if slot_cursor == end(transmute([]byte) slots) {
				curr_cell := transmute(^KT1CX_Byte_Cell) (uintptr(cell_cursor) + m.cell_next_offset) // curr_cell = cell
				// Fix: the original tested `curr_cell != nil` — the address of the
				// next field, which is never nil — so the allocation branch below was
				// unreachable and a nil `next` link would be walked. Test the stored
				// link instead.
				if curr_cell.next != nil {
					slots.data  = curr_cell.next
					slot_cursor = curr_cell.next
					cell_cursor = curr_cell.next
					continue
				}
				else {
					// Chain a new overflow cell and claim its first slot for this key.
					new_cell := mem_alloc(m.cell_size, ainfo = odin_allocator(backing_cells))
					curr_cell.next = raw_data(new_cell)
					slot = transmute(^KT1CX_Byte_Slot) cursor(new_cell)[m.slot_key_offset:]
					slot.occupied = true
					slot.key = key
					return raw_data(new_cell)
				}
			}
			slot_cursor = slot_cursor[m.slot_size:]
		}
		return nil
	}
}
// NOTE(review): both procs below reference `kt.cell_pool`, but neither KT1CX
// nor KT1CX_Byte declares that field (each has only `table`), and kt1cx_byte
// builds a two-element literal for the one-field KT1CX_Byte — this WIP does not
// match the current struct definitions; reconcile before use.
kt1cx_assert :: proc(kt: $type / KT1CX) {
	slice_assert(kt.cell_pool)
	slice_assert(kt.table)
}
kt1cx_byte :: proc(kt: $type / KT1CX) -> KT1CX_Byte { return { to_bytes(kt.cell_pool), slice( transmute([^]byte) cursor(kt.table), len(kt.table)) } }

View File

@@ -0,0 +1,48 @@
package grime
/*
Key Table 1-Layer Linear (KT1L)
*/
// Single linear-table entry: hashed key plus the stored value.
KT1L_Slot :: struct($Type: typeid) {
	key:   u64,
	value: Type,
}
// Byte-erased layout info for KT1L_Slot(Type), built by kt1l_populate_slice_a2.
KT1L_Meta :: struct {
	slot_size:       uintptr, // size_of(KT1L_Slot(Type))
	kt_value_offset: uintptr, // offset_of(KT1L_Slot(Type), value)
	type_width:      uintptr, // size_of(Type)
	type:            typeid,
}
// Byte-erased worker for kt1l_populate_slice_a2: allocates the slot table and
// fills it from an array of [key, value] pairs, copying each pair's value into
// the slot and hashing its key into the slot's u64. The key bytes are obtained
// by reinterpreting the pair's first element as a []byte header (assumes Type
// is slice/string-like — TODO confirm against callers).
kt1l_populate_slice_a2_Slice_Byte :: proc(kt: ^[]byte, backing: AllocatorInfo, values: []byte, num_values: int, m: KT1L_Meta) {
	assert(kt != nil)
	if num_values == 0 { return }
	table_size_bytes := num_values * int(m.slot_size)
	kt^ = mem_alloc(table_size_bytes, ainfo = transmute(Odin_Allocator) backing)
	slice_assert(kt ^)
	// Fix: view the caller's slice header in place. The original copied it into a
	// local (`kt_raw : SliceByte = transmute(SliceByte) kt^`), so the trailing
	// `kt_raw.len = num_values` mutated only the copy — callers were left with
	// mem_alloc's byte count as the length instead of the element count.
	kt_raw := transmute(^SliceByte) kt
	for id in 0 ..< cast(uintptr) num_values {
		slot_offset := id * m.slot_size           // slot id
		slot_cursor := kt_raw.data[slot_offset:]  // slots[id] type: KT1L_<Type>
		a2_offset   := id * m.type_width * 2      // a2 entry id
		a2_cursor   := cursor(values)[a2_offset:] // a2_entries[id] type: A2_<Type>
		// slots[id].value = a2_entries[id].value
		mem_copy_non_overlapping(slot_cursor[m.kt_value_offset:], a2_cursor[m.type_width:], cast(int) m.type_width)
		// slots[id].key = hash64_djb8(a2_entries[id].key), seeded with 0
		(transmute([^]u64) slot_cursor)[0] = 0;
		hash64_djb8(transmute(^u64) slot_cursor, (transmute(^[]byte) a2_cursor) ^)
	}
	kt_raw.len = num_values
}
// Typed front-end: erases a [][2]Type pair array and the destination slot slice
// to bytes, then delegates to kt1l_populate_slice_a2_Slice_Byte with the
// layout metadata for KT1L_Slot(Type).
kt1l_populate_slice_a2 :: proc($Type: typeid, kt: ^[]KT1L_Slot(Type), backing: AllocatorInfo, values: [][2]Type) {
	assert(kt != nil)
	values_bytes := slice(transmute([^]u8) raw_data(values), len(values) * size_of([2]Type))
	kt1l_populate_slice_a2_Slice_Byte(transmute(^[]byte) kt, backing, values_bytes, len(values), {
		slot_size       = size_of(KT1L_Slot(Type)),
		kt_value_offset = offset_of(KT1L_Slot(Type), value),
		type_width      = size_of(Type),
		type            = Type,
	})
}

View File

@@ -7,20 +7,11 @@ Tera :: Giga * 1024
ptr_cursor :: #force_inline proc "contextless" (ptr: ^$Type) -> [^]Type { return transmute([^]Type) ptr }
align_pow2 :: proc(x: int, b: int) -> int {
assert(b != 0)
assert((b & (b - 1)) == 0) // Check power of 2
return ((x + b - 1) & ~(b - 1))
}
memory_zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
memory_zero_explicit :: #force_inline proc "contextless" (data: rawptr, len: int) -> rawptr {
mem_zero_volatile(data, len) // Use the volatile mem_zero
atomic_thread_fence(.Seq_Cst) // Prevent reordering
return data
}
memory_copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
mem_copy(dst, src, len)
return dst
}
SliceByte :: struct {
data: [^]byte,
@@ -38,7 +29,7 @@ slice_assert :: #force_inline proc (s: $SliceType / []$Type) {
}
slice_end :: #force_inline proc "contextless" (s : $SliceType / []$Type) -> ^Type { return & cursor(s)[len(s)] }
slice_copy :: proc "contextless" (dst, src: $SliceType / []$Type) -> int {
slice_copy :: #force_inline proc "contextless" (dst, src: $SliceType / []$Type) -> int {
n := max(0, min(len(dst), len(src)))
if n > 0 {
mem_copy(raw_data(dst), raw_data(src), n * size_of(Type))
@@ -46,289 +37,82 @@ slice_copy :: proc "contextless" (dst, src: $SliceType / []$Type) -> int {
return n
}
@(require_results) slice_to_bytes :: proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }
@(require_results) slice_to_bytes :: #force_inline proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] }
@(require_results) slice_raw :: #force_inline proc "contextless" (s: []$Type) -> SliceRaw(Type) { return transmute(SliceRaw(Type)) s }
//region Allocator Interface
AllocatorOp :: enum u32 {
Alloc_NoZero = 0, // If Alloc exist, so must No_Zero
Alloc,
Free,
Reset,
Grow_NoZero,
Grow,
Shrink,
Rewind,
SavePoint,
Query, // Must always be implemented
}
AllocatorQueryFlag :: enum u64 {
Alloc,
Free,
Reset, // Wipe the allocator's state
//region Memory Math
Shrink,
Grow,
Resize, // Supports both grow and shrink
Rewind, // Ability to rewind to a save point (ex: arenas, stack), must also be able to save such a point
// Actually_Resize,
// Is_This_Yours,
Hint_Fast_Bump,
Hint_General_Heap,
Hint_Per_Frame_Temporary,
Hint_Debug_Support,
}
AllocatorError :: enum i32 {
None = 0,
Out_Of_Memory = 1,
Invalid_Pointer = 2,
Invalid_Argument = 3,
Mode_Not_Implemented = 4,
}
AllocatorQueryFlags :: bit_set[AllocatorQueryFlag; u64]
AllocatorSP :: struct {
type_sig: AllocatorProc,
slot: int,
}
AllocatorProc :: #type proc (input: AllocatorProc_In, out: ^AllocatorProc_Out)
AllocatorProc_In :: struct {
data: rawptr,
requested_size: int,
alignment: int,
using _ : struct #raw_union {
old_allocation: []byte,
save_point : AllocatorSP,
},
op: AllocatorOp,
}
AllocatorProc_Out :: struct {
using _ : struct #raw_union {
allocation: []byte,
save_point: AllocatorSP,
},
features: AllocatorQueryFlags,
left: int,
max_alloc: int,
min_alloc: int,
error: AllocatorError,
}
AllocatorQueryInfo :: struct {
save_point: AllocatorSP,
features: AllocatorQueryFlags,
left: int,
max_alloc: int,
min_alloc: int,
alignment: i32,
}
AllocatorInfo :: struct {
_ : struct #raw_union {
procedure: AllocatorProc,
proc_id: AllocatorProcID,
},
data: rawptr,
}
// #assert(size_of(AllocatorQueryInfo) == size_of(AllocatorProc_Out))
// Listing of every single allocator (used on hot-reloadable builds)
AllocatorProcID :: enum uintptr {
FArena,
VArena,
CArena,
Pool,
Slab,
Odin_Arena,
// Odin_VArena,
}
resolve_allocator_proc :: #force_inline proc(procedure: Odin_AllocatorProc) -> AllocatorProc {
when ODIN_DEBUG {
switch (transmute(AllocatorProcID)procedure) {
case .FArena: return nil // farena_allocator_proc
case .VArena: return nil // varena_allocaotr_proc
case .CArena: return nil // carena_allocator_proc
case .Pool: return nil // pool_allocator_proc
case .Slab: return nil // slab_allocator_proc
case .Odin_Arena: return nil // odin_arena_allocator_proc
// case .Odin_VArena: return odin_varena_allocator_proc
}
}
else {
return transmute(AllocatorProc) procedure
}
return nil
}
MEMORY_ALIGNMENT_DEFAULT :: 2 * size_of(rawptr)
allocator_query :: proc(ainfo := context.allocator) -> AllocatorQueryInfo {
assert(ainfo.procedure != nil)
out: AllocatorQueryInfo; resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Query}, transmute(^AllocatorProc_Out) & out)
return out
}
mem_free :: proc(mem: []byte, ainfo := context.allocator) {
assert(ainfo.procedure != nil)
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Free, old_allocation = mem}, & {})
}
mem_reset :: proc(ainfo := context.allocator) {
assert(ainfo.procedure != nil)
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Reset}, &{})
}
mem_rewind :: proc(ainfo := context.allocator, save_point: AllocatorSP) {
assert(ainfo.procedure != nil)
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .Rewind, save_point = save_point}, & {})
}
mem_save_point :: proc(ainfo := context.allocator) -> AllocatorSP {
assert(ainfo.procedure != nil)
out: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)({data = ainfo.data, op = .SavePoint}, & out)
return out.save_point
}
mem_alloc :: proc(size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = no_zero ? .Alloc_NoZero : .Alloc,
requested_size = size,
alignment = alignment,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return output.allocation
}
mem_grow :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = no_zero ? .Grow_NoZero : .Grow,
requested_size = size,
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return output.allocation
}
mem_resize :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = len(mem) < size ? .Shrink : no_zero ? .Grow_NoZero : .Grow,
requested_size = size,
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return output.allocation
}
mem_shrink :: proc(mem: []byte, size: int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []byte {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = .Shrink,
requested_size = size,
alignment = alignment,
old_allocation = mem,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return output.allocation
}
alloc_type :: proc($Type: typeid, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> ^Type {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = no_zero ? .Alloc_NoZero : .Alloc,
requested_size = size_of(Type),
alignment = alignment,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return transmute(^Type) raw_data(output.allocation)
}
alloc_slice :: proc($SliceType: typeid / []$Type, num : int, alignment: int = MEMORY_ALIGNMENT_DEFAULT, no_zero: b32 = false, ainfo := context.allocator) -> []Type {
assert(ainfo.procedure != nil)
input := AllocatorProc_In {
data = ainfo.data,
op = no_zero ? .Alloc_NoZero : .Alloc,
requested_size = size_of(Type) * num,
alignment = alignment,
}
output: AllocatorProc_Out
resolve_allocator_proc(ainfo.procedure)(input, & output)
return transmute([]Type) slice(raw_data(output.allocation), num)
}
//endregion Allocator Interface
/*
Ideally we wrap all procedures that go to ideomatic odin with the following pattern:
Usually we do the following:
```
import "core:dynlib"
os_lib_load :: dynlib.load_library
```
Instead:
os_lib_load :: #force_inline proc "contextless" (... same signature as load_library, allocator := ...) { return dynlib.load_library(..., odin_ainfo_wrap(allocator)) }
*/
odin_allocator_mode_to_allocator_op :: #force_inline proc "contextless" (mode: Odin_AllocatorMode, size_diff : int) -> AllocatorOp {
switch mode {
case .Alloc: return .Alloc
case .Alloc_Non_Zeroed: return .Alloc_NoZero
case .Free: return .Free
case .Free_All: return .Reset
case .Resize: return size_diff > 0 ? .Grow : .Shrink
case .Resize_Non_Zeroed: return size_diff > 0 ? .Grow_NoZero : .Shrink
case .Query_Features: return .Query
case .Query_Info: return .Query
}
panic_contextless("Impossible path")
}
odin_allocator_wrap_proc :: proc(
allocator_data : rawptr,
mode : Odin_AllocatorMode,
size : int,
alignment : int,
old_memory : rawptr,
old_size : int,
loc := #caller_location
) -> ( data : []byte, alloc_error : Odin_AllocatorError)
// See: core/mem.odin, I wanted to study it an didn't like the naming.
@(require_results)
calc_padding_with_header :: proc "contextless" (pointer: uintptr, alignment: uintptr, header_size: int) -> int
{
input := AllocatorProc_In {
data = (transmute(^AllocatorInfo)allocator_data).data,
requested_size = size,
alignment = alignment,
old_allocation = slice(transmute([^]byte)old_memory, old_size),
op = odin_allocator_mode_to_allocator_op(mode, size - old_size),
}
output: AllocatorProc_Out
resolve_allocator_proc((transmute(^Odin_Allocator)allocator_data).procedure)(input, & output)
alignment_offset := pointer & (alignment - 1)
#partial switch mode {
case .Query_Features:
debug_trap() // TODO(Ed): Finish this...
return nil, nil
case .Query_Info:
info := (^Odin_AllocatorQueryInfo)(old_memory)
if info != nil && info.pointer != nil {
info.size = output.left
info.alignment = cast(int) (transmute(AllocatorQueryInfo)output).alignment
return slice(transmute(^byte)info, size_of(info^) ), nil
initial_padding := uintptr(0)
if alignment_offset != 0 {
initial_padding = alignment - alignment_offset
}
header_space_adjustment := uintptr(header_size)
if initial_padding < header_space_adjustment
{
additional_space_needed := header_space_adjustment - initial_padding
unaligned_extra_space := additional_space_needed & (alignment - 1)
if unaligned_extra_space > 0 {
initial_padding += alignment * (1 + (additional_space_needed / alignment))
}
else {
initial_padding += alignment * (additional_space_needed / alignment)
}
return nil, nil
}
return output.allocation, cast(Odin_AllocatorError)output.error
return int(initial_padding)
}
// Wraps a context allocator into an Odin_Allocator whose data points at a
// thread-local slot ("cursed": each call clobbers the same per-thread slot,
// so only the most recent wrap per thread stays valid).
// Fix: the closing brace of this proc was missing (truncated).
odin_ainfo_wrap :: #force_inline proc(ainfo := context.allocator) -> Odin_Allocator {
	@(thread_local)
	cursed_allocator_wrap_ref : Odin_Allocator
	cursed_allocator_wrap_ref = {ainfo.procedure, ainfo.data}
	return {odin_allocator_wrap_proc, & cursed_allocator_wrap_ref}
}
// Returns a pointer to the first byte just past the end of slice s.
memory_after :: #force_inline proc "contextless" ( s: []byte ) -> ( ^ byte) {
	// cursor(s) yields a multipointer to s's data; indexing [len(s):] advances past the end.
	return cursor(s)[len(s):]
}
// Returns a byte multipointer to the memory immediately following the header value.
memory_after_header :: #force_inline proc "contextless" ( header : ^($ Type) ) -> ( [^]byte) {
	return cast( [^]byte) ptr_offset( header, 1 )
}
@(require_results)
memory_align_formula :: #force_inline proc "contextless" ( size, align : uint) -> uint {
	// Round size up to the next multiple of align (align need not be a power of two).
	return ((size + align - 1) / align) * align
}
// This is here just for docs.
// How many bytes past the previous alignment boundary the address sits.
memory_misalignment :: #force_inline proc ( address, alignment : uintptr) -> uint {
	assert(is_power_of_two(alignment))
	mask := alignment - 1 // address % alignment, via the power-of-two mask
	return uint( address & mask )
}
// This is here just for docs.
// Rounds address up to the next multiple of alignment (power of two required).
// (Name typo "aign" kept: callers reference it.)
@(require_results)
memory_aign_forward :: #force_inline proc( address, alignment : uintptr) -> uintptr
{
	assert(is_power_of_two(alignment))
	// Equivalent to adding (alignment - misalignment) only when misaligned.
	mask := alignment - 1
	return (address + mask) & ~mask
}
// align_up :: proc(address: uintptr, alignment: uintptr) -> uintptr {
// return (address + alignment - 1) & ~(alignment - 1)
// }
//endregion Memory Math
swap :: #force_inline proc "contextless" ( a, b : ^ $Type ) -> ( ^ Type, ^ Type ) { return b, a }

View File

@@ -5,10 +5,14 @@ import "base:builtin"
import "base:intrinsics"
atomic_thread_fence :: intrinsics.atomic_thread_fence
mem_zero :: intrinsics.mem_zero
mem_zero_volatile :: intrinsics.mem_zero_volatile
mem_copy :: intrinsics.mem_copy_non_overlapping
mem_copy_overlapping :: intrinsics.mem_copy
// mem_zero :: intrinsics.mem_zero
// mem_copy :: intrinsics.mem_copy_non_overlapping
// mem_copy_overlapping :: intrinsics.mem_copy
// Thin contextless wrappers over compiler intrinsics so they can participate in proc groups.
mem_zero :: #force_inline proc "contextless" (data: rawptr, len: int) { intrinsics.mem_zero (data, len) }
mem_copy_non_overlapping :: #force_inline proc "contextless" (dst, src: rawptr, len: int) { intrinsics.mem_copy_non_overlapping(dst, src, len) }
mem_copy :: #force_inline proc "contextless" (dst, src: rawptr, len: int) { intrinsics.mem_copy (dst, src, len) }
import "base:runtime"
Assertion_Failure_Proc :: runtime.Assertion_Failure_Proc
@@ -20,8 +24,9 @@ import "base:runtime"
slice_copy_overlapping :: runtime.copy_slice
import fmt_io "core:fmt"
// % based template formatters
str_pfmt_out :: fmt_io.printf
str_pfmt_tmp :: #force_inline proc(fmt: string, args: ..any, newline := false) -> string { context.temp_allocator = odin_ainfo_wrap(context.temp_allocator); return fmt_io.tprintf(fmt, ..args, newline = newline) }
str_pfmt_tmp :: #force_inline proc(fmt: string, args: ..any, newline := false) -> string { context.temp_allocator = odin_ainfo_giftwrap(context.temp_allocator); return fmt_io.tprintf(fmt, ..args, newline = newline) }
str_pfmt :: fmt_io.aprintf // Decided to make aprintf the default. (It will always be the default allocator)
str_pfmt_builder :: fmt_io.sbprintf
str_pfmt_buffer :: fmt_io.bprintf
@@ -39,6 +44,13 @@ import "core:mem"
Odin_AllocatorQueryInfo :: mem.Allocator_Query_Info
Odin_AllocatorError :: mem.Allocator_Error
align_forward_int :: mem.align_forward_int
align_forward_uintptr :: mem.align_backward_uintptr
align_forward_raw :: mem.align_forward
is_power_of_two :: mem.is_power_of_two
align_pow2 :: mem.align_forward_int
import core_os "core:os"
FS_Open_Readonly :: core_os.O_RDONLY
FS_Open_Writeonly :: core_os.O_WRONLY
@@ -78,10 +90,40 @@ import "core:time"
time_date :: time.date
time_now :: time.now
import "core:unicode/utf8"
str_rune_count :: utf8.rune_count_in_string
runes_to_string :: utf8.runes_to_string
// string_to_runes :: utf8.string_to_runes
// Overload sets ("proc groups") giving short generic names to the slice/string helpers.
cursor :: proc {
	slice_cursor,
	string_cursor,
}
end :: proc {
	slice_end,
	string_end,
}
to_string :: proc {
	strings.to_string,
}
copy :: proc {
	mem_copy,
	slice_copy,
}
// NOTE(review): the name says non-overlapping but this group includes
// slice_copy_overlapping (runtime.copy_slice) — likely meant a non-overlapping
// slice copy; confirm intent. (Name also has a typo: "overlaping".)
copy_non_overlaping :: proc {
	mem_copy_non_overlapping,
	slice_copy_overlapping,
}
to_bytes :: proc {
	slice_to_bytes,
}
zero :: proc {
	mem_zero,
	slice_zero,
}

View File

@@ -0,0 +1,168 @@
package grime
// Fixed-capacity ring (circular) buffer with compile-time size.
// head indexes the oldest element, tail the next write slot, num the live count.
RingBufferFixed :: struct( $Type: typeid, $Size: u32 ) {
	head : u32,
	tail : u32,
	num : u32,
	items : [Size] Type,
}
// Resets the buffer to empty; stored item memory is left as-is.
ringbuf_fixed_clear :: #force_inline proc "contextless" ( buffer : ^RingBufferFixed( $Type, $Size)) {
	buffer.head = 0
	buffer.tail = 0
	buffer.num  = 0
}
// True when every slot holds a live element.
ringbuf_fixed_is_full :: #force_inline proc "contextless" ( buffer : RingBufferFixed( $Type, $Size)) -> bool {
	return buffer.num == Size
}
// True when the buffer holds no live elements.
ringbuf_fixed_is_empty :: #force_inline proc "contextless" ( buffer : RingBufferFixed( $Type, $Size)) -> bool {
	return buffer.num == 0
}
// Borrow a pointer to the oldest element without removing it.
ringbuf_fixed_peek_front_ref :: #force_inline proc "contextless" ( buffer : ^RingBufferFixed( $Type, $Size)) -> ^Type {
	assert(buffer.num > 0, "Attempted to peek an empty ring buffer")
	return & buffer.items[ buffer.head ]
}
// Copy of the oldest element without removing it.
ringbuf_fixed_peek_front :: #force_inline proc "contextless" ( buffer : RingBufferFixed( $Type, $Size)) -> Type {
	assert(buffer.num > 0, "Attempted to peek an empty ring buffer")
	return buffer.items[ buffer.head ]
}
// Copy of the most recently pushed element. (Name typo "peak" kept: callers reference it.)
ringbuf_fixed_peak_back :: #force_inline proc ( buffer : RingBufferFixed( $Type, $Size)) -> Type {
	assert(buffer.num > 0, "Attempted to peek an empty ring buffer")
	// Step back one slot from tail; +Size keeps the unsigned math from wrapping below zero.
	last := (buffer.tail + u32(Size) - 1) % u32(Size)
	return buffer.items[ last ]
}
// Appends value; when full, the oldest element is overwritten (head advances).
ringbuf_fixed_push :: #force_inline proc(buffer: ^RingBufferFixed($Type, $Size), value: Type) {
	if buffer.num == Size {
		buffer.head = (buffer.head + 1) % Size
	}
	else {
		buffer.num += 1
	}
	buffer.items[ buffer.tail ] = value
	buffer.tail = (buffer.tail + 1) % Size
}
// Appends a slice of items, overwriting the oldest entries when capacity is
// exceeded. When the slice is larger than the buffer only its last Size
// elements are kept. Returns the number of items actually stored.
// Fix: the branch condition was `items_to_add <= size`, which is always true
// (items_to_add = min(slice_size, size)), so the oversized-slice path was dead
// and oversized slices kept their FIRST size elements instead of the last.
// Also removed an unused local (`second_chunk`).
ringbuf_fixed_push_slice :: proc(buffer: ^RingBufferFixed($Type, $Size), slice: []Type) -> u32
{
	size       := u32(Size)
	slice_size := u32(len(slice))
	if slice_size == 0 do return 0
	items_to_add := min( slice_size, size )
	items_added  : u32 = 0
	if items_to_add > size - buffer.num
	{
		// Some or all existing items will be overwritten; advance head past them.
		overwrite_count := items_to_add - (size - buffer.num)
		buffer.head = (buffer.head + overwrite_count) % size
		buffer.num  = size
	}
	else
	{
		buffer.num += items_to_add
	}
	if slice_size <= size
	{
		// Case 1: Slice fits entirely in the buffer (possibly wrapping).
		space_to_end := size - buffer.tail
		first_chunk  := min(items_to_add, space_to_end)
		// First copy: from tail to end of buffer.
		copy( buffer.items[ buffer.tail: ], slice[ :first_chunk ] )
		if first_chunk < items_to_add {
			// Second copy: wrap around to start of buffer.
			copy( buffer.items[:], slice[ first_chunk : items_to_add ] )
		}
		buffer.tail = (buffer.tail + items_to_add) % size
		items_added = items_to_add
	}
	else
	{
		// Case 2: Slice is larger than buffer, only keep its last Size elements.
		to_add := slice[ slice_size - size: ]
		copy( buffer.items[:], to_add )
		buffer.head = 0
		buffer.tail = 0
		buffer.num  = size
		items_added = size
	}
	return items_added
}
// Removes and returns the oldest element.
ringbuf_fixed_pop :: #force_inline proc "contextless" ( buffer : ^RingBufferFixed( $Type, $Size )) -> Type {
	assert(buffer.num > 0, "Attempted to pop an empty ring buffer")
	oldest := buffer.items[ buffer.head ]
	buffer.head = ( buffer.head + 1 ) % Size
	buffer.num -= 1
	return oldest
}
// Iterator state for walking a RingBufferFixed from newest to oldest.
// items is a view of the buffer's storage; index is the next slot to yield.
RingBufferFixedIterator :: struct( $Type : typeid) {
	items : []Type,
	head : u32,
	tail : u32,
	index : u32,
	remaining : u32,
}
// Builds an iterator positioned at the most recently pushed item (newest-to-oldest order).
iterator_ringbuf_fixed :: proc(buffer: ^RingBufferFixed($Type, $Size)) -> RingBufferFixedIterator(Type)
{
	buf_size := u32(Size)
	result := RingBufferFixedIterator(Type){
		items     = buffer.items[:],
		head      = buffer.head,
		tail      = buffer.tail,
		remaining = buffer.num,
	}
	if buffer.num == 0 {
		result.index = buffer.tail // Never read: remaining == 0 stops iteration immediately.
	}
	else {
		// Start one slot before tail, i.e. the last pushed item.
		result.index = (buffer.tail + buf_size - 1) % buf_size
	}
	return result
}
// Yields a pointer to the current item and steps backwards (toward older items);
// returns nil once exhausted.
next_ringbuf_fixed_iterator :: proc(iter : ^RingBufferFixedIterator( $Type)) -> ^Type
{
	if iter.remaining == 0 do return nil
	buf_size := cast(u32) len(iter.items)
	current  := & iter.items[iter.index]
	// Move to the previous slot, wrapping; +buf_size keeps the unsigned math safe.
	iter.index      = (iter.index + buf_size - 1) % buf_size
	iter.remaining -= 1
	return current
}

10
code2/grime/stirngs.odin Normal file
View File

@@ -0,0 +1,10 @@
package grime
// Raw layout of a string: data pointer + length.
// NOTE(review): presumably mirrors base:runtime's Raw_String — confirm it stays in sync.
Raw_String :: struct {
	data: [^]byte,
	len: int,
}
// String views of the slice helpers: each transmutes the string to []byte and forwards.
string_cursor :: proc(s: string) -> [^]u8 { return slice_cursor(transmute([]byte) s) }
string_copy :: proc(dst, src: string) { slice_copy (transmute([]byte) dst, transmute([]byte) src) }
string_end :: proc(s: string) -> ^u8 { return slice_end (transmute([]byte) s) }
string_assert :: proc(s: string) { slice_assert(transmute([]byte) s) }

View File

@@ -0,0 +1,72 @@
package grime
import "core:c"
import "core:c/libc"
import "core:fmt"
import "core:mem"
import core_virtual "core:mem/virtual"
import "core:strings"
import win32 "core:sys/windows"
// Waits on a Win32 high-resolution waitable timer for roughly desired_ms milliseconds.
// Returns false on any handled failure path; true otherwise.
thread__highres_wait :: proc( desired_ms : f64, loc := #caller_location ) -> b32
{
	timer := win32.CreateWaitableTimerExW( nil, nil, win32.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, win32.TIMER_ALL_ACCESS )
	if timer == nil {
		msg := str_fmt("Failed to create win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}
	// NOTE(review): SetWaitableTimerEx treats a positive due time as an absolute time and a
	// negative one as relative, measured in 100 ns units — confirm MS_To_NS and the sign here
	// produce the intended relative wait.
	due_time := win32.LARGE_INTEGER(desired_ms * MS_To_NS)
	result := win32.SetWaitableTimerEx( timer, & due_time, 0, nil, nil, nil, 0 )
	if ! result {
		msg := str_fmt("Failed to set win32 timer - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Warning, loc)
		return false
	}
	WAIT_ABANDONED : win32.DWORD : 0x00000080
	WAIT_IO_COMPLETION : win32.DWORD : 0x000000C0
	WAIT_OBJECT_0 : win32.DWORD : 0x00000000
	WAIT_TIMEOUT : win32.DWORD : 0x00000102
	WAIT_FAILED : win32.DWORD : 0xFFFFFFFF
	wait_result := win32.WaitForSingleObjectEx( timer, win32.INFINITE, win32.BOOL(true) )
	switch wait_result
	{
	case WAIT_ABANDONED:
		msg := str_fmt("Failed to wait for win32 timer - Error: WAIT_ABANDONED" )
		log( msg, LogLevel.Error, loc)
		return false
	case WAIT_IO_COMPLETION:
		msg := str_fmt("Waited for win32 timer: Ended by APC queued to the thread" )
		log( msg, LogLevel.Error, loc)
		return false
	case WAIT_OBJECT_0:
		// NOTE(review): WAIT_OBJECT_0 means the timer signalled (the wait succeeded),
		// yet this path returns false — confirm whether true was intended here.
		msg := str_fmt("Waited for win32 timer- Reason : WAIT_OBJECT_0" )
		log( msg, loc = loc)
		return false
	case WAIT_FAILED:
		// Fix: the format verb was "$v" (literal text); corrected to "%v" so the error code prints.
		msg := str_fmt("Waited for win32 timer failed - ErrorCode: %v", win32.GetLastError() )
		log( msg, LogLevel.Error, loc)
		return false
	}
	return true
}
// Requests a Windows scheduler tick of desired_ms via timeBeginPeriod; true on success.
// NOTE(review): each successful call should be paired with timeEndPeriod — confirm callers do.
set__scheduler_granularity :: proc "contextless" ( desired_ms : u32 ) -> b32 {
	return win32.timeBeginPeriod( desired_ms ) == win32.TIMERR_NOERROR
}
// Win32 system error codes used by the virtual-memory paths (see winerror.h).
WIN32_ERROR_INVALID_ADDRESS :: 487 // ERROR_INVALID_ADDRESS
WIN32_ERROR_COMMITMENT_LIMIT :: 1455 // ERROR_COMMITMENT_LIMIT

38
code2/grime/unicode.odin Normal file
View File

@@ -0,0 +1,38 @@
package grime
rune16 :: distinct u16
// Exposing the alloc_error.
// Decodes a UTF-8 string into a freshly allocated slice of runes.
@(require_results)
string_to_runes :: proc ( content : string, allocator := context.allocator) -> (runes : []rune, alloc_error: Odin_AllocatorError) #optional_allocator_error {
	runes, alloc_error = make([]rune, str_rune_count(content), allocator)
	if runes == nil || alloc_error != .None {
		return
	}
	write_index := 0
	for codepoint in content {
		runes[write_index] = codepoint
		write_index += 1
	}
	return
}
// Decodes a UTF-8 string into a project Array(rune), returning its backing slice.
string_to_runes_array :: proc( content : string, allocator := context.allocator ) -> ( []rune, AllocatorError )
{
	rune_total := cast(u64) str_rune_count(content)
	arr, error := make( Array(rune), rune_total, allocator )
	if error != .None {
		return nil, error
	}
	runes       := array_to_slice_capacity(arr)
	write_index := 0
	for codepoint in content {
		runes[write_index] = codepoint
		write_index += 1
	}
	return runes, error
}

View File

@@ -0,0 +1,116 @@
/* Virtual Memory OS Interface
This is an alternative to the virtual core library provided by odin, suppport setting the base address among other things.
*/
package grime
import "core:mem"
import core_virtual "core:mem/virtual"
import "core:os"
// Bookkeeping stored at the very front of a reserved region.
VirtualMemoryRegionHeader :: struct {
	committed : uint, // Bytes committed, measured from base_address.
	reserved : uint, // Total reserved bytes. NOTE(review): the platform impls disagree on whether this includes the header — confirm.
	reserve_start : [^]byte, // First usable byte after this header.
}
// Non-owning handle; the header lives at the start of the reservation itself.
VirtualMemoryRegion :: struct {
	using base_address : ^VirtualMemoryRegionHeader
}
// Returns the OS page size, caching the value after the first query.
// NOTE(review): the cache write is unsynchronized; benign only if os.get_page_size is stable.
virtual_get_page_size :: proc "contextless" () -> int {
	@static cached_page_size := 0
	if cached_page_size == 0 {
		cached_page_size = os.get_page_size()
	}
	return cached_page_size
}
// Usable reserved bytes, i.e. the reservation minus the header that precedes reserve_start.
virtual_reserve_remaining :: proc "contextless" ( vmem : VirtualMemoryRegion ) -> uint {
	header_bytes := cast(uint) (uintptr(vmem.reserve_start) - uintptr(vmem.base_address))
	return vmem.reserved - header_bytes
}
// Commits memory up to `size` bytes (page-rounded) from the region's base.
// No-op when at least that much is already committed.
// Fix: removed an unused local (`header_size`) — Odin errors on unused variables.
@(require_results)
virtual_commit :: proc "contextless" ( using vmem : VirtualMemoryRegion, size : uint ) -> ( alloc_error : AllocatorError )
{
	if size < committed {
		return .None
	}
	page_size := uint(virtual_get_page_size())
	to_commit := memory_align_formula( size, page_size )
	alloc_error = core_virtual.commit( base_address, to_commit )
	if alloc_error != .None {
		return alloc_error
	}
	// NOTE(review): records the requested size, not the page-rounded to_commit — confirm intended.
	base_address.committed = size
	return alloc_error
}
// Decommits size bytes starting at the region's base (pages return to reserved-only).
virtual_decommit :: proc "contextless" ( vmem : VirtualMemoryRegion, size : uint ) {
	core_virtual.decommit( vmem.base_address, size )
}
// Applies protection flags to a page-aligned region; false if the region
// length is not a whole number of pages or the OS call fails.
virtual_protect :: proc "contextless" ( vmem : VirtualMemoryRegion, region : []byte, flags : VirtualProtectFlags ) -> b32
{
	page_size           := virtual_get_page_size()
	whole_pages := len(region) % page_size == 0
	if ! whole_pages do return false
	return cast(b32) core_virtual.protect( raw_data(region), len(region), flags )
}
// Reserves (without committing) a page-rounded region of `size` bytes,
// optionally at base_address, via the platform-specific impl.
@(require_results)
virtual_reserve :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( VirtualMemoryRegion, AllocatorError ) {
	page_size := uint(virtual_get_page_size())
	to_reserve := memory_align_formula( size, page_size )
	return virtual_resreve__platform_impl( base_address, to_reserve )
}
// Reserves reserve_size bytes then commits the first commit_size of them.
// Fails with .Invalid_Argument when commit_size exceeds reserve_size.
@(require_results)
virtual_reserve_and_commit :: proc "contextless" (
	base_address : uintptr, reserve_size, commit_size : uint
) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	if commit_size > reserve_size do return vmem, .Invalid_Argument
	vmem, alloc_error = virtual_reserve( base_address, reserve_size )
	if alloc_error == .None {
		alloc_error = virtual_commit( vmem, commit_size )
	}
	return
}
// Returns the entire reservation (header included) to the OS.
virtual_release :: proc "contextless" ( vmem : VirtualMemoryRegion ) {
	core_virtual.release( vmem.base_address, vmem.reserved )
}
// If the OS is not windows, we just use the library's interface which does not support base_address.
// If the OS is not windows, we just use the library's interface which does not support base_address.
when ODIN_OS != OS_Type.Windows {
	// Reserves header + size bytes, commits the header, and fills in the region handle.
	// Fixes: `:=` was re-declaring the named return and assigning to struct fields,
	// `reserve(...) or_return` was misused, commit was called with the wrong arity,
	// and len(data) (int) was stored into reserved (uint) without a cast.
	virtual_resreve__platform_impl :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
	{
		header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)
		// Ignoring the base address, add an os specific impl if you want it.
		data : []byte
		data, alloc_error = core_virtual.reserve( header_size + size )
		if alloc_error != .None {
			return
		}
		alloc_error = core_virtual.commit( raw_data(data), header_size )
		if alloc_error != .None {
			return
		}
		vmem.base_address  = cast( ^VirtualMemoryRegionHeader ) raw_data(data)
		vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
		vmem.reserved      = cast(uint) len(data)
		vmem.committed     = header_size
		return
	}
} // END: ODIN_OS != runtime.Odin_OS_Type.Windows

View File

@@ -0,0 +1,40 @@
package grime
import "core:mem"
import win32 "core:sys/windows"
// Windows impl: reserves header + size bytes (optionally at base_address) and commits the header.
@(require_results)
virtual_resreve__platform_impl :: proc "contextless" ( base_address : uintptr, size : uint ) -> ( vmem : VirtualMemoryRegion, alloc_error : AllocatorError )
{
	header_size := cast(uint) memory_align_formula(size_of(VirtualMemoryRegionHeader), mem.DEFAULT_ALIGNMENT)
	// Reserve the whole span (header + usable bytes) in one call.
	result := win32.VirtualAlloc( rawptr(base_address), header_size + size, win32.MEM_RESERVE, win32.PAGE_READWRITE )
	if result == nil {
		alloc_error = .Out_Of_Memory
		return
	}
	// Commit the header pages at the address actually reserved.
	// Fix: previously committed at base_address, which is wrong when base_address == 0
	// and the OS chose the reservation address.
	result = win32.VirtualAlloc( result, header_size, win32.MEM_COMMIT, win32.PAGE_READWRITE )
	if result == nil
	{
		switch err := win32.GetLastError(); err
		{
			case 0:
				alloc_error = .Invalid_Argument
				return
			case WIN32_ERROR_INVALID_ADDRESS, WIN32_ERROR_COMMITMENT_LIMIT:
				alloc_error = .Out_Of_Memory
				return
		}
		alloc_error = .Out_Of_Memory
		return
	}
	vmem.base_address  = cast(^VirtualMemoryRegionHeader) result
	vmem.reserve_start = cast([^]byte) (uintptr(vmem.base_address) + uintptr(header_size))
	// Fix: include the header so virtual_release frees the full reservation
	// (matches the non-Windows impl, which records len(data) = header + size).
	vmem.reserved      = header_size + size
	vmem.committed     = header_size
	alloc_error = .None
	return
}