Compare commits

...

3 Commits

Author SHA1 Message Date
Ed_
3223c0a0e1 Lottes C--: key tables... 2025-10-03 17:03:33 -04:00
Ed_
0bd68bccf0 lottes c--: arena impl 2025-10-03 16:18:03 -04:00
Ed_
5f03118a0d got to varenas (lottes c--)
Still not sure how hybrid I'm going to go (away from his more purist stance on typed info embedding).

If I were to codegen this and wanted typeless vs typed for debug it would be easier to malleably switch between...
If I decide for this sample to go full asm (almost no C features), we can still keep the type info for debug...
2025-10-03 15:56:42 -04:00
2 changed files with 829 additions and 41 deletions

View File

@@ -31,6 +31,10 @@ https://youtu.be/RrL7121MOeA
#pragma clang diagnostic ignored "-Wimplicit-function-declaration"
#pragma clang diagnostic ignored "-Wcast-align"
#pragma clang diagnostic ignored "-Wunused-parameter"
#pragma clang diagnostic ignored "-Wswitch-default"
#pragma clang diagnostic ignored "-Wmissing-field-initializers"
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#pragma clang diagnostic ignored "-Wpointer-sign"
#pragma region Header
@@ -121,6 +125,7 @@ enum { false = 0, true = 1, true_overflow, };
#define cast(type, data) ((type)(data))
#define pcast(type, data) * cast(type*, & (data))
#define nullptr cast(void*, 0)
#define null cast(U8, 0)
#define offset_of(type, member) cast(U8, & (((type*) 0)->member))
#define size_of(data) cast(U8, sizeof(data))
@@ -194,8 +199,8 @@ inline U8 align_pow2(U8 x, U8 b);
#define align_struct(type_width) ((U8)(((type_width) + 7) / 8 * 8))
#define assert_bounds(point, start, end) do { \
assert(pos_start <= pos_point); \
assert(pos_point <= pos_end); \
assert(start <= point); \
assert(point <= end); \
} while(0)
U8 mem_copy (U8 dest, U8 src, U8 length);
@@ -227,9 +232,10 @@ finline void BarW(void){__builtin_ia32_sfence();} // Write Barrier
#define sll_queue_push_n(f, l, n, next) sll_queue_push_nz(0, f, l, n, next)
typedef def_struct(Slice_Mem) { U8 ptr; U8 len; };
#define slice_mem(ptr, len) (Slice_Mem){ptr, len}
#define def_Slice(type) def_struct(tmpl(Slice,type)) { type*R_ ptr; U8 len; }; typedef def_ptr_set(tmpl(Slice,type))
#define slice_assert(slice) do { assert((slice).ptr != nullptr); assert((slice).len > 0); } while(0)
#define slice_assert(slice) do { assert((slice).ptr != 0); assert((slice).len > 0); } while(0)
#define slice_end(slice) ((slice).ptr + (slice).len)
#define size_of_slice_type(slice) size_of( * (slice).ptr )
@@ -248,6 +254,21 @@ void slice__zero(Slice_B1 mem, U8 typewidth);
#define slice_iter(container, iter) typeof((container).ptr) iter = (container).ptr; iter != slice_end(container); ++ iter
#define slice_arg_from_array(type, ...) & (tmpl(Slice,type)) { .ptr = farray_init(type, __VA_ARGS__), .len = farray_len( farray_init(type, __VA_ARGS__)) }
#define span_iter(type, iter, m_begin, op, m_end) \
tmpl(Iter_Span,type) iter = { \
.r = {(m_begin), (m_end)}, \
.cursor = (m_begin) }; \
iter.cursor op iter.r.end; \
++ iter.cursor
#define def_span(type) \
def_struct(tmpl( Span,type)) { type begin; type end; }; \
typedef def_struct(tmpl(Iter_Span,type)) { tmpl(Span,type) r; type cursor; }
typedef def_span(B1);
typedef def_span(U4);
typedef def_span(U8);
#pragma endregion Memory
#pragma region Math
@@ -287,7 +308,7 @@ typedef struct AllocatorSP AllocatorSP;
typedef void def_proc(AllocatorProc) (AllocatorProc_In In, AllocatorProc_Out_R Out);
struct AllocatorSP {
AllocatorProc* type_sig;
S8 slot;
U8 slot;
};
struct AllocatorProc_In {
U8 data;
@@ -314,8 +335,8 @@ struct AllocatorProc_Out {
A4_B1 _PAD_2;
};
typedef def_struct(AllocatorInfo) {
U8 proc;
U8 data;
AllocatorProc* proc;
U8 data;
};
static_assert(size_of(AllocatorSP) <= size_of(Slice_Mem));
typedef def_struct(AllocatorQueryInfo) {
@@ -371,7 +392,7 @@ typedef def_struct(FArena) {
typedef def_ptr_set(FArena);
FArena farena_make (Slice_Mem mem);
void farena_init (FArena_R arena, Slice_Mem byte);
Slice_Mem farena__push (FArena_R arena, U8 amount, U8 type_width, Opts_farena* opts);
Slice_Mem farena__push (FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts);
void farena_reset (FArena_R arena);
void farena_rewind(FArena_R arena, AllocatorSP save_point);
AllocatorSP farena_save (FArena arena);
@@ -379,11 +400,13 @@ AllocatorSP farena_save (FArena arena);
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define ainfo_farena(arena) (AllocatorInfo){ .proc = farena_allocator_proc, .data = & arena }
#define farena_push_mem(arena, amount, ...) farena__push(arena, amount, 1, opt_args(Opts_farena, lit(stringify(B1)), __VA_ARGS__))
#define farena_push(arena, type, ...) \
cast(type*, farena__push(arena, size_of(type), 1, opt_args(Opts_farena_push, lit(stringify(type)), __VA_ARGS__))).ptr
cast(type*, farena__push(arena, size_of(type), 1, opt_args(Opts_farena, lit(stringify(type)), __VA_ARGS__)).ptr)
#define farena_push_array(arena, type, amount, ...) \
(Slice ## type){ farena__push(arena, size_of(type), amount, opt_args(Opts_farena_push, lit(stringify(type)), __VA_ARGS__)).ptr, amount }
(Slice ## type){ farena__push(arena, size_of(type), amount, opt_args(Opts_farena, lit(stringify(type)), __VA_ARGS__)).ptr, amount }
#pragma endregion FArena
#pragma region OS
@@ -399,7 +422,7 @@ typedef def_struct(Opts_vmem) {
A4_B1 _PAD_;
};
void os_init(void);
OS_SystemInfo_R os_system_info(void);
OS_SystemInfo* os_system_info(void);
inline B4 os__vmem_commit (U8 vm, U8 size, Opts_vmem*R_ opts);
inline U8 os__vmem_reserve( U8 size, Opts_vmem*R_ opts);
@@ -430,8 +453,8 @@ typedef def_struct(Opts_varena_make) {
VArenaFlags flags;
A4_B1 _PAD_;
};
VArena* varena__make(Opts_varena_make* opts);
#define varena_make(...) varena__make(opt_args(Opts_varena_make, __VA_ARGS__))
VArena_R varena__make(Opts_varena_make*R_ opts);
#define varena_make(...) varena__make(opt_args(Opts_varena_make, __VA_ARGS__))
Slice_Mem varena__push (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
void varena_release(VArena_R arena);
@@ -443,6 +466,8 @@ AllocatorSP varena_save (VArena_R arena);
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define ainfo_varena(varena) (AllocatorInfo) { .proc = & varena_allocator_proc, .data = varena }
#define varena_push_mem(arena, amount, ...) varena__push(arena, amount, 1, opt_args(Opts_varena, lit(stringify(B1)), __VA_ARGS__))
#define varena_push(arena, type, ...) \
cast(type*R_, varena__push(arena, 1, size_of(type), opt_args(Opts_varena, lit(stringify(type)), __VA_ARGS__) ).ptr)
@@ -459,15 +484,15 @@ typedef def_enum(U4, ArenaFlags) {
typedef def_struct(Arena) {
VArena_R backing;
Arena_R prev;
U8 current;
Arena_R current;
U8 base_pos;
U8 pos;
ArenaFlags flags;
A4_B1 _PAD_;
};
typedef Opts_varena_make Opts_arena_make;
U8 arena__make (Opts_arena_make*R_ opts);
Slice_Mem arena__push (Arena_R arena, U8 amount, U8 type_width, Opts_arena* opts);
Arena_R arena__make (Opts_arena_make*R_ opts);
Slice_Mem arena__push (Arena_R arena, U8 amount, U8 type_width, Opts_arena*R_ opts);
void arena_release(Arena_R arena);
void arena_reset (Arena_R arena);
void arena_rewind (Arena_R arena, AllocatorSP save_point);
@@ -478,6 +503,8 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
#define arena_make(...) arena__make(opt_args(Opts_arena_make, __VA_ARGS__))
#define arena_push_mem(arena, amount, ...) arena__push(arena, amount, 1, opt_args(Opts_arena, lit(stringify(B1)), __VA_ARGS__))
#define arena_push(arena, type, ...) \
cast(type*R_, arena__push(arena, 1, size_of(type), opt_args(Opts_arena, lit(stringify(type)), __VA_ARGS__) ).ptr)
@@ -489,12 +516,10 @@ cast(type*R_, arena__push(arena, 1, size_of(type), opt_args(Opts_arena, lit(stri
finline
void hash64_djb8(U8_R hash, Slice_Mem bytes) {
U8 elem = bytes.ptr;
U8 curr = hash[0];
loop:
hash[0] <<= 8;
hash[0] += hash[0];
curr += elem;
hash[0] = curr;
hash[0] += u1_r(elem)[0];
if (elem != bytes.ptr + bytes.len)
goto end;
++ elem;
@@ -589,7 +614,7 @@ typedef def_struct(KT1CX_Info) {
AllocatorInfo backing_table;
AllocatorInfo backing_cells;
};
void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result);
void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte*R_ result);
void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta);
U8 kt1cx_slot_id(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta);
U8 kt1cx_get (KT1CX_Byte kt, U8 key, KT1CX_ByteMeta meta);
@@ -772,34 +797,796 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);
#pragma region Implementation
#pragma region Memory Operations
void* __cdecl memcpy (void* _Dst, void const* _Src, U8 _Size);
void* __cdecl memmove(void* _Dst, void const* _Src, U8 _Size);
void* __cdecl memset (void* _Dst, int _Val, U8 _Size);
void* __cdecl memcpy (void*R_ _Dst, void const*R_ _Src, U8 _Size);
void* __cdecl memmove(void* _Dst, void const* _Src, U8 _Size);
void* __cdecl memset (void*R_ _Dst, int _Val, U8 _Size);
inline
U8 align_pow2(U8 x, U8 b) {
assert(b != 0);
assert((b & (b - 1)) == 0); // Check power of 2
return ((x + b - 1) & (~(b - 1)));
}
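// Worked examples of the rounding above: align_pow2(13, 8) == 16, align_pow2(16, 8) == 16,
// align_pow2(1, 4096) == 4096.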
U8 memory_copy(U8_R dest, U8_R src, U8 len) __asm__("memcpy");
U8 memory_copy_overlapping(U8_R dest, U8_R src, U8 len) __asm__("memmove");
U8 memory_copy(U8 dest, U8 src, U8 len) __asm__("memcpy");
U8 memory_copy_overlapping(U8 dest, U8 src, U8 len) __asm__("memmove");
inline
B4 memory_zero(U8_R dest, U8 length) {
if (dest == nullptr) return false;
memset((unsigned char*)dest, 0, length);
B4 memory_zero(U8 dest, U8 length) {
if (dest == 0) return false;
memset((void*R_)dest, 0, length);
return true;
}
inline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero( u8_r(mem.ptr), mem.len); }
inline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero(u8_(mem.ptr), mem.len); }
inline
void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
assert(dest.len >= src.len);
slice_assert(dest);
slice_assert(src);
memory_copy(u8_r(dest.ptr), u8_r(src.ptr), src.len);
memory_copy(u8_(dest.ptr), u8_(src.ptr), src.len);
}
#pragma endregion Memory Operations
#pragma endregion Implementation
#pragma region Allocator Interface
inline
AllocatorQueryInfo allocator_query(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
AllocatorQueryInfo out; ainfo.proc((AllocatorProc_In){ .data = ainfo.data, .op = AllocatorOp_Query}, (AllocatorProc_Out_R)& out);
return out;
}
inline
void mem_free(AllocatorInfo ainfo, Slice_Mem mem) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Free, .old_allocation = mem}, &(AllocatorProc_Out){});
}
inline
void mem_reset(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Reset}, &(AllocatorProc_Out){});
}
inline
void mem_rewind(AllocatorInfo ainfo, AllocatorSP save_point) {
assert(ainfo.proc != nullptr);
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_Rewind, .save_point = save_point}, &(AllocatorProc_Out){});
}
inline
AllocatorSP mem_save_point(AllocatorInfo ainfo) {
assert(ainfo.proc != nullptr);
AllocatorProc_Out out;
ainfo.proc((AllocatorProc_In){.data = ainfo.data, .op = AllocatorOp_SavePoint}, & out);
return out.save_point;
}
inline
Slice_Mem mem__alloc(AllocatorInfo ainfo, U8 size, Opts_mem_alloc* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
AllocatorProc_In in = {
.data = ainfo.data,
.op = opts->no_zero ? AllocatorOp_Alloc_NoZero : AllocatorOp_Alloc,
.requested_size = size,
.alignment = opts->alignment,
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
return out.allocation;
}
inline
Slice_Mem mem__grow(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_grow* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
AllocatorProc_In in = {
.data = ainfo.data,
.op = opts->no_zero ? AllocatorOp_Grow_NoZero : AllocatorOp_Grow,
.requested_size = size,
.alignment = opts->alignment,
.old_allocation = mem
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
return out.allocation;
}
inline
Slice_Mem mem__resize(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_resize* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
AllocatorProc_In in = {
.data = ainfo.data,
.op = mem.len > size ? AllocatorOp_Shrink : (opts->no_zero ? AllocatorOp_Grow_NoZero : AllocatorOp_Grow),
.requested_size = size,
.alignment = opts->alignment,
.old_allocation = mem,
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
return out.allocation;
}
inline
Slice_Mem mem__shrink(AllocatorInfo ainfo, Slice_Mem mem, U8 size, Opts_mem_shrink* opts) {
assert(ainfo.proc != nullptr);
assert(opts != nullptr);
AllocatorProc_In in = {
.data = ainfo.data,
.op = AllocatorOp_Shrink,
.requested_size = size,
.alignment = opts->alignment,
.old_allocation = mem
};
AllocatorProc_Out out;
ainfo.proc(in, & out);
return out.allocation;
}
#pragma endregion Allocator Interface
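// Minimal usage sketch (illustrative only), assuming the mem_alloc convenience macro used by the
// key-table code further down: call sites hold an AllocatorInfo{proc, data} and drive whatever
// allocator backs it through the mem_* wrappers above.
inline
Slice_Mem example__allocator_interface(AllocatorInfo ainfo) {
AllocatorSP sp = mem_save_point(ainfo); // checkpoint (meaningful for arena-style backings)
Slice_Mem scratch = mem_alloc(ainfo, kilo(4)); // zeroed scratch block
mem_free (ainfo, scratch); // may be a no-op depending on the backing
mem_rewind(ainfo, sp); // roll back to the checkpoint
return mem_alloc(ainfo, kilo(1)); // fresh allocation reusing the released space
}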
#pragma region FArena (Fixed-Sized Arena)
inline
void farena_init(FArena* arena, Slice_Mem mem) {
assert(arena != nullptr);
arena->start = mem.ptr;
arena->capacity = mem.len;
arena->used = 0;
}
inline FArena farena_make(Slice_Mem mem) { FArena a; farena_init(& a, mem); return a; }
inline
Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_ opts) {
assert(opts != nullptr);
if (amount == 0) {
return (Slice_Mem){};
}
U8 desired = type_width * amount;
U8 to_commit = align_pow2(desired, opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT);
U8 unused = arena->capacity - arena->used;
assert(to_commit <= unused);
U8 ptr = arena->start + arena->used;
arena->used += to_commit;
return (Slice_Mem){ptr, desired};
}
inline void farena_reset(FArena* arena) { arena->used = 0; }
inline
void farena_rewind(FArena_R arena, AllocatorSP save_point) {
assert(save_point.type_sig == & farena_allocator_proc);
assert(save_point.slot <= arena->used); // slot stores the arena.used captured by farena_save
arena->used = save_point.slot;
}
inline
AllocatorSP farena_save (FArena arena) {
AllocatorSP sp = { .type_sig = & farena_allocator_proc, .slot = arena.used };
return sp;
}
void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
assert(out != nullptr);
assert(in.data != 0);
FArena* arena = cast(FArena*, in.data);
switch (in.op)
{
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = farena_push_mem(arena, in.requested_size, .alignment = in.alignment);
memory_zero(out->allocation.ptr, out->allocation.len * in.op);
break;
case AllocatorOp_Free:
break;
case AllocatorOp_Reset:
farena_reset(arena);
break;
case AllocatorOp_Grow:
case AllocatorOp_Grow_NoZero: {
// Check if the allocation is at the end of the arena
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = arena->start + arena->used;
if (alloc_end != arena_end) {
// Not at the end, can't grow in place
out->allocation = (Slice_Mem){0};
break;
}
// Calculate growth
U8 grow_amount = in.requested_size - in.old_allocation.len;
U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
U8 unused = arena->capacity - arena->used;
if (aligned_grow > unused) {
// Not enough space
out->allocation = (Slice_Mem){0};
break;
}
arena->used += aligned_grow;
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * (in.op - AllocatorOp_Grow_NoZero));
}
break;
case AllocatorOp_Shrink: {
// Check if the allocation is at the end of the arena
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = arena->start + arena->used;
if (alloc_end != arena_end) {
// Not at the end, can't shrink but return adjusted size
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
break;
}
// Calculate shrinkage
//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
U8 aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
arena->used -= (aligned_original - aligned_new);
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
}
break;
case AllocatorOp_Rewind:
farena_rewind(arena, in.save_point);
break;
case AllocatorOp_SavePoint:
out->save_point = farena_save(* arena);
break;
case AllocatorOp_Query:
out->features =
AllocatorQuery_Alloc
| AllocatorQuery_Reset
| AllocatorQuery_Resize
| AllocatorQuery_Rewind
;
out->max_alloc = arena->capacity - arena->used;
out->min_alloc = 0;
out->left = out->max_alloc;
out->save_point = farena_save(* arena);
break;
}
return;
}
#pragma endregion FArena
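// Minimal FArena usage sketch (illustrative only), assuming the farena_push_mem macro declared in
// the header above: a fixed arena only bumps `used` within caller-provided memory; save/rewind
// restore that offset.
inline
void example__farena(void) {
B1 backing[kilo(4)];
FArena arena = farena_make(slice_mem(u8_(backing), size_of(backing)));
Slice_Mem a = farena_push_mem(& arena, 256); // 256 B, aligned to MEMORY_ALIGNMENT_DEFAULT
AllocatorSP sp = farena_save(arena); // records arena.used
Slice_Mem b = farena_push_mem(& arena, 512);
slice_assert(a); slice_assert(b);
farena_rewind(& arena, sp); // releases b
farena_reset (& arena); // releases a as well
}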
#pragma region OS
#pragma warning(push)
#pragma warning(disable: 4820)
#pragma comment(lib, "Kernel32.lib")
#pragma comment(lib, "Advapi32.lib")
#define MS_INVALID_HANDLE_VALUE ((MS_HANDLE)(__int64)-1)
#define MS_ANYSIZE_ARRAY 1
#define MS_MEM_COMMIT 0x00001000
#define MS_MEM_RESERVE 0x00002000
#define MS_MEM_RELEASE 0x00008000
#define MS_MEM_LARGE_PAGES 0x20000000
#define MS_PAGE_READWRITE 0x04
#define MS_TOKEN_ADJUST_PRIVILEGES (0x0020)
#define MS_SE_PRIVILEGE_ENABLED (0x00000002L)
#define MS_TOKEN_QUERY (0x0008)
#define MS__TEXT(quote) L ## quote // r_winnt
#define MS_TEXT(quote) MS__TEXT(quote) // r_winnt
#define MS_SE_LOCK_MEMORY_NAME MS_TEXT("SeLockMemoryPrivilege")
typedef int MS_BOOL;
typedef unsigned long MS_DWORD;
typedef MS_DWORD* MS_PDWORD;
typedef void* MS_HANDLE;
typedef MS_HANDLE* MS_PHANDLE;
typedef long MS_LONG;
typedef S8 MS_LONGLONG;
typedef char const* MS_LPCSTR;
typedef unsigned short* MS_LPWSTR, *MS_PWSTR;
typedef void* MS_LPVOID;
typedef MS_DWORD* MS_LPDWORD;
typedef U8 MS_ULONG_PTR, *MS_PULONG_PTR;
typedef void const* MS_LPCVOID;
typedef struct MS_SECURITY_ATTRIBUTES *MS_PSECURITY_ATTRIBUTES, *MS_LPSECURITY_ATTRIBUTES;
typedef struct MS_OVERLAPPED *MS_LPOVERLAPPED;
typedef def_union(MS_LARGE_INTEGER) { struct { MS_DWORD LowPart; MS_LONG HighPart; } _; struct { MS_DWORD LowPart; MS_LONG HighPart; } u; MS_LONGLONG QuadPart; };
typedef def_struct(MS_FILE) { void* _Placeholder; };
typedef def_struct(MS_SECURITY_ATTRIBUTES) { MS_DWORD nLength; MS_LPVOID lpSecurityDescriptor; MS_BOOL bInheritHandle; };
typedef def_struct(MS_OVERLAPPED) { MS_ULONG_PTR Internal; MS_ULONG_PTR InternalHigh; union { struct { MS_DWORD Offset; MS_DWORD OffsetHigh; } _; void* Pointer; } _; MS_HANDLE hEvent; };
typedef struct MS_LUID* MS_PLUID;
typedef struct MS_LUID_AND_ATTRIBUTES* MS_PLUID_AND_ATTRIBUTES;
typedef struct MS_TOKEN_PRIVILEGES* MS_PTOKEN_PRIVILEGES;
typedef def_struct(MS_LUID) { MS_DWORD LowPart; MS_LONG HighPart; };
typedef def_struct(MS_LUID_AND_ATTRIBUTES) { MS_LUID Luid; MS_DWORD Attributes; };
typedef def_struct(MS_TOKEN_PRIVILEGES) { MS_DWORD PrivilegeCount; MS_LUID_AND_ATTRIBUTES Privileges[MS_ANYSIZE_ARRAY]; };
__declspec(dllimport) MS_BOOL __stdcall CloseHandle(MS_HANDLE hObject);
__declspec(dllimport) MS_BOOL __stdcall AdjustTokenPrivileges(MS_HANDLE TokenHandle, MS_BOOL DisableAllPrivileges, MS_PTOKEN_PRIVILEGES NewState, MS_DWORD BufferLength, MS_PTOKEN_PRIVILEGES PreviousState, MS_PDWORD ReturnLength);
__declspec(dllimport) MS_HANDLE __stdcall GetCurrentProcess(void);
__declspec(dllimport) U8 __stdcall GetLargePageMinimum(void);
__declspec(dllimport) MS_BOOL __stdcall LookupPrivilegeValueW(MS_LPWSTR lpSystemName, MS_LPWSTR lpName, MS_PLUID lpLuid);
__declspec(dllimport) MS_BOOL __stdcall OpenProcessToken(MS_HANDLE ProcessHandle, MS_DWORD DesiredAccess, MS_PHANDLE TokenHandle);
__declspec(dllimport) MS_LPVOID __stdcall VirtualAlloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationType, MS_DWORD flProtect);
__declspec(dllimport) MS_BOOL __stdcall VirtualFree (MS_LPVOID lpAddress, U8 dwSize, MS_DWORD dwFreeType);
#pragma warning(pop)
typedef def_struct(OS_Windows_State) {
OS_SystemInfo system_info;
};
global OS_Windows_State os__windows_info;
inline
OS_SystemInfo* os_system_info(void) {
return & os__windows_info.system_info;
}
inline
void os__enable_large_pages(void) {
MS_HANDLE token;
if (OpenProcessToken(GetCurrentProcess(), MS_TOKEN_ADJUST_PRIVILEGES | MS_TOKEN_QUERY, &token))
{
MS_LUID luid;
if (LookupPrivilegeValueW(0, MS_SE_LOCK_MEMORY_NAME, &luid))
{
MS_TOKEN_PRIVILEGES priv;
priv.PrivilegeCount = 1;
priv.Privileges[0].Luid = luid;
priv.Privileges[0].Attributes = MS_SE_PRIVILEGE_ENABLED;
AdjustTokenPrivileges(token, 0, & priv, size_of(priv), 0, 0);
}
CloseHandle(token);
}
}
inline
void os_init(void) {
os__enable_large_pages();
OS_SystemInfo* info = & os__windows_info.system_info;
info->target_page_size = (U8)GetLargePageMinimum();
}
// TODO(Ed): Large pages disabled for now... (not failing gracefully)
inline U8 os__vmem_reserve(U8 size, Opts_vmem* opts) {
assert(opts != nullptr);
void* result = VirtualAlloc(cast(void*, opts->base_addr), size
, MS_MEM_RESERVE
// |MS_MEM_COMMIT|(opts->no_large_pages == false ? MS_MEM_LARGE_PAGES : 0)
, MS_PAGE_READWRITE
);
return u8_(result);
}
inline B4 os__vmem_commit(U8 vm, U8 size, Opts_vmem* opts) {
assert(opts != nullptr);
// if (opts->no_large_pages == false ) { return 1; }
B4 result = (VirtualAlloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != 0);
return result;
}
inline void os_vmem_release(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm), 0, MS_MEM_RELEASE); }
#pragma endregion OS
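// Minimal sketch (illustrative only) of the virtual-memory layer that VArena builds on, assuming
// the os_vmem_reserve/os_vmem_commit option macros used by varena__make below: reserve address
// space once, commit pages as needed.
inline
U8 example__os_vmem(void) {
os_init(); // page size query + large-page privilege attempt
U8 size = mega(1);
U8 base = os_vmem_reserve(size, .no_large_pages = true); // address space only, not yet backed
if (base == 0) { return 0; }
if (os_vmem_commit(base, kilo(64), .no_large_pages = true) == false) {
os_vmem_release(base, size);
return 0;
}
return base; // first 64 KiB is now read/write
}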
#pragma region VArena (Virtual Address Space Arena)
inline
VArena_R varena__make(Opts_varena_make*R_ opts) {
assert(opts != nullptr);
if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
if (opts->commit_size == 0) { opts->commit_size = mega(64); }
U8 reserve_size = align_pow2(opts->reserve_size, os_system_info()->target_page_size);
U8 commit_size = align_pow2(opts->commit_size, os_system_info()->target_page_size);
B4 no_large_pages = (opts->flags & VArenaFlag_NoLargePages) != 0;
U8 base = os_vmem_reserve(reserve_size, .base_addr = opts->base_addr, .no_large_pages = no_large_pages);
assert(base != 0);
os_vmem_commit(base, commit_size, .no_large_pages = no_large_pages);
U8 header_size = align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT);
VArena_R vm = cast(VArena_R, base);
vm[0] = (VArena){
.reserve_start = base + header_size,
.reserve = reserve_size,
.commit_size = commit_size,
.committed = commit_size,
.commit_used = header_size,
.flags = opts->flags
};
return vm;
}
inline
Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opts) {
assert(amount != 0);
U8 alignment = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
U8 requested_size = amount * type_width;
U8 aligned_size = align_pow2(requested_size, alignment);
U8 current_offset = vm->reserve_start + vm->commit_used;
U8 to_be_used = vm->commit_used + aligned_size;
U8 reserve_left = vm->reserve - vm->commit_used;
U8 commit_left = vm->committed - vm->commit_used;
B4 exhausted = commit_left < to_be_used;
assert(to_be_used < reserve_left);
if (exhausted)
{
U8 next_commit_size = reserve_left > 0 ?
max(vm->commit_size, to_be_used)
: align_pow2( reserve_left, os_system_info()->target_page_size);
if (next_commit_size) {
U8 next_commit_start = u8_(vm) + vm->committed;
B4 no_large_pages = (vm->flags & VArenaFlag_NoLargePages) != 0;
B4 commit_result = os_vmem_commit(next_commit_start, next_commit_size, .no_large_pages = no_large_pages);
if (commit_result == false) {
return (Slice_Mem){0};
}
vm->committed += next_commit_size;
}
}
vm->commit_used = to_be_used;
return (Slice_Mem){.ptr = current_offset, .len = requested_size};
}
inline void varena_release(VArena_R arena) { os_vmem_release(u8_(arena), arena->reserve); }
inline Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, Opts_varena* opts) {
assert(opts != nullptr);
Slice_Mem result = {0};
U8 current_offset = vm->reserve_start + vm->commit_used;
U8 shrink_amount = old_allocation.len - requested_size;
if (lt_s(shrink_amount, 0)) {
result = old_allocation;
return result;
}
assert(old_allocation.ptr == current_offset);
vm->commit_used -= shrink_amount;
result = (Slice_Mem){ old_allocation.ptr, requested_size };
return result;
}
inline
void varena_rewind(VArena* vm, AllocatorSP sp) {
assert(vm != nullptr);
assert(sp.type_sig == & varena_allocator_proc);
vm->commit_used = max(sp.slot, sizeof(VArena));
}
inline AllocatorSP varena_save(VArena* vm) { return (AllocatorSP){varena_allocator_proc, vm->commit_used}; }
void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
VArena* vm = cast(VArena*, in.data);
switch (in.op)
{
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = varena_push_mem(vm, in.requested_size, .alignment = in.alignment);
memory_zero(out->allocation.ptr, out->allocation.len * in.op);
break;
case AllocatorOp_Free:
break;
case AllocatorOp_Reset:
vm->commit_used = 0;
break;
case AllocatorOp_Grow_NoZero:
case AllocatorOp_Grow: {
U8 grow_amount = in.requested_size - in.old_allocation.len;
if (grow_amount == 0) {
out->allocation = in.old_allocation;
return;
}
U8 current_offset = vm->reserve_start + vm->commit_used;
// Growing when not the last allocation not allowed
assert(in.old_allocation.ptr == current_offset);
Slice_Mem allocation = varena_push_mem(vm, grow_amount, .alignment = in.alignment);
assert(allocation.ptr != 0);
out->allocation = (Slice_Mem){ in.old_allocation.ptr, in.requested_size };
memory_zero(out->allocation.ptr, out->allocation.len * (in.op - AllocatorOp_Grow_NoZero));
}
break;
case AllocatorOp_Shrink: {
U8 current_offset = vm->reserve_start + vm->commit_used;
U8 shrink_amount = in.old_allocation.len - in.requested_size;
if (lt_s(shrink_amount, 0)) {
out->allocation = in.old_allocation;
return;
}
assert(in.old_allocation.ptr == current_offset);
vm->commit_used -= shrink_amount;
out->allocation = (Slice_Mem){ in.old_allocation.ptr, in.requested_size };
}
break;
case AllocatorOp_Rewind:
vm->commit_used = in.save_point.slot;
break;
case AllocatorOp_SavePoint:
out->save_point = varena_save(vm);
break;
case AllocatorOp_Query:
out->features =
AllocatorQuery_Alloc
| AllocatorQuery_Resize
| AllocatorQuery_Reset
| AllocatorQuery_Rewind
;
out->max_alloc = vm->reserve - vm->committed;
out->min_alloc = kilo(4);
out->left = out->max_alloc;
out->save_point = varena_save(vm);
break;
}
}
#pragma endregion VArena
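// Minimal VArena usage sketch (illustrative only), assuming the varena_make/varena_push_mem macros
// declared in the header above: a single reservation whose header lives at its own base, committed
// lazily as pushes cross the committed boundary.
inline
void example__varena(void) {
VArena_R vm = varena_make(.reserve_size = mega(8), .commit_size = mega(1), .flags = VArenaFlag_NoLargePages);
Slice_Mem block = varena_push_mem(vm, kilo(16)); // commits more pages on demand
slice_assert(block);
AllocatorSP sp = varena_save(vm);
varena_rewind(vm, sp); // commit_used back to the save point
varena_release(vm); // returns the whole reservation to the OS
}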
#pragma region Arena (Chained Arena)
inline
Arena_R arena__make(Opts_arena_make*R_ opts) {
assert(opts != nullptr);
U8 header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
VArena* current = varena__make(opts);
assert(current != nullptr);
Arena* arena = varena_push(current, Arena);
* arena = (Arena){
.backing = current,
.prev = nullptr,
.current = arena,
.base_pos = 0,
.pos = header_size,
.flags = opts->flags,
};
return arena;
}
Slice_Mem arena__push(Arena_R arena, U8 amount, U8 type_width, Opts_arena* opts) {
assert(arena != nullptr);
assert(opts != nullptr);
Arena_R active = arena->current;
U8 size_requested = amount * type_width;
U8 alignment = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
U8 size_aligned = align_pow2(size_requested, alignment);
U8 pos_pre = active->pos;
U8 pos_pst = pos_pre + size_aligned;
B4 should_chain =
((arena->flags & ArenaFlag_NoChain) == 0)
&& active->backing->reserve < pos_pst;
if (should_chain)
{
Arena* new_arena = arena_make(
.base_addr = 0,
.reserve_size = active->backing->reserve,
.commit_size = active->backing->commit_size,
.flags = active->backing->flags,
);
new_arena->base_pos = active->base_pos + active->backing->reserve;
sll_stack_push_n(arena->current, new_arena, prev);
active = arena->current;
}
U8 result = u8_(active) + pos_pre;
Slice_Mem vresult = varena_push_mem(active->backing, size_aligned, .alignment = alignment);
slice_assert(vresult);
assert(result == vresult.ptr);
active->pos = pos_pst;
return vresult;
}
inline
void arena_release(Arena* arena) {
assert(arena != nullptr);
Arena_R curr = arena->current;
Arena_R prev = nullptr;
for (; curr != nullptr; curr = prev) {
prev = curr->prev;
varena_release(curr->backing);
}
}
inline void arena_reset(Arena* arena) { arena_rewind(arena, (AllocatorSP){.type_sig = arena_allocator_proc, .slot = 0}); }
void arena_rewind(Arena* arena, AllocatorSP save_point) {
assert(arena != nullptr);
assert(save_point.type_sig == arena_allocator_proc);
U8 header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
Arena_R curr = arena->current;
U8 big_pos = clamp_bot(header_size, save_point.slot);
for (Arena_R prev = nullptr; curr->base_pos >= big_pos; curr = prev) {
prev = curr->prev;
varena_release(curr->backing);
}
arena->current = curr;
U8 new_pos = big_pos - curr->base_pos;
assert(new_pos <= curr->pos);
curr->pos = new_pos;
varena_rewind(curr->backing, (AllocatorSP){varena_allocator_proc, curr->pos + size_of(VArena)});
}
inline AllocatorSP arena_save(Arena_R arena) { return (AllocatorSP){arena_allocator_proc, arena->base_pos + arena->current->pos}; }
void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
{
assert(out != nullptr);
Arena* arena = cast(Arena*, in.data);
assert(arena != nullptr);
switch (in.op)
{
case AllocatorOp_Alloc:
case AllocatorOp_Alloc_NoZero:
out->allocation = arena_push_mem(arena, in.requested_size, .alignment = in.alignment);
memory_zero(out->allocation.ptr, out->allocation.len * in.op);
break;
case AllocatorOp_Free:
break;
case AllocatorOp_Reset:
arena_reset(arena);
break;
case AllocatorOp_Grow:
case AllocatorOp_Grow_NoZero: {
Arena_R active = arena->current;
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end == arena_end)
{
U8 grow_amount = in.requested_size - in.old_allocation.len;
U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
if (active->pos + aligned_grow <= active->backing->reserve)
{
Slice_Mem vresult = varena_push_mem(active->backing, aligned_grow, .alignment = in.alignment);
if (vresult.ptr != null)
{
active->pos += aligned_grow;
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
out->continuity_break = false;
memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * (in.op - AllocatorOp_Grow_NoZero));
break;
}
}
}
Slice_Mem new_alloc = arena__push(arena, in.requested_size, 1, &(Opts_arena){.alignment = in.alignment});
if (new_alloc.ptr == null) {
out->allocation = (Slice_Mem){0};
break;
}
memory_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
memory_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * (in.op - AllocatorOp_Grow_NoZero));
out->allocation = new_alloc;
out->continuity_break = true;
}
break;
case AllocatorOp_Shrink: {
Arena_R active = arena->current;
U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
U8 arena_end = u8_(active) + active->pos;
if (alloc_end != arena_end) {
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
break;
}
//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
U8 aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
U8 pos_reduction = aligned_original - aligned_new;
active->pos -= pos_reduction;
varena__shrink(active->backing, in.old_allocation, in.requested_size, &(Opts_varena){.alignment = in.alignment});
out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
}
break;
case AllocatorOp_Rewind:
arena_rewind(arena, in.save_point);
break;
case AllocatorOp_SavePoint:
out->save_point = arena_save(arena);
break;
case AllocatorOp_Query:
out->features =
AllocatorQuery_Alloc
| AllocatorQuery_Resize
| AllocatorQuery_Reset
| AllocatorQuery_Rewind
;
out->max_alloc = arena->backing->reserve;
out->min_alloc = kilo(4);
out->left = out->max_alloc - arena->backing->commit_used;
out->save_point = arena_save(arena);
break;
}
}
#pragma endregion Arena
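// Minimal chained-arena usage sketch (illustrative only), assuming the arena_make/arena_push_mem
// macros declared in the header above: an Arena fronts a VArena and, unless ArenaFlag_NoChain is
// set, links in a fresh backing reservation when a push would overrun the current one.
inline
void example__arena(void) {
Arena_R arena = arena_make(.reserve_size = mega(4));
Slice_Mem block = arena_push_mem(arena, kilo(8));
slice_assert(block);
AllocatorSP sp = arena_save(arena);
arena_rewind (arena, sp); // pops any chained arenas past the save point
arena_release(arena); // releases every backing VArena in the chain
}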
// C--
#pragma region Key Table 1-Layer Linear (KT1L)
void kt1l__populate_slice_a2(KT1L_Byte*R_ kt, AllocatorInfo backing, KT1L_Meta m, Slice_Mem values, U8 num_values ) {
assert(kt != nullptr);
if (num_values == 0) { return; }
kt[0] = mem_alloc(backing, m.slot_size * num_values ); slice_assert(* kt);
U8 iter = 0;
loop: {
U8 slot_offset = iter * m.slot_size; // slot id
U8 slot_cursor = kt->ptr + slot_offset; // slots[id] type: KT1L_<Type>
U8 slot_value = slot_cursor + m.kt_value_offset; // slots[id].value type: <Type>
U8 a2_offset = iter * m.type_width * 2; // a2 entry id
U8 a2_cursor = values.ptr + a2_offset; // a2_entries[id] type: A2_<Type>
U8 a2_value = a2_cursor + m.type_width; // a2_entries[id].value type: <Type>
memory_copy(slot_value, a2_value, m.type_width); // slots[id].value = a2_entries[id].value
u8_r(slot_cursor)[0] = 0;
hash64_djb8(u8_r(slot_cursor), slice_mem(a2_cursor, m.type_width)); // slots[id].key = hash64_djb8(a2_entries[id].key)
++ iter;
if (iter < num_values) goto loop;
}
kt->len = num_values;
}
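// Layout assumed by kt1l__populate_slice_a2: `values` holds num_values back-to-back {key, value}
// pairs of width 2 * m.type_width, and each output slot is m.slot_size bytes with the 64-bit key
// hash first and the value at m.kt_value_offset. A meta for a hypothetical Str8 value type would
// look roughly like:
//   KT1L_Meta m = {
//       .slot_size       = size_of(U8) + size_of(Str8), // key hash + value
//       .kt_value_offset = size_of(U8),
//       .type_width      = size_of(Str8),
//   };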
#pragma endregion KT1L
#pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX)
inline
void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) {
assert(result != nullptr);
assert(info.backing_cells.proc != nullptr);
assert(info.backing_table.proc != nullptr);
assert(m.cell_depth > 0);
assert(m.cell_pool_size >= kilo(4));
assert(m.table_size >= kilo(4));
assert(m.type_width > 0);
result->table = mem_alloc(info.backing_table, m.table_size * m.cell_size); slice_assert(result->table);
result->cell_pool = mem_alloc(info.backing_cells, m.cell_size * m.cell_pool_size); slice_assert(result->cell_pool);
result->table.len = m.table_size; // Setting to the table number of elements instead of byte length.
}
void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
U8 cell_cursor = kt.table.ptr;
U8 table_len = kt.table.len * m.cell_size;
for (; cell_cursor != kt.table.ptr + table_len; cell_cursor += m.cell_size ) // for cell in kt.table.cells
{
Slice_Mem slots = {cell_cursor, m.cell_depth * m.slot_size }; // slots = cell.slots
U8 slot_cursor = slots.ptr;
for (; slot_cursor < slice_end(slots); slot_cursor += m.slot_size) {
process_slots:
Slice_Mem slot = {slot_cursor, m.slot_size}; // slot = slots[id]
memory_zero(slot.ptr, slot.len); // clear(slot)
}
U8 next = slot_cursor + m.cell_next_offset; // next = slots + next_cell_offset
if (next != null) {
slots.ptr = next; // slots = next
slot_cursor = next;
goto process_slots;
}
}
}
inline
U8 kt1cx_slot_id(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta m) {
U8 hash_index = key % kt.table.len;
return hash_index;
}
U8 kt1cx_get(KT1CX_Byte kt, U8 key, KT1CX_ByteMeta m) {
U8 hash_index = kt1cx_slot_id(kt, key, m);
U8 cell_offset = hash_index * m.cell_size;
U8 cell_cursor = kt.table.ptr + cell_offset; // KT1CX_Cell_<Type> cell = kt.table[hash_index]
{
Slice_Mem slots = {cell_cursor, m.cell_depth * m.slot_size}; // KT1CX_Slot_<Type>[kt.cell_depth] slots = cell.slots
U8 slot_cursor = slots.ptr;
for (; slot_cursor != slice_end(slots); slot_cursor += m.slot_size) {
process_slots:
KT1CX_Byte_Slot* slot = cast(KT1CX_Byte_Slot*, slot_cursor + m.slot_key_offset); // slot = slots[id] KT1CX_Slot_<Type>
if (slot->occupied && slot->key == key) {
return slot_cursor;
}
}
U8 cell_next = cell_cursor + m.cell_next_offset; // cell.next
if (cell_next != null) {
slots.ptr = cell_next; // slots = cell_next
slot_cursor = cell_next;
cell_cursor = cell_next; // cell = cell_next
goto process_slots;
}
else {
return null;
}
}
}
inline
U8 kt1cx_set(KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells, KT1CX_ByteMeta m) {
U8 hash_index = kt1cx_slot_id(kt, key, m);
U8 cell_offset = hash_index * m.cell_size;
U8 cell_cursor = kt.table.ptr + cell_offset; // KT1CX_Cell_<Type> cell = kt.table[hash_index]
{
Slice_Mem slots = {cell_cursor, m.cell_depth * m.slot_size}; // cell.slots
U8 slot_cursor = slots.ptr;
for (; slot_cursor != slice_end(slots); slot_cursor += m.slot_size) {
process_slots:
KT1CX_Byte_Slot_R slot = cast(KT1CX_Byte_Slot_R, slot_cursor + m.slot_key_offset);
if (slot->occupied == false) {
slot->occupied = true;
slot->key = key;
return slot_cursor;
}
else if (slot->key == key) {
return slot_cursor;
}
}
KT1CX_Byte_Cell curr_cell = { cell_cursor + m.cell_next_offset }; // curr_cell = cell
if ( curr_cell.next != null) {
slots.ptr = curr_cell.next;
slot_cursor = curr_cell.next;
cell_cursor = curr_cell.next;
goto process_slots;
}
else {
Slice_Mem new_cell = mem_alloc(backing_cells, m.cell_size);
curr_cell.next = new_cell.ptr;
KT1CX_Byte_Slot_R slot = cast(KT1CX_Byte_Slot_R, new_cell.ptr + m.slot_key_offset);
slot->occupied = true;
slot->key = key;
return new_cell.ptr;
}
}
assert_msg(false, "impossible path");
return null;
}
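// Shape assumed by the byte-level key table above: kt.table is a flat array of cells, each cell
// holding m.cell_depth fixed-size slots followed (at m.cell_next_offset) by a link to an overflow
// cell drawn from backing_cells; each slot carries its payload plus, at m.slot_key_offset, a
// KT1CX_Byte_Slot {key, occupied} record that kt1cx_get and kt1cx_set probe.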
#pragma endregion Key Table
#pragma endregion Implementation
int main(void)
{

View File

@@ -1366,23 +1366,24 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
}
#pragma endregion Arena
// Modern C+
#pragma region Key Table 1-Layer Linear (KT1L)
void kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ) {
void kt1l__populate_slice_a2(KT1L_Byte*restrict kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ) {
assert(kt != nullptr);
if (num_values == 0) { return; }
* kt = alloc_slice(backing, Byte, m.slot_size * num_values );
slice_assert(* kt);
for (span_iter(SSIZE, iter, 0, <, num_values)) {
SSIZE slot_offset = iter.cursor * m.slot_size; // slot id
Byte* slot_cursor = & kt->ptr[slot_offset]; // slots[id] type: KT1L_<Type>
U64* slot_key = (U64*)slot_cursor; // slots[id].key type: U64
Slice_Byte slot_value = { slot_cursor + m.kt_value_offset, m.type_width }; // slots[id].value type: <Type>
SSIZE a2_offset = iter.cursor * m.type_width * 2; // a2 entry id
Byte* a2_cursor = & values.ptr[a2_offset]; // a2_entries[id] type: A2_<Type>
Slice_Byte a2_key = * cast(Slice_Byte*, a2_cursor); // a2_entries[id].key type: <Type>
Slice_Byte a2_value = { a2_cursor + m.type_width, m.type_width }; // a2_entries[id].value type: <Type>
slice_copy(slot_value, a2_value); // slots[id].value = a2_entries[id].value
* slot_key = 0; hash64_djb8(slot_key, a2_key); // slots[id].key = hash64_djb8(a2_entries[id].key)
SSIZE slot_offset = iter.cursor * m.slot_size; // slot id
Byte*restrict slot_cursor = & kt->ptr[slot_offset]; // slots[id] type: KT1L_<Type>
U64*restrict slot_key = (U64*restrict)slot_cursor; // slots[id].key type: U64
Slice_Byte slot_value = { slot_cursor + m.kt_value_offset, m.type_width }; // slots[id].value type: <Type>
SSIZE a2_offset = iter.cursor * m.type_width * 2; // a2 entry id
Byte*restrict a2_cursor = & values.ptr[a2_offset]; // a2_entries[id] type: A2_<Type>
Slice_Byte a2_key = * cast(Slice_Byte*restrict, a2_cursor); // a2_entries[id].key type: <Type>
Slice_Byte a2_value = { a2_cursor + m.type_width, m.type_width }; // a2_entries[id].value type: <Type>
slice_copy(slot_value, a2_value); // slots[id].value = a2_entries[id].value
* slot_key = 0; hash64_djb8(slot_key, a2_key); // slots[id].key = hash64_djb8(a2_entries[id].key)
}
kt->len = num_values;
}