Mirror of https://github.com/Ed94/WATL_Exercise.git (synced 2025-11-08 17:49:18 -08:00)

Compare commits: 816ed5debd ... 3554615244 (3 commits)

| SHA1 |
|---|
| 3554615244 |
| aad7b59179 |
| 9179f77f05 |
@@ -1,2 +0,0 @@
-#pragma once
-
@@ -41,11 +41,15 @@ Win CRT imports will also be typeless signatures.
 #pragma region Header

 #pragma region DSL
+#define local_persist static
+#define global static
+#define internal static
+
 #define A_(x) __attribute__((aligned (x)))
 #define E_(x,y) __builtin_expect(x,y)
 #define S_ static
-#define I_ static inline __attribute__((always_inline))
-#define N_ static __attribute__((noinline))
+#define I_ internal inline __attribute__((always_inline))
+#define N_ internal __attribute__((noinline))
 #define R_ __restrict
 #define V_ volatile
 #define W_ __attribute((__stdcall__)) __attribute__((__force_align_arg_pointer__))
@@ -58,10 +62,6 @@
 #define stringify(S) stringify_impl(S)
 #define tmpl(prefix, type) prefix ## _ ## type

-#define local_persist static
-#define global static
-#define internal static
-
 #define static_assert _Static_assert
 #define typeof __typeof__
 #define typeof_ptr(ptr) typeof(ptr[0])
@@ -440,7 +440,7 @@ typedef def_struct(FArena) {
 };

 I_ void farena_init__u  (U8 arena, U8 mem_ptr, U8 mem_len);
-void    farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_mem);
+void    farena__push__u (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
 I_ void farena_reset__u (U8 arena);
 I_ void farena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot);
 I_ void farena_save__u  (U8 arena, U8 sp);
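The `__u` suffix above marks the typeless variants: every parameter, pointer or integer alike, is passed as a plain `U8` word, matching the note in the hunk header that Win CRT imports will also be typeless signatures. A hedged sketch of how a typed front end would lower into such a call (the wrapper name is hypothetical; `u8_` is assumed to reinterpret a pointer as a `U8`):

```c
// Sketch only: a typed wrapper lowering to the typeless farena__push__u,
// which writes its Slice_Mem result through an out-address passed as U8.
Slice_Mem farena_push_bytes(FArena* arena, U8 amount, U8 alignment) {
	Slice_Mem result;
	farena__push__u(u8_(arena), amount, /* type_width */ 1, alignment, u8_(& result));
	return result;
}
```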
@@ -468,70 +468,21 @@ cast(type*, farena__push(arena, size_of(type), 1, opt_args(Opts_farena, __VA_ARG
 typedef def_struct(OS_SystemInfo) { U8 target_page_size; };
 typedef def_struct(Opts_vmem) { U8 base_addr; B4 no_large_pages; A4_B1 _PAD_; };

-#define MS_INVALID_HANDLE_VALUE ((MS_HANDLE)(S8)-1)
-#define MS_ANYSIZE_ARRAY 1
-#define MS_MEM_COMMIT 0x00001000
-#define MS_MEM_RESERVE 0x00002000
-#define MS_MEM_LARGE_PAGES 0x20000000
-#define MS_PAGE_READWRITE 0x04
-#define MS_TOKEN_ADJUST_PRIVILEGES (0x0020)
-#define MS_SE_PRIVILEGE_ENABLED (0x00000002L)
-#define MS_TOKEN_QUERY (0x0008)
-#define MS__TEXT(quote) L ## quote
-#define MS_TEXT(quote) MS__TEXT(quote)
-#define MS_SE_LOCK_MEMORY_NAME MS_TEXT("SeLockMemoryPrivilege")
-
-typedef int MS_BOOL;
-typedef unsigned long MS_DWORD;
-typedef MS_DWORD* MS_PDWORD;
-typedef void* MS_HANDLE;
-typedef MS_HANDLE* MS_PHANDLE;
-typedef long MS_LONG;
-typedef S8 MS_LONGLONG;
-typedef char const* MS_LPCSTR;
-typedef unsigned short* MS_LPWSTR, *MS_PWSTR;
-typedef void* MS_LPVOID;
-typedef MS_DWORD* MS_LPDWORD;
-typedef U8 MS_ULONG_PTR, *MS_PULONG_PTR;
-typedef void const* MS_LPCVOID;
-typedef struct MS_SECURITY_ATTRIBUTES *MS_PSECURITY_ATTRIBUTES, *MS_LPSECURITY_ATTRIBUTES;
-typedef struct MS_OVERLAPPED *MS_LPOVERLAPPED;
-typedef def_union(MS_LARGE_INTEGER) { struct { MS_DWORD LowPart; MS_LONG HighPart; } _; struct { MS_DWORD LowPart; MS_LONG HighPart; } u; MS_LONGLONG QuadPart; };
-typedef def_struct(MS_FILE) { void* _Placeholder; };
-typedef def_struct(MS_SECURITY_ATTRIBUTES) { MS_DWORD nLength; A4_B1 _PAD_; MS_LPVOID lpSecurityDescriptor; MS_BOOL bInheritHandle; };
-typedef def_struct(MS_OVERLAPPED) { MS_ULONG_PTR Internal; MS_ULONG_PTR InternalHigh; union { struct { MS_DWORD Offset; MS_DWORD OffsetHigh; } _; void* Pointer; } _; MS_HANDLE hEvent; };
-typedef struct MS_LUID* MS_PLUID;
-typedef struct MS_LUID_AND_ATTRIBUTES* MS_PLUID_AND_ATTRIBUTES;
-typedef struct MS_TOKEN_PRIVILEGES* MS_PTOKEN_PRIVILEGES;
-typedef def_struct(MS_LUID) { MS_DWORD LowPart; MS_LONG HighPart; };
-typedef def_struct(MS_LUID_AND_ATTRIBUTES) { MS_LUID Luid; MS_DWORD Attributes; };
-typedef def_struct(MS_TOKEN_PRIVILEGES) { MS_DWORD PrivilegeCount; MS_LUID_AND_ATTRIBUTES Privileges[MS_ANYSIZE_ARRAY]; };
-
-W_ MS_BOOL CloseHandle(MS_HANDLE hObject);
-W_ MS_BOOL AdjustTokenPrivileges(MS_HANDLE TokenHandle, MS_BOOL DisableAllPrivileges, MS_PTOKEN_PRIVILEGES NewState, MS_DWORD BufferLength, MS_PTOKEN_PRIVILEGES PreviousState, MS_PDWORD ReturnLength);
-W_ MS_HANDLE GetCurrentProcess(void);
-W_ U8 GetLargePageMinimum(void);
-W_ MS_BOOL LookupPrivilegeValueW(MS_LPWSTR lpSystemName, MS_LPWSTR lpName, MS_PLUID lpLuid);
-W_ MS_BOOL OpenProcessToken(MS_HANDLE ProcessHandle, MS_DWORD DesiredAccess, MS_PHANDLE TokenHandle);
-W_ MS_LPVOID VirtualAlloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationType, MS_DWORD flProtect);
-W_ MS_BOOL VirtualFree (MS_LPVOID lpAddress, U8 dwSize, MS_DWORD dwFreeType);
-
 typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
 global OS_Windows_State os__windows_info;

 I_ OS_SystemInfo* os_system_info(void);
-I_ void os_init(void);
+I_ void os_init (void);

-I_ U8 os__vmem_reserve__u(U8 size, U8 opts_addr);
-I_ B4 os__vmem_commit__u (U8 vm, U8 size, U8 opts_addr);
+I_ U8 os_vmem_reserve__u( U8 size, U4 no_large_pages, U8 base_addr);
+I_ B4 os_vmem_commit__u (U8 vm, U8 size);
 I_ void os_vmem_release__u(U8 vm, U8 size);

-I_ U8 os__vmem_reserve(U8 size, Opts_vmem_R opts);
-I_ B4 os__vmem_commit (U8 vm, U8 size, Opts_vmem_R opts);
-I_ void os_vmem_release(U8 vm, U8 size);
+I_ U8 os__vmem_reserve( U8 size, Opts_vmem_R opts);
+I_ B4 os_vmem_commit (U8 vm, U8 size);
+I_ void os_vmem_release (U8 vm, U8 size);

 #define os_vmem_reserve(size, ...) os__vmem_reserve(size, opt_args(Opts_vmem, __VA_ARGS__))
-#define os_vmem_commit(vm, size, ...) os__vmem_commit (vm, size, opt_args(Opts_vmem, __VA_ARGS__))

 #pragma endregion OS

 #pragma region VArena (Virtual Address Space Arena)
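`os_vmem_reserve` above is dispatched through a macro that packs any trailing designated initializers into an `Opts_vmem` struct via `opt_args`, so optional parameters read like keyword arguments. A hedged sketch of the call sites this enables (assuming `opt_args` expands to a pointer to a compound literal, which its usage here implies):

```c
// Hypothetical call sites for the options-macro pattern above.
U8 plain  = os_vmem_reserve(mega(64));                          // defaults only
U8 pinned = os_vmem_reserve(mega(64), .no_large_pages = true);  // named option
```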
@@ -554,22 +505,22 @@ typedef def_struct(Opts_varena_make) {
 	VArenaFlags flags;
 };

-I_ U8   varena__make__u  (Opts_varena_make_R opts);
+I_ U8   varena__make__u  (U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr);
 I_ void varena_release__u(U8 arena);
 I_ void varena_reset__u  (U8 arena);
 I_ void varena_rewind__u (U8 arena, U8 sp_type_sig, U8 sp_slot);
 I_ void varena_save__u   (U8 arena, U8 sp_addr);
 void varena__push__u  (U8 arena, U8 amount, U8 type_width, U8 alignment, U8 slice_addr);
 void varena__grow__u  (U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero);
 void varena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment);

-VArena*     varena__make  (Opts_varena_make*R_ opts);
-Slice_Mem   varena__push  (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
-void        varena_release(VArena_R arena);
-void        varena_reset  (VArena_R arena);
-void        varena_rewind (VArena_R arena, AllocatorSP save_point);
-Slice_Mem   varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts);
-AllocatorSP varena_save   (VArena_R arena);
+I_ VArena*     varena__make  (Opts_varena_make*R_ opts);
+I_ Slice_Mem   varena__push  (VArena_R arena, U8 amount, U8 type_width, Opts_varena*R_ opts);
+I_ void        varena_release(VArena_R arena);
+I_ void        varena_reset  (VArena_R arena);
+I_ void        varena_rewind (VArena_R arena, AllocatorSP save_point);
+I_ Slice_Mem   varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts);
+I_ AllocatorSP varena_save   (VArena_R arena);

 void varena_allocator_proc(U8 data, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, /*AllocatorProc_Out*/U8 out);

@@ -785,271 +736,258 @@ void farena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr
 #pragma endregion FArena

 #pragma region OS
+#pragma warning(push)
+#pragma warning(disable: 4820)
+#pragma comment(lib, "Kernel32.lib")
+#pragma comment(lib, "Advapi32.lib")
+#define MS_INVALID_HANDLE_VALUE ((MS_HANDLE)(S8)-1)
+#define MS_ANYSIZE_ARRAY 1
+#define MS_MEM_COMMIT 0x00001000
+#define MS_MEM_RESERVE 0x00002000
+#define MS_MEM_LARGE_PAGES 0x20000000
+#define MS_PAGE_READWRITE 0x04
+#define MS_TOKEN_ADJUST_PRIVILEGES (0x0020)
+#define MS_SE_PRIVILEGE_ENABLED (0x00000002L)
+#define MS_TOKEN_QUERY (0x0008)
+#define MS__TEXT(quote) L ## quote
+#define MS_TEXT(quote) MS__TEXT(quote)
+#define MS_SE_LOCK_MEMORY_NAME MS_TEXT("SeLockMemoryPrivilege")
+
+typedef U4 MS_BOOL;
+typedef U4 MS_DWORD;
+typedef U8 MS_PDWORD;
+typedef U8 MS_HANDLE;
+typedef U8 MS_PHANDLE;
+typedef U4 MS_LONG;
+typedef U8 MS_LONGLONG;
+typedef U8 MS_LPCSTR;
+typedef U8 MS_LPWSTR, MS_PWSTR;
+typedef U8 MS_LPVOID;
+typedef U8 MS_LPDWORD;
+typedef U8 MS_ULONG_PTR, MS_PULONG_PTR;
+typedef U8 MS_LPCVOID;
+typedef struct MS_SECURITY_ATTRIBUTES MS_SECURITY_ATTRIBUTES; typedef U8 MS_PSECURITY_ATTRIBUTES, MS_LPSECURITY_ATTRIBUTES;
+typedef struct MS_OVERLAPPED MS_OVERLAPPED; typedef U8 MS_LPOVERLAPPED;
+typedef def_union(MS_LARGE_INTEGER) { struct { MS_DWORD LowPart; MS_LONG HighPart; } _; struct { MS_DWORD LowPart; MS_LONG HighPart; } u; MS_LONGLONG QuadPart; };
+typedef def_struct(MS_FILE) { U8 _Placeholder; };
+typedef def_struct(MS_SECURITY_ATTRIBUTES) { MS_DWORD nLength; A4_B1 _PAD_; MS_LPVOID lpSecurityDescriptor; MS_BOOL bInheritHandle; };
+typedef def_struct(MS_OVERLAPPED) { MS_ULONG_PTR Internal; MS_ULONG_PTR InternalHigh; union { struct { MS_DWORD Offset; MS_DWORD OffsetHigh; } _; U8 Pointer; } _; MS_HANDLE hEvent; };
+typedef struct MS_LUID MS_LUID; typedef U8 MS_PLUID;
+typedef struct MS_LUID_AND_ATTRIBUTES MS_LUID_AND_ATTRIBUTES; typedef U8 MS_PLUID_AND_ATTRIBUTES;
+typedef struct MS_TOKEN_PRIVILEGES MS_TOKEN_PRIVILEGES; typedef U8 MS_PTOKEN_PRIVILEGES;
+typedef def_struct(MS_LUID) { MS_DWORD LowPart; MS_LONG HighPart; };
+typedef def_struct(MS_LUID_AND_ATTRIBUTES) { MS_LUID Luid; MS_DWORD Attributes; };
+typedef def_struct(MS_TOKEN_PRIVILEGES) { MS_DWORD PrivilegeCount; MS_LUID_AND_ATTRIBUTES Privileges[MS_ANYSIZE_ARRAY]; };
+
+W_ MS_BOOL   ms_close_handle(MS_HANDLE hObject) __asm__("CloseHandle");
+W_ MS_BOOL   ms_adjust_token_privleges(MS_HANDLE TokenHandle, MS_BOOL DisableAllPrivileges, MS_PTOKEN_PRIVILEGES NewState, MS_DWORD BufferLength, MS_PTOKEN_PRIVILEGES PreviousState, MS_PDWORD ReturnLength) __asm__("AdjustTokenPrivileges");
+W_ MS_HANDLE ms_get_current_process(void) __asm__("GetCurrentProcess");
+W_ U8        ms_get_larg_page_minimum(void) __asm__("GetLargePageMinimum");
+W_ MS_BOOL   ms_lookup_priviledge_value_w(MS_LPWSTR lpSystemName, MS_LPWSTR lpName, MS_PLUID lpLuid) __asm__("LookupPrivilegeValueW");
+W_ MS_BOOL   ms_open_process_token(MS_HANDLE ProcessHandle, MS_DWORD DesiredAccess, MS_PHANDLE TokenHandle) __asm__("OpenProcessToken");
+W_ MS_LPVOID ms_virtual_alloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationType, MS_DWORD flProtect) __asm__("VirtualAlloc");
+W_ MS_BOOL   ms_virtual_free(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD dwFreeType) __asm__("VirtualFree");
+#pragma warning(pop)

 I_ OS_SystemInfo* os_system_info(void) {
 	return & os__windows_info.system_info;
 }
 I_ void os__enable_large_pages(void) {
 	MS_HANDLE token;
-	if (OpenProcessToken(GetCurrentProcess(), MS_TOKEN_ADJUST_PRIVILEGES | MS_TOKEN_QUERY, & token)) {
+	if (ms_open_process_token(ms_get_current_process(), MS_TOKEN_ADJUST_PRIVILEGES | MS_TOKEN_QUERY, u8_(& token))) {
 		MS_LUID luid;
-		if (LookupPrivilegeValueW(0, MS_SE_LOCK_MEMORY_NAME, & luid)) {
+		if (ms_lookup_priviledge_value_w(0, u8_(MS_SE_LOCK_MEMORY_NAME), u8_(& luid))) {
 			MS_TOKEN_PRIVILEGES priv;
 			priv.PrivilegeCount           = 1;
 			priv.Privileges[0].Luid       = luid;
 			priv.Privileges[0].Attributes = MS_SE_PRIVILEGE_ENABLED;
-			AdjustTokenPrivileges(token, 0, & priv, size_of(priv), 0, 0);
+			ms_adjust_token_privleges(token, 0, u8_(& priv), size_of(priv), 0, 0);
 		}
-		CloseHandle(token);
+		ms_close_handle(token);
 	}
 }
 I_ void os_init(void) {
 	os__enable_large_pages();
-	os_system_info()->target_page_size = GetLargePageMinimum();
+	os_system_info()->target_page_size = ms_get_larg_page_minimum();
 }
-I_ U8 os__vmem_reserve__u(U8 size, U8 opts_addr) {
-	Opts_vmem_R opts = cast(Opts_vmem_R, opts_addr); assert(opts != nullptr);
-	MS_LPVOID base = VirtualAlloc(cast(MS_LPVOID, opts->base_addr), size, MS_MEM_RESERVE,
-		MS_PAGE_READWRITE /* | (opts->no_large_pages ? 0 : MS_MEM_LARGE_PAGES) */);
-	return u8_(base);
+I_ U8 os_vmem_reserve__u(U8 size, B4 no_large_pages, U8 base_addr) {
+	return cast(U8, ms_virtual_alloc(cast(MS_LPVOID, base_addr), size, MS_MEM_RESERVE,
+		MS_PAGE_READWRITE /* | (opts->no_large_pages ? 0 : MS_MEM_LARGE_PAGES) */)
+	);
 }
-I_ B4   os__vmem_commit__u(U8 vm, U8 size, U8 opts_addr) { return VirtualAlloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != nullptr; }
-I_ void os_vmem_release__u(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm), 0, MS_MEM_RESERVE); }
+I_ B4   os_vmem_commit__u (U8 vm, U8 size) { return ms_virtual_alloc(cast(MS_LPVOID, vm), size, MS_MEM_COMMIT, MS_PAGE_READWRITE) != null; }
+I_ void os_vmem_release__u(U8 vm, U8 size) { ms_virtual_free(cast(MS_LPVOID, vm), 0, MS_MEM_RESERVE); }

-I_ U8   os__vmem_reserve( U8 size, Opts_vmem_R opts) { return os__vmem_reserve__u( size, u8_(opts)); }
-I_ B4   os__vmem_commit (U8 vm, U8 size, Opts_vmem_R opts) { return os__vmem_commit__u (vm, size, u8_(opts)); }
-I_ void os_vmem_release (U8 vm, U8 size) { os_vmem_release__u(vm, size); }
+I_ U8 os__vmem_reserve( U8 size, Opts_vmem_R opts) {
+	assert(opts != nullptr);
+	return os_vmem_reserve__u(size, opts->no_large_pages, opts->base_addr);
+}
+I_ B4   os_vmem_commit (U8 vm, U8 size) { return os_vmem_commit__u(vm, size); }
+I_ void os_vmem_release(U8 vm, U8 size) { os_vmem_release__u(vm, size); }
 #pragma endregion OS

 #pragma region VArena (Virtual Address Space Arena)
-I_ U8 varena__header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
+I_ U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }

-I_ U8 varena__make__u(Opts_varena_make_R opts) {
-	assert(opts != nullptr);
-	if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
-	if (opts->commit_size == 0) { opts->commit_size = mega(64); }
+I_ U8 varena__make__u(U8 reserve_size, U8 commit_size, U4 flags, U8 base_addr) {
+	if (reserve_size == 0) { reserve_size = mega(64); }
+	if (commit_size == 0) { commit_size = mega(64); }
 	U8 reg page = os_system_info()->target_page_size;
-	U8 reg reserve_sz = align_pow2(opts->reserve_size, page);
-	U8 reg commit_sz  = align_pow2(opts->commit_size,  page);
-	U8 base = os_vmem_reserve(reserve_sz, .base_addr = opts->base_addr, .no_large_pages = no_large);
-	assert(base != 0);
-	B4 reg no_large = (opts->flags & VArenaFlag_NoLargePages) != 0;
-	B4 ok = os_vmem_commit(base, commit_sz, .no_large_pages = no_large);
-	assert(ok != 0);
-	U8 header = varena__header_size();
+	U8 reg reserve_sz = align_pow2(reserve_size, page);
+	U8 reg commit_sz  = align_pow2(commit_size,  page);
+	B4 reg no_large = (flags & VArenaFlag_NoLargePages) != 0;
+	U8 base   = os_vmem_reserve__u(reserve_sz, no_large, base_addr); assert(base != 0);
+	B4 ok     = os_vmem_commit__u(base, commit_sz); assert(ok != 0);
+	U8 header = varena_header_size();
 	U8 data_start = base + header;
 	u8_r(base + offset_of(VArena, reserve_start))[0] = data_start;
 	u8_r(base + offset_of(VArena, reserve      ))[0] = reserve_sz;
 	u8_r(base + offset_of(VArena, commit_size  ))[0] = commit_sz;
 	u8_r(base + offset_of(VArena, committed    ))[0] = commit_sz;
 	u8_r(base + offset_of(VArena, commit_used  ))[0] = header;
-	u4_r(base + offset_of(VArena, flags        ))[0] = opts->flags;
+	u4_r(base + offset_of(VArena, flags        ))[0] = flags;
 	return base;
 }
-I_ void varena_release__u(U8 arena) {
-	if (arena == null) { return; }
-	os_vmem_release__u(arena, u8_r(arena + offset_of(VArena, reserve))[0]);
-}
-
-I_ void varena_reset__u(U8 arena) {
-	if (arena == null) { return; }
-	u8_r(arena + offset_of(VArena, commit_used))[0] = 0;
-}
-
-I_ void varena_rewind__u(U8 arena, U8 sp_type_sig, U8 sp_slot) {
-	if (arena == null) { return; }
-	assert(sp_type_sig == (U8) varena_allocator_proc);
-	U8 header = varena__header_size();
-	if (sp_slot < header) { sp_slot = header; }
-	u8_r(arena + offset_of(VArena, commit_used))[0] = sp_slot;
-}
-
-I_ void varena_save__u(U8 arena, U8 sp_addr) {
-	if (sp_addr == null) { return; }
-	u8_r(sp_addr + offset_of(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
-	u8_r(sp_addr + offset_of(AllocatorSP, slot    ))[0] = u8_r(arena + offset_of(VArena, commit_used))[0];
-}
-
-void varena__push__u(U8 arena, U8 amount, U8 type_width, U8 alignment, U8 result) {
-	if (result == null || arena == null) { return; }
+inline
+void varena__push__u(U8 vm, U8 amount, U8 type_width, U8 alignment, U8 result) {
+	assert(result != null);
+	assert(vm != null);
 	if (amount == 0) { struct_zero(Slice_Mem, result); return; }
-	U8 reg align          = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
-	U8 reg requested_size = amount * type_width;
+	U8 align          = alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT;
+	U8 requested_size = amount * type_width;
 	U8 reg aligned_size = align_pow2(requested_size, align);
-	U8 reg reserve_start  = u8_r(arena + offset_of(VArena, reserve_start))[0];
-	U8_R reg commit_used  = u8_r(arena + offset_of(VArena, commit_used));
-	U8 reg current_offset = reserve_start + commit_used[0];
-	U8 reg reserve_total  = u8_r(arena + offset_of(VArena, reserve))[0];
-	U8 reg reserve_left   = reserve_total - commit_used[0];
+	U8_R reg commit_used = u8_r(vm + offset_of(VArena, commit_used ));
+	U8 reg reserve_left  = u8_r(vm + offset_of(VArena, reserve     ))[0] - commit_used[0];
 	if (aligned_size > reserve_left) { struct_zero(Slice_Mem, result); return; }
-	U8 reg committed   = u8_r(arena + offset_of(VArena, committed))[0];
-	U8 reg commit_size = u8_r(arena + offset_of(VArena, commit_size))[0];
-	U8 reg commit_left = committed - commit_used[0];
+	U8 reg committed = u8_r(vm + offset_of(VArena, committed   ))[0];
+	U8 commit_left   = committed - commit_used[0];
 	if (commit_left < aligned_size) {
+		U8 reg commit_size = u8_r(vm + offset_of(VArena, commit_size))[0];
 		U8 reg next_commit = reserve_left > aligned_size ? max(commit_size, aligned_size) : reserve_left;
 		if (next_commit != 0) {
-			B4 no_large = (u4_r(arena + offset_of(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0;
-			U8 reg next_commit_start = arena + committed;
-			if (! os_vmem_commit(next_commit_start, next_commit, .no_large_pages = no_large)) {
+			B4 no_large = (u4_r(vm + offset_of(VArena, flags))[0] & VArenaFlag_NoLargePages) != 0;
+			U8 reg next_commit_start = vm + committed;
+			if (os_vmem_commit__u(next_commit_start, next_commit) == false) {
 				struct_zero(Slice_Mem, result);
 				return;
 			}
 			committed += next_commit;
-			u8_r(arena + offset_of(VArena, committed))[0] = committed;
+			u8_r(vm + offset_of(VArena, committed))[0] = committed;
 		}
 	}
-	commit_used[0] += aligned_size;
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(current_offset, requested_size));
+	commit_used[0] += aligned_size; {
+		U8 reg current_offset = u8_r(vm + offset_of(VArena, reserve_start))[0] + commit_used[0];
+		struct_copy(Slice_Mem, result, (U8)& slice_mem(current_offset, requested_size));
+	}
 }
-void varena__grow__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
-	if (result == null || arena == null) { return; }
-	if (old_ptr == 0 || requested_size <= old_len) {
-		struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
-		return;
-	}
-	U8_R reg commit_used  = u8_r(arena + offset_of(VArena, commit_used));
-	U8 reg reserve_start  = u8_r(arena + offset_of(VArena, reserve_start))[0];
-	U8 reg current_offset = reserve_start + commit_used[0];
-	if (old_ptr + old_len != current_offset) {
-		struct_zero(Slice_Mem, result);
-		return;
-	}
+inline
+void varena__grow__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment, B4 should_zero) {
+	assert(vm != null);
+	assert(result != null);
 	U8 reg grow_amount = requested_size - old_len;
-	uvar(Slice_Mem, extra) = {0};
-	varena__push__u(arena, grow_amount, 1, alignment, u8_(extra));
-	U8 extra_ptr = u8_r(extra + offset_of(Slice_Mem, ptr))[0];
-	U8 extra_len = u8_r(extra + offset_of(Slice_Mem, len))[0];
-	if (extra_ptr == 0) {
-		struct_zero(Slice_Mem, result);
-		return;
-	}
-	U8 reg new_len = old_len + extra_len;
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, new_len));
-	if (should_zero && grow_amount != 0) {
-		memory_zero(old_ptr + old_len, grow_amount);
-	}
+	if (grow_amount == 0) { struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, old_len)); return; }
+	U8 reg current_offset = u8_r(vm + offset_of(VArena, reserve_start))[0] + u8_r(vm + offset_of(VArena, commit_used))[0];
+	// Growing when not the last allocation not allowed
+	assert(old_ptr == current_offset);
+	uvar(Slice_Mem, allocation); varena__push__u(vm, grow_amount, 1, alignment, u8_(allocation));
+}
+void varena__shrink__u(U8 result, U8 vm, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) {
+	assert(vm != null);
+	assert(result != null);
+}
+I_ void varena_release__u(U8 vm) {
+	assert(vm != null);
+	os_vmem_release__u(vm, u8_r(vm + offset_of(VArena, reserve))[0]);
+}
+I_ void varena_reset__u(U8 vm) {
+	assert(vm != null);
+	u8_r(vm + offset_of(VArena, commit_used))[0] = 0;
+}
+I_ void varena_rewind__u(U8 vm, U8 sp_type_sig, U8 sp_slot) {
+	assert(vm != null);
+	assert(sp_type_sig == (U8) varena_allocator_proc);
+	U8 reg header = varena_header_size();
+	if (sp_slot < header) { sp_slot = header; }
+	u8_r(vm + offset_of(VArena, commit_used))[0] = sp_slot;
+}
+I_ void varena_save__u(U8 vm, U8 sp_addr) {
+	assert(vm != null);
+	assert(sp_addr != null);
+	u8_r(sp_addr + offset_of(AllocatorSP, type_sig))[0] = (U8) varena_allocator_proc;
+	u8_r(sp_addr + offset_of(AllocatorSP, slot    ))[0] = u8_r(vm + offset_of(VArena, commit_used))[0];
 }

-void varena__shrink__u(U8 result, U8 arena, U8 old_ptr, U8 old_len, U8 requested_size, U8 alignment) {
-	if (result == null || arena == null) { return; }
-	if (old_ptr == 0 || requested_size >= old_len) {
-		struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, min(requested_size, old_len)));
-		return;
-	}
-	U8_R reg commit_used  = u8_r(arena + offset_of(VArena, commit_used));
-	U8 reg reserve_start  = u8_r(arena + offset_of(VArena, reserve_start))[0];
-	U8 reg current_offset = reserve_start + commit_used[0];
-	if (old_ptr + old_len != current_offset) {
-		struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
-		return;
-	}
-	U8 reg aligned_original = align_pow2(old_len, MEMORY_ALIGNMENT_DEFAULT);
-	U8 reg aligned_new      = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
-	if (aligned_new > aligned_original) { aligned_new = aligned_original; }
-	commit_used[0] -= (aligned_original - aligned_new);
-	struct_copy(Slice_Mem, result, (U8)& slice_mem(old_ptr, requested_size));
+I_ VArena* varena__make(Opts_varena_make*R_ opts) {
+	assert(opts != nullptr);
+	return cast(VArena*, varena__make__u(opts->reserve_size, opts->commit_size, opts->flags, opts->base_addr));
 }
-
-VArena* varena__make(Opts_varena_make* opts) {
-	return cast(VArena*, varena__make__u(opts));
-}
-
-Slice_Mem varena__push(VArena_R arena, U8 amount, U8 type_width, Opts_varena* opts) {
+I_ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena* opts) {
 	Slice_Mem result;
-	varena__push__u(u8_(arena), amount, type_width, opts ? opts->alignment : 0, u8_(& result));
+	varena__push__u(u8_(vm), amount, type_width, opts ? opts->alignment : 0, u8_(& result));
+	return result;
+}
+I_ Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, Opts_varena* opts) {
+	Slice_Mem result;
+	varena__shrink__u(u8_(& result), u8_(vm), old_allocation.ptr, old_allocation.len, requested_size, opts ? opts->alignment : 0);
 	return result;
 }

-void varena_release(VArena_R arena) { varena_release__u(u8_(arena)); }
-void varena_reset  (VArena_R arena) { varena_reset__u  (u8_(arena)); }
+I_ void varena_release(VArena_R vm) { varena_release__u(u8_(vm)); }
+I_ void varena_reset  (VArena_R vm) { varena_reset__u  (u8_(vm)); }

-void varena_rewind(VArena_R arena, AllocatorSP save_point) {
-	varena_rewind__u(u8_(arena), u8_(save_point.type_sig), save_point.slot);
+I_ void varena_rewind (VArena_R vm, AllocatorSP save_point) {
+	varena_rewind__u(u8_(vm), u8_(save_point.type_sig), save_point.slot);
 }
+I_ AllocatorSP varena_save(VArena_R vm) { AllocatorSP sp; varena_save__u(u8_(vm), u8_(& sp)); return sp; }

-Slice_Mem varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size, Opts_varena* opts) {
-	Slice_Mem result;
-	varena__shrink__u(u8_(& result), u8_(arena), old_allocation.ptr, old_allocation.len, requested_size, opts ? opts->alignment : 0);
-	return result;
-}
-
-AllocatorSP varena_save(VArena_R arena) {
-	AllocatorSP sp;
-	varena_save__u(u8_(arena), u8_(& sp));
-	return sp;
-}
-
-void varena_allocator_proc(U8 arena, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, U8 out_addr)
+void varena_allocator_proc(U8 vm, U8 requested_size, U8 alignment, U8 old_ptr, U8 old_len, U4 op, U8 out_addr)
 {
-	AllocatorProc_Out* out = cast(AllocatorProc_Out*, out_addr);
-	U8 allocation_addr = out_addr ? out_addr + offset_of(AllocatorProc_Out, allocation) : 0;
-	if (arena == null) {
-		if (allocation_addr) { struct_zero(Slice_Mem, allocation_addr); }
-		return;
-	}
+	assert(vm != null);
+	assert(out_addr != null);
+	U8 out_allocation = out_addr ? out_addr + offset_of(AllocatorProc_Out, allocation) : 0;
 	switch (op)
 	{
 		case AllocatorOp_Alloc:
 		case AllocatorOp_Alloc_NoZero:
-			if (allocation_addr) {
-				varena__push__u(arena, requested_size, 1, alignment, allocation_addr);
-				if (op == AllocatorOp_Alloc) {
-					U8 ptr = u8_r(allocation_addr + offset_of(Slice_Mem, ptr))[0];
-					U8 len = u8_r(allocation_addr + offset_of(Slice_Mem, len))[0];
-					if (ptr && len) { memory_zero(ptr, len); }
-				}
+			varena__push__u(vm, requested_size, 1, alignment, out_allocation);
+			if (op == AllocatorOp_Alloc) {
+				U8 ptr = u8_r(out_allocation + offset_of(Slice_Mem, ptr))[0];
+				U8 len = u8_r(out_allocation + offset_of(Slice_Mem, len))[0];
+				if (ptr && len) { memory_zero(ptr, len); }
 			}
 		break;

-		case AllocatorOp_Free:
-		break;
-
-		case AllocatorOp_Reset:
-			varena_reset__u(arena);
-		break;
-
+		case AllocatorOp_Free:  break;
+		case AllocatorOp_Reset: varena_reset__u(vm); break;
 		case AllocatorOp_Grow:
 		case AllocatorOp_Grow_NoZero:
-			if (allocation_addr) {
-				varena__grow__u(allocation_addr, arena, old_ptr, old_len, requested_size, alignment, op - AllocatorOp_Grow_NoZero);
-			}
-		break;
-
+			varena__grow__u(out_allocation, vm, old_ptr, old_len, requested_size, alignment, op - AllocatorOp_Grow_NoZero);
+		break;
 		case AllocatorOp_Shrink:
-			if (allocation_addr) {
-				varena__shrink__u(allocation_addr, arena, old_ptr, old_len, requested_size, alignment);
-			}
-		break;
-
-		case AllocatorOp_Rewind:
-			varena_rewind__u(arena, old_ptr, old_len);
-		break;
-
-		case AllocatorOp_SavePoint:
-			if (out_addr) { varena_save__u(arena, out_addr + offset_of(AllocatorProc_Out, save_point)); }
-		break;
-
+			varena__shrink__u(out_allocation, vm, old_ptr, old_len, requested_size, alignment);
+		break;
+		case AllocatorOp_Rewind:    varena_rewind__u(vm, old_ptr, old_len); break;
+		case AllocatorOp_SavePoint: varena_save__u (vm, out_addr + offset_of(AllocatorProc_Out, save_point)); break;
 		case AllocatorOp_Query:
-			if (out_addr) {
 			u4_r(out_addr + offset_of(AllocatorQueryInfo, features))[0] =
 				  AllocatorQuery_Alloc
 				| AllocatorQuery_Reset
 				| AllocatorQuery_Resize
 				| AllocatorQuery_Rewind;
-			U8 reserve   = u8_r(arena + offset_of(VArena, reserve))[0];
-			U8 committed = u8_r(arena + offset_of(VArena, committed))[0];
+			U8 reserve   = u8_r(vm + offset_of(VArena, reserve  ))[0];
+			U8 committed = u8_r(vm + offset_of(VArena, committed))[0];
 			U8 max_alloc = (reserve > committed) ? (reserve - committed) : 0;
 			u8_r(out_addr + offset_of(AllocatorQueryInfo, max_alloc))[0] = max_alloc;
 			u8_r(out_addr + offset_of(AllocatorQueryInfo, min_alloc))[0] = kilo(4);
 			u8_r(out_addr + offset_of(AllocatorQueryInfo, left     ))[0] = max_alloc;
-			AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(arena + offset_of(VArena, commit_used))[0] };
+			AllocatorSP sp = { .type_sig = varena_allocator_proc, .slot = u8_r(vm + offset_of(VArena, commit_used))[0] };
 			struct_copy(AllocatorSP, out_addr + offset_of(AllocatorQueryInfo, save_point), (U8)& sp);
-			}
-		break;
-
-		default:
-		break;
+		break;
 	}
 }
 #pragma endregion VArena
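The import block above binds snake_case prototypes to the real Win32 exports with GCC/Clang `__asm__` symbol labels, so `Windows.h` never has to be included and the signatures can stay typeless. A self-contained sketch of the same technique against a libc symbol (hedged: an asm label bypasses any platform name mangling, so it must match the linker-level symbol exactly):

```c
// Sketch of __asm__ symbol aliasing: my_puts is the local name,
// "puts" is the linker symbol the call resolves to.
int my_puts(char const* str) __asm__("puts");

int main(void) {
	my_puts("hello from an aliased import");
	return 0;
}
```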
@@ -117,6 +117,8 @@ enum { false = 0, true = 1, true_overflow, };

 #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
 #define v_(ptr) cast(typeof_ptr(ptr)*V_, ptr)
+#define tr_(type, ptr) cast(type*R_, ptr)
+#define tv_(type, ptr) cast(type*V_, ptr)

 #define kilo(n) (cast(U8, n) << 10)
 #define mega(n) (cast(U8, n) << 20)
@@ -197,7 +199,17 @@ typedef def_farray(B1, 2);
 typedef def_farray(B1, 4);
 typedef def_farray(B1, 8);

-finline U8 align_pow2(U8 x, U8 b);
+finline U8 mem_copy            (U8 dest, U8 src, U8 len)   { return (U8)(__builtin_memcpy ((void*)dest, (void const*)src, len)); }
+finline U8 mem_copy_overlapping(U8 dest, U8 src, U8 len)   { return (U8)(__builtin_memmove((void*)dest, (void const*)src, len)); }
+finline U8 mem_fill            (U8 dest, U8 value, U8 len) { return (U8)(__builtin_memset ((void*)dest, (int) value, len)); }
+finline B4 mem_zero            (U8 dest, U8 len)           { if (dest == 0) return false; mem_fill(dest, 0, len); return true; }
+
+finline
+U8 align_pow2(U8 x, U8 b) {
+	assert(b != 0);
+	assert((b & (b - 1)) == 0); // Check power of 2
+	return ((x + b - 1) & (~(b - 1)));
+}

 #define align_struct(type_width) ((U8)(((type_width) + 7) / 8 * 8))

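`align_pow2` rounds `x` up to the next multiple of the power of two `b` by adding `b - 1` and masking off the low bits. Worked through once: `align_pow2(13, 8)` computes `13 + 7 = 20` (binary `10100`), then `20 & ~7 = 16`. A few hedged sanity checks against the definition added above:

```c
// Assumes the align_pow2 definition from this hunk.
assert(align_pow2(13, 8) == 16); // 13 + 7 = 20; 20 & ~7 = 16
assert(align_pow2(16, 8) == 16); // aligned inputs are unchanged
assert(align_pow2( 1, 4) ==  4);
```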
@@ -206,10 +218,6 @@ finline U8 align_pow2(U8 x, U8 b);
 	assert(point <= end); \
 } while(0)

-U8 mem_copy            (U8 dest, U8 src, U8 length);
-U8 mem_copy_overlapping(U8 dest, U8 src, U8 length);
-B4 mem_zero            (U8 dest, U8 length);
-
 #define check_nil(nil, p) ((p) == 0 || (p) == nil)
 #define set_nil(nil, p) ((p) = nil)

@@ -243,13 +251,20 @@ typedef def_Slice(B1);
 #define slice_to_bytes(slice) ((Slice_B1){cast(B1*, (slice).ptr), (slice).len * size_of_slice_type(slice)})
 #define slice_fmem(mem) slice_mem(u8_(mem), size_of(mem))

-finline void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth);
-finline void slice__zero(Slice_B1 mem, U8 typewidth);
+finline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); mem_zero(u8_(mem.ptr), mem.len); }
+#define slice_zero(slice) slice__zero(slice_mem_s(slice), size_of_slice_type(slice))
+
+finline
+void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
+	assert(dest.len >= src.len);
+	slice_assert(dest);
+	slice_assert(src);
+	mem_copy(u8_(dest.ptr), u8_(src.ptr), src.len);
+}
 #define slice_copy(dest, src) do { \
 	static_assert(typeof_same(dest, src)); \
 	slice__copy(slice_to_bytes(dest), size_of_slice_type(dest), slice_to_bytes(src), size_of_slice_type(src)); \
 } while (0)
-#define slice_zero(slice) slice__zero(slice_mem_s(slice), size_of_slice_type(slice))

 #define slice_iter(container, iter) (typeof((container).ptr) iter = (container).ptr; iter != slice_end(container); ++ iter)
 #define slice_arg_from_array(type, ...) & (tmpl(Slice,type)) { .ptr = farray_init(type, __VA_ARGS__), .len = farray_len( farray_init(type, __VA_ARGS__)) }
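`slice_copy` is a statement macro: it first static_asserts that `dest` and `src` share an element type (via `typeof_same`, defined elsewhere in this header), then forwards byte views to the `slice__copy` defined above, which asserts the destination is large enough. A hedged usage sketch (the slice type and variables here are hypothetical):

```c
// Hypothetical call site; Slice_U4 assumed declared via def_Slice(U4).
Slice_U4 dst = { dst_ptr, 8 };
Slice_U4 src = { src_ptr, 8 };
slice_copy(dst, src); // compile-time element-type check, then a byte copy
```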
@@ -458,7 +473,7 @@ VArena* varena__make(Opts_varena_make*R_ opts);
 finline void varena_release(VArena_R arena);
 finline void varena_rewind (VArena_R arena, AllocatorSP save_point);
 void varena_reset (VArena_R arena);
-Slice_Mem varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts);
+Slice_Mem varena__shrink(VArena_R arena, Slice_Mem old_allocation, U8 requested_size);
 finline AllocatorSP varena_save (VArena_R arena);

 void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out_R out);
@@ -783,34 +798,6 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines);

 #pragma region Implementation

-#pragma region Memory Operations
-void* __cdecl memcpy (void*R_ _Dst, void const*R_ _Src, U8 _Size);
-void* __cdecl memmove(void* _Dst, void const* _Src, U8 _Size);
-void* __cdecl memset (void*R_ _Dst, int _Val, U8 _Size);
-finline
-U8 align_pow2(U8 x, U8 b) {
-	assert(b != 0);
-	assert((b & (b - 1)) == 0); // Check power of 2
-	return ((x + b - 1) & (~(b - 1)));
-}
-U8 memory_copy            (U8 dest, U8 src, U8 len) __asm__("memcpy");
-U8 memory_copy_overlapping(U8 dest, U8 src, U8 len) __asm__("memmove");
-finline
-B4 memory_zero(U8 dest, U8 length) {
-	if (dest == 0) return false;
-	memset((void*R_)dest, 0, length);
-	return true;
-}
-finline void slice__zero(Slice_B1 mem, U8 typewidth) { slice_assert(mem); memory_zero(u8_(mem.ptr), mem.len); }
-finline
-void slice__copy(Slice_B1 dest, U8 dest_typewidth, Slice_B1 src, U8 src_typewidth) {
-	assert(dest.len >= src.len);
-	slice_assert(dest);
-	slice_assert(src);
-	memory_copy(u8_(dest.ptr), u8_(src.ptr), src.len);
-}
-#pragma endregion Memory Operations
-
 #pragma region Allocator Interface
 finline
 AllocatorQueryInfo allocator_query(AllocatorInfo ainfo) {
@@ -916,24 +903,58 @@ Slice_Mem farena__push(FArena_R arena, U8 amount, U8 type_width, Opts_farena*R_
 	if (amount == 0) { return (Slice_Mem){}; }
 	U8 desired   = type_width * amount;
 	U8 to_commit = align_pow2(desired, opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT);
-	U8 unused    = arena->capacity - arena->used;
-	assert(to_commit <= unused);
+	U8 unused    = arena->capacity - arena->used; assert(to_commit <= unused);
 	U8 ptr = arena->start + arena->used;
 	arena->used += to_commit;
 	return (Slice_Mem){ptr, desired};
 }
+inline
+Slice_Mem farena__grow(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 should_zero) {
+	// Check if the allocation is at the end of the arena
+	U8 alloc_end = old_allocation.ptr + old_allocation.len;
+	U8 arena_end = arena->start + arena->used;
+	if (alloc_end != arena_end) {
+		// Not at the end, can't grow in place
+		return (Slice_Mem){0};
+	}
+	// Calculate growth
+	U8 grow_amount  = requested_size - old_allocation.len;
+	U8 aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+	U8 unused       = arena->capacity - arena->used;
+	if (aligned_grow > unused) {
+		// Not enough space
+		return (Slice_Mem){0};
+	}
+	arena->used += aligned_grow;
+	Slice_Mem result = (Slice_Mem){ old_allocation.ptr, aligned_grow + requested_size };
+	mem_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(U8, should_zero));
+	return result;
+}
+inline
+Slice_Mem farena__shrink(FArena_R arena, Slice_Mem old_allocation, U8 requested_size, U8 alignment)
+{
+	// Check if the allocation is at the end of the arena
+	U8 alloc_end = old_allocation.ptr + old_allocation.len;
+	U8 arena_end = arena->start + arena->used;
+	if (alloc_end != arena_end) {
+		// Not at the end, can't shrink but return adjusted size
+		return (Slice_Mem){old_allocation.ptr, requested_size};
+	}
+	U8 aligned_original = align_pow2(old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
+	U8 aligned_new      = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+	arena->used -= (aligned_original - aligned_new);
+	return (Slice_Mem){old_allocation.ptr, requested_size};
+}
 finline void farena_reset(FArena_R arena) { arena->used = 0; }
 finline
 void farena_rewind(FArena_R arena, AllocatorSP save_point) {
 	assert(save_point.type_sig == & farena_allocator_proc);
-	U8 end = arena->start + arena->used;
-	assert_bounds(save_point.slot, arena->start, end);
+	U8 end = arena->start + arena->used; assert_bounds(save_point.slot, arena->start, end);
 	arena->used -= save_point.slot - arena->start;
 }
 finline
 AllocatorSP farena_save (FArena arena) {
-	AllocatorSP sp = { .type_sig = & farena_allocator_proc, .slot = arena.used };
-	return sp;
+	return (AllocatorSP){ .type_sig = & farena_allocator_proc, .slot = arena.used };
 }
 void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
 {
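The new `farena__grow` only succeeds when the old block is the arena's most recent allocation, i.e. when it ends exactly at `start + used`; anything else returns a zero slice rather than moving memory. A hedged sketch of the calling pattern that implies (call shapes assumed from this diff):

```c
// Hypothetical: only the most recent allocation can grow in place.
Slice_Mem a = farena_push_mem(arena, 64);
Slice_Mem b = farena_push_mem(arena, 64);            // b now ends the arena
Slice_Mem b2 = farena__grow(arena, b, 128, 0, true); // ok: grows in place
Slice_Mem a2 = farena__grow(arena, a, 128, 0, true); // returns (Slice_Mem){0}
```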
@@ -945,64 +966,22 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
 		case AllocatorOp_Alloc:
 		case AllocatorOp_Alloc_NoZero:
 			out->allocation = farena_push_mem(arena, in.requested_size, .alignment = in.alignment);
-			memory_zero(out->allocation.ptr, out->allocation.len * in.op);
+			mem_zero(out->allocation.ptr, out->allocation.len * in.op);
 		break;

-		case AllocatorOp_Free:
-		break;
-		case AllocatorOp_Reset:
-			farena_reset(arena);
-		break;
-
+		case AllocatorOp_Free:  break;
+		case AllocatorOp_Reset: farena_reset(arena); break;
 		case AllocatorOp_Grow:
-		case AllocatorOp_Grow_NoZero: {
-			// Check if the allocation is at the end of the arena
-			U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-			U8 arena_end = arena->start + arena->used;
-			if (alloc_end != arena_end) {
-				// Not at the end, can't grow in place
-				out->allocation = (Slice_Mem){0};
-				break;
-			}
-			// Calculate growth
-			U8 grow_amount  = in.requested_size - in.old_allocation.len;
-			U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-			U8 unused       = arena->capacity - arena->used;
-			if (aligned_grow > unused) {
-				// Not enough space
-				out->allocation = (Slice_Mem){0};
-				break;
-			}
-			arena->used += aligned_grow;
-			out->allocation = (Slice_Mem){ in.old_allocation.ptr, aligned_grow + in.requested_size };
-			memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * in.op - AllocatorOp_Grow_NoZero);
-		}
+		case AllocatorOp_Grow_NoZero:
+			out->allocation = farena__grow(arena, in.old_allocation, in.requested_size, in.alignment, in.op - AllocatorOp_Grow_NoZero);
+		break;
+		case AllocatorOp_Shrink:
+			out->allocation = farena__shrink(arena, in.old_allocation, in.requested_size, in.alignment);
 		break;

-		case AllocatorOp_Shrink: {
-			// Check if the allocation is at the end of the arena
-			U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-			U8 arena_end = arena->start + arena->used;
-			if (alloc_end != arena_end) {
-				// Not at the end, can't shrink but return adjusted size
-				out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
-				break;
-			}
-			// Calculate shrinkage
-			//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
-			U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
-			U8 aligned_new      = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-			arena->used -= (aligned_original - aligned_new);
-			out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
-		}
-		break;
-
-		case AllocatorOp_Rewind:
-			farena_rewind(arena, in.save_point);
-		break;
-		case AllocatorOp_SavePoint:
-			out->save_point = farena_save(arena[0]);
-		break;
-
+		case AllocatorOp_Rewind:    farena_rewind(arena, in.save_point); break;
+		case AllocatorOp_SavePoint: out->save_point = farena_save(arena[0]); break;
 		case AllocatorOp_Query:
 			out->features =
@@ -1073,15 +1052,10 @@ W_ MS_LPVOID VirtualAlloc(MS_LPVOID lpAddress, U8 dwSize, MS_DWORD flAllocationT
 W_ MS_BOOL VirtualFree (MS_LPVOID lpAddress, U8 dwSize, MS_DWORD dwFreeType);
 #pragma warning(pop)

-typedef def_struct(OS_Windows_State) {
-	OS_SystemInfo system_info;
-};
+typedef def_struct(OS_Windows_State) { OS_SystemInfo system_info; };
 global OS_Windows_State os__windows_info;

-finline
-OS_SystemInfo* os_system_info(void) {
-	return & os__windows_info.system_info;
-}
+finline OS_SystemInfo* os_system_info(void) { return & os__windows_info.system_info; }
 inline
 void os__enable_large_pages(void) {
 	MS_HANDLE token;
@@ -1125,6 +1099,8 @@ inline void os_vmem_release(U8 vm, U8 size) { VirtualFree(cast(MS_LPVOID, vm),
 #pragma endregion OS

 #pragma region VArena (Virutal Address Space Arena)
+finline U8 varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
+
 inline
 VArena* varena__make(Opts_varena_make*R_ opts) {
 	assert(opts != nullptr);
@@ -1136,9 +1112,8 @@ VArena* varena__make(Opts_varena_make*R_ opts) {
 	U8 base = os_vmem_reserve(reserve_size, .base_addr = opts->base_addr, .no_large_pages = no_large_pages);
 	assert(base != 0);
 	os_vmem_commit(base, commit_size, .no_large_pages = no_large_pages);
-	U8 header_size = align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT);
-	VArena* vm = cast(VArena*, base);
-	r_(vm)[0] = (VArena){
+	U8 header_size = varena_header_size();
+	VArena* vm = cast(VArena*, base); r_(vm)[0] = (VArena){
 		.reserve_start = base + header_size,
 		.reserve = reserve_size,
 		.commit_size = commit_size,
@@ -1150,6 +1125,7 @@ VArena* varena__make(Opts_varena_make*R_ opts) {
 }
 inline
 Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opts) {
+	assert(vm != nullptr);
 	assert(amount != 0);
 	U8 alignment = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
 	U8 requested_size = amount * type_width;
@@ -1158,8 +1134,7 @@ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opt
 	U8 to_be_used = vm->commit_used + aligned_size;
 	U8 reserve_left = vm->reserve - vm->commit_used;
 	U8 commit_left = vm->committed - vm->commit_used;
-	B4 exhausted = commit_left < to_be_used;
-	assert(to_be_used < reserve_left);
+	B4 exhausted = commit_left < to_be_used; assert(to_be_used < reserve_left);
 	if (exhausted)
 	{
 		U8 next_commit_size = reserve_left > 0 ?
@@ -1169,30 +1144,31 @@ Slice_Mem varena__push(VArena_R vm, U8 amount, U8 type_width, Opts_varena*R_ opt
 U8 next_commit_start = u8_(vm) + vm->committed;
 B4 no_large_pages = (vm->flags & VArenaFlag_NoLargePages) != 0;
 B4 commit_result = os_vmem_commit(next_commit_start, next_commit_size, .no_large_pages = no_large_pages);
-if (commit_result == false) {
-return (Slice_Mem){0};
-}
+if (commit_result == false) { return (Slice_Mem){0}; }
 vm->committed += next_commit_size;
 }
 }
 vm->commit_used = to_be_used;
 return (Slice_Mem){.ptr = current_offset, .len = requested_size};
 }
+inline
+Slice_Mem varena__grow(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, U8 alignment, B4 no_zero) {
+U8 grow_amount = requested_size - old_allocation.len;
+if (grow_amount == 0) { return old_allocation; } // Growing when not the last allocation not allowed
+U8 current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == current_offset);
+Slice_Mem allocation = varena_push_mem(vm, grow_amount, alignment); assert(allocation.ptr != 0);
+Slice_Mem result = (Slice_Mem){ old_allocation.ptr, requested_size + allocation.len };
+mem_zero(result.ptr, result.len * no_zero);
+return result;
+}
 finline void varena_release(VArena_R arena) { os_vmem_release(u8_(arena), arena->reserve); }
 inline
-Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size, Opts_varena*R_ opts) {
-assert(opts != nullptr);
-Slice_Mem result = {0};
-U8 current_offset = vm->reserve_start + vm->commit_used;
-U8 shrink_amount = old_allocation.len - requested_size;
-if (lt_s(shrink_amount, 0)) {
-result = old_allocation;
-return result;
-}
-assert(old_allocation.ptr == current_offset);
-vm->commit_used -= shrink_amount;
-result = (Slice_Mem){ old_allocation.ptr, requested_size };
-return result;
+Slice_Mem varena__shrink(VArena_R vm, Slice_Mem old_allocation, U8 requested_size) {
+U8 shrink_amount = old_allocation.len - requested_size;
+if (lt_s(shrink_amount, 0)) { return old_allocation; }
+U8 current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == current_offset);
+vm->commit_used -= shrink_amount;
+return (Slice_Mem){ old_allocation.ptr, requested_size };
 }
 finline
 void varena_rewind(VArena_R vm, AllocatorSP sp) {
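
Note: the extracted varena__grow enforces a hard invariant — only the arena's most recent (tail) allocation may grow in place; anything else trips the assert on current_offset. A minimal self-contained sketch of that tail-growth rule, in plain C rather than the repo's U8/Slice_Mem aliases (all names here are illustrative):

#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef struct { unsigned char* ptr; size_t len; } Slice;
typedef struct { unsigned char* start; size_t used, capacity; } Cursor;

// Grow `old` in place; legal only when it ends exactly at the cursor,
// mirroring assert(old_allocation.ptr == current_offset) above.
static Slice cursor_grow(Cursor* c, Slice old, size_t new_len, int zero) {
    assert(old.ptr + old.len == c->start + c->used); // tail-allocation invariant
    size_t extra = new_len - old.len;
    if (c->used + extra > c->capacity) { return (Slice){0}; }
    if (zero) { memset(c->start + c->used, 0, extra); }
    c->used += extra;
    return (Slice){ old.ptr, new_len };
}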
@@ -1209,50 +1185,22 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 case AllocatorOp_Alloc:
 case AllocatorOp_Alloc_NoZero:
 out->allocation = varena_push_mem(vm, in.requested_size, .alignment = in.alignment);
-memory_zero(out->allocation.ptr, out->allocation.len * in.op);
+mem_zero(out->allocation.ptr, out->allocation.len * in.op);
 break;
 
-case AllocatorOp_Free:
-break;
-case AllocatorOp_Reset:
-vm->commit_used = 0;
-break;
+case AllocatorOp_Free: break;
+case AllocatorOp_Reset: vm->commit_used = 0; break;
 
 case AllocatorOp_Grow_NoZero:
-case AllocatorOp_Grow: {
-U8 grow_amount = in.requested_size - in.old_allocation.len;
-if (grow_amount == 0) {
-out->allocation = in.old_allocation;
-return;
-}
-U8 current_offset = vm->reserve_start + vm->commit_used;
-// Growing when not the last allocation not allowed
-assert(in.old_allocation.ptr == current_offset);
-Slice_Mem allocation = varena_push_mem(vm, grow_amount, .alignment = in.alignment);
-assert(allocation.ptr != 0);
-out->allocation = (Slice_Mem){ in.old_allocation.ptr, in.requested_size + allocation.len };
-memory_zero(out->allocation.ptr, out->allocation.len * (in.op - AllocatorOp_Grow_NoZero));
-}
+case AllocatorOp_Grow:
+out->allocation = varena__grow(vm, in.old_allocation, in.requested_size, in.alignment, in.op - AllocatorOp_Grow_NoZero);
 break;
-case AllocatorOp_Shrink: {
-U8 current_offset = vm->reserve_start + vm->commit_used;
-U8 shrink_amount = in.old_allocation.len - in.requested_size;
-if (lt_s(shrink_amount, 0)) {
-out->allocation = in.old_allocation;
-return;
-}
-assert(in.old_allocation.ptr == current_offset);
-vm->commit_used -= shrink_amount;
-out->allocation = (Slice_Mem){ in.old_allocation.ptr, in.requested_size };
-}
+case AllocatorOp_Shrink:
+out->allocation = varena__shrink(vm, in.old_allocation, in.requested_size);
 break;
 
-case AllocatorOp_Rewind:
-vm->commit_used = in.save_point.slot;
-break;
-case AllocatorOp_SavePoint:
-out->save_point = varena_save(vm);
-break;
+case AllocatorOp_Rewind: vm->commit_used = in.save_point.slot; break;
+case AllocatorOp_SavePoint: out->save_point = varena_save(vm); break;
 
 case AllocatorOp_Query:
 out->features =
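
Note: the Alloc and Grow cases zero conditionally through arithmetic on the op code rather than a branch — mem_zero receives a length multiplied by in.op (or in.op - AllocatorOp_Grow_NoZero), so the NoZero variants degrade to a zero-length call. The idiom in isolation, assuming the NoZero op is 0 and each zeroing op sits one above its NoZero sibling (enum values here are hypothetical):

#include <string.h>
#include <stddef.h>

enum { Op_Alloc_NoZero = 0, Op_Alloc = 1 }; // hypothetical layout

// len * op is 0 on the NoZero path and len on the zeroing path;
// memset with length 0 is a well-defined no-op, so no branch is needed.
static void alloc_maybe_zero(unsigned char* p, size_t len, int op) {
    memset(p, 0, len * (size_t)op);
}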
@@ -1277,8 +1225,7 @@ Arena* arena__make(Opts_arena_make*R_ opts) {
 U8 header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
 VArena_R current = varena__make(opts);
 assert(current != nullptr);
-Arena* arena = varena_push(current, Arena);
-r_(arena)[0] = (Arena){
+Arena* arena = varena_push(current, Arena); r_(arena)[0] = (Arena){
 .backing = current,
 .prev = nullptr,
 .current = arena,
@@ -1354,86 +1301,80 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out*R_ out)
 assert(arena != nullptr);
 switch (in.op)
 {
 case AllocatorOp_Alloc:
 case AllocatorOp_Alloc_NoZero:
 out->allocation = arena_push_mem(arena, in.requested_size, .alignment = in.alignment);
-memory_zero(out->allocation.ptr, out->allocation.len * in.op);
+mem_zero(out->allocation.ptr, out->allocation.len * in.op);
 break;
-case AllocatorOp_Free:
-break;
-case AllocatorOp_Reset:
-arena_reset(arena);
-break;
+case AllocatorOp_Free: break;
+case AllocatorOp_Reset: arena_reset(arena); break;
 
 case AllocatorOp_Grow:
 case AllocatorOp_Grow_NoZero: {
 Arena_R active = arena->current;
 U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
 U8 arena_end = u8_(active) + active->pos;
 if (alloc_end == arena_end)
 {
 U8 grow_amount = in.requested_size - in.old_allocation.len;
 U8 aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
 if (active->pos + aligned_grow <= active->backing->reserve)
 {
 Slice_Mem vresult = varena_push_mem(active->backing, aligned_grow, .alignment = in.alignment);
 if (vresult.ptr != null)
 {
 active->pos += aligned_grow;
 out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
-memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * in.op - AllocatorOp_Grow_NoZero);
+mem_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * in.op - AllocatorOp_Grow_NoZero);
 break;
 }
 }
 }
 Slice_Mem new_alloc = arena__push(arena, in.requested_size, 1, &(Opts_arena){.alignment = in.alignment});
 if (new_alloc.ptr == null) {
 out->allocation = (Slice_Mem){0};
 break;
 }
-memory_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
-memory_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * in.op - AllocatorOp_Grow_NoZero);
+mem_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
+mem_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * in.op - AllocatorOp_Grow_NoZero);
 out->allocation = new_alloc;
 }
 break;
 
 case AllocatorOp_Shrink: {
 Arena_R active = arena->current;
 U8 alloc_end = in.old_allocation.ptr + in.old_allocation.len;
 U8 arena_end = u8_(active) + active->pos;
 if (alloc_end != arena_end) {
 out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
 break;
 }
 //SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
 U8 aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
 U8 aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
 U8 pos_reduction = aligned_original - aligned_new;
 active->pos -= pos_reduction;
-varena__shrink(active->backing, in.old_allocation, in.requested_size, &(Opts_varena){.alignment = in.alignment});
+varena__shrink(active->backing, in.old_allocation, in.requested_size);
 out->allocation = (Slice_Mem){in.old_allocation.ptr, in.requested_size};
 }
 break;
 
-case AllocatorOp_Rewind:
-arena_rewind(arena, in.save_point);
-break;
-case AllocatorOp_SavePoint:
-out->save_point = arena_save(arena);
-break;
+case AllocatorOp_Rewind: arena_rewind(arena, in.save_point); break;
+case AllocatorOp_SavePoint: out->save_point = arena_save(arena); break;
 
 case AllocatorOp_Query:
 out->features =
 AllocatorQuery_Alloc
 | AllocatorQuery_Resize
 | AllocatorQuery_Reset
 | AllocatorQuery_Rewind
 ;
 out->max_alloc = arena->backing->reserve;
 out->min_alloc = kilo(4);
 out->left = out->max_alloc - arena->backing->commit_used;
 out->save_point = arena_save(arena);
 break;
 }
 }
 #pragma endregion Arena
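
Note: the chained arena's grow policy reads straight through once regrouped: extend in place when the allocation is the tail of the current block and the backing VArena still has reserve, otherwise push a fresh allocation and copy the old bytes across. The two-step policy reduced to a standalone sketch (plain C, illustrative names):

#include <stddef.h>
#include <string.h>

typedef struct { unsigned char* ptr; size_t len; } Slice;
typedef struct { unsigned char* start; size_t used, cap; } Block;

static int try_extend_tail(Block* b, Slice old, size_t new_len) {
    if (old.ptr + old.len != b->start + b->used) { return 0; } // not the tail
    if (b->used + (new_len - old.len) > b->cap) { return 0; }  // no room left
    b->used += new_len - old.len;
    return 1;
}

static Slice block_grow(Block* b, Slice old, size_t new_len) {
    if (try_extend_tail(b, old, new_len)) { return (Slice){ old.ptr, new_len }; } // fast path
    if (b->used + new_len > b->cap) { return (Slice){0}; } // fallback push failed
    Slice fresh = { b->start + b->used, new_len };
    b->used += new_len;
    memcpy(fresh.ptr, old.ptr, old.len); // relocate the old bytes
    return fresh;
}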
@@ -1445,7 +1386,7 @@ void ktl_populate_slice_a2_str8(KTL_Str8*R_ kt, AllocatorInfo backing, Slice_A2_
 if (values.len == 0) return;
 * kt = alloc_slice(backing, KTL_Slot_Str8, values.len);
 for span_iter(U8, id, 0, <, values.len) {
-memory_copy(u8_(& kt->ptr[id.cursor].value), u8_(& values.ptr[id.cursor][1]), size_of(Str8));
+mem_copy(u8_(& kt->ptr[id.cursor].value), u8_(& values.ptr[id.cursor][1]), size_of(Str8));
 hash64_fnv1a(& kt->ptr[id.cursor].key, slice_mem_s(values.ptr[id.cursor][0]));
 }
 }
@@ -1475,7 +1416,7 @@ void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) {
 for (; slot_cursor < slice_end(slots); slot_cursor += m.slot_size) {
 process_slots:
 Slice_Mem slot = {slot_cursor, m.slot_size}; // slot = slots[id]
-memory_zero(slot.ptr, slot.len); // clear(slot)
+mem_zero(slot.ptr, slot.len); // clear(slot)
 }
 U8 next = slot_cursor + m.cell_next_offset; // next = slots + next_cell_offset
 if (next != null) {
@@ -1562,7 +1503,7 @@ U8 kt1cx_set(KT1CX_Byte kt, U8 key, Slice_Mem value, AllocatorInfo backing_cells
 finline
 char* str8_to_cstr_capped(Str8 content, Slice_Mem mem) {
 U8 copy_len = min(content.len, mem.len - 1);
-memory_copy(mem.ptr, u8_(content.ptr), copy_len);
+mem_copy(mem.ptr, u8_(content.ptr), copy_len);
 u1_r(mem.ptr)[copy_len] = '\0';
 return cast(char*, mem.ptr);
 }
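
Note: str8_to_cstr_capped (touched here only for the mem_copy rename) is the usual truncate-and-terminate conversion — at most mem.len - 1 bytes are copied so the NUL always fits. The same contract in isolation (requires a destination capacity of at least 1):

#include <string.h>
#include <stddef.h>

static char* to_cstr_capped(const char* src, size_t src_len, char* dst, size_t dst_cap) {
    size_t copy_len = (src_len < dst_cap - 1) ? src_len : dst_cap - 1; // leave room for NUL
    memcpy(dst, src, copy_len);
    dst[copy_len] = '\0'; // always terminated, truncating if needed
    return dst;
}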
@@ -1662,7 +1603,7 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Mem*R_ _buffer, KTL_Str8 table, St
 while (cursor_fmt[copy_offset] != '<' && (cursor_fmt + copy_offset) < slice_end(fmt_template)) {
 ++ copy_offset;
 }
-memory_copy(u8_(cursor_buffer), u8_(cursor_fmt), copy_offset);
+mem_copy(u8_(cursor_buffer), u8_(cursor_fmt), copy_offset);
 buffer_remaining -= copy_offset;
 left_fmt -= copy_offset;
 cursor_buffer += copy_offset;
@@ -1700,7 +1641,7 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Mem*R_ _buffer, KTL_Str8 table, St
 buffer_remaining += potential_token_len;
 }
 assert((buffer_remaining - potential_token_len) > 0);
-memory_copy(u8_(cursor_buffer), u8_(value->ptr), value->len);
+mem_copy(u8_(cursor_buffer), u8_(value->ptr), value->len);
 // Sync cursor format to after the processed token
 cursor_buffer += value->len;
 buffer_remaining -= value->len;
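
Note: the loop patched above is the formatter's literal-run scan — advance until the next '<' (the token opener) or the end of the template, then bulk-copy the run. Its core, outside the WATL types (illustrative):

#include <string.h>
#include <stddef.h>

// Copies the literal run [fmt, next '<') into out; returns bytes copied.
static size_t copy_literal_run(const char* fmt, const char* fmt_end, char* out) {
    size_t run = 0;
    while (fmt + run < fmt_end && fmt[run] != '<') { ++run; }
    memcpy(out, fmt, run);
    return run;
}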
@@ -1822,7 +1763,7 @@ Str8* str8cache_set(KT1CX_Str8 kt, U8 key, Str8 value, AllocatorInfo str_reserve
 finline
 Str8 cache_str8(Str8Cache_R cache, Str8 str) {
 assert(cache != nullptr);
 U8 key = 0; hash64_fnv1a(& key, slice_mem_s(str));
 Str8_R result = str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve);
 return result[0];
 }
@@ -1939,7 +1880,7 @@ void api_file_read_contents(FileOpInfo_R result, Str8 path, Opts_read_file_conte
 return;
 }
 if (opts.zero_backing) {
-memory_zero(buffer.ptr, buffer.len);
+mem_zero(buffer.ptr, buffer.len);
 }
 MS_DWORD amount_read = 0;
 MS_BOOL read_result = ReadFile(
244 C/watl.v0.msvc.c
@@ -25,6 +25,7 @@ Toolchain: MSVC 19.43, C-Stanard: 11
 #define local_persist static
 #define global static
 #define internal static
+#define finline __forceinline
 
 #define static_assert _Static_assert
 #define typeof __typeof__
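
Note: finline is the MSVC file's counterpart to the clang file's forced-inline wrapper — one toolchain keyword per compiler. A portable selection would look like the sketch below; the repo instead pins a single definition per translation unit:

#if defined(_MSC_VER)
#define finline __forceinline
#else
#define finline static inline __attribute__((always_inline))
#endif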
@@ -57,6 +58,13 @@ enum { false = 0, true = 1, true_overflow, };
 #define offset_of(type, member) cast(SSIZE, & (((type*) 0)->member))
 #define size_of(data) cast(SSIZE, sizeof(data))
 
+// Not using this since its lottes related.
+// #define R_ __restrict
+// #define V_ volatile
+// #define r_(ptr) cast(typeof_ptr(ptr)*R_, ptr)
+// #define v_(ptr) cast(typeof_ptr(ptr)*V_, ptr)
+#define ssize(value) cast(SSIZE, value)
+
 #define kilo(n) (cast(SSIZE, n) << 10)
 #define mega(n) (cast(SSIZE, n) << 20)
 #define giga(n) (cast(SSIZE, n) << 30)
@@ -106,11 +114,8 @@ inline SSIZE align_pow2(SSIZE x, SSIZE b);
 #define align_struct(type_width) ((SSIZE)(((type_width) + 7) / 8 * 8))
 
 #define assert_bounds(point, start, end) do { \
-SSIZE pos_point = cast(SSIZE, point); \
-SSIZE pos_start = cast(SSIZE, start); \
-SSIZE pos_end = cast(SSIZE, end); \
-assert(pos_start <= pos_point); \
-assert(pos_point <= pos_end); \
+assert(ssize(start) <= ssize(point)); \
+assert(ssize(point) <= ssize(end)); \
 } while(0)
 
 void* memory_copy (void* restrict dest, void const* restrict src, USIZE length);
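
Note: the rewritten assert_bounds swaps the three SSIZE temporaries for direct casts through the new ssize() helper. That means point is now evaluated twice, so arguments should stay side-effect free — which holds at the call sites in this commit, e.g. in farena_rewind below:

Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
assert_bounds(save_point.slot, arena->start, end); // all three arguments are plain reads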
@@ -365,7 +370,7 @@ Slice_Byte varena__push (VArena* arena, SSIZE amount, SSIZE type_width, Opts_v
 void varena_release(VArena* arena);
 void varena_rewind (VArena* arena, AllocatorSP save_point);
 void varena_reset (VArena* arena);
-Slice_Byte varena__shrink(VArena* arena, Slice_Byte old_allocation, SSIZE requested_size, Opts_varena* opts);
+Slice_Byte varena__shrink(VArena* arena, Slice_Byte old_allocation, SSIZE requested_size);
 AllocatorSP varena_save (VArena* arena);
 
 void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out);
@@ -833,18 +838,54 @@ Slice_Byte farena__push(FArena* arena, SSIZE amount, SSIZE type_width, Opts_fare
 }
 SSIZE desired = type_width * amount;
 SSIZE to_commit = align_pow2(desired, opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT);
-SSIZE unused = arena->capacity - arena->used;
-assert(to_commit <= unused);
+SSIZE unused = arena->capacity - arena->used; assert(to_commit <= unused);
 Byte* ptr = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
 arena->used += to_commit;
 return (Slice_Byte){ptr, desired};
 }
+inline
+Slice_Byte farena__grow(FArena* arena, SSIZE requested_size, Slice_Byte old_allocation, SSIZE alignment, B32 no_zero) {
+// Check if the allocation is at the end of the arena
+Byte* alloc_end = old_allocation.ptr + old_allocation.len;
+Byte* arena_end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
+if (alloc_end != arena_end) {
+// Not at the end, can't grow in place
+return (Slice_Byte){0};
+}
+// Calculate growth
+SSIZE grow_amount = requested_size - old_allocation.len;
+SSIZE aligned_grow = align_pow2(grow_amount, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+SSIZE unused = arena->capacity - arena->used;
+if (aligned_grow > unused) {
+// Not enough space
+return (Slice_Byte){0};
+}
+arena->used += aligned_grow;
+Slice_Byte result = (Slice_Byte){old_allocation.ptr, requested_size};
+memory_zero(old_allocation.ptr + old_allocation.len, grow_amount * cast(SSIZE, no_zero));
+return result;
+}
+inline
+Slice_Byte farena__shrink(FArena* arena, Slice_Byte old_allocation, SSIZE requested_size, SSIZE alignment) {
+// Check if the allocation is at the end of the arena
+Byte* alloc_end = old_allocation.ptr + old_allocation.len;
+Byte* arena_end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
+if (alloc_end != arena_end) {
+// Not at the end, can't shrink but return adjusted size
+return (Slice_Byte){old_allocation.ptr, requested_size};
+}
+// Calculate shrinkage
+//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
+SSIZE aligned_original = align_pow2(old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
+SSIZE aligned_new = align_pow2(requested_size, alignment ? alignment : MEMORY_ALIGNMENT_DEFAULT);
+arena->used -= (aligned_original - aligned_new);
+return (Slice_Byte){old_allocation.ptr, requested_size};
+}
 inline void farena_reset(FArena* arena) { arena->used = 0; }
 inline
 void farena_rewind(FArena* arena, AllocatorSP save_point) {
 assert(save_point.type_sig == & farena_allocator_proc);
-Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
-assert_bounds(save_point.slot, arena->start, end);
+Byte* end = cast(Byte*, cast(SSIZE, arena->start) + arena->used); assert_bounds(save_point.slot, arena->start, end);
 arena->used -= save_point.slot - cast(SSIZE, arena->start);
 }
 inline
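
Note: farena__shrink only returns the difference between the two aligned sizes to the arena. Worked through with a hypothetical 16-byte MEMORY_ALIGNMENT_DEFAULT: shrinking a 100-byte tail allocation to 40 bytes gives align_pow2(100, 16) = 112 and align_pow2(40, 16) = 48, so arena->used drops by 64 while the caller still sees exactly 40 bytes:

#include <assert.h>
#include <stddef.h>

#define ALIGN_POW2(x, b) (((x) + (b) - 1) & ~((b) - 1)) // same contract as align_pow2

int main(void) {
    size_t aligned_original = ALIGN_POW2((size_t)100, (size_t)16); // 112
    size_t aligned_new      = ALIGN_POW2((size_t)40,  (size_t)16); // 48
    assert(aligned_original - aligned_new == 64); // bytes handed back to the arena
    return 0;
}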
@@ -865,61 +906,19 @@ void farena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
 break;
 
-case AllocatorOp_Free:
-break;
-case AllocatorOp_Reset:
-farena_reset(arena);
-break;
+case AllocatorOp_Free: break;
+case AllocatorOp_Reset: farena_reset(arena); break;
 
 case AllocatorOp_Grow:
-case AllocatorOp_Grow_NoZero: {
-// Check if the allocation is at the end of the arena
-Byte* alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-Byte* arena_end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
-if (alloc_end != arena_end) {
-// Not at the end, can't grow in place
-out->allocation = (Slice_Byte){0};
-break;
-}
-// Calculate growth
-SSIZE grow_amount = in.requested_size - in.old_allocation.len;
-SSIZE aligned_grow = align_pow2(grow_amount, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-SSIZE unused = arena->capacity - arena->used;
-if (aligned_grow > unused) {
-// Not enough space
-out->allocation = (Slice_Byte){0};
-break;
-}
-arena->used += aligned_grow;
-out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
-memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * cast(SSIZE, in.op - AllocatorOp_Grow_NoZero));
-}
-break;
-
-case AllocatorOp_Shrink: {
-// Check if the allocation is at the end of the arena
-Byte* alloc_end = in.old_allocation.ptr + in.old_allocation.len;
-Byte* arena_end = cast(Byte*, cast(SSIZE, arena->start) + arena->used);
-if (alloc_end != arena_end) {
-// Not at the end, can't shrink but return adjusted size
-out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
-break;
-}
-// Calculate shrinkage
-//SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
-SSIZE aligned_original = align_pow2(in.old_allocation.len, MEMORY_ALIGNMENT_DEFAULT);
-SSIZE aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
-arena->used -= (aligned_original - aligned_new);
-out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
-}
+case AllocatorOp_Grow_NoZero:
+out->allocation = farena__grow(arena, in.requested_size, in.old_allocation, in.alignment, in.op - AllocatorOp_Grow_NoZero);
+break;
+case AllocatorOp_Shrink:
+out->allocation = farena__shrink(arena, in.old_allocation, in.requested_size, in.alignment);
 break;
 
-case AllocatorOp_Rewind:
-farena_rewind(arena, in.save_point);
-break;
-case AllocatorOp_SavePoint:
-out->save_point = farena_save(* arena);
-break;
+case AllocatorOp_Rewind: farena_rewind(arena, in.save_point); break;
+case AllocatorOp_SavePoint: out->save_point = farena_save(* arena); break;
 
 case AllocatorOp_Query:
 out->features =
@@ -1042,20 +1041,20 @@ inline void os_vmem_release(void* vm, SSIZE size) { VirtualFree(vm, 0, MS_MEM_R
 #pragma endregion OS
 
 #pragma region VArena (Virutal Address Space Arena)
+finline SSIZE varena_header_size(void) { return align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT); }
 inline
 VArena* varena__make(Opts_varena_make* opts) {
 assert(opts != nullptr);
 if (opts->reserve_size == 0) { opts->reserve_size = mega(64); }
 if (opts->commit_size == 0) { opts->commit_size = mega(64); }
 SSIZE reserve_size = align_pow2(opts->reserve_size, os_system_info()->target_page_size);
 SSIZE commit_size = align_pow2(opts->commit_size, os_system_info()->target_page_size);
 B32 no_large_pages = (opts->flags & VArenaFlag_NoLargePages) != 0;
-Byte* base = os__vmem_reserve(reserve_size, &(Opts_vmem){.base_addr = opts->base_addr, .no_large_pages = no_large_pages});
+Byte* base = os_vmem_reserve(reserve_size, .base_addr = opts->base_addr, .no_large_pages = no_large_pages);
 assert(base != nullptr);
 os_vmem_commit(base, commit_size, .no_large_pages = no_large_pages);
 SSIZE header_size = align_pow2(size_of(VArena), MEMORY_ALIGNMENT_DEFAULT);
-VArena* vm = cast(VArena*, base);
-* vm = (VArena){
+VArena* vm = cast(VArena*, base); * vm = (VArena){
 .reserve_start = cast(SSIZE, base) + header_size,
 .reserve = reserve_size,
 .commit_size = commit_size,
@@ -1067,6 +1066,7 @@ VArena* varena__make(Opts_varena_make* opts) {
 }
 inline
 Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena* opts) {
+assert(vm != nullptr);
 assert(amount != 0);
 SSIZE alignment = opts->alignment ? opts->alignment : MEMORY_ALIGNMENT_DEFAULT;
 SSIZE requested_size = amount * type_width;
@@ -1075,10 +1075,8 @@ Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena*
 SSIZE to_be_used = vm->commit_used + aligned_size;
 SSIZE reserve_left = vm->reserve - vm->commit_used;
 SSIZE commit_left = vm->committed - vm->commit_used;
-B32 exhausted = commit_left < to_be_used;
-assert(to_be_used < reserve_left);
-if (exhausted)
-{
+B32 exhausted = commit_left < to_be_used; assert(to_be_used < reserve_left);
+if (exhausted) {
 SSIZE next_commit_size = reserve_left > 0 ?
 max(vm->commit_size, to_be_used)
 : cast(SSIZE, align_pow2( reserve_left, os_system_info()->target_page_size));
@@ -1086,30 +1084,33 @@ Slice_Byte varena__push(VArena* vm, SSIZE amount, SSIZE type_width, Opts_varena*
 Byte* next_commit_start = cast(Byte*, cast(SSIZE, vm) + vm->committed);
 B32 no_large_pages = (vm->flags & VArenaFlag_NoLargePages) != 0;
 B32 commit_result = os_vmem_commit(next_commit_start, next_commit_size, .no_large_pages = no_large_pages);
-if (commit_result == false) {
-return (Slice_Byte){0};
-}
+if (commit_result == false) { return (Slice_Byte){0}; }
 vm->committed += next_commit_size;
 }
 }
 vm->commit_used = to_be_used;
 return (Slice_Byte){.ptr = cast(Byte*, current_offset), .len = requested_size};
 }
-inline void varena_release(VArena* arena) { os_vmem_release(arena, arena->reserve); }
-inline Slice_Byte varena__shrink(VArena* vm, Slice_Byte old_allocation, SSIZE requested_size, Opts_varena* opts) {
-assert(opts != nullptr);
-Slice_Byte result = {0};
-SSIZE current_offset = vm->reserve_start + vm->commit_used;
-SSIZE shrink_amount = old_allocation.len - requested_size;
-if (shrink_amount < 0) {
-result = old_allocation;
-return result;
-}
-assert(old_allocation.ptr == cast(Byte*, current_offset));
-vm->commit_used -= shrink_amount;
-result = (Slice_Byte){ old_allocation.ptr, requested_size };
+inline
+Slice_Byte varena__grow(VArena* vm, SSIZE requested_size, Slice_Byte old_allocation, SSIZE alignment, B32 no_zero) {
+assert(vm != nullptr);
+SSIZE grow_amount = requested_size - old_allocation.len;
+if (grow_amount == 0) { return old_allocation; } // Growing when not the last allocation not allowed
+SSIZE current_offset = vm->reserve_start + vm->commit_used; assert(old_allocation.ptr == cast(Byte*, current_offset));
+Slice_Byte allocation = varena_push_array(vm, Byte, grow_amount, alignment); assert(allocation.ptr != nullptr);
+Slice_Byte result = (Slice_Byte){ old_allocation.ptr, requested_size };
+memory_zero(allocation.ptr, allocation.len * no_zero);
 return result;
 }
+inline Slice_Byte varena__shrink(VArena* vm, Slice_Byte old_allocation, SSIZE requested_size) {
+SSIZE current_offset = vm->reserve_start + vm->commit_used;
+SSIZE shrink_amount = old_allocation.len - requested_size;
+if (shrink_amount < 0) { return old_allocation; }
+assert(old_allocation.ptr == cast(Byte*, current_offset));
+vm->commit_used -= shrink_amount;
+return (Slice_Byte){ old_allocation.ptr, requested_size };
+}
+inline void varena_release(VArena* arena) { os_vmem_release(arena, arena->reserve); }
 inline
 void varena_rewind(VArena* vm, AllocatorSP sp) {
 assert(vm != nullptr);
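
Note: with Rewind and SavePoint reduced to single cursor assignments, the save/rewind pair is effectively free — a save point captures commit_used, and rewinding writes it back while the committed pages stay mapped. Caller-side shape of the pattern (sketch against the API in this diff):

AllocatorSP sp = varena_save(vm); // records the current commit_used in sp.slot
// ... transient pushes into vm ...
varena_rewind(vm, sp); // cursor snaps back; committed memory is simply reused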
@@ -1128,47 +1129,19 @@ void varena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
 break;
 
-case AllocatorOp_Free:
-break;
-case AllocatorOp_Reset:
-vm->commit_used = 0;
-break;
+case AllocatorOp_Free: break;
+case AllocatorOp_Reset: vm->commit_used = 0; break;
 
 case AllocatorOp_Grow_NoZero:
-case AllocatorOp_Grow: {
-SSIZE grow_amount = in.requested_size - in.old_allocation.len;
-if (grow_amount == 0) {
-out->allocation = in.old_allocation;
-return;
-}
-SSIZE current_offset = vm->reserve_start + vm->commit_used;
-// Growing when not the last allocation not allowed
-assert(in.old_allocation.ptr == cast(Byte*, current_offset));
-Slice_Byte allocation = varena_push_array(vm, Byte, grow_amount, .alignment = in.alignment);
-assert(allocation.ptr != nullptr);
-out->allocation = (Slice_Byte){ in.old_allocation.ptr, in.requested_size };
-memory_zero(out->allocation.ptr, out->allocation.len * (in.op - AllocatorOp_Grow_NoZero));
-}
+case AllocatorOp_Grow:
+out->allocation = varena__grow(vm, in.requested_size, in.old_allocation, in.alignment, in.op - AllocatorOp_Grow_NoZero);
 break;
-case AllocatorOp_Shrink: {
-SSIZE current_offset = vm->reserve_start + vm->commit_used;
-SSIZE shrink_amount = in.old_allocation.len - in.requested_size;
-if (shrink_amount < 0) {
-out->allocation = in.old_allocation;
-return;
-}
-assert(in.old_allocation.ptr == cast(Byte*, current_offset));
-vm->commit_used -= shrink_amount;
-out->allocation = (Slice_Byte){ in.old_allocation.ptr, in.requested_size };
-}
+case AllocatorOp_Shrink:
+out->allocation = varena__shrink(vm, in.old_allocation, in.requested_size);
 break;
 
-case AllocatorOp_Rewind:
-vm->commit_used = in.save_point.slot;
-break;
-case AllocatorOp_SavePoint:
-out->save_point = varena_save(vm);
-break;
+case AllocatorOp_Rewind: vm->commit_used = in.save_point.slot; break;
+case AllocatorOp_SavePoint: out->save_point = varena_save(vm); break;
 
 case AllocatorOp_Query:
 out->features =
@@ -1191,10 +1164,8 @@ inline
 Arena* arena__make(Opts_arena_make* opts) {
 assert(opts != nullptr);
 SSIZE header_size = align_pow2(size_of(Arena), MEMORY_ALIGNMENT_DEFAULT);
-VArena* current = varena__make(opts);
-assert(current != nullptr);
-Arena* arena = varena_push(current, Arena);
-* arena = (Arena){
+VArena* current = varena__make(opts); assert(current != nullptr);
+Arena* arena = varena_push(current, Arena); * arena = (Arena){
 .backing = current,
 .prev = nullptr,
 .current = arena,
@@ -1275,11 +1246,9 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 out->allocation = arena_push_array(arena, Byte, in.requested_size, .alignment = in.alignment);
 memory_zero(out->allocation.ptr, out->allocation.len * cast(SSIZE, in.op));
 break;
-case AllocatorOp_Free:
-break;
-case AllocatorOp_Reset:
-arena_reset(arena);
-break;
+case AllocatorOp_Free: break;
+case AllocatorOp_Reset: arena_reset(arena); break;
 
 case AllocatorOp_Grow:
 case AllocatorOp_Grow_NoZero: {
@@ -1326,18 +1295,13 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 SSIZE aligned_new = align_pow2(in.requested_size, in.alignment ? in.alignment : MEMORY_ALIGNMENT_DEFAULT);
 SSIZE pos_reduction = aligned_original - aligned_new;
 active->pos -= pos_reduction;
-varena__shrink(active->backing, in.old_allocation, in.requested_size, &(Opts_varena){.alignment = in.alignment});
-out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
+out->allocation = varena__shrink(active->backing, in.old_allocation, in.requested_size);
 }
 break;
 
-case AllocatorOp_Rewind:
-arena_rewind(arena, in.save_point);
-break;
-case AllocatorOp_SavePoint:
-out->save_point = arena_save(arena);
-break;
+case AllocatorOp_Rewind: arena_rewind(arena, in.save_point); break;
+case AllocatorOp_SavePoint: out->save_point = arena_save(arena); break;
 case AllocatorOp_Query:
 out->features =
 AllocatorQuery_Alloc