Added ZPL's virtual memory to dependencies (unused for now)

This commit is contained in:
Edward R. Gonzalez 2023-08-23 11:05:49 -04:00
parent c81f4b34ee
commit f9117a2353
7 changed files with 173 additions and 3 deletions

View File

@@ -40,6 +40,7 @@
</AdditionalIncludePaths>
<Defines>
<Define>GEN_TIME</Define>
<Define>GEN_SYSTEM_WINDOWS</Define>
</Defines>
<ConfigProperties>
<ConfigAndPlatform>

View File

@@ -779,6 +779,9 @@ String AST::to_string()
{
result.append_fmt( "%S", Name );
}
if ( IsParamPack )
result.append("...");
}
break;

View File

@@ -265,6 +265,7 @@ struct AST
ModuleFlag ModuleFlags;
union {
b32 IsFunction; // Used by typedef to not serialize the name field.
b32 IsParamPack; // Used by typename to know if type should be considered a parameter pack.
OperatorT Op;
AccessSpec ParentAccess;
s32 NumEntries;
@@ -315,6 +316,7 @@ struct AST_POD
ModuleFlag ModuleFlags;
union {
b32 IsFunction; // Used by typedef to not serialize the name field.
b32 IsParamPack; // Used by typename to know if type should be considered a parameter pack.
OperatorT Op;
AccessSpec ParentAccess;
s32 NumEntries;

View File

@@ -459,7 +459,8 @@ struct AST_Type
Code Parent;
StringCached Name;
CodeT Type;
- char _PAD_UNUSED_[ sizeof(ModuleFlag) + sizeof(u32) ];
+ char _PAD_UNUSED_[ sizeof(ModuleFlag) ];
+ b32 IsParamPack;
};
static_assert( sizeof(AST_Type) == sizeof(AST), "ERROR: AST_Type is not the same size as AST");
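The pad shrink above is what keeps this static_assert passing: once IsParamPack is promoted to a named member in the typed view, _PAD_UNUSED_ must give up exactly sizeof(u32) bytes. A minimal standalone sketch of the layout invariant (illustrative names, not gencpp's actual structs):

using b32        = int;
using ModuleFlag = b32;

// Full node: IsParamPack lives in a u32-sized union slot shared by all node kinds.
struct Node     { ModuleFlag ModuleFlags; union { b32 IsParamPack; b32 NumEntries; }; };
// Typed view: pad over the fields it ignores, then name the union slot it uses.
struct NodeType { char _PAD_UNUSED_[ sizeof(ModuleFlag) ]; b32 IsParamPack; };

static_assert( sizeof(NodeType) == sizeof(Node), "typed view must mirror the full node's size" );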

View File

@@ -4455,6 +4455,13 @@ CodeType parse_type( bool* is_function )
is_first_capture = false;
}
bool is_param_pack = false;
if ( check(TokType::Varadic_Argument) )
{
is_param_pack = true;
eat( TokType::Varadic_Argument );
}
using namespace ECode;
CodeType
@@ -4480,6 +4487,9 @@ CodeType parse_type( bool* is_function )
if ( attributes )
result->Attributes = attributes;
if ( is_param_pack )
result->IsParamPack = true;
Context.pop();
return result;
}
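The parsing flow reads straightforwardly: after the base typename is captured, a lone `...` token marks a parameter pack; it is consumed, and the flag is copied onto the resulting CodeType after construction. A rough standalone sketch of the same detection (hypothetical token type, not gencpp's lexer):

#include <cstddef>
#include <string_view>
#include <vector>

struct Token { std::string_view text; };

// Consume a trailing "..." if present; returns whether the type is a parameter pack.
bool eat_param_pack( std::vector<Token> const& toks, std::size_t& idx )
{
	if ( idx < toks.size() && toks[ idx ].text == "..." )
	{
		++idx;       // eat( TokType::Varadic_Argument )
		return true; // caller sets result->IsParamPack
	}
	return false;
}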

View File

@@ -206,6 +206,132 @@ void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
return ptr;
}
#pragma region VirtualMemory
VirtualMemory vm_from_memory( void* data, sw size )
{
VirtualMemory vm;
vm.data = data;
vm.size = size;
return vm;
}
#if defined( GEN_SYSTEM_WINDOWS )
VirtualMemory vm_alloc( void* addr, sw size )
{
VirtualMemory vm;
GEN_ASSERT( size > 0 );
vm.data = VirtualAlloc( addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE );
vm.size = size;
return vm;
}
b32 vm_free( VirtualMemory vm )
{
MEMORY_BASIC_INFORMATION info;
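// VirtualFree with MEM_RELEASE only accepts an allocation's base address (with size 0),
// so walk the region with VirtualQuery and release it one allocation at a time.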
while ( vm.size > 0 )
{
if ( VirtualQuery( vm.data, &info, size_of( info ) ) == 0 )
return false;
if ( info.BaseAddress != vm.data || info.AllocationBase != vm.data || info.State != MEM_COMMIT || info.RegionSize > zpl_cast( uw ) vm.size )
{
return false;
}
if ( VirtualFree( vm.data, 0, MEM_RELEASE ) == 0 )
return false;
vm.data = pointer_add( vm.data, info.RegionSize );
vm.size -= info.RegionSize;
}
return true;
}
VirtualMemory vm_trim( VirtualMemory vm, sw lead_size, sw size )
{
VirtualMemory new_vm = { 0 };
void* ptr;
GEN_ASSERT( vm.size >= lead_size + size );
ptr = pointer_add( vm.data, lead_size );
vm_free( vm );
new_vm = vm_alloc( ptr, size );
if ( new_vm.data == ptr )
return new_vm;
// The OS did not honor the requested address: release the stray allocation and
// return an empty region instead of a pointer into freed memory.
if ( new_vm.data )
{
vm_free( new_vm );
new_vm.data = nullptr;
new_vm.size = 0;
}
return new_vm;
}
b32 vm_purge( VirtualMemory vm )
{
VirtualAlloc( vm.data, vm.size, MEM_RESET, PAGE_READWRITE );
// NOTE: Can this really fail?
return true;
}
sw virtual_memory_page_size( sw* alignment_out )
{
SYSTEM_INFO info;
GetSystemInfo( &info );
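// dwAllocationGranularity (typically 64 KB) is coarser than dwPageSize (typically 4 KB);
// new reservations are aligned to the granularity, hence it is returned as the alignment.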
if ( alignment_out )
*alignment_out = info.dwAllocationGranularity;
return info.dwPageSize;
}
#else
# include <sys/mman.h>
# include <unistd.h> // for sysconf( _SC_PAGE_SIZE )
# ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
# endif
VirtualMemory vm_alloc( void* addr, sw size )
{
VirtualMemory vm;
GEN_ASSERT( size > 0 );
vm.data = mmap( addr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0 );
vm.size = size;
return vm;
}
b32 vm_free( VirtualMemory vm )
{
munmap( vm.data, vm.size );
return true;
}
VirtualMemory vm_trim( VirtualMemory vm, sw lead_size, sw size )
{
void* ptr;
sw trail_size;
GEN_ASSERT( vm.size >= lead_size + size );
ptr = pointer_add( vm.data, lead_size );
trail_size = vm.size - lead_size - size;
if ( lead_size != 0 )
vm_free( vm_from_memory( vm.data, lead_size ) );
if ( trail_size != 0 )
vm_free( vm_from_memory( ptr, trail_size ) );
return vm_from_memory( ptr, size );
}
b32 vm_purge( VirtualMemory vm )
{
int err = madvise( vm.data, vm.size, MADV_DONTNEED );
// madvise returns 0 on success; mirror the Windows path and report success as true.
return err == 0;
}
sw virtual_memory_page_size( sw* alignment_out )
{
// TODO: Is this always true?
sw result = zpl_cast( sw ) sysconf( _SC_PAGE_SIZE );
if ( alignment_out )
*alignment_out = result;
return result;
}
#endif
#pragma endregion VirtualMemory
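With both platform branches behind the same five functions, usage is uniform. A quick sketch of the intended call pattern (untested, assuming the surrounding library's conventions):

sw alignment = 0;
sw page_size = virtual_memory_page_size( & alignment );

// A null address lets the OS choose where to place the region.
VirtualMemory vm = vm_alloc( nullptr, page_size );
if ( vm.data )
{
	mem_set( vm.data, 0, vm.size ); // committed read/write memory
	vm_purge( vm );                 // contents may be discarded by the OS after this
	vm_free( vm );
}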
void* Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
Arena* arena = rcast(Arena*, allocator_data);

View File

@@ -363,6 +363,33 @@ GEN_IMPL_INLINE void zero_size( void* ptr, sw size )
mem_set( ptr, 0, size );
}
struct VirtualMemory
{
void* data;
sw size;
};
//! Initialize virtual memory from existing data.
VirtualMemory vm_from_memory( void* data, sw size );
//! Allocate virtual memory at address with size.
//! @param addr The starting address of the region to reserve. If NULL, the operating system decides where to place it.
//! @param size The size to reserve.
VirtualMemory vm_alloc( void* addr, sw size );
//! Release the virtual memory.
b32 vm_free( VirtualMemory vm );
//! Trim virtual memory: free lead_size bytes from the front and keep size bytes starting there.
VirtualMemory vm_trim( VirtualMemory vm, sw lead_size, sw size );
//! Purge virtual memory: hint to the OS that the region's contents may be discarded.
b32 vm_purge( VirtualMemory vm );
//! Retrieve the virtual memory page size and allocation alignment.
sw virtual_memory_page_size( sw* alignment_out );
struct Arena
{
static