// gencpp/project/dependencies/memory.hpp

#pragma once
#include "debug.hpp"
#pragma region Memory
#define kilobytes( x ) ( ( x ) * ( s64 )( 1024 ) )
#define megabytes( x ) ( kilobytes( x ) * ( s64 )( 1024 ) )
#define gigabytes( x ) ( megabytes( x ) * ( s64 )( 1024 ) )
#define terabytes( x ) ( gigabytes( x ) * ( s64 )( 1024 ) )
#define GEN__ONES ( zpl_cast( uw ) - 1 / GEN_U8_MAX )
#define GEN__HIGHS ( GEN__ONES * ( GEN_U8_MAX / 2 + 1 ) )
#define GEN__HAS_ZERO( x ) ( ( ( x ) - GEN__ONES ) & ~( x ) & GEN__HIGHS )
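// A brief note on the macros above (values shown assume a 64-bit uw):
// GEN__ONES == 0x0101010101010101 and GEN__HIGHS == 0x8080808080808080, so
// GEN__HAS_ZERO( x ) is nonzero exactly when some byte of x is zero: the classic
// SWAR zero-byte test, e.g. GEN__HAS_ZERO( 0x1122003344556677 ) != 0.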
//! Checks if value is power of 2.
GEN_DEF_INLINE b32 is_power_of_two( sw x );
//! Aligns address to specified alignment.
GEN_DEF_INLINE void* align_forward( void* ptr, sw alignment );
//! Aligns value to a specified alignment.
GEN_DEF_INLINE s64 align_forward_i64( s64 value, sw alignment );
//! Moves pointer forward by bytes.
GEN_DEF_INLINE void* pointer_add( void* ptr, sw bytes );
//! Moves pointer forward by bytes.
GEN_DEF_INLINE void const* pointer_add_const( void const* ptr, sw bytes );
//! Calculates difference between two addresses.
GEN_DEF_INLINE sw pointer_diff( void const* begin, void const* end );
//! Copy non-overlapping memory from source to destination.
void* mem_copy( void* dest, void const* source, sw size );
//! Search the first size bytes at the memory location for the given byte value.
void const* mem_find( void const* data, u8 byte_value, sw size );
//! Copy memory from source to destination; the ranges may overlap.
GEN_DEF_INLINE void* mem_move( void* dest, void const* source, sw size );
//! Fill size bytes at the memory location with the given byte value.
GEN_DEF_INLINE void* mem_set( void* data, u8 byte_value, sw size );
//! @param ptr Memory location to zero out.
//! @param size The number of bytes to zero.
GEN_DEF_INLINE void zero_size( void* ptr, sw size );
//! Zeroes out an item.
#define zero_item( t ) zero_size( ( t ), size_of( *( t ) ) ) // NOTE: Pass pointer of struct
//! Zeroes out an array.
#define zero_array( a, count ) zero_size( ( a ), size_of( *( a ) ) * ( count ) )
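// Usage sketch for the zeroing helpers (hypothetical user code, not part of this header):
//
//     struct Foo { u32 a, b; };
//
//     Foo item;
//     zero_item( & item );     // zero one struct via its pointer
//
//     Foo items[ 8 ];
//     zero_array( items, 8 );  // zero all 8 elements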
enum AllocType : u8
{
	EAllocation_ALLOC,
	EAllocation_FREE,
	EAllocation_FREE_ALL,
	EAllocation_RESIZE,
};

using AllocatorProc = void* ( void* allocator_data, AllocType type
	, sw size, sw alignment
	, void* old_memory, sw old_size
	, u64 flags );

struct AllocatorInfo
{
	AllocatorProc* Proc;
	void*          Data;
};

enum AllocFlag
{
	ALLOCATOR_FLAG_CLEAR_TO_ZERO = bit( 0 ),
};
#ifndef GEN_DEFAULT_MEMORY_ALIGNMENT
# define GEN_DEFAULT_MEMORY_ALIGNMENT ( 2 * size_of( void* ) )
#endif
#ifndef GEN_DEFAULT_ALLOCATOR_FLAGS
# define GEN_DEFAULT_ALLOCATOR_FLAGS ( ALLOCATOR_FLAG_CLEAR_TO_ZERO )
#endif
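// A minimal sketch of a custom allocator implementing the AllocatorProc contract
// (hypothetical user code, not part of this header). It bump-allocates out of a
// fixed buffer; FREE is a no-op and FREE_ALL resets the cursor. FixedBuffer and
// fixed_buffer_proc are illustrative names only, and the sketch leans on
// align_forward, pointer_diff, zero_size, and default_resize_align declared
// elsewhere in this file.
//
//     struct FixedBuffer
//     {
//         u8 Data[ 4096 ];
//         sw Used;
//     };
//
//     void* fixed_buffer_proc( void* allocator_data, AllocType type
//         , sw size, sw alignment
//         , void* old_memory, sw old_size
//         , u64 flags )
//     {
//         FixedBuffer* buf = zpl_cast( FixedBuffer* ) allocator_data;
//         switch ( type )
//         {
//             case EAllocation_ALLOC:
//             {
//                 u8* ptr = zpl_cast( u8* ) align_forward( buf->Data + buf->Used, alignment );
//                 if ( ptr + size > buf->Data + sizeof( buf->Data ) )
//                     return nullptr; // out of space
//                 buf->Used = pointer_diff( buf->Data, ptr + size );
//                 if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
//                     zero_size( ptr, size );
//                 return ptr;
//             }
//             case EAllocation_FREE:
//                 break; // individual frees are a no-op
//             case EAllocation_FREE_ALL:
//                 buf->Used = 0;
//                 break;
//             case EAllocation_RESIZE:
//                 return default_resize_align( { fixed_buffer_proc, buf }, old_memory, old_size, size, alignment );
//         }
//         return nullptr;
//     }
//
// Wrapping it for use: AllocatorInfo info = { fixed_buffer_proc, & my_buffer };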
//! Allocate memory with default alignment.
GEN_DEF_INLINE void* alloc( AllocatorInfo a, sw size );
//! Allocate memory with specified alignment.
GEN_DEF_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment );
//! Free allocated memory.
GEN_DEF_INLINE void free( AllocatorInfo a, void* ptr );
//! Free all memory allocated by an allocator.
GEN_DEF_INLINE void free_all( AllocatorInfo a );
//! Resize an allocation.
GEN_DEF_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size );
//! Resize an allocation with specified alignment.
GEN_DEF_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
//! Allocate memory for an item.
#define alloc_item( allocator_, Type ) ( Type* )alloc( allocator_, size_of( Type ) )
//! Allocate memory for an array of items.
#define alloc_array( allocator_, Type, count ) ( Type* )alloc( allocator_, size_of( Type ) * ( count ) )
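// Usage sketch (hypothetical user code; heap() is declared further below):
//
//     AllocatorInfo a    = heap();
//     u32*          one  = alloc_item( a, u32 );
//     u32*          many = alloc_array( a, u32, 64 );
//     /* ... */
//     free( a, many );
//     free( a, one );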
/* Heap memory analysis tools. */
/* Define GEN_HEAP_ANALYSIS to enable this feature. */
/* Call heap_stats_init at the beginning of the entry point. */
/* Call heap_stats_check near the end of execution to validate possible leaks; see the sketch after the heap helpers below. */
void heap_stats_init( void );
sw heap_stats_used_memory( void );
sw heap_stats_alloc_count( void );
void heap_stats_check( void );
//! Allocate/resize memory using default options.
//! Use this if you don't need a "fancy" resize allocation.
GEN_DEF_INLINE void* default_resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
//! The heap allocator, backed by the operating system's memory manager.
constexpr AllocatorInfo heap( void ) { return { heap_allocator_proc, nullptr }; }
//! Helper to allocate memory using heap allocator.
#define malloc( sz ) alloc( heap(), sz )
//! Helper to free memory allocated by heap allocator.
#define mfree( ptr ) free( heap(), ptr )
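// Usage sketch for the heap helpers and the heap-analysis hooks (hypothetical user
// code, not part of this header; the stats functions assume GEN_HEAP_ANALYSIS is defined):
//
//     heap_stats_init();                      // at the start of the entry point
//
//     void* block = malloc( kilobytes( 4 ) ); // alloc( heap(), ... ) under the hood
//     /* ... use block ... */
//     mfree( block );
//
//     heap_stats_check();                     // near the end; validates for leaks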
GEN_IMPL_INLINE b32 is_power_of_two( sw x )
{
	if ( x <= 0 )
		return false;

	return ! ( x & ( x - 1 ) );
}
GEN_IMPL_INLINE void* align_forward( void* ptr, sw alignment )
{
	uptr p;

	GEN_ASSERT( is_power_of_two( alignment ) );

	p = zpl_cast( uptr ) ptr;
	return zpl_cast( void* )( ( p + ( alignment - 1 ) ) & ~( alignment - 1 ) );
}
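// e.g. align_forward( zpl_cast( void* ) 0x1005, 16 ) yields zpl_cast( void* ) 0x1010.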
GEN_IMPL_INLINE s64 align_forward_i64( s64 value, sw alignment )
{
	return value + ( alignment - value % alignment ) % alignment;
}
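// e.g. align_forward_i64( 13, 8 ) == 16, while align_forward_i64( 16, 8 ) == 16.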
GEN_IMPL_INLINE void* pointer_add( void* ptr, sw bytes )
{
	return zpl_cast( void* )( zpl_cast( u8* ) ptr + bytes );
}

GEN_IMPL_INLINE void const* pointer_add_const( void const* ptr, sw bytes )
{
	return zpl_cast( void const* )( zpl_cast( u8 const* ) ptr + bytes );
}

GEN_IMPL_INLINE sw pointer_diff( void const* begin, void const* end )
{
	return zpl_cast( sw )( zpl_cast( u8 const* ) end - zpl_cast( u8 const* ) begin );
}
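// Overlap-safe copy: non-overlapping ranges are forwarded to mem_copy; otherwise the
// bytes are copied forward or backward, word-at-a-time once the pointers are aligned
// (a musl-style memmove).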
GEN_IMPL_INLINE void* mem_move( void* dest, void const* source, sw n )
{
	if ( dest == NULL )
	{
		return NULL;
	}

	u8*       d = zpl_cast( u8* ) dest;
	u8 const* s = zpl_cast( u8 const* ) source;

	if ( d == s )
		return d;
	if ( s + n <= d || d + n <= s ) // NOTE: Non-overlapping
		return mem_copy( d, s, n );

	if ( d < s )
	{
		if ( zpl_cast( uptr ) s % size_of( sw ) == zpl_cast( uptr ) d % size_of( sw ) )
		{
			while ( zpl_cast( uptr ) d % size_of( sw ) )
			{
				if ( ! n-- )
					return dest;
				*d++ = *s++;
			}
			while ( n >= size_of( sw ) )
			{
				*zpl_cast( sw* ) d = *zpl_cast( sw* ) s;
				n -= size_of( sw );
				d += size_of( sw );
				s += size_of( sw );
			}
		}
		for ( ; n; n-- )
			*d++ = *s++;
	}
	else
	{
		if ( ( zpl_cast( uptr ) s % size_of( sw ) ) == ( zpl_cast( uptr ) d % size_of( sw ) ) )
		{
			while ( zpl_cast( uptr )( d + n ) % size_of( sw ) )
			{
				if ( ! n-- )
					return dest;
				d[ n ] = s[ n ];
			}
			while ( n >= size_of( sw ) )
			{
				n -= size_of( sw );
				*zpl_cast( sw* )( d + n ) = *zpl_cast( sw* )( s + n );
			}
		}
		while ( n )
			n--, d[ n ] = s[ n ];
	}

	return dest;
}
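// musl-style memset: fills the head and tail bytes first, then the aligned interior
// with 32-bit and finally 64-bit stores of the replicated byte.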
GEN_IMPL_INLINE void* mem_set( void* dest, u8 c, sw n )
{
	if ( dest == NULL )
	{
		return NULL;
	}

	u8* s = zpl_cast( u8* ) dest;
	sw  k;
	u32 c32 = ( ( u32 )-1 ) / 255 * c;

	if ( n == 0 )
		return dest;

	s[ 0 ] = s[ n - 1 ] = c;
	if ( n < 3 )
		return dest;

	s[ 1 ] = s[ n - 2 ] = c;
	s[ 2 ] = s[ n - 3 ] = c;
	if ( n < 7 )
		return dest;

	s[ 3 ] = s[ n - 4 ] = c;
	if ( n < 9 )
		return dest;

	k  = -zpl_cast( sptr ) s & 3;
	s += k;
	n -= k;
	n &= -4;

	*zpl_cast( u32* )( s + 0 )     = c32;
	*zpl_cast( u32* )( s + n - 4 ) = c32;
	if ( n < 9 )
		return dest;

	*zpl_cast( u32* )( s + 4 )      = c32;
	*zpl_cast( u32* )( s + 8 )      = c32;
	*zpl_cast( u32* )( s + n - 12 ) = c32;
	*zpl_cast( u32* )( s + n - 8 )  = c32;
	if ( n < 25 )
		return dest;

	*zpl_cast( u32* )( s + 12 )     = c32;
	*zpl_cast( u32* )( s + 16 )     = c32;
	*zpl_cast( u32* )( s + 20 )     = c32;
	*zpl_cast( u32* )( s + 24 )     = c32;
	*zpl_cast( u32* )( s + n - 28 ) = c32;
	*zpl_cast( u32* )( s + n - 24 ) = c32;
	*zpl_cast( u32* )( s + n - 20 ) = c32;
	*zpl_cast( u32* )( s + n - 16 ) = c32;

	k  = 24 + ( zpl_cast( uptr ) s & 4 );
	s += k;
	n -= k;

	{
		u64 c64 = ( zpl_cast( u64 ) c32 << 32 ) | c32;
		while ( n > 31 )
		{
			*zpl_cast( u64* )( s + 0 )  = c64;
			*zpl_cast( u64* )( s + 8 )  = c64;
			*zpl_cast( u64* )( s + 16 ) = c64;
			*zpl_cast( u64* )( s + 24 ) = c64;

			n -= 32;
			s += 32;
		}
	}

	return dest;
}
GEN_IMPL_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment )
{
	return a.Proc( a.Data, EAllocation_ALLOC, size, alignment, nullptr, 0, GEN_DEFAULT_ALLOCATOR_FLAGS );
}

GEN_IMPL_INLINE void* alloc( AllocatorInfo a, sw size )
{
	return alloc_align( a, size, GEN_DEFAULT_MEMORY_ALIGNMENT );
}

GEN_IMPL_INLINE void free( AllocatorInfo a, void* ptr )
{
	if ( ptr != nullptr )
		a.Proc( a.Data, EAllocation_FREE, 0, 0, ptr, 0, GEN_DEFAULT_ALLOCATOR_FLAGS );
}

GEN_IMPL_INLINE void free_all( AllocatorInfo a )
{
	a.Proc( a.Data, EAllocation_FREE_ALL, 0, 0, nullptr, 0, GEN_DEFAULT_ALLOCATOR_FLAGS );
}

GEN_IMPL_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size )
{
	return resize_align( a, ptr, old_size, new_size, GEN_DEFAULT_MEMORY_ALIGNMENT );
}

GEN_IMPL_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment )
{
	return a.Proc( a.Data, EAllocation_RESIZE, new_size, alignment, ptr, old_size, GEN_DEFAULT_ALLOCATOR_FLAGS );
}
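// Default resize policy: a null pointer degenerates to alloc, a zero size to free.
// Note that shrink requests are treated as no-ops (the old block is returned as-is);
// growing allocates a new block, moves the old contents over, and frees the original.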
GEN_IMPL_INLINE void* default_resize_align( AllocatorInfo a, void* old_memory, sw old_size, sw new_size, sw alignment )
{
	if ( ! old_memory )
		return alloc_align( a, new_size, alignment );

	if ( new_size == 0 )
	{
		free( a, old_memory );
		return nullptr;
	}

	if ( new_size < old_size )
		new_size = old_size;

	if ( old_size == new_size )
	{
		return old_memory;
	}
	else
	{
		void* new_memory = alloc_align( a, new_size, alignment );
		if ( ! new_memory )
			return nullptr;

		mem_move( new_memory, old_memory, min( new_size, old_size ) );
		free( a, old_memory );
		return new_memory;
	}
}
GEN_IMPL_INLINE void zero_size( void* ptr, sw size )
{
	mem_set( ptr, 0, size );
}

struct VirtualMemory
{
	void* data;
	sw    size;
};
//! Initialize virtual memory from existing data.
VirtualMemory vm_from_memory( void* data, sw size );
//! Allocate virtual memory at address with size.
//! @param addr The starting address of the region to reserve. If NULL, the operating system decides where to allocate it.
//! @param size The size of the region to reserve.
VirtualMemory vm_alloc( void* addr, sw size );
//! Release the virtual memory.
b32 vm_free( VirtualMemory vm );
//! Trim the virtual memory region, keeping `size` bytes starting at offset `lead_size`.
VirtualMemory vm_trim( VirtualMemory vm, sw lead_size, sw size );
//! Purge the virtual memory region, letting the OS reclaim its physical pages while keeping the address range reserved.
b32 gen_vm_purge( VirtualMemory vm );
//! Retrieve VM's page size and alignment.
sw gen_virtual_memory_page_size( sw* alignment_out );
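// Usage sketch (hypothetical user code, not part of this header):
//
//     VirtualMemory vm = vm_alloc( nullptr, megabytes( 1 ) ); // OS picks the address
//     if ( vm.data )
//     {
//         /* ... use vm.data / vm.size ... */
//         vm_free( vm );
//     }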
struct Arena
{
	static
	void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );

	static
	Arena init_from_memory( void* start, sw size )
	{
		return
		{
			{ nullptr, nullptr },
			start,
			size,
			0,
			0
		};
	}

	static
	Arena init_from_allocator( AllocatorInfo backing, sw size )
	{
		Arena result =
		{
			backing,
			alloc( backing, size ),
			size,
			0,
			0
		};
		return result;
	}

	static
	Arena init_sub( Arena& parent, sw size )
	{
		return init_from_allocator( parent.Backing, size );
	}

	sw alignment_of( sw alignment )
	{
		sw alignment_offset, result_pointer, mask;
		GEN_ASSERT( is_power_of_two( alignment ) );

		alignment_offset = 0;
		result_pointer   = zpl_cast( sw ) PhysicalStart + TotalUsed;
		mask             = alignment - 1;

		if ( result_pointer & mask )
			alignment_offset = alignment - ( result_pointer & mask );

		return alignment_offset;
	}

	void check()
	{
		GEN_ASSERT( TempCount == 0 );
	}

	void free()
	{
		if ( Backing.Proc )
		{
			gen::free( Backing, PhysicalStart );
			PhysicalStart = nullptr;
		}
	}

	sw size_remaining( sw alignment )
	{
		sw result = TotalSize - ( TotalUsed + alignment_of( alignment ) );
		return result;
	}

	AllocatorInfo Backing;
	void*         PhysicalStart;
	sw            TotalSize;
	sw            TotalUsed;
	sw            TempCount;

	operator AllocatorInfo()
	{
		return { allocator_proc, this };
	}
};
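// Usage sketch (hypothetical user code, not part of this header). Arena converts
// implicitly to AllocatorInfo, so it can be passed anywhere an allocator is expected:
//
//     Arena arena = Arena::init_from_allocator( heap(), megabytes( 1 ) );
//
//     u32* nums = alloc_array( arena, u32, 256 ); // bump-allocated from the arena
//     /* ... individual frees are unnecessary; the arena is released wholesale: */
//     arena.free();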
struct Pool
{
	static
	void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );

	static
	Pool init( AllocatorInfo backing, sw num_blocks, sw block_size )
	{
		return init_align( backing, num_blocks, block_size, GEN_DEFAULT_MEMORY_ALIGNMENT );
	}

	static
	Pool init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align );

	void clear();

	void free()
	{
		if ( Backing.Proc )
		{
			gen::free( Backing, PhysicalStart );
		}
	}

	AllocatorInfo Backing;
	void*         PhysicalStart;
	void*         FreeList;
	sw            BlockSize;
	sw            BlockAlign;
	sw            TotalSize;
	sw            NumBlocks;

	operator AllocatorInfo()
	{
		return { allocator_proc, this };
	}
};
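// Usage sketch (hypothetical user code, not part of this header). A pool hands out
// fixed-size blocks, so requests are expected to match the block size it was built with:
//
//     struct Particle { u32 id; u64 state; };
//
//     Pool pool = Pool::init( heap(), 128, size_of( Particle ) );
//
//     Particle* p = alloc_item( pool, Particle ); // one block from the free list
//     free( pool, p );                            // returns the block to the free list
//     pool.free();                                // releases the pool's backing memory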
#pragma endregion Memory