Mirror of https://github.com/Ed94/gencpp.git (synced 2025-07-07 06:05:44 -07:00)
Preparing to implement ADT for CSV functions.

I'm rewriting it the way I'd like to learn it.
- I want to use CSV parsing heavily with the library, so I'm just going to add it to the scanner.
- The global memory allocator moved to the regular gen header/source, as it's something really made just for the library (see the usage sketch below).
- Some small refactors to macros.
- The parser was updated to support tokenizing preprocessor directives; this is based on the intuition that the scanner will require it.
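Since the global allocator now lives in the regular gen header/source, here is a minimal usage sketch of how it is meant to be driven. It only leans on names visible in the diff below (Memory::setup, Memory::GlobalAllocator, Memory::cleanup, alloc_align, GEN_DEFAULT_MEMORY_ALIGNMENT); the gen.hpp include, the main() scaffold, and the 256-byte request are illustrative assumptions rather than part of this commit.

// Hypothetical usage sketch of the relocated global allocator (see the Memory
// namespace in the last hunk). The include, main(), and sizes are assumptions.
#include "gen.hpp"
using namespace gen;

int main()
{
	Memory::setup();    // reserves Global_AllocatorBuckets and the first Arena bucket

	// Requests are served from the current bucket; Global_Allocator_Proc appends
	// a fresh bucket whenever the active one cannot fit the allocation.
	void* block = alloc_align( Memory::GlobalAllocator, 256, GEN_DEFAULT_MEMORY_ALIGNMENT );

	// ... drive code generation with the library ...

	Memory::cleanup();  // frees every bucket, then the bucket array itself
	return 0;
}
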
@@ -225,372 +225,6 @@ namespace gen
}
#pragma endregion String Ops

#pragma region Memory
void* mem_copy( void* dest, void const* source, sw n )
{
	if ( dest == NULL )
	{
		return NULL;
	}

	return memcpy( dest, source, n );
}

void const* mem_find( void const* data, u8 c, sw n )
{
	u8 const* s = zpl_cast( u8 const* ) data;
	while ( ( zpl_cast( uptr ) s & ( sizeof( uw ) - 1 ) ) && n && *s != c )
	{
		s++;
		n--;
	}
	if ( n && *s != c )
	{
		sw const* w;
		sw k = GEN__ONES * c;
		w = zpl_cast( sw const* ) s;
		while ( n >= size_of( sw ) && ! GEN__HAS_ZERO( *w ^ k ) )
		{
			w++;
			n -= size_of( sw );
		}
		s = zpl_cast( u8 const* ) w;
		while ( n && *s != c )
		{
			s++;
			n--;
		}
	}

	return n ? zpl_cast( void const* ) s : NULL;
}

#define GEN_HEAP_STATS_MAGIC 0xDEADC0DE

struct _heap_stats
{
	u32 magic;
	sw used_memory;
	sw alloc_count;
};

global _heap_stats _heap_stats_info;

void heap_stats_init( void )
{
	zero_item( &_heap_stats_info );
	_heap_stats_info.magic = GEN_HEAP_STATS_MAGIC;
}

sw heap_stats_used_memory( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	return _heap_stats_info.used_memory;
}

sw heap_stats_alloc_count( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	return _heap_stats_info.alloc_count;
}

void heap_stats_check( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	GEN_ASSERT( _heap_stats_info.used_memory == 0 );
	GEN_ASSERT( _heap_stats_info.alloc_count == 0 );
}

struct _heap_alloc_info
{
	sw size;
	void* physical_start;
};

void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	void* ptr = NULL;
	// unused( allocator_data );
	// unused( old_size );
	if ( ! alignment )
		alignment = GEN_DEFAULT_MEMORY_ALIGNMENT;

#ifdef GEN_HEAP_ANALYSIS
	sw alloc_info_size = size_of( _heap_alloc_info );
	sw alloc_info_remainder = ( alloc_info_size % alignment );
	sw track_size = max( alloc_info_size, alignment ) + alloc_info_remainder;
	switch ( type )
	{
		case EAllocation_FREE :
		{
			if ( ! old_memory )
				break;
			_heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* ) old_memory - 1;
			_heap_stats_info.used_memory -= alloc_info->size;
			_heap_stats_info.alloc_count--;
			old_memory = alloc_info->physical_start;
		}
		break;
		case EAllocation_ALLOC :
		{
			size += track_size;
		}
		break;
		default :
			break;
	}
#endif

	switch ( type )
	{
#if defined( GEN_COMPILER_MSVC ) || ( defined( GEN_COMPILER_GCC ) && defined( GEN_SYSTEM_WINDOWS ) ) || ( defined( GEN_COMPILER_TINYC ) && defined( GEN_SYSTEM_WINDOWS ) )
		case EAllocation_ALLOC :
			ptr = _aligned_malloc( size, alignment );
			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
			break;
		case EAllocation_FREE :
			_aligned_free( old_memory );
			break;
		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;

#elif defined( GEN_SYSTEM_LINUX ) && ! defined( GEN_CPU_ARM ) && ! defined( GEN_COMPILER_TINYC )
		case EAllocation_ALLOC :
		{
			ptr = aligned_alloc( alignment, ( size + alignment - 1 ) & ~( alignment - 1 ) );

			if ( flags & GEN_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
			{
				zero_size( ptr, size );
			}
		}
		break;

		case EAllocation_FREE :
		{
			free( old_memory );
		}
		break;

		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
#else
		case EAllocation_ALLOC :
		{
			posix_memalign( &ptr, alignment, size );

			if ( flags & GEN_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
			{
				zero_size( ptr, size );
			}
		}
		break;

		case EAllocation_FREE :
		{
			free( old_memory );
		}
		break;

		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
#endif

		case EAllocation_FREE_ALL :
			break;
	}

#ifdef GEN_HEAP_ANALYSIS
	if ( type == EAllocation_ALLOC )
	{
		_heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* )( zpl_cast( char* ) ptr + alloc_info_remainder );
		zero_item( alloc_info );
		alloc_info->size = size - track_size;
		alloc_info->physical_start = ptr;
		ptr = zpl_cast( void* )( alloc_info + 1 );
		_heap_stats_info.used_memory += alloc_info->size;
		_heap_stats_info.alloc_count++;
	}
#endif

	return ptr;
}

void* Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	Arena* arena = rcast(Arena*, allocator_data);
	void* ptr = NULL;

	// unused( old_size );

	switch ( type )
	{
		case EAllocation_ALLOC :
		{
			void* end = pointer_add( arena->PhysicalStart, arena->TotalUsed );
			sw total_size = align_forward_i64( size, alignment );

			// NOTE: Out of memory
			if ( arena->TotalUsed + total_size > (sw) arena->TotalSize )
			{
				// zpl__printf_err("%s", "Arena out of memory\n");
				fatal("Arena out of memory! (Possibly could not fit for the largest size Arena!!)");
				return nullptr;
			}

			ptr = align_forward( end, alignment );
			arena->TotalUsed += total_size;

			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
		}
		break;

		case EAllocation_FREE :
			// NOTE: Free all at once
			// Use Temp_Arena_Memory if you want to free a block
			break;

		case EAllocation_FREE_ALL :
			arena->TotalUsed = 0;
			break;

		case EAllocation_RESIZE :
		{
			// TODO : Check if ptr is on top of stack and just extend
			AllocatorInfo a = arena->Backing;
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
	}
	return ptr;
}

void* Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	Pool* pool = zpl_cast( Pool* ) allocator_data;
	void* ptr = NULL;

	// unused( old_size );

	switch ( type )
	{
		case EAllocation_ALLOC :
		{
			uptr next_free;

			GEN_ASSERT( size == pool->BlockSize );
			GEN_ASSERT( alignment == pool->BlockAlign );
			GEN_ASSERT( pool->FreeList != NULL );

			next_free = *zpl_cast( uptr* ) pool->FreeList;
			ptr = pool->FreeList;
			pool->FreeList = zpl_cast( void* ) next_free;
			pool->TotalSize += pool->BlockSize;

			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
		}
		break;

		case EAllocation_FREE :
		{
			uptr* next;
			if ( old_memory == NULL )
				return NULL;

			next = zpl_cast( uptr* ) old_memory;
			*next = zpl_cast( uptr ) pool->FreeList;
			pool->FreeList = old_memory;
			pool->TotalSize -= pool->BlockSize;
		}
		break;

		case EAllocation_FREE_ALL :
		{
			sw actual_block_size, block_index;
			void* curr;
			uptr* end;

			actual_block_size = pool->BlockSize + pool->BlockAlign;
			pool->TotalSize = 0;

			// NOTE: Init intrusive freelist
			curr = pool->PhysicalStart;
			for ( block_index = 0; block_index < pool->NumBlocks - 1; block_index++ )
			{
				uptr* next = zpl_cast( uptr* ) curr;
				*next = zpl_cast( uptr ) curr + actual_block_size;
				curr = pointer_add( curr, actual_block_size );
			}

			end = zpl_cast( uptr* ) curr;
			*end = zpl_cast( uptr ) NULL;
			pool->FreeList = pool->PhysicalStart;
		}
		break;

		case EAllocation_RESIZE :
			// NOTE: Cannot resize
			GEN_PANIC( "You cannot resize something allocated by with a pool." );
			break;
	}

	return ptr;
}

Pool Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
{
	Pool pool = {};

	sw actual_block_size, pool_size, block_index;
	void *data, *curr;
	uptr* end;

	zero_item( &pool );

	pool.Backing = backing;
	pool.BlockSize = block_size;
	pool.BlockAlign = block_align;
	pool.NumBlocks = num_blocks;

	actual_block_size = block_size + block_align;
	pool_size = num_blocks * actual_block_size;

	data = alloc_align( backing, pool_size, block_align );

	// NOTE: Init intrusive freelist
	curr = data;
	for ( block_index = 0; block_index < num_blocks - 1; block_index++ )
	{
		uptr* next = ( uptr* ) curr;
		*next = ( uptr ) curr + actual_block_size;
		curr = pointer_add( curr, actual_block_size );
	}

	end = ( uptr* ) curr;
	*end = ( uptr ) NULL;

	pool.PhysicalStart = data;
	pool.FreeList = data;

	return pool;
}
#pragma endregion Memory

#pragma region Printing
enum
{
@@ -1144,6 +778,402 @@ namespace gen
}
#pragma endregion Printing

#pragma region Memory
void* mem_copy( void* dest, void const* source, sw n )
{
	if ( dest == NULL )
	{
		return NULL;
	}

	return memcpy( dest, source, n );
}

void const* mem_find( void const* data, u8 c, sw n )
{
	u8 const* s = zpl_cast( u8 const* ) data;
	while ( ( zpl_cast( uptr ) s & ( sizeof( uw ) - 1 ) ) && n && *s != c )
	{
		s++;
		n--;
	}
	if ( n && *s != c )
	{
		sw const* w;
		sw k = GEN__ONES * c;
		w = zpl_cast( sw const* ) s;
		while ( n >= size_of( sw ) && ! GEN__HAS_ZERO( *w ^ k ) )
		{
			w++;
			n -= size_of( sw );
		}
		s = zpl_cast( u8 const* ) w;
		while ( n && *s != c )
		{
			s++;
			n--;
		}
	}

	return n ? zpl_cast( void const* ) s : NULL;
}

#define GEN_HEAP_STATS_MAGIC 0xDEADC0DE

struct _heap_stats
{
	u32 magic;
	sw used_memory;
	sw alloc_count;
};

global _heap_stats _heap_stats_info;

void heap_stats_init( void )
{
	zero_item( &_heap_stats_info );
	_heap_stats_info.magic = GEN_HEAP_STATS_MAGIC;
}

sw heap_stats_used_memory( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	return _heap_stats_info.used_memory;
}

sw heap_stats_alloc_count( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	return _heap_stats_info.alloc_count;
}

void heap_stats_check( void )
{
	GEN_ASSERT_MSG( _heap_stats_info.magic == GEN_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
	GEN_ASSERT( _heap_stats_info.used_memory == 0 );
	GEN_ASSERT( _heap_stats_info.alloc_count == 0 );
}

struct _heap_alloc_info
{
	sw size;
	void* physical_start;
};

void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	void* ptr = NULL;
	// unused( allocator_data );
	// unused( old_size );
	if ( ! alignment )
		alignment = GEN_DEFAULT_MEMORY_ALIGNMENT;

#ifdef GEN_HEAP_ANALYSIS
	sw alloc_info_size = size_of( _heap_alloc_info );
	sw alloc_info_remainder = ( alloc_info_size % alignment );
	sw track_size = max( alloc_info_size, alignment ) + alloc_info_remainder;
	switch ( type )
	{
		case EAllocation_FREE :
		{
			if ( ! old_memory )
				break;
			_heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* ) old_memory - 1;
			_heap_stats_info.used_memory -= alloc_info->size;
			_heap_stats_info.alloc_count--;
			old_memory = alloc_info->physical_start;
		}
		break;
		case EAllocation_ALLOC :
		{
			size += track_size;
		}
		break;
		default :
			break;
	}
#endif

	switch ( type )
	{
#if defined( GEN_COMPILER_MSVC ) || ( defined( GEN_COMPILER_GCC ) && defined( GEN_SYSTEM_WINDOWS ) ) || ( defined( GEN_COMPILER_TINYC ) && defined( GEN_SYSTEM_WINDOWS ) )
		case EAllocation_ALLOC :
			ptr = _aligned_malloc( size, alignment );
			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
			break;
		case EAllocation_FREE :
			_aligned_free( old_memory );
			break;
		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;

#elif defined( GEN_SYSTEM_LINUX ) && ! defined( GEN_CPU_ARM ) && ! defined( GEN_COMPILER_TINYC )
		case EAllocation_ALLOC :
		{
			ptr = aligned_alloc( alignment, ( size + alignment - 1 ) & ~( alignment - 1 ) );

			if ( flags & GEN_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
			{
				zero_size( ptr, size );
			}
		}
		break;

		case EAllocation_FREE :
		{
			free( old_memory );
		}
		break;

		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
#else
		case EAllocation_ALLOC :
		{
			posix_memalign( &ptr, alignment, size );

			if ( flags & GEN_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
			{
				zero_size( ptr, size );
			}
		}
		break;

		case EAllocation_FREE :
		{
			free( old_memory );
		}
		break;

		case EAllocation_RESIZE :
		{
			AllocatorInfo a = heap();
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
#endif

		case EAllocation_FREE_ALL :
			break;
	}

#ifdef GEN_HEAP_ANALYSIS
	if ( type == EAllocation_ALLOC )
	{
		_heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* )( zpl_cast( char* ) ptr + alloc_info_remainder );
		zero_item( alloc_info );
		alloc_info->size = size - track_size;
		alloc_info->physical_start = ptr;
		ptr = zpl_cast( void* )( alloc_info + 1 );
		_heap_stats_info.used_memory += alloc_info->size;
		_heap_stats_info.alloc_count++;
	}
#endif

	return ptr;
}

void* Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	Arena* arena = rcast(Arena*, allocator_data);
	void* ptr = NULL;

	// unused( old_size );

	switch ( type )
	{
		case EAllocation_ALLOC :
		{
			void* end = pointer_add( arena->PhysicalStart, arena->TotalUsed );
			sw total_size = align_forward_i64( size, alignment );

			// NOTE: Out of memory
			if ( arena->TotalUsed + total_size > (sw) arena->TotalSize )
			{
				// zpl__printf_err("%s", "Arena out of memory\n");
				fatal("Arena out of memory! (Possibly could not fit for the largest size Arena!!)");
				return nullptr;
			}

			ptr = align_forward( end, alignment );
			arena->TotalUsed += total_size;

			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
		}
		break;

		case EAllocation_FREE :
			// NOTE: Free all at once
			// Use Temp_Arena_Memory if you want to free a block
			break;

		case EAllocation_FREE_ALL :
			arena->TotalUsed = 0;
			break;

		case EAllocation_RESIZE :
		{
			// TODO : Check if ptr is on top of stack and just extend
			AllocatorInfo a = arena->Backing;
			ptr = default_resize_align( a, old_memory, old_size, size, alignment );
		}
		break;
	}
	return ptr;
}

void* Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
	Pool* pool = zpl_cast( Pool* ) allocator_data;
	void* ptr = NULL;

	// unused( old_size );

	switch ( type )
	{
		case EAllocation_ALLOC :
		{
			uptr next_free;

			GEN_ASSERT( size == pool->BlockSize );
			GEN_ASSERT( alignment == pool->BlockAlign );
			GEN_ASSERT( pool->FreeList != NULL );

			next_free = *zpl_cast( uptr* ) pool->FreeList;
			ptr = pool->FreeList;
			pool->FreeList = zpl_cast( void* ) next_free;
			pool->TotalSize += pool->BlockSize;

			if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
				zero_size( ptr, size );
		}
		break;

		case EAllocation_FREE :
		{
			uptr* next;
			if ( old_memory == NULL )
				return NULL;

			next = zpl_cast( uptr* ) old_memory;
			*next = zpl_cast( uptr ) pool->FreeList;
			pool->FreeList = old_memory;
			pool->TotalSize -= pool->BlockSize;
		}
		break;

		case EAllocation_FREE_ALL :
		{
			sw actual_block_size, block_index;
			void* curr;
			uptr* end;

			actual_block_size = pool->BlockSize + pool->BlockAlign;
			pool->TotalSize = 0;

			// NOTE: Init intrusive freelist
			curr = pool->PhysicalStart;
			for ( block_index = 0; block_index < pool->NumBlocks - 1; block_index++ )
			{
				uptr* next = zpl_cast( uptr* ) curr;
				*next = zpl_cast( uptr ) curr + actual_block_size;
				curr = pointer_add( curr, actual_block_size );
			}

			end = zpl_cast( uptr* ) curr;
			*end = zpl_cast( uptr ) NULL;
			pool->FreeList = pool->PhysicalStart;
		}
		break;

		case EAllocation_RESIZE :
			// NOTE: Cannot resize
			GEN_PANIC( "You cannot resize something allocated by with a pool." );
			break;
	}

	return ptr;
}

Pool Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
{
	Pool pool = {};

	sw actual_block_size, pool_size, block_index;
	void *data, *curr;
	uptr* end;

	zero_item( &pool );

	pool.Backing = backing;
	pool.BlockSize = block_size;
	pool.BlockAlign = block_align;
	pool.NumBlocks = num_blocks;

	actual_block_size = block_size + block_align;
	pool_size = num_blocks * actual_block_size;

	data = alloc_align( backing, pool_size, block_align );

	// NOTE: Init intrusive freelist
	curr = data;
	for ( block_index = 0; block_index < num_blocks - 1; block_index++ )
	{
		uptr* next = ( uptr* ) curr;
		*next = ( uptr ) curr + actual_block_size;
		curr = pointer_add( curr, actual_block_size );
	}

	end = ( uptr* ) curr;
	*end = ( uptr ) NULL;

	pool.PhysicalStart = data;
	pool.FreeList = data;

	return pool;
}

void Pool::clear()
{
	sw actual_block_size, block_index;
	void* curr;
	uptr* end;

	actual_block_size = BlockSize + BlockAlign;

	curr = PhysicalStart;
	for ( block_index = 0; block_index < NumBlocks - 1; block_index++ )
	{
		uptr* next = ( uptr* ) curr;
		*next = ( uptr ) curr + actual_block_size;
		curr = pointer_add( curr, actual_block_size );
	}

	end = ( uptr* ) curr;
	*end = ( uptr ) NULL;

	FreeList = PhysicalStart;
}
#pragma endregion Memory

#pragma region ADT

#pragma endregion ADT

#pragma region CSV

#pragma endregion CSV

#pragma region Hashing
global u32 const _crc32_table[ 256 ] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd,
@@ -1840,107 +1870,6 @@ namespace gen
#pragma endregion Timing
#endif

namespace Memory
{
	global AllocatorInfo GlobalAllocator;
	global Array<Arena> Global_AllocatorBuckets;

	void* Global_Allocator_Proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
	{
		Arena& last = Global_AllocatorBuckets.back();

		switch ( type )
		{
			case EAllocation_ALLOC:
			{
				if ( last.TotalUsed + size > last.TotalSize )
				{
					Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );

					if ( bucket.PhysicalStart == nullptr )
						fatal( "Failed to create bucket for Global_AllocatorBuckets");

					if ( ! Global_AllocatorBuckets.append( bucket ) )
						fatal( "Failed to append bucket to Global_AllocatorBuckets");

					last = Global_AllocatorBuckets.back();
				}

				return alloc_align( last, size, alignment );
			}
			case EAllocation_FREE:
			{
				// Doesn't recycle.
			}
			break;
			case EAllocation_FREE_ALL:
			{
				// Memory::cleanup instead.
			}
			break;
			case EAllocation_RESIZE:
			{
				if ( last.TotalUsed + size > last.TotalSize )
				{
					Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );

					if ( bucket.PhysicalStart == nullptr )
						fatal( "Failed to create bucket for Global_AllocatorBuckets");

					if ( ! Global_AllocatorBuckets.append( bucket ) )
						fatal( "Failed to append bucket to Global_AllocatorBuckets");

					last = Global_AllocatorBuckets.back();
				}

				void* result = alloc_align( last.Backing, size, alignment );

				if ( result != nullptr && old_memory != nullptr )
				{
					mem_copy( result, old_memory, old_size );
				}

				return result;
			}
		}

		return nullptr;
	}

	void setup()
	{
		GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };

		Global_AllocatorBuckets = Array<Arena>::init_reserve( heap(), 128 );

		if ( Global_AllocatorBuckets == nullptr )
			fatal( "Failed to reserve memory for Global_AllocatorBuckets");

		Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );

		if ( bucket.PhysicalStart == nullptr )
			fatal( "Failed to create first bucket for Global_AllocatorBuckets");

		Global_AllocatorBuckets.append( bucket );
	}

	void cleanup()
	{
		s32 index = 0;
		s32 left = Global_AllocatorBuckets.num();
		do
		{
			Arena* bucket = & Global_AllocatorBuckets[ index ];
			bucket->free();
			index++;
		}
		while ( left--, left );

		Global_AllocatorBuckets.free();
	}

// namespace Memory
}
// namespace gen
}
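
For reference, a short sketch of how the Pool added in this commit (including the new Pool::clear) might be exercised. Everything it calls appears in the hunks above; wrapping the pool in an AllocatorInfo mirrors how GlobalAllocator is constructed in Memory::setup, and the block counts and sizes are made-up values.

// Hypothetical sketch, not part of the commit: exercising Pool's intrusive free list.
using namespace gen;

void pool_example()
{
	// 64 blocks of 64 bytes, 16-byte aligned, carved out of the heap allocator.
	Pool pool = Pool::init_align( heap(), 64, 64, 16 );

	// Assumed wrapping: an AllocatorInfo built the same way Memory::setup builds GlobalAllocator.
	AllocatorInfo pool_info { & Pool::allocator_proc, & pool };

	// Pops the head block off the intrusive free list (size/alignment must match the pool's).
	void* block = alloc_align( pool_info, 64, 16 );

	// clear() rebuilds the free list across every block, releasing them all at once.
	pool.clear();
}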