diff --git a/.vscode/gencpp.natvis b/.vscode/gencpp.natvis
index 427e735..8a9a7c3 100644
--- a/.vscode/gencpp.natvis
+++ b/.vscode/gencpp.natvis
@@ -1,11 +1,33 @@
-
+
+ Data:{Data} Proc:{Proc}
+
+
+
+ Num:{((Header*)((char*)Data - sizeof(Header)))->Num}, Capacity:{((Header*)((char*)Data - sizeof(Header)))->Capacity}
+
+
+ {(Header*)((char*)Data - sizeof(Header))}
+
+ - ((Header*)((char*)Data - sizeof(Header)))->Allocator
+ - ((Header*)((char*)Data - sizeof(Header)))->Capacity
+ - ((Header*)((char*)Data - sizeof(Header)))->Num
+
+
+
+ ((Header*)((char*)Data - sizeof(Header)))->Capacity
+ Data
+
+
+
+
+
Len:{Len} Ptr:{Ptr, [Len]s}
-
+
null
{Data,na}
@@ -20,7 +42,7 @@
-
+
Length: {Length}, Capacity: {Capacity}
- Allocator
@@ -41,7 +63,7 @@
- ArrStatic
- StaticIndex
- ArrDyn
- - ((ArrayHeader*)((char*)ArrDyn - sizeof(ArrayHeader)))->count
+ - ArrDyn.num()
@@ -57,7 +79,7 @@
- ast->ArrStatic
- ast->StaticIndex
- ast->ArrDyn
- - ((ArrayHeader*)((char*)ast->ArrDyn - sizeof(ArrayHeader)))->count
+ - ast->ArrDyn.num()
@@ -71,21 +93,6 @@
Current[ { Arr[Idx] } ] Idx:{ Idx }
-
-
- {(ArrayHeader*)((char*)Arr - sizeof(ArrayHeader))}
-
- - ((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->elem_size
- - ((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->count
- - ((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->capacity
- - ((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->allocator
-
-
-
- ((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->count
- Arr
-
-
diff --git a/project/Bloat.cpp b/project/Bloat.cpp
index 2e40923..3aa7089 100644
--- a/project/Bloat.cpp
+++ b/project/Bloat.cpp
@@ -2,265 +2,812 @@
#include "Bloat.hpp"
-void* gen_Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
+namespace gen
{
- gen_Arena* arena = rcast(gen_Arena*, allocator_data);
- void* ptr = NULL;
+#pragma region Memory
- // unused( old_size );
-
- switch ( type )
+ struct _heap_stats
{
- case EAllocation_ALLOC :
- {
- void* end = pointer_add( arena->PhysicalStart, arena->TotalUsed );
- sw total_size = align_forward_i64( size, alignment );
+ u32 magic;
+ sw used_memory;
+ sw alloc_count;
+ };
- // NOTE: Out of memory
- if ( arena->TotalUsed + total_size > (sw) arena->TotalSize )
- {
- // zpl__printf_err("%s", "Arena out of memory\n");
- return nullptr;
- }
+ global _heap_stats _heap_stats_info;
- ptr = align_forward( end, alignment );
- arena->TotalUsed += total_size;
-
- if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
- zero_size( ptr, size );
- }
- break;
-
- case EAllocation_FREE :
- // NOTE: Free all at once
- // Use Temp_Arena_Memory if you want to free a block
- break;
-
- case EAllocation_FREE_ALL :
- arena->TotalUsed = 0;
- break;
-
- case EAllocation_RESIZE :
- {
- // TODO : Check if ptr is on top of stack and just extend
- AllocatorInfo a = arena->Backing;
- ptr = default_resize_align( a, old_memory, old_size, size, alignment );
- }
- break;
- }
- return ptr;
-}
-
-void* gen_Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
-{
- gen_Pool* pool = zpl_cast( gen_Pool* ) allocator_data;
- void* ptr = NULL;
-
- // unused( old_size );
-
- switch ( type )
+ void heap_stats_init( void )
{
- case EAllocation_ALLOC :
- {
- uptr next_free;
-
- ZPL_ASSERT( size == pool->BlockSize );
- ZPL_ASSERT( alignment == pool->BlockAlign );
- ZPL_ASSERT( pool->FreeList != NULL );
-
- next_free = *zpl_cast( uptr* ) pool->FreeList;
- ptr = pool->FreeList;
- pool->FreeList = zpl_cast( void* ) next_free;
- pool->TotalSize += pool->BlockSize;
-
- if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
- zero_size( ptr, size );
- }
- break;
-
- case EAllocation_FREE :
- {
- uptr* next;
- if ( old_memory == NULL )
- return NULL;
-
- next = zpl_cast( uptr* ) old_memory;
- *next = zpl_cast( uptr ) pool->FreeList;
- pool->FreeList = old_memory;
- pool->TotalSize -= pool->BlockSize;
- }
- break;
-
- case EAllocation_FREE_ALL :
- {
- sw actual_block_size, block_index;
- void* curr;
- uptr* end;
-
- actual_block_size = pool->BlockSize + pool->BlockAlign;
- pool->TotalSize = 0;
-
- // NOTE: Init intrusive freelist
- curr = pool->PhysicalStart;
- for ( block_index = 0; block_index < pool->NumBlocks - 1; block_index++ )
- {
- uptr* next = zpl_cast( uptr* ) curr;
- *next = zpl_cast( uptr ) curr + actual_block_size;
- curr = pointer_add( curr, actual_block_size );
- }
-
- end = zpl_cast( uptr* ) curr;
- *end = zpl_cast( uptr ) NULL;
- pool->FreeList = pool->PhysicalStart;
- }
- break;
-
- case EAllocation_RESIZE :
- // NOTE: Cannot resize
- ZPL_PANIC( "You cannot resize something allocated by with a pool." );
- break;
+ zero_item( &_heap_stats_info );
+ _heap_stats_info.magic = ZPL_HEAP_STATS_MAGIC;
}
- return ptr;
-}
-
-gen_Pool gen_Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
-{
- gen_Pool pool = {};
-
- sw actual_block_size, pool_size, block_index;
- void *data, *curr;
- uptr* end;
-
- pool.Backing = backing;
- pool.BlockSize = block_size;
- pool.BlockAlign = block_align;
- pool.NumBlocks = num_blocks;
-
- actual_block_size = block_size + block_align;
- pool_size = num_blocks * actual_block_size;
-
- data = alloc_align( backing, pool_size, block_align );
-
- // NOTE: Init intrusive freelist
- curr = data;
- for ( block_index = 0; block_index < num_blocks - 1; block_index++ )
+ sw heap_stats_used_memory( void )
{
- uptr* next = ( uptr* ) curr;
- *next = ( uptr ) curr + actual_block_size;
- curr = pointer_add( curr, actual_block_size );
+ ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
+ return _heap_stats_info.used_memory;
}
- end = ( uptr* ) curr;
- *end = ( uptr ) 0;
-
- pool.PhysicalStart = data;
- pool.FreeList = data;
-
- return pool;
-}
-
-
-namespace Memory
-{
- using namespace zpl;
-
- global AllocatorInfo GlobalAllocator;
-
- global Array(Arena) Global_AllocatorBuckets;
-
- void* Global_Allocator_Proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
+ sw heap_stats_alloc_count( void )
{
- Arena* last = & array_back( Global_AllocatorBuckets );
+ ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
+ return _heap_stats_info.alloc_count;
+ }
+
+ void heap_stats_check( void )
+ {
+ ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
+ ZPL_ASSERT( _heap_stats_info.used_memory == 0 );
+ ZPL_ASSERT( _heap_stats_info.alloc_count == 0 );
+ }
+
+ struct _heap_alloc_info
+ {
+ sw size;
+ void* physical_start;
+ };
+
+ void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
+ {
+ return zpl::heap_allocator_proc( allocator_data, (zpl::AllocType)type, size, alignment, old_memory, old_size, flags );
+
+ // void* ptr = NULL;
+ // // unused( allocator_data );
+ // // unused( old_size );
+ // if ( ! alignment )
+ // alignment = ZPL_DEFAULT_MEMORY_ALIGNMENT;
+
+ // #ifdef ZPL_HEAP_ANALYSIS
+ // sw alloc_info_size = size_of( _heap_alloc_info );
+ // sw alloc_info_remainder = ( alloc_info_size % alignment );
+ // sw track_size = max( alloc_info_size, alignment ) + alloc_info_remainder;
+ // switch ( type )
+ // {
+ // case EAllocation_FREE :
+ // {
+ // if ( ! old_memory )
+ // break;
+ // _heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* ) old_memory - 1;
+ // _heap_stats_info.used_memory -= alloc_info->size;
+ // _heap_stats_info.alloc_count--;
+ // old_memory = alloc_info->physical_start;
+ // }
+ // break;
+ // case EAllocation_ALLOC :
+ // {
+ // size += track_size;
+ // }
+ // break;
+ // default :
+ // break;
+ // }
+ // #endif
+
+ // switch ( type )
+ // {
+ // #if defined( ZPL_COMPILER_MSVC ) || ( defined( ZPL_COMPILER_GCC ) && defined( ZPL_SYSTEM_WINDOWS ) ) || ( defined( ZPL_COMPILER_TINYC ) && defined( ZPL_SYSTEM_WINDOWS ) )
+ // case EAllocation_ALLOC :
+ // ptr = _aligned_malloc( size, alignment );
+ // if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
+ // zero_size( ptr, size );
+ // break;
+ // case EAllocation_FREE :
+ // _aligned_free( old_memory );
+ // break;
+ // case EAllocation_RESIZE :
+ // {
+ // AllocatorInfo a = heap();
+ // ptr = default_resize_align( a, old_memory, old_size, size, alignment );
+ // }
+ // break;
+
+ // #elif defined( ZPL_SYSTEM_LINUX ) && ! defined( ZPL_CPU_ARM ) && ! defined( ZPL_COMPILER_TINYC )
+ // case EAllocation_ALLOC :
+ // {
+ // ptr = aligned_alloc( alignment, ( size + alignment - 1 ) & ~( alignment - 1 ) );
+
+ // if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
+ // {
+ // zero_size( ptr, size );
+ // }
+ // }
+ // break;
+
+ // case EAllocation_FREE :
+ // {
+ // free( old_memory );
+ // }
+ // break;
+
+ // case EAllocation_RESIZE :
+ // {
+ // AllocatorInfo a = heap();
+ // ptr = default_resize_align( a, old_memory, old_size, size, alignment );
+ // }
+ // break;
+ // #else
+ // case EAllocation_ALLOC :
+ // {
+ // posix_memalign( &ptr, alignment, size );
+
+ // if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
+ // {
+ // zero_size( ptr, size );
+ // }
+ // }
+ // break;
+
+ // case EAllocation_FREE :
+ // {
+ // free( old_memory );
+ // }
+ // break;
+
+ // case EAllocation_RESIZE :
+ // {
+ // AllocatorInfo a = heap();
+ // ptr = default_resize_align( a, old_memory, old_size, size, alignment );
+ // }
+ // break;
+ // #endif
+
+ // case EAllocation_FREE_ALL :
+ // break;
+ // }
+
+ // #ifdef ZPL_HEAP_ANALYSIS
+ // if ( type == EAllocation_ALLOC )
+ // {
+ // _heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* )( zpl_cast( char* ) ptr + alloc_info_remainder );
+ // zero_item( alloc_info );
+ // alloc_info->size = size - track_size;
+ // alloc_info->physical_start = ptr;
+ // ptr = zpl_cast( void* )( alloc_info + 1 );
+ // _heap_stats_info.used_memory += alloc_info->size;
+ // _heap_stats_info.alloc_count++;
+ // }
+ // #endif
+
+ // return ptr;
+ }
+
+ void* Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
+ {
+ Arena* arena = rcast(Arena*, allocator_data);
+ void* ptr = NULL;
+
+ // unused( old_size );
switch ( type )
{
- case EAllocation_ALLOC:
- {
- if ( last->total_allocated + size > last->total_size )
+ case EAllocation_ALLOC :
{
- Arena bucket;
- arena_init_from_allocator( & bucket, heap(), BucketSize );
+ void* end = pointer_add( arena->PhysicalStart, arena->TotalUsed );
+ sw total_size = align_forward_i64( size, alignment );
- if ( bucket.physical_start == nullptr )
- fatal( "Failed to create bucket for Global_AllocatorBuckets");
+ // NOTE: Out of memory
+ if ( arena->TotalUsed + total_size > (sw) arena->TotalSize )
+ {
+ // zpl__printf_err("%s", "Arena out of memory\n");
+ return nullptr;
+ }
- if ( ! array_append( Global_AllocatorBuckets, bucket ) )
- fatal( "Failed to append bucket to Global_AllocatorBuckets");
+ ptr = align_forward( end, alignment );
+ arena->TotalUsed += total_size;
- last = & array_back( Global_AllocatorBuckets );
+ if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
+ zero_size( ptr, size );
}
+ break;
- return alloc_align( arena_allocator( last), size, alignment );
- }
- case EAllocation_FREE:
- {
- // Doesn't recycle.
- }
- break;
- case EAllocation_FREE_ALL:
- {
- // Memory::cleanup instead.
- }
- break;
- case EAllocation_RESIZE:
- {
- if ( last->total_allocated + size > last->total_size )
+ case EAllocation_FREE :
+ // NOTE: Free all at once
+ // Use Temp_Arena_Memory if you want to free a block
+ break;
+
+ case EAllocation_FREE_ALL :
+ arena->TotalUsed = 0;
+ break;
+
+ case EAllocation_RESIZE :
{
- Arena bucket;
- arena_init_from_allocator( & bucket, heap(), BucketSize );
-
- if ( bucket.physical_start == nullptr )
- fatal( "Failed to create bucket for Global_AllocatorBuckets");
-
- if ( ! array_append( Global_AllocatorBuckets, bucket ) )
- fatal( "Failed to append bucket to Global_AllocatorBuckets");
-
- last = & array_back( Global_AllocatorBuckets );
+ // TODO : Check if ptr is on top of stack and just extend
+ AllocatorInfo a = arena->Backing;
+ ptr = default_resize_align( a, old_memory, old_size, size, alignment );
}
-
- void* result = alloc_align( arena_allocator( last), size, alignment );
-
- if ( result != nullptr && old_memory != nullptr )
- {
- mem_copy( result, old_memory, size );
- }
-
- return result;
- }
+ break;
}
-
- return nullptr;
+ return ptr;
}
- void setup()
+ void* Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
- GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };
+ Pool* pool = zpl_cast( Pool* ) allocator_data;
+ void* ptr = NULL;
- if ( ! array_init_reserve( Global_AllocatorBuckets, heap(), 128 ) )
- fatal( "Failed to reserve memory for Global_AllocatorBuckets");
+ // unused( old_size );
- Arena bucket;
- arena_init_from_allocator( & bucket, heap(), BucketSize );
-
- if ( bucket.physical_start == nullptr )
- fatal( "Failed to create first bucket for Global_AllocatorBuckets");
-
- array_append( Global_AllocatorBuckets, bucket );
- }
-
- void cleanup()
- {
- s32 index = 0;
- s32 left = array_count( Global_AllocatorBuckets );
- do
+ switch ( type )
{
- Arena* bucket = & Global_AllocatorBuckets[ index ];
- arena_free( bucket );
- index++;
- }
- while ( left--, left );
+ case EAllocation_ALLOC :
+ {
+ uptr next_free;
- array_free( Global_AllocatorBuckets );
+ ZPL_ASSERT( size == pool->BlockSize );
+ ZPL_ASSERT( alignment == pool->BlockAlign );
+ ZPL_ASSERT( pool->FreeList != NULL );
+
+ next_free = *zpl_cast( uptr* ) pool->FreeList;
+ ptr = pool->FreeList;
+ pool->FreeList = zpl_cast( void* ) next_free;
+ pool->TotalSize += pool->BlockSize;
+
+ if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
+ zero_size( ptr, size );
+ }
+ break;
+
+ case EAllocation_FREE :
+ {
+ uptr* next;
+ if ( old_memory == NULL )
+ return NULL;
+
+ next = zpl_cast( uptr* ) old_memory;
+ *next = zpl_cast( uptr ) pool->FreeList;
+ pool->FreeList = old_memory;
+ pool->TotalSize -= pool->BlockSize;
+ }
+ break;
+
+ case EAllocation_FREE_ALL :
+ {
+ sw actual_block_size, block_index;
+ void* curr;
+ uptr* end;
+
+ actual_block_size = pool->BlockSize + pool->BlockAlign;
+ pool->TotalSize = 0;
+
+ // NOTE: Init intrusive freelist
+ curr = pool->PhysicalStart;
+ for ( block_index = 0; block_index < pool->NumBlocks - 1; block_index++ )
+ {
+ uptr* next = zpl_cast( uptr* ) curr;
+ *next = zpl_cast( uptr ) curr + actual_block_size;
+ curr = pointer_add( curr, actual_block_size );
+ }
+
+ end = zpl_cast( uptr* ) curr;
+ *end = zpl_cast( uptr ) NULL;
+ pool->FreeList = pool->PhysicalStart;
+ }
+ break;
+
+ case EAllocation_RESIZE :
+ // NOTE: Cannot resize
+ ZPL_PANIC( "You cannot resize something allocated by with a pool." );
+ break;
+ }
+
+ return ptr;
}
-}
+
+ Pool Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
+ {
+ Pool pool = {};
+
+ sw actual_block_size, pool_size, block_index;
+ void *data, *curr;
+ uptr* end;
+
+ pool.Backing = backing;
+ pool.BlockSize = block_size;
+ pool.BlockAlign = block_align;
+ pool.NumBlocks = num_blocks;
+
+ actual_block_size = block_size + block_align;
+ pool_size = num_blocks * actual_block_size;
+
+ data = alloc_align( backing, pool_size, block_align );
+
+ // NOTE: Init intrusive freelist
+ curr = data;
+ for ( block_index = 0; block_index < num_blocks - 1; block_index++ )
+ {
+ uptr* next = ( uptr* ) curr;
+ *next = ( uptr ) curr + actual_block_size;
+ curr = pointer_add( curr, actual_block_size );
+ }
+
+ end = ( uptr* ) curr;
+ *end = ( uptr ) 0;
+
+ pool.PhysicalStart = data;
+ pool.FreeList = data;
+
+ return pool;
+ }
+
+#pragma endregion Memory
+
+#pragma region File Handling
+
+ #if defined( ZPL_SYSTEM_WINDOWS ) || defined( ZPL_SYSTEM_CYGWIN )
+
+ internal wchar_t* _alloc_utf8_to_ucs2( AllocatorInfo a, char const* text, sw* w_len_ )
+ {
+ wchar_t* w_text = NULL;
+ sw len = 0, w_len = 0, w_len1 = 0;
+ if ( text == NULL )
+ {
+ if ( w_len_ )
+ *w_len_ = w_len;
+ return NULL;
+ }
+ len = str_len( text );
+ if ( len == 0 )
+ {
+ if ( w_len_ )
+ *w_len_ = w_len;
+ return NULL;
+ }
+ w_len = MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, text, zpl_cast( int ) len, NULL, 0 );
+ if ( w_len == 0 )
+ {
+ if ( w_len_ )
+ *w_len_ = w_len;
+ return NULL;
+ }
+ w_text = alloc_array( a, wchar_t, w_len + 1 );
+ w_len1 = MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, text, zpl_cast( int ) len, w_text, zpl_cast( int ) w_len );
+ if ( w_len1 == 0 )
+ {
+ free( a, w_text );
+ if ( w_len_ )
+ *w_len_ = 0;
+ return NULL;
+ }
+ w_text[ w_len ] = 0;
+ if ( w_len_ )
+ *w_len_ = w_len;
+ return w_text;
+ }
+
+ internal ZPL_FILE_SEEK_PROC( _win32_file_seek )
+ {
+ LARGE_INTEGER li_offset;
+ li_offset.QuadPart = offset;
+ if ( ! SetFilePointerEx( fd.p, li_offset, &li_offset, whence ) )
+ {
+ return false;
+ }
+
+ if ( new_offset )
+ *new_offset = li_offset.QuadPart;
+ return true;
+ }
+
+ internal ZPL_FILE_READ_AT_PROC( _win32_file_read )
+ {
+ // unused( stop_at_newline );
+ b32 result = false;
+ _win32_file_seek( fd, offset, ESeekWhence_BEGIN, NULL );
+ DWORD size_ = zpl_cast( DWORD )( size > ZPL_I32_MAX ? ZPL_I32_MAX : size );
+ DWORD bytes_read_;
+ if ( ReadFile( fd.p, buffer, size_, &bytes_read_, NULL ) )
+ {
+ if ( bytes_read )
+ *bytes_read = bytes_read_;
+ result = true;
+ }
+
+ return result;
+ }
+
+ internal ZPL_FILE_WRITE_AT_PROC( _win32_file_write )
+ {
+ DWORD size_ = zpl_cast( DWORD )( size > ZPL_I32_MAX ? ZPL_I32_MAX : size );
+ DWORD bytes_written_;
+ _win32_file_seek( fd, offset, ESeekWhence_BEGIN, NULL );
+ if ( WriteFile( fd.p, buffer, size_, &bytes_written_, NULL ) )
+ {
+ if ( bytes_written )
+ *bytes_written = bytes_written_;
+ return true;
+ }
+ return false;
+ }
+
+ internal ZPL_FILE_CLOSE_PROC( _win32_file_close )
+ {
+ CloseHandle( fd.p );
+ }
+
+ FileOperations const default_file_operations = { _win32_file_read, _win32_file_write, _win32_file_seek, _win32_file_close };
+
+ ZPL_NEVER_INLINE ZPL_FILE_OPEN_PROC( _win32_file_open )
+ {
+ DWORD desired_access;
+ DWORD creation_disposition;
+ void* handle;
+ wchar_t* w_text;
+
+ switch ( mode & ZPL_FILE_MODES )
+ {
+ case EFileMode_READ :
+ desired_access = GENERIC_READ;
+ creation_disposition = OPEN_EXISTING;
+ break;
+ case EFileMode_WRITE :
+ desired_access = GENERIC_WRITE;
+ creation_disposition = CREATE_ALWAYS;
+ break;
+ case EFileMode_APPEND :
+ desired_access = GENERIC_WRITE;
+ creation_disposition = OPEN_ALWAYS;
+ break;
+ case EFileMode_READ | EFileMode_RW :
+ desired_access = GENERIC_READ | GENERIC_WRITE;
+ creation_disposition = OPEN_EXISTING;
+ break;
+ case EFileMode_WRITE | EFileMode_RW :
+ desired_access = GENERIC_READ | GENERIC_WRITE;
+ creation_disposition = CREATE_ALWAYS;
+ break;
+ case EFileMode_APPEND | EFileMode_RW :
+ desired_access = GENERIC_READ | GENERIC_WRITE;
+ creation_disposition = OPEN_ALWAYS;
+ break;
+ default :
+ ZPL_PANIC( "Invalid file mode" );
+ return EFileError_INVALID;
+ }
+
+ w_text = _alloc_utf8_to_ucs2( heap(), filename, NULL );
+ handle = CreateFileW( w_text, desired_access, FILE_SHARE_READ | FILE_SHARE_DELETE, NULL, creation_disposition, FILE_ATTRIBUTE_NORMAL, NULL );
+
+ free( heap(), w_text );
+
+ if ( handle == INVALID_HANDLE_VALUE )
+ {
+ DWORD err = GetLastError();
+ switch ( err )
+ {
+ case ERROR_FILE_NOT_FOUND :
+ return EFileError_NOT_EXISTS;
+ case ERROR_FILE_EXISTS :
+ return EFileError_EXISTS;
+ case ERROR_ALREADY_EXISTS :
+ return EFileError_EXISTS;
+ case ERROR_ACCESS_DENIED :
+ return EFileError_PERMISSION;
+ }
+ return EFileError_INVALID;
+ }
+
+ if ( mode & EFileMode_APPEND )
+ {
+ LARGE_INTEGER offset = { { 0 } };
+ if ( ! SetFilePointerEx( handle, offset, NULL, ESeekWhence_END ) )
+ {
+ CloseHandle( handle );
+ return EFileError_INVALID;
+ }
+ }
+
+ fd->p = handle;
+ *ops = default_file_operations;
+ return EFileError_NONE;
+ }
+
+ #else // POSIX
+	#	include <fcntl.h>
+
+ internal ZPL_FILE_SEEK_PROC( _posix_file_seek )
+ {
+ # if defined( ZPL_SYSTEM_OSX )
+ s64 res = lseek( fd.i, offset, whence );
+ # else // TODO(ZaKlaus): @fixme lseek64
+ s64 res = lseek( fd.i, offset, whence );
+ # endif
+ if ( res < 0 )
+ return false;
+ if ( new_offset )
+ *new_offset = res;
+ return true;
+ }
+
+ internal ZPL_FILE_READ_AT_PROC( _posix_file_read )
+ {
+ unused( stop_at_newline );
+ sw res = pread( fd.i, buffer, size, offset );
+ if ( res < 0 )
+ return false;
+ if ( bytes_read )
+ *bytes_read = res;
+ return true;
+ }
+
+ internal ZPL_FILE_WRITE_AT_PROC( _posix_file_write )
+ {
+ sw res;
+ s64 curr_offset = 0;
+ _posix_file_seek( fd, 0, ESeekWhence_CURRENT, &curr_offset );
+ if ( curr_offset == offset )
+ {
+ // NOTE: Writing to stdout et al. doesn't like pwrite for numerous reasons
+ res = write( zpl_cast( int ) fd.i, buffer, size );
+ }
+ else
+ {
+ res = pwrite( zpl_cast( int ) fd.i, buffer, size, offset );
+ }
+ if ( res < 0 )
+ return false;
+ if ( bytes_written )
+ *bytes_written = res;
+ return true;
+ }
+
+ internal ZPL_FILE_CLOSE_PROC( _posix_file_close )
+ {
+ close( fd.i );
+ }
+
+ FileOperations const default_file_operations = { _posix_file_read, _posix_file_write, _posix_file_seek, _posix_file_close };
+
+ ZPL_NEVER_INLINE ZPL_FILE_OPEN_PROC( _posix_file_open )
+ {
+ s32 os_mode;
+ switch ( mode & ZPL_FILE_MODES )
+ {
+ case EFileMode_READ :
+ os_mode = O_RDONLY;
+ break;
+ case EFileMode_WRITE :
+ os_mode = O_WRONLY | O_CREAT | O_TRUNC;
+ break;
+ case EFileMode_APPEND :
+ os_mode = O_WRONLY | O_APPEND | O_CREAT;
+ break;
+ case EFileMode_READ | EFileMode_RW :
+ os_mode = O_RDWR;
+ break;
+ case EFileMode_WRITE | EFileMode_RW :
+ os_mode = O_RDWR | O_CREAT | O_TRUNC;
+ break;
+ case EFileMode_APPEND | EFileMode_RW :
+ os_mode = O_RDWR | O_APPEND | O_CREAT;
+ break;
+ default :
+ ZPL_PANIC( "Invalid file mode" );
+ return EFileError_INVALID;
+ }
+
+ fd->i = open( filename, os_mode, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH );
+ if ( fd->i < 0 )
+ {
+ // TODO: More file errors
+ return EFileError_INVALID;
+ }
+
+ *ops = default_file_operations;
+ return EFileError_NONE;
+ }
+
+ // POSIX
+ #endif
+
+ internal void _dirinfo_free_entry( DirEntry* entry );
+
+ FileError file_close( FileInfo* f )
+ {
+ if ( ! f )
+ return EFileError_INVALID;
+
+ if ( f->Filename )
+ free( heap(), zpl_cast( char* ) f->Filename );
+
+ #if defined( ZPL_SYSTEM_WINDOWS )
+ if ( f->FD.p == INVALID_HANDLE_VALUE )
+ return EFileError_INVALID;
+ #else
+		if ( f->FD.i < 0 )
+ return EFileError_INVALID;
+ #endif
+
+ if ( f->IsTemp )
+ {
+ f->Ops.close( f->FD );
+ return EFileError_NONE;
+ }
+
+ if ( ! f->Ops.read_at )
+ f->Ops = default_file_operations;
+ f->Ops.close( f->FD );
+
+ if ( f->Dir )
+ {
+ _dirinfo_free_entry( f->Dir );
+ mfree( f->Dir );
+ f->Dir = NULL;
+ }
+
+ return EFileError_NONE;
+ }
+
+ FileError file_new( FileInfo* f, FileDescriptor fd, FileOperations ops, char const* filename )
+ {
+ FileError err = EFileError_NONE;
+ sw len = str_len( filename );
+
+ f->Ops = ops;
+ f->FD = fd;
+ f->Dir = nullptr;
+ f->LastWriteTime = 0;
+ f->Filename = alloc_array( heap(), char, len + 1 );
+ mem_copy( zpl_cast( char* ) f->Filename, zpl_cast( char* ) filename, len + 1 );
+
+ return err;
+ }
+
+ FileError file_open_mode( FileInfo* f, FileMode mode, char const* filename )
+ {
+ FileInfo file_ =
+ {
+ { nullptr, nullptr, nullptr, nullptr },
+ { nullptr },
+ 0,
+ nullptr,
+ 0,
+ nullptr
+ };
+
+ *f = file_;
+ FileError err;
+
+ #if defined( ZPL_SYSTEM_WINDOWS ) || defined( ZPL_SYSTEM_CYGWIN )
+ err = _win32_file_open( &f->FD, &f->Ops, mode, filename );
+ #else
+			err = _posix_file_open( &f->FD, &f->Ops, mode, filename );
+ #endif
+
+ if ( err == EFileError_NONE )
+ return file_new( f, f->FD, f->Ops, filename );
+
+ return err;
+ }
+
+ internal void _dirinfo_free_entry( DirEntry* entry )
+ {
+ if ( entry->Info )
+ {
+ dirinfo_free( entry->Info );
+ mfree( entry->Info );
+ entry->Info = nullptr;
+ }
+ }
+
+ void dirinfo_free( DirInfo* dir )
+ {
+ ZPL_ASSERT_NOT_NULL( dir );
+
+ for ( sw i = 0; i < array_count( dir->Entries ); ++i )
+ {
+ _dirinfo_free_entry( dir->Entries + i );
+ }
+
+ array_free( dir->Entries );
+ array_free( dir->Filenames );
+ // string_free( dir->Buffer );
+ dir->Buffer.free();
+ mfree( ( void* )dir->FullPath );
+ }
+
+#pragma endregion File Handling
+
+ namespace Memory
+ {
+ global AllocatorInfo GlobalAllocator;
+
+		global Array<Arena> Global_AllocatorBuckets;
+
+ void* Global_Allocator_Proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
+ {
+			Arena* last = & Global_AllocatorBuckets.back();
+
+			switch ( type )
+			{
+				case EAllocation_ALLOC:
+				{
+					if ( last->TotalUsed + size > last->TotalSize )
+					{
+						Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
+
+						if ( bucket.PhysicalStart == nullptr )
+							fatal( "Failed to create bucket for Global_AllocatorBuckets");
+
+						if ( ! Global_AllocatorBuckets.append( bucket ) )
+							fatal( "Failed to append bucket to Global_AllocatorBuckets");
+
+						last = & Global_AllocatorBuckets.back();
+					}
+
+					return alloc_align( * last, size, alignment );
+				}
+				case EAllocation_FREE:
+				{
+					// Doesn't recycle.
+				}
+				break;
+				case EAllocation_FREE_ALL:
+				{
+					// Memory::cleanup instead.
+				}
+				break;
+				case EAllocation_RESIZE:
+				{
+					if ( last->TotalUsed + size > last->TotalSize )
+					{
+						Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
+
+						if ( bucket.PhysicalStart == nullptr )
+							fatal( "Failed to create bucket for Global_AllocatorBuckets");
+
+						if ( ! Global_AllocatorBuckets.append( bucket ) )
+							fatal( "Failed to append bucket to Global_AllocatorBuckets");
+
+						last = & Global_AllocatorBuckets.back();
+					}
+
+					void* result = alloc_align( * last, size, alignment );
+
+ if ( result != nullptr && old_memory != nullptr )
+ {
+ mem_copy( result, old_memory, old_size );
+ }
+
+ return result;
+ }
+ }
+
+ return nullptr;
+ }
+
+ void setup()
+ {
+ GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };
+
+			Global_AllocatorBuckets = Array<Arena>::init_reserve( heap(), 128 );
+
+ if ( Global_AllocatorBuckets == nullptr )
+ fatal( "Failed to reserve memory for Global_AllocatorBuckets");
+
+ Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
+
+ if ( bucket.PhysicalStart == nullptr )
+ fatal( "Failed to create first bucket for Global_AllocatorBuckets");
+
+ Global_AllocatorBuckets.append( bucket );
+ }
+
+ void cleanup()
+ {
+ s32 index = 0;
+ s32 left = Global_AllocatorBuckets.num();
+ do
+ {
+ Arena* bucket = & Global_AllocatorBuckets[ index ];
+ bucket->free();
+ index++;
+ }
+ while ( left--, left );
+
+ Global_AllocatorBuckets.free();
+ }
+
+ // namespace Memory
+ }
+
+// namespace gen
+}
\ No newline at end of file
diff --git a/project/Bloat.hpp b/project/Bloat.hpp
index e2dd1cd..abcca78 100644
--- a/project/Bloat.hpp
+++ b/project/Bloat.hpp
@@ -27,6 +27,11 @@
# define ZPL_MODULE_HASHING
#include "zpl.h"
+#undef Array
+#undef heap
+#undef alloc_item
+#undef alloc_array
+
using zpl::b32;
using zpl::s8;
using zpl::s16;
@@ -40,32 +45,32 @@ using zpl::sw;
using zpl::sptr;
using zpl::uptr;
-using zpl::AllocType;
-using zpl::Arena;
-using zpl::AllocatorInfo;
-using zpl::ArrayHeader;
-using zpl::FileInfo;
-using zpl::FileError;
-using zpl::Pool;
+// using zpl::AllocType;
+// using zpl::Arena;
+// using zpl::AllocatorInfo;
+// using zpl::ArrayHeader;
+// using zpl::FileInfo;
+// using zpl::FileError;
+// using zpl::Pool;
// using zpl::String;
-using zpl::EAllocation_ALLOC;
-using zpl::EAllocation_FREE;
-using zpl::EAllocation_FREE_ALL;
-using zpl::EAllocation_RESIZE;
-using zpl::EFileMode_WRITE;
-using zpl::EFileError_NONE;
+// using zpl::EAllocation_ALLOC;
+// using zpl::EAllocation_FREE;
+// using zpl::EAllocation_FREE_ALL;
+// using zpl::EAllocation_RESIZE;
+// using zpl::EFileMode_WRITE;
+// using zpl::EFileError_NONE;
-using zpl::ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO;
+// using zpl::ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO;
using zpl::align_forward;
using zpl::align_forward_i64;
-using zpl::alloc;
-using zpl::alloc_align;
-using zpl::arena_allocator;
-using zpl::arena_init_from_memory;
-using zpl::arena_init_from_allocator;
-using zpl::arena_free;
+// using zpl::alloc;
+// using zpl::alloc_align;
+// using zpl::arena_allocator;
+// using zpl::arena_init_from_memory;
+// using zpl::arena_init_from_allocator;
+// using zpl::arena_free;
using zpl::assert_crash;
using zpl::char_first_occurence;
using zpl::char_is_alpha;
@@ -74,15 +79,15 @@ using zpl::char_is_digit;
using zpl::char_is_hex_digit;
using zpl::char_is_space;
using zpl::crc32;
-using zpl::free_all;
+// using zpl::free_all;
using zpl::is_power_of_two;
using zpl::mem_copy;
using zpl::mem_move;
using zpl::mem_set;
using zpl::pointer_add;
-using zpl::pool_allocator;
-using zpl::pool_init;
-using zpl::pool_free;
+// using zpl::pool_allocator;
+// using zpl::pool_init;
+// using zpl::pool_free;
using zpl::process_exit;
using zpl::str_compare;
using zpl::str_copy;
@@ -184,1002 +189,1411 @@ do \
} \
while(0);
-constexpr
-char const* Msg_Invalid_Value = "INVALID VALUE PROVIDED";
-
-#pragma region Memory
-
-// TODO : Use it.
-struct gen_Arena
+namespace gen
{
- static
- void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
+ constexpr
+ char const* Msg_Invalid_Value = "INVALID VALUE PROVIDED";
- static
- gen_Arena init_from_memory( void* start, sw size )
+ #pragma region Memory
+
+ enum AllocType : u8
{
- return
- {
- { nullptr, nullptr },
- start,
- size,
- 0,
- 0
- };
- }
-
- static
- gen_Arena init_from_allocator( AllocatorInfo backing, sw size )
- {
- gen_Arena result =
- {
- backing,
- alloc( backing, size),
- size,
- 0,
- 0
- };
- return result;
- }
-
- static
- gen_Arena init_sub( gen_Arena& parent, sw size )
- {
- return init_from_allocator( parent.Backing, size );
- }
-
- sw alignment_of( sw alignment )
- {
- sw alignment_offset, result_pointer, mask;
- ZPL_ASSERT( is_power_of_two( alignment ) );
-
- alignment_offset = 0;
- result_pointer = (sw) PhysicalStart + TotalUsed;
- mask = alignment - 1;
-
- if ( result_pointer & mask )
- alignment_offset = alignment - ( result_pointer & mask );
-
- return alignment_offset;
- }
-
- void check()
- {
- ZPL_ASSERT( TempCount == 0 );
- }
-
- void free()
- {
- if ( Backing.proc )
- {
- zpl::free( Backing, PhysicalStart );
- PhysicalStart = nullptr;
- }
- }
-
- sw size_remaining( sw alignment )
- {
- sw result = TotalSize - ( TotalUsed + alignment_of( alignment ) );
- return result;
- }
-
- AllocatorInfo Backing;
- void* PhysicalStart;
- sw TotalSize;
- sw TotalUsed;
- sw TempCount;
-};
-
-struct gen_Pool
-{
- static
- void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
-
- static
- gen_Pool init( AllocatorInfo backing, sw num_blocks, sw block_size )
- {
- return init_align( backing, num_blocks, block_size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
- }
-
- static
- gen_Pool init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align );
-
- void free()
- {
- if ( Backing.proc )
- {
- zpl::free( Backing, PhysicalStart );
- }
- }
-
- AllocatorInfo Backing;
- void* PhysicalStart;
- void* FreeList;
- sw BlockSize;
- sw BlockAlign;
- sw TotalSize;
- sw NumBlocks;
-};
-
-#pragma endregion Memory
-
-#pragma region Containers
-#pragma push_macro("template")
-#undef template
-
-// TODO : Use it.
-template
-struct TArray
-{
- struct Header
- {
- AllocatorInfo Allocator;
- uw Capacity;
- uw Num;
+ EAllocation_ALLOC,
+ EAllocation_FREE,
+ EAllocation_FREE_ALL,
+ EAllocation_RESIZE,
};
- static
- TArray init( AllocatorInfo allocator )
+ using AllocatorProc = void* ( void* allocator_data, AllocType type
+ , sw size, sw alignment
+ , void* old_memory, sw old_size
+ , u64 flags );
+
+ struct AllocatorInfo
{
- return init_reserve( allocator, grow_formula(0) );
- }
-
- static
- TArray init_reserve( AllocatorInfo allocator, sw capacity )
- {
- Header* header = rcast( Header*, alloc( allocator, sizeof(Header) + sizeof(Type) ));
-
- if ( header == nullptr )
- return { nullptr };
-
- header->Allocator = allocator;
- header->Capacity = capacity;
- header->Num = 0;
-
- return { rcast( Type*, header + 1) };
- }
-
- static
- uw grow_formula( uw value )
- {
- return 2 * value * 8;
- }
-
- bool append( Type value )
- {
- Header& header = get_header();
-
- if ( header.Num == header.Capacity )
- {
- if ( ! grow( header.Capacity ))
- return false;
- }
-
- Data[ header.Num ] = value;
- header.Num++;
-
- return true;
- }
-
- Type& back( void )
- {
- Header& header = get_header();
- return Data[ header.Num - 1 ];
- }
-
- void clear( void )
- {
- Header& header = get_header();
- header.Num = 0;
- }
-
- bool fill( uw begin, uw end, Type value )
- {
- Header& header = get_header();
-
- if ( begin < 0 || end >= header.Num )
- return false;
-
- for ( sw idx = begin; idx < end; idx++ )
- {
- Data[ idx ] = value;
- }
-
- return true;
- }
-
- void free( void )
- {
- Header& header = get_header();
- zpl::free( header.Allocator, &header );
- }
-
- Header& get_header( void )
- {
- return *( reinterpret_cast< Header* >( Data ) - 1 );
- }
-
- bool grow( uw min_capacity )
- {
- Header& header = get_header();
- uw new_capacity = grow_formula( header.Capacity );
-
- if ( new_capacity < min_capacity )
- new_capacity = 8;
-
- return set_capacity( new_capacity );
- }
-
- uw num( void )
- {
- return get_header().Num;
- }
-
- bool pop( void )
- {
- Header& header = get_header();
-
- ZPL_ASSERT( header.Num > 0 );
- header.Num--;
- }
-
- void remove_at( uw idx )
- {
- Header* header = &get_header();
- ZPL_ASSERT( idx < header->Num );
-
- mem_move( header + idx, header + idx + 1, sizeof( Type ) * ( header->Num - idx - 1 ) );
- header->Num--;
- }
-
- bool reserve( uw new_capacity )
- {
- Header& header = get_header();
-
- if ( header.Capacity < new_capacity )
- return set_capacity( new_capacity );
-
- return true;
- }
-
- bool resize( uw num )
- {
- Header& header = get_header();
-
- if ( num > header.Capacity )
- {
- if ( ! grow( header.Capacity ) )
- return false;
- }
-
- header.Num = num;
- return true;
- }
-
- bool set_capacity( uw new_capacity )
- {
- Header& header = get_header();
-
- if ( new_capacity == header.Capacity )
- return true;
-
- if ( new_capacity < header.Num )
- header.Num = new_capacity;
-
- sw size = sizeof( Header ) + sizeof( Type ) * new_capacity;
- Header* new_header = reinterpret_cast< Header* >( alloc( header.Allocator, size ) );
-
- if ( new_header == nullptr )
- return false;
-
- mem_move( new_header, &header, sizeof( Header ) + sizeof( Type ) * header.Num );
-
- new_header->Allocator = header.Allocator;
- new_header->Num = header.Num;
- new_header->Capacity = new_capacity;
-
- zpl::free( header.Allocator, &header );
-
- Data = ( Type* )new_header + 1;
- return true;
- }
-
- Type* Data;
-
- operator Type*()
- {
- return Data;
- }
-
- operator Type const*() const
- {
- return Data;
- }
-};
-
-// TODO : Use it.
-template
-struct THashTable
-{
- struct FindResult
- {
- sw HashIndex;
- sw PrevIndex;
- sw EntryIndex;
+ AllocatorProc* Proc;
+ void* Data;
};
- struct Entry
+ enum AllocFlag
{
- u64 Key;
- sw Next;
- Type Value;
+ ALLOCATOR_FLAG_CLEAR_TO_ZERO = bit( 0 ),
};
- static
- THashTable init( AllocatorInfo allocator )
+ //! Allocate memory with default alignment.
+ ZPL_DEF_INLINE void* alloc( AllocatorInfo a, sw size );
+
+ //! Allocate memory with specified alignment.
+ ZPL_DEF_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment );
+
+ //! Free allocated memory.
+ ZPL_DEF_INLINE void free( AllocatorInfo a, void* ptr );
+
+ //! Free all memory allocated by an allocator.
+ ZPL_DEF_INLINE void free_all( AllocatorInfo a );
+
+ //! Resize an allocated memory.
+ ZPL_DEF_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size );
+
+ //! Resize an allocated memory with specified alignment.
+ ZPL_DEF_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
+
+ #ifndef alloc_item
+ //! Allocate memory for an item.
+ # define alloc_item( allocator_, Type ) ( Type* )alloc( allocator_, size_of( Type ) )
+
+ //! Allocate memory for an array of items.
+ # define alloc_array( allocator_, Type, count ) ( Type* )alloc( allocator_, size_of( Type ) * ( count ) )
+ #endif
+
+
+ /* heap memory analysis tools */
+ /* define ZPL_HEAP_ANALYSIS to enable this feature */
+ /* call zpl_heap_stats_init at the beginning of the entry point */
+ /* you can call zpl_heap_stats_check near the end of the execution to validate any possible leaks */
+ void heap_stats_init( void );
+ sw heap_stats_used_memory( void );
+ sw heap_stats_alloc_count( void );
+ void heap_stats_check( void );
+
+ //! Allocate/Resize memory using default options.
+
+ //! Use this if you don't need a "fancy" resize allocation
+ ZPL_DEF_INLINE void* default_resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
+
+ void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
+
+ //! The heap allocator backed by operating system's memory manager.
+ constexpr AllocatorInfo heap( void ) { return { heap_allocator_proc, nullptr }; }
+
+ // #ifndef malloc
+
+ // //! Helper to allocate memory using heap allocator.
+ // # define malloc( sz ) ZPL_NS( alloc )( ZPL_NS( heap_allocator )(), sz )
+
+ // //! Helper to free memory allocated by heap allocator.
+ // # define mfree( ptr ) ZPL_NS( free )( ZPL_NS( heap_allocator )(), ptr )
+
+ // //! Alias to heap allocator.
+ // # define heap ZPL_NS( heap_allocator )
+ // #endif
+
+ ZPL_IMPL_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment )
{
- THashTable result = {0};
-
- result.Hashes.init( allocator );
- result.Entries.init( allocator );
-
- return result;
+ return a.Proc( a.Data, EAllocation_ALLOC, size, alignment, nullptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
- void clear( void )
+ ZPL_IMPL_INLINE void* alloc( AllocatorInfo a, sw size )
{
- for ( sw idx = 0; idx < Hashes.num(); idx++ )
- Hashes[ idx ] = -1;
-
- Hashes.clear();
- Entries.clear();
+ return alloc_align( a, size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
}
- void destroy( void )
+ ZPL_IMPL_INLINE void free( AllocatorInfo a, void* ptr )
{
- if ( Hashes )
- Hashes.free();
- if ( Entries )
- Entries.free();
+ if ( ptr != nullptr )
+ a.Proc( a.Data, EAllocation_FREE, 0, 0, ptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
- Type* get( u64 key )
+ ZPL_IMPL_INLINE void free_all( AllocatorInfo a )
{
- sw idx = find( key ).EntryIndex;
- if ( idx > 0 )
- return & Entries[ idx ].Value;
-
- return nullptr;
+ a.Proc( a.Data, EAllocation_FREE_ALL, 0, 0, nullptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
- using MapProc = void (*)( u64 key, Type value );
-
- void map( MapProc map_proc )
+ ZPL_IMPL_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size )
{
- ZPL_ASSERT_NOT_NULL( map_proc );
+ return resize_align( a, ptr, old_size, new_size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
+ }
- for ( sw idx = 0; idx < Entries.num(); idx++ )
+ ZPL_IMPL_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment )
+ {
+ return a.Proc( a.Data, EAllocation_RESIZE, new_size, alignment, ptr, old_size, ZPL_DEFAULT_ALLOCATOR_FLAGS );
+ }
+
+ ZPL_IMPL_INLINE void* default_resize_align( AllocatorInfo a, void* old_memory, sw old_size, sw new_size, sw alignment )
+ {
+ if ( ! old_memory )
+ return alloc_align( a, new_size, alignment );
+
+ if ( new_size == 0 )
{
- map_proc( Entries[ idx ].Key, Entries[ idx ].Value );
- }
- }
-
- using MapMutProc = void (*)( u64 key, Type* value );
-
- void map_mut( MapMutProc map_proc )
- {
- ZPL_ASSERT_NOT_NULL( map_proc );
-
- for ( sw idx = 0; idx < Entries.num(); idx++ )
- {
- map_proc( Entries[ idx ].Key, & Entries[ idx ].Value );
- }
- }
-
- void grow()
- {
- sw new_num = TArray::grow_formula( Entries.num() )
- rehash( new_num );
- }
-
- void rehash( sw new_num )
- {
- sw idx;
- sw last_added_index;
-
- THashTable new_ht = init( Hashes.get_header().Allocator );
-
- new_ht.Hashes.resize( new_num );
- new_ht.Entries.reserve( new_ht.Hashes.num() );
-
- for ( idx = 0; idx < new_ht.Hashes.num(); ++idx )
- new_ht.Hashes[ idx ] = -1;
-
- for ( idx = 0; idx < Entries.num(); ++idx )
- {
- Entry& entry = Entries[ idx ];
-
- FindResult find_result;
-
- if ( new_ht.Hashes.num() == 0 )
- new_ht.grow();
-
- entry = Entries[ idx ];
- find_result = new_ht.find( entry.Key );
- last_added_index = new_ht.add_entry( entry.Key );
-
- if ( find_result.PrevIndex < 0 )
- new_ht.Hashes[ find_result.HashIndex ] = last_added_index;
-
- else
- new_ht.Entries[ find_result.PrevIndex ].Next = last_added_index;
-
- new_ht.Entries[ last_added_index ].Next = find_result.EntryIndex;
- new_ht.Entries[ last_added_index ].Value = entry.Value;
+ free( a, old_memory );
+ return nullptr;
}
- // *this = new_ht;
+ if ( new_size < old_size )
+ new_size = old_size;
- // old_ht.destroy();
-
- destroy();
- Hashes = new_ht.Hashes;
- Entries = new_ht.Entries;
- }
-
- void rehash_fast()
- {
- sw idx;
-
- for ( idx = 0; idx < Entries.num(); idx++ )
- Entries[ idx ].Next = -1;
-
- for ( idx = 0; idx < Hashes.num(); idx++ )
- Hashes[ idx ] = -1;
-
- for ( idx = 0; idx < Entries.num(); idx++ )
+ if ( old_size == new_size )
{
- Entry* entry;
-
- FindResult find_result;
- }
- }
-
- void remove( u64 key )
- {
- FindResult find_result = find( key);
-
- if ( find_result.EntryIndex >= 0 )
- {
- Entries.remove_at( find_result.EntryIndex );
- rehash_fast();
- }
- }
-
- void remove_entry( sw idx )
- {
- Entries.remove_at( idx );
- }
-
- void set( u64 key, Type value )
- {
- sw idx;
- FindResult find_result;
-
- if ( Hashes.num() == 0 )
- grow();
-
- find_result = find( key );
-
- if ( find_result.EntryIndex >= 0 )
- {
- idx = find_result.EntryIndex;
+ return old_memory;
}
else
{
- idx = add_entry( key );
-
- if ( find_result.PrevIndex >= 0 )
- {
- Entries[ find_result.PrevIndex ].Next = idx;
- }
- else
- {
- Hashes[ find_result.HashIndex ] = idx;
- }
+ void* new_memory = alloc_align( a, new_size, alignment );
+ if ( ! new_memory )
+ return nullptr;
+ mem_move( new_memory, old_memory, min( new_size, old_size ) );
+ free( a, old_memory );
+ return new_memory;
}
-
- Entries[ idx ].Value = value;
-
- if ( full() )
- grow();
}
- sw slot( u64 key )
+ // ZPL_IMPL_INLINE AllocatorInfo heap( void )
+ // {
+ // AllocatorInfo a;
+ // a.Proc = heap_allocator_proc;
+ // a.Data = nullptr;
+ // return a;
+ // }
+
+ struct Arena
{
- for ( sw idx = 0; idx < Hashes.num(); ++idx )
- if ( Hashes[ idx ] == key )
- return idx;
-
- return -1;
- }
-
- TArray< sw> Hashes;
- TArray< Entry> Entries;
-
-protected:
-
- sw add_entry( u64 key )
- {
- sw idx;
- Entry entry = { key, -1 };
-
- idx = Entries.num();
- Entries.append( entry );
- return idx;
- }
-
- FindResult find( u64 key )
- {
- FindResult result = { -1, -1, -1 };
-
- if ( Hashes.num() > 0 )
- {
- result.HashIndex = key % Hashes.num();
- result.EntryIndex = Hashes[ result.HashIndex ];
-
- while ( result.EntryIndex >= 0 )
- {
- if ( Entries[ result.EntryIndex ].Key == key )
- break;
-
- result.PrevIndex = result.EntryIndex;
- result.EntryIndex = Entries[ result.EntryIndex ].Next;
- }
- }
-
- return result;
- }
-
- b32 full()
- {
- return 0.75f * Hashes.num() < Entries.num();
- }
-};
-
-#pragma pop_macro("template")
-#pragma endregion Containers
-
-#pragma region String
- // Constant string with length.
- struct StrC
- {
- sw Len;
- char const* Ptr;
-
- static constexpr
- StrC from( char const* str )
- {
- return { str_len( str ), str };
- }
-
- operator char const* () const
- {
- return Ptr;
- }
- };
-
- // Dynamic String
- // This is directly based off the ZPL string api.
- // They used a header pattern
- // I kept it for simplicty of porting but its not necessary to keep it that way.
- struct String
- {
- struct Header
- {
- AllocatorInfo Allocator;
- sw Length;
- sw Capacity;
- };
+ static
+ void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
static
- String make( AllocatorInfo allocator, char const* str )
+ Arena init_from_memory( void* start, sw size )
{
- sw length = str ? str_len( str ) : 0;
- return make_length( allocator, str, length );
+ return
+ {
+ { nullptr, nullptr },
+ start,
+ size,
+ 0,
+ 0
+ };
}
static
- String make( AllocatorInfo allocator, StrC str )
+ Arena init_from_allocator( AllocatorInfo backing, sw size )
{
- return make_length( allocator, str.Ptr, str.Len );
- }
-
- static
- String make_reserve( AllocatorInfo allocator, sw capacity )
- {
- constexpr sw header_size = sizeof( Header );
-
- s32 alloc_size = header_size + capacity + 1;
- void* allocation = alloc( allocator, alloc_size );
-
- if ( allocation == nullptr )
- return { nullptr };
-
- mem_set( allocation, 0, alloc_size );
-
- Header*
- header = rcast(Header*, allocation);
- header->Allocator = allocator;
- header->Capacity = capacity;
- header->Length = 0;
-
- String result = { (char*)allocation + header_size };
+ Arena result =
+ {
+ backing,
+ alloc( backing, size),
+ size,
+ 0,
+ 0
+ };
return result;
}
static
- String make_length( AllocatorInfo allocator, char const* str, sw length )
+ Arena init_sub( Arena& parent, sw size )
{
- constexpr sw header_size = sizeof( Header );
-
- s32 alloc_size = header_size + length + 1;
- void* allocation = alloc( allocator, alloc_size );
-
- if ( allocation == nullptr )
- return { nullptr };
-
- if ( ! str )
- mem_set( allocation, 0, alloc_size );
-
- Header&
- header = * rcast(Header*, allocation);
- header = { allocator, length, length };
-
- String result = { rcast( char*, allocation) + header_size };
-
- if ( length && str )
- mem_copy( result, str, length );
-
- result[ length ] = '\0';
-
- return result;
+ return init_from_allocator( parent.Backing, size );
}
- static
- String fmt( AllocatorInfo allocator, char* buf, sw buf_size, char const* fmt, ... )
+ sw alignment_of( sw alignment )
{
- va_list va;
- va_start( va, fmt );
- str_fmt_va( buf, buf_size, fmt, va );
- va_end( va );
+ sw alignment_offset, result_pointer, mask;
+ ZPL_ASSERT( is_power_of_two( alignment ) );
- return make( allocator, buf );
+ alignment_offset = 0;
+ result_pointer = (sw) PhysicalStart + TotalUsed;
+ mask = alignment - 1;
+
+ if ( result_pointer & mask )
+ alignment_offset = alignment - ( result_pointer & mask );
+
+ return alignment_offset;
}
- static
- String fmt_buf( AllocatorInfo allocator, char const* fmt, ... )
+ void check()
{
- local_persist thread_local
- char buf[ ZPL_PRINTF_MAXLEN ] = { 0 };
-
- va_list va;
- va_start( va, fmt );
- str_fmt_va( buf, ZPL_PRINTF_MAXLEN, fmt, va );
- va_end( va );
-
- return make( allocator, buf );
- }
-
- static
- String join( AllocatorInfo allocator, char const** parts, sw num_parts, char const* glue )
- {
- String result = make( allocator, "" );
-
- for ( sw idx = 0; idx < num_parts; ++idx )
- {
- result.append( parts[ idx ] );
-
- if ( idx < num_parts - 1 )
- result.append( glue );
- }
-
- return result;
- }
-
- static
- bool are_equal( String lhs, String rhs )
- {
- if ( lhs.length() != rhs.length() )
- return false;
-
- for ( sw idx = 0; idx < lhs.length(); ++idx )
- if ( lhs[ idx ] != rhs[ idx ] )
- return false;
-
- return true;
- }
-
- bool make_space_for( char const* str, sw add_len )
- {
- sw available = avail_space();
-
- // NOTE: Return if there is enough space left
- if ( available >= add_len )
- {
- return true;
- }
- else
- {
- sw new_len, old_size, new_size;
-
- void* ptr;
- void* new_ptr;
-
- AllocatorInfo allocator = get_header().Allocator;
- Header* header = nullptr;
-
- new_len = length() + add_len;
- ptr = & get_header();
- old_size = size_of( Header ) + length() + 1;
- new_size = size_of( Header ) + new_len + 1;
-
- new_ptr = resize( allocator, ptr, old_size, new_size );
-
- if ( new_ptr == nullptr )
- return false;
-
- header = zpl_cast( Header* ) new_ptr;
- header->Allocator = allocator;
- header->Capacity = new_len;
-
- Data = rcast( char*, header + 1 );
-
- return str;
- }
- }
-
- bool append( char const* str )
- {
- return append( str, str_len( str ) );
- }
-
- bool append( char const* str, sw length )
- {
- if ( sptr(str) > 0 )
- {
- sw curr_len = this->length();
-
- if ( ! make_space_for( str, length ) )
- return false;
-
- Header& header = get_header();
-
- mem_copy( Data + curr_len, str, length );
-
- Data[ curr_len + length ] = '\0';
-
- header.Length = curr_len + length;
- }
-
- return str;
- }
-
- bool append( StrC str)
- {
- return append( str.Ptr, str.Len );
- }
-
- bool append( const String other )
- {
- return append( other.Data, other.length() );
- }
-
- bool append_fmt( char const* fmt, ... )
- {
- sw res;
- char buf[ ZPL_PRINTF_MAXLEN ] = { 0 };
-
- va_list va;
- va_start( va, fmt );
- res = str_fmt_va( buf, count_of( buf ) - 1, fmt, va ) - 1;
- va_end( va );
-
- return append( buf, res );
- }
-
- sw avail_space() const
- {
- Header const&
- header = * rcast( Header const*, Data - sizeof( Header ));
-
- return header.Capacity - header.Length;
- }
-
- sw capacity() const
- {
- Header const&
- header = * rcast( Header const*, Data - sizeof( Header ));
-
- return header.Capacity;
- }
-
- void clear()
- {
- get_header().Length = 0;
- }
-
- String duplicate( AllocatorInfo allocator )
- {
- return make_length( allocator, Data, length() );
+ ZPL_ASSERT( TempCount == 0 );
}
void free()
{
- if ( ! Data )
- return;
-
- Header& header = get_header();
-
- zpl::free( header.Allocator, & header );
- }
-
- Header& get_header()
- {
- return *(Header*)(Data - sizeof(Header));
- }
-
- sw length() const
- {
- Header const&
- header = * rcast( Header const*, Data - sizeof( Header ));
-
- return header.Length;
- }
-
- void trim( char const* cut_set )
- {
- sw len = 0;
-
- char* start_pos = Data;
- char* end_pos = Data + length() - 1;
-
- while ( start_pos <= end_pos && char_first_occurence( cut_set, *start_pos ) )
- start_pos++;
-
- while ( end_pos > start_pos && char_first_occurence( cut_set, *end_pos ) )
- end_pos--;
-
- len = scast( sw, ( start_pos > end_pos ) ? 0 : ( ( end_pos - start_pos ) + 1 ) );
-
- if ( Data != start_pos )
- mem_move( Data, start_pos, len );
-
- Data[ len ] = '\0';
-
- get_header().Length = len;
- }
-
- void trim_space()
- {
- return trim( " \t\r\n\v\f" );
- }
-
- operator bool()
- {
- return Data;
- }
-
- operator char* ()
- {
- return Data;
- }
-
- operator char const* () const
- {
- return Data;
- }
-
- operator StrC() const
- {
- return
+ if ( Backing.Proc )
{
- length(),
- Data
- };
+ gen::free( Backing, PhysicalStart );
+ PhysicalStart = nullptr;
+ }
}
- // Used with cached strings
- // Essentially makes the string a string view.
- String const& operator = ( String const& other ) const
+ sw size_remaining( sw alignment )
{
- if ( this == & other )
- return *this;
-
- String& this_ = ccast( String, *this );
-
- this_.Data = other.Data;
-
- return this_;
+ sw result = TotalSize - ( TotalUsed + alignment_of( alignment ) );
+ return result;
}
- char& operator [] ( sw index )
+ AllocatorInfo Backing;
+ void* PhysicalStart;
+ sw TotalSize;
+ sw TotalUsed;
+ sw TempCount;
+
+ operator AllocatorInfo()
{
- return Data[ index ];
+ return { allocator_proc, this };
}
-
- char const& operator [] ( sw index ) const
- {
- return Data[ index ];
- }
-
- char* Data = nullptr;
};
- struct String_POD
+ struct Pool
{
- char* Data;
+ static
+ void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
- operator String()
+ static
+ Pool init( AllocatorInfo backing, sw num_blocks, sw block_size )
{
- return * rcast(String*, this);
+ return init_align( backing, num_blocks, block_size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
+ }
+
+ static
+ Pool init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align );
+
+ void free()
+ {
+ if ( Backing.Proc )
+ {
+ gen::free( Backing, PhysicalStart );
+ }
+ }
+
+ AllocatorInfo Backing;
+ void* PhysicalStart;
+ void* FreeList;
+ sw BlockSize;
+ sw BlockAlign;
+ sw TotalSize;
+ sw NumBlocks;
+
+ operator AllocatorInfo()
+ {
+ return { allocator_proc, this };
}
};
- static_assert( sizeof( String_POD ) == sizeof( String ), "String is not a POD" );
-#pragma endregion String
+
+ #pragma endregion Memory
+
+ #pragma region Containers
+ #pragma push_macro("template")
+ #undef template
+
+ template
+ struct Array
+ {
+ struct Header
+ {
+ AllocatorInfo Allocator;
+ uw Capacity;
+ uw Num;
+ };
+
+ static
+ Array init( AllocatorInfo allocator )
+ {
+ return init_reserve( allocator, grow_formula(0) );
+ }
+
+ static
+ Array init_reserve( AllocatorInfo allocator, sw capacity )
+ {
+ Header* header = rcast( Header*, alloc( allocator, sizeof(Header) + sizeof(Type) * capacity ));
+
+ if ( header == nullptr )
+ return { nullptr };
+
+ header->Allocator = allocator;
+ header->Capacity = capacity;
+ header->Num = 0;
+
+ return { rcast( Type*, header + 1) };
+ }
+
+ static
+ uw grow_formula( uw value )
+ {
+ return 2 * value + 8;
+ }
+
+ bool append( Type value )
+ {
+ Header* header = get_header();
+
+ if ( header->Num == header->Capacity )
+ {
+ if ( ! grow( header->Capacity ))
+ return false;
+
+ header = get_header();
+ }
+
+ Data[ header->Num ] = value;
+ header->Num++;
+
+ return true;
+ }
+
+ Type& back( void )
+ {
+ Header& header = * get_header();
+ return Data[ header.Num - 1 ];
+ }
+
+ void clear( void )
+ {
+ Header& header = * get_header();
+ header.Num = 0;
+ }
+
+		bool fill( uw begin, uw end, Type value )
+		{
+			Header& header = * get_header();
+
+			// end is exclusive (loop runs idx < end); begin < 0 can never trip for an unsigned index.
+			if ( begin > end || end > header.Num )
+				return false;
+
+			for ( sw idx = begin; idx < end; idx++ )
+			{
+				Data[ idx ] = value;
+			}
+
+			return true;
+		}
+
+ void free( void )
+ {
+ Header& header = * get_header();
+ gen::free( header.Allocator, &header );
+ }
+
+ Header* get_header( void )
+ {
+ return rcast( Header*, Data ) - 1 ;
+ }
+
+ bool grow( uw min_capacity )
+ {
+ Header& header = * get_header();
+ uw new_capacity = grow_formula( header.Capacity );
+
+ if ( new_capacity < min_capacity )
+ new_capacity = min_capacity;
+
+ return set_capacity( new_capacity );
+ }
+
+ uw num( void )
+ {
+ return get_header()->Num;
+ }
+
+		bool pop( void )
+		{
+			Header& header = * get_header();
+			ZPL_ASSERT( header.Num > 0 ); // popping an empty array is a programmer error
+			header.Num--;
+			return true; // was missing: flowing off the end of a bool function is UB
+		}
+
+		void remove_at( uw idx )
+		{
+			Header* header = get_header();
+			ZPL_ASSERT( idx < header->Num );
+
+			mem_move( Data + idx, Data + idx + 1, sizeof( Type ) * ( header->Num - idx - 1 ) ); // shift elements; header+idx stepped in Header-sized strides and corrupted memory
+			header->Num--;
+		}
+
+ bool reserve( uw new_capacity )
+ {
+ Header& header = * get_header();
+
+ if ( header.Capacity < new_capacity )
+ return set_capacity( new_capacity );
+
+ return true;
+ }
+
+ bool resize( uw num )
+ {
+ Header* header = get_header();
+
+ if ( header->Capacity < num )
+ {
+ if ( ! grow( num ) )
+ return false;
+ }
+
+ header->Num = num;
+ return true;
+ }
+
+ bool set_capacity( uw new_capacity )
+ {
+ Header& header = * get_header();
+
+ if ( new_capacity == header.Capacity )
+ return true;
+
+ if ( new_capacity < header.Num )
+ header.Num = new_capacity;
+
+ sw size = sizeof( Header ) + sizeof( Type ) * new_capacity;
+ Header* new_header = reinterpret_cast< Header* >( alloc( header.Allocator, size ) );
+
+ if ( new_header == nullptr )
+ return false;
+
+ mem_move( new_header, &header, sizeof( Header ) + sizeof( Type ) * header.Num );
+
+ new_header->Capacity = new_capacity;
+
+ gen::free( header.Allocator, &header );
+
+ Data = rcast( Type*, new_header + 1);
+ return true;
+ }
+
+ Type* Data;
+
+ operator Type*()
+ {
+ return Data;
+ }
+
+ operator Type const*() const
+ {
+ return Data;
+ }
+
+ // For-range based support
+
+ Type* begin()
+ {
+ return Data;
+ }
+
+ Type* end()
+ {
+ return Data + get_header()->Num;
+ }
+ };
+
+ template
+ struct HashTable
+ {
+ struct FindResult
+ {
+ sw HashIndex;
+ sw PrevIndex;
+ sw EntryIndex;
+ };
+
+ struct Entry
+ {
+ u64 Key;
+ sw Next;
+ Type Value;
+ };
+
+ static
+ HashTable init( AllocatorInfo allocator )
+ {
+ HashTable result = { { nullptr }, { nullptr } };
+
+ result.Hashes = Array::init( allocator );
+ result.Entries = Array::init( allocator );
+
+ return result;
+ }
+
+ void clear( void )
+ {
+ for ( sw idx = 0; idx < Hashes.num(); idx++ )
+ Hashes[ idx ] = -1;
+
+ Hashes.clear();
+ Entries.clear();
+ }
+
+		void destroy( void )
+		{
+			if ( Hashes && Hashes.get_header()->Capacity )
+				Hashes.free();
+			if ( Entries && Entries.get_header()->Capacity ) // was checking Hashes' capacity — copy/paste bug could leak or double-skip Entries
+				Entries.free();
+		}
+
+ Type* get( u64 key )
+ {
+ sw idx = find( key ).EntryIndex;
+ if ( idx >= 0 )
+ return & Entries[ idx ].Value;
+
+ return nullptr;
+ }
+
+ using MapProc = void (*)( u64 key, Type value );
+
+ void map( MapProc map_proc )
+ {
+ ZPL_ASSERT_NOT_NULL( map_proc );
+
+ for ( sw idx = 0; idx < Entries.num(); idx++ )
+ {
+ map_proc( Entries[ idx ].Key, Entries[ idx ].Value );
+ }
+ }
+
+ using MapMutProc = void (*)( u64 key, Type* value );
+
+ void map_mut( MapMutProc map_proc )
+ {
+ ZPL_ASSERT_NOT_NULL( map_proc );
+
+ for ( sw idx = 0; idx < Entries.num(); idx++ )
+ {
+ map_proc( Entries[ idx ].Key, & Entries[ idx ].Value );
+ }
+ }
+
+ void grow()
+ {
+ sw new_num = Array::grow_formula( Entries.num() );
+ rehash( new_num );
+ }
+
+ void rehash( sw new_num )
+ {
+ sw idx;
+ sw last_added_index;
+
+ HashTable new_ht = init( Hashes.get_header()->Allocator );
+
+ new_ht.Hashes.resize( new_num );
+ new_ht.Entries.reserve( new_ht.Hashes.num() );
+
+ Array::Header* hash_header = new_ht.Hashes.get_header();
+
+ for ( idx = 0; idx < new_ht.Hashes.num(); ++idx )
+ new_ht.Hashes[ idx ] = -1;
+
+ for ( idx = 0; idx < Entries.num(); ++idx )
+ {
+ Entry& entry = Entries[ idx ];
+
+ FindResult find_result;
+
+ if ( new_ht.Hashes.num() == 0 )
+ new_ht.grow();
+
+ entry = Entries[ idx ];
+ find_result = new_ht.find( entry.Key );
+ last_added_index = new_ht.add_entry( entry.Key );
+
+ if ( find_result.PrevIndex < 0 )
+ new_ht.Hashes[ find_result.HashIndex ] = last_added_index;
+
+ else
+ new_ht.Entries[ find_result.PrevIndex ].Next = last_added_index;
+
+ new_ht.Entries[ last_added_index ].Next = find_result.EntryIndex;
+ new_ht.Entries[ last_added_index ].Value = entry.Value;
+ }
+
+ destroy();
+ // Hashes = new_ht.Hashes;
+ // Entries = new_ht.Entries;
+ *this = new_ht;
+ }
+
+ void rehash_fast()
+ {
+ sw idx;
+
+ for ( idx = 0; idx < Entries.num(); idx++ )
+ Entries[ idx ].Next = -1;
+
+ for ( idx = 0; idx < Hashes.num(); idx++ )
+ Hashes[ idx ] = -1;
+
+ for ( idx = 0; idx < Entries.num(); idx++ )
+ {
+ Entry* entry;
+
+ FindResult find_result;
+ }
+ }
+
+ void remove( u64 key )
+ {
+ FindResult find_result = find( key);
+
+ if ( find_result.EntryIndex >= 0 )
+ {
+ Entries.remove_at( find_result.EntryIndex );
+ rehash_fast();
+ }
+ }
+
+ void remove_entry( sw idx )
+ {
+ Entries.remove_at( idx );
+ }
+
+ void set( u64 key, Type value )
+ {
+ sw idx;
+ FindResult find_result;
+
+ if ( Hashes.num() == 0 )
+ grow();
+
+ find_result = find( key );
+
+ if ( find_result.EntryIndex >= 0 )
+ {
+ idx = find_result.EntryIndex;
+ }
+ else
+ {
+ idx = add_entry( key );
+
+ if ( find_result.PrevIndex >= 0 )
+ {
+ Entries[ find_result.PrevIndex ].Next = idx;
+ }
+ else
+ {
+ Hashes[ find_result.HashIndex ] = idx;
+ }
+ }
+
+ Entries[ idx ].Value = value;
+
+ if ( full() )
+ grow();
+ }
+
+ sw slot( u64 key )
+ {
+ for ( sw idx = 0; idx < Hashes.num(); ++idx )
+ if ( Hashes[ idx ] == key )
+ return idx;
+
+ return -1;
+ }
+
+ Array< sw> Hashes;
+ Array< Entry> Entries;
+
+ protected:
+
+ sw add_entry( u64 key )
+ {
+ sw idx;
+ Entry entry = { key, -1 };
+
+ idx = Entries.num();
+ Entries.append( entry );
+ return idx;
+ }
+
+ FindResult find( u64 key )
+ {
+ FindResult result = { -1, -1, -1 };
+
+ if ( Hashes.num() > 0 )
+ {
+ result.HashIndex = key % Hashes.num();
+ result.EntryIndex = Hashes[ result.HashIndex ];
+
+ while ( result.EntryIndex >= 0 )
+ {
+ if ( Entries[ result.EntryIndex ].Key == key )
+ break;
+
+ result.PrevIndex = result.EntryIndex;
+ result.EntryIndex = Entries[ result.EntryIndex ].Next;
+ }
+ }
+
+ return result;
+ }
+
+ b32 full()
+ {
+ return 0.75f * Hashes.num() < Entries.num();
+ }
+ };
+
+ #pragma pop_macro("template")
+ #pragma endregion Containers
+
+ #pragma region String
+ // Constant string with length.
+ struct StrC
+ {
+ sw Len;
+ char const* Ptr;
+
+ static constexpr
+ StrC from( char const* str )
+ {
+ return { str_len( str ), str };
+ }
+
+ operator char const* () const
+ {
+ return Ptr;
+ }
+ };
+
+ // Dynamic String
+ // This is directly based off the ZPL string api.
+ // They used a header pattern
+ // I kept it for simplicty of porting but its not necessary to keep it that way.
+ struct String
+ {
+ struct Header
+ {
+ AllocatorInfo Allocator;
+ sw Length;
+ sw Capacity;
+ };
+
+ static
+ String make( AllocatorInfo allocator, char const* str )
+ {
+ sw length = str ? str_len( str ) : 0;
+ return make_length( allocator, str, length );
+ }
+
+ static
+ String make( AllocatorInfo allocator, StrC str )
+ {
+ return make_length( allocator, str.Ptr, str.Len );
+ }
+
+ static
+ String make_reserve( AllocatorInfo allocator, sw capacity )
+ {
+ constexpr sw header_size = sizeof( Header );
+
+ s32 alloc_size = header_size + capacity + 1;
+ void* allocation = alloc( allocator, alloc_size );
+
+ if ( allocation == nullptr )
+ return { nullptr };
+
+ mem_set( allocation, 0, alloc_size );
+
+ Header*
+ header = rcast(Header*, allocation);
+ header->Allocator = allocator;
+ header->Capacity = capacity;
+ header->Length = 0;
+
+ String result = { (char*)allocation + header_size };
+ return result;
+ }
+
+ static
+ String make_length( AllocatorInfo allocator, char const* str, sw length )
+ {
+ constexpr sw header_size = sizeof( Header );
+
+ s32 alloc_size = header_size + length + 1;
+ void* allocation = alloc( allocator, alloc_size );
+
+ if ( allocation == nullptr )
+ return { nullptr };
+
+ if ( ! str )
+ mem_set( allocation, 0, alloc_size );
+
+ Header&
+ header = * rcast(Header*, allocation);
+ header = { allocator, length, length };
+
+ String result = { rcast( char*, allocation) + header_size };
+
+ if ( length && str )
+ mem_copy( result, str, length );
+
+ result[ length ] = '\0';
+
+ return result;
+ }
+
+ static
+ String fmt( AllocatorInfo allocator, char* buf, sw buf_size, char const* fmt, ... )
+ {
+ va_list va;
+ va_start( va, fmt );
+ str_fmt_va( buf, buf_size, fmt, va );
+ va_end( va );
+
+ return make( allocator, buf );
+ }
+
+ static
+ String fmt_buf( AllocatorInfo allocator, char const* fmt, ... )
+ {
+ local_persist thread_local
+ char buf[ ZPL_PRINTF_MAXLEN ] = { 0 };
+
+ va_list va;
+ va_start( va, fmt );
+ str_fmt_va( buf, ZPL_PRINTF_MAXLEN, fmt, va );
+ va_end( va );
+
+ return make( allocator, buf );
+ }
+
+ static
+ String join( AllocatorInfo allocator, char const** parts, sw num_parts, char const* glue )
+ {
+ String result = make( allocator, "" );
+
+ for ( sw idx = 0; idx < num_parts; ++idx )
+ {
+ result.append( parts[ idx ] );
+
+ if ( idx < num_parts - 1 )
+ result.append( glue );
+ }
+
+ return result;
+ }
+
+ static
+ bool are_equal( String lhs, String rhs )
+ {
+ if ( lhs.length() != rhs.length() )
+ return false;
+
+ for ( sw idx = 0; idx < lhs.length(); ++idx )
+ if ( lhs[ idx ] != rhs[ idx ] )
+ return false;
+
+ return true;
+ }
+
+	bool make_space_for( char const* str, sw add_len )
+	{
+		sw available = avail_space();
+
+		// NOTE: Return if there is enough space left
+		if ( available >= add_len )
+		{
+			return true;
+		}
+		else
+		{
+			sw new_len, old_size, new_size;
+
+			void* ptr;
+			void* new_ptr;
+
+			AllocatorInfo allocator = get_header().Allocator;
+			Header*       header    = nullptr;
+
+			new_len  = length() + add_len;
+			ptr      = & get_header();
+			old_size = size_of( Header ) + length() + 1;
+			new_size = size_of( Header ) + new_len + 1;
+
+			new_ptr = resize( allocator, ptr, old_size, new_size );
+
+			if ( new_ptr == nullptr )
+				return false;
+
+			header            = zpl_cast( Header* ) new_ptr;
+			header->Allocator = allocator;
+			header->Capacity  = new_len;
+
+			Data = rcast( char*, header + 1 );
+
+			return true; // fixed: was `return str;` (implicit ptr->bool; reported failure for null str despite a successful resize)
+		}
+	}
+
+ bool append( char const* str )
+ {
+ return append( str, str_len( str ) );
+ }
+
+	bool append( char const* str, sw length )
+	{
+		if ( sptr(str) > 0 )
+		{
+			sw curr_len = this->length();
+
+			if ( ! make_space_for( str, length ) )
+				return false;
+
+			Header& header = get_header();
+
+			mem_copy( Data + curr_len, str, length );
+
+			Data[ curr_len + length ] = '\0';
+
+			header.Length = curr_len + length;
+		}
+
+		return str != nullptr; // explicit comparison (was an implicit pointer-to-bool conversion)
+	}
+
+ bool append( StrC str)
+ {
+ return append( str.Ptr, str.Len );
+ }
+
+ bool append( const String other )
+ {
+ return append( other.Data, other.length() );
+ }
+
+ bool append_fmt( char const* fmt, ... )
+ {
+ sw res;
+ char buf[ ZPL_PRINTF_MAXLEN ] = { 0 };
+
+ va_list va;
+ va_start( va, fmt );
+ res = str_fmt_va( buf, count_of( buf ) - 1, fmt, va ) - 1;
+ va_end( va );
+
+ return append( buf, res );
+ }
+
+ sw avail_space() const
+ {
+ Header const&
+ header = * rcast( Header const*, Data - sizeof( Header ));
+
+ return header.Capacity - header.Length;
+ }
+
+ sw capacity() const
+ {
+ Header const&
+ header = * rcast( Header const*, Data - sizeof( Header ));
+
+ return header.Capacity;
+ }
+
+ void clear()
+ {
+ get_header().Length = 0;
+ }
+
+ String duplicate( AllocatorInfo allocator )
+ {
+ return make_length( allocator, Data, length() );
+ }
+
+ void free()
+ {
+ if ( ! Data )
+ return;
+
+ Header& header = get_header();
+
+ gen::free( header.Allocator, & header );
+ }
+
+ Header& get_header()
+ {
+ return *(Header*)(Data - sizeof(Header));
+ }
+
+ sw length() const
+ {
+ Header const&
+ header = * rcast( Header const*, Data - sizeof( Header ));
+
+ return header.Length;
+ }
+
+ void trim( char const* cut_set )
+ {
+ sw len = 0;
+
+ char* start_pos = Data;
+ char* end_pos = Data + length() - 1;
+
+ while ( start_pos <= end_pos && char_first_occurence( cut_set, *start_pos ) )
+ start_pos++;
+
+ while ( end_pos > start_pos && char_first_occurence( cut_set, *end_pos ) )
+ end_pos--;
+
+ len = scast( sw, ( start_pos > end_pos ) ? 0 : ( ( end_pos - start_pos ) + 1 ) );
+
+ if ( Data != start_pos )
+ mem_move( Data, start_pos, len );
+
+ Data[ len ] = '\0';
+
+ get_header().Length = len;
+ }
+
+ void trim_space()
+ {
+ return trim( " \t\r\n\v\f" );
+ }
+
+ // For-range support
+
+ char* begin()
+ {
+ return Data;
+ }
+
+ char* end()
+ {
+ Header const&
+ header = * rcast( Header const*, Data - sizeof( Header ));
+
+ return Data + header.Length;
+ }
+
+ operator bool()
+ {
+ return Data;
+ }
+
+ operator char* ()
+ {
+ return Data;
+ }
+
+ operator char const* () const
+ {
+ return Data;
+ }
+
+ operator StrC() const
+ {
+ return
+ {
+ length(),
+ Data
+ };
+ }
+
+ // Used with cached strings
+ // Essentially makes the string a string view.
+ String const& operator = ( String const& other ) const
+ {
+ if ( this == & other )
+ return *this;
+
+ String& this_ = ccast( String, *this );
+
+ this_.Data = other.Data;
+
+ return this_;
+ }
+
+ char& operator [] ( sw index )
+ {
+ return Data[ index ];
+ }
+
+ char const& operator [] ( sw index ) const
+ {
+ return Data[ index ];
+ }
+
+ char* Data = nullptr;
+ };
+
+ struct String_POD
+ {
+ char* Data;
+
+ operator String()
+ {
+ return * rcast(String*, this);
+ }
+ };
+ static_assert( sizeof( String_POD ) == sizeof( String ), "String is not a POD" );
+ #pragma endregion String
+
+ #pragma region File Handling
+
+ typedef u32 FileMode;
+
+ enum FileModeFlag
+ {
+ EFileMode_READ = bit( 0 ),
+ EFileMode_WRITE = bit( 1 ),
+ EFileMode_APPEND = bit( 2 ),
+ EFileMode_RW = bit( 3 ),
+ ZPL_FILE_MODES = EFileMode_READ | EFileMode_WRITE | EFileMode_APPEND | EFileMode_RW,
+ };
+
+ // NOTE: Only used internally and for the file operations
+ enum SeekWhenceType
+ {
+ ESeekWhence_BEGIN = 0,
+ ESeekWhence_CURRENT = 1,
+ ESeekWhence_END = 2,
+ };
+
+ enum FileError
+ {
+ EFileError_NONE,
+ EFileError_INVALID,
+ EFileError_INVALID_FILENAME,
+ EFileError_EXISTS,
+ EFileError_NOT_EXISTS,
+ EFileError_PERMISSION,
+ EFileError_TRUNCATION_FAILURE,
+ EFileError_NOT_EMPTY,
+ EFileError_NAME_TOO_LONG,
+ EFileError_UNKNOWN,
+ };
+
+ union FileDescriptor
+ {
+ void* p;
+ sptr i;
+ uptr u;
+ };
+
+ typedef struct FileOperations FileOperations;
+
+ #define ZPL_FILE_OPEN_PROC( name ) FileError name( FileDescriptor* fd, FileOperations* ops, FileMode mode, char const* filename )
+ #define ZPL_FILE_READ_AT_PROC( name ) b32 name( FileDescriptor fd, void* buffer, sw size, s64 offset, sw* bytes_read, b32 stop_at_newline )
+ #define ZPL_FILE_WRITE_AT_PROC( name ) b32 name( FileDescriptor fd, void const* buffer, sw size, s64 offset, sw* bytes_written )
+ #define ZPL_FILE_SEEK_PROC( name ) b32 name( FileDescriptor fd, s64 offset, SeekWhenceType whence, s64* new_offset )
+ #define ZPL_FILE_CLOSE_PROC( name ) void name( FileDescriptor fd )
+
+ typedef ZPL_FILE_OPEN_PROC( file_open_proc );
+ typedef ZPL_FILE_READ_AT_PROC( FileReadProc );
+ typedef ZPL_FILE_WRITE_AT_PROC( FileWriteProc );
+ typedef ZPL_FILE_SEEK_PROC( FileSeekProc );
+ typedef ZPL_FILE_CLOSE_PROC( FileCloseProc );
+
+ struct FileOperations
+ {
+ FileReadProc* read_at;
+ FileWriteProc* write_at;
+ FileSeekProc* seek;
+ FileCloseProc* close;
+ };
+
+ extern FileOperations const default_file_operations;
+
+ typedef u64 FileTime;
+
+ enum DirType
+ {
+ ZPL_DIR_TYPE_FILE,
+ ZPL_DIR_TYPE_FOLDER,
+ ZPL_DIR_TYPE_UNKNOWN,
+ };
+
+ struct DirInfo;
+
+ struct DirEntry
+ {
+ char const* FileName;
+ DirInfo* Info;
+ u8 Type;
+ };
+
+ struct DirInfo
+ {
+ char const* FullPath;
+ DirEntry* Entries; // zpl_array
+
+ // Internals
+ char** Filenames; // zpl_array
+ String Buffer;
+ };
+
+ struct FileInfo
+ {
+ FileOperations Ops;
+ FileDescriptor FD;
+ b32 IsTemp;
+
+ char const* Filename;
+ FileTime LastWriteTime;
+ DirEntry* Dir;
+ };
+
+ /**
+ * Closes the file
+ * @param file
+ */
+ FileError file_close( FileInfo* file );
+
+ /**
+ * Returns the currently opened file's name
+ * @param file
+ */
+ inline
+ char const* file_name( FileInfo* file )
+ {
+ return file->Filename ? file->Filename : "";
+ }
+
+ /**
+ * Opens a file using a specified mode
+ * @param file
+ * @param mode Access mode to use
+ * @param filename
+ */
+ FileError file_open_mode( FileInfo* file, FileMode mode, char const* filename );
+
+ /**
+ * Seeks the file cursor from the beginning of file to a specific position
+ * @param file
+ * @param offset Offset to seek to
+ */
+ ZPL_DEF_INLINE s64 file_seek( FileInfo* file, s64 offset );
+
+ /**
+ * Returns the length from the beginning of the file we've read so far
+ * @param file
+ * @return Our current position in file
+ */
+ ZPL_DEF_INLINE s64 file_tell( FileInfo* file );
+
+ /**
+ * Writes to a file
+ * @param file
+ * @param buffer Buffer to read from
+ * @param size Size to read
+ */
+ b32 file_write( FileInfo* file, void const* buffer, sw size );
+
+ /**
+ * Writes to file at a specific offset
+ * @param file
+ * @param buffer Buffer to read from
+ * @param size Size to write
+ * @param offset Offset to write to
+ * @param bytes_written How much data we've actually written
+ */
+ ZPL_DEF_INLINE b32 file_write_at( FileInfo* file, void const* buffer, sw size, s64 offset );
+
+ /**
+ * Writes to file safely
+ * @param file
+ * @param buffer Buffer to read from
+ * @param size Size to write
+ * @param offset Offset to write to
+ * @param bytes_written How much data we've actually written
+ */
+ ZPL_DEF_INLINE b32 file_write_at_check( FileInfo* file, void const* buffer, sw size, s64 offset, sw* bytes_written );
+
+ ZPL_IMPL_INLINE s64 file_seek( FileInfo* f, s64 offset )
+ {
+ s64 new_offset = 0;
+
+ if ( ! f->Ops.read_at )
+ f->Ops = default_file_operations;
+
+ f->Ops.seek( f->FD, offset, ESeekWhence_BEGIN, &new_offset );
+
+ return new_offset;
+ }
+
+ ZPL_IMPL_INLINE s64 file_tell( FileInfo* f )
+ {
+ s64 new_offset = 0;
+
+ if ( ! f->Ops.read_at )
+ f->Ops = default_file_operations;
+
+ f->Ops.seek( f->FD, 0, ESeekWhence_CURRENT, &new_offset );
+
+ return new_offset;
+ }
+
+ ZPL_IMPL_INLINE b32 file_write( FileInfo* f, void const* buffer, sw size )
+ {
+ s64 cur_offset = file_tell( f );
+ b32 result = file_write_at( f, buffer, size, file_tell( f ) );
+
+ file_seek( f, cur_offset + size );
+
+ return result;
+ }
+
+ ZPL_IMPL_INLINE b32 file_write_at( FileInfo* f, void const* buffer, sw size, s64 offset )
+ {
+ return file_write_at_check( f, buffer, size, offset, NULL );
+ }
+
+ ZPL_IMPL_INLINE b32 file_write_at_check( FileInfo* f, void const* buffer, sw size, s64 offset, sw* bytes_written )
+ {
+ if ( ! f->Ops.read_at )
+ f->Ops = default_file_operations;
+ return f->Ops.write_at( f->FD, buffer, size, offset, bytes_written );
+ }
+
+ void dirinfo_free( DirInfo* dir );
+
+ #pragma endregion File Handling
+
+ namespace Memory
+ {
+ // NOTE: This limits the size of the string that can be read from a file or generated to 10 megs.
+ // If you are generating a string larger than this, increase the size of the bucket here.
+ constexpr uw BucketSize = megabytes(10);
+
+ // Global allocator used for data with process lifetime.
+ extern AllocatorInfo GlobalAllocator;
+
+ // Heap allocator is being used for now to isolate errors from being memory related (tech debt till ready to address)
+ // #define g_allocator heap()
+
+ void setup();
+ void cleanup();
+ }
-namespace Memory
-{
- // NOTE: This limits the size of the string that can be read from a file or generated to 10 megs.
- // If you are generating a string larger than this, increase the size of the bucket here.
- constexpr uw BucketSize = megabytes(10);
+ inline
+ sw log_fmt(char const* fmt, ...)
+ {
+ sw res;
+ va_list va;
- // Global allocator used for data with process lifetime.
- extern AllocatorInfo GlobalAllocator;
+ va_start(va, fmt);
+ res = str_fmt_out_va(fmt, va);
+ va_end(va);
- // Heap allocator is being used for now to isolate errors from being memory related (tech debt till ready to address)
- // #define g_allocator heap()
+ return res;
+ }
- void setup();
- void cleanup();
-}
-
-
-inline
-sw log_fmt(char const* fmt, ...)
-{
- sw res;
- va_list va;
-
- va_start(va, fmt);
- res = str_fmt_out_va(fmt, va);
- va_end(va);
-
- return res;
-}
-
-inline
-sw fatal(char const* fmt, ...)
-{
- local_persist thread_local
- char buf[ZPL_PRINTF_MAXLEN] = { 0 };
-
- va_list va;
-
-#if Build_Debug
- va_start(va, fmt);
- str_fmt_va(buf, ZPL_PRINTF_MAXLEN, fmt, va);
- va_end(va);
-
- assert_crash(buf);
- return -1;
-#else
- va_start(va, fmt);
- str_fmt_out_err_va( fmt, va);
- va_end(va);
-
- exit(1);
- return -1;
-#endif
+ inline
+ sw fatal(char const* fmt, ...)
+ {
+ local_persist thread_local
+ char buf[ZPL_PRINTF_MAXLEN] = { 0 };
+
+ va_list va;
+
+ #if Build_Debug
+ va_start(va, fmt);
+ str_fmt_va(buf, ZPL_PRINTF_MAXLEN, fmt, va);
+ va_end(va);
+
+ assert_crash(buf);
+ return -1;
+ #else
+ va_start(va, fmt);
+ str_fmt_out_err_va( fmt, va);
+ va_end(va);
+
+ exit(1);
+ return -1;
+ #endif
+ }
+
+// gen namespace
}
diff --git a/project/gen.cpp b/project/gen.cpp
index bea40c9..1e45c0a 100644
--- a/project/gen.cpp
+++ b/project/gen.cpp
@@ -7,15 +7,15 @@
namespace gen
{
- ZPL_TABLE_DEFINE( StringTable, str_tbl_, String );
+ // ZPL_TABLE_DEFINE( StringTable, str_tbl_, String );
namespace StaticData
{
- global Array(Pool) CodePools = nullptr;
- global Array(Arena) CodeEntriesArenas = nullptr;
- global Array(Arena) StringArenas = nullptr;
+ global Array< Pool > CodePools = { nullptr };
+ global Array< Arena > CodeEntriesArenas = { nullptr };
+ global Array< Arena > StringArenas = { nullptr };
- global StringTable StringMap;
+ global StringTable StringCache;
global AllocatorInfo Allocator_DataArrays = heap();
global AllocatorInfo Allocator_CodePool = heap();
@@ -837,13 +837,28 @@ namespace gen
result.append_fmt( "%s %s", entry( 0 )->to_string(), Name );
+ AST* type = entry( 0);
+ AST* type_arr = type->entry( 0 );
+
+			// TODO : This probably needs to be an iteration for all entries of type.
+ if ( type->num_entries() && type_arr->Type == ECode::Untyped )
+ result.append_fmt( "[%s]", type_arr->to_string() );
+
if ( entry( idx ) )
result.append_fmt( " = %s;", entry( idx )->to_string() );
break;
}
- result.append_fmt( "%s %s;", entry( 0 )->to_string(), Name );
+ AST* type = entry( 0);
+ AST* type_arr = type->entry( 0 );
+
+			// TODO : This probably needs to be an iteration for all entries of type.
+ if ( type->num_entries() && type_arr->Type == ECode::Untyped )
+ result.append_fmt( "%s %s[%s];", type->to_string(), Name, type_arr->to_string() );
+
+ else
+ result.append_fmt( "%s %s;", entry( 0 )->to_string(), Name );
}
break;
@@ -1033,48 +1048,52 @@ namespace gen
// Setup the arrays
{
- if (! array_init_reserve( CodePools, Allocator_DataArrays, InitSize_DataArrays ) )
+ CodePools = Array::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
+
+ if ( CodePools == nullptr )
fatal( "gen::init: Failed to initialize the CodePools array" );
- if ( ! array_init_reserve( CodeEntriesArenas, Allocator_DataArrays, InitSize_DataArrays ) )
+ CodeEntriesArenas = Array::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
+
+ if ( CodeEntriesArenas == nullptr )
fatal( "gen::init: Failed to initialize the CodeEntriesPools array" );
- if ( ! array_init_reserve( StringArenas, Allocator_DataArrays, InitSize_DataArrays ) )
+ StringArenas = Array::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
+
+ if ( StringArenas == nullptr )
fatal( "gen::init: Failed to initialize the StringArenas array" );
}
// Setup the code pool and code entries arena.
{
- Pool code_pool;
- pool_init( & code_pool, Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
+ Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
- if ( code_pool.physical_start == nullptr )
+ if ( code_pool.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the code pool" );
- array_append( CodePools, code_pool );
+ CodePools.append( code_pool );
- Arena code_entires_arena;
- arena_init_from_allocator( & code_entires_arena, Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
+ Arena code_entires_arena = Arena::init_from_allocator( Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
- if ( code_entires_arena.physical_start == nullptr )
+ if ( code_entires_arena.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the code entries arena" );
- array_append( CodeEntriesArenas, code_entires_arena );
+ CodeEntriesArenas.append( code_entires_arena );
- Arena string_arena;
- arena_init_from_allocator( & string_arena, Allocator_StringArena, SizePer_StringArena );
+ Arena string_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
- if ( string_arena.physical_start == nullptr )
+ if ( string_arena.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the string arena" );
- array_append( StringArenas, string_arena );
+ StringArenas.append( string_arena );
}
// Setup the hash tables
{
- str_tbl_init ( & StringMap, Allocator_StringTable );
- if ( StringMap.entries == nullptr )
- fatal( "gen::init: Failed to initialize the StringMap");
+ StringCache = StringTable::init( Allocator_StringTable );
+
+ if ( StringCache.Entries == nullptr )
+ fatal( "gen::init: Failed to initialize the StringCache");
}
Code::Global = make_code();
@@ -1195,41 +1214,40 @@ namespace gen
using namespace StaticData;
s32 index = 0;
- s32 left = array_count( CodePools );
+ s32 left = CodePools.num();
do
{
Pool* code_pool = & CodePools[index];
- pool_free( code_pool );
+ code_pool->free();
index++;
}
while ( left--, left );
index = 0;
- left = array_count( CodeEntriesArenas );
+ left = CodeEntriesArenas.num();
do
{
Arena* code_entries_arena = & CodeEntriesArenas[index];
- arena_free( code_entries_arena );
+ code_entries_arena->free();
index++;
}
while ( left--, left );
index = 0;
- left = array_count( StringArenas );
+ left = StringArenas.num();
do
{
Arena* string_arena = & StringArenas[index];
- arena_free( string_arena );
+ string_arena->free();
index++;
}
while ( left--, left );
- str_tbl_destroy( & StringMap );
- // type_tbl_destroy( & TypeMap );
+ StringCache.destroy();
- array_free( CodePools );
- array_free( CodeEntriesArenas );
- array_free( StringArenas );
+ CodePools.free();
+ CodeEntriesArenas.free();
+ StringArenas.free();
}
inline
@@ -1237,29 +1255,30 @@ namespace gen
{
using namespace StaticData;
- Arena* last = & array_back( StringArenas );
+ Arena& last = StringArenas.back();
- if ( last->total_allocated + str_length > last->total_size )
+ if ( last.TotalUsed + str_length > last.TotalSize )
{
- Arena new_arena;
- arena_init_from_allocator( & new_arena, Allocator_StringArena, SizePer_StringArena );
+ Arena new_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
- if ( ! array_append( StringArenas, new_arena ) )
+ if ( ! StringArenas.append( new_arena ) )
fatal( "gen::get_string_allocator: Failed to allocate a new string arena" );
- last = & array_back( StringArenas );
+ last = StringArenas.back();
}
- return arena_allocator( last );
+ return last;
}
// Will either make or retrive a code string.
StringCached get_cached_string( StrC str )
{
+ using namespace StaticData;
+
s32 hash_length = str.Len > kilobytes(1) ? kilobytes(1) : str.Len;
u32 key = crc32( str.Ptr, hash_length );
{
- String* result = str_tbl_get( & StaticData::StringMap, key );
+ StringCached* result = StringCache.get( key );
if ( result )
return * result;
@@ -1267,7 +1286,7 @@ namespace gen
String result = String::make( get_string_allocator( str.Len ), str );
- str_tbl_set( & StaticData::StringMap, key, result );
+ StringCache.set( key, result );
return result;
}
@@ -1279,33 +1298,32 @@ namespace gen
{
using namespace StaticData;
- AllocatorInfo allocator = { nullptr, nullptr };
+ AllocatorInfo allocator = CodePools.back();
s32 index = 0;
- s32 left = array_count( CodePools );
+ s32 left = CodePools.num();
do
{
- if ( CodePools[index].free_list != nullptr )
+ if ( CodePools[index].FreeList != nullptr )
{
- allocator = zpl::pool_allocator( & CodePools[index] );
+ allocator = CodePools[index];
break;
}
index++;
}
while ( left--, left );
- if ( allocator.data == nullptr )
+ if ( allocator.Data == nullptr )
{
- Pool code_pool;
- pool_init( & code_pool, Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
+ Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
- if ( code_pool.physical_start == nullptr )
+ if ( code_pool.PhysicalStart == nullptr )
fatal( "gen::make_code: Failed to allocate a new code pool - CodePool allcoator returned nullptr." );
- if ( ! array_append( CodePools, code_pool ) )
+ if ( ! CodePools.append( code_pool ) )
fatal( "gen::make_code: Failed to allocate a new code pool - CodePools failed to append new pool." );
- allocator = pool_allocator( CodePools );
+ allocator = * CodePools;
}
Code result { rcast( AST*, alloc( allocator, sizeof(AST) )) };
@@ -1323,37 +1341,34 @@ namespace gen
return result;
}
- Array(AST*) make_code_entries()
+ Array< AST* > make_code_entries()
{
using namespace StaticData;
AllocatorInfo allocator = { nullptr, nullptr };
s32 index = 0;
- s32 left = array_count( CodeEntriesArenas );
+ s32 left = CodeEntriesArenas.num();
do
{
- if ( arena_size_remaining( & CodeEntriesArenas[index], ZPL_DEFAULT_MEMORY_ALIGNMENT) >= InitSize_CodeEntiresArray )
- allocator = arena_allocator( & CodeEntriesArenas[index] );
+ if ( CodeEntriesArenas[index].size_remaining( ZPL_DEFAULT_MEMORY_ALIGNMENT) >= InitSize_CodeEntiresArray )
+ allocator = CodeEntriesArenas[index];
index++;
}
while( left--, left );
- if ( allocator.data == nullptr )
+ if ( allocator.Data == nullptr )
{
- Arena arena;
- arena_init_from_allocator( & arena, Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
+ Arena arena = Arena::init_from_allocator( Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
- if ( arena.physical_start == nullptr )
+ if ( arena.PhysicalStart == nullptr )
fatal( "gen::make_code: Failed to allocate a new code entries arena - CodeEntriesArena allcoator returned nullptr." );
- allocator = arena_allocator( & arena );
- array_append( CodeEntriesArenas, arena );
+ allocator = arena;
+ CodeEntriesArenas.append( arena );
}
- Array(AST*) entry_array;
- array_init( entry_array, allocator );
-
+ Array< AST* > entry_array = Array< AST* >::init( allocator );
return entry_array;
}
@@ -3357,12 +3372,12 @@ namespace gen
struct TokArray
{
- Array(Token) Arr;
+ Array Arr;
s32 Idx;
bool __eat( TokType type, char const* context )
{
- if ( array_count(Arr) - Idx <= 0 )
+ if ( Arr.num() - Idx <= 0 )
{
log_failure( "gen::%s: No tokens left", context );
return Code::Invalid;
@@ -3393,7 +3408,7 @@ namespace gen
Token* next()
{
- return Idx + 1 < array_count(Arr) ? &Arr[Idx + 1] : nullptr;
+ return Idx + 1 < Arr.num() ? &Arr[Idx + 1] : nullptr;
}
};
@@ -3423,17 +3438,18 @@ namespace gen
}
do_once_start
- arena_init_from_allocator( & LexAllocator, heap(), megabytes(10) );
+ // TODO : Use the global memory allocator for this...
+ LexAllocator = Arena::init_from_allocator( heap(), megabytes(10) );
- if ( LexAllocator.physical_start == nullptr )
+ if ( LexAllocator.PhysicalStart == nullptr )
{
log_failure( "gen::lex: failed to allocate memory for parsing constructor's lexer");
- return { nullptr, 0 };
+ return { { nullptr }, 0 };
}
do_once_end
local_persist thread_local
- Array(Token) Tokens = nullptr;
+ Array Tokens = { nullptr };
s32 left = content.Len -1;
char const* scanner = content.Ptr;
@@ -3445,13 +3461,13 @@ namespace gen
if ( left <= 0 )
{
log_failure( "gen::lex: no tokens found (only whitespace provided)" );
- return { nullptr, 0 };
+ return { { nullptr }, 0 };
}
if ( Tokens )
- array_clear( Tokens );
+ Tokens.clear();
- array_init_reserve( Tokens, arena_allocator( & LexAllocator), content.Len / 8 );
+ Tokens = Array::init_reserve( LexAllocator, content.Len / 8 );
while (left )
{
@@ -3851,7 +3867,7 @@ namespace gen
if ( token.Type != TokType::Invalid )
{
- array_append( Tokens, token );
+ Tokens.append( token );
continue;
}
@@ -3864,13 +3880,13 @@ namespace gen
}
token.Type = type;
- array_append( Tokens, token );
+ Tokens.append( token );
}
- if ( array_count(Tokens) == 0 )
+ if ( Tokens.num() == 0 )
{
log_failure( "Failed to lex any tokens" );
- return { nullptr, 0 };
+ return { { nullptr }, 0 };
}
return { Tokens, 0 };
@@ -3898,7 +3914,7 @@ namespace gen
# define currtok toks.current()
# define prevtok toks.previous()
# define eat( Type_ ) toks.__eat( Type_, context )
-# define left ( array_count(toks.Arr) - toks.Idx )
+# define left ( toks.Arr.num() - toks.Idx )
# define check( Type_ ) ( left && currtok.Type == Type_ )
#pragma endregion Helper Macros
@@ -3948,9 +3964,11 @@ namespace gen
while ( left && currtok.Type != TokType::BraceSquare_Close )
{
- untyped_tok.Length = ( (sptr)currtok.Text + currtok.Length ) - (sptr)untyped_tok.Text;
+ eat( currtok.Type );
}
+ untyped_tok.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)untyped_tok.Text;
+
Code array_expr = untyped_str( untyped_tok );
if ( left == 0 )
@@ -5729,8 +5747,8 @@ namespace gen
SpecifierT specs_found[16] { ESpecifier::Num_Specifiers };
s32 num_specifiers = 0;
- Token name = { nullptr, 0, TokType::Invalid };
- Token func_sig = { currtok.Text, 0, TokType::Invalid };
+ Token name = { nullptr, 0, TokType::Invalid };
+ Token brute_sig = { currtok.Text, 0, TokType::Invalid };
while ( left && tok_is_specifier( currtok ) )
{
@@ -5779,6 +5797,29 @@ namespace gen
name = parse_identifier( toks, context );
if ( ! name )
return Code::Invalid;
+
+		// Probably dealing with a templated symbol
+ if ( currtok.Type == TokType::Operator && currtok.Text[0] == '<' && currtok.Length == 1 )
+ {
+ eat( TokType::Operator );
+
+ s32 level = 0;
+ while ( left && ( currtok.Text[0] != '>' || level > 0 ))
+ {
+ if ( currtok.Text[0] == '<' )
+ level++;
+
+ if ( currtok.Text[0] == '>' )
+ level--;
+
+ eat( currtok.Type );
+ }
+
+ eat( TokType::Operator );
+
+ // Extend length of name to last token
+ name.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)name.Text;
+ }
}
while ( left && tok_is_specifier( currtok ) )
@@ -5837,7 +5878,7 @@ namespace gen
eat(TokType::Capture_End);
- func_sig.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)func_sig.Text;
+ brute_sig.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)brute_sig.Text;
}
using namespace ECode;
@@ -5846,10 +5887,10 @@ namespace gen
result = make_code();
result->Type = Typename;
- if ( func_sig.Length > 0 )
+ if ( brute_sig.Length > 0 )
{
// Bruteforce all tokens together.
- name = func_sig;
+ name = brute_sig;
}
else
{
@@ -6162,17 +6203,15 @@ namespace gen
sw Length;
};
- ZPL_TABLE( static, TokMap, tokmap_, TokEntry )
-
sw token_fmt_va( char* buf, uw buf_size, char const* fmt, s32 num_tokens, va_list va )
{
char const* buf_begin = buf;
sw remaining = buf_size;
- TokMap tok_map;
+ HashTable tok_map;
{
// TODO : Switch this to use an arena that makes use of the stack (cap the size of the token table to around 4096 bytes)
- tokmap_init( & tok_map, Memory::GlobalAllocator );
+ tok_map = HashTable::init( Memory::GlobalAllocator );
s32 left = num_tokens;
@@ -6189,7 +6228,7 @@ namespace gen
u32 key = crc32( token, str_len(token, 32) );
- tokmap_set( & tok_map, key, entry );
+ tok_map.set( key, entry );
}
}
@@ -6224,7 +6263,7 @@ namespace gen
char const* token = fmt + 1;
u32 key = crc32( token, tok_len );
- TokEntry* value = tokmap_get( & tok_map, key );
+ TokEntry* value = tok_map.get( key );
if ( value )
{
@@ -6254,7 +6293,7 @@ namespace gen
}
}
- tokmap_clear( & tok_map );
+ tok_map.clear();
sw result = buf_size - remaining + 1;
diff --git a/project/gen.hpp b/project/gen.hpp
index 6743b52..d626737 100644
--- a/project/gen.hpp
+++ b/project/gen.hpp
@@ -378,7 +378,7 @@ namespace gen
#pragma region Data Structures
// Implements basic string interning. Data structure is based off the ZPL Hashtable.
- ZPL_TABLE_DECLARE( ZPL_EXTERN, StringTable, str_tbl_, String );
+ using StringTable = HashTable;
// Represents strings cached with the string table.
// Should never be modified, if changed string is desired, cache_string( str ) another.
@@ -428,7 +428,7 @@ namespace gen
s32 num_entries()
{
- return DynamicEntries ? array_count( ArrDyn ) : StaticIndex;
+ return DynamicEntries ? ArrDyn.num() : StaticIndex;
}
// Parameter
@@ -548,7 +548,7 @@ namespace gen
# define Using_AST_POD \
union { \
AST* ArrStatic[AST::ArrS_Cap]; \
- Array(AST*) ArrDyn; \
+ Array< AST* > ArrDyn; \
StringCached Content; \
SpecifierT ArrSpecs[AST::ArrSpecs_Cap]; \
}; \
@@ -699,7 +699,7 @@ namespace gen
// This provides a fresh Code AST array for the entries field of the AST.
// This is done separately from the regular CodePool allocator.
- Array(AST*) make_code_entries();
+ Array< AST* > make_code_entries();
// Set these before calling gen's init() procedure.
// Data
@@ -1071,7 +1071,7 @@ namespace gen
other->duplicate() : other;
if (DynamicEntries)
- array_append( ArrDyn, to_add );
+ ArrDyn.append( to_add );
else
{
@@ -1087,11 +1087,11 @@ namespace gen
s32 index = 0;
do
{
- array_append( ArrDyn, ArrStatic[index] );
+ ArrDyn.append( ArrStatic[index] );
}
while ( StaticIndex--, StaticIndex );
- array_append( ArrDyn, to_add );
+ ArrDyn.append( to_add );
}
}
diff --git a/test/NonParsed/Array.NonParsed.hpp b/test/NonParsed/Array.NonParsed.hpp
index 9411870..51b5fa6 100644
--- a/test/NonParsed/Array.NonParsed.hpp
+++ b/test/NonParsed/Array.NonParsed.hpp
@@ -300,16 +300,16 @@ struct GenArrayRequest
StrC Dependency;
StrC Type;
};
-Array(GenArrayRequest) GenArrayRequests;
+Array GenArrayRequests;
void gen__array_request( StrC type, StrC dep = {} )
{
do_once_start
- array_init( GenArrayRequests, Memory::GlobalAllocator );
+ GenArrayRequests = Array::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenArrayRequests ); ++idx )
+ for ( sw idx = 0; idx < GenArrayRequests.num(); ++idx )
{
StrC const reqest_type = GenArrayRequests[ idx ].Type;
@@ -321,7 +321,7 @@ void gen__array_request( StrC type, StrC dep = {} )
}
GenArrayRequest request = { dep, type };
- array_append( GenArrayRequests, request );
+ GenArrayRequests.append( request );
}
#define gen_array( type ) gen__array_request( { txt_to_StrC(type) } )
@@ -338,7 +338,7 @@ u32 gen_array_file()
gen_array_file.print( array_base );
GenArrayRequest* current = GenArrayRequests;
- s32 left = array_count( GenArrayRequests );
+ s32 left = GenArrayRequests.num();
while (left--)
{
GenArrayRequest const& request = * current;
diff --git a/test/NonParsed/Buffer.NonParsed.hpp b/test/NonParsed/Buffer.NonParsed.hpp
index d23e5ce..1c00fd8 100644
--- a/test/NonParsed/Buffer.NonParsed.hpp
+++ b/test/NonParsed/Buffer.NonParsed.hpp
@@ -206,16 +206,16 @@ struct GenBufferRequest
StrC Type;
sw TypeSize;
};
-Array(GenBufferRequest) GenBufferRequests;
+Array GenBufferRequests;
void gen__buffer_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
- array_init( GenBufferRequests, Memory::GlobalAllocator );
+ GenBufferRequests = Array<GenBufferRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenBufferRequests ); ++idx )
+ for ( sw idx = 0; idx < GenBufferRequests.num(); ++idx )
{
StrC const reqest_type = GenBufferRequests[ idx ].Type;
@@ -227,7 +227,7 @@ void gen__buffer_request( StrC type, sw size, StrC dep = {} )
}
GenBufferRequest request = { dep, type, size};
- array_append( GenBufferRequests, request );
+ GenBufferRequests.append( request );
}
#define gen_buffer( type ) gen__buffer_request( { txt_to_StrC(type) }, sizeof( type ))
@@ -241,7 +241,7 @@ u32 gen_buffer_file()
gen_buffer_file.print( gen__buffer_base() );
GenBufferRequest* current = GenBufferRequests;
- s32 left = array_count( GenBufferRequests );
+ s32 left = GenBufferRequests.num();
while (left--)
{
GenBufferRequest const& request = * current;
diff --git a/test/NonParsed/HashTable.NonParsed.hpp b/test/NonParsed/HashTable.NonParsed.hpp
index 8c1a317..36f868d 100644
--- a/test/NonParsed/HashTable.NonParsed.hpp
+++ b/test/NonParsed/HashTable.NonParsed.hpp
@@ -21,7 +21,7 @@ Code gen__hashtable_base()
return find_result;
}
-Code gen__hashtable( StrC type, sw type_size )
+Code gen__hashtable( StrC type )
{
static Code t_allocator_info = def_type( name(AllocatorInfo) );
@@ -397,20 +397,19 @@ struct GenHashTableRequest
{
StrC Dependency;
StrC Type;
- sw TypeSize;
};
-Array(GenHashTableRequest) GenHashTableRequests;
+Array<GenHashTableRequest> GenHashTableRequests;
-void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
+void gen__hashtable_request( StrC type, StrC dep = {} )
{
do_once_start
- array_init( GenHashTableRequests, Memory::GlobalAllocator );
+ GenHashTableRequests = Array<GenHashTableRequest>::init( Memory::GlobalAllocator );
gen_array( sw );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenHashTableRequests ); ++idx )
+ for ( sw idx = 0; idx < GenHashTableRequests.num(); ++idx )
{
StrC const reqest_type = GenHashTableRequests[ idx ].Type;
@@ -421,10 +420,10 @@ void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
return;
}
- GenHashTableRequest request = { dep, type, size};
- array_append( GenHashTableRequests, request );
+ GenHashTableRequest request = { dep, type };
+ GenHashTableRequests.append( request );
}
-#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) }, sizeof( type ))
+#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) } )
u32 gen_hashtable_file()
{
@@ -439,12 +438,12 @@ u32 gen_hashtable_file()
gen_buffer_file.print( gen__hashtable_base());
GenHashTableRequest* current = GenHashTableRequests;
- s32 left = array_count( GenHashTableRequests );
+ s32 left = GenHashTableRequests.num();
while (left--)
{
GenHashTableRequest const& request = * current;
- Code generated_buffer = gen__hashtable( current->Type, current->TypeSize );
+ Code generated_buffer = gen__hashtable( current->Type );
if ( request.Dependency )
{
diff --git a/test/NonParsed/Ring.NonParsed.hpp b/test/NonParsed/Ring.NonParsed.hpp
index fabc141..fc3ae83 100644
--- a/test/NonParsed/Ring.NonParsed.hpp
+++ b/test/NonParsed/Ring.NonParsed.hpp
@@ -162,16 +162,16 @@ struct GenRingRequest
StrC Type;
sw TypeSize;
};
-Array(GenRingRequest) GenRingRequests;
+Array<GenRingRequest> GenRingRequests;
void gen__ring_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
- array_init( GenRingRequests, Memory::GlobalAllocator );
+ GenRingRequests = Array<GenRingRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenRingRequests ); ++idx )
+ for ( sw idx = 0; idx < GenRingRequests.num(); ++idx )
{
StrC const reqest_type = GenRingRequests[ idx ].Type;
@@ -186,7 +186,7 @@ void gen__ring_request( StrC type, sw size, StrC dep = {} )
gen__buffer_request( type, size, dep );
GenRingRequest request = { dep, type, size};
- array_append( GenRingRequests, request );
+ GenRingRequests.append( request );
}
#define gen_ring( type ) gen__ring_request( { txt_to_StrC(type) }, sizeof( type ))
@@ -201,7 +201,7 @@ u32 gen_ring_file()
// gen_ring_file.print( gen__ring_base() );
GenRingRequest* current = GenRingRequests;
- s32 left = array_count( GenRingRequests );
+ s32 left = GenRingRequests.num();
while (left--)
{
GenRingRequest const& request = * current;
diff --git a/test/Parsed/Array.Parsed.hpp b/test/Parsed/Array.Parsed.hpp
index 6356cc4..9ee4db1 100644
--- a/test/Parsed/Array.Parsed.hpp
+++ b/test/Parsed/Array.Parsed.hpp
@@ -224,16 +224,16 @@ struct GenArrayRequest
StrC Dependency;
StrC Type;
};
-Array(GenArrayRequest) GenArrayRequests;
+Array<GenArrayRequest> GenArrayRequests;
void gen__array_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
- array_init( GenArrayRequests, Memory::GlobalAllocator );
+ GenArrayRequests = Array<GenArrayRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenArrayRequests ); ++idx )
+ for ( sw idx = 0; idx < GenArrayRequests.num(); ++idx )
{
StrC const reqest_type = GenArrayRequests[ idx ].Type;
@@ -245,7 +245,7 @@ void gen__array_request( StrC type, sw size, StrC dep = {} )
}
GenArrayRequest request = { dep, type };
- array_append( GenArrayRequests, request );
+ GenArrayRequests.append( request );
}
#define gen_array( type ) gen__array_request( { txt_to_StrC(type) }, sizeof(type) )
@@ -262,7 +262,7 @@ u32 gen_array_file()
gen_array_file.print( array_base );
GenArrayRequest* current = GenArrayRequests;
- s32 left = array_count( GenArrayRequests );
+ s32 left = GenArrayRequests.num();
while (left--)
{
GenArrayRequest const& request = * current;
diff --git a/test/Parsed/Buffer.Parsed.hpp b/test/Parsed/Buffer.Parsed.hpp
index 82445e3..e48d94a 100644
--- a/test/Parsed/Buffer.Parsed.hpp
+++ b/test/Parsed/Buffer.Parsed.hpp
@@ -137,16 +137,16 @@ struct GenBufferRequest
StrC Dependency;
StrC Type;
};
-Array(GenBufferRequest) GenBufferRequests;
+Array<GenBufferRequest> GenBufferRequests;
void gen__buffer_request( StrC type, StrC dep = {} )
{
do_once_start
- array_init( GenBufferRequests, Memory::GlobalAllocator );
+ GenBufferRequests = Array<GenBufferRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenBufferRequests ); ++idx )
+ for ( sw idx = 0; idx < GenBufferRequests.num(); ++idx )
{
StrC const reqest_type = GenBufferRequests[ idx ].Type;
@@ -158,7 +158,7 @@ void gen__buffer_request( StrC type, StrC dep = {} )
}
GenBufferRequest request = { dep, type };
- array_append( GenBufferRequests, request );
+ GenBufferRequests.append( request );
}
#define gen_buffer( type ) gen__buffer_request( { txt_to_StrC(type) } )
@@ -172,7 +172,7 @@ u32 gen_buffer_file()
gen_buffer_file.print( gen__buffer_base() );
GenBufferRequest* current = GenBufferRequests;
- s32 left = array_count( GenBufferRequests );
+ s32 left = GenBufferRequests.num();
while (left--)
{
GenBufferRequest const& request = * current;
diff --git a/test/Parsed/HashTable.Parsed.hpp b/test/Parsed/HashTable.Parsed.hpp
index c7fda47..6147183 100644
--- a/test/Parsed/HashTable.Parsed.hpp
+++ b/test/Parsed/HashTable.Parsed.hpp
@@ -290,18 +290,18 @@ struct GenHashTableRequest
StrC Type;
sw TypeSize;
};
-Array(GenHashTableRequest) GenHashTableRequests;
+Array<GenHashTableRequest> GenHashTableRequests;
void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
- array_init( GenHashTableRequests, Memory::GlobalAllocator );
+ GenHashTableRequests = Array<GenHashTableRequest>::init( Memory::GlobalAllocator );
gen_array( sw );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenHashTableRequests ); ++idx )
+ for ( sw idx = 0; idx < GenHashTableRequests.num(); ++idx )
{
StrC const reqest_type = GenHashTableRequests[ idx ].Type;
@@ -313,7 +313,7 @@ void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
}
GenHashTableRequest request = { dep, type, size};
- array_append( GenHashTableRequests, request );
+ GenHashTableRequests.append( request );
}
#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) }, sizeof( type ))
@@ -329,7 +329,7 @@ u32 gen_hashtable_file()
gen_buffer_file.print( gen__hashtable_base());
GenHashTableRequest* current = GenHashTableRequests;
- s32 left = array_count( GenHashTableRequests );
+ s32 left = GenHashTableRequests.num();
while (left--)
{
GenHashTableRequest const& request = * current;
diff --git a/test/Parsed/Ring.Parsed.hpp b/test/Parsed/Ring.Parsed.hpp
index 9bdc01e..5643011 100644
--- a/test/Parsed/Ring.Parsed.hpp
+++ b/test/Parsed/Ring.Parsed.hpp
@@ -102,16 +102,16 @@ struct GenRingRequest
StrC Dependency;
StrC Type;
};
-Array(GenRingRequest) GenRingRequests;
+Array<GenRingRequest> GenRingRequests;
void gen__ring_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
- array_init( GenRingRequests, Memory::GlobalAllocator );
+ GenRingRequests = Array<GenRingRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
- for ( sw idx = 0; idx < array_count( GenRingRequests ); ++idx )
+ for ( sw idx = 0; idx < GenRingRequests.num(); ++idx )
{
StrC const reqest_type = GenRingRequests[ idx ].Type;
@@ -126,7 +126,7 @@ void gen__ring_request( StrC type, sw size, StrC dep = {} )
gen__buffer_request( type, dep );
GenRingRequest request = { dep, type };
- array_append( GenRingRequests, request );
+ GenRingRequests.append( request );
}
#define gen_ring( type ) gen__ring_request( { txt_to_StrC(type) }, sizeof( type ))
@@ -141,7 +141,7 @@ u32 gen_ring_file()
// gen_ring_file.print( gen__ring_base() );
GenRingRequest* current = GenRingRequests;
- s32 left = array_count( GenRingRequests );
+ s32 left = GenRingRequests.num();
while (left--)
{
GenRingRequest const& request = * current;
diff --git a/test/SOA.hpp b/test/SOA.hpp
new file mode 100644
index 0000000..c54e3ef
--- /dev/null
+++ b/test/SOA.hpp
@@ -0,0 +1,113 @@
+#pragma once
+
+#ifdef gen_time
+#include "gen.hpp"
+using namespace gen;
+
+Code gen_SOA( Code struct_def, bool use_dynamic = false )
+{
+ StrC name;
+ name.Ptr = str_fmt_buf( "SOA_%s", (char const*) struct_def->Name );
+ name.Len = str_len( name );
+
+ Code
+ soa_entry = { struct_def->duplicate() };
+ soa_entry->Name = get_cached_string( name(Entry) );
+
+ Array<Code> vars = Array<Code>::init( Memory::GlobalAllocator );
+
+ Code soa = def_struct( name, def_struct_body( 1, soa_entry ) );
+ {
+ Code body = struct_def.body();
+ for ( s32 idx = 0; idx < body->num_entries(); idx++ )
+ {
+ Code struct_mem = { body->entry( idx ) };
+
+ if ( struct_mem->Type == ECode::Variable )
+ {
+ Code var_type = { struct_mem->entry(0) };
+
+ Code entry_arr = { nullptr };
+ if ( use_dynamic)
+ {
+ entry_arr = parse_variable( token_fmt( "Array<<type>> <name>;", 2
+ , "type", (char const*)var_type->Name
+ , "name", (char const*)struct_mem->Name )
+ );
+ }
+ else
+ {
+ entry_arr = parse_variable( token_fmt( "<type> <name>[100];", 2
+ , "type", (char const*)var_type->Name
+ , "name", (char const*)struct_mem->Name )
+ );
+ }
+
+ vars.append( entry_arr );
+ soa.body()->add_entry( entry_arr );
+ }
+ }
+ }
+
+ Code make;
+ {
+ make = parse_function( token_fmt(
+ txt(
+ static
+ <SOA_Type> make( AllocatorInfo allocator )
+ {
+ <SOA_Type> soa = {};
+ }
+ ),
+ 1, "SOA_Type", (char const*)name
+ ));
+
+ if ( use_dynamic )
+ {
+ for ( s32 idx = 0; idx < vars.num(); idx++ )
+ {
+ Code member = vars[idx];
+
+ Code arr_init = def_execution( token_fmt( "soa.<var_name> = <var_type>::init( allocator );", 2
+ , "var_name", (char const*)member->Name
+ , "var_type", (char const*)member->entry(0)->Name
+ ));
+
+ make.body()->add_entry( arr_init );
+ }
+ }
+
+ make.body()->add_entry( def_execution( code( return soa; ) ));
+ }
+
+ Code get;
+ {
+ get = parse_function( code(
+ Entry get( s32 idx )
+ {
+ }
+ ));
+
+ String content = String::make( Memory::GlobalAllocator, "return\n{\n" );
+
+ for ( s32 idx = 0; idx < vars.num(); idx ++ )
+ {
+ Code member = vars[idx];
+
+ content.append_fmt( token_fmt( "<var_name>[idx],", 1
+ , "var_name", (char const*)member->Name
+ ));
+ }
+
+ content.append( "};" );
+
+ Code ret = def_execution( content );
+
+ get.body()->add_entry( ret );
+ }
+
+ soa.body()->add_entry( make );
+ soa.body()->add_entry( get );
+ return soa;
+}
+#endif
diff --git a/test/test.Parsed.cpp b/test/test.Parsed.cpp
index be0894e..3f49627 100644
--- a/test/test.Parsed.cpp
+++ b/test/test.Parsed.cpp
@@ -4,6 +4,7 @@
#include "Parsed\HashTable.Parsed.hpp"
#include "Parsed\Ring.Parsed.hpp"
#include "Parsed\Sanity.Parsed.hpp"
+#include "SOA.hpp"
#ifdef gen_time
@@ -34,6 +35,31 @@ int gen_main()
gen_hashtable_file();
gen_ring_file();
+ Builder soa_test; soa_test.open( "SOA.gen.hpp" );
+
+ soa_test.print( parse_using( code(
+ using u16 = unsigned short;
+ )));
+
+ soa_test.print( def_include( StrC::from("Bloat.hpp")));
+
+ soa_test.print( def_using_namespace( name(gen) ) );
+
+ soa_test.print( gen_SOA(
+ parse_struct( code(
+ struct TestStruct
+ {
+ u8 A;
+ u16 B;
+ u32 C;
+ u64 D;
+ };
+ )),
+ true
+ ));
+
+ soa_test.write();
+
gen::deinit();
Memory::cleanup();
return 0;