Started to move over zpl dependencies and use templated containers.

Still have a ways to go.
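
The gist of the pattern, as a minimal sketch drawn from the APIs touched in this diff (illustrative only):

// Before: zpl macro-based containers
//   Array(Token) Tokens = nullptr;
//   array_init_reserve( Tokens, heap(), 128 );
//   array_append( Tokens, token );
//   array_free( Tokens );
//
// After: templated containers introduced in this commit
//   Array<Token> Tokens = Array<Token>::init_reserve( heap(), 128 );
//   Tokens.append( token );
//   Tokens.free();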
Edward R. Gonzalez 2023-07-11 18:29:45 -04:00
parent 661630a88f
commit 20d307759b
15 changed files with 2485 additions and 1340 deletions

.vscode/gencpp.natvis

@ -1,11 +1,33 @@
<?xml version="1.0" encoding="utf-8"?>
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<Type Name="StrC">
<Type Name="gen::AllocatorInfo">
<DisplayString>Data:{Data} Proc:{Proc}</DisplayString>
</Type>
<Type Name="gen::Array&lt;*&gt;">
<DisplayString>Num:{((Header*)((char*)Data - sizeof(Header)))->Num}, Capacity:{((Header*)((char*)Data - sizeof(Header)))->Capacity}</DisplayString>
<Expand>
<Synthetic Name="Header">
<DisplayString>{(Header*)((char*)Data - sizeof(Header))}</DisplayString>
<Expand>
<Item Name="Allocator">((Header*)((char*)Data - sizeof(Header)))->Allocator</Item>
<Item Name="Capacity">((Header*)((char*)Data - sizeof(Header)))->Capacity</Item>
<Item Name="Num">((Header*)((char*)Data - sizeof(Header)))->Num</Item>
</Expand>
</Synthetic>
<ArrayItems>
<Size>((Header*)((char*)Data - sizeof(Header)))->Capacity</Size>
<ValuePointer>Data</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="gen::StrC">
<DisplayString>Len:{Len} Ptr:{Ptr, [Len]s}</DisplayString>
</Type>
<Type Name="String">
<Type Name="gen::String">
<DisplayString Condition="Data == nullptr">null</DisplayString>
<DisplayString>{Data,na}</DisplayString>
<Expand>
@ -20,7 +42,7 @@
</Expand>
</Type>
<Type Name="String::Header">
<Type Name="gen::String::Header">
<DisplayString>Length: {Length}, Capacity: {Capacity}</DisplayString>
<Expand>
<Item Name="Allocator">Allocator</Item>
@ -41,7 +63,7 @@
<Item Name="ArrStatic" Condition="DynamicEntries == false">ArrStatic</Item>
<Item Name="Index" Condition="DynamicEntries == false">StaticIndex</Item>
<Item Name="ArrDyn" Condition="DynamicEntries == true">ArrDyn</Item>
<Item Name="Index" Condition="DynamicEntries == true">((ArrayHeader*)((char*)ArrDyn - sizeof(ArrayHeader)))->count</Item>
<Item Name="Index" Condition="DynamicEntries == true">ArrDyn.num()</Item>
</Expand>
</Type>
@ -57,7 +79,7 @@
<Item Name="ArrStatic" Condition="ast->DynamicEntries == false">ast->ArrStatic</Item>
<Item Name="Index" Condition="ast->DynamicEntries == false">ast->StaticIndex</Item>
<Item Name="ArrDyn" Condition="ast->DynamicEntries == true">ast->ArrDyn</Item>
<Item Name="Index" Condition="ast->DynamicEntries == true">((ArrayHeader*)((char*)ast->ArrDyn - sizeof(ArrayHeader)))->count</Item>
<Item Name="Index" Condition="ast->DynamicEntries == true">ast->ArrDyn.num()</Item>
</Expand>
</Type>
@ -71,21 +93,6 @@
<Type Name="gen::Parser::TokArray">
<DisplayString>Current[ { Arr[Idx] } ] Idx:{ Idx }</DisplayString>
<Expand>
<Synthetic Name="Header">
<DisplayString>{(ArrayHeader*)((char*)Arr - sizeof(ArrayHeader))}</DisplayString>
<Expand>
<Item Name="elem_size">((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->elem_size</Item>
<Item Name="count">((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->count</Item>
<Item Name="capacity">((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->capacity</Item>
<Item Name="allocator">((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->allocator</Item>
</Expand>
</Synthetic>
<ArrayItems>
<Size>((ArrayHeader*)((char*)Arr - sizeof(ArrayHeader)))->count</Size>
<ValuePointer>Arr</ValuePointer>
</ArrayItems>
</Expand>
</Type>
</AutoVisualizer>
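
The visualizer expressions above mirror the container layout: the Header lives immediately before the Data pointer, so stepping back sizeof(Header) bytes recovers it. A sketch of the assumed layout (illustrative):

// [ Header { Allocator, Capacity, Num } ][ element 0 ][ element 1 ] ...
//                                          ^ Data points here
// Hence, in the container itself:
//   Header* get_header( void ) { return rcast( Header*, Data ) - 1; }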


@ -2,9 +2,177 @@
#include "Bloat.hpp"
void* gen_Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
namespace gen
{
gen_Arena* arena = rcast(gen_Arena*, allocator_data);
#pragma region Memory
struct _heap_stats
{
u32 magic;
sw used_memory;
sw alloc_count;
};
global _heap_stats _heap_stats_info;
void heap_stats_init( void )
{
zero_item( &_heap_stats_info );
_heap_stats_info.magic = ZPL_HEAP_STATS_MAGIC;
}
sw heap_stats_used_memory( void )
{
ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
return _heap_stats_info.used_memory;
}
sw heap_stats_alloc_count( void )
{
ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
return _heap_stats_info.alloc_count;
}
void heap_stats_check( void )
{
ZPL_ASSERT_MSG( _heap_stats_info.magic == ZPL_HEAP_STATS_MAGIC, "heap_stats is not initialised yet, call heap_stats_init first!" );
ZPL_ASSERT( _heap_stats_info.used_memory == 0 );
ZPL_ASSERT( _heap_stats_info.alloc_count == 0 );
}
struct _heap_alloc_info
{
sw size;
void* physical_start;
};
void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
return zpl::heap_allocator_proc( allocator_data, (zpl::AllocType)type, size, alignment, old_memory, old_size, flags );
// void* ptr = NULL;
// // unused( allocator_data );
// // unused( old_size );
// if ( ! alignment )
// alignment = ZPL_DEFAULT_MEMORY_ALIGNMENT;
// #ifdef ZPL_HEAP_ANALYSIS
// sw alloc_info_size = size_of( _heap_alloc_info );
// sw alloc_info_remainder = ( alloc_info_size % alignment );
// sw track_size = max( alloc_info_size, alignment ) + alloc_info_remainder;
// switch ( type )
// {
// case EAllocation_FREE :
// {
// if ( ! old_memory )
// break;
// _heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* ) old_memory - 1;
// _heap_stats_info.used_memory -= alloc_info->size;
// _heap_stats_info.alloc_count--;
// old_memory = alloc_info->physical_start;
// }
// break;
// case EAllocation_ALLOC :
// {
// size += track_size;
// }
// break;
// default :
// break;
// }
// #endif
// switch ( type )
// {
// #if defined( ZPL_COMPILER_MSVC ) || ( defined( ZPL_COMPILER_GCC ) && defined( ZPL_SYSTEM_WINDOWS ) ) || ( defined( ZPL_COMPILER_TINYC ) && defined( ZPL_SYSTEM_WINDOWS ) )
// case EAllocation_ALLOC :
// ptr = _aligned_malloc( size, alignment );
// if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
// zero_size( ptr, size );
// break;
// case EAllocation_FREE :
// _aligned_free( old_memory );
// break;
// case EAllocation_RESIZE :
// {
// AllocatorInfo a = heap();
// ptr = default_resize_align( a, old_memory, old_size, size, alignment );
// }
// break;
// #elif defined( ZPL_SYSTEM_LINUX ) && ! defined( ZPL_CPU_ARM ) && ! defined( ZPL_COMPILER_TINYC )
// case EAllocation_ALLOC :
// {
// ptr = aligned_alloc( alignment, ( size + alignment - 1 ) & ~( alignment - 1 ) );
// if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
// {
// zero_size( ptr, size );
// }
// }
// break;
// case EAllocation_FREE :
// {
// free( old_memory );
// }
// break;
// case EAllocation_RESIZE :
// {
// AllocatorInfo a = heap();
// ptr = default_resize_align( a, old_memory, old_size, size, alignment );
// }
// break;
// #else
// case EAllocation_ALLOC :
// {
// posix_memalign( &ptr, alignment, size );
// if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
// {
// zero_size( ptr, size );
// }
// }
// break;
// case EAllocation_FREE :
// {
// free( old_memory );
// }
// break;
// case EAllocation_RESIZE :
// {
// AllocatorInfo a = heap();
// ptr = default_resize_align( a, old_memory, old_size, size, alignment );
// }
// break;
// #endif
// case EAllocation_FREE_ALL :
// break;
// }
// #ifdef ZPL_HEAP_ANALYSIS
// if ( type == EAllocation_ALLOC )
// {
// _heap_alloc_info* alloc_info = zpl_cast( _heap_alloc_info* )( zpl_cast( char* ) ptr + alloc_info_remainder );
// zero_item( alloc_info );
// alloc_info->size = size - track_size;
// alloc_info->physical_start = ptr;
// ptr = zpl_cast( void* )( alloc_info + 1 );
// _heap_stats_info.used_memory += alloc_info->size;
// _heap_stats_info.alloc_count++;
// }
// #endif
// return ptr;
}
void* Arena::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
Arena* arena = rcast(Arena*, allocator_data);
void* ptr = NULL;
// unused( old_size );
@ -26,7 +194,7 @@ void* gen_Arena::allocator_proc( void* allocator_data, AllocType type, sw size,
ptr = align_forward( end, alignment );
arena->TotalUsed += total_size;
if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
zero_size( ptr, size );
}
break;
@ -49,11 +217,11 @@ void* gen_Arena::allocator_proc( void* allocator_data, AllocType type, sw size,
break;
}
return ptr;
}
}
void* gen_Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
gen_Pool* pool = zpl_cast( gen_Pool* ) allocator_data;
void* Pool::allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
Pool* pool = zpl_cast( Pool* ) allocator_data;
void* ptr = NULL;
// unused( old_size );
@ -73,7 +241,7 @@ void* gen_Pool::allocator_proc( void* allocator_data, AllocType type, sw size, s
pool->FreeList = zpl_cast( void* ) next_free;
pool->TotalSize += pool->BlockSize;
if ( flags & ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO )
if ( flags & ALLOCATOR_FLAG_CLEAR_TO_ZERO )
zero_size( ptr, size );
}
break;
@ -122,11 +290,11 @@ void* gen_Pool::allocator_proc( void* allocator_data, AllocType type, sw size, s
}
return ptr;
}
}
gen_Pool gen_Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
{
gen_Pool pool = {};
Pool Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align )
{
Pool pool = {};
sw actual_block_size, pool_size, block_index;
void *data, *curr;
@ -158,40 +326,414 @@ gen_Pool gen_Pool::init_align( AllocatorInfo backing, sw num_blocks, sw block_si
pool.FreeList = data;
return pool;
}
}
#pragma endregion Memory
namespace Memory
{
using namespace zpl;
#pragma region File Handling
#if defined( ZPL_SYSTEM_WINDOWS ) || defined( ZPL_SYSTEM_CYGWIN )
internal wchar_t* _alloc_utf8_to_ucs2( AllocatorInfo a, char const* text, sw* w_len_ )
{
wchar_t* w_text = NULL;
sw len = 0, w_len = 0, w_len1 = 0;
if ( text == NULL )
{
if ( w_len_ )
*w_len_ = w_len;
return NULL;
}
len = str_len( text );
if ( len == 0 )
{
if ( w_len_ )
*w_len_ = w_len;
return NULL;
}
w_len = MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, text, zpl_cast( int ) len, NULL, 0 );
if ( w_len == 0 )
{
if ( w_len_ )
*w_len_ = w_len;
return NULL;
}
w_text = alloc_array( a, wchar_t, w_len + 1 );
w_len1 = MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, text, zpl_cast( int ) len, w_text, zpl_cast( int ) w_len );
if ( w_len1 == 0 )
{
free( a, w_text );
if ( w_len_ )
*w_len_ = 0;
return NULL;
}
w_text[ w_len ] = 0;
if ( w_len_ )
*w_len_ = w_len;
return w_text;
}
internal ZPL_FILE_SEEK_PROC( _win32_file_seek )
{
LARGE_INTEGER li_offset;
li_offset.QuadPart = offset;
if ( ! SetFilePointerEx( fd.p, li_offset, &li_offset, whence ) )
{
return false;
}
if ( new_offset )
*new_offset = li_offset.QuadPart;
return true;
}
internal ZPL_FILE_READ_AT_PROC( _win32_file_read )
{
// unused( stop_at_newline );
b32 result = false;
_win32_file_seek( fd, offset, ESeekWhence_BEGIN, NULL );
DWORD size_ = zpl_cast( DWORD )( size > ZPL_I32_MAX ? ZPL_I32_MAX : size );
DWORD bytes_read_;
if ( ReadFile( fd.p, buffer, size_, &bytes_read_, NULL ) )
{
if ( bytes_read )
*bytes_read = bytes_read_;
result = true;
}
return result;
}
internal ZPL_FILE_WRITE_AT_PROC( _win32_file_write )
{
DWORD size_ = zpl_cast( DWORD )( size > ZPL_I32_MAX ? ZPL_I32_MAX : size );
DWORD bytes_written_;
_win32_file_seek( fd, offset, ESeekWhence_BEGIN, NULL );
if ( WriteFile( fd.p, buffer, size_, &bytes_written_, NULL ) )
{
if ( bytes_written )
*bytes_written = bytes_written_;
return true;
}
return false;
}
internal ZPL_FILE_CLOSE_PROC( _win32_file_close )
{
CloseHandle( fd.p );
}
FileOperations const default_file_operations = { _win32_file_read, _win32_file_write, _win32_file_seek, _win32_file_close };
ZPL_NEVER_INLINE ZPL_FILE_OPEN_PROC( _win32_file_open )
{
DWORD desired_access;
DWORD creation_disposition;
void* handle;
wchar_t* w_text;
switch ( mode & ZPL_FILE_MODES )
{
case EFileMode_READ :
desired_access = GENERIC_READ;
creation_disposition = OPEN_EXISTING;
break;
case EFileMode_WRITE :
desired_access = GENERIC_WRITE;
creation_disposition = CREATE_ALWAYS;
break;
case EFileMode_APPEND :
desired_access = GENERIC_WRITE;
creation_disposition = OPEN_ALWAYS;
break;
case EFileMode_READ | EFileMode_RW :
desired_access = GENERIC_READ | GENERIC_WRITE;
creation_disposition = OPEN_EXISTING;
break;
case EFileMode_WRITE | EFileMode_RW :
desired_access = GENERIC_READ | GENERIC_WRITE;
creation_disposition = CREATE_ALWAYS;
break;
case EFileMode_APPEND | EFileMode_RW :
desired_access = GENERIC_READ | GENERIC_WRITE;
creation_disposition = OPEN_ALWAYS;
break;
default :
ZPL_PANIC( "Invalid file mode" );
return EFileError_INVALID;
}
w_text = _alloc_utf8_to_ucs2( heap(), filename, NULL );
handle = CreateFileW( w_text, desired_access, FILE_SHARE_READ | FILE_SHARE_DELETE, NULL, creation_disposition, FILE_ATTRIBUTE_NORMAL, NULL );
free( heap(), w_text );
if ( handle == INVALID_HANDLE_VALUE )
{
DWORD err = GetLastError();
switch ( err )
{
case ERROR_FILE_NOT_FOUND :
return EFileError_NOT_EXISTS;
case ERROR_FILE_EXISTS :
return EFileError_EXISTS;
case ERROR_ALREADY_EXISTS :
return EFileError_EXISTS;
case ERROR_ACCESS_DENIED :
return EFileError_PERMISSION;
}
return EFileError_INVALID;
}
if ( mode & EFileMode_APPEND )
{
LARGE_INTEGER offset = { { 0 } };
if ( ! SetFilePointerEx( handle, offset, NULL, ESeekWhence_END ) )
{
CloseHandle( handle );
return EFileError_INVALID;
}
}
fd->p = handle;
*ops = default_file_operations;
return EFileError_NONE;
}
#else // POSIX
# include <fcntl.h>
internal ZPL_FILE_SEEK_PROC( _posix_file_seek )
{
# if defined( ZPL_SYSTEM_OSX )
s64 res = lseek( fd.i, offset, whence );
# else // TODO(ZaKlaus): @fixme lseek64
s64 res = lseek( fd.i, offset, whence );
# endif
if ( res < 0 )
return false;
if ( new_offset )
*new_offset = res;
return true;
}
internal ZPL_FILE_READ_AT_PROC( _posix_file_read )
{
unused( stop_at_newline );
sw res = pread( fd.i, buffer, size, offset );
if ( res < 0 )
return false;
if ( bytes_read )
*bytes_read = res;
return true;
}
internal ZPL_FILE_WRITE_AT_PROC( _posix_file_write )
{
sw res;
s64 curr_offset = 0;
_posix_file_seek( fd, 0, ESeekWhence_CURRENT, &curr_offset );
if ( curr_offset == offset )
{
// NOTE: Writing to stdout et al. doesn't like pwrite for numerous reasons
res = write( zpl_cast( int ) fd.i, buffer, size );
}
else
{
res = pwrite( zpl_cast( int ) fd.i, buffer, size, offset );
}
if ( res < 0 )
return false;
if ( bytes_written )
*bytes_written = res;
return true;
}
internal ZPL_FILE_CLOSE_PROC( _posix_file_close )
{
close( fd.i );
}
FileOperations const default_file_operations = { _posix_file_read, _posix_file_write, _posix_file_seek, _posix_file_close };
ZPL_NEVER_INLINE ZPL_FILE_OPEN_PROC( _posix_file_open )
{
s32 os_mode;
switch ( mode & ZPL_FILE_MODES )
{
case EFileMode_READ :
os_mode = O_RDONLY;
break;
case EFileMode_WRITE :
os_mode = O_WRONLY | O_CREAT | O_TRUNC;
break;
case EFileMode_APPEND :
os_mode = O_WRONLY | O_APPEND | O_CREAT;
break;
case EFileMode_READ | EFileMode_RW :
os_mode = O_RDWR;
break;
case EFileMode_WRITE | EFileMode_RW :
os_mode = O_RDWR | O_CREAT | O_TRUNC;
break;
case EFileMode_APPEND | EFileMode_RW :
os_mode = O_RDWR | O_APPEND | O_CREAT;
break;
default :
ZPL_PANIC( "Invalid file mode" );
return EFileError_INVALID;
}
fd->i = open( filename, os_mode, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH );
if ( fd->i < 0 )
{
// TODO: More file errors
return EFileError_INVALID;
}
*ops = default_file_operations;
return EFileError_NONE;
}
// POSIX
#endif
internal void _dirinfo_free_entry( DirEntry* entry );
FileError file_close( FileInfo* f )
{
if ( ! f )
return EFileError_INVALID;
if ( f->Filename )
free( heap(), zpl_cast( char* ) f->Filename );
#if defined( ZPL_SYSTEM_WINDOWS )
if ( f->FD.p == INVALID_HANDLE_VALUE )
return EFileError_INVALID;
#else
if ( f->FD.i < 0 )
return EFileError_INVALID;
#endif
if ( f->IsTemp )
{
f->Ops.close( f->FD );
return EFileError_NONE;
}
if ( ! f->Ops.read_at )
f->Ops = default_file_operations;
f->Ops.close( f->FD );
if ( f->Dir )
{
_dirinfo_free_entry( f->Dir );
mfree( f->Dir );
f->Dir = NULL;
}
return EFileError_NONE;
}
FileError file_new( FileInfo* f, FileDescriptor fd, FileOperations ops, char const* filename )
{
FileError err = EFileError_NONE;
sw len = str_len( filename );
f->Ops = ops;
f->FD = fd;
f->Dir = nullptr;
f->LastWriteTime = 0;
f->Filename = alloc_array( heap(), char, len + 1 );
mem_copy( zpl_cast( char* ) f->Filename, zpl_cast( char* ) filename, len + 1 );
return err;
}
FileError file_open_mode( FileInfo* f, FileMode mode, char const* filename )
{
FileInfo file_ =
{
{ nullptr, nullptr, nullptr, nullptr },
{ nullptr },
0,
nullptr,
0,
nullptr
};
*f = file_;
FileError err;
#if defined( ZPL_SYSTEM_WINDOWS ) || defined( ZPL_SYSTEM_CYGWIN )
err = _win32_file_open( &f->FD, &f->Ops, mode, filename );
#else
err = _posix_file_open( &f->FD, &f->Ops, mode, filename );
#endif
if ( err == EFileError_NONE )
return file_new( f, f->FD, f->Ops, filename );
return err;
}
internal void _dirinfo_free_entry( DirEntry* entry )
{
if ( entry->Info )
{
dirinfo_free( entry->Info );
mfree( entry->Info );
entry->Info = nullptr;
}
}
void dirinfo_free( DirInfo* dir )
{
ZPL_ASSERT_NOT_NULL( dir );
for ( sw i = 0; i < array_count( dir->Entries ); ++i )
{
_dirinfo_free_entry( dir->Entries + i );
}
array_free( dir->Entries );
array_free( dir->Filenames );
// string_free( dir->Buffer );
dir->Buffer.free();
mfree( ( void* )dir->FullPath );
}
#pragma endregion File Handling
namespace Memory
{
global AllocatorInfo GlobalAllocator;
global Array(Arena) Global_AllocatorBuckets;
global Array<Arena> Global_AllocatorBuckets;
void* Global_Allocator_Proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags )
{
Arena* last = & array_back( Global_AllocatorBuckets );
Arena& last = Global_AllocatorBuckets.back();
switch ( type )
{
case EAllocation_ALLOC:
{
if ( last->total_allocated + size > last->total_size )
if ( last.TotalUsed + size > last.TotalSize )
{
Arena bucket;
arena_init_from_allocator( & bucket, heap(), BucketSize );
Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
if ( bucket.physical_start == nullptr )
if ( bucket.PhysicalStart == nullptr )
fatal( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! array_append( Global_AllocatorBuckets, bucket ) )
if ( ! Global_AllocatorBuckets.append( bucket ) )
fatal( "Failed to append bucket to Global_AllocatorBuckets");
last = & array_back( Global_AllocatorBuckets );
last = Global_AllocatorBuckets.back();
}
return alloc_align( arena_allocator( last), size, alignment );
return alloc_align( last, size, alignment );
}
case EAllocation_FREE:
{
@ -205,25 +747,24 @@ namespace Memory
break;
case EAllocation_RESIZE:
{
if ( last->total_allocated + size > last->total_size )
if ( last.TotalUsed + size > last.TotalSize )
{
Arena bucket;
arena_init_from_allocator( & bucket, heap(), BucketSize );
Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
if ( bucket.physical_start == nullptr )
if ( bucket.PhysicalStart == nullptr )
fatal( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! array_append( Global_AllocatorBuckets, bucket ) )
if ( ! Global_AllocatorBuckets.append( bucket ) )
fatal( "Failed to append bucket to Global_AllocatorBuckets");
last = & array_back( Global_AllocatorBuckets );
last = Global_AllocatorBuckets.back();
}
void* result = alloc_align( arena_allocator( last), size, alignment );
void* result = alloc_align( last.Backing, size, alignment );
if ( result != nullptr && old_memory != nullptr )
{
mem_copy( result, old_memory, size );
mem_copy( result, old_memory, old_size );
}
return result;
@ -237,30 +778,36 @@ namespace Memory
{
GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };
if ( ! array_init_reserve( Global_AllocatorBuckets, heap(), 128 ) )
Global_AllocatorBuckets = Array<Arena>::init_reserve( heap(), 128 );
if ( Global_AllocatorBuckets == nullptr )
fatal( "Failed to reserve memory for Global_AllocatorBuckets");
Arena bucket;
arena_init_from_allocator( & bucket, heap(), BucketSize );
Arena bucket = Arena::init_from_allocator( heap(), BucketSize );
if ( bucket.physical_start == nullptr )
if ( bucket.PhysicalStart == nullptr )
fatal( "Failed to create first bucket for Global_AllocatorBuckets");
array_append( Global_AllocatorBuckets, bucket );
Global_AllocatorBuckets.append( bucket );
}
void cleanup()
{
s32 index = 0;
s32 left = array_count( Global_AllocatorBuckets );
s32 left = Global_AllocatorBuckets.num();
do
{
Arena* bucket = & Global_AllocatorBuckets[ index ];
arena_free( bucket );
bucket->free();
index++;
}
while ( left--, left );
array_free( Global_AllocatorBuckets );
Global_AllocatorBuckets.free();
}
// namespace Memory
}
// namespace gen
}
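
A minimal sketch of the lifecycle these procedures implement (hypothetical usage):

// Memory::setup();                                    // reserve the bucket array and create the first Arena
// void* block = alloc( Memory::GlobalAllocator, 64 ); // served from the current bucket; a new bucket
//                                                     // is appended when the current one overflows
// Memory::cleanup();                                  // free every bucket, then the bucket array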


@ -27,6 +27,11 @@
# define ZPL_MODULE_HASHING
#include "zpl.h"
#undef Array
#undef heap
#undef alloc_item
#undef alloc_array
using zpl::b32;
using zpl::s8;
using zpl::s16;
@ -40,32 +45,32 @@ using zpl::sw;
using zpl::sptr;
using zpl::uptr;
using zpl::AllocType;
using zpl::Arena;
using zpl::AllocatorInfo;
using zpl::ArrayHeader;
using zpl::FileInfo;
using zpl::FileError;
using zpl::Pool;
// using zpl::AllocType;
// using zpl::Arena;
// using zpl::AllocatorInfo;
// using zpl::ArrayHeader;
// using zpl::FileInfo;
// using zpl::FileError;
// using zpl::Pool;
// using zpl::String;
using zpl::EAllocation_ALLOC;
using zpl::EAllocation_FREE;
using zpl::EAllocation_FREE_ALL;
using zpl::EAllocation_RESIZE;
using zpl::EFileMode_WRITE;
using zpl::EFileError_NONE;
// using zpl::EAllocation_ALLOC;
// using zpl::EAllocation_FREE;
// using zpl::EAllocation_FREE_ALL;
// using zpl::EAllocation_RESIZE;
// using zpl::EFileMode_WRITE;
// using zpl::EFileError_NONE;
using zpl::ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO;
// using zpl::ZPL_ALLOCATOR_FLAG_CLEAR_TO_ZERO;
using zpl::align_forward;
using zpl::align_forward_i64;
using zpl::alloc;
using zpl::alloc_align;
using zpl::arena_allocator;
using zpl::arena_init_from_memory;
using zpl::arena_init_from_allocator;
using zpl::arena_free;
// using zpl::alloc;
// using zpl::alloc_align;
// using zpl::arena_allocator;
// using zpl::arena_init_from_memory;
// using zpl::arena_init_from_allocator;
// using zpl::arena_free;
using zpl::assert_crash;
using zpl::char_first_occurence;
using zpl::char_is_alpha;
@ -74,15 +79,15 @@ using zpl::char_is_digit;
using zpl::char_is_hex_digit;
using zpl::char_is_space;
using zpl::crc32;
using zpl::free_all;
// using zpl::free_all;
using zpl::is_power_of_two;
using zpl::mem_copy;
using zpl::mem_move;
using zpl::mem_set;
using zpl::pointer_add;
using zpl::pool_allocator;
using zpl::pool_init;
using zpl::pool_free;
// using zpl::pool_allocator;
// using zpl::pool_init;
// using zpl::pool_free;
using zpl::process_exit;
using zpl::str_compare;
using zpl::str_copy;
@ -184,19 +189,170 @@ do \
} \
while(0);
constexpr
char const* Msg_Invalid_Value = "INVALID VALUE PROVIDED";
#pragma region Memory
// TODO : Use it.
struct gen_Arena
namespace gen
{
constexpr
char const* Msg_Invalid_Value = "INVALID VALUE PROVIDED";
#pragma region Memory
enum AllocType : u8
{
EAllocation_ALLOC,
EAllocation_FREE,
EAllocation_FREE_ALL,
EAllocation_RESIZE,
};
using AllocatorProc = void* ( void* allocator_data, AllocType type
, sw size, sw alignment
, void* old_memory, sw old_size
, u64 flags );
struct AllocatorInfo
{
AllocatorProc* Proc;
void* Data;
};
enum AllocFlag
{
ALLOCATOR_FLAG_CLEAR_TO_ZERO = bit( 0 ),
};
//! Allocate memory with default alignment.
ZPL_DEF_INLINE void* alloc( AllocatorInfo a, sw size );
//! Allocate memory with specified alignment.
ZPL_DEF_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment );
//! Free allocated memory.
ZPL_DEF_INLINE void free( AllocatorInfo a, void* ptr );
//! Free all memory allocated by an allocator.
ZPL_DEF_INLINE void free_all( AllocatorInfo a );
//! Resize an allocated memory.
ZPL_DEF_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size );
//! Resize an allocated memory with specified alignment.
ZPL_DEF_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
#ifndef alloc_item
//! Allocate memory for an item.
# define alloc_item( allocator_, Type ) ( Type* )alloc( allocator_, size_of( Type ) )
//! Allocate memory for an array of items.
# define alloc_array( allocator_, Type, count ) ( Type* )alloc( allocator_, size_of( Type ) * ( count ) )
#endif
/* heap memory analysis tools */
/* define ZPL_HEAP_ANALYSIS to enable this feature */
/* call zpl_heap_stats_init at the beginning of the entry point */
/* you can call zpl_heap_stats_check near the end of the execution to validate any possible leaks */
void heap_stats_init( void );
sw heap_stats_used_memory( void );
sw heap_stats_alloc_count( void );
void heap_stats_check( void );
//! Allocate/Resize memory using default options.
//! Use this if you don't need a "fancy" resize allocation
ZPL_DEF_INLINE void* default_resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment );
void* heap_allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
//! The heap allocator backed by operating system's memory manager.
constexpr AllocatorInfo heap( void ) { return { heap_allocator_proc, nullptr }; }
// #ifndef malloc
// //! Helper to allocate memory using heap allocator.
// # define malloc( sz ) ZPL_NS( alloc )( ZPL_NS( heap_allocator )(), sz )
// //! Helper to free memory allocated by heap allocator.
// # define mfree( ptr ) ZPL_NS( free )( ZPL_NS( heap_allocator )(), ptr )
// //! Alias to heap allocator.
// # define heap ZPL_NS( heap_allocator )
// #endif
ZPL_IMPL_INLINE void* alloc_align( AllocatorInfo a, sw size, sw alignment )
{
return a.Proc( a.Data, EAllocation_ALLOC, size, alignment, nullptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
ZPL_IMPL_INLINE void* alloc( AllocatorInfo a, sw size )
{
return alloc_align( a, size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
}
ZPL_IMPL_INLINE void free( AllocatorInfo a, void* ptr )
{
if ( ptr != nullptr )
a.Proc( a.Data, EAllocation_FREE, 0, 0, ptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
ZPL_IMPL_INLINE void free_all( AllocatorInfo a )
{
a.Proc( a.Data, EAllocation_FREE_ALL, 0, 0, nullptr, 0, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
ZPL_IMPL_INLINE void* resize( AllocatorInfo a, void* ptr, sw old_size, sw new_size )
{
return resize_align( a, ptr, old_size, new_size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
}
ZPL_IMPL_INLINE void* resize_align( AllocatorInfo a, void* ptr, sw old_size, sw new_size, sw alignment )
{
return a.Proc( a.Data, EAllocation_RESIZE, new_size, alignment, ptr, old_size, ZPL_DEFAULT_ALLOCATOR_FLAGS );
}
ZPL_IMPL_INLINE void* default_resize_align( AllocatorInfo a, void* old_memory, sw old_size, sw new_size, sw alignment )
{
if ( ! old_memory )
return alloc_align( a, new_size, alignment );
if ( new_size == 0 )
{
free( a, old_memory );
return nullptr;
}
if ( new_size < old_size )
new_size = old_size;
if ( old_size == new_size )
{
return old_memory;
}
else
{
void* new_memory = alloc_align( a, new_size, alignment );
if ( ! new_memory )
return nullptr;
mem_move( new_memory, old_memory, min( new_size, old_size ) );
free( a, old_memory );
return new_memory;
}
}
// ZPL_IMPL_INLINE AllocatorInfo heap( void )
// {
// AllocatorInfo a;
// a.Proc = heap_allocator_proc;
// a.Data = nullptr;
// return a;
// }
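// Illustrative sketch (not part of this commit) of how the interface above composes:
//   AllocatorInfo a = heap();
//   u32* single = alloc_item( a, u32 );      // (u32*)alloc( a, size_of( u32 ) )
//   u32* many   = alloc_array( a, u32, 32 ); // (u32*)alloc( a, size_of( u32 ) * 32 )
//   free( a, single );
//   free( a, many );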
struct Arena
{
static
void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
static
gen_Arena init_from_memory( void* start, sw size )
Arena init_from_memory( void* start, sw size )
{
return
{
@ -209,9 +365,9 @@ struct gen_Arena
}
static
gen_Arena init_from_allocator( AllocatorInfo backing, sw size )
Arena init_from_allocator( AllocatorInfo backing, sw size )
{
gen_Arena result =
Arena result =
{
backing,
alloc( backing, size),
@ -223,7 +379,7 @@ struct gen_Arena
}
static
gen_Arena init_sub( gen_Arena& parent, sw size )
Arena init_sub( Arena& parent, sw size )
{
return init_from_allocator( parent.Backing, size );
}
@ -250,9 +406,9 @@ struct gen_Arena
void free()
{
if ( Backing.proc )
if ( Backing.Proc )
{
zpl::free( Backing, PhysicalStart );
gen::free( Backing, PhysicalStart );
PhysicalStart = nullptr;
}
}
@ -268,27 +424,32 @@ struct gen_Arena
sw TotalSize;
sw TotalUsed;
sw TempCount;
};
struct gen_Pool
{
operator AllocatorInfo()
{
return { allocator_proc, this };
}
};
struct Pool
{
static
void* allocator_proc( void* allocator_data, AllocType type, sw size, sw alignment, void* old_memory, sw old_size, u64 flags );
static
gen_Pool init( AllocatorInfo backing, sw num_blocks, sw block_size )
Pool init( AllocatorInfo backing, sw num_blocks, sw block_size )
{
return init_align( backing, num_blocks, block_size, ZPL_DEFAULT_MEMORY_ALIGNMENT );
}
static
gen_Pool init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align );
Pool init_align( AllocatorInfo backing, sw num_blocks, sw block_size, sw block_align );
void free()
{
if ( Backing.proc )
if ( Backing.Proc )
{
zpl::free( Backing, PhysicalStart );
gen::free( Backing, PhysicalStart );
}
}
@ -299,18 +460,22 @@ struct gen_Pool
sw BlockAlign;
sw TotalSize;
sw NumBlocks;
};
#pragma endregion Memory
operator AllocatorInfo()
{
return { allocator_proc, this };
}
};
#pragma region Containers
#pragma push_macro("template")
#undef template
#pragma endregion Memory
// TODO : Use it.
template<class Type>
struct TArray
{
#pragma region Containers
#pragma push_macro("template")
#undef template
template<class Type>
struct Array
{
struct Header
{
AllocatorInfo Allocator;
@ -319,15 +484,15 @@ struct TArray
};
static
TArray init( AllocatorInfo allocator )
Array init( AllocatorInfo allocator )
{
return init_reserve( allocator, grow_formula(0) );
}
static
TArray init_reserve( AllocatorInfo allocator, sw capacity )
Array init_reserve( AllocatorInfo allocator, sw capacity )
{
Header* header = rcast( Header*, alloc( allocator, sizeof(Header) + sizeof(Type) ));
Header* header = rcast( Header*, alloc( allocator, sizeof(Header) + sizeof(Type) * capacity ));
if ( header == nullptr )
return { nullptr };
@ -342,40 +507,42 @@ struct TArray
static
uw grow_formula( uw value )
{
return 2 * value * 8;
return 2 * value + 8;
}
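// Illustrative growth sequence under the corrected formula: 0 -> 8, 8 -> 24, 24 -> 56, ...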
bool append( Type value )
{
Header& header = get_header();
Header* header = get_header();
if ( header.Num == header.Capacity )
if ( header->Num == header->Capacity )
{
if ( ! grow( header.Capacity ))
if ( ! grow( header->Capacity ))
return false;
header = get_header();
}
Data[ header.Num ] = value;
header.Num++;
Data[ header->Num ] = value;
header->Num++;
return true;
}
Type& back( void )
{
Header& header = get_header();
Header& header = * get_header();
return Data[ header.Num - 1 ];
}
void clear( void )
{
Header& header = get_header();
Header& header = * get_header();
header.Num = 0;
}
bool fill( uw begin, uw end, Type value )
{
Header& header = get_header();
Header& header = * get_header();
if ( begin < 0 || end >= header.Num )
return false;
@ -384,40 +551,41 @@ struct TArray
{
Data[ idx ] = value;
}
// mem_set( Data + begin, value, end - begin)
return true;
}
void free( void )
{
Header& header = get_header();
zpl::free( header.Allocator, &header );
Header& header = * get_header();
gen::free( header.Allocator, &header );
}
Header& get_header( void )
Header* get_header( void )
{
return *( reinterpret_cast< Header* >( Data ) - 1 );
return rcast( Header*, Data ) - 1 ;
}
bool grow( uw min_capacity )
{
Header& header = get_header();
Header& header = * get_header();
uw new_capacity = grow_formula( header.Capacity );
if ( new_capacity < min_capacity )
new_capacity = 8;
new_capacity = min_capacity;
return set_capacity( new_capacity );
}
uw num( void )
{
return get_header().Num;
return get_header()->Num;
}
bool pop( void )
{
Header& header = get_header();
Header& header = * get_header();
ZPL_ASSERT( header.Num > 0 );
header.Num--;
@ -425,7 +593,7 @@ struct TArray
void remove_at( uw idx )
{
Header* header = &get_header();
Header* header = get_header();
ZPL_ASSERT( idx < header->Num );
mem_move( Data + idx, Data + idx + 1, sizeof( Type ) * ( header->Num - idx - 1 ) );
@ -434,7 +602,7 @@ struct TArray
bool reserve( uw new_capacity )
{
Header& header = get_header();
Header& header = * get_header();
if ( header.Capacity < new_capacity )
return set_capacity( new_capacity );
@ -444,21 +612,21 @@ struct TArray
bool resize( uw num )
{
Header& header = get_header();
Header* header = get_header();
if ( num > header.Capacity )
if ( header->Capacity < num )
{
if ( ! grow( header.Capacity ) )
if ( ! grow( num ) )
return false;
}
header.Num = num;
header->Num = num;
return true;
}
bool set_capacity( uw new_capacity )
{
Header& header = get_header();
Header& header = * get_header();
if ( new_capacity == header.Capacity )
return true;
@ -474,13 +642,11 @@ struct TArray
mem_move( new_header, &header, sizeof( Header ) + sizeof( Type ) * header.Num );
new_header->Allocator = header.Allocator;
new_header->Num = header.Num;
new_header->Capacity = new_capacity;
zpl::free( header.Allocator, &header );
gen::free( header.Allocator, &header );
Data = ( Type* )new_header + 1;
Data = rcast( Type*, new_header + 1);
return true;
}
@ -495,12 +661,23 @@ struct TArray
{
return Data;
}
};
// TODO : Use it.
template<typename Type>
struct THashTable
{
// For-range based support
Type* begin()
{
return Data;
}
Type* end()
{
return Data + get_header()->Num;
}
};
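// Illustrative sketch (not part of this commit) of the for-range support added above:
//   Array<int> values = Array<int>::init( heap() );
//   values.append( 1 );
//   values.append( 2 );
//   for ( int value : values )
//       log_fmt( "%d\n", value );
//   values.free();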
template<typename Type>
struct HashTable
{
struct FindResult
{
sw HashIndex;
@ -516,12 +693,12 @@ struct THashTable
};
static
THashTable init( AllocatorInfo allocator )
HashTable init( AllocatorInfo allocator )
{
THashTable<Type> result = {0};
HashTable<Type> result = { { nullptr }, { nullptr } };
result.Hashes.init( allocator );
result.Entries.init( allocator );
result.Hashes = Array<sw>::init( allocator );
result.Entries = Array<Entry>::init( allocator );
return result;
}
@ -537,16 +714,16 @@ struct THashTable
void destroy( void )
{
if ( Hashes )
if ( Hashes && Hashes.get_header()->Capacity )
Hashes.free();
if ( Entries )
if ( Entries && Entries.get_header()->Capacity )
Entries.free();
}
Type* get( u64 key )
{
sw idx = find( key ).EntryIndex;
if ( idx > 0 )
if ( idx >= 0 )
return & Entries[ idx ].Value;
return nullptr;
@ -578,7 +755,7 @@ struct THashTable
void grow()
{
sw new_num = TArray<Entry>::grow_formula( Entries.num() )
sw new_num = Array<Entry>::grow_formula( Entries.num() );
rehash( new_num );
}
@ -587,11 +764,13 @@ struct THashTable
sw idx;
sw last_added_index;
THashTable<Type> new_ht = init( Hashes.get_header().Allocator );
HashTable<Type> new_ht = init( Hashes.get_header()->Allocator );
new_ht.Hashes.resize( new_num );
new_ht.Entries.reserve( new_ht.Hashes.num() );
Array<sw>::Header* hash_header = new_ht.Hashes.get_header();
for ( idx = 0; idx < new_ht.Hashes.num(); ++idx )
new_ht.Hashes[ idx ] = -1;
@ -618,13 +797,10 @@ struct THashTable
new_ht.Entries[ last_added_index ].Value = entry.Value;
}
// *this = new_ht;
// old_ht.destroy();
destroy();
Hashes = new_ht.Hashes;
Entries = new_ht.Entries;
// Hashes = new_ht.Hashes;
// Entries = new_ht.Entries;
*this = new_ht;
}
void rehash_fast()
@ -704,10 +880,10 @@ struct THashTable
return -1;
}
TArray< sw> Hashes;
TArray< Entry> Entries;
Array< sw> Hashes;
Array< Entry> Entries;
protected:
protected:
sw add_entry( u64 key )
{
@ -745,12 +921,12 @@ protected:
{
return 0.75f * Hashes.num() < Entries.num();
}
};
};
#pragma pop_macro("template")
#pragma endregion Containers
#pragma pop_macro("template")
#pragma endregion Containers
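// Illustrative sketch (not part of this commit) of HashTable usage, mirroring token_fmt_va elsewhere in this diff:
//   HashTable<TokEntry> map = HashTable<TokEntry>::init( Memory::GlobalAllocator );
//   u32 key = crc32( token, str_len( token, 32 ) );
//   map.set( key, entry );
//   TokEntry* value = map.get( key ); // nullptr when the key is absent
//   map.destroy();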
#pragma region String
#pragma region String
// Constant string with length.
struct StrC
{
@ -1021,7 +1197,7 @@ protected:
Header& header = get_header();
zpl::free( header.Allocator, & header );
gen::free( header.Allocator, & header );
}
Header& get_header()
@ -1065,6 +1241,21 @@ protected:
return trim( " \t\r\n\v\f" );
}
// For-range support
char* begin()
{
return Data;
}
char* end()
{
Header const&
header = * rcast( Header const*, Data - sizeof( Header ));
return Data + header.Length;
}
operator bool()
{
return Data;
@ -1126,11 +1317,231 @@ protected:
}
};
static_assert( sizeof( String_POD ) == sizeof( String ), "String is not a POD" );
#pragma endregion String
#pragma endregion String
#pragma region File Handling
namespace Memory
{
typedef u32 FileMode;
enum FileModeFlag
{
EFileMode_READ = bit( 0 ),
EFileMode_WRITE = bit( 1 ),
EFileMode_APPEND = bit( 2 ),
EFileMode_RW = bit( 3 ),
ZPL_FILE_MODES = EFileMode_READ | EFileMode_WRITE | EFileMode_APPEND | EFileMode_RW,
};
// NOTE: Only used internally and for the file operations
enum SeekWhenceType
{
ESeekWhence_BEGIN = 0,
ESeekWhence_CURRENT = 1,
ESeekWhence_END = 2,
};
enum FileError
{
EFileError_NONE,
EFileError_INVALID,
EFileError_INVALID_FILENAME,
EFileError_EXISTS,
EFileError_NOT_EXISTS,
EFileError_PERMISSION,
EFileError_TRUNCATION_FAILURE,
EFileError_NOT_EMPTY,
EFileError_NAME_TOO_LONG,
EFileError_UNKNOWN,
};
union FileDescriptor
{
void* p;
sptr i;
uptr u;
};
typedef struct FileOperations FileOperations;
#define ZPL_FILE_OPEN_PROC( name ) FileError name( FileDescriptor* fd, FileOperations* ops, FileMode mode, char const* filename )
#define ZPL_FILE_READ_AT_PROC( name ) b32 name( FileDescriptor fd, void* buffer, sw size, s64 offset, sw* bytes_read, b32 stop_at_newline )
#define ZPL_FILE_WRITE_AT_PROC( name ) b32 name( FileDescriptor fd, void const* buffer, sw size, s64 offset, sw* bytes_written )
#define ZPL_FILE_SEEK_PROC( name ) b32 name( FileDescriptor fd, s64 offset, SeekWhenceType whence, s64* new_offset )
#define ZPL_FILE_CLOSE_PROC( name ) void name( FileDescriptor fd )
typedef ZPL_FILE_OPEN_PROC( file_open_proc );
typedef ZPL_FILE_READ_AT_PROC( FileReadProc );
typedef ZPL_FILE_WRITE_AT_PROC( FileWriteProc );
typedef ZPL_FILE_SEEK_PROC( FileSeekProc );
typedef ZPL_FILE_CLOSE_PROC( FileCloseProc );
struct FileOperations
{
FileReadProc* read_at;
FileWriteProc* write_at;
FileSeekProc* seek;
FileCloseProc* close;
};
extern FileOperations const default_file_operations;
typedef u64 FileTime;
enum DirType
{
ZPL_DIR_TYPE_FILE,
ZPL_DIR_TYPE_FOLDER,
ZPL_DIR_TYPE_UNKNOWN,
};
struct DirInfo;
struct DirEntry
{
char const* FileName;
DirInfo* Info;
u8 Type;
};
struct DirInfo
{
char const* FullPath;
DirEntry* Entries; // zpl_array
// Internals
char** Filenames; // zpl_array
String Buffer;
};
struct FileInfo
{
FileOperations Ops;
FileDescriptor FD;
b32 IsTemp;
char const* Filename;
FileTime LastWriteTime;
DirEntry* Dir;
};
/**
* Closes the file
* @param file
*/
FileError file_close( FileInfo* file );
/**
* Returns the currently opened file's name
* @param file
*/
inline
char const* file_name( FileInfo* file )
{
return file->Filename ? file->Filename : "";
}
/**
* Opens a file using a specified mode
* @param file
* @param mode Access mode to use
* @param filename
*/
FileError file_open_mode( FileInfo* file, FileMode mode, char const* filename );
/**
* Seeks the file cursor from the beginning of file to a specific position
* @param file
* @param offset Offset to seek to
*/
ZPL_DEF_INLINE s64 file_seek( FileInfo* file, s64 offset );
/**
* Returns the length from the beginning of the file we've read so far
* @param file
* @return Our current position in file
*/
ZPL_DEF_INLINE s64 file_tell( FileInfo* file );
/**
* Writes to a file
* @param file
* @param buffer Buffer to read from
* @param size Size to read
*/
b32 file_write( FileInfo* file, void const* buffer, sw size );
/**
* Writes to file at a specific offset
* @param file
* @param buffer Buffer to read from
* @param size Size to write
* @param offset Offset to write to
*/
ZPL_DEF_INLINE b32 file_write_at( FileInfo* file, void const* buffer, sw size, s64 offset );
/**
* Writes to file safely
* @param file
* @param buffer Buffer to read from
* @param size Size to write
* @param offset Offset to write to
* @param bytes_written How much data we've actually written
*/
ZPL_DEF_INLINE b32 file_write_at_check( FileInfo* file, void const* buffer, sw size, s64 offset, sw* bytes_written );
ZPL_IMPL_INLINE s64 file_seek( FileInfo* f, s64 offset )
{
s64 new_offset = 0;
if ( ! f->Ops.read_at )
f->Ops = default_file_operations;
f->Ops.seek( f->FD, offset, ESeekWhence_BEGIN, &new_offset );
return new_offset;
}
ZPL_IMPL_INLINE s64 file_tell( FileInfo* f )
{
s64 new_offset = 0;
if ( ! f->Ops.read_at )
f->Ops = default_file_operations;
f->Ops.seek( f->FD, 0, ESeekWhence_CURRENT, &new_offset );
return new_offset;
}
ZPL_IMPL_INLINE b32 file_write( FileInfo* f, void const* buffer, sw size )
{
s64 cur_offset = file_tell( f );
b32 result = file_write_at( f, buffer, size, file_tell( f ) );
file_seek( f, cur_offset + size );
return result;
}
ZPL_IMPL_INLINE b32 file_write_at( FileInfo* f, void const* buffer, sw size, s64 offset )
{
return file_write_at_check( f, buffer, size, offset, NULL );
}
ZPL_IMPL_INLINE b32 file_write_at_check( FileInfo* f, void const* buffer, sw size, s64 offset, sw* bytes_written )
{
if ( ! f->Ops.read_at )
f->Ops = default_file_operations;
return f->Ops.write_at( f->FD, buffer, size, offset, bytes_written );
}
void dirinfo_free( DirInfo* dir );
#pragma endregion File Handling
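// Illustrative sketch (not part of this commit) of the file API above; the filename and buffer are hypothetical:
//   FileInfo file;
//   FileError error = file_open_mode( & file, EFileMode_WRITE, "gen/example.hpp" );
//   if ( error == EFileError_NONE )
//   {
//       file_write( & file, content, length );
//       file_close( & file );
//   }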
namespace Memory
{
// NOTE: This limits the size of the string that can be read from a file or generated to 10 megs.
// If you are generating a string larger than this, increase the size of the bucket here.
constexpr uw BucketSize = megabytes(10);
@ -1143,12 +1554,12 @@ namespace Memory
void setup();
void cleanup();
}
}
inline
sw log_fmt(char const* fmt, ...)
{
inline
sw log_fmt(char const* fmt, ...)
{
sw res;
va_list va;
@ -1157,29 +1568,32 @@ sw log_fmt(char const* fmt, ...)
va_end(va);
return res;
}
}
inline
sw fatal(char const* fmt, ...)
{
inline
sw fatal(char const* fmt, ...)
{
local_persist thread_local
char buf[ZPL_PRINTF_MAXLEN] = { 0 };
va_list va;
#if Build_Debug
#if Build_Debug
va_start(va, fmt);
str_fmt_va(buf, ZPL_PRINTF_MAXLEN, fmt, va);
va_end(va);
assert_crash(buf);
return -1;
#else
#else
va_start(va, fmt);
str_fmt_out_err_va( fmt, va);
va_end(va);
exit(1);
return -1;
#endif
#endif
}
// gen namespace
}


@ -7,15 +7,15 @@
namespace gen
{
ZPL_TABLE_DEFINE( StringTable, str_tbl_, String );
// ZPL_TABLE_DEFINE( StringTable, str_tbl_, String );
namespace StaticData
{
global Array(Pool) CodePools = nullptr;
global Array(Arena) CodeEntriesArenas = nullptr;
global Array(Arena) StringArenas = nullptr;
global Array< Pool > CodePools = { nullptr };
global Array< Arena > CodeEntriesArenas = { nullptr };
global Array< Arena > StringArenas = { nullptr };
global StringTable StringMap;
global StringTable StringCache;
global AllocatorInfo Allocator_DataArrays = heap();
global AllocatorInfo Allocator_CodePool = heap();
@ -837,12 +837,27 @@ namespace gen
result.append_fmt( "%s %s", entry( 0 )->to_string(), Name );
AST* type = entry( 0);
AST* type_arr = type->entry( 0 );
// TODO : This probably needs to be an iteration for all entries of type.
if ( type->num_entries() && type_arr->Type == ECode::Untyped )
result.append_fmt( "[%s]", type_arr->to_string() );
if ( entry( idx ) )
result.append_fmt( " = %s;", entry( idx )->to_string() );
break;
}
AST* type = entry( 0);
AST* type_arr = type->entry( 0 );
// TODO : This probably needs to be an iteration for all entries of type.
if ( type->num_entries() && type_arr->Type == ECode::Untyped )
result.append_fmt( "%s %s[%s];", type->to_string(), Name, type_arr->to_string() );
else
result.append_fmt( "%s %s;", entry( 0 )->to_string(), Name );
}
break;
@ -1033,48 +1048,52 @@ namespace gen
// Setup the arrays
{
if (! array_init_reserve( CodePools, Allocator_DataArrays, InitSize_DataArrays ) )
CodePools = Array<Pool>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
if ( CodePools == nullptr )
fatal( "gen::init: Failed to initialize the CodePools array" );
if ( ! array_init_reserve( CodeEntriesArenas, Allocator_DataArrays, InitSize_DataArrays ) )
CodeEntriesArenas = Array<Arena>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
if ( CodeEntriesArenas == nullptr )
fatal( "gen::init: Failed to initialize the CodeEntriesPools array" );
if ( ! array_init_reserve( StringArenas, Allocator_DataArrays, InitSize_DataArrays ) )
StringArenas = Array<Arena>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
if ( StringArenas == nullptr )
fatal( "gen::init: Failed to initialize the StringArenas array" );
}
// Setup the code pool and code entries arena.
{
Pool code_pool;
pool_init( & code_pool, Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.physical_start == nullptr )
if ( code_pool.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the code pool" );
array_append( CodePools, code_pool );
CodePools.append( code_pool );
Arena code_entires_arena;
arena_init_from_allocator( & code_entires_arena, Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
Arena code_entires_arena = Arena::init_from_allocator( Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
if ( code_entires_arena.physical_start == nullptr )
if ( code_entires_arena.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the code entries arena" );
array_append( CodeEntriesArenas, code_entires_arena );
CodeEntriesArenas.append( code_entires_arena );
Arena string_arena;
arena_init_from_allocator( & string_arena, Allocator_StringArena, SizePer_StringArena );
Arena string_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( string_arena.physical_start == nullptr )
if ( string_arena.PhysicalStart == nullptr )
fatal( "gen::init: Failed to initialize the string arena" );
array_append( StringArenas, string_arena );
StringArenas.append( string_arena );
}
// Setup the hash tables
{
str_tbl_init ( & StringMap, Allocator_StringTable );
if ( StringMap.entries == nullptr )
fatal( "gen::init: Failed to initialize the StringMap");
StringCache = StringTable::init( Allocator_StringTable );
if ( StringCache.Entries == nullptr )
fatal( "gen::init: Failed to initialize the StringCache");
}
Code::Global = make_code();
@ -1195,41 +1214,40 @@ namespace gen
using namespace StaticData;
s32 index = 0;
s32 left = array_count( CodePools );
s32 left = CodePools.num();
do
{
Pool* code_pool = & CodePools[index];
pool_free( code_pool );
code_pool->free();
index++;
}
while ( left--, left );
index = 0;
left = array_count( CodeEntriesArenas );
left = CodeEntriesArenas.num();
do
{
Arena* code_entries_arena = & CodeEntriesArenas[index];
arena_free( code_entries_arena );
code_entries_arena->free();
index++;
}
while ( left--, left );
index = 0;
left = array_count( StringArenas );
left = StringArenas.num();
do
{
Arena* string_arena = & StringArenas[index];
arena_free( string_arena );
string_arena->free();
index++;
}
while ( left--, left );
str_tbl_destroy( & StringMap );
// type_tbl_destroy( & TypeMap );
StringCache.destroy();
array_free( CodePools );
array_free( CodeEntriesArenas );
array_free( StringArenas );
CodePools.free();
CodeEntriesArenas.free();
StringArenas.free();
}
inline
@ -1237,29 +1255,30 @@ namespace gen
{
using namespace StaticData;
Arena* last = & array_back( StringArenas );
Arena& last = StringArenas.back();
if ( last->total_allocated + str_length > last->total_size )
if ( last.TotalUsed + str_length > last.TotalSize )
{
Arena new_arena;
arena_init_from_allocator( & new_arena, Allocator_StringArena, SizePer_StringArena );
Arena new_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( ! array_append( StringArenas, new_arena ) )
if ( ! StringArenas.append( new_arena ) )
fatal( "gen::get_string_allocator: Failed to allocate a new string arena" );
last = & array_back( StringArenas );
last = StringArenas.back();
}
return arena_allocator( last );
return last;
}
// Will either make or retrieve a code string.
StringCached get_cached_string( StrC str )
{
using namespace StaticData;
s32 hash_length = str.Len > kilobytes(1) ? kilobytes(1) : str.Len;
u32 key = crc32( str.Ptr, hash_length );
{
String* result = str_tbl_get( & StaticData::StringMap, key );
StringCached* result = StringCache.get( key );
if ( result )
return * result;
@ -1267,7 +1286,7 @@ namespace gen
String result = String::make( get_string_allocator( str.Len ), str );
str_tbl_set( & StaticData::StringMap, key, result );
StringCache.set( key, result );
return result;
}
@ -1279,33 +1298,32 @@ namespace gen
{
using namespace StaticData;
AllocatorInfo allocator = { nullptr, nullptr };
AllocatorInfo allocator = CodePools.back();
s32 index = 0;
s32 left = array_count( CodePools );
s32 left = CodePools.num();
do
{
if ( CodePools[index].free_list != nullptr )
if ( CodePools[index].FreeList != nullptr )
{
allocator = zpl::pool_allocator( & CodePools[index] );
allocator = CodePools[index];
break;
}
index++;
}
while ( left--, left );
if ( allocator.data == nullptr )
if ( allocator.Data == nullptr )
{
Pool code_pool;
pool_init( & code_pool, Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.physical_start == nullptr )
if ( code_pool.PhysicalStart == nullptr )
fatal( "gen::make_code: Failed to allocate a new code pool - CodePool allcoator returned nullptr." );
if ( ! array_append( CodePools, code_pool ) )
if ( ! CodePools.append( code_pool ) )
fatal( "gen::make_code: Failed to allocate a new code pool - CodePools failed to append new pool." );
allocator = pool_allocator( CodePools );
allocator = * CodePools;
}
Code result { rcast( AST*, alloc( allocator, sizeof(AST) )) };
@ -1323,37 +1341,34 @@ namespace gen
return result;
}
Array(AST*) make_code_entries()
Array< AST* > make_code_entries()
{
using namespace StaticData;
AllocatorInfo allocator = { nullptr, nullptr };
s32 index = 0;
s32 left = array_count( CodeEntriesArenas );
s32 left = CodeEntriesArenas.num();
do
{
if ( arena_size_remaining( & CodeEntriesArenas[index], ZPL_DEFAULT_MEMORY_ALIGNMENT) >= InitSize_CodeEntiresArray )
allocator = arena_allocator( & CodeEntriesArenas[index] );
if ( CodeEntriesArenas[index].size_remaining( ZPL_DEFAULT_MEMORY_ALIGNMENT) >= InitSize_CodeEntiresArray )
allocator = CodeEntriesArenas[index];
index++;
}
while( left--, left );
if ( allocator.data == nullptr )
if ( allocator.Data == nullptr )
{
Arena arena;
arena_init_from_allocator( & arena, Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
Arena arena = Arena::init_from_allocator( Allocator_CodeEntriesArena, SizePer_CodeEntriresArena );
if ( arena.physical_start == nullptr )
if ( arena.PhysicalStart == nullptr )
fatal( "gen::make_code: Failed to allocate a new code entries arena - CodeEntriesArena allcoator returned nullptr." );
allocator = arena_allocator( & arena );
array_append( CodeEntriesArenas, arena );
allocator = arena;
CodeEntriesArenas.append( arena );
}
Array(AST*) entry_array;
array_init( entry_array, allocator );
Array< AST* > entry_array = Array< AST* >::init( allocator );
return entry_array;
}
@ -3357,12 +3372,12 @@ namespace gen
struct TokArray
{
Array(Token) Arr;
Array<Token> Arr;
s32 Idx;
bool __eat( TokType type, char const* context )
{
if ( array_count(Arr) - Idx <= 0 )
if ( Arr.num() - Idx <= 0 )
{
log_failure( "gen::%s: No tokens left", context );
return Code::Invalid;
@ -3393,7 +3408,7 @@ namespace gen
Token* next()
{
return Idx + 1 < array_count(Arr) ? &Arr[Idx + 1] : nullptr;
return Idx + 1 < Arr.num() ? &Arr[Idx + 1] : nullptr;
}
};
@ -3423,17 +3438,18 @@ namespace gen
}
do_once_start
arena_init_from_allocator( & LexAllocator, heap(), megabytes(10) );
// TODO : Use the global memory allocator for this...
LexAllocator = Arena::init_from_allocator( heap(), megabytes(10) );
if ( LexAllocator.physical_start == nullptr )
if ( LexAllocator.PhysicalStart == nullptr )
{
log_failure( "gen::lex: failed to allocate memory for parsing constructor's lexer");
return { nullptr, 0 };
return { { nullptr }, 0 };
}
do_once_end
local_persist thread_local
Array(Token) Tokens = nullptr;
Array<Token> Tokens = { nullptr };
s32 left = content.Len -1;
char const* scanner = content.Ptr;
@ -3445,13 +3461,13 @@ namespace gen
if ( left <= 0 )
{
log_failure( "gen::lex: no tokens found (only whitespace provided)" );
return { nullptr, 0 };
return { { nullptr }, 0 };
}
if ( Tokens )
array_clear( Tokens );
Tokens.clear();
array_init_reserve( Tokens, arena_allocator( & LexAllocator), content.Len / 8 );
Tokens = Array<Token>::init_reserve( LexAllocator, content.Len / 8 );
while (left )
{
@ -3851,7 +3867,7 @@ namespace gen
if ( token.Type != TokType::Invalid )
{
array_append( Tokens, token );
Tokens.append( token );
continue;
}
@ -3864,13 +3880,13 @@ namespace gen
}
token.Type = type;
array_append( Tokens, token );
Tokens.append( token );
}
if ( array_count(Tokens) == 0 )
if ( Tokens.num() == 0 )
{
log_failure( "Failed to lex any tokens" );
return { nullptr, 0 };
return { { nullptr }, 0 };
}
return { Tokens, 0 };
@ -3898,7 +3914,7 @@ namespace gen
# define currtok toks.current()
# define prevtok toks.previous()
# define eat( Type_ ) toks.__eat( Type_, context )
# define left ( array_count(toks.Arr) - toks.Idx )
# define left ( toks.Arr.num() - toks.Idx )
# define check( Type_ ) ( left && currtok.Type == Type_ )
#pragma endregion Helper Macros
@ -3948,9 +3964,11 @@ namespace gen
while ( left && currtok.Type != TokType::BraceSquare_Close )
{
untyped_tok.Length = ( (sptr)currtok.Text + currtok.Length ) - (sptr)untyped_tok.Text;
eat( currtok.Type );
}
untyped_tok.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)untyped_tok.Text;
Code array_expr = untyped_str( untyped_tok );
if ( left == 0 )
@ -5730,7 +5748,7 @@ namespace gen
s32 num_specifiers = 0;
Token name = { nullptr, 0, TokType::Invalid };
Token func_sig = { currtok.Text, 0, TokType::Invalid };
Token brute_sig = { currtok.Text, 0, TokType::Invalid };
while ( left && tok_is_specifier( currtok ) )
{
@ -5779,6 +5797,29 @@ namespace gen
name = parse_identifier( toks, context );
if ( ! name )
return Code::Invalid;
// Probably dealing with a templated symbol
if ( currtok.Type == TokType::Operator && currtok.Text[0] == '<' && currtok.Length == 1 )
{
eat( TokType::Operator );
s32 level = 0;
while ( left && ( currtok.Text[0] != '>' || level > 0 ))
{
if ( currtok.Text[0] == '<' )
level++;
if ( currtok.Text[0] == '>' )
level--;
eat( currtok.Type );
}
eat( TokType::Operator );
// Extend length of name to last token
name.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)name.Text;
}
}
while ( left && tok_is_specifier( currtok ) )
@ -5837,7 +5878,7 @@ namespace gen
eat(TokType::Capture_End);
func_sig.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)func_sig.Text;
brute_sig.Length = ( (sptr)prevtok.Text + prevtok.Length ) - (sptr)brute_sig.Text;
}
using namespace ECode;
@ -5846,10 +5887,10 @@ namespace gen
result = make_code();
result->Type = Typename;
if ( func_sig.Length > 0 )
if ( brute_sig.Length > 0 )
{
// Bruteforce all tokens together.
name = func_sig;
name = brute_sig;
}
else
{
@ -6162,17 +6203,15 @@ namespace gen
sw Length;
};
ZPL_TABLE( static, TokMap, tokmap_, TokEntry )
sw token_fmt_va( char* buf, uw buf_size, char const* fmt, s32 num_tokens, va_list va )
{
char const* buf_begin = buf;
sw remaining = buf_size;
TokMap tok_map;
HashTable<TokEntry> tok_map;
{
// TODO : Switch this to use an arena that makes use of the stack (cap the size of the token table to around 4096 bytes)
tokmap_init( & tok_map, Memory::GlobalAllocator );
tok_map = HashTable<TokEntry>::init( Memory::GlobalAllocator );
s32 left = num_tokens;
@ -6189,7 +6228,7 @@ namespace gen
u32 key = crc32( token, str_len(token, 32) );
tokmap_set( & tok_map, key, entry );
tok_map.set( key, entry );
}
}
@ -6224,7 +6263,7 @@ namespace gen
char const* token = fmt + 1;
u32 key = crc32( token, tok_len );
TokEntry* value = tokmap_get( & tok_map, key );
TokEntry* value = tok_map.get( key );
if ( value )
{
@ -6254,7 +6293,7 @@ namespace gen
}
}
tokmap_clear( & tok_map );
tok_map.clear();
sw result = buf_size - remaining + 1;


@ -378,7 +378,7 @@ namespace gen
#pragma region Data Structures
// Implements basic string interning. Data structure is based off the ZPL Hashtable.
ZPL_TABLE_DECLARE( ZPL_EXTERN, StringTable, str_tbl_, String );
using StringTable = HashTable<String const>;
// Represents strings cached with the string table.
// Should never be modified, if changed string is desired, cache_string( str ) another.
@ -428,7 +428,7 @@ namespace gen
s32 num_entries()
{
return DynamicEntries ? array_count( ArrDyn ) : StaticIndex;
return DynamicEntries ? ArrDyn.num() : StaticIndex;
}
// Parameter
@ -548,7 +548,7 @@ namespace gen
# define Using_AST_POD \
union { \
AST* ArrStatic[AST::ArrS_Cap]; \
Array(AST*) ArrDyn; \
Array< AST* > ArrDyn; \
StringCached Content; \
SpecifierT ArrSpecs[AST::ArrSpecs_Cap]; \
}; \
@ -699,7 +699,7 @@ namespace gen
// This provides a fresh Code AST array for the entries field of the AST.
// This is done separately from the regular CodePool allocator.
Array(AST*) make_code_entries();
Array< AST* > make_code_entries();
// Set these before calling gen's init() procedure.
// Data
@ -1071,7 +1071,7 @@ namespace gen
other->duplicate() : other;
if (DynamicEntries)
array_append( ArrDyn, to_add );
ArrDyn.append( to_add );
else
{
@ -1087,11 +1087,11 @@ namespace gen
s32 index = 0;
do
{
array_append( ArrDyn, ArrStatic[index] );
ArrDyn.append( ArrStatic[index] );
}
while ( StaticIndex--, StaticIndex );
array_append( ArrDyn, to_add );
ArrDyn.append( to_add );
}
}
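Pieced together, the migrated add_entry path reads roughly as follows — a reconstruction from the fragments above, where the duplicate-check condition, the index increment, and the allocator used for the spill are assumptions:

void add_entry( AST* other )
{
	// Assumed condition: an entry that is already parented gets duplicated first.
	AST* to_add = other->Parent ? other->duplicate() : other;

	if ( DynamicEntries )
		ArrDyn.append( to_add );
	else
	{
		if ( StaticIndex < ArrS_Cap )
			ArrStatic[ StaticIndex++ ] = to_add;
		else
		{
			// Spill the static buffer into a fresh dynamic array, then append.
			ArrDyn = Array< AST* >::init( Memory::GlobalAllocator ); // allocator assumed

			s32 index = 0;
			do
			{
				ArrDyn.append( ArrStatic[ index++ ] );
			}
			while ( StaticIndex--, StaticIndex );

			DynamicEntries = true;
			ArrDyn.append( to_add );
		}
	}
}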

View File

@ -300,16 +300,16 @@ struct GenArrayRequest
StrC Dependency;
StrC Type;
};
Array(GenArrayRequest) GenArrayRequests;
Array<GenArrayRequest> GenArrayRequests;
void gen__array_request( StrC type, StrC dep = {} )
{
do_once_start
array_init( GenArrayRequests, Memory::GlobalAllocator );
GenArrayRequests = Array<GenArrayRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenArrayRequests ); ++idx )
for ( sw idx = 0; idx < GenArrayRequests.num(); ++idx )
{
StrC const request_type = GenArrayRequests[ idx ].Type;
@ -321,7 +321,7 @@ void gen__array_request( StrC type, StrC dep = {} )
}
GenArrayRequest request = { dep, type };
array_append( GenArrayRequests, request );
GenArrayRequests.append( request );
}
#define gen_array( type ) gen__array_request( { txt_to_StrC(type) } )
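Call sites are untouched by the container swap; for illustration:

// Each request is de-duplicated by gen__array_request above.
gen_array( u32 );  // expands to: gen__array_request( { txt_to_StrC(u32) } )
gen_array( StrC );

gen_array_file();  // emits the generated Array code for every queued type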
@ -338,7 +338,7 @@ u32 gen_array_file()
gen_array_file.print( array_base );
GenArrayRequest* current = GenArrayRequests;
s32 left = array_count( GenArrayRequests );
s32 left = GenArrayRequests.num();
while (left--)
{
GenArrayRequest const& request = * current;

View File

@ -206,16 +206,16 @@ struct GenBufferRequest
StrC Type;
sw TypeSize;
};
Array(GenBufferRequest) GenBufferRequests;
Array<GenBufferRequest> GenBufferRequests;
void gen__buffer_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
array_init( GenBufferRequests, Memory::GlobalAllocator );
GenBufferRequests = Array<GenBufferRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenBufferRequests ); ++idx )
for ( sw idx = 0; idx < GenBufferRequests.num(); ++idx )
{
StrC const request_type = GenBufferRequests[ idx ].Type;
@ -227,7 +227,7 @@ void gen__buffer_request( StrC type, sw size, StrC dep = {} )
}
GenBufferRequest request = { dep, type, size};
array_append( GenBufferRequests, request );
GenBufferRequests.append( request );
}
#define gen_buffer( type ) gen__buffer_request( { txt_to_StrC(type) }, sizeof( type ))
@ -241,7 +241,7 @@ u32 gen_buffer_file()
gen_buffer_file.print( gen__buffer_base() );
GenBufferRequest* current = GenBufferRequests;
s32 left = array_count( GenBufferRequests );
s32 left = GenBufferRequests.num();
while (left--)
{
GenBufferRequest const& request = * current;

View File

@ -21,7 +21,7 @@ Code gen__hashtable_base()
return find_result;
}
Code gen__hashtable( StrC type, sw type_size )
Code gen__hashtable( StrC type )
{
static Code t_allocator_info = def_type( name(AllocatorInfo) );
@ -397,20 +397,19 @@ struct GenHashTableRequest
{
StrC Dependency;
StrC Type;
sw TypeSize;
};
Array(GenHashTableRequest) GenHashTableRequests;
Array<GenHashTableRequest> GenHashTableRequests;
void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
void gen__hashtable_request( StrC type, StrC dep = {} )
{
do_once_start
array_init( GenHashTableRequests, Memory::GlobalAllocator );
GenHashTableRequests = Array<GenHashTableRequest>::init( Memory::GlobalAllocator );
gen_array( sw );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenHashTableRequests ); ++idx )
for ( sw idx = 0; idx < GenHashTableRequests.num(); ++idx )
{
StrC const request_type = GenHashTableRequests[ idx ].Type;
@ -421,10 +420,10 @@ void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
return;
}
GenHashTableRequest request = { dep, type, size};
array_append( GenHashTableRequests, request );
GenHashTableRequest request = { dep, type };
GenHashTableRequests.append( request );
}
#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) }, sizeof( type ))
#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) } )
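With the element size now carried by the type itself, requests no longer thread sizeof through; an illustrative before/after:

// before: gen__hashtable_request( { txt_to_StrC(StrC) }, sizeof( StrC ) );
// after:
gen_hashtable( StrC ); // expands to: gen__hashtable_request( { txt_to_StrC(StrC) } )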
u32 gen_hashtable_file()
{
@ -439,12 +438,12 @@ u32 gen_hashtable_file()
gen_buffer_file.print( gen__hashtable_base());
GenHashTableRequest* current = GenHashTableRequests;
s32 left = array_count( GenHashTableRequests );
s32 left = GenHashTableRequests.num();
while (left--)
{
GenHashTableRequest const& request = * current;
Code generated_buffer = gen__hashtable( current->Type, current->TypeSize );
Code generated_buffer = gen__hashtable( current->Type );
if ( request.Dependency )
{

View File

@ -162,16 +162,16 @@ struct GenRingRequest
StrC Type;
sw TypeSize;
};
Array(GenRingRequest) GenRingRequests;
Array<GenRingRequest> GenRingRequests;
void gen__ring_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
array_init( GenRingRequests, Memory::GlobalAllocator );
GenRingRequests = Array<GenRingRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenRingRequests ); ++idx )
for ( sw idx = 0; idx < GenRingRequests.num(); ++idx )
{
StrC const request_type = GenRingRequests[ idx ].Type;
@ -186,7 +186,7 @@ void gen__ring_request( StrC type, sw size, StrC dep = {} )
gen__buffer_request( type, size, dep );
GenRingRequest request = { dep, type, size};
array_append( GenRingRequests, request );
GenRingRequests.append( request );
}
#define gen_ring( type ) gen__ring_request( { txt_to_StrC(type) }, sizeof( type ))
@ -201,7 +201,7 @@ u32 gen_ring_file()
// gen_ring_file.print( gen__ring_base() );
GenRingRequest* current = GenRingRequests;
s32 left = array_count( GenRingRequests );
s32 left = GenRingRequests.num();
while (left--)
{
GenRingRequest const& request = * current;

View File

@ -224,16 +224,16 @@ struct GenArrayRequest
StrC Dependency;
StrC Type;
};
Array(GenArrayRequest) GenArrayRequests;
Array<GenArrayRequest> GenArrayRequests;
void gen__array_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
array_init( GenArrayRequests, Memory::GlobalAllocator );
GenArrayRequests = Array<GenArrayRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenArrayRequests ); ++idx )
for ( sw idx = 0; idx < GenArrayRequests.num(); ++idx )
{
StrC const request_type = GenArrayRequests[ idx ].Type;
@ -245,7 +245,7 @@ void gen__array_request( StrC type, sw size, StrC dep = {} )
}
GenArrayRequest request = { dep, type };
array_append( GenArrayRequests, request );
GenArrayRequests.append( request );
}
#define gen_array( type ) gen__array_request( { txt_to_StrC(type) }, sizeof(type) )
@ -262,7 +262,7 @@ u32 gen_array_file()
gen_array_file.print( array_base );
GenArrayRequest* current = GenArrayRequests;
s32 left = array_count( GenArrayRequests );
s32 left = GenArrayRequests.num();
while (left--)
{
GenArrayRequest const& request = * current;

View File

@ -137,16 +137,16 @@ struct GenBufferRequest
StrC Dependency;
StrC Type;
};
Array(GenBufferRequest) GenBufferRequests;
Array<GenBufferRequest> GenBufferRequests;
void gen__buffer_request( StrC type, StrC dep = {} )
{
do_once_start
array_init( GenBufferRequests, Memory::GlobalAllocator );
GenBufferRequests = Array<GenBufferRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenBufferRequests ); ++idx )
for ( sw idx = 0; idx < GenBufferRequests.num(); ++idx )
{
StrC const request_type = GenBufferRequests[ idx ].Type;
@ -158,7 +158,7 @@ void gen__buffer_request( StrC type, StrC dep = {} )
}
GenBufferRequest request = { dep, type };
array_append( GenBufferRequests, request );
GenBufferRequests.append( request );
}
#define gen_buffer( type ) gen__buffer_request( { txt_to_StrC(type) } )
@ -172,7 +172,7 @@ u32 gen_buffer_file()
gen_buffer_file.print( gen__buffer_base() );
GenBufferRequest* current = GenBufferRequests;
s32 left = array_count( GenBufferRequests );
s32 left = GenBufferRequests.num();
while (left--)
{
GenBufferRequest const& request = * current;

View File

@ -290,18 +290,18 @@ struct GenHashTableRequest
StrC Type;
sw TypeSize;
};
Array(GenHashTableRequest) GenHashTableRequests;
Array<GenHashTableRequest> GenHashTableRequests;
void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
array_init( GenHashTableRequests, Memory::GlobalAllocator );
GenHashTableRequests = Array<GenHashTableRequest>::init( Memory::GlobalAllocator );
gen_array( sw );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenHashTableRequests ); ++idx )
for ( sw idx = 0; idx < GenHashTableRequests.num(); ++idx )
{
StrC const request_type = GenHashTableRequests[ idx ].Type;
@ -313,7 +313,7 @@ void gen__hashtable_request( StrC type, sw size, StrC dep = {} )
}
GenHashTableRequest request = { dep, type, size};
array_append( GenHashTableRequests, request );
GenHashTableRequests.append( request );
}
#define gen_hashtable( type ) gen__hashtable_request( { txt_to_StrC(type) }, sizeof( type ))
@ -329,7 +329,7 @@ u32 gen_hashtable_file()
gen_buffer_file.print( gen__hashtable_base());
GenHashTableRequest* current = GenHashTableRequests;
s32 left = array_count( GenHashTableRequests );
s32 left = GenHashTableRequests.num();
while (left--)
{
GenHashTableRequest const& request = * current;

View File

@ -102,16 +102,16 @@ struct GenRingRequest
StrC Dependency;
StrC Type;
};
Array(GenRingRequest) GenRingRequests;
Array<GenRingRequest> GenRingRequests;
void gen__ring_request( StrC type, sw size, StrC dep = {} )
{
do_once_start
array_init( GenRingRequests, Memory::GlobalAllocator );
GenRingRequests = Array<GenRingRequest>::init( Memory::GlobalAllocator );
do_once_end
// Make sure we don't already have a request for the type.
for ( sw idx = 0; idx < array_count( GenRingRequests ); ++idx )
for ( sw idx = 0; idx < GenRingRequests.num(); ++idx )
{
StrC const request_type = GenRingRequests[ idx ].Type;
@ -126,7 +126,7 @@ void gen__ring_request( StrC type, sw size, StrC dep = {} )
gen__buffer_request( type, dep );
GenRingRequest request = { dep, type };
array_append( GenRingRequests, request );
GenRingRequests.append( request );
}
#define gen_ring( type ) gen__ring_request( { txt_to_StrC(type) }, sizeof( type ))
@ -141,7 +141,7 @@ u32 gen_ring_file()
// gen_ring_file.print( gen__ring_base() );
GenRingRequest* current = GenRingRequests;
s32 left = array_count( GenRingRequests );
s32 left = GenRingRequests.num();
while (left--)
{
GenRingRequest const& request = * current;

113
test/SOA.hpp Normal file
View File

@ -0,0 +1,113 @@
#pragma once
#ifdef gen_time
#include "gen.hpp"
using namespace gen;
Code gen_SOA( Code struct_def, bool use_dynamic = false )
{
StrC name;
name.Ptr = str_fmt_buf( "SOA_%s", (char const*) struct_def->Name );
name.Len = str_len( name.Ptr );
Code
soa_entry = { struct_def->duplicate() };
soa_entry->Name = get_cached_string( name(Entry) );
Array<Code> vars = Array<Code>::init( Memory::GlobalAllocator );
Code soa = def_struct( name, def_struct_body( 1, soa_entry ) );
{
Code body = struct_def.body();
for ( s32 idx = 0; idx < body->num_entries(); idx++ )
{
Code struct_mem = { body->entry( idx ) };
if ( struct_mem->Type == ECode::Variable )
{
Code var_type = { struct_mem->entry(0) };
Code entry_arr = { nullptr };
if ( use_dynamic )
{
entry_arr = parse_variable( token_fmt( "Array<<type>> <name>;", 2
, "type", (char const*)var_type->Name
, "name", (char const*)struct_mem->Name )
);
}
else
{
entry_arr = parse_variable( token_fmt( "<type> <name>[100];", 2
, "type", (char const*)var_type->Name
, "name", (char const*)struct_mem->Name )
);
}
vars.append( entry_arr );
soa.body()->add_entry( entry_arr );
}
}
}
Code make;
{
make = parse_function( token_fmt(
txt(
static
<SOA_Type> make( AllocatorInfo allocator )
{
<SOA_Type> soa = {};
}
),
1, "SOA_Type", (char const*)name
));
if ( use_dynamic )
{
for ( s32 idx = 0; idx < vars.num(); idx++ )
{
Code member = vars[idx];
Code arr_init = def_execution( token_fmt( "soa.<var_name> = <var_type>::init( allocator );", 2
, "var_name", (char const*)member->Name
, "var_type", (char const*)member->entry(0)->Name
));
make.body()->add_entry( arr_init );
}
}
make.body()->add_entry( def_execution( code( return soa; ) ));
}
Code get;
{
get = parse_function( code(
Entry get( s32 idx )
{
}
));
String content = String::make( Memory::GlobalAllocator, "return\n{\n" );
for ( s32 idx = 0; idx < vars.num(); idx ++ )
{
Code member = vars[idx];
content.append_fmt( token_fmt( "<var_name>[idx],", 1
, "var_name", (char const*)member->Name
));
}
content.append( "};" );
Code ret = def_execution( content );
get.body()->add_entry( ret );
}
soa.body()->add_entry( make );
soa.body()->add_entry( get );
return soa;
}
#endif
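For a sense of the result, gen_SOA over the TestStruct case in gen_main below (use_dynamic = true) should emit roughly the following — a hand-written approximation, not captured generator output:

struct SOA_TestStruct
{
	struct Entry
	{
		u8  A;
		u16 B;
		u32 C;
		u64 D;
	};

	Array<u8>  A;
	Array<u16> B;
	Array<u32> C;
	Array<u64> D;

	static SOA_TestStruct make( AllocatorInfo allocator )
	{
		SOA_TestStruct soa = {};
		soa.A = Array<u8>::init( allocator );
		soa.B = Array<u16>::init( allocator );
		soa.C = Array<u32>::init( allocator );
		soa.D = Array<u64>::init( allocator );
		return soa;
	}

	Entry get( s32 idx )
	{
		return
		{
			A[idx],
			B[idx],
			C[idx],
			D[idx],
		};
	}
};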

View File

@ -4,6 +4,7 @@
#include "Parsed\HashTable.Parsed.hpp"
#include "Parsed\Ring.Parsed.hpp"
#include "Parsed\Sanity.Parsed.hpp"
#include "SOA.hpp"
#ifdef gen_time
@ -34,6 +35,31 @@ int gen_main()
gen_hashtable_file();
gen_ring_file();
Builder soa_test; soa_test.open( "SOA.gen.hpp" );
soa_test.print( parse_using( code(
using u16 = unsigned short;
)));
soa_test.print( def_include( StrC::from("Bloat.hpp")));
soa_test.print( def_using_namespace( name(gen) ) );
soa_test.print( gen_SOA(
parse_struct( code(
struct TestStruct
{
u8 A;
u16 B;
u32 C;
u64 D;
};
)),
true
));
soa_test.write();
gen::deinit();
Memory::cleanup();
return 0;