Compare commits

...

7 Commits

Author  SHA1        Message  Date
Ed_     31a3609b28  some fixes to c's fixed_arena gen  2024-11-30 23:48:14 -05:00
Ed_     fbdb870986  Finished first pass reviewing memory.hpp for C lib generation  2024-11-30 23:38:27 -05:00
Ed_     6d04165b96  Reduce cpp features usage of Array container (almost ready to be inter-operable with C)  2024-11-30 18:54:19 -05:00
Ed_     cc245cc263  new files  2024-11-30 17:22:06 -05:00
Ed_     06deb1e836  memory.hpp no longer uses memory mappings by default  2024-11-30 17:18:49 -05:00
Ed_     5527a27f7b  prepare c_library meta-program a bit  2024-11-30 16:54:03 -05:00
Ed_     a67fdef20a  dir restructuring: just making it more organized (gen_ prefix for library generation meta-programs)  2024-11-30 16:50:53 -05:00
32 changed files with 844 additions and 256 deletions

1
.gitignore vendored
View File

@ -31,3 +31,4 @@ project/auxillary/vis_ast/dependencies/temp
test/gen/original
singleheader/gen/scratch.hpp
test/gen/scratch.cpp
gen_c_library/gen

View File

@ -38,7 +38,8 @@
"android_native_app_glue.h": "c",
"raylib.h": "c",
"*.m": "cpp",
"atomic": "cpp"
"atomic": "cpp",
"gen.h": "c"
},
"C_Cpp.intelliSenseEngineFallback": "disabled",
"mesonbuild.configureOnOpen": true,

200
gen_c_library/c_library.cpp Normal file
View File

@ -0,0 +1,200 @@
#define GEN_DEFINE_LIBRARY_CODE_CONSTANTS
#define GEN_ENFORCE_STRONG_CODE_TYPES
#define GEN_EXPOSE_BACKEND
#include "../project/gen.cpp"
#include "helpers/push_ignores.inline.hpp"
#include "helpers/helper.hpp"
GEN_NS_BEGIN
#include "dependencies/parsing.cpp"
GEN_NS_END
#include "auxillary/builder.hpp"
#include "auxillary/builder.cpp"
#include "auxillary/scanner.hpp"
#include <cstdlib> // for system()
#include "components/memory.fixed_arena.hpp"
#include "components/misc.hpp"
using namespace gen;
constexpr char const* generation_notice =
"// This file was generated automatially by gencpp's c_library.cpp"
"(See: https://github.com/Ed94/gencpp)\n\n";
constexpr StrC roll_own_dependencies_guard_start = txt(R"(
//! If it's desired to roll your own dependencies, define GEN_ROLL_OWN_DEPENDENCIES before including this file.
// Dependencies are derived from the c-zpl library: https://github.com/zpl-c/zpl
#ifndef GEN_ROLL_OWN_DEPENDENCIES
)");
constexpr StrC roll_own_dependencies_guard_end = txt(R"(
// GEN_ROLL_OWN_DEPENDENCIES
#endif
)");
constexpr StrC implementation_guard_start = txt(R"(
#pragma region GENCPP IMPLEMENTATION GUARD
#if defined(GEN_IMPLEMENTATION) && ! defined(GEN_IMPLEMENTED)
# define GEN_IMPLEMENTED
)");
constexpr StrC implementation_guard_end = txt(R"(
#endif
#pragma endregion GENCPP IMPLEMENTATION GUARD
)");
void format_file( char const* path )
{
String resolved_path = String::make(GlobalAllocator, to_str(path));
String style_arg = String::make(GlobalAllocator, txt("-style=file:"));
style_arg.append("../scripts/.clang-format ");
// Need to execute clang format on the generated file to get it to match the original.
#define clang_format "clang-format "
#define cf_format_inplace "-i "
#define cf_verbose "-verbose "
String command = String::make( GlobalAllocator, clang_format );
command.append( cf_format_inplace );
command.append( cf_verbose );
command.append( style_arg );
command.append( resolved_path );
log_fmt("\tRunning clang-format on file:\n");
system( command );
log_fmt("\tclang-format finished reformatting.\n");
#undef clang_format
#undef cf_format_inplace
#undef cf_verbose
}
Code dump_to_scratch_and_retireve( Code code )
{
Builder ecode_file_temp = Builder::open("gen/scratch.hpp");
ecode_file_temp.print(code);
ecode_file_temp.write();
format_file("gen/scratch.hpp");
Code result = scan_file( "gen/scratch.hpp" );
remove("gen/scratch.hpp");
return result;
}
CodeBody parse_file( const char* path )
{
FileContents file = file_read_contents( GlobalAllocator, true, path );
CodeBody code = parse_global_body( { file.size, (char const*)file.data } );
return code;
}
int gen_main()
{
#define project_dir "../project/"
gen::init();
Code push_ignores = scan_file( project_dir "helpers/push_ignores.inline.hpp" );
Code pop_ignores = scan_file( project_dir "helpers/pop_ignores.inline.hpp" );
Code c_library_header_start = scan_file( "components/header_start.hpp" );
Builder
header = Builder::open( "gen/gen.h" );
header.print_fmt( generation_notice );
header.print_fmt("#pragma once\n\n");
header.print( push_ignores );
// Headers
{
header.print( c_library_header_start );
Code platform = scan_file( project_dir "dependencies/platform.hpp" );
Code macros = scan_file( project_dir "dependencies/macros.hpp" );
Code basic_types = scan_file( project_dir "dependencies/basic_types.hpp" );
Code debug = scan_file( project_dir "dependencies/debug.hpp" );
CodeBody parsed_memory = parse_file( project_dir "dependencies/memory.hpp" );
CodeBody memory = def_body(ECode::Struct_Body);
for ( Code entry = parsed_memory.begin(); entry != parsed_memory.end(); ++ entry )
{
switch (entry->Type)
{
case ECode::Using:
{
log_fmt("REPLACE THIS MANUALLY: %S\n", entry->Name);
CodeUsing using_ver = entry.cast<CodeUsing>();
CodeTypedef typedef_ver = def_typedef(using_ver->Name, using_ver->UnderlyingType);
memory.append(typedef_ver);
}
break;
case ECode::Function:
{
CodeFn fn = entry.cast<CodeFn>();
s32 constexpr_found = fn->Specs.remove( ESpecifier::Constexpr );
if (constexpr_found > -1) {
log_fmt("Found constexpr proc\n");
fn->Specs.append(ESpecifier::Inline);
}
memory.append(entry);
}
break;
case ECode::Class:
case ECode::Struct:
{
CodeBody body = entry->Body->operator CodeBody();
CodeBody new_body = def_body( entry->Body->Type );
for ( Code body_entry = body.begin(); body_entry != body.end(); ++ body_entry ) switch
(body_entry->Type) {
case ECode::Preprocess_If:
{
ignore_preprocess_cond_block(txt("GEN_SUPPORT_CPP_MEMBER_FEATURES"), body_entry, body );
}
break;
default:
new_body.append(body_entry);
break;
}
entry->Body = rcast(AST*, new_body.ast);
memory.append(entry);
}
break;
case ECode::Preprocess_If:
{
ignore_preprocess_cond_block(txt("GEN_SUPPORT_CPP_MEMBER_FEATURES"), entry, memory );
}
break;
case ECode::Preprocess_Pragma:
{
swap_pragma_region_implementation( txt("FixedArena"), gen_fixed_arenas, entry, memory);
}
break;
default: {
memory.append(entry);
}
break;
}
}
header.print_fmt( roll_own_dependencies_guard_start );
header.print( platform );
header.print_fmt( "\nGEN_NS_BEGIN\n" );
header.print( macros );
header.print( basic_types );
header.print( debug );
header.print( memory );
}
header.print( pop_ignores );
header.write();
format_file( "gen/gen.h" );
gen::deinit();
return 0;
#undef project_dir
}

View File

@ -0,0 +1,14 @@
/*
gencpp: An attempt at "simple" staged metaprogramming for c/c++.
See Readme.md for more information from the project repository.
Public Address:
https://github.com/Ed94/gencpp
This is a single header C-Library variant.
Define GEN_IMPLEMENTATION before including this file in a single compilation unit.
*/
#if ! defined(GEN_DONT_ENFORCE_GEN_TIME_GUARD) && ! defined(GEN_TIME)
# error Gen.hpp : GEN_TIME not defined
#endif
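A minimal consumer-side sketch of the usage pattern the comment above describes; GEN_TIME, GEN_IMPLEMENTATION, and gen.h come from this diff, while the file name gen_tool.c is hypothetical:

// gen_tool.c -- the single compilation unit that provides the implementation.
#define GEN_TIME
#define GEN_IMPLEMENTATION
#include "gen.h"

// Every other translation unit that uses the library includes only the header:
// #define GEN_TIME
// #include "gen.h"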

View File

@ -0,0 +1,119 @@
#pragma once
#include "../project/gen.hpp"
using namespace gen;
CodeBody gen_fixed_arenas()
{
CodeBody result = def_body(ECode::Global_Body);
char const* template_struct = stringize(
struct FixedArena_<Name>
{
char memory[<Size>];
Arena arena;
};
);
char const* template_interface = stringize(
inline
void fixed_arena_init_<Name>(FixedArena_<Name>* result) {
zero_size(& result->memory[0], <Size>);
result->arena = arena_init_from_memory(& result->memory[0], <Size>);
}
inline
ssize fixed_arena_size_remaining_<Name>(FixedArena_<Name>* fixed_arena, ssize alignment) {
return size_remaining(fixed_arena->arena, alignment);
}
);
CodeStruct arena_struct_1kb = parse_struct( token_fmt_impl( 3, "Name", txt("1KB"), "Size", txt("kilobytes(1)"), template_struct ));
CodeStruct arena_struct_4kb = parse_struct( token_fmt_impl( 3, "Name", txt("4KB"), "Size", txt("kilobytes(4)"), template_struct ));
CodeStruct arena_struct_8kb = parse_struct( token_fmt_impl( 3, "Name", txt("8KB"), "Size", txt("kilobytes(8)"), template_struct ));
CodeStruct arena_struct_16kb = parse_struct( token_fmt_impl( 3, "Name", txt("16KB"), "Size", txt("kilobytes(16)"), template_struct ));
CodeStruct arena_struct_32kb = parse_struct( token_fmt_impl( 3, "Name", txt("32KB"), "Size", txt("kilobytes(32)"), template_struct ));
CodeStruct arena_struct_64kb = parse_struct( token_fmt_impl( 3, "Name", txt("64KB"), "Size", txt("kilobytes(64)"), template_struct ));
CodeStruct arena_struct_128kb = parse_struct( token_fmt_impl( 3, "Name", txt("128KB"), "Size", txt("kilobytes(128)"), template_struct ));
CodeStruct arena_struct_256kb = parse_struct( token_fmt_impl( 3, "Name", txt("256KB"), "Size", txt("kilobytes(256)"), template_struct ));
CodeStruct arena_struct_512kb = parse_struct( token_fmt_impl( 3, "Name", txt("512KB"), "Size", txt("kilobytes(512)"), template_struct ));
CodeStruct arena_struct_1mb = parse_struct( token_fmt_impl( 3, "Name", txt("1MB"), "Size", txt("megabytes(1)"), template_struct ));
CodeStruct arena_struct_2mb = parse_struct( token_fmt_impl( 3, "Name", txt("2MB"), "Size", txt("megabytes(2)"), template_struct ));
CodeStruct arena_struct_4mb = parse_struct( token_fmt_impl( 3, "Name", txt("4MB"), "Size", txt("megabytes(4)"), template_struct ));
CodeBody arena_interface_1kb = parse_global_body( token_fmt_impl( 3, "Name", txt("1KB"), "Size", txt("kilobytes(1)"), template_interface ));
CodeBody arena_interface_4kb = parse_global_body( token_fmt_impl( 3, "Name", txt("4KB"), "Size", txt("kilobytes(4)"), template_interface ));
CodeBody arena_interface_8kb = parse_global_body( token_fmt_impl( 3, "Name", txt("8KB"), "Size", txt("kilobytes(8)"), template_interface ));
CodeBody arena_interface_16kb = parse_global_body( token_fmt_impl( 3, "Name", txt("16KB"), "Size", txt("kilobytes(16)"), template_interface ));
CodeBody arena_interface_32kb = parse_global_body( token_fmt_impl( 3, "Name", txt("32KB"), "Size", txt("kilobytes(32)"), template_interface ));
CodeBody arena_interface_64kb = parse_global_body( token_fmt_impl( 3, "Name", txt("64KB"), "Size", txt("kilobytes(64)"), template_interface ));
CodeBody arena_interface_128kb = parse_global_body( token_fmt_impl( 3, "Name", txt("128KB"), "Size", txt("kilobytes(128)"), template_interface ));
CodeBody arena_interface_256kb = parse_global_body( token_fmt_impl( 3, "Name", txt("256KB"), "Size", txt("kilobytes(256)"), template_interface ));
CodeBody arena_interface_512kb = parse_global_body( token_fmt_impl( 3, "Name", txt("512KB"), "Size", txt("kilobytes(512)"), template_interface ));
CodeBody arena_interface_1mb = parse_global_body( token_fmt_impl( 3, "Name", txt("1MB"), "Size", txt("megabytes(1)"), template_interface ));
CodeBody arena_interface_2mb = parse_global_body( token_fmt_impl( 3, "Name", txt("2MB"), "Size", txt("megabytes(2)"), template_interface ));
CodeBody arena_interface_4mb = parse_global_body( token_fmt_impl( 3, "Name", txt("4MB"), "Size", txt("megabytes(4)"), template_interface ));
result.append(arena_struct_1kb);
result.append(arena_struct_4kb);
result.append(arena_struct_8kb);
result.append(arena_struct_16kb);
result.append(arena_struct_32kb);
result.append(arena_struct_64kb);
result.append(arena_struct_128kb);
result.append(arena_struct_256kb);
result.append(arena_struct_512kb);
result.append(arena_struct_1mb);
result.append(arena_struct_2mb);
result.append(arena_struct_4mb);
result.append(arena_interface_1kb);
result.append(arena_interface_4kb);
result.append(arena_interface_8kb);
result.append(arena_interface_16kb);
result.append(arena_interface_32kb);
result.append(arena_interface_64kb);
result.append(arena_interface_128kb);
result.append(arena_interface_256kb);
result.append(arena_interface_512kb);
result.append(arena_interface_1mb);
result.append(arena_interface_2mb);
result.append(arena_interface_4mb);
CodeDefine def = def_define(txt("fixed_arena_allocator_info(fixed_arena)"), code({ arena_allocator_proc, & fixed_arena.arena }) );
result.append(def);
result.append(parse_global_body(txt(R"(
#define fixed_arena_init(expr) _Generic((expr), \
FixedArena_1KB* : fixed_arena_init_1KB, \
FixedArena_4KB* : fixed_arena_init_4KB, \
FixedArena_8KB* : fixed_arena_init_8KB, \
FixedArena_16KB* : fixed_arena_init_16KB, \
FixedArena_32KB* : fixed_arena_init_32KB, \
FixedArena_64KB* : fixed_arena_init_64KB, \
FixedArena_128KB* : fixed_arena_init_128KB, \
FixedArena_256KB* : fixed_arena_init_256KB, \
FixedArena_512KB* : fixed_arena_init_512KB, \
FixedArena_1MB* : fixed_arena_init_1MB, \
FixedArena_2MB* : fixed_arena_init_2MB, \
FixedArena_4MB* : fixed_arena_init_4MB \
)(expr)
#define fixed_arena_size_remaining(expr, alignment) _Generic((expr), \
FixedArena_1KB* : fixed_arena_size_remaining_1KB, \
FixedArena_4KB* : fixed_arena_size_remaining_4KB, \
FixedArena_8KB* : fixed_arena_size_remaining_8KB, \
FixedArena_16KB* : fixed_arena_size_remaining_16KB, \
FixedArena_32KB* : fixed_arena_size_remaining_32KB, \
FixedArena_64KB* : fixed_arena_size_remaining_64KB, \
FixedArena_128KB* : fixed_arena_size_remaining_128KB, \
FixedArena_256KB* : fixed_arena_size_remaining_256KB, \
FixedArena_512KB* : fixed_arena_size_remaining_512KB, \
FixedArena_1MB* : fixed_arena_size_remaining_1MB, \
FixedArena_2MB* : fixed_arena_size_remaining_2MB, \
FixedArena_4MB* : fixed_arena_size_remaining_4MB \
)(expr, alignment)
)"
)));
return result;
}
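A hypothetical consumer-side sketch (not part of this diff) of how the generated fixed-arena API is meant to be called from C. FixedArena_1KB, fixed_arena_init, fixed_arena_size_remaining, and fixed_arena_allocator_info are the names generated above; the surrounding function, the alignment value, and the alloc call are assumptions:

void example_fixed_arena_usage( void )
{
	FixedArena_1KB scratch;
	fixed_arena_init( & scratch );                              // _Generic dispatch to fixed_arena_init_1KB
	ssize left = fixed_arena_size_remaining( & scratch, 8 );    // dispatch to fixed_arena_size_remaining_1KB
	AllocatorInfo info = fixed_arena_allocator_info( scratch ); // { arena_allocator_proc, & scratch.arena }
	void* block = alloc( info, 256 );                           // allocate out of the arena backed by scratch.memory
}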

View File

@ -0,0 +1,61 @@
// #pragma once
// #include "../project/gen.hpp"
// using namespace gen;
using SwapContentProc = CodeBody(void);
b32 ignore_preprocess_cond_block( StrC cond_sig, Code& entry_iter, CodeBody& body )
{
CodePreprocessCond cond = entry_iter.cast<CodePreprocessCond>();
if ( cond->Content.contains(cond_sig) )
{
s32 depth = 1;
++ entry_iter; for(b32 continue_for = true; continue_for && entry_iter != body.end(); ++ entry_iter) switch
(entry_iter->Type) {
case ECode::Preprocess_If:
case ECode::Preprocess_IfDef:
case ECode::Preprocess_IfNotDef:
depth ++;
break;
case ECode::Preprocess_EndIf:
{
depth --;
if (depth == 0) {
continue_for = false;
}
}
break;
}
}
return entry_iter != body.end();
}
void swap_pragma_region_implementation( StrC region_name, SwapContentProc* swap_content, Code& entry_iter, CodeBody& body )
{
CodePragma possible_region = entry_iter.cast<CodePragma>();
String region_sig = string_fmt_buf(GlobalAllocator, "region %s", region_name.Ptr);
String endregion_sig = string_fmt_buf(GlobalAllocator, "endregion %s", region_name.Ptr);
if ( possible_region->Content.contains(region_sig))
{
body.append(possible_region);
body.append(swap_content());
++ entry_iter; for(b32 continue_for = true; continue_for; ++entry_iter) switch
(entry_iter->Type) {
case ECode::Preprocess_Pragma:
{
CodePragma possible_end_region = entry_iter.cast<CodePragma>();
if ( possible_end_region->Content.contains(endregion_sig) ) {
body.append(possible_end_region);
continue_for = false;
}
}
break;
}
}
body.append(entry_iter);
}

View File

@ -1,4 +1,4 @@
# Singleheader
Creates a single header file version of the library using `gen.singleheader.cpp`.
Creates a single header file version of the library using `singleheader.cpp`.
Follows the same convention seen in the gb, stb, and zpl libraries.

View File

@ -12,12 +12,3 @@
#if ! defined(GEN_DONT_ENFORCE_GEN_TIME_GUARD) && ! defined(GEN_TIME)
# error Gen.hpp : GEN_TIME not defined
#endif
#ifdef GEN_DONT_USE_NAMESPACE
# define GEN_NS_BEGIN
# define GEN_NS_END
#else
# define GEN_NS_BEGIN namespace gen {
# define GEN_NS_END }
#endif

View File

@ -165,6 +165,46 @@ struct CodeSpecifiers
return -1;
}
s32 remove( SpecifierT to_remove )
{
if ( ast == nullptr )
{
log_failure("CodeSpecifiers: Attempted to append to a null specifiers AST!");
return -1;
}
if ( raw()->NumEntries == AST::ArrSpecs_Cap )
{
log_failure("CodeSpecifiers: Attempted to append over %d specifiers to a specifiers AST!", AST::ArrSpecs_Cap );
return -1;
}
s32 result = -1;
s32 curr = 0;
s32 next = 0;
for(; next < raw()->NumEntries; ++ curr, ++ next)
{
SpecifierT spec = raw()->ArrSpecs[next];
if (spec == to_remove)
{
result = next;
next ++;
if (next >= raw()->NumEntries)
break;
spec = raw()->ArrSpecs[next];
}
raw()->ArrSpecs[ curr ] = spec;
}
if (result > -1) {
raw()->NumEntries --;
}
return result;
}
void to_string( String& result );
AST* raw()
{

View File

@ -133,6 +133,7 @@ extern CodeType t_typename;
#pragma region Macros
#ifndef token_fmt
# define gen_main main
# define __ NoCode
@ -151,6 +152,7 @@ extern CodeType t_typename;
// Takes a format string (char const*) and a list of tokens (StrC) and returns a StrC of the formatted string.
# define token_fmt( ... ) GEN_NS token_fmt_impl( (num_args( __VA_ARGS__ ) + 1) / 2, __VA_ARGS__ )
#endif
#pragma endregion Macros

View File

@ -11,7 +11,7 @@ internal void deinit();
internal
void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
{
Arena* last = & Global_AllocatorBuckets.back();
Arena* last = & back(Global_AllocatorBuckets);
switch ( type )
{
@ -19,18 +19,18 @@ void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, s
{
if ( ( last->TotalUsed + size ) > last->TotalSize )
{
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! Global_AllocatorBuckets.append( bucket ) )
if ( ! append( Global_AllocatorBuckets, bucket ) )
GEN_FATAL( "Failed to append bucket to Global_AllocatorBuckets");
last = & Global_AllocatorBuckets.back();
last = & back(Global_AllocatorBuckets);
}
return alloc_align( * last, size, alignment );
return alloc_align( allocator_info(* last), size, alignment );
}
case EAllocation_FREE:
{
@ -46,15 +46,15 @@ void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, s
{
if ( last->TotalUsed + size > last->TotalSize )
{
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! Global_AllocatorBuckets.append( bucket ) )
if ( ! append( Global_AllocatorBuckets, bucket ) )
GEN_FATAL( "Failed to append bucket to Global_AllocatorBuckets");
last = & Global_AllocatorBuckets.back();
last = & back(Global_AllocatorBuckets);
}
void* result = alloc_align( last->Backing, size, alignment );
@ -235,28 +235,27 @@ void init()
{
GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };
Global_AllocatorBuckets = Array<Arena>::init_reserve( heap(), 128 );
Global_AllocatorBuckets = array_init_reserve<Arena>( heap(), 128 );
if ( Global_AllocatorBuckets == nullptr )
GEN_FATAL( "Failed to reserve memory for Global_AllocatorBuckets");
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create first bucket for Global_AllocatorBuckets");
Global_AllocatorBuckets.append( bucket );
append( Global_AllocatorBuckets, bucket );
}
// Setup the arrays
{
CodePools = Array<Pool>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
CodePools = array_init_reserve<Pool>( Allocator_DataArrays, InitSize_DataArrays );
if ( CodePools == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the CodePools array" );
StringArenas = Array<Arena>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
StringArenas = array_init_reserve<Arena>( Allocator_DataArrays, InitSize_DataArrays );
if ( StringArenas == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the StringArenas array" );
@ -264,21 +263,21 @@ void init()
// Setup the code pool and code entries arena.
{
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = pool_init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.PhysicalStart == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the code pool" );
CodePools.append( code_pool );
append(CodePools, code_pool );
LexArena = Arena::init_from_allocator( Allocator_Lexer, LexAllocator_Size );
LexArena = arena_init_from_allocator( Allocator_Lexer, LexAllocator_Size );
Arena string_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
Arena string_arena = arena_init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( string_arena.PhysicalStart == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the string arena" );
StringArenas.append( string_arena );
append(StringArenas, string_arena );
}
// Setup the hash tables
@ -290,7 +289,7 @@ void init()
}
// Preprocessor Defines
PreprocessorDefines = Array<StringCached>::init_reserve( GlobalAllocator, kilobytes(1) );
PreprocessorDefines = array_init_reserve<StringCached>( GlobalAllocator, kilobytes(1) );
define_constants();
parser::init();
@ -299,62 +298,62 @@ void init()
void deinit()
{
usize index = 0;
usize left = CodePools.num();
usize left = num(CodePools);
do
{
Pool* code_pool = & CodePools[index];
code_pool->free();
free(* code_pool);
index++;
}
while ( left--, left );
index = 0;
left = StringArenas.num();
left = num(StringArenas);
do
{
Arena* string_arena = & StringArenas[index];
string_arena->free();
free(* string_arena);
index++;
}
while ( left--, left );
StringCache.destroy();
CodePools.free();
StringArenas.free();
free(CodePools);
free(StringArenas);
LexArena.free();
free(LexArena);
PreprocessorDefines.free();
free(PreprocessorDefines);
index = 0;
left = Global_AllocatorBuckets.num();
left = num(Global_AllocatorBuckets);
do
{
Arena* bucket = & Global_AllocatorBuckets[ index ];
bucket->free();
free(* bucket);
index++;
}
while ( left--, left );
Global_AllocatorBuckets.free();
free(Global_AllocatorBuckets);
parser::deinit();
}
void reset()
{
s32 index = 0;
s32 left = CodePools.num();
s32 left = num(CodePools);
do
{
Pool* code_pool = & CodePools[index];
code_pool->clear();
clear(* code_pool);
index++;
}
while ( left--, left );
index = 0;
left = StringArenas.num();
left = num(StringArenas);
do
{
Arena* string_arena = & StringArenas[index];
@ -363,28 +362,28 @@ void reset()
}
while ( left--, left );
StringCache.clear();
clear(StringCache);
define_constants();
}
AllocatorInfo get_string_allocator( s32 str_length )
{
Arena* last = & StringArenas.back();
Arena* last = & back(StringArenas);
usize size_req = str_length + sizeof(StringHeader) + sizeof(char*);
if ( last->TotalUsed + ssize(size_req) > last->TotalSize )
{
Arena new_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
Arena new_arena = arena_init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( ! StringArenas.append( new_arena ) )
if ( ! append(StringArenas, new_arena ) )
GEN_FATAL( "gen::get_string_allocator: Failed to allocate a new string arena" );
last = & StringArenas.back();
last = & back(StringArenas);
}
return * last;
return allocator_info(* last);
}
// Will either make or retrieve a code string.
@ -408,21 +407,21 @@ StringCached get_cached_string( StrC str )
// Used internally to retrieve a Code object from the CodePool.
Code make_code()
{
Pool* allocator = & CodePools.back();
Pool* allocator = & back(CodePools);
if ( allocator->FreeList == nullptr )
{
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = pool_init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.PhysicalStart == nullptr )
GEN_FATAL( "gen::make_code: Failed to allocate a new code pool - CodePool allcoator returned nullptr." );
if ( ! CodePools.append( code_pool ) )
if ( ! append( CodePools, code_pool ) )
GEN_FATAL( "gen::make_code: Failed to allocate a new code pool - CodePools failed to append new pool." );
allocator = & CodePools.back();
allocator = & back(CodePools);
}
Code result { rcast( AST*, alloc( * allocator, sizeof(AST) )) };
Code result { rcast( AST*, alloc( allocator_info(* allocator), sizeof(AST) )) };
mem_set( result.ast, 0, sizeof(AST) );
// result->Type = ECode::Invalid;

View File

@ -16,8 +16,8 @@ ssize token_fmt_va( char* buf, usize buf_size, s32 num_tokens, va_list va )
local_persist
char tok_map_mem[ TokenFmt_TokenMap_MemSize ];
tok_map_arena = init_from_memory( tok_map_mem, sizeof(tok_map_mem) );
tok_map = HashTable<StrC>::init( tok_map_arena );
tok_map_arena = arena_init_from_memory( tok_map_mem, sizeof(tok_map_mem) );
tok_map = HashTable<StrC>::init( allocator_info(tok_map_arena) );
s32 left = num_tokens - 1;
@ -95,7 +95,7 @@ ssize token_fmt_va( char* buf, usize buf_size, s32 num_tokens, va_list va )
}
tok_map.clear();
tok_map_arena.free();
free(tok_map_arena);
ssize result = buf_size - remaining;

View File

@ -222,7 +222,7 @@ s32 lex_preprocessor_directive(
, Token& token )
{
char const* hash = scanner;
Tokens.append( { hash, 1, TokType::Preprocess_Hash, line, column, TF_Preprocess } );
append(Tokens, { hash, 1, TokType::Preprocess_Hash, line, column, TF_Preprocess } );
move_forward();
SkipWhitespace();
@ -298,14 +298,14 @@ s32 lex_preprocessor_directive(
token.Length = token.Length + token.Text - hash;
token.Text = hash;
Tokens.append( token );
append(Tokens, token );
return Lex_Continue; // Skip found token, it's all handled here.
}
if ( token.Type == TokType::Preprocess_Else || token.Type == TokType::Preprocess_EndIf )
{
token.Flags |= TF_Preprocess_Cond;
Tokens.append( token );
append(Tokens, token );
end_line();
return Lex_Continue;
}
@ -314,7 +314,7 @@ s32 lex_preprocessor_directive(
token.Flags |= TF_Preprocess_Cond;
}
Tokens.append( token );
append(Tokens, token );
SkipWhitespace();
@ -338,7 +338,7 @@ s32 lex_preprocessor_directive(
name.Length++;
}
Tokens.append( name );
append(Tokens, name );
u64 key = crc32( name.Text, name.Length );
defines.set( key, name );
@ -384,7 +384,7 @@ s32 lex_preprocessor_directive(
move_forward();
}
Tokens.append( preprocess_content );
append(Tokens, preprocess_content );
return Lex_Continue; // Skip found token, it's all handled here.
}
@ -446,7 +446,7 @@ s32 lex_preprocessor_directive(
preprocess_content.Length++;
}
Tokens.append( preprocess_content );
append(Tokens, preprocess_content );
return Lex_Continue; // Skip found token, it's all handled here.
}
@ -461,7 +461,7 @@ void lex_found_token( StrC& content
{
if ( token.Type != TokType::Invalid )
{
Tokens.append( token );
append(Tokens, token );
return;
}
@ -488,7 +488,7 @@ void lex_found_token( StrC& content
}
token.Type = type;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -498,7 +498,7 @@ void lex_found_token( StrC& content
{
token.Type = type;
token.Flags |= TF_Specifier;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -506,7 +506,7 @@ void lex_found_token( StrC& content
if ( type != TokType::Invalid )
{
token.Type = type;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -558,7 +558,7 @@ void lex_found_token( StrC& content
token.Type = TokType::Identifier;
}
Tokens.append( token );
append(Tokens, token );
}
@ -582,7 +582,7 @@ TokArray lex( StrC content )
return { { nullptr }, 0 };
}
for ( StringCached entry : PreprocessorDefines )
foreach( StringCached, entry, PreprocessorDefines )
{
s32 length = 0;
char const* scanner = entry.Data;
@ -600,7 +600,7 @@ TokArray lex( StrC content )
defines.set( key, entry );
}
Tokens.clear();
clear(Tokens);
while (left )
{
@ -630,7 +630,7 @@ TokArray lex( StrC content )
token.Type = TokType::NewLine;
token.Length++;
Tokens.append( token );
append(Tokens, token );
continue;
}
}
@ -1099,7 +1099,7 @@ TokArray lex( StrC content )
move_forward();
token.Length++;
}
Tokens.append( token );
append(Tokens, token );
continue;
}
else if ( current == '*' )
@ -1135,7 +1135,7 @@ TokArray lex( StrC content )
move_forward();
token.Length++;
}
Tokens.append( token );
append(Tokens, token );
// end_line();
continue;
}
@ -1228,9 +1228,9 @@ TokArray lex( StrC content )
}
else
{
s32 start = max( 0, Tokens.num() - 100 );
s32 start = max( 0, num(Tokens) - 100 );
log_fmt("\n%d\n", start);
for ( s32 idx = start; idx < Tokens.num(); idx++ )
for ( s32 idx = start; idx < num(Tokens); idx++ )
{
log_fmt( "Token %d Type: %s : %.*s\n"
, idx
@ -1253,7 +1253,7 @@ TokArray lex( StrC content )
lex_found_token( content, left, scanner, line, column, defines, token );
}
if ( Tokens.num() == 0 )
if ( num(Tokens) == 0 )
{
log_failure( "Failed to lex any tokens" );
return { { nullptr }, 0 };

View File

@ -48,11 +48,11 @@ struct ParseContext
String result = String::make_reserve( GlobalAllocator, kilobytes(4) );
Token scope_start = Scope->Start;
Token last_valid = Tokens.Idx >= Tokens.Arr.num() ? Tokens.Arr[Tokens.Arr.num() -1] : Tokens.current();
Token last_valid = Tokens.Idx >= num(Tokens.Arr) ? Tokens.Arr[num(Tokens.Arr) -1] : Tokens.current();
sptr length = scope_start.Length;
char const* current = scope_start.Text + length;
while ( current <= Tokens.Arr.back().Text && *current != '\n' && length < 74 )
while ( current <= back(Tokens.Arr).Text && *current != '\n' && length < 74 )
{
current++;
length++;
@ -96,7 +96,7 @@ global ParseContext Context;
bool TokArray::__eat( TokType type )
{
if ( Arr.num() - Idx <= 0 )
if ( num(Arr) - Idx <= 0 )
{
log_failure( "No tokens left.\n%s", Context.to_string() );
return false;
@ -132,12 +132,12 @@ bool TokArray::__eat( TokType type )
internal
void init()
{
Tokens = Array<Token>::init_reserve( LexArena
Tokens = array_init_reserve<Token>( allocator_info(LexArena)
, ( LexAllocator_Size - sizeof( ArrayHeader ) ) / sizeof(Token)
);
defines_map_arena = Arena_256KB::init();
defines = HashTable<StrC>::init_reserve( defines_map_arena, 256 );
fixed_arena_init(defines_map_arena);
defines = HashTable<StrC>::init_reserve( allocator_info(defines_map_arena), 256 );
}
internal
@ -167,7 +167,7 @@ if ( def.Ptr == nullptr ) \
# define prevtok Context.Tokens.previous()
# define nexttok Context.Tokens.next()
# define eat( Type_ ) Context.Tokens.__eat( Type_ )
# define left ( Context.Tokens.Arr.num() - Context.Tokens.Idx )
# define left ( num(Context.Tokens.Arr) - Context.Tokens.Idx )
#ifdef check
#define CHECK_WAS_DEFINED
@ -713,8 +713,8 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
local_persist
char interface_arr_mem[ kilobytes(4) ] {0};
Array<CodeType> interfaces; {
Arena arena = init_from_memory( interface_arr_mem, kilobytes(4) );
Array<CodeType>::init_reserve( arena, 4 );
Arena arena = arena_init_from_memory( interface_arr_mem, kilobytes(4) );
interfaces = array_init_reserve<CodeType>( allocator_info(arena), 4 );
}
// TODO(Ed) : Make an AST_DerivedType; we'll store any arbitrary derived type there as a linear linked list of them.
@ -745,7 +745,7 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
}
Token interface_tok = parse_identifier();
interfaces.append( def_type( interface_tok ) );
append(interfaces, def_type( interface_tok ) );
// <ModuleFlags> <class/struct> <Attributes> <Name> : <Access Specifier> <Name>, ...
}
}
@ -777,7 +777,7 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
if ( inline_cmt )
result->InlineCmt = inline_cmt;
interfaces.free();
free(interfaces);
return result;
}
@ -1152,7 +1152,7 @@ Code parse_complicated_definition( TokType which )
s32 idx = tokens.Idx;
s32 level = 0;
for ( ; idx < tokens.Arr.num(); idx++ )
for ( ; idx < num(tokens.Arr); idx++ )
{
if ( tokens[ idx ].Type == TokType::BraceCurly_Open )
level++;
@ -1837,7 +1837,7 @@ CodeBody parse_global_nspace( CodeT which )
bool found_operator_cast_outside_class_implmentation = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -1909,14 +1909,14 @@ Code parse_global_nspace_constructor_destructor( CodeSpecifiers specifiers )
s32 idx = tokens.Idx;
Token nav = tokens[ idx ];
for ( ; idx < tokens.Arr.num(); idx++, nav = tokens[ idx ] )
for ( ; idx < num(tokens.Arr); idx++, nav = tokens[ idx ] )
{
if ( nav.Text[0] == '<' )
{
// Skip templated expressions as they may have expressions with the () operators
s32 capture_level = 0;
s32 template_level = 0;
for ( ; idx < tokens.Arr.num(); idx++, nav = tokens[idx] )
for ( ; idx < num(tokens.Arr); idx++, nav = tokens[idx] )
{
if (nav.Text[ 0 ] == '<')
++ template_level;
@ -2511,7 +2511,7 @@ Code parse_operator_function_or_variable( bool expects_function, CodeAttributes
bool found_operator = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -4348,7 +4348,7 @@ CodeTemplate parse_template()
bool found_operator_cast_outside_class_implmentation = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -4896,7 +4896,7 @@ CodeTypedef parse_typedef()
s32 idx = tokens.Idx;
s32 level = 0;
for ( ; idx < tokens.Arr.num(); idx ++ )
for ( ; idx < num(tokens.Arr); idx ++ )
{
if ( tokens[idx].Type == TokType::BraceCurly_Open )
level++;

View File

@ -123,13 +123,21 @@ typedef s8 b8;
typedef s16 b16;
typedef s32 b32;
using mem_ptr = void*;
using mem_ptr_const = void const*;
typedef void* mem_ptr;
typedef void const* mem_ptr_const;
#if ! GEN_COMPILER_C
template<typename Type> uptr to_uptr( Type* ptr ) { return (uptr)ptr; }
template<typename Type> sptr to_sptr( Type* ptr ) { return (sptr)ptr; }
template<typename Type> mem_ptr to_mem_ptr ( Type ptr ) { return (mem_ptr) ptr; }
template<typename Type> mem_ptr_const to_mem_ptr_const( Type ptr ) { return (mem_ptr_const)ptr; }
#else
#define to_uptr( ptr ) ((uptr)(ptr))
#define to_sptr( ptr ) ((sptr)(ptr))
#define to_mem_ptr( ptr ) ((mem_ptr)(ptr))
#define to_mem_ptr_const( ptr ) ((mem_ptr_const)(ptr))
#endif
#pragma endregion Basic Types

View File

@ -14,12 +14,26 @@ template<class TType>
using TRemoveConst = typename RemoveConst<TType>::Type;
#pragma region Array
#if ! GEN_COMPILER_C
#define Array(Type) Array<Type>
// #define array_init(Type, ...) array_init <Type>(__VA_ARGS__)
// #define array_init_reserve(Type, ...) array_init_reserve<Type>(__VA_ARGS__)
#endif
struct ArrayHeader;
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type> struct Array;
#else
template<class Type>
using Array = Type*;
#endif
usize array_grow_formula(ssize value);
template<class Type> Array<Type> array_init(AllocatorInfo allocator);
template<class Type> Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity);
template<class Type> usize array_grow_formula(ssize value);
template<class Type> bool append(Array<Type>& array, Array<Type> other);
template<class Type> bool append(Array<Type>& array, Type value);
template<class Type> bool append(Array<Type>& array, Type* items, usize item_num);
@ -38,18 +52,22 @@ template<class Type> bool resize(Array<Type>& array, usize num);
template<class Type> bool set_capacity(Array<Type>& array, usize new_capacity);
template<class Type> ArrayHeader* get_header(Array<Type>& array);
template<class Type> forceinline Type* begin(Array<Type>& array) { return array; }
template<class Type> forceinline Type* end(Array<Type>& array) { return array + get_header(array)->Num; }
template<class Type> forceinline Type* next(Array<Type>& array, Type* entry) { return entry + 1; }
struct ArrayHeader {
AllocatorInfo Allocator;
usize Capacity;
usize Num;
};
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type>
struct Array
{
Type* Data;
#if 1
#pragma region Member Mapping
forceinline static Array init(AllocatorInfo allocator) { return GEN_NS array_init<Type>(allocator); }
forceinline static Array init_reserve(AllocatorInfo allocator, ssize capacity) { return GEN_NS array_init_reserve<Type>(allocator, capacity); }
@ -78,12 +96,12 @@ struct Array
forceinline Type* begin() { return Data; }
forceinline Type* end() { return Data + get_header()->Num; }
#pragma endregion Member Mapping
#endif
};
#endif
template<class Type> inline
Array<Type> array_init(AllocatorInfo allocator) {
return array_init_reserve<Type>(allocator, array_grow_formula<Type>(0));
return array_init_reserve<Type>(allocator, array_grow_formula(0));
}
template<class Type> inline
@ -101,7 +119,6 @@ Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity)
return {rcast(Type*, header + 1)};
}
template<class Type> inline
usize array_grow_formula(ssize value) {
return 2 * value + 8;
}
@ -123,7 +140,7 @@ bool append(Array<Type>& array, Type value)
header = get_header(array);
}
array.Data[header->Num] = value;
array[header->Num] = value;
header->Num++;
return true;
@ -166,7 +183,7 @@ bool append_at(Array<Type>& array, Type item, usize idx)
header = get_header(array);
}
Type* target = array.Data + idx;
Type* target = array + idx;
mem_move(target + 1, target, (header->Num - idx) * sizeof(Type));
header->Num++;
@ -205,7 +222,7 @@ bool append_at(Array<Type>& array, Type* items, usize item_num, usize idx)
template<class Type> inline
Type& back(Array<Type>& array) {
ArrayHeader* header = get_header(array);
return array.Data[header->Num - 1];
return array[header->Num - 1];
}
template<class Type> inline
@ -224,7 +241,7 @@ bool fill(Array<Type>& array, usize begin, usize end, Type value)
for (ssize idx = ssize(begin); idx < ssize(end); idx++)
{
array.Data[idx] = value;
array[idx] = value;
}
return true;
@ -233,21 +250,23 @@ bool fill(Array<Type>& array, usize begin, usize end, Type value)
template<class Type> inline
void free(Array<Type>& array) {
ArrayHeader* header = get_header(array);
gen::free(header->Allocator, header);
array.Data = nullptr;
GEN_NS free(header->Allocator, header);
Type*& Data = rcast(Type*&, array);
Data = nullptr;
}
template<class Type> inline
ArrayHeader* get_header(Array<Type>& array) {
using NonConstType = TRemoveConst<Type>;
return rcast(ArrayHeader*, const_cast<NonConstType*>(array.Data)) - 1;
Type* Data = array; // This should do nothing in C but in C++ gets member Data struct.
return rcast(ArrayHeader*, const_cast<NonConstType*>(Data)) - 1;
}
template<class Type> inline
bool grow(Array<Type>& array, usize min_capacity)
{
ArrayHeader* header = get_header(array);
usize new_capacity = array_grow_formula<Type>(header->Capacity);
usize new_capacity = array_grow_formula(header->Capacity);
if (new_capacity < min_capacity)
new_capacity = min_capacity;
@ -273,7 +292,7 @@ void remove_at(Array<Type>& array, usize idx)
ArrayHeader* header = get_header(array);
GEN_ASSERT(idx < header->Num);
mem_move(array.Data + idx, array.Data + idx + 1, sizeof(Type) * (header->Num - idx - 1));
mem_move(array + idx, array + idx + 1, sizeof(Type) * (header->Num - idx - 1));
header->Num--;
}
@ -329,7 +348,8 @@ bool set_capacity(Array<Type>& array, usize new_capacity)
GEN_NS free(header->Allocator, header);
array.Data = rcast(Type*, new_header + 1);
Type*& Data = rcast(Type*&, array);
Data = rcast(Type*, new_header + 1);
return true;
}
#pragma endregion Array
@ -371,11 +391,11 @@ template<class Type> bool full(HashTable<Type>& table);
template<class Type> void map(HashTable<Type>& table, void (*map_proc)(u64 key, Type value));
template<class Type> void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value));
static constexpr f32 HashTable_CriticalLoadScale = 0.7f;
template<typename Type>
struct HashTable
{
static constexpr f32 CriticalLoadScale = 0.7f;
Array<ssize> Hashes;
Array<HashTableEntry<Type>> Entries;
@ -411,26 +431,26 @@ HashTable<Type> hashtable_init_reserve(AllocatorInfo allocator, usize num)
{
HashTable<Type> result = { { nullptr }, { nullptr } };
result.Hashes = Array<ssize>::init_reserve(allocator, num);
result.Hashes.get_header()->Num = num;
result.Hashes.resize(num);
result.Hashes.fill(0, num, -1);
result.Hashes = array_init_reserve<ssize>(allocator, num);
get_header(result.Hashes)->Num = num;
resize(result.Hashes, num);
fill<ssize>(result.Hashes, 0, num, -1);
result.Entries = Array<HashTableEntry<Type>>::init_reserve(allocator, num);
result.Entries = array_init_reserve<HashTableEntry<Type>>(allocator, num);
return result;
}
template<typename Type> inline
void clear(HashTable<Type>& table) {
table.Entries.clear();
table.Hashes.fill(0, table.Hashes.num(), -1);
clear(table.Entries);
fill<ssize>(table.Hashes, 0, num(table.Hashes), -1);
}
template<typename Type> inline
void destroy(HashTable<Type>& table) {
if (table.Hashes && table.Hashes.get_header()->Capacity) {
table.Hashes.free();
table.Entries.free();
if (table.Hashes && get_header(table.Hashes)->Capacity) {
free(table.Hashes);
free(table.Entries);
}
}
@ -463,7 +483,7 @@ void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value)) {
template<typename Type> inline
void grow(HashTable<Type>& table) {
ssize new_num = Array<HashTableEntry<Type>>::grow_formula(table.Entries.num());
ssize new_num = array_grow_formula(num(table.Entries));
rehash(table, new_num);
}
@ -471,9 +491,9 @@ template<typename Type> inline
void rehash(HashTable<Type>& table, ssize new_num)
{
ssize last_added_index;
HashTable<Type> new_ht = hashtable_init_reserve<Type>(table.Hashes.get_header()->Allocator, new_num);
HashTable<Type> new_ht = hashtable_init_reserve<Type>(get_header(table.Hashes)->Allocator, new_num);
for (ssize idx = 0; idx < ssize(table.Entries.num()); ++idx)
for (ssize idx = 0; idx < ssize(num(table.Entries)); ++idx)
{
HashTableFindResult find_result;
HashTableEntry<Type>& entry = table.Entries[idx];
@ -580,8 +600,8 @@ ssize add_entry(HashTable<Type>& table, u64 key) {
ssize idx;
HashTableEntry<Type> entry = { key, -1 };
idx = table.Entries.num();
table.Entries.append(entry);
idx = num(table.Entries);
append(table.Entries, entry);
return idx;
}
@ -590,9 +610,9 @@ HashTableFindResult find(HashTable<Type>& table, u64 key)
{
HashTableFindResult result = { -1, -1, -1 };
if (table.Hashes.num() > 0)
if (num(table.Hashes) > 0)
{
result.HashIndex = key % table.Hashes.num();
result.HashIndex = key % num(table.Hashes);
result.EntryIndex = table.Hashes[result.HashIndex];
while (result.EntryIndex >= 0)
@ -610,8 +630,8 @@ HashTableFindResult find(HashTable<Type>& table, u64 key)
template<typename Type> inline
bool full(HashTable<Type>& table) {
usize critical_load = usize(HashTable<Type>::CriticalLoadScale * f32(table.Hashes.num()));
b32 result = table.Entries.num() > critical_load;
usize critical_load = usize(HashTable_CriticalLoadScale * f32(num(table.Hashes)));
b32 result = num(table.Entries) > critical_load;
return result;
}
#pragma endregion HashTable

View File

@ -505,7 +505,7 @@ b8 file_stream_new( FileInfo* file, AllocatorInfo allocator )
d->allocator = allocator;
d->flags = EFileStream_CLONE_WRITABLE;
d->cap = 0;
d->buf = Array<u8>::init( allocator );
d->buf = array_init<u8>( allocator );
if ( ! d->buf )
return false;
@ -531,7 +531,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
d->flags = flags;
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = Array<u8>::init_reserve( allocator, size );
Array<u8> arr = array_init_reserve<u8>( allocator, size );
d->buf = arr;
if ( ! d->buf )
@ -540,7 +540,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
mem_copy( d->buf, buffer, size );
d->cap = size;
arr.get_header()->Num = size;
get_header(arr)->Num = size;
}
else
{
@ -610,9 +610,9 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
{
Array<u8> arr = { d->buf };
if ( arr.get_header()->Capacity < usize(new_cap) )
if ( get_header(arr)->Capacity < usize(new_cap) )
{
if ( ! arr.grow( ( s64 )( new_cap ) ) )
if ( ! grow( arr, ( s64 )( new_cap ) ) )
return false;
d->buf = arr;
}
@ -626,7 +626,7 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
mem_copy( d->buf + offset + rwlen, pointer_add_const( buffer, rwlen ), extralen );
d->cap = new_cap;
arr.get_header()->Capacity = new_cap;
get_header(arr)->Capacity = new_cap;
}
else
{
@ -647,7 +647,7 @@ GEN_FILE_CLOSE_PROC( _memory_file_close )
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = { d->buf };
arr.free();
free(arr);
}
free( allocator, d );

View File

@ -23,17 +23,32 @@
#define bitfield_is_equal( Type, Field, Mask ) ( (Type(Mask) & Type(Field)) == Type(Mask) )
#endif
#ifndef ccast
#define ccast( type, value ) ( const_cast< type >( (value) ) )
#endif
#ifndef pcast
#define pcast( type, value ) ( * reinterpret_cast< type* >( & ( value ) ) )
#endif
#ifndef rcast
#define rcast( type, value ) reinterpret_cast< type >( value )
#endif
#ifndef scast
#define scast( type, value ) static_cast< type >( value )
#if ! GEN_COMPILER_C
# ifndef ccast
# define ccast( type, value ) ( const_cast< type >( (value) ) )
# endif
# ifndef pcast
# define pcast( type, value ) ( * reinterpret_cast< type* >( & ( value ) ) )
# endif
# ifndef rcast
# define rcast( type, value ) reinterpret_cast< type >( value )
# endif
# ifndef scast
# define scast( type, value ) static_cast< type >( value )
# endif
#else
# ifndef ccast
# define ccast( type, value ) ( (type)(value) )
# endif
# ifndef pcast
# define pcast( type, value ) ( (type)(value) )
# endif
# ifndef rcast
# define rcast( type, value ) ( (type)(value) )
# endif
# ifndef scast
# define scast( type, value ) ( (type)(value) )
# endif
#endif
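// Hypothetical usage notes (not part of this diff): the same cast spellings are meant
// to behave identically in both variants -- C++ cast operators when GEN_COMPILER_C is 0,
// plain C casts when it is 1. For example:
//   AST*  node = rcast( AST*,  raw_ptr );      // reinterpret_cast<AST*>( raw_ptr )   vs  (AST*)(raw_ptr)
//   ssize len  = scast( ssize, unsigned_val ); // static_cast<ssize>( unsigned_val )  vs  (ssize)(unsigned_val)
//   char* text = ccast( char*, const_cstr );   // const_cast<char*>( const_cstr )     vs  (char*)(const_cstr)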
#ifndef stringize
@ -123,20 +138,20 @@
#define min( a, b ) ( (a < b) ? (a) : (b) )
#endif
#if defined( _MSC_VER ) || defined( GEN_COMPILER_TINYC )
#if GEN_COMPILER_MSVC || GEN_COMPILER_TINYC
# define offset_of( Type, element ) ( ( GEN_NS( ssize ) ) & ( ( ( Type* )0 )->element ) )
#else
# define offset_of( Type, element ) __builtin_offsetof( Type, element )
#endif
#ifndef forceinline
# ifdef GEN_COMPILER_MSVC
# if GEN_COMPILER_MSVC
# define forceinline __forceinline
# define neverinline __declspec( noinline )
# elif defined(GEN_COMPILER_GCC)
# elif GEN_COMPILER_GCC
# define forceinline inline __attribute__((__always_inline__))
# define neverinline __attribute__( ( __noinline__ ) )
# elif defined(GEN_COMPILER_CLANG)
# elif GEN_COMPILER_CLANG
# if __has_attribute(__always_inline__)
# define forceinline inline __attribute__((__always_inline__))
# define neverinline __attribute__( ( __noinline__ ) )
@ -151,11 +166,11 @@
#endif
#ifndef neverinline
# ifdef GEN_COMPILER_MSVC
# if GEN_COMPILER_MSVC
# define neverinline __declspec( noinline )
# elif defined(GEN_COMPILER_GCC)
# elif GEN_COMPILER_GCC
# define neverinline __attribute__( ( __noinline__ ) )
# elif defined(GEN_COMPILER_CLANG)
# elif GEN_COMPILER_CLANG
# if __has_attribute(__always_inline__)
# define neverinline __attribute__( ( __noinline__ ) )
# else
@ -166,4 +181,28 @@
# endif
#endif
#if !defined(GEN_SUPPORT_CPP_MEMBER_FEATURES) && (!GEN_COMPILER_C || __STDC_VERSION__ < 202311L)
# define GEN_SUPPORT_CPP_MEMBER_FEATURES 0
#endif
#if !defined(typeof) && (!GEN_COMPILER_C || __STDC_VERSION__ < 202311L)
# if ! GEN_COMPILER_C
# define typeof decltype
# elif defined(_MSC_VER)
# define typeof(x) __typeof(x)
# elif defined(__GNUC__) || defined(__clang__)
# define typeof(x) __typeof__(x)
# else
# error "Compiler not supported"
# endif
#endif
// This is intended to only really be used internally or with the C-library variant
// C++ users can just use the for-range directly.
#if GEN_COMPILER_C
# define foreach(Type, entry_id, iterable) for ( Type entry_id = begin(iterable); entry_id != end(iterable); entry_id = next(iterable, entry_id) )
#else
# define foreach(Type, entry_id, iterable) for ( Type entry_id : iterable )
#endif
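// For reference (not part of this diff): the call
//   foreach( StringCached, entry, PreprocessorDefines )
// used in the lexer change above expands, under the C variant, to
//   for ( StringCached entry = begin(PreprocessorDefines); entry != end(PreprocessorDefines); entry = next(PreprocessorDefines, entry) )
// and, under the C++ variant, to the range-for
//   for ( StringCached entry : PreprocessorDefines )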
#pragma endregion Macros

View File

@ -14,13 +14,23 @@
#define GEN__HIGHS ( GEN__ONES * ( GEN_U8_MAX / 2 + 1 ) )
#define GEN__HAS_ZERO( x ) ( ( ( x ) - GEN__ONES ) & ~( x ) & GEN__HIGHS )
template< class Type >
void swap( Type& a, Type& b )
{
#if ! GEN_COMPILER_C
template< class Type >
void swap( Type& a, Type& b )
{
Type tmp = a;
a = b;
b = tmp;
}
}
#else
#define swap( a, b ) \
do { \
typeof(a) \
temp = (a); \
(a) = (b); \
(b) = temp; \
} while(0)
#endif
//! Checks if value is power of 2.
b32 is_power_of_two( ssize x );
@ -70,10 +80,7 @@ enum AllocType : u8
EAllocation_RESIZE,
};
using AllocatorProc = void* ( void* allocator_data, AllocType type
, ssize size, ssize alignment
, void* old_memory, ssize old_size
, u64 flags );
typedef void*(AllocatorProc)( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags );
struct AllocatorInfo
{
@ -179,8 +186,8 @@ AllocatorInfo allocator_info( Arena& arena );
void* arena_allocator_proc(void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags);
// Add these declarations after the Arena struct
Arena init_from_allocator(AllocatorInfo backing, ssize size);
Arena init_from_memory( void* start, ssize size );
Arena arena_init_from_allocator(AllocatorInfo backing, ssize size);
Arena arena_init_from_memory( void* start, ssize size );
Arena init_sub(Arena& parent, ssize size);
ssize alignment_of(Arena& arena, ssize alignment);
@ -201,14 +208,14 @@ struct Arena
ssize TotalUsed;
ssize TempCount;
#if 1
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }
forceinline static void* allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags ) { return GEN_NS arena_allocator_proc( allocator_data, type, size, alignment, old_memory, old_size, flags ); }
forceinline static Arena init_from_memory( void* start, ssize size ) { return GEN_NS init_from_memory( start, size ); }
forceinline static Arena init_from_allocator( AllocatorInfo backing, ssize size ) { return GEN_NS init_from_allocator( backing, size ); }
forceinline static Arena init_sub( Arena& parent, ssize size ) { return GEN_NS init_from_allocator( parent.Backing, size ); }
forceinline static Arena init_from_memory( void* start, ssize size ) { return GEN_NS arena_init_from_memory( start, size ); }
forceinline static Arena init_from_allocator( AllocatorInfo backing, ssize size ) { return GEN_NS arena_init_from_allocator( backing, size ); }
forceinline static Arena init_sub( Arena& parent, ssize size ) { return GEN_NS arena_init_from_allocator( parent.Backing, size ); }
forceinline ssize alignment_of( ssize alignment ) { return GEN_NS alignment_of(* this, alignment); }
forceinline void free() { return GEN_NS free(* this); }
forceinline ssize size_remaining( ssize alignment ) { return GEN_NS size_remaining(* this, alignment); }
@ -229,7 +236,7 @@ AllocatorInfo allocator_info( Arena& arena ) {
}
inline
Arena init_from_memory( void* start, ssize size )
Arena arena_init_from_memory( void* start, ssize size )
{
Arena arena = {
{ nullptr, nullptr },
@ -242,10 +249,8 @@ Arena init_from_memory( void* start, ssize size )
}
inline
Arena init_from_allocator(AllocatorInfo backing, ssize size)
{
Arena result =
{
Arena arena_init_from_allocator(AllocatorInfo backing, ssize size) {
Arena result = {
backing,
alloc(backing, size),
size,
@ -256,9 +261,8 @@ Arena init_from_allocator(AllocatorInfo backing, ssize size)
}
inline
Arena init_sub(Arena& parent, ssize size)
{
return init_from_allocator(parent.Backing, size);
Arena init_sub(Arena& parent, ssize size) {
return arena_init_from_allocator(parent.Backing, size);
}
inline
@ -291,7 +295,7 @@ void free(Arena& arena)
{
if (arena.Backing.Proc)
{
gen::free(arena.Backing, arena.PhysicalStart);
GEN_NS free(arena.Backing, arena.PhysicalStart);
arena.PhysicalStart = nullptr;
}
}
@ -320,7 +324,7 @@ struct FixedArena
char memory[Size];
Arena arena;
#if 1
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }
@ -336,7 +340,7 @@ AllocatorInfo allocator_info( FixedArena<Size>& fixed_arena ) { return { arena_a
template<s32 Size> inline
void fixed_arena_init(FixedArena<Size>& result) {
zero_size(& result.memory[0], Size);
result.arena = init_from_memory(& result.memory[0], Size);
result.arena = arena_init_from_memory(& result.memory[0], Size);
}
template<s32 Size> inline
@ -378,6 +382,7 @@ struct Pool
ssize TotalSize;
ssize NumBlocks;
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }
@ -387,6 +392,7 @@ struct Pool
forceinline void clear() { GEN_NS clear(* this); }
forceinline void free() { GEN_NS free(* this); }
#pragma endregion
#endif
};
inline

View File

@ -23,7 +23,7 @@ u8 adt_make_branch( ADT_Node* node, AllocatorInfo backing, char const* name, b32
node->type = type;
node->name = name;
node->parent = parent;
node->nodes = Array<ADT_Node>::init( backing );
node->nodes = array_init<ADT_Node>( backing );
if ( ! node->nodes )
return EADT_ERROR_OUT_OF_MEMORY;
@ -36,12 +36,12 @@ u8 adt_destroy_branch( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node );
if ( ( node->type == EADT_TYPE_OBJECT || node->type == EADT_TYPE_ARRAY ) && node->nodes )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); ++i )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); ++i )
{
adt_destroy_branch( node->nodes + i );
}
node->nodes.free();
free(node->nodes);
}
return 0;
}
@ -66,7 +66,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
return NULL;
}
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -76,7 +76,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
if ( deep_search )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* res = adt_find( node->nodes + i, name, deep_search );
@ -132,7 +132,7 @@ internal ADT_Node* _adt_get_value( ADT_Node* node, char const* value )
internal ADT_Node* _adt_get_field( ADT_Node* node, char* name, char* value )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -207,7 +207,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* run a value comparison against any child that is an object node */
else if ( node->type == EADT_TYPE_ARRAY )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( child->type != EADT_TYPE_OBJECT )
@ -225,7 +225,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* [value] */
else
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( _adt_get_value( child, l_b2 ) )
@ -257,7 +257,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
else
{
ssize idx = ( ssize )str_to_i64( buf, NULL, 10 );
if ( idx >= 0 && idx < scast(ssize, node->nodes.num()) )
if ( idx >= 0 && idx < scast(ssize, num(node->nodes)) )
{
found_node = &node->nodes[ idx ];
@ -282,12 +282,12 @@ ADT_Node* adt_alloc_at( ADT_Node* parent, ssize index )
if ( ! parent->nodes )
return NULL;
if ( index < 0 || index > scast(ssize, parent->nodes.num()) )
if ( index < 0 || index > scast(ssize, num(parent->nodes)) )
return NULL;
ADT_Node o = { 0 };
o.parent = parent;
if ( ! parent->nodes.append_at( o, index ) )
if ( ! append_at( parent->nodes, o, index ) )
return NULL;
return parent->nodes + index;
@ -303,7 +303,7 @@ ADT_Node* adt_alloc( ADT_Node* parent )
if ( ! parent->nodes )
return NULL;
return adt_alloc_at( parent, parent->nodes.num() );
return adt_alloc_at( parent, num(parent->nodes) );
}
b8 adt_set_obj( ADT_Node* obj, char const* name, AllocatorInfo backing )
@ -357,7 +357,7 @@ ADT_Node* adt_move_node( ADT_Node* node, ADT_Node* new_parent )
GEN_ASSERT_NOT_NULL( node );
GEN_ASSERT_NOT_NULL( new_parent );
GEN_ASSERT( new_parent->type == EADT_TYPE_ARRAY || new_parent->type == EADT_TYPE_OBJECT );
return adt_move_node_at( node, new_parent, new_parent->nodes.num() );
return adt_move_node_at( node, new_parent, num(new_parent->nodes) );
}
void adt_swap_nodes( ADT_Node* node, ADT_Node* other_node )
@ -381,7 +381,7 @@ void adt_remove_node( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node->parent );
ADT_Node* parent = node->parent;
ssize index = ( pointer_diff( parent->nodes, node ) / size_of( ADT_Node ) );
parent->nodes.remove_at( index );
remove_at( parent->nodes, index );
}
ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
@ -389,7 +389,7 @@ ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_obj( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_obj( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -402,7 +402,7 @@ ADT_Node* adt_append_arr( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_arr( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_arr( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -946,12 +946,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
}
if ( columnIndex >= scast(ssize, root->nodes.num()) )
if ( columnIndex >= scast(ssize, num(root->nodes)) )
{
adt_append_arr( root, NULL );
}
root->nodes[ columnIndex ].nodes.append( rowItem );
append(root->nodes[ columnIndex ].nodes, rowItem );
if ( delimiter == delim )
{
@ -979,7 +979,7 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
while ( *currentChar );
if ( root->nodes.num() == 0 )
if ( num(root->nodes) == 0 )
{
GEN_CSV_ASSERT( "unexpected end of input. stream is empty." );
error = ECSV_Error__UNEXPECTED_END_OF_INPUT;
@ -989,12 +989,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
/* consider first row as a header. */
if ( has_header )
{
for ( ssize i = 0; i < scast(ssize, root->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(root->nodes)); i++ )
{
CSV_Object* col = root->nodes + i;
CSV_Object* hdr = col->nodes;
col->name = hdr->string;
col->nodes.remove_at( 0 );
remove_at(col->nodes, 0 );
}
}
@ -1057,11 +1057,11 @@ void csv_write_delimiter( FileInfo* file, CSV_Object* obj, char delimiter )
GEN_ASSERT_NOT_NULL( file );
GEN_ASSERT_NOT_NULL( obj );
GEN_ASSERT( obj->nodes );
ssize cols = obj->nodes.num();
ssize cols = num(obj->nodes);
if ( cols == 0 )
return;
ssize rows = obj->nodes[ 0 ].nodes.num();
ssize rows = num(obj->nodes[ 0 ].nodes);
if ( rows == 0 )
return;
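All of the parsing.cpp hunks above apply one mechanical change: the Array container's C++ member calls (.num(), .append_at(), .remove_at(), .get_header()) are replaced with equivalent free functions so the file can also compile as C. A minimal sketch of the resulting call style, reusing only the overloads that appear in the hunks above; the wrapping function itself is illustrative and not part of the commit:

// Sketch only: demonstrates the free-function call style adopted by this commit.
void example_pattern( ADT_Node* parent, ssize index )
{
	ADT_Node o = { 0 };
	o.parent = parent;

	// was: parent->nodes.append_at( o, index ); parent->nodes.num(); parent->nodes.remove_at( idx );
	append_at( parent->nodes, o, index );
	ssize count = scast(ssize, num( parent->nodes ));
	remove_at( parent->nodes, count - 1 );
}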

View File

@ -1,3 +1,5 @@
#define GEN_SUPPORT_CPP_MEMBER_FEATURES 1
#ifdef GEN_INTELLISENSE_DIRECTIVES
# pragma once
#endif
@ -101,6 +103,14 @@
# define GEN_GCC_VERSION_CHECK(major,minor,patch) (0)
#endif
#ifndef GEN_COMPILER_C
# if defined(__STDC_VERSION__)
# define GEN_COMPILER_C 1
# else
# define GEN_COMPILER_C 0
# endif
#endif
#pragma endregion Platform Detection
#pragma region Mandatory Includes
@ -114,7 +124,7 @@
#pragma endregion Mandatory Includes
#ifdef GEN_DONT_USE_NAMESPACE
#if GEN_DONT_USE_NAMESPACE || GEN_COMPILER_C
# define GEN_NS
# define GEN_NS_BEGIN
# define GEN_NS_END

View File

@ -11,9 +11,9 @@ using namespace gen;
CodeBody gen_ecode( char const* path )
{
char scratch_mem[kilobytes(1)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -57,9 +57,9 @@ CodeBody gen_ecode( char const* path )
CodeBody gen_eoperator( char const* path )
{
char scratch_mem[kilobytes(4)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -70,7 +70,7 @@ CodeBody gen_eoperator( char const* path )
String enum_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
String to_str_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = str_strs [idx].string;
@ -113,9 +113,9 @@ CodeBody gen_eoperator( char const* path )
CodeBody gen_especifier( char const* path )
{
char scratch_mem[kilobytes(4)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -126,7 +126,7 @@ CodeBody gen_especifier( char const* path )
String enum_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
String to_str_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = str_strs [idx].string;
@ -218,14 +218,16 @@ CodeBody gen_especifier( char const* path )
CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
{
char scratch_mem[kilobytes(16)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
FileContents enum_content = file_read_contents( scratch, zero_terminate, etok_path );
AllocatorInfo scratch_info = allocator_info(scratch);
FileContents enum_content = file_read_contents( scratch_info, zero_terminate, etok_path );
CSV_Object csv_enum_nodes;
csv_parse( &csv_enum_nodes, rcast(char*, enum_content.data), GlobalAllocator, false );
FileContents attrib_content = file_read_contents( scratch, zero_terminate, attr_path );
FileContents attrib_content = file_read_contents( scratch_info, zero_terminate, attr_path );
CSV_Object csv_attr_nodes;
csv_parse( &csv_attr_nodes, rcast(char*, attrib_content.data), GlobalAllocator, false );
@ -241,7 +243,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
String to_str_attributes = String::make_reserve( GlobalAllocator, kilobytes(4) );
String attribute_define_entries = String::make_reserve( GlobalAllocator, kilobytes(4) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = enum_str_strs [idx].string;
@ -250,7 +252,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
to_str_entries.append_fmt( "{ sizeof(\"%s\"), \"%s\" },\n", entry_to_str, entry_to_str);
}
for ( usize idx = 0; idx < attribute_strs.num(); idx++ )
for ( usize idx = 0; idx < num(attribute_strs); idx++ )
{
char const* attribute_str = attribute_strs[idx].string;
char const* entry_to_str = attribute_str_strs [idx].string;
@ -259,7 +261,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
to_str_attributes.append_fmt( "{ sizeof(\"%s\"), \"%s\" },\n", entry_to_str, entry_to_str);
attribute_define_entries.append_fmt( "Entry( Attribute_%s, \"%s\" )", attribute_str, entry_to_str );
if ( idx < attribute_strs.num() - 1 )
if ( idx < num(attribute_strs) - 1 )
attribute_define_entries.append( " \\\n");
else
attribute_define_entries.append( "\n");
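The helper hunks above repeat the same conversion inside the metaprogram helpers: Arena::init_from_memory becomes arena_init_from_memory, file_read_contents now receives an AllocatorInfo obtained via allocator_info(scratch) instead of the Arena itself, and the count loops switch to num(...). A condensed sketch of the updated scratch-buffer pattern, using only calls that appear in the diff; the wrapping function and its path parameter are illustrative:

// Sketch only: condensed form of the pattern used by gen_ecode / gen_especifier / gen_etoktype above.
void example_read_csv( char const* path )
{
	char  scratch_mem[ kilobytes(4) ];
	Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );

	FileContents content = file_read_contents( allocator_info(scratch), zero_terminate, path );

	CSV_Object csv_nodes;
	csv_parse( &csv_nodes, rcast(char*, content.data), GlobalAllocator, false );
}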

View File

@ -0,0 +1,10 @@
#pragma once
#include "gen.hpp"
GEN_NS_BEGIN
#include "dependencies/parsing.hpp"
GEN_NS_END
using namespace gen;

View File

@ -44,6 +44,7 @@ Push-Location $path_root
$verbose = $false
[bool] $bootstrap = $false
[bool] $singleheader = $false
[bool] $c_library = $false
[bool] $unreal = $false
[bool] $test = $false
@ -59,6 +60,7 @@ if ( $args ) { $args | ForEach-Object {
"debug" { $release = $false }
"bootstrap" { $bootstrap = $true }
"singleheader" { $singleheader = $true }
"c_library" { $c_library = $true }
"unreal" { $unreal = $true }
"test" { $test = $true }
}
@ -88,7 +90,7 @@ else {
$optimize = $true
}
if ( $bootstrap -eq $false -and $singleheader -eq $false -and $unreal -eq $false -and $test -eq $false ) {
if ( $bootstrap -eq $false -and $singleheader -eq $false -and $c_library -eq $false -and $unreal -eq $false -and $test -eq $false ) {
throw "No build target specified. One must be specified, this script will not assume one"
}
@ -103,8 +105,9 @@ write-host "Build Type: $(if ($release) {"Release"} else {"Debug"} )"
$path_build = Join-Path $path_root build
$path_project = Join-Path $path_root project
$path_scripts = Join-Path $path_root scripts
$path_singleheader = Join-Path $path_root singleheader
$path_unreal = Join-Path $path_root unreal_engine
$path_c_library = join-Path $path_root gen_c_library
$path_singleheader = Join-Path $path_root gen_singleheader
$path_unreal = Join-Path $path_root gen_unreal_engine
$path_test = Join-Path $path_root test
if ( $bootstrap )
@ -187,6 +190,44 @@ if ( $singleheader )
Pop-Location
}
if ( $c_library )
{
$path_build = join-path $path_c_library build
$path_gen = join-path $path_c_library gen
if ( -not(Test-Path($path_build) )) {
New-Item -ItemType Directory -Path $path_build
}
if ( -not(Test-Path($path_gen) )) {
New-Item -ItemType Directory -Path $path_gen
}
$includes = @( $path_project )
$unit = join-path $path_c_library "c_library.cpp"
$executable = join-path $path_build "c_library.exe"
$compiler_args = @()
$compiler_args += ( $flag_define + 'GEN_TIME' )
$linker_args = @(
$flag_link_win_subsystem_console
)
build-simple $path_build $includes $compiler_args $linker_args $unit $executable
Push-Location $path_c_library
if ( Test-Path( $executable ) ) {
write-host "`nRunning c_library generator"
$time_taken = Measure-Command { & $executable
| ForEach-Object {
write-host `t $_ -ForegroundColor Green
}
}
write-host "`nc_library generator completed in $($time_taken.TotalMilliseconds) ms"
}
Pop-Location
}
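With this block in place the new target is selected the same way as the existing ones: assuming the CI build script under scripts/ is invoked directly from PowerShell (the script name is not shown here), passing c_library, optionally alongside debug, builds the generator with GEN_TIME defined, runs it from gen_c_library, and leaves its output under gen_c_library/gen.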
if ( $unreal )
{
$path_build = join-path $path_unreal build

View File

@ -0,0 +1,24 @@
__VERSION 1
// This is an example template to be used with the refactor program.
// Use it to refactor the naming convention of this library to your own.
// It can also serve as an aid when substituting your project's own implementation, provided it fulfills the dependencies of this project.
// Example: you most likely already have a memory and string library; just rename the functions and make sure the args are the same.
// Program: https://github.com/Ed94/refactor
// NOTE: Due to the current limitations of the program, not every symbol in the library can be renamed.
// This is due to the program not actually parsing C/C++.
// not : Ignore
// include : #includes
// word : Alphanumeric or underscore
// namespace : Prefix search and replace (c-namespaces).
// regex : Unavailable in __VERSION 1.
// Precedence (highest to lowest):
// word, namespace, regex
// Gen Macro namespace
// namespace GEN_, new_namespace_
// TODO(Ed): This will be large as nearly all symbols will need to optionally support getting prefixed with gen_ or something else the user wants.
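// As a purely hypothetical illustration (not part of the committed template), a project-specific
// prefix would be expressed with the same form as the namespace entry above, e.g.:
// namespace gen_, my_prefix_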

View File