15 Commits

Author SHA1 Message Date
Ed_
31a3609b28 some fixes to c's fixed_arena gen 2024-11-30 23:48:14 -05:00
Ed_
fbdb870986 Finished first pass reviewing memory.hpp for C lib generation 2024-11-30 23:38:27 -05:00
Ed_
6d04165b96 Reduce cpp feature usage of Array container.
Almost ready to be interoperable with C
2024-11-30 18:54:19 -05:00
Ed_
cc245cc263 new files 2024-11-30 17:22:06 -05:00
Ed_
06deb1e836 memory.hpp no longer uses memory mappings by default 2024-11-30 17:18:49 -05:00
Ed_
5527a27f7b prepare c_library meta-program a bit 2024-11-30 16:54:03 -05:00
Ed_
a67fdef20a dir restructuring
just making it more organized (gen_ prefix for library generation meta-programs)
2024-11-30 16:50:53 -05:00
Ed_
056a5863b8 for the future... 2024-11-30 14:34:28 -05:00
Ed_
79eb5f1f76 strings done 2024-11-30 14:13:30 -05:00
Ed_
c6cb583518 Hashtable done 2024-11-30 13:31:59 -05:00
Ed_
34eec66f35 Array done 2024-11-30 13:14:47 -05:00
Ed_
4137ebfbd8 pool done (see previous commits for context) 2024-11-30 12:27:54 -05:00
Ed_
5958dd2055 Did arena and fixedarena changes (for reducing usage of member procs) 2024-11-30 12:16:01 -05:00
Ed_
163ad0a511 looking into removing "oop" features from base library
I want to make member functions an optional addition that the user can generate a derivative library with.
The purpose is to simplify the implementation so as to make generating a C variant simpler.

I also want to use it as a study to see how much simpler the library is without them.
2024-11-29 15:18:06 -05:00
Ed_
e3c2a577ba added String::contains defs 2024-11-29 14:50:54 -05:00
37 changed files with 2080 additions and 1259 deletions

1
.gitignore vendored
View File

@ -31,3 +31,4 @@ project/auxillary/vis_ast/dependencies/temp
test/gen/original
singleheader/gen/scratch.hpp
test/gen/scratch.cpp
gen_c_library/gen

View File

@ -37,7 +37,9 @@
"propidl.h": "c",
"android_native_app_glue.h": "c",
"raylib.h": "c",
"*.m": "cpp"
"*.m": "cpp",
"atomic": "cpp",
"gen.h": "c"
},
"C_Cpp.intelliSenseEngineFallback": "disabled",
"mesonbuild.configureOnOpen": true,

200
gen_c_library/c_library.cpp Normal file
View File

@ -0,0 +1,200 @@
#define GEN_DEFINE_LIBRARY_CODE_CONSTANTS
#define GEN_ENFORCE_STRONG_CODE_TYPES
#define GEN_EXPOSE_BACKEND
#include "../project/gen.cpp"
#include "helpers/push_ignores.inline.hpp"
#include "helpers/helper.hpp"
GEN_NS_BEGIN
#include "dependencies/parsing.cpp"
GEN_NS_END
#include "auxillary/builder.hpp"
#include "auxillary/builder.cpp"
#include "auxillary/scanner.hpp"
#include <cstdlib> // for system()
#include "components/memory.fixed_arena.hpp"
#include "components/misc.hpp"
using namespace gen;
constexpr char const* generation_notice =
"// This file was generated automatially by gencpp's c_library.cpp"
"(See: https://github.com/Ed94/gencpp)\n\n";
constexpr StrC roll_own_dependencies_guard_start = txt(R"(
//! If it's desired to roll your own dependencies, define GEN_ROLL_OWN_DEPENDENCIES before including this file.
// Dependencies are derived from the c-zpl library: https://github.com/zpl-c/zpl
#ifndef GEN_ROLL_OWN_DEPENDENCIES
)");
constexpr StrC roll_own_dependencies_guard_end = txt(R"(
// GEN_ROLL_OWN_DEPENDENCIES
#endif
)");
constexpr StrC implementation_guard_start = txt(R"(
#pragma region GENCPP IMPLEMENTATION GUARD
#if defined(GEN_IMPLEMENTATION) && ! defined(GEN_IMPLEMENTED)
# define GEN_IMPLEMENTED
)");
constexpr StrC implementation_guard_end = txt(R"(
#endif
#pragma endregion GENCPP IMPLEMENTATION GUARD
)");
void format_file( char const* path )
{
String resolved_path = String::make(GlobalAllocator, to_str(path));
String style_arg = String::make(GlobalAllocator, txt("-style=file:"));
style_arg.append("../scripts/.clang-format ");
// Need to execute clang format on the generated file to get it to match the original.
#define clang_format "clang-format "
#define cf_format_inplace "-i "
#define cf_verbose "-verbose "
String command = String::make( GlobalAllocator, clang_format );
command.append( cf_format_inplace );
command.append( cf_verbose );
command.append( style_arg );
command.append( resolved_path );
log_fmt("\tRunning clang-format on file:\n");
system( command );
log_fmt("\tclang-format finished reformatting.\n");
#undef clang_format
#undef cf_format_inplace
#undef cf_verbose
}
Code dump_to_scratch_and_retireve( Code code )
{
Builder ecode_file_temp = Builder::open("gen/scratch.hpp");
ecode_file_temp.print(code);
ecode_file_temp.write();
format_file("gen/scratch.hpp");
Code result = scan_file( "gen/scratch.hpp" );
remove("gen/scratch.hpp");
return result;
}
CodeBody parse_file( const char* path )
{
FileContents file = file_read_contents( GlobalAllocator, true, path );
CodeBody code = parse_global_body( { file.size, (char const*)file.data } );
return code;
}
int gen_main()
{
#define project_dir "../project/"
gen::init();
Code push_ignores = scan_file( project_dir "helpers/push_ignores.inline.hpp" );
Code pop_ignores = scan_file( project_dir "helpers/pop_ignores.inline.hpp" );
Code c_library_header_start = scan_file( "components/header_start.hpp" );
Builder
header = Builder::open( "gen/gen.h" );
header.print_fmt( generation_notice );
header.print_fmt("#pragma once\n\n");
header.print( push_ignores );
// Headers
{
header.print( c_library_header_start );
Code platform = scan_file( project_dir "dependencies/platform.hpp" );
Code macros = scan_file( project_dir "dependencies/macros.hpp" );
Code basic_types = scan_file( project_dir "dependencies/basic_types.hpp" );
Code debug = scan_file( project_dir "dependencies/debug.hpp" );
CodeBody parsed_memory = parse_file( project_dir "dependencies/memory.hpp" );
CodeBody memory = def_body(ECode::Struct_Body);
for ( Code entry = parsed_memory.begin(); entry != parsed_memory.end(); ++ entry )
{
switch (entry->Type)
{
case ECode::Using:
{
log_fmt("REPLACE THIS MANUALLY: %S\n", entry->Name);
CodeUsing using_ver = entry.cast<CodeUsing>();
CodeTypedef typedef_ver = def_typedef(using_ver->Name, using_ver->UnderlyingType);
memory.append(typedef_ver);
}
break;
case ECode::Function:
{
CodeFn fn = entry.cast<CodeFn>();
s32 constexpr_found = fn->Specs.remove( ESpecifier::Constexpr );
if (constexpr_found > -1) {
log_fmt("Found constexpr proc\n");
fn->Specs.append(ESpecifier::Inline);
}
memory.append(entry);
}
break;
case ECode::Class:
case ECode::Struct:
{
CodeBody body = entry->Body->operator CodeBody();
CodeBody new_body = def_body( entry->Body->Type );
for ( Code body_entry = body.begin(); body_entry != body.end(); ++ body_entry ) switch
(body_entry->Type) {
case ECode::Preprocess_If:
{
ignore_preprocess_cond_block(txt("GEN_SUPPORT_CPP_MEMBER_FEATURES"), body_entry, body );
}
break;
default:
new_body.append(body_entry);
break;
}
entry->Body = rcast(AST*, new_body.ast);
memory.append(entry);
}
break;
case ECode::Preprocess_If:
{
ignore_preprocess_cond_block(txt("GEN_SUPPORT_CPP_MEMBER_FEATURES"), entry, memory );
}
break;
case ECode::Preprocess_Pragma:
{
swap_pragma_region_implementation( txt("FixedArena"), gen_fixed_arenas, entry, memory);
}
break;
default: {
memory.append(entry);
}
break;
}
}
header.print_fmt( roll_own_dependencies_guard_start );
header.print( platform );
header.print_fmt( "\nGEN_NS_BEGIN\n" );
header.print( macros );
header.print( basic_types );
header.print( debug );
header.print( memory );
}
header.print( pop_ignores );
header.write();
format_file( "gen/gen.h" );
gen::deinit();
return 0;
#undef project_dir
}

View File

@ -0,0 +1,14 @@
/*
gencpp: An attempt at "simple" staged metaprogramming for c/c++.
See Readme.md for more information from the project repository.
Public Address:
https://github.com/Ed94/gencpp
This is a single header C-Library variant.
Define GEN_IMPLEMENTATION before including this file in a single compilation unit.
*/
#if ! defined(GEN_DONT_ENFORCE_GEN_TIME_GUARD) && ! defined(GEN_TIME)
# error Gen.hpp : GEN_TIME not defined
#endif
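For context, here is a minimal consumer-side sketch (not part of the repository) of how the finished single-header output is meant to be pulled in, based on the GEN_TIME guard above and the implementation guard emitted by c_library.cpp; the include path is an assumption.
// Exactly one translation unit defines GEN_IMPLEMENTATION; every other TU includes gen.h plainly.
#define GEN_TIME                      // satisfies the GEN_TIME guard above
#define GEN_IMPLEMENTATION            // expands the GENCPP IMPLEMENTATION GUARD region added by c_library.cpp
// #define GEN_ROLL_OWN_DEPENDENCIES  // optionally opt out of the bundled zpl-derived dependencies
#include "gen/gen.h"                  // generated single-header output (path assumed)
int main()
{
	// library usage goes here
	return 0;
}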

View File

@ -0,0 +1,119 @@
#pragma once
#include "../project/gen.hpp"
using namespace gen;
CodeBody gen_fixed_arenas()
{
CodeBody result = def_body(ECode::Global_Body);
char const* template_struct = stringize(
struct FixedArena_<Name>
{
char memory[<Size>];
Arena arena;
};
);
char const* template_interface = stringize(
inline
void fixed_arena_init_<Name>(FixedArena_<Name>* result) {
zero_size(& result->memory[0], <Size>);
result->arena = arena_init_from_memory(& result->memory[0], <Size>);
}
inline
ssize fixed_arena_size_remaining_<Name>(FixedArena_<Name>* fixed_arena, ssize alignment) {
return size_remaining(fixed_arena->arena, alignment);
}
);
CodeStruct arena_struct_1kb = parse_struct( token_fmt_impl( 3, "Name", txt("1KB"), "Size", txt("kilobytes(1)"), template_struct ));
CodeStruct arena_struct_4kb = parse_struct( token_fmt_impl( 3, "Name", txt("4KB"), "Size", txt("kilobytes(4)"), template_struct ));
CodeStruct arena_struct_8kb = parse_struct( token_fmt_impl( 3, "Name", txt("8KB"), "Size", txt("kilobytes(8)"), template_struct ));
CodeStruct arena_struct_16kb = parse_struct( token_fmt_impl( 3, "Name", txt("16KB"), "Size", txt("kilobytes(16)"), template_struct ));
CodeStruct arena_struct_32kb = parse_struct( token_fmt_impl( 3, "Name", txt("32KB"), "Size", txt("kilobytes(32)"), template_struct ));
CodeStruct arena_struct_64kb = parse_struct( token_fmt_impl( 3, "Name", txt("64KB"), "Size", txt("kilobytes(64)"), template_struct ));
CodeStruct arena_struct_128kb = parse_struct( token_fmt_impl( 3, "Name", txt("128KB"), "Size", txt("kilobytes(128)"), template_struct ));
CodeStruct arena_struct_256kb = parse_struct( token_fmt_impl( 3, "Name", txt("256KB"), "Size", txt("kilobytes(256)"), template_struct ));
CodeStruct arena_struct_512kb = parse_struct( token_fmt_impl( 3, "Name", txt("512KB"), "Size", txt("kilobytes(512)"), template_struct ));
CodeStruct arena_struct_1mb = parse_struct( token_fmt_impl( 3, "Name", txt("1MB"), "Size", txt("megabytes(1)"), template_struct ));
CodeStruct arena_struct_2mb = parse_struct( token_fmt_impl( 3, "Name", txt("2MB"), "Size", txt("megabytes(2)"), template_struct ));
CodeStruct arena_struct_4mb = parse_struct( token_fmt_impl( 3, "Name", txt("4MB"), "Size", txt("megabytes(4)"), template_struct ));
CodeBody arena_interface_1kb = parse_global_body( token_fmt_impl( 3, "Name", txt("1KB"), "Size", txt("kilobytes(1)"), template_interface ));
CodeBody arena_interface_4kb = parse_global_body( token_fmt_impl( 3, "Name", txt("4KB"), "Size", txt("kilobytes(4)"), template_interface ));
CodeBody arena_interface_8kb = parse_global_body( token_fmt_impl( 3, "Name", txt("8KB"), "Size", txt("kilobytes(8)"), template_interface ));
CodeBody arena_interface_16kb = parse_global_body( token_fmt_impl( 3, "Name", txt("16KB"), "Size", txt("kilobytes(16)"), template_interface ));
CodeBody arena_interface_32kb = parse_global_body( token_fmt_impl( 3, "Name", txt("32KB"), "Size", txt("kilobytes(32)"), template_interface ));
CodeBody arena_interface_64kb = parse_global_body( token_fmt_impl( 3, "Name", txt("64KB"), "Size", txt("kilobytes(64)"), template_interface ));
CodeBody arena_interface_128kb = parse_global_body( token_fmt_impl( 3, "Name", txt("128KB"), "Size", txt("kilobytes(128)"), template_interface ));
CodeBody arena_interface_256kb = parse_global_body( token_fmt_impl( 3, "Name", txt("256KB"), "Size", txt("kilobytes(256)"), template_interface ));
CodeBody arena_interface_512kb = parse_global_body( token_fmt_impl( 3, "Name", txt("512KB"), "Size", txt("kilobytes(512)"), template_interface ));
CodeBody arena_interface_1mb = parse_global_body( token_fmt_impl( 3, "Name", txt("1MB"), "Size", txt("megabytes(1)"), template_interface ));
CodeBody arena_interface_2mb = parse_global_body( token_fmt_impl( 3, "Name", txt("2MB"), "Size", txt("megabytes(2)"), template_interface ));
CodeBody arena_interface_4mb = parse_global_body( token_fmt_impl( 3, "Name", txt("4MB"), "Size", txt("megabytes(4)"), template_interface ));
result.append(arena_struct_1kb);
result.append(arena_struct_4kb);
result.append(arena_struct_8kb);
result.append(arena_struct_16kb);
result.append(arena_struct_32kb);
result.append(arena_struct_64kb);
result.append(arena_struct_128kb);
result.append(arena_struct_256kb);
result.append(arena_struct_512kb);
result.append(arena_struct_1mb);
result.append(arena_struct_2mb);
result.append(arena_struct_4mb);
result.append(arena_interface_1kb);
result.append(arena_interface_4kb);
result.append(arena_interface_8kb);
result.append(arena_interface_16kb);
result.append(arena_interface_32kb);
result.append(arena_interface_64kb);
result.append(arena_interface_128kb);
result.append(arena_interface_256kb);
result.append(arena_interface_512kb);
result.append(arena_interface_1mb);
result.append(arena_interface_2mb);
result.append(arena_interface_4mb);
CodeDefine def = def_define(txt("fixed_arena_allocator_info(fixed_arena)"), code({ arena_allocator_proc, & fixed_arena.arena }) );
result.append(def);
result.append(parse_global_body(txt(R"(
#define fixed_arena_init(expr) _Generic((expr), \
FixedArena_1KB* : fixed_arena_init_1KB, \
FixedArena_4KB* : fixed_arena_init_4KB, \
FixedArena_8KB* : fixed_arena_init_8KB, \
FixedArena_16KB* : fixed_arena_init_16KB, \
FixedArena_32KB* : fixed_arena_init_32KB, \
FixedArena_64KB* : fixed_arena_init_64KB, \
FixedArena_128KB* : fixed_arena_init_128KB, \
FixedArena_256KB* : fixed_arena_init_256KB, \
FixedArena_512KB* : fixed_arena_init_512KB, \
FixedArena_1MB* : fixed_arena_init_1MB, \
FixedArena_2MB* : fixed_arena_init_2MB, \
FixedArena_4MB* : fixed_arena_init_4MB \
)(expr)
#define fixed_arena_size_remaining(expr, alignment) _Generic((expr), \
FixedArena_1KB* : fixed_arena_size_remaining_1KB, \
FixedArena_4KB* : fixed_arena_size_remaining_4KB, \
FixedArena_8KB* : fixed_arena_size_remaining_8KB, \
FixedArena_16KB* : fixed_arena_size_remaining_16KB, \
FixedArena_32KB* : fixed_arena_size_remaining_32KB, \
FixedArena_64KB* : fixed_arena_size_remaining_64KB, \
FixedArena_128KB* : fixed_arena_size_remaining_128KB, \
FixedArena_256KB* : fixed_arena_size_remaining_256KB, \
FixedArena_512KB* : fixed_arena_size_remaining_512KB, \
FixedArena_1MB* : fixed_arena_size_remaining_1MB, \
FixedArena_2MB* : fixed_arena_size_remaining_2MB, \
FixedArena_4MB* : fixed_arena_size_remaining_4MB \
)(expr, alignment)
)"
)));
return result;
}
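As an aside, a small sketch (not from the repository) of what consuming the generated fixed-arena helpers could look like; the suffixed function names, the _Generic macros, and the fixed_arena_allocator_info define all come from gen_fixed_arenas above, while the include path and the alignment value are assumptions.
#define GEN_TIME
#include "gen/gen.h"   // header generated by c_library.cpp (path assumed)
void fixed_arena_example()
{
	FixedArena_1KB scratch;
	fixed_arena_init_1KB( & scratch );  // the C11 _Generic macro allows plain fixed_arena_init(& scratch)
	ssize remaining = fixed_arena_size_remaining_1KB( & scratch, 8 );  // 8-byte alignment chosen for the sketch
	AllocatorInfo info = fixed_arena_allocator_info( scratch );        // expands to { arena_allocator_proc, & scratch.arena }
	(void) remaining; (void) info;
}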

View File

@ -0,0 +1,61 @@
// #pragma once
// #include "../project/gen.hpp"
// using namespace gen;
using SwapContentProc = CodeBody(void);
b32 ignore_preprocess_cond_block( StrC cond_sig, Code& entry_iter, CodeBody& body )
{
CodePreprocessCond cond = entry_iter.cast<CodePreprocessCond>();
if ( cond->Content.contains(cond_sig) )
{
s32 depth = 1;
++ entry_iter; for(b32 continue_for = true; continue_for && entry_iter != body.end(); ++ entry_iter) switch
(entry_iter->Type) {
case ECode::Preprocess_If:
case ECode::Preprocess_IfDef:
case ECode::Preprocess_IfNotDef:
depth ++;
break;
case ECode::Preprocess_EndIf:
{
depth --;
if (depth == 0) {
continue_for = false;
}
}
break;
}
}
return entry_iter != body.end();
}
void swap_pragma_region_implementation( StrC region_name, SwapContentProc* swap_content, Code& entry_iter, CodeBody& body )
{
CodePragma possible_region = entry_iter.cast<CodePragma>();
String region_sig = string_fmt_buf(GlobalAllocator, "region %s", region_name.Ptr);
String endregion_sig = string_fmt_buf(GlobalAllocator, "endregion %s", region_name.Ptr);
if ( possible_region->Content.contains(region_sig))
{
body.append(possible_region);
body.append(swap_content());
++ entry_iter; for(b32 continue_for = true; continue_for; ++entry_iter) switch
(entry_iter->Type) {
case ECode::Preprocess_Pragma:
{
CodePragma possible_end_region = entry_iter.cast<CodePragma>();
if ( possible_end_region->Content.contains(endregion_sig) ) {
body.append(possible_end_region);
continue_for = false;
}
}
break;
}
}
body.append(entry_iter);
}

View File

@ -1,4 +1,4 @@
# Singleheader
Creates a single header file version of the library using `gen.singleheader.cpp`.
Creates a single header file version of the library using `singleheader.cpp`.
Follows the same convention seen in the gb, stb, and zpl libraries.

View File

@ -12,12 +12,3 @@
#if ! defined(GEN_DONT_ENFORCE_GEN_TIME_GUARD) && ! defined(GEN_TIME)
# error Gen.hpp : GEN_TIME not defined
#endif
#ifdef GEN_DONT_USE_NAMESPACE
# define GEN_NS_BEGIN
# define GEN_NS_END
#else
# define GEN_NS_BEGIN namespace gen {
# define GEN_NS_END }
#endif

View File

@ -0,0 +1,23 @@
#ifdef GEN_INTELLISENSE_DIRECTIVES
# pragma once
# include "../gen.hpp"
#endif
/*
Explicitly generates a resolved definition of a cpp template definition.
TODO(Ed): Needs implementing for the C-library variant.
TODO(Ed): We need a non-<token> syntax substitution implementation for Strings for this to work. It must substitute keywords directly based on template parameter names.
This is only meant to be used on relatively trivial templates, where the type or numeric is mostly a 'duck' type.
It cannot parse complex template parameters.
The variadic args should correspond 1:1 with the type of objects the generator expects from the template's parameters.
*/
CodeOperator gen_operator_template( CodeTemplate tmpl, ... );
CodeFn gen_func_template( CodeTemplate tmpl, ... );
Code gen_class_struct_template( CodeTemplate tmpl, ... );
Code gen_template( CodeTemplate tmpl, ... );
Code gen_template( StrC tmpl, StrC instantiation );

View File

@ -165,6 +165,46 @@ struct CodeSpecifiers
return -1;
}
s32 remove( SpecifierT to_remove )
{
if ( ast == nullptr )
{
log_failure("CodeSpecifiers: Attempted to append to a null specifiers AST!");
return -1;
}
if ( raw()->NumEntries == AST::ArrSpecs_Cap )
{
log_failure("CodeSpecifiers: Attempted to append over %d specifiers to a specifiers AST!", AST::ArrSpecs_Cap );
return -1;
}
s32 result = -1;
s32 curr = 0;
s32 next = 0;
for(; next < raw()->NumEntries; ++ curr, ++ next)
{
SpecifierT spec = raw()->ArrSpecs[next];
if (spec == to_remove)
{
result = next;
next ++;
if (next >= raw()->NumEntries)
break;
spec = raw()->ArrSpecs[next];
}
raw()->ArrSpecs[ curr ] = spec;
}
if (result > -1) {
raw()->NumEntries --;
}
return result;
}
void to_string( String& result );
AST* raw()
{

View File

@ -133,6 +133,7 @@ extern CodeType t_typename;
#pragma region Macros
#ifndef token_fmt
# define gen_main main
# define __ NoCode
@ -151,6 +152,7 @@ extern CodeType t_typename;
// Takes a format string (char const*) and a list of tokens (StrC) and returns a StrC of the formatted string.
# define token_fmt( ... ) GEN_NS token_fmt_impl( (num_args( __VA_ARGS__ ) + 1) / 2, __VA_ARGS__ )
#endif
#pragma endregion Macros
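For reference, a short sketch (not from the repository) of the token_fmt macro documented above: it counts the "key", value pairs for token_fmt_impl and substitutes <key> tokens in the trailing format string, mirroring the gen_fixed_arenas usage earlier; the include path is an assumption.
#define GEN_TIME
#include "gen.hpp"   // project library header (path assumed)
using namespace gen;
CodeStruct example_token_fmt()
{
	// Assumes gen::init() has already been called (e.g. at the top of gen_main).
	// Same pattern as gen_fixed_arenas: substitute <Name> and <Size> in a stringized template.
	char const* tmpl = stringize(
		struct FixedArena_<Name>
		{
			char  memory[<Size>];
			Arena arena;
		};
	);
	StrC rendered = token_fmt( "Name", txt("1KB"), "Size", txt("kilobytes(1)"), tmpl );
	return parse_struct( rendered );
}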

View File

@ -11,7 +11,7 @@ internal void deinit();
internal
void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
{
Arena* last = & Global_AllocatorBuckets.back();
Arena* last = & back(Global_AllocatorBuckets);
switch ( type )
{
@ -19,18 +19,18 @@ void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, s
{
if ( ( last->TotalUsed + size ) > last->TotalSize )
{
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! Global_AllocatorBuckets.append( bucket ) )
if ( ! append( Global_AllocatorBuckets, bucket ) )
GEN_FATAL( "Failed to append bucket to Global_AllocatorBuckets");
last = & Global_AllocatorBuckets.back();
last = & back(Global_AllocatorBuckets);
}
return alloc_align( * last, size, alignment );
return alloc_align( allocator_info(* last), size, alignment );
}
case EAllocation_FREE:
{
@ -46,15 +46,15 @@ void* Global_Allocator_Proc( void* allocator_data, AllocType type, ssize size, s
{
if ( last->TotalUsed + size > last->TotalSize )
{
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create bucket for Global_AllocatorBuckets");
if ( ! Global_AllocatorBuckets.append( bucket ) )
if ( ! append( Global_AllocatorBuckets, bucket ) )
GEN_FATAL( "Failed to append bucket to Global_AllocatorBuckets");
last = & Global_AllocatorBuckets.back();
last = & back(Global_AllocatorBuckets);
}
void* result = alloc_align( last->Backing, size, alignment );
@ -235,28 +235,27 @@ void init()
{
GlobalAllocator = AllocatorInfo { & Global_Allocator_Proc, nullptr };
Global_AllocatorBuckets = Array<Arena>::init_reserve( heap(), 128 );
Global_AllocatorBuckets = array_init_reserve<Arena>( heap(), 128 );
if ( Global_AllocatorBuckets == nullptr )
GEN_FATAL( "Failed to reserve memory for Global_AllocatorBuckets");
Arena bucket = Arena::init_from_allocator( heap(), Global_BucketSize );
Arena bucket = arena_init_from_allocator( heap(), Global_BucketSize );
if ( bucket.PhysicalStart == nullptr )
GEN_FATAL( "Failed to create first bucket for Global_AllocatorBuckets");
Global_AllocatorBuckets.append( bucket );
append( Global_AllocatorBuckets, bucket );
}
// Setup the arrays
{
CodePools = Array<Pool>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
CodePools = array_init_reserve<Pool>( Allocator_DataArrays, InitSize_DataArrays );
if ( CodePools == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the CodePools array" );
StringArenas = Array<Arena>::init_reserve( Allocator_DataArrays, InitSize_DataArrays );
StringArenas = array_init_reserve<Arena>( Allocator_DataArrays, InitSize_DataArrays );
if ( StringArenas == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the StringArenas array" );
@ -264,21 +263,21 @@ void init()
// Setup the code pool and code entries arena.
{
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = pool_init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.PhysicalStart == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the code pool" );
CodePools.append( code_pool );
append(CodePools, code_pool );
LexArena = Arena::init_from_allocator( Allocator_Lexer, LexAllocator_Size );
LexArena = arena_init_from_allocator( Allocator_Lexer, LexAllocator_Size );
Arena string_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
Arena string_arena = arena_init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( string_arena.PhysicalStart == nullptr )
GEN_FATAL( "gen::init: Failed to initialize the string arena" );
StringArenas.append( string_arena );
append(StringArenas, string_arena );
}
// Setup the hash tables
@ -290,7 +289,7 @@ void init()
}
// Preprocessor Defines
PreprocessorDefines = Array<StringCached>::init_reserve( GlobalAllocator, kilobytes(1) );
PreprocessorDefines = array_init_reserve<StringCached>( GlobalAllocator, kilobytes(1) );
define_constants();
parser::init();
@ -299,62 +298,62 @@ void init()
void deinit()
{
usize index = 0;
usize left = CodePools.num();
usize left = num(CodePools);
do
{
Pool* code_pool = & CodePools[index];
code_pool->free();
free(* code_pool);
index++;
}
while ( left--, left );
index = 0;
left = StringArenas.num();
left = num(StringArenas);
do
{
Arena* string_arena = & StringArenas[index];
string_arena->free();
free(* string_arena);
index++;
}
while ( left--, left );
StringCache.destroy();
CodePools.free();
StringArenas.free();
free(CodePools);
free(StringArenas);
LexArena.free();
free(LexArena);
PreprocessorDefines.free();
free(PreprocessorDefines);
index = 0;
left = Global_AllocatorBuckets.num();
left = num(Global_AllocatorBuckets);
do
{
Arena* bucket = & Global_AllocatorBuckets[ index ];
bucket->free();
free(* bucket);
index++;
}
while ( left--, left );
Global_AllocatorBuckets.free();
free(Global_AllocatorBuckets);
parser::deinit();
}
void reset()
{
s32 index = 0;
s32 left = CodePools.num();
s32 left = num(CodePools);
do
{
Pool* code_pool = & CodePools[index];
code_pool->clear();
clear(* code_pool);
index++;
}
while ( left--, left );
index = 0;
left = StringArenas.num();
left = num(StringArenas);
do
{
Arena* string_arena = & StringArenas[index];
@ -363,28 +362,28 @@ void reset()
}
while ( left--, left );
StringCache.clear();
clear(StringCache);
define_constants();
}
AllocatorInfo get_string_allocator( s32 str_length )
{
Arena* last = & StringArenas.back();
Arena* last = & back(StringArenas);
usize size_req = str_length + sizeof(String::Header) + sizeof(char*);
usize size_req = str_length + sizeof(StringHeader) + sizeof(char*);
if ( last->TotalUsed + ssize(size_req) > last->TotalSize )
{
Arena new_arena = Arena::init_from_allocator( Allocator_StringArena, SizePer_StringArena );
Arena new_arena = arena_init_from_allocator( Allocator_StringArena, SizePer_StringArena );
if ( ! StringArenas.append( new_arena ) )
if ( ! append(StringArenas, new_arena ) )
GEN_FATAL( "gen::get_string_allocator: Failed to allocate a new string arena" );
last = & StringArenas.back();
last = & back(StringArenas);
}
return * last;
return allocator_info(* last);
}
// Will either make or retrive a code string.
@ -408,21 +407,21 @@ StringCached get_cached_string( StrC str )
// Used internally to retrieve a Code object from the CodePool.
Code make_code()
{
Pool* allocator = & CodePools.back();
Pool* allocator = & back(CodePools);
if ( allocator->FreeList == nullptr )
{
Pool code_pool = Pool::init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
Pool code_pool = pool_init( Allocator_CodePool, CodePool_NumBlocks, sizeof(AST) );
if ( code_pool.PhysicalStart == nullptr )
GEN_FATAL( "gen::make_code: Failed to allocate a new code pool - CodePool allcoator returned nullptr." );
if ( ! CodePools.append( code_pool ) )
if ( ! append( CodePools, code_pool ) )
GEN_FATAL( "gen::make_code: Failed to allocate a new code pool - CodePools failed to append new pool." );
allocator = & CodePools.back();
allocator = & back(CodePools);
}
Code result { rcast( AST*, alloc( * allocator, sizeof(AST) )) };
Code result { rcast( AST*, alloc( allocator_info(* allocator), sizeof(AST) )) };
mem_set( result.ast, 0, sizeof(AST) );
// result->Type = ECode::Invalid;

View File

@ -16,8 +16,8 @@ ssize token_fmt_va( char* buf, usize buf_size, s32 num_tokens, va_list va )
local_persist
char tok_map_mem[ TokenFmt_TokenMap_MemSize ];
tok_map_arena = Arena::init_from_memory( tok_map_mem, sizeof(tok_map_mem) );
tok_map = HashTable<StrC>::init( tok_map_arena );
tok_map_arena = arena_init_from_memory( tok_map_mem, sizeof(tok_map_mem) );
tok_map = HashTable<StrC>::init( allocator_info(tok_map_arena) );
s32 left = num_tokens - 1;
@ -95,7 +95,7 @@ ssize token_fmt_va( char* buf, usize buf_size, s32 num_tokens, va_list va )
}
tok_map.clear();
tok_map_arena.free();
free(tok_map_arena);
ssize result = buf_size - remaining;

View File

@ -222,7 +222,7 @@ s32 lex_preprocessor_directive(
, Token& token )
{
char const* hash = scanner;
Tokens.append( { hash, 1, TokType::Preprocess_Hash, line, column, TF_Preprocess } );
append(Tokens, { hash, 1, TokType::Preprocess_Hash, line, column, TF_Preprocess } );
move_forward();
SkipWhitespace();
@ -298,14 +298,14 @@ s32 lex_preprocessor_directive(
token.Length = token.Length + token.Text - hash;
token.Text = hash;
Tokens.append( token );
append(Tokens, token );
return Lex_Continue; // Skip found token, its all handled here.
}
if ( token.Type == TokType::Preprocess_Else || token.Type == TokType::Preprocess_EndIf )
{
token.Flags |= TF_Preprocess_Cond;
Tokens.append( token );
append(Tokens, token );
end_line();
return Lex_Continue;
}
@ -314,7 +314,7 @@ s32 lex_preprocessor_directive(
token.Flags |= TF_Preprocess_Cond;
}
Tokens.append( token );
append(Tokens, token );
SkipWhitespace();
@ -338,7 +338,7 @@ s32 lex_preprocessor_directive(
name.Length++;
}
Tokens.append( name );
append(Tokens, name );
u64 key = crc32( name.Text, name.Length );
defines.set( key, name );
@ -384,7 +384,7 @@ s32 lex_preprocessor_directive(
move_forward();
}
Tokens.append( preprocess_content );
append(Tokens, preprocess_content );
return Lex_Continue; // Skip found token, its all handled here.
}
@ -446,7 +446,7 @@ s32 lex_preprocessor_directive(
preprocess_content.Length++;
}
Tokens.append( preprocess_content );
append(Tokens, preprocess_content );
return Lex_Continue; // Skip found token, its all handled here.
}
@ -461,7 +461,7 @@ void lex_found_token( StrC& content
{
if ( token.Type != TokType::Invalid )
{
Tokens.append( token );
append(Tokens, token );
return;
}
@ -488,7 +488,7 @@ void lex_found_token( StrC& content
}
token.Type = type;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -498,7 +498,7 @@ void lex_found_token( StrC& content
{
token.Type = type;
token.Flags |= TF_Specifier;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -506,7 +506,7 @@ void lex_found_token( StrC& content
if ( type != TokType::Invalid )
{
token.Type = type;
Tokens.append( token );
append(Tokens, token );
return;
}
@ -558,7 +558,7 @@ void lex_found_token( StrC& content
token.Type = TokType::Identifier;
}
Tokens.append( token );
append(Tokens, token );
}
@ -582,7 +582,7 @@ TokArray lex( StrC content )
return { { nullptr }, 0 };
}
for ( StringCached entry : PreprocessorDefines )
foreach( StringCached, entry, PreprocessorDefines )
{
s32 length = 0;
char const* scanner = entry.Data;
@ -600,7 +600,7 @@ TokArray lex( StrC content )
defines.set( key, entry );
}
Tokens.clear();
clear(Tokens);
while (left )
{
@ -630,7 +630,7 @@ TokArray lex( StrC content )
token.Type = TokType::NewLine;
token.Length++;
Tokens.append( token );
append(Tokens, token );
continue;
}
}
@ -1099,7 +1099,7 @@ TokArray lex( StrC content )
move_forward();
token.Length++;
}
Tokens.append( token );
append(Tokens, token );
continue;
}
else if ( current == '*' )
@ -1135,7 +1135,7 @@ TokArray lex( StrC content )
move_forward();
token.Length++;
}
Tokens.append( token );
append(Tokens, token );
// end_line();
continue;
}
@ -1228,9 +1228,9 @@ TokArray lex( StrC content )
}
else
{
s32 start = max( 0, Tokens.num() - 100 );
s32 start = max( 0, num(Tokens) - 100 );
log_fmt("\n%d\n", start);
for ( s32 idx = start; idx < Tokens.num(); idx++ )
for ( s32 idx = start; idx < num(Tokens); idx++ )
{
log_fmt( "Token %d Type: %s : %.*s\n"
, idx
@ -1253,7 +1253,7 @@ TokArray lex( StrC content )
lex_found_token( content, left, scanner, line, column, defines, token );
}
if ( Tokens.num() == 0 )
if ( num(Tokens) == 0 )
{
log_failure( "Failed to lex any tokens" );
return { { nullptr }, 0 };

View File

@ -48,11 +48,11 @@ struct ParseContext
String result = String::make_reserve( GlobalAllocator, kilobytes(4) );
Token scope_start = Scope->Start;
Token last_valid = Tokens.Idx >= Tokens.Arr.num() ? Tokens.Arr[Tokens.Arr.num() -1] : Tokens.current();
Token last_valid = Tokens.Idx >= num(Tokens.Arr) ? Tokens.Arr[num(Tokens.Arr) -1] : Tokens.current();
sptr length = scope_start.Length;
char const* current = scope_start.Text + length;
while ( current <= Tokens.Arr.back().Text && *current != '\n' && length < 74 )
while ( current <= back(Tokens.Arr).Text && *current != '\n' && length < 74 )
{
current++;
length++;
@ -96,7 +96,7 @@ global ParseContext Context;
bool TokArray::__eat( TokType type )
{
if ( Arr.num() - Idx <= 0 )
if ( num(Arr) - Idx <= 0 )
{
log_failure( "No tokens left.\n%s", Context.to_string() );
return false;
@ -132,12 +132,12 @@ bool TokArray::__eat( TokType type )
internal
void init()
{
Tokens = Array<Token>::init_reserve( LexArena
, ( LexAllocator_Size - sizeof( Array<Token>::Header ) ) / sizeof(Token)
Tokens = array_init_reserve<Token>( allocator_info(LexArena)
, ( LexAllocator_Size - sizeof( ArrayHeader ) ) / sizeof(Token)
);
defines_map_arena = Arena_256KB::init();
defines = HashTable<StrC>::init_reserve( defines_map_arena, 256 );
fixed_arena_init(defines_map_arena);
defines = HashTable<StrC>::init_reserve( allocator_info(defines_map_arena), 256 );
}
internal
@ -167,7 +167,7 @@ if ( def.Ptr == nullptr ) \
# define prevtok Context.Tokens.previous()
# define nexttok Context.Tokens.next()
# define eat( Type_ ) Context.Tokens.__eat( Type_ )
# define left ( Context.Tokens.Arr.num() - Context.Tokens.Idx )
# define left ( num(Context.Tokens.Arr) - Context.Tokens.Idx )
#ifdef check
#define CHECK_WAS_DEFINED
@ -712,7 +712,10 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
local_persist
char interface_arr_mem[ kilobytes(4) ] {0};
Array<CodeType> interfaces = Array<CodeType>::init_reserve( Arena::init_from_memory(interface_arr_mem, kilobytes(4) ), 4 );
Array<CodeType> interfaces; {
Arena arena = arena_init_from_memory( interface_arr_mem, kilobytes(4) );
interfaces = array_init_reserve<CodeType>( allocator_info(arena), 4 );
}
// TODO(Ed) : Make an AST_DerivedType, we'll store any arbitrary derived type into there as a linear linked list of them.
if ( check( TokType::Assign_Classifer ) )
@ -742,7 +745,7 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
}
Token interface_tok = parse_identifier();
interfaces.append( def_type( interface_tok ) );
append(interfaces, def_type( interface_tok ) );
// <ModuleFlags> <class/struct> <Attributes> <Name> : <Access Specifier> <Name>, ...
}
}
@ -774,7 +777,7 @@ Code parse_class_struct( TokType which, bool inplace_def = false )
if ( inline_cmt )
result->InlineCmt = inline_cmt;
interfaces.free();
free(interfaces);
return result;
}
@ -1149,7 +1152,7 @@ Code parse_complicated_definition( TokType which )
s32 idx = tokens.Idx;
s32 level = 0;
for ( ; idx < tokens.Arr.num(); idx++ )
for ( ; idx < num(tokens.Arr); idx++ )
{
if ( tokens[ idx ].Type == TokType::BraceCurly_Open )
level++;
@ -1834,7 +1837,7 @@ CodeBody parse_global_nspace( CodeT which )
bool found_operator_cast_outside_class_implmentation = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -1906,14 +1909,14 @@ Code parse_global_nspace_constructor_destructor( CodeSpecifiers specifiers )
s32 idx = tokens.Idx;
Token nav = tokens[ idx ];
for ( ; idx < tokens.Arr.num(); idx++, nav = tokens[ idx ] )
for ( ; idx < num(tokens.Arr); idx++, nav = tokens[ idx ] )
{
if ( nav.Text[0] == '<' )
{
// Skip templated expressions as they may have expressions with the () operators
s32 capture_level = 0;
s32 template_level = 0;
for ( ; idx < tokens.Arr.num(); idx++, nav = tokens[idx] )
for ( ; idx < num(tokens.Arr); idx++, nav = tokens[idx] )
{
if (nav.Text[ 0 ] == '<')
++ template_level;
@ -2508,7 +2511,7 @@ Code parse_operator_function_or_variable( bool expects_function, CodeAttributes
bool found_operator = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -4345,7 +4348,7 @@ CodeTemplate parse_template()
bool found_operator_cast_outside_class_implmentation = false;
s32 idx = Context.Tokens.Idx;
for ( ; idx < Context.Tokens.Arr.num(); idx++ )
for ( ; idx < num(Context.Tokens.Arr); idx++ )
{
Token tok = Context.Tokens[ idx ];
@ -4893,7 +4896,7 @@ CodeTypedef parse_typedef()
s32 idx = tokens.Idx;
s32 level = 0;
for ( ; idx < tokens.Arr.num(); idx ++ )
for ( ; idx < num(tokens.Arr); idx ++ )
{
if ( tokens[idx].Type == TokType::BraceCurly_Open )
level++;

View File

@ -1,5 +1,6 @@
#ifdef GEN_INTELLISENSE_DIRECTIVES
# pragma once
# include "platform.hpp"
# include "macros.hpp"
#endif
@ -122,13 +123,21 @@ typedef s8 b8;
typedef s16 b16;
typedef s32 b32;
using mem_ptr = void*;
using mem_ptr_const = void const*;
typedef void* mem_ptr;
typedef void const* mem_ptr_const ;
#if ! GEN_COMPILER_C
template<typename Type> uptr to_uptr( Type* ptr ) { return (uptr)ptr; }
template<typename Type> sptr to_sptr( Type* ptr ) { return (sptr)ptr; }
template<typename Type> mem_ptr to_mem_ptr ( Type ptr ) { return (mem_ptr) ptr; }
template<typename Type> mem_ptr_const to_mem_ptr_const( Type ptr ) { return (mem_ptr_const)ptr; }
#else
#define to_uptr( ptr ) ((uptr)(ptr))
#define to_sptr( ptr ) ((sptr)(ptr))
#define to_mem_ptr( ptr ) ((mem_ptr)(ptr))
#define to_mem_ptr_const( ptr ) ((mem_ptr_const)(ptr))
#endif
#pragma endregion Basic Types

View File

@ -13,26 +13,101 @@ template<class TType, usize Size> struct RemoveConst<const TType[Size]> { typede
template<class TType>
using TRemoveConst = typename RemoveConst<TType>::Type;
#pragma region Array
#if ! GEN_COMPILER_C
#define Array(Type) Array<Type>
// #define array_init(Type, ...) array_init <Type>(__VA_ARGS__)
// #define array_init_reserve(Type, ...) array_init_reserve<Type>(__VA_ARGS__)
#endif
struct ArrayHeader;
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type> struct Array;
#else
template<class Type>
struct Array
{
struct Header
{
using Array = Type*;
#endif
usize array_grow_formula(ssize value);
template<class Type> Array<Type> array_init(AllocatorInfo allocator);
template<class Type> Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity);
template<class Type> bool append(Array<Type>& array, Array<Type> other);
template<class Type> bool append(Array<Type>& array, Type value);
template<class Type> bool append(Array<Type>& array, Type* items, usize item_num);
template<class Type> bool append_at(Array<Type>& array, Type item, usize idx);
template<class Type> bool append_at(Array<Type>& array, Type* items, usize item_num, usize idx);
template<class Type> Type& back(Array<Type>& array);
template<class Type> void clear(Array<Type>& array);
template<class Type> bool fill(Array<Type>& array, usize begin, usize end, Type value);
template<class Type> void free(Array<Type>& array);
template<class Type> bool grow(Array<Type>& array, usize min_capacity);
template<class Type> usize num(Array<Type>& array);
template<class Type> void pop(Array<Type>& array);
template<class Type> void remove_at(Array<Type>& array, usize idx);
template<class Type> bool reserve(Array<Type>& array, usize new_capacity);
template<class Type> bool resize(Array<Type>& array, usize num);
template<class Type> bool set_capacity(Array<Type>& array, usize new_capacity);
template<class Type> ArrayHeader* get_header(Array<Type>& array);
template<class Type> forceinline Type* begin(Array<Type>& array) { return array; }
template<class Type> forceinline Type* end(Array<Type>& array) { return array + get_header(array)->Num; }
template<class Type> forceinline Type* next(Array<Type>& array, Type* entry) { return entry + 1; }
struct ArrayHeader {
AllocatorInfo Allocator;
usize Capacity;
usize Num;
};
static
Array init( AllocatorInfo allocator )
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type>
struct Array
{
return init_reserve( allocator, grow_formula(0) );
Type* Data;
#pragma region Member Mapping
forceinline static Array init(AllocatorInfo allocator) { return GEN_NS array_init<Type>(allocator); }
forceinline static Array init_reserve(AllocatorInfo allocator, ssize capacity) { return GEN_NS array_init_reserve<Type>(allocator, capacity); }
forceinline static usize grow_formula(ssize value) { return GEN_NS array_grow_formula<Type>(value); }
forceinline bool append(Array other) { return GEN_NS append<Type>(*this, other); }
forceinline bool append(Type value) { return GEN_NS append<Type>(*this, value); }
forceinline bool append(Type* items, usize item_num) { return GEN_NS append<Type>(*this, items, item_num); }
forceinline bool append_at(Type item, usize idx) { return GEN_NS append_at<Type>(*this, item, idx); }
forceinline bool append_at(Type* items, usize item_num, usize idx) { return GEN_NS append_at<Type>(*this, items, item_num, idx); }
forceinline Type& back() { return GEN_NS back<Type>(*this); }
forceinline void clear() { GEN_NS clear<Type>(*this); }
forceinline bool fill(usize begin, usize end, Type value) { return GEN_NS fill<Type>(*this, begin, end, value); }
forceinline void free() { GEN_NS free<Type>(*this); }
forceinline ArrayHeader* get_header() { return GEN_NS get_header<Type>(*this); }
forceinline bool grow(usize min_capacity) { return GEN_NS grow<Type>(*this, min_capacity); }
forceinline usize num() { return GEN_NS num<Type>(*this); }
forceinline void pop() { GEN_NS pop<Type>(*this); }
forceinline void remove_at(usize idx) { GEN_NS remove_at<Type>(*this, idx); }
forceinline bool reserve(usize new_capacity) { return GEN_NS reserve<Type>(*this, new_capacity); }
forceinline bool resize(usize num) { return GEN_NS resize<Type>(*this, num); }
forceinline bool set_capacity(usize new_capacity) { return GEN_NS set_capacity<Type>(*this, new_capacity); }
forceinline operator Type*() { return Data; }
forceinline operator Type const*() const { return Data; }
forceinline Type* begin() { return Data; }
forceinline Type* end() { return Data + get_header()->Num; }
#pragma endregion Member Mapping
};
#endif
template<class Type> inline
Array<Type> array_init(AllocatorInfo allocator) {
return array_init_reserve<Type>(allocator, array_grow_formula(0));
}
static
Array init_reserve( AllocatorInfo allocator, ssize capacity )
template<class Type> inline
Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity)
{
Header* header = rcast( Header*, alloc( allocator, sizeof(Header) + sizeof(Type) * capacity ));
ArrayHeader* header = rcast(ArrayHeader*, alloc(allocator, sizeof(ArrayHeader) + sizeof(Type) * capacity));
if (header == nullptr)
return {nullptr};
@ -44,56 +119,55 @@ struct Array
return {rcast(Type*, header + 1)};
}
static
usize grow_formula( usize value )
{
usize array_grow_formula(ssize value) {
return 2 * value + 8;
}
bool append( Array other )
{
return append( other, other.num() );
template<class Type> inline
bool append(Array<Type>& array, Array<Type> other) {
return append(array, other, num(other));
}
bool append( Type value )
template<class Type> inline
bool append(Array<Type>& array, Type value)
{
Header* header = get_header();
ArrayHeader* header = get_header(array);
if (header->Num == header->Capacity)
{
if ( ! grow( header->Capacity ))
if (!grow(array, header->Capacity))
return false;
header = get_header();
header = get_header(array);
}
Data[ header->Num ] = value;
array[header->Num] = value;
header->Num++;
return true;
}
bool append( Type* items, usize item_num )
template<class Type> inline
bool append(Array<Type>& array, Type* items, usize item_num)
{
Header* header = get_header();
ArrayHeader* header = get_header(array);
if (header->Num + item_num > header->Capacity)
{
if ( ! grow( header->Capacity + item_num ))
if (!grow(array, header->Capacity + item_num))
return false;
header = get_header();
header = get_header(array);
}
mem_copy( Data + header->Num, items, item_num * sizeof(Type) );
mem_copy(array.Data + header->Num, items, item_num * sizeof(Type));
header->Num += item_num;
return true;
}
bool append_at( Type item, usize idx )
template<class Type> inline
bool append_at(Array<Type>& array, Type item, usize idx)
{
Header* header = get_header();
ArrayHeader* header = get_header(array);
if (idx >= header->Num)
idx = header->Num - 1;
@ -103,13 +177,13 @@ struct Array
if (header->Capacity < header->Num + 1)
{
if ( ! grow( header->Capacity + 1 ))
if (!grow(array, header->Capacity + 1))
return false;
header = get_header();
header = get_header(array);
}
Type* target = Data + idx;
Type* target = array + idx;
mem_move(target + 1, target, (header->Num - idx) * sizeof(Type));
header->Num++;
@ -117,25 +191,26 @@ struct Array
return true;
}
bool append_at( Type* items, usize item_num, usize idx )
template<class Type> inline
bool append_at(Array<Type>& array, Type* items, usize item_num, usize idx)
{
Header* header = get_header();
ArrayHeader* header = get_header(array);
if (idx >= header->Num)
{
return append( items, item_num );
return append(array, items, item_num);
}
if (item_num > header->Capacity)
{
if ( ! grow( header->Capacity + item_num ) )
if (!grow(array, header->Capacity + item_num))
return false;
header = get_header();
header = get_header(array);
}
Type* target = Data + idx + item_num;
Type* src = Data + idx;
Type* target = array.Data + idx + item_num;
Type* src = array.Data + idx;
mem_move(target, src, (header->Num - idx) * sizeof(Type));
mem_copy(src, items, item_num * sizeof(Type));
@ -144,268 +219,287 @@ struct Array
return true;
}
Type& back( void )
{
Header& header = * get_header();
return Data[ header.Num - 1 ];
template<class Type> inline
Type& back(Array<Type>& array) {
ArrayHeader* header = get_header(array);
return array[header->Num - 1];
}
void clear( void )
{
Header& header = * get_header();
header.Num = 0;
template<class Type> inline
void clear(Array<Type>& array) {
ArrayHeader* header = get_header(array);
header->Num = 0;
}
bool fill( usize begin, usize end, Type value )
template<class Type> inline
bool fill(Array<Type>& array, usize begin, usize end, Type value)
{
Header& header = * get_header();
ArrayHeader* header = get_header(array);
if ( begin < 0 || end > header.Num )
if (begin < 0 || end > header->Num)
return false;
for (ssize idx = ssize(begin); idx < ssize(end); idx++)
{
Data[ idx ] = value;
array[idx] = value;
}
return true;
}
void free( void )
{
Header& header = * get_header();
gen::free( header.Allocator, &header );
template<class Type> inline
void free(Array<Type>& array) {
ArrayHeader* header = get_header(array);
GEN_NS free(header->Allocator, header);
Type*& Data = rcast(Type*&, array);
Data = nullptr;
}
Header* get_header( void )
{
template<class Type> inline
ArrayHeader* get_header(Array<Type>& array) {
using NonConstType = TRemoveConst<Type>;
return rcast( Header*, const_cast<NonConstType*>(Data) ) - 1 ;
Type* Data = array; // In C this is a plain pointer copy; in C++ it goes through the conversion operator to reach the Data member.
return rcast(ArrayHeader*, const_cast<NonConstType*>(Data)) - 1;
}
bool grow( usize min_capacity )
template<class Type> inline
bool grow(Array<Type>& array, usize min_capacity)
{
Header& header = * get_header();
usize new_capacity = grow_formula( header.Capacity );
ArrayHeader* header = get_header(array);
usize new_capacity = array_grow_formula(header->Capacity);
if (new_capacity < min_capacity)
new_capacity = min_capacity;
return set_capacity( new_capacity );
return set_capacity(array, new_capacity);
}
usize num( void )
{
return get_header()->Num;
template<class Type> inline
usize num(Array<Type>& array) {
return get_header(array)->Num;
}
void pop( void )
{
Header& header = * get_header();
GEN_ASSERT( header.Num > 0 );
header.Num--;
}
void remove_at( usize idx )
{
Header* header = get_header();
GEN_ASSERT( idx < header->Num );
mem_move( header + idx, header + idx + 1, sizeof( Type ) * ( header->Num - idx - 1 ) );
template<class Type> inline
void pop(Array<Type>& array) {
ArrayHeader* header = get_header(array);
GEN_ASSERT(header->Num > 0);
header->Num--;
}
bool reserve( usize new_capacity )
template<class Type> inline
void remove_at(Array<Type>& array, usize idx)
{
Header& header = * get_header();
ArrayHeader* header = get_header(array);
GEN_ASSERT(idx < header->Num);
if ( header.Capacity < new_capacity )
return set_capacity( new_capacity );
mem_move(array + idx, array + idx + 1, sizeof(Type) * (header->Num - idx - 1));
header->Num--;
}
template<class Type> inline
bool reserve(Array<Type>& array, usize new_capacity)
{
ArrayHeader* header = get_header(array);
if (header->Capacity < new_capacity)
return set_capacity(array, new_capacity);
return true;
}
bool resize( usize num )
template<class Type> inline
bool resize(Array<Type>& array, usize num)
{
Header* header = get_header();
ArrayHeader* header = get_header(array);
if ( header->Capacity < num )
{
if ( ! grow( num ) )
if (header->Capacity < num) {
if (!grow(array, num))
return false;
header = get_header();
header = get_header(array);
}
header->Num = num;
return true;
}
bool set_capacity( usize new_capacity )
template<class Type> inline
bool set_capacity(Array<Type>& array, usize new_capacity)
{
Header& header = * get_header();
ArrayHeader* header = get_header(array);
if ( new_capacity == header.Capacity )
if (new_capacity == header->Capacity)
return true;
if ( new_capacity < header.Num )
if (new_capacity < header->Num)
{
// Already have the memory, might as well keep it.
header.Num = new_capacity;
header->Num = new_capacity;
return true;
}
ssize size = sizeof( Header ) + sizeof( Type ) * new_capacity;
Header* new_header = rcast( Header*, alloc( header.Allocator, size ) );
ssize size = sizeof(ArrayHeader) + sizeof(Type) * new_capacity;
ArrayHeader* new_header = rcast(ArrayHeader*, alloc(header->Allocator, size));
if (new_header == nullptr)
return false;
mem_move( new_header, &header, sizeof( Header ) + sizeof( Type ) * header.Num );
mem_move(new_header, header, sizeof(ArrayHeader) + sizeof(Type) * header->Num);
new_header->Capacity = new_capacity;
gen::free( header.Allocator, &header );
GEN_NS free(header->Allocator, header);
Type*& Data = rcast(Type*&, array);
Data = rcast(Type*, new_header + 1);
return true;
}
Type* Data;
operator Type*()
{
return Data;
}
operator Type const*() const
{
return Data;
}
// For-range based support
Type* begin()
{
return Data;
}
Type* end()
{
return Data + get_header()->Num;
}
};
#pragma endregion Array
// TODO(Ed) : This thing needs ALOT of work.
template<typename Type>
struct HashTable
{
struct FindResult
{
#pragma region HashTable
template<class Type> struct HashTable;
struct HashTableFindResult {
ssize HashIndex;
ssize PrevIndex;
ssize EntryIndex;
};
struct Entry
{
template<class Type>
struct HashTableEntry {
u64 Key;
ssize Next;
Type Value;
};
static constexpr f32 CriticalLoadScale = 0.7f;
// Forward declarations for all lifted functions
template<class Type> HashTable<Type> hashtable_init(AllocatorInfo allocator);
template<class Type> HashTable<Type> hashtable_init_reserve(AllocatorInfo allocator, usize num);
template<class Type> void clear(HashTable<Type>& table);
template<class Type> void destroy(HashTable<Type>& table);
template<class Type> Type* get(HashTable<Type>& table, u64 key);
template<class Type> void grow(HashTable<Type>& table);
template<class Type> void rehash(HashTable<Type>& table, ssize new_num);
template<class Type> void rehash_fast(HashTable<Type>& table);
template<class Type> void remove(HashTable<Type>& table, u64 key);
template<class Type> void remove_entry(HashTable<Type>& table, ssize idx);
template<class Type> void set(HashTable<Type>& table, u64 key, Type value);
template<class Type> ssize slot(HashTable<Type>& table, u64 key);
template<class Type> ssize add_entry(HashTable<Type>& table, u64 key);
template<class Type> HashTableFindResult find(HashTable<Type>& table, u64 key);
template<class Type> bool full(HashTable<Type>& table);
template<class Type> void map(HashTable<Type>& table, void (*map_proc)(u64 key, Type value));
template<class Type> void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value));
static
HashTable init( AllocatorInfo allocator )
static constexpr f32 HashTable_CriticalLoadScale = 0.7f;
template<typename Type>
struct HashTable
{
HashTable<Type> result = init_reserve(allocator, 8);
Array<ssize> Hashes;
Array<HashTableEntry<Type>> Entries;
#if 1
#pragma region Member Mapping
forceinline static HashTable init(AllocatorInfo allocator) { return GEN_NS hashtable_init<Type>(allocator); }
forceinline static HashTable init_reserve(AllocatorInfo allocator, usize num) { return GEN_NS hashtable_init_reserve<Type>(allocator, num); }
forceinline void clear() { GEN_NS clear<Type>(*this); }
forceinline void destroy() { GEN_NS destroy<Type>(*this); }
forceinline Type* get(u64 key) { return GEN_NS get<Type>(*this, key); }
forceinline void grow() { GEN_NS grow<Type>(*this); }
forceinline void rehash(ssize new_num) { GEN_NS rehash<Type>(*this, new_num); }
forceinline void rehash_fast() { GEN_NS rehash_fast<Type>(*this); }
forceinline void remove(u64 key) { GEN_NS remove<Type>(*this, key); }
forceinline void remove_entry(ssize idx) { GEN_NS remove_entry<Type>(*this, idx); }
forceinline void set(u64 key, Type value) { GEN_NS set<Type>(*this, key, value); }
forceinline ssize slot(u64 key) { return GEN_NS slot<Type>(*this, key); }
forceinline void map(void (*proc)(u64, Type)) { GEN_NS map<Type>(*this, proc); }
forceinline void map_mut(void (*proc)(u64, Type*)) { GEN_NS map_mut<Type>(*this, proc); }
#pragma endregion Member Mapping
#endif
};
template<typename Type> inline
HashTable<Type> hashtable_init(AllocatorInfo allocator) {
HashTable<Type> result = hashtable_init_reserve<Type>(allocator, 8);
return result;
}
static
HashTable init_reserve( AllocatorInfo allocator, usize num )
template<typename Type> inline
HashTable<Type> hashtable_init_reserve(AllocatorInfo allocator, usize num)
{
HashTable<Type> result = { { nullptr }, { nullptr } };
result.Hashes = Array<ssize>::init_reserve( allocator, num );
result.Hashes.get_header()->Num = num;
result.Hashes.resize( num );
result.Hashes.fill( 0, num, -1);
result.Hashes = array_init_reserve<ssize>(allocator, num);
get_header(result.Hashes)->Num = num;
resize(result.Hashes, num);
fill<ssize>(result.Hashes, 0, num, -1);
result.Entries = Array<Entry>::init_reserve( allocator, num );
result.Entries = array_init_reserve<HashTableEntry<Type>>(allocator, num);
return result;
}
void clear( void )
{
Entries.clear();
Hashes.fill( 0, Hashes.num(), -1);
template<typename Type> inline
void clear(HashTable<Type>& table) {
clear(table.Entries);
fill<ssize>(table.Hashes, 0, num(table.Hashes), -1);
}
void destroy( void )
{
if ( Hashes && Hashes.get_header()->Capacity )
{
Hashes.free();
Entries.free();
template<typename Type> inline
void destroy(HashTable<Type>& table) {
if (table.Hashes && get_header(table.Hashes)->Capacity) {
free(table.Hashes);
free(table.Entries);
}
}
template<typename Type> inline
Type* get(HashTable<Type>& table, u64 key) {
	ssize idx = find(table, key).EntryIndex;
	if (idx >= 0)
		return &table.Entries[idx].Value;

	return nullptr;
}

template<typename Type> inline
void map(HashTable<Type>& table, void (*map_proc)(u64 key, Type value)) {
	GEN_ASSERT_NOT_NULL(map_proc);

	for (ssize idx = 0; idx < ssize(table.Entries.num()); ++idx) {
		map_proc(table.Entries[idx].Key, table.Entries[idx].Value);
	}
}

template<typename Type> inline
void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value)) {
	GEN_ASSERT_NOT_NULL(map_proc);

	for (ssize idx = 0; idx < ssize(table.Entries.num()); ++idx) {
		map_proc(table.Entries[idx].Key, &table.Entries[idx].Value);
	}
}

template<typename Type> inline
void grow(HashTable<Type>& table) {
	ssize new_num = array_grow_formula(num(table.Entries));
	rehash(table, new_num);
}

template<typename Type> inline
void rehash(HashTable<Type>& table, ssize new_num)
{
	ssize last_added_index;
	HashTable<Type> new_ht = hashtable_init_reserve<Type>(get_header(table.Hashes)->Allocator, new_num);

	for (ssize idx = 0; idx < ssize(num(table.Entries)); ++idx)
	{
		HashTableFindResult find_result;

		HashTableEntry<Type>& entry = table.Entries[idx];
		find_result      = find(new_ht, entry.Key);
		last_added_index = add_entry(new_ht, entry.Key);

		if (find_result.PrevIndex < 0)
			new_ht.Hashes[find_result.HashIndex] = last_added_index;
@ -416,136 +510,130 @@ struct HashTable
		new_ht.Entries[last_added_index].Value = entry.Value;
	}

	destroy(table);
	table = new_ht;
}

template<typename Type> inline
void rehash_fast(HashTable<Type>& table)
{
	ssize idx;

	for (idx = 0; idx < ssize(table.Entries.num()); idx++)
		table.Entries[idx].Next = -1;

	for (idx = 0; idx < ssize(table.Hashes.num()); idx++)
		table.Hashes[idx] = -1;

	for (idx = 0; idx < ssize(table.Entries.num()); idx++)
	{
		HashTableEntry<Type>* entry;
		HashTableFindResult   find_result;

		entry       = &table.Entries[idx];
		find_result = find(table, entry->Key);

		if (find_result.PrevIndex < 0)
			table.Hashes[find_result.HashIndex] = idx;
		else
			table.Entries[find_result.PrevIndex].Next = idx;
	}
}

template<typename Type> inline
void remove(HashTable<Type>& table, u64 key) {
	HashTableFindResult find_result = find(table, key);

	if (find_result.EntryIndex >= 0) {
		table.Entries.remove_at(find_result.EntryIndex);
		rehash_fast(table);
	}
}

template<typename Type> inline
void remove_entry(HashTable<Type>& table, ssize idx) {
	table.Entries.remove_at(idx);
}

template<typename Type> inline
void set(HashTable<Type>& table, u64 key, Type value)
{
	ssize idx;
	HashTableFindResult find_result;

	if (full(table))
		grow(table);

	find_result = find(table, key);
	if (find_result.EntryIndex >= 0) {
		idx = find_result.EntryIndex;
	}
	else
	{
		idx = add_entry(table, key);

		if (find_result.PrevIndex >= 0) {
			table.Entries[find_result.PrevIndex].Next = idx;
		}
		else {
			table.Hashes[find_result.HashIndex] = idx;
		}
	}

	table.Entries[idx].Value = value;

	if (full(table))
		grow(table);
}

template<typename Type> inline
ssize slot(HashTable<Type>& table, u64 key) {
	for (ssize idx = 0; idx < ssize(table.Hashes.num()); ++idx)
		if (table.Hashes[idx] == key)
			return idx;

	return -1;
}

template<typename Type> inline
ssize add_entry(HashTable<Type>& table, u64 key) {
	ssize idx;
	HashTableEntry<Type> entry = { key, -1 };

	idx = num(table.Entries);
	append(table.Entries, entry);
	return idx;
}

template<typename Type> inline
HashTableFindResult find(HashTable<Type>& table, u64 key)
{
	HashTableFindResult result = { -1, -1, -1 };

	if (num(table.Hashes) > 0)
	{
		result.HashIndex  = key % num(table.Hashes);
		result.EntryIndex = table.Hashes[result.HashIndex];

		while (result.EntryIndex >= 0)
		{
			if (table.Entries[result.EntryIndex].Key == key)
				break;

			result.PrevIndex  = result.EntryIndex;
			result.EntryIndex = table.Entries[result.EntryIndex].Next;
		}
	}

	return result;
}

template<typename Type> inline
bool full(HashTable<Type>& table) {
	usize critical_load = usize(HashTable_CriticalLoadScale * f32(num(table.Hashes)));
	b32 result = num(table.Entries) > critical_load;
	return result;
}
#pragma endregion HashTable
#pragma endregion Containers
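// Usage sketch of the free-function HashTable API above. This is an illustration only:
// heap() as the backing allocator and the int value type are assumptions, not taken from the library's tests.
//
//   HashTable<int> table = hashtable_init_reserve<int>(heap(), 8);
//   set(table, 42ull, 7);
//   int* value = get(table, 42ull);   // nullptr when the key is absent
//   remove(table, 42ull);
//   destroy(table);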

View File

@ -505,7 +505,7 @@ b8 file_stream_new( FileInfo* file, AllocatorInfo allocator )
d->allocator = allocator;
d->flags = EFileStream_CLONE_WRITABLE;
d->cap = 0;
d->buf = Array<u8>::init( allocator );
d->buf = array_init<u8>( allocator );
if ( ! d->buf )
return false;
@ -531,7 +531,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
d->flags = flags;
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = Array<u8>::init_reserve( allocator, size );
Array<u8> arr = array_init_reserve<u8>( allocator, size );
d->buf = arr;
if ( ! d->buf )
@ -540,7 +540,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
mem_copy( d->buf, buffer, size );
d->cap = size;
arr.get_header()->Num = size;
get_header(arr)->Num = size;
}
else
{
@ -610,9 +610,9 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
{
Array<u8> arr = { d->buf };
if ( arr.get_header()->Capacity < usize(new_cap) )
if ( get_header(arr)->Capacity < usize(new_cap) )
{
if ( ! arr.grow( ( s64 )( new_cap ) ) )
if ( ! grow( arr, ( s64 )( new_cap ) ) )
return false;
d->buf = arr;
}
@ -626,7 +626,7 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
mem_copy( d->buf + offset + rwlen, pointer_add_const( buffer, rwlen ), extralen );
d->cap = new_cap;
arr.get_header()->Capacity = new_cap;
get_header(arr)->Capacity = new_cap;
}
else
{
@ -647,7 +647,7 @@ GEN_FILE_CLOSE_PROC( _memory_file_close )
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = { d->buf };
arr.free();
free(arr);
}
free( allocator, d );

View File

@ -23,6 +23,7 @@
#define bitfield_is_equal( Type, Field, Mask ) ( (Type(Mask) & Type(Field)) == Type(Mask) )
#endif
#if ! GEN_C_COMPILER
# ifndef ccast
# define ccast( type, value ) ( const_cast< type >( (value) ) )
# endif
@ -35,6 +36,20 @@
# ifndef scast
# define scast( type, value ) static_cast< type >( value )
# endif
#else
# ifndef ccast
# define ccast( type, value ) ( (type)(value) )
# endif
# ifndef pcast
# define pcast( type, value ) ( (type)(value) )
# endif
# ifndef rcast
# define rcast( type, value ) ( (type)(value) )
# endif
# ifndef scast
# define scast( type, value ) ( (type)(value) )
# endif
#endif
#ifndef stringize
#define stringize_va( ... ) #__VA_ARGS__
@ -123,20 +138,20 @@
#define min( a, b ) ( (a < b) ? (a) : (b) )
#endif
#if defined( _MSC_VER ) || defined( GEN_COMPILER_TINYC )
#if GEN_COMPILER_MSVC || GEN_COMPILER_TINYC
# define offset_of( Type, element ) ( ( GEN_NS( ssize ) ) & ( ( ( Type* )0 )->element ) )
#else
# define offset_of( Type, element ) __builtin_offsetof( Type, element )
#endif
#ifndef forceinline
# ifdef GEN_COMPILER_MSVC
# if GEN_COMPILER_MSVC
# define forceinline __forceinline
# define neverinline __declspec( noinline )
# elif defined(GEN_COMPILER_GCC)
# elif GEN_COMPILER_GCC
# define forceinline inline __attribute__((__always_inline__))
# define neverinline __attribute__( ( __noinline__ ) )
# elif defined(GEN_COMPILER_CLANG)
# elif GEN_COMPILER_CLANG
# if __has_attribute(__always_inline__)
# define forceinline inline __attribute__((__always_inline__))
# define neverinline __attribute__( ( __noinline__ ) )
@ -151,11 +166,11 @@
#endif
#ifndef neverinline
# ifdef GEN_COMPILER_MSVC
# if GEN_COMPILER_MSVC
# define neverinline __declspec( noinline )
# elif defined(GEN_COMPILER_GCC)
# elif GEN_COMPILER_GCC
# define neverinline __attribute__( ( __noinline__ ) )
# elif defined(GEN_COMPILER_CLANG)
# elif GEN_COMPILER_CLANG
# if __has_attribute(__always_inline__)
# define neverinline __attribute__( ( __noinline__ ) )
# else
@ -166,4 +181,28 @@
# endif
#endif
#if !defined(GEN_SUPPORT_CPP_MEMBER_FEATURES) && (!GEN_COMPILER_C || __STDC_VERSION__ < 202311L)
# define GEN_SUPPORT_CPP_MEMBER_FEATURES 0
#endif
#if !defined(typeof) && (!GEN_COMPILER_C || __STDC_VERSION__ < 202311L)
# if ! GEN_COMPILER_C
# define typeof decltype
# elif defined(_MSC_VER)
# define typeof(x) __typeof(x)
# elif defined(__GNUC__) || defined(__clang__)
# define typeof(x) __typeof__(x)
# else
# error "Compiler not supported"
# endif
#endif
// This is intended to only really be used internally or with the C-library variant
// C++ users can just use the for-range directly.
#if GEN_COMPILER_C
# define foreach(Type, entry_id, iterable) for ( Type entry_id = begin(iterable); entry_id != end(iterable); entry_id = next(iterable, entry_id) )
#else
# define foreach(Type, entry_id, iterable) for ( Type entry_id : iterable )
#endif
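// For reference, a use such as foreach(u8*, byte, buffer) expands on the C path to the loop below
// and to a plain range-for on the C++ path (buffer is an illustrative placeholder, not a library symbol):
//
//   for ( u8* byte = begin(buffer); byte != end(buffer); byte = next(buffer, byte) ) { /* ... */ }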
#pragma endregion Macros

View File

@ -334,7 +334,7 @@ ssize virtual_memory_page_size( ssize* alignment_out )
#pragma endregion VirtualMemory
void* Arena::allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
void* arena_allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
{
Arena* arena = rcast(Arena*, allocator_data);
void* ptr = NULL;
@ -384,7 +384,7 @@ void* Arena::allocator_proc( void* allocator_data, AllocType type, ssize size, s
return ptr;
}
void* Pool::allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
void* pool_allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags )
{
Pool* pool = rcast( Pool*, allocator_data);
void* ptr = NULL;
@ -457,7 +457,7 @@ void* Pool::allocator_proc( void* allocator_data, AllocType type, ssize size, ss
return ptr;
}
Pool Pool::init_align( AllocatorInfo backing, ssize num_blocks, ssize block_size, ssize block_align )
Pool pool_init_align( AllocatorInfo backing, ssize num_blocks, ssize block_size, ssize block_align )
{
Pool pool = {};
@ -495,16 +495,16 @@ Pool Pool::init_align( AllocatorInfo backing, ssize num_blocks, ssize block_size
return pool;
}
void Pool::clear()
void clear(Pool& pool)
{
ssize actual_block_size, block_index;
void* curr;
uptr* end;
actual_block_size = BlockSize + BlockAlign;
actual_block_size = pool.BlockSize + pool.BlockAlign;
curr = PhysicalStart;
for ( block_index = 0; block_index < NumBlocks - 1; block_index++ )
curr = pool.PhysicalStart;
for ( block_index = 0; block_index < pool.NumBlocks - 1; block_index++ )
{
uptr* next = ( uptr* ) curr;
*next = ( uptr ) curr + actual_block_size;
@ -514,7 +514,7 @@ void Pool::clear()
end = ( uptr* ) curr;
*end = ( uptr ) NULL;
FreeList = PhysicalStart;
pool.FreeList = pool.PhysicalStart;
}
#pragma endregion Memory

View File

@ -14,6 +14,7 @@
#define GEN__HIGHS ( GEN__ONES * ( GEN_U8_MAX / 2 + 1 ) )
#define GEN__HAS_ZERO( x ) ( ( ( x ) - GEN__ONES ) & ~( x ) & GEN__HIGHS )
#if ! GEN_COMPILER_C
template< class Type >
void swap( Type& a, Type& b )
{
@ -21,6 +22,15 @@ void swap( Type& a, Type& b )
a = b;
b = tmp;
}
#else
#define swap( a, b ) \
do { \
typeof(a) \
temp = (a); \
(a) = (b); \
(b) = temp; \
} while(0)
#endif
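// Minimal sketch of the C-path swap macro in use (local names are illustrative):
//
//   int a = 1, b = 2;
//   swap(a, b);   // expands to the do/while block above; afterwards a == 2 and b == 1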
//! Checks if value is power of 2.
b32 is_power_of_two( ssize x );
@ -70,10 +80,7 @@ enum AllocType : u8
EAllocation_RESIZE,
};
using AllocatorProc = void* ( void* allocator_data, AllocType type
, ssize size, ssize alignment
, void* old_memory, ssize old_size
, u64 flags );
typedef void*(AllocatorProc)( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags );
struct AllocatorInfo
{
@ -170,29 +177,80 @@ b32 gen_vm_purge( VirtualMemory vm );
//! Retrieve VM's page size and alignment.
ssize gen_virtual_memory_page_size( ssize* alignment_out );
#pragma region Arena
struct Arena;
AllocatorInfo allocator_info( Arena& arena );
// Remove static keyword and rename allocator_proc
void* arena_allocator_proc(void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags);
// Add these declarations after the Arena struct
Arena arena_init_from_allocator(AllocatorInfo backing, ssize size);
Arena arena_init_from_memory( void* start, ssize size );
Arena init_sub(Arena& parent, ssize size);
ssize alignment_of(Arena& arena, ssize alignment);
// This id is defined by Unreal for asserts
#pragma push_macro("check")
#undef check
void check(Arena& arena);
#pragma pop_macro("check")
void free(Arena& arena);
ssize size_remaining(Arena& arena, ssize alignment);
struct Arena
{
	AllocatorInfo Backing;
	void*         PhysicalStart;
	ssize         TotalSize;
	ssize         TotalUsed;
	ssize         TempCount;

#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
	forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }

	forceinline static void* allocator_proc( void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags ) { return GEN_NS arena_allocator_proc( allocator_data, type, size, alignment, old_memory, old_size, flags ); }
	forceinline static Arena init_from_memory( void* start, ssize size )              { return GEN_NS arena_init_from_memory( start, size ); }
	forceinline static Arena init_from_allocator( AllocatorInfo backing, ssize size ) { return GEN_NS arena_init_from_allocator( backing, size ); }
	forceinline static Arena init_sub( Arena& parent, ssize size )                    { return GEN_NS arena_init_from_allocator( parent.Backing, size ); }
	forceinline ssize alignment_of( ssize alignment )   { return GEN_NS alignment_of(* this, alignment); }
	forceinline void  free()                            { return GEN_NS free(* this); }
	forceinline ssize size_remaining( ssize alignment ) { return GEN_NS size_remaining(* this, alignment); }

// This id is defined by Unreal for asserts
#pragma push_macro("check")
#undef check
	forceinline void check() { GEN_NS check(* this); }
#pragma pop_macro("check")
#pragma endregion Member Mapping
#endif
};
inline
AllocatorInfo allocator_info( Arena& arena ) {
return { arena_allocator_proc, &arena };
}
inline
Arena arena_init_from_memory( void* start, ssize size )
{
Arena arena = {
{ nullptr, nullptr },
start,
size,
0,
0
};
return arena;
}
inline
Arena arena_init_from_allocator(AllocatorInfo backing, ssize size) {
	Arena result = {
backing,
alloc(backing, size),
size,
@ -202,19 +260,19 @@ struct Arena
return result;
}
inline
Arena init_sub(Arena& parent, ssize size) {
	return arena_init_from_allocator(parent.Backing, size);
}

inline
ssize alignment_of(Arena& arena, ssize alignment)
{
ssize alignment_offset, result_pointer, mask;
GEN_ASSERT(is_power_of_two(alignment));
alignment_offset = 0;
	result_pointer   = (ssize)arena.PhysicalStart + arena.TotalUsed;
mask = alignment - 1;
if (result_pointer & mask)
@ -223,68 +281,73 @@ struct Arena
return alignment_offset;
}
// This id is defined by Unreal for asserts
#pragma push_macro("check")
#undef check
inline
void check(Arena& arena)
{
	GEN_ASSERT(arena.TempCount == 0);
}
#pragma pop_macro("check")

inline
void free(Arena& arena)
{
	if (arena.Backing.Proc)
	{
		GEN_NS free(arena.Backing, arena.PhysicalStart);
		arena.PhysicalStart = nullptr;
	}
}

inline
ssize size_remaining(Arena& arena, ssize alignment)
{
	ssize result = arena.TotalSize - (arena.TotalUsed + alignment_of(arena, alignment));
	return result;
}
#pragma endregion Arena
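// Usage sketch of the free-function arena API above (heap() as the backing allocator and the sizes are illustrative assumptions):
//
//   Arena arena = arena_init_from_allocator(heap(), kilobytes(64));
//   void* block = alloc(allocator_info(arena), 128);
//   check(arena);
//   free(arena);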
#pragma region FixedArena
template<s32 Size>
struct FixedArena;

template<s32 Size> AllocatorInfo allocator_info( FixedArena<Size>& fixed_arena );
template<s32 Size> void          fixed_arena_init( FixedArena<Size>& fixed_arena );
template<s32 Size> ssize         size_remaining( FixedArena<Size>& fixed_arena, ssize alignment );

// Just a wrapper around using an arena with memory associated with its scope instead of from an allocator.
// Used for static segment or stack allocations.
template< s32 Size >
struct FixedArena
{
	char  memory[Size];
	Arena arena;

#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
	forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }

	forceinline static FixedArena init()              { FixedArena result; GEN_NS fixed_arena_init<Size>(result); return result; }
	forceinline ssize size_remaining(ssize alignment) { return GEN_NS size_remaining(*this, alignment); }
#pragma endregion Member Mapping
#endif
};
template<s32 Size> inline
AllocatorInfo allocator_info( FixedArena<Size>& fixed_arena ) { return { arena_allocator_proc, & fixed_arena.arena }; }
template<s32 Size> inline
void fixed_arena_init(FixedArena<Size>& result) {
zero_size(& result.memory[0], Size);
result.arena = arena_init_from_memory(& result.memory[0], Size);
}
template<s32 Size> inline
ssize size_remaining(FixedArena<Size>& fixed_arena, ssize alignment) {
return size_remaining(fixed_arena.arena, alignment);
}
using Arena_1KB = FixedArena< kilobytes( 1 ) >;
using Arena_4KB = FixedArena< kilobytes( 4 ) >;
using Arena_8KB = FixedArena< kilobytes( 8 ) >;
@ -297,31 +360,20 @@ using Arena_512KB = FixedArena< kilobytes( 512 ) >;
using Arena_1MB = FixedArena< megabytes( 1 ) >;
using Arena_2MB = FixedArena< megabytes( 2 ) >;
using Arena_4MB = FixedArena< megabytes( 4 ) >;
#pragma endregion FixedArena
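// Sketch of the intended FixedArena usage (sizes are illustrative; fixed_arena_init deduces Size from its argument):
//
//   FixedArena< kilobytes(4) > scratch;
//   fixed_arena_init(scratch);
//   ssize left = size_remaining(scratch, GEN_DEFAULT_MEMORY_ALIGNMENT);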
#pragma region Pool
struct Pool;
AllocatorInfo allocator_info(Pool& pool);
void* pool_allocator_proc(void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags);
Pool pool_init(AllocatorInfo backing, ssize num_blocks, ssize block_size);
Pool pool_init_align(AllocatorInfo backing, ssize num_blocks, ssize block_size, ssize block_align);
void clear(Pool& pool);
void free(Pool& pool);
struct Pool
{
	AllocatorInfo Backing;
	void*         PhysicalStart;
	void*         FreeList;
@ -330,12 +382,36 @@ struct Pool
	ssize         TotalSize;
	ssize         NumBlocks;
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
#pragma region Member Mapping
forceinline operator AllocatorInfo() { return GEN_NS allocator_info(* this); }
forceinline static void* allocator_proc(void* allocator_data, AllocType type, ssize size, ssize alignment, void* old_memory, ssize old_size, u64 flags) { return GEN_NS pool_allocator_proc(allocator_data, type, size, alignment, old_memory, old_size, flags); }
forceinline static Pool init(AllocatorInfo backing, ssize num_blocks, ssize block_size) { return GEN_NS pool_init(backing, num_blocks, block_size); }
forceinline static Pool init_align(AllocatorInfo backing, ssize num_blocks, ssize block_size, ssize block_align) { return GEN_NS pool_init_align(backing, num_blocks, block_size, block_align); }
forceinline void clear() { GEN_NS clear(* this); }
forceinline void free() { GEN_NS free(* this); }
#pragma endregion
#endif
};
inline
AllocatorInfo allocator_info(Pool& pool) {
return { pool_allocator_proc, &pool };
}
inline
Pool pool_init(AllocatorInfo backing, ssize num_blocks, ssize block_size) {
return pool_init_align(backing, num_blocks, block_size, GEN_DEFAULT_MEMORY_ALIGNMENT);
}
inline
void free(Pool& pool) {
if(pool.Backing.Proc) {
GEN_NS free(pool.Backing, pool.PhysicalStart);
}
}
#pragma endregion Pool
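// Usage sketch of the Pool free functions (heap(), the block count, and MyNode are illustrative assumptions):
//
//   Pool  pool = pool_init(heap(), 64, sizeof(MyNode));
//   void* node = alloc(allocator_info(pool), sizeof(MyNode));
//   clear(pool);
//   free(pool);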
inline
b32 is_power_of_two( ssize x ) {

View File

@ -23,7 +23,7 @@ u8 adt_make_branch( ADT_Node* node, AllocatorInfo backing, char const* name, b32
node->type = type;
node->name = name;
node->parent = parent;
node->nodes = Array<ADT_Node>::init( backing );
node->nodes = array_init<ADT_Node>( backing );
if ( ! node->nodes )
return EADT_ERROR_OUT_OF_MEMORY;
@ -36,12 +36,12 @@ u8 adt_destroy_branch( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node );
if ( ( node->type == EADT_TYPE_OBJECT || node->type == EADT_TYPE_ARRAY ) && node->nodes )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); ++i )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); ++i )
{
adt_destroy_branch( node->nodes + i );
}
node->nodes.free();
free(node->nodes);
}
return 0;
}
@ -66,7 +66,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
return NULL;
}
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -76,7 +76,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
if ( deep_search )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* res = adt_find( node->nodes + i, name, deep_search );
@ -132,7 +132,7 @@ internal ADT_Node* _adt_get_value( ADT_Node* node, char const* value )
internal ADT_Node* _adt_get_field( ADT_Node* node, char* name, char* value )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -207,7 +207,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* run a value comparison against any child that is an object node */
else if ( node->type == EADT_TYPE_ARRAY )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( child->type != EADT_TYPE_OBJECT )
@ -225,7 +225,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* [value] */
else
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( _adt_get_value( child, l_b2 ) )
@ -257,7 +257,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
else
{
ssize idx = ( ssize )str_to_i64( buf, NULL, 10 );
if ( idx >= 0 && idx < scast(ssize, node->nodes.num()) )
if ( idx >= 0 && idx < scast(ssize, num(node->nodes)) )
{
found_node = &node->nodes[ idx ];
@ -282,12 +282,12 @@ ADT_Node* adt_alloc_at( ADT_Node* parent, ssize index )
if ( ! parent->nodes )
return NULL;
if ( index < 0 || index > scast(ssize, parent->nodes.num()) )
if ( index < 0 || index > scast(ssize, num(parent->nodes)) )
return NULL;
ADT_Node o = { 0 };
o.parent = parent;
if ( ! parent->nodes.append_at( o, index ) )
if ( ! append_at( parent->nodes, o, index ) )
return NULL;
return parent->nodes + index;
@ -303,7 +303,7 @@ ADT_Node* adt_alloc( ADT_Node* parent )
if ( ! parent->nodes )
return NULL;
return adt_alloc_at( parent, parent->nodes.num() );
return adt_alloc_at( parent, num(parent->nodes) );
}
b8 adt_set_obj( ADT_Node* obj, char const* name, AllocatorInfo backing )
@ -357,7 +357,7 @@ ADT_Node* adt_move_node( ADT_Node* node, ADT_Node* new_parent )
GEN_ASSERT_NOT_NULL( node );
GEN_ASSERT_NOT_NULL( new_parent );
GEN_ASSERT( new_parent->type == EADT_TYPE_ARRAY || new_parent->type == EADT_TYPE_OBJECT );
return adt_move_node_at( node, new_parent, new_parent->nodes.num() );
return adt_move_node_at( node, new_parent, num(new_parent->nodes) );
}
void adt_swap_nodes( ADT_Node* node, ADT_Node* other_node )
@ -381,7 +381,7 @@ void adt_remove_node( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node->parent );
ADT_Node* parent = node->parent;
ssize index = ( pointer_diff( parent->nodes, node ) / size_of( ADT_Node ) );
parent->nodes.remove_at( index );
remove_at( parent->nodes, index );
}
ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
@ -389,7 +389,7 @@ ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_obj( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_obj( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -402,7 +402,7 @@ ADT_Node* adt_append_arr( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_arr( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_arr( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -946,12 +946,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
}
if ( columnIndex >= scast(ssize, root->nodes.num()) )
if ( columnIndex >= scast(ssize, num(root->nodes)) )
{
adt_append_arr( root, NULL );
}
root->nodes[ columnIndex ].nodes.append( rowItem );
append(root->nodes[ columnIndex ].nodes, rowItem );
if ( delimiter == delim )
{
@ -979,7 +979,7 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
while ( *currentChar );
if ( root->nodes.num() == 0 )
if (num( root->nodes) == 0 )
{
GEN_CSV_ASSERT( "unexpected end of input. stream is empty." );
error = ECSV_Error__UNEXPECTED_END_OF_INPUT;
@ -989,12 +989,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
/* consider first row as a header. */
if ( has_header )
{
for ( ssize i = 0; i < scast(ssize, root->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(root->nodes)); i++ )
{
CSV_Object* col = root->nodes + i;
CSV_Object* hdr = col->nodes;
col->name = hdr->string;
col->nodes.remove_at( 0 );
remove_at(col->nodes, 0 );
}
}
@ -1057,11 +1057,11 @@ void csv_write_delimiter( FileInfo* file, CSV_Object* obj, char delimiter )
GEN_ASSERT_NOT_NULL( file );
GEN_ASSERT_NOT_NULL( obj );
GEN_ASSERT( obj->nodes );
ssize cols = obj->nodes.num();
ssize cols = num(obj->nodes);
if ( cols == 0 )
return;
ssize rows = obj->nodes[ 0 ].nodes.num();
ssize rows = num(obj->nodes[ 0 ].nodes);
if ( rows == 0 )
return;

View File

@ -1,3 +1,5 @@
#define GEN_SUPPORT_CPP_MEMBER_FEATURES 1
#ifdef GEN_INTELLISENSE_DIRECTIVES
# pragma once
#endif
@ -101,6 +103,14 @@
# define GEN_GCC_VERSION_CHECK(major,minor,patch) (0)
#endif
#ifndef GEN_COMPILER_C
# if defined(__STDC_VERSION__)
# define GEN_COMPILER_C 1
# else
# define GEN_COMPILER_C 0
# endif
#endif
#pragma endregion Platform Detection
#pragma region Mandatory Includes
@ -114,7 +124,7 @@
#pragma endregion Mandatory Includes
#ifdef GEN_DONT_USE_NAMESPACE
#if GEN_DONT_USE_NAMESPACE || GEN_COMPILER_C
# define GEN_NS
# define GEN_NS_BEGIN
# define GEN_NS_END

View File

@ -4,20 +4,9 @@
#endif
#pragma region String
String string_make_length( AllocatorInfo allocator, char const* str, ssize length )
{
	constexpr ssize header_size = sizeof( StringHeader );

	s32   alloc_size = header_size + length + 1;
	void* allocation = alloc( allocator, alloc_size );
@ -25,8 +14,8 @@ String String::make_length( AllocatorInfo allocator, char const* str, ssize leng
	if ( allocation == nullptr )
		return { nullptr };

	StringHeader&
	header = * rcast(StringHeader*, allocation);
	header = { allocator, length, length };

	String result = { rcast( char*, allocation) + header_size };
@ -41,9 +30,9 @@ String String::make_length( AllocatorInfo allocator, char const* str, ssize leng
	return result;
}

String string_make_reserve( AllocatorInfo allocator, ssize capacity )
{
	constexpr ssize header_size = sizeof( StringHeader );

	s32   alloc_size = header_size + capacity + 1;
	void* allocation = alloc( allocator, alloc_size );
@ -53,8 +42,8 @@ String String::make_reserve( AllocatorInfo allocator, ssize capacity )
	mem_set( allocation, 0, alloc_size );

	StringHeader*
	header = rcast(StringHeader*, allocation);
	header->Allocator = allocator;
	header->Capacity  = capacity;
	header->Length    = 0;
@ -62,69 +51,4 @@ String String::make_reserve( AllocatorInfo allocator, ssize capacity )
	String result = { rcast(char*, allocation) + header_size };
	return result;
}
String String::fmt_buf( AllocatorInfo allocator, char const* fmt, ... )
{
local_persist thread_local
char buf[ GEN_PRINTF_MAXLEN ] = { 0 };
va_list va;
va_start( va, fmt );
str_fmt_va( buf, GEN_PRINTF_MAXLEN, fmt, va );
va_end( va );
return make( allocator, buf );
}
bool String::append_fmt( char const* fmt, ... )
{
ssize res;
char buf[ GEN_PRINTF_MAXLEN ] = { 0 };
va_list va;
va_start( va, fmt );
res = str_fmt_va( buf, count_of( buf ) - 1, fmt, va ) - 1;
va_end( va );
return append( buf, res );
}
bool String::make_space_for( char const* str, ssize add_len )
{
ssize available = avail_space();
// NOTE: Return if there is enough space left
if ( available >= add_len )
{
return true;
}
else
{
ssize new_len, old_size, new_size;
void* ptr;
void* new_ptr;
AllocatorInfo allocator = get_header().Allocator;
Header* header = nullptr;
new_len = grow_formula( length() + add_len );
ptr = & get_header();
old_size = size_of( Header ) + length() + 1;
new_size = size_of( Header ) + new_len + 1;
new_ptr = resize( allocator, ptr, old_size, new_size );
if ( new_ptr == nullptr )
return false;
header = rcast( Header*, new_ptr);
header->Allocator = allocator;
header->Capacity = new_len;
Data = rcast( char*, header + 1 );
return true;
}
}
#pragma endregion String

View File

@ -19,8 +19,7 @@ struct StrC
#define txt( text ) StrC { sizeof( text ) - 1, ( text ) }
inline
StrC to_str( char const* str ) {
	return { str_len( str ), str };
}
@ -28,232 +27,441 @@ StrC to_str( char const* str )
// This is directly based off the ZPL string api.
// They used a header pattern.
// I kept it for simplicity of porting, but it's not necessary to keep it that way.
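// Layout sketch implied by the code below (an illustration, not an authoritative diagram):
//
//   allocation: [ StringHeader | characters ... '\0' ]
//                               ^ String::Data points here, so get_header() is Data - sizeof(StringHeader).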
#pragma region String
struct String;
struct StringHeader;
// Forward declarations for all file-scope functions
String string_make(AllocatorInfo allocator, char const* str);
String string_make(AllocatorInfo allocator, StrC str);
String string_make_reserve(AllocatorInfo allocator, ssize capacity);
String string_make_length(AllocatorInfo allocator, char const* str, ssize length);
String string_fmt(AllocatorInfo allocator, char* buf, ssize buf_size, char const* fmt, ...);
String string_fmt_buf(AllocatorInfo allocator, char const* fmt, ...);
String string_join(AllocatorInfo allocator, char const** parts, ssize num_parts, char const* glue);
usize string_grow_formula(usize value);
bool are_equal(String lhs, String rhs);
bool are_equal(String lhs, StrC rhs);
bool make_space_for(String& str, char const* to_append, ssize add_len);
bool append(String& str, char c);
bool append(String& str, char const* str_to_append);
bool append(String& str, char const* str_to_append, ssize length);
bool append(String& str, StrC str_to_append);
bool append(String& str, const String other);
bool append_fmt(String& str, char const* fmt, ...);
ssize avail_space(String const& str);
char& back(String& str);
bool contains(String const& str, StrC substring);
bool contains(String const& str, String const& substring);
ssize capacity(String const& str);
void clear(String& str);
String duplicate(String const& str, AllocatorInfo allocator);
void free(String& str);
StringHeader& get_header(String& str);
ssize length(String const& str);
b32 starts_with(String const& str, StrC substring);
b32 starts_with(String const& str, String substring);
void skip_line(String& str);
void strip_space(String& str);
void trim(String& str, char const* cut_set);
void trim_space(String& str);
String visualize_whitespace(String const& str);
struct StringHeader {
AllocatorInfo Allocator;
ssize Capacity;
ssize Length;
};
struct String
{
char* Data;
#if 1
#pragma region Member Mapping
forceinline static String make(AllocatorInfo allocator, char const* str) { return GEN_NS string_make(allocator, str); }
forceinline static String make(AllocatorInfo allocator, StrC str) { return GEN_NS string_make(allocator, str); }
forceinline static String make_reserve(AllocatorInfo allocator, ssize cap) { return GEN_NS string_make_reserve(allocator, cap); }
forceinline static String make_length(AllocatorInfo a, char const* s, ssize l) { return GEN_NS string_make_length(a, s, l); }
forceinline static String join(AllocatorInfo a, char const** p, ssize n, char const* g) { return GEN_NS string_join(a, p, n, g); }
forceinline static usize grow_formula(usize value) { return GEN_NS string_grow_formula(value); }
forceinline static bool are_equal(String lhs, String rhs) { return GEN_NS are_equal(lhs, rhs); }
forceinline static bool are_equal(String lhs, StrC rhs) { return GEN_NS are_equal(lhs, rhs); }
static
String fmt(AllocatorInfo allocator, char* buf, ssize buf_size, char const* fmt, ...) {
va_list va;
va_start(va, fmt);
str_fmt_va(buf, buf_size, fmt, va);
va_end(va);
return GEN_NS string_make(allocator, buf);
}
static
String fmt_buf(AllocatorInfo allocator, char const* fmt, ...) {
local_persist thread_local
char buf[GEN_PRINTF_MAXLEN] = { 0 };
va_list va;
va_start(va, fmt);
str_fmt_va(buf, GEN_PRINTF_MAXLEN, fmt, va);
va_end(va);
return GEN_NS string_make(allocator, buf);
}
forceinline bool make_space_for(char const* str, ssize add_len) { return GEN_NS make_space_for(*this, str, add_len); }
forceinline bool append(char c) { return GEN_NS append(*this, c); }
forceinline bool append(char const* str) { return GEN_NS append(*this, str); }
forceinline bool append(char const* str, ssize length) { return GEN_NS append(*this, str, length); }
forceinline bool append(StrC str) { return GEN_NS append(*this, str); }
forceinline bool append(const String other) { return GEN_NS append(*this, other); }
forceinline ssize avail_space() const { return GEN_NS avail_space(*this); }
forceinline char& back() { return GEN_NS back(*this); }
forceinline bool contains(StrC substring) const { return GEN_NS contains(*this, substring); }
forceinline bool contains(String const& substring) const { return GEN_NS contains(*this, substring); }
forceinline ssize capacity() const { return GEN_NS capacity(*this); }
forceinline void clear() { GEN_NS clear(*this); }
forceinline String duplicate(AllocatorInfo allocator) const { return GEN_NS duplicate(*this, allocator); }
forceinline void free() { GEN_NS free(*this); }
forceinline ssize length() const { return GEN_NS length(*this); }
forceinline b32 starts_with(StrC substring) const { return GEN_NS starts_with(*this, substring); }
forceinline b32 starts_with(String substring) const { return GEN_NS starts_with(*this, substring); }
forceinline void skip_line() { GEN_NS skip_line(*this); }
forceinline void strip_space() { GEN_NS strip_space(*this); }
forceinline void trim(char const* cut_set) { GEN_NS trim(*this, cut_set); }
forceinline void trim_space() { GEN_NS trim_space(*this); }
forceinline String visualize_whitespace() const { return GEN_NS visualize_whitespace(*this); }
forceinline StringHeader& get_header() { return GEN_NS get_header(*this); }
bool append_fmt(char const* fmt, ...) {
ssize res;
char buf[GEN_PRINTF_MAXLEN] = { 0 };
va_list va;
va_start(va, fmt);
res = str_fmt_va(buf, count_of(buf) - 1, fmt, va) - 1;
va_end(va);
return GEN_NS append(*this, buf, res);
}
forceinline operator bool() { return Data != nullptr; }
forceinline operator char*() { return Data; }
forceinline operator char const*() const { return Data; }
forceinline operator StrC() const { return { length(), Data }; }
String const& operator=(String const& other) const {
if (this == &other)
return *this;
String* this_ = ccast(String*, this);
this_->Data = other.Data;
return *this;
}
forceinline char& operator[](ssize index) { return Data[index]; }
forceinline char const& operator[](ssize index) const { return Data[index]; }
forceinline char* begin() const { return Data; }
forceinline char* end() const { return Data + length(); }
#pragma endregion Member Mapping
#endif
};
inline
usize string_grow_formula(usize value) {
// Using a very aggressive growth formula to reduce time mem_copying with recursive calls to append in this library.
return 4 * value + 8;
}
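// For instance, repeated growth from a length of 8 goes 8 -> 40 -> 168,
// trading slack capacity for fewer reallocations during recursive append calls.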
inline
String string_make(AllocatorInfo allocator, char const* str) {
	ssize length = str ? str_len(str) : 0;
	return string_make_length(allocator, str, length);
}

inline
String string_make(AllocatorInfo allocator, StrC str) {
	return string_make_length(allocator, str.Ptr, str.Len);
}

inline
String string_fmt(AllocatorInfo allocator, char* buf, ssize buf_size, char const* fmt, ...) {
	va_list va;
	va_start(va, fmt);
	str_fmt_va(buf, buf_size, fmt, va);
	va_end(va);

	return string_make(allocator, buf);
}

inline
String string_fmt_buf(AllocatorInfo allocator, char const* fmt, ...)
{
	local_persist thread_local
	char buf[GEN_PRINTF_MAXLEN] = { 0 };

	va_list va;
	va_start(va, fmt);
	str_fmt_va(buf, GEN_PRINTF_MAXLEN, fmt, va);
	va_end(va);

	return string_make(allocator, buf);
}

inline
String string_join(AllocatorInfo allocator, char const** parts, ssize num_parts, char const* glue)
{
	String result = string_make(allocator, "");

	for (ssize idx = 0; idx < num_parts; ++idx)
	{
		append(result, parts[idx]);

		if (idx < num_parts - 1)
			append(result, glue);
	}

	return result;
}
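// Small usage sketch of string_join (the allocator and parts are illustrative):
//
//   char const* parts[] = { "gen", "c", "library" };
//   String name = string_join(GlobalAllocator, parts, 3, "_");   // "gen_c_library"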
inline
bool append(String& str, char c) {
return append(str, &c, 1);
}
inline
bool append(String& str, char const* str_to_append) {
return append(str, str_to_append, str_len(str_to_append));
}
inline
bool append(String& str, char const* str_to_append, ssize append_length)
{
if (sptr(str_to_append) > 0)
{
ssize curr_len = length(str);
if (!make_space_for(str, str_to_append, append_length))
return false;
StringHeader& header = get_header(str);
mem_copy(str.Data + curr_len, str_to_append, append_length);
str.Data[curr_len + append_length] = '\0';
header.Length = curr_len + append_length;
}
return str_to_append != nullptr;
}
inline
bool append(String& str, StrC str_to_append) {
return append(str, str_to_append.Ptr, str_to_append.Len);
}
inline
bool append(String& str, const String other) {
return append(str, other.Data, length(other));
}
inline
bool are_equal(String lhs, String rhs)
{
	if (length(lhs) != length(rhs))
		return false;

	for (ssize idx = 0; idx < length(lhs); ++idx)
		if (lhs[idx] != rhs[idx])
			return false;

	return true;
}

inline
bool are_equal(String lhs, StrC rhs)
{
	if (length(lhs) != (rhs.Len))
		return false;

	for (ssize idx = 0; idx < length(lhs); ++idx)
		if (lhs[idx] != rhs[idx])
			return false;

	return true;
}
bool make_space_for( char const* str, ssize add_len );
bool append( char c )
{
return append( & c, 1 );
}
bool append( char const* str )
{
return append( str, str_len( str ) );
}
bool append( char const* str, ssize length )
{
if ( sptr(str) > 0 )
{
ssize curr_len = this->length();
if ( ! make_space_for( str, length ) )
return false;
Header& header = get_header();
mem_copy( Data + curr_len, str, length );
Data[ curr_len + length ] = '\0';
header.Length = curr_len + length;
}
return str != nullptr;
}
bool append( StrC str)
{
return append( str.Ptr, str.Len );
}
bool append( const String other )
{
return append( other.Data, other.length() );
}
bool append_fmt( char const* fmt, ... );
inline
ssize avail_space(String const& str) {
	StringHeader const& header = *rcast(StringHeader const*, str.Data - sizeof(StringHeader));
	return header.Capacity - header.Length;
}

inline
char& back(String& str) {
	return str.Data[length(str) - 1];
}

inline
bool contains(String const& str, StrC substring)
{
	StringHeader const& header = *rcast(StringHeader const*, str.Data - sizeof(StringHeader));
if (substring.Len > header.Length)
return false;
ssize main_len = header.Length;
ssize sub_len = substring.Len;
for (ssize idx = 0; idx <= main_len - sub_len; ++idx)
{
if (str_compare(str.Data + idx, substring.Ptr, sub_len) == 0)
return true;
}
return false;
}
inline
bool contains(String const& str, String const& substring)
{
StringHeader const& header = *rcast(StringHeader const*, str.Data - sizeof(StringHeader));
if (length(substring) > header.Length)
return false;
ssize main_len = header.Length;
ssize sub_len = length(substring);
for (ssize idx = 0; idx <= main_len - sub_len; ++idx)
{
if (str_compare(str.Data + idx, substring.Data, sub_len) == 0)
return true;
}
return false;
}
inline
ssize capacity(String const& str) {
StringHeader const& header = *rcast(StringHeader const*, str.Data - sizeof(StringHeader));
return header.Capacity;
}
inline
void clear(String& str) {
	get_header(str).Length = 0;
}

inline
String duplicate(String const& str, AllocatorInfo allocator) {
	return string_make_length(allocator, str.Data, length(str));
}

inline
void free(String& str) {
	if (!str.Data)
		return;

	StringHeader& header = get_header(str);
	GEN_NS free(header.Allocator, &header);
}

inline
StringHeader& get_header(String& str) {
	return *(StringHeader*)(str.Data - sizeof(StringHeader));
}

inline
ssize length(String const& str)
{
	StringHeader const& header = *rcast(StringHeader const*, str.Data - sizeof(StringHeader));
	return header.Length;
}

inline
bool make_space_for(String& str, char const* to_append, ssize add_len)
{
	ssize available = avail_space(str);

	if (available >= add_len) {
return true;
}
else
{
ssize new_len, old_size, new_size;
void* ptr;
void* new_ptr;
AllocatorInfo allocator = get_header(str).Allocator;
StringHeader* header = nullptr;
new_len = string_grow_formula(length(str) + add_len);
ptr = &get_header(str);
old_size = size_of(StringHeader) + length(str) + 1;
new_size = size_of(StringHeader) + new_len + 1;
new_ptr = resize(allocator, ptr, old_size, new_size);
if (new_ptr == nullptr)
return false;
		header = rcast(StringHeader*, new_ptr);
header->Allocator = allocator;
header->Capacity = new_len;
str.Data = rcast(char*, header + 1);
return true;
}
}
inline
b32 starts_with(String const& str, StrC substring) {
if (substring.Len > length(str))
return false;
b32 result = str_compare(str.Data, substring.Ptr, substring.Len) == 0;
return result;
}
inline
b32 starts_with(String const& str, String substring) {
	if (length(substring) > length(str))
		return false;

	b32 result = str_compare(str.Data, substring.Data, length(substring) - 1) == 0;
return result;
}
inline
void skip_line(String& str)
{
#define current (*scanner)
	char* scanner = str.Data;
	while (current != '\r' && current != '\n') {
		++scanner;
	}

	s32 new_length = scanner - str.Data;

	if (current == '\r') {
		new_length += 1;
	}

	mem_move(str.Data, scanner, new_length);

	StringHeader* header = &get_header(str);
header->Length = new_length;
#undef current
}
inline
void strip_space(String& str)
{
	char* write_pos = str.Data;
	char* read_pos  = str.Data;
while (*read_pos)
{
@ -268,15 +476,16 @@ struct String
write_pos[0] = '\0'; // Null-terminate the modified string
// Update the length if needed
	get_header(str).Length = write_pos - str.Data;
}
inline
void trim(String& str, char const* cut_set)
{
	ssize len = 0;

	char* start_pos = str.Data;
	char* end_pos   = str.Data + length(str) - 1;
while (start_pos <= end_pos && char_first_occurence(cut_set, *start_pos))
start_pos++;
@ -286,121 +495,55 @@ struct String
len = scast(ssize, (start_pos > end_pos) ? 0 : ((end_pos - start_pos) + 1));
	if (str.Data != start_pos)
		mem_move(str.Data, start_pos, len);

	str.Data[len] = '\0';

	get_header(str).Length = len;
}
inline
void trim_space(String& str) {
	trim(str, " \t\r\n\v\f");
}
// Debug function that provides a copy of the string with whitespace characters visualized.
inline
String visualize_whitespace(String const& str)
{
	StringHeader* header = (StringHeader*)(str.Data - sizeof(StringHeader));
	String result = string_make_reserve(header->Allocator, length(str) * 2); // Assume worst case for space requirements.

	for (char c : str) switch (c)
{
		case ' ':
			append(result, txt("·"));
		break;
		case '\t':
			append(result, txt(""));
		break;
		case '\n':
			append(result, txt(""));
		break;
		case '\r':
			append(result, txt(""));
		break;
		case '\v':
			append(result, txt(""));
		break;
		case '\f':
			append(result, txt(""));
		break;
		default:
			append(result, c);
		break;
	}
return result;
}
#pragma endregion String
// For-range support
char* begin() const
{
return Data;
}
char* end() const
{
Header const&
header = * rcast( Header const*, Data - sizeof( Header ));
return Data + header.Length;
}
operator bool()
{
return Data != nullptr;
}
operator char* ()
{
return Data;
}
operator char const* () const
{
return Data;
}
operator StrC() const
{
return { length(), Data };
}
// Used with cached strings
// Essentially makes the string a string view.
String const& operator = ( String const& other ) const
{
if ( this == & other )
return *this;
String*
this_ = ccast(String*, this);
this_->Data = other.Data;
return *this;
}
char& operator [] ( ssize index )
{
return Data[ index ];
}
char const& operator [] ( ssize index ) const
{
return Data[ index ];
}
char* Data;
};
struct String_POD {
char* Data;
};
static_assert( sizeof( String_POD ) == sizeof( String ), "String is not a POD" );

View File

@ -11,9 +11,9 @@ using namespace gen;
CodeBody gen_ecode( char const* path )
{
char scratch_mem[kilobytes(1)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -57,9 +57,9 @@ CodeBody gen_ecode( char const* path )
CodeBody gen_eoperator( char const* path )
{
char scratch_mem[kilobytes(4)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -70,7 +70,7 @@ CodeBody gen_eoperator( char const* path )
String enum_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
String to_str_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = str_strs [idx].string;
@ -113,9 +113,9 @@ CodeBody gen_eoperator( char const* path )
CodeBody gen_especifier( char const* path )
{
char scratch_mem[kilobytes(4)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
file_read_contents( scratch, zero_terminate, path );
file_read_contents( allocator_info(scratch), zero_terminate, path );
CSV_Object csv_nodes;
csv_parse( &csv_nodes, scratch_mem, GlobalAllocator, false );
@ -126,7 +126,7 @@ CodeBody gen_especifier( char const* path )
String enum_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
String to_str_entries = String::make_reserve( GlobalAllocator, kilobytes(1) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = str_strs [idx].string;
@ -218,14 +218,16 @@ CodeBody gen_especifier( char const* path )
CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
{
char scratch_mem[kilobytes(16)];
Arena scratch = Arena::init_from_memory( scratch_mem, sizeof(scratch_mem) );
Arena scratch = arena_init_from_memory( scratch_mem, sizeof(scratch_mem) );
FileContents enum_content = file_read_contents( scratch, zero_terminate, etok_path );
AllocatorInfo scratch_info = allocator_info(scratch);
FileContents enum_content = file_read_contents( scratch_info, zero_terminate, etok_path );
CSV_Object csv_enum_nodes;
csv_parse( &csv_enum_nodes, rcast(char*, enum_content.data), GlobalAllocator, false );
FileContents attrib_content = file_read_contents( scratch, zero_terminate, attr_path );
FileContents attrib_content = file_read_contents( scratch_info, zero_terminate, attr_path );
CSV_Object csv_attr_nodes;
csv_parse( &csv_attr_nodes, rcast(char*, attrib_content.data), GlobalAllocator, false );
@ -241,7 +243,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
String to_str_attributes = String::make_reserve( GlobalAllocator, kilobytes(4) );
String attribute_define_entries = String::make_reserve( GlobalAllocator, kilobytes(4) );
for (usize idx = 0; idx < enum_strs.num(); idx++)
for (usize idx = 0; idx < num(enum_strs); idx++)
{
char const* enum_str = enum_strs[idx].string;
char const* entry_to_str = enum_str_strs [idx].string;
@ -250,7 +252,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
to_str_entries.append_fmt( "{ sizeof(\"%s\"), \"%s\" },\n", entry_to_str, entry_to_str);
}
for ( usize idx = 0; idx < attribute_strs.num(); idx++ )
for ( usize idx = 0; idx < num(attribute_strs); idx++ )
{
char const* attribute_str = attribute_strs[idx].string;
char const* entry_to_str = attribute_str_strs [idx].string;
@ -259,7 +261,7 @@ CodeBody gen_etoktype( char const* etok_path, char const* attr_path )
to_str_attributes.append_fmt( "{ sizeof(\"%s\"), \"%s\" },\n", entry_to_str, entry_to_str);
attribute_define_entries.append_fmt( "Entry( Attribute_%s, \"%s\" )", attribute_str, entry_to_str );
if ( idx < attribute_strs.num() - 1 )
if ( idx < num(attribute_strs) - 1 )
attribute_define_entries.append( " \\\n");
else
attribute_define_entries.append( "\n");

View File

@ -0,0 +1,10 @@
#pragma once
#include "gen.hpp"
GEN_NS_BEGIN
#include "dependencies/parsing.hpp"
GEN_NS_END
using namespace gen;

View File

@ -44,6 +44,7 @@ Push-Location $path_root
$verbose = $false
[bool] $bootstrap = $false
[bool] $singleheader = $false
[bool] $c_library = $false
[bool] $unreal = $false
[bool] $test = $false
@ -59,6 +60,7 @@ if ( $args ) { $args | ForEach-Object {
"debug" { $release = $false }
"bootstrap" { $bootstrap = $true }
"singleheader" { $singleheader = $true }
"c_library" { $c_library = $true }
"unreal" { $unreal = $true }
"test" { $test = $true }
}
@ -88,7 +90,7 @@ else {
$optimize = $true
}
if ( $bootstrap -eq $false -and $singleheader -eq $false -and $unreal -eq $false -and $test -eq $false ) {
if ( $bootstrap -eq $false -and $singleheader -eq $false -and $c_library -eq $false -and $unreal -eq $false -and $test -eq $false ) {
throw "No build target specified. One must be specified, this script will not assume one"
}
@ -103,8 +105,9 @@ write-host "Build Type: $(if ($release) {"Release"} else {"Debug"} )"
$path_build = Join-Path $path_root build
$path_project = Join-Path $path_root project
$path_scripts = Join-Path $path_root scripts
$path_singleheader = Join-Path $path_root singleheader
$path_unreal = Join-Path $path_root unreal_engine
$path_c_library = join-Path $path_root gen_c_library
$path_singleheader = Join-Path $path_root gen_singleheader
$path_unreal = Join-Path $path_root gen_unreal_engine
$path_test = Join-Path $path_root test
if ( $bootstrap )
@ -187,6 +190,44 @@ if ( $singleheader )
Pop-Location
}
if ( $c_library )
{
$path_build = join-path $path_c_library build
$path_gen = join-path $path_c_library gen
if ( -not(Test-Path($path_build) )) {
New-Item -ItemType Directory -Path $path_build
}
if ( -not(Test-Path($path_gen) )) {
New-Item -ItemType Directory -Path $path_gen
}
$includes = @( $path_project )
$unit = join-path $path_c_library "c_library.cpp"
$executable = join-path $path_build "c_library.exe"
$compiler_args = @()
$compiler_args += ( $flag_define + 'GEN_TIME' )
$linker_args = @(
$flag_link_win_subsystem_console
)
build-simple $path_build $includes $compiler_args $linker_args $unit $executable
Push-Location $path_c_library
if ( Test-Path( $executable ) ) {
write-host "`nRunning c_library generator"
$time_taken = Measure-Command { & $executable
| ForEach-Object {
write-host `t $_ -ForegroundColor Green
}
}
write-host "`nc_library generator completed in $($time_taken.TotalMilliseconds) ms"
}
Pop-Location
}
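# Example invocation (the script path is an assumption; the c_library and release flags come from the argument parsing above):
#   .\scripts\build.ps1 c_library release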
if ( $unreal )
{
$path_build = join-path $path_unreal build

View File

@ -0,0 +1,24 @@
__VERSION 1
// This is an example template to be used with the refactor program
// Use it to refactor the naming convention of this library to your own.
// Can be used as an aid to help you use your project's implementation if it fulfills the dependencies of this project.
// Example: You most likely already have a memory and string library; just rename the functions and make sure the args are the same.
// Program: https://github.com/Ed94/refactor
// NOTE: Due to the current limitations of the program, not every symbol in the library can be renamed.
// This is due to the program not actually parsing C/C++.
// not : Ignore
// include : #includes
// word : Alphanumeric or underscore
// namespace : Prefix search and replace (c-namespaces).
// regex : Unavailable in __VERSION 1.
// Precedence (highest to lowest):
// word, namespace, regex
// Gen Macro namespace
// namespace GEN_, new_namespace_
// TODO(Ed): This will be large as nearly all symbols will need to optionally support getting prefixed with gen_ or something else the user wants.
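// Hypothetical entries following the rule syntax above (not part of the shipped template):
// namespace gen_, my_
// word GEN_NS, MY_NS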

View File