Reduce C++ feature usage of the Array container.

Almost ready to be interoperable with C.
2024-11-30 18:54:19 -05:00
parent cc245cc263
commit 6d04165b96
11 changed files with 235 additions and 133 deletions
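For context before the hunks: the direction of this change is to make Array representable as a plain pointer whose bookkeeping lives in a header placed immediately in front of the elements, manipulated through free functions rather than member functions. The sketch below is a simplified, hypothetical illustration of that layout, not the library's code: it drops the AllocatorInfo field from the header, uses malloc/free instead of the allocator plumbing, and the int_array_* helpers are stand-in names.

#include <cstdio>
#include <cstdlib>

// The real header also stores an AllocatorInfo as its first field.
struct ArrayHeader {
    size_t Capacity;
    size_t Num;
};

// Allocate the header and the element storage as one block,
// then hand back a pointer to the first element.
static int* int_array_init_reserve(size_t capacity) {
    ArrayHeader* header = (ArrayHeader*) malloc(sizeof(ArrayHeader) + capacity * sizeof(int));
    header->Capacity = capacity;
    header->Num      = 0;
    return (int*)(header + 1);
}

// Same trick get_header() uses in the hunks below: step one header back from the data pointer.
static ArrayHeader* int_array_header(int* array) {
    return ((ArrayHeader*) array) - 1;
}

static void int_array_free(int* array) {
    free(int_array_header(array));
}

int main() {
    int* arr = int_array_init_reserve(8);
    ArrayHeader* header = int_array_header(arr);

    arr[header->Num++] = 10;  // append without growth handling, for brevity
    arr[header->Num++] = 20;

    printf("num=%zu cap=%zu back=%d\n", header->Num, header->Capacity, arr[header->Num - 1]);

    int_array_free(arr);
    return 0;
}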


@ -14,12 +14,26 @@ template<class TType>
using TRemoveConst = typename RemoveConst<TType>::Type;
#pragma region Array
#if ! GEN_COMPILER_C
#define Array(Type) Array<Type>
// #define array_init(Type, ...) array_init <Type>(__VA_ARGS__)
// #define array_init_reserve(Type, ...) array_init_reserve<Type>(__VA_ARGS__)
#endif
struct ArrayHeader;
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type> struct Array;
#else
template<class Type>
using Array = Type*;
#endif
usize array_grow_formula(ssize value);
template<class Type> Array<Type> array_init(AllocatorInfo allocator);
template<class Type> Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity);
template<class Type> usize array_grow_formula(ssize value);
template<class Type> bool append(Array<Type>& array, Array<Type> other);
template<class Type> bool append(Array<Type>& array, Type value);
template<class Type> bool append(Array<Type>& array, Type* items, usize item_num);
@ -38,18 +52,22 @@ template<class Type> bool resize(Array<Type>& array, usize num);
template<class Type> bool set_capacity(Array<Type>& array, usize new_capacity);
template<class Type> ArrayHeader* get_header(Array<Type>& array);
template<class Type> forceinline Type* begin(Array<Type>& array) { return array; }
template<class Type> forceinline Type* end(Array<Type>& array) { return array + get_header(array)->Num; }
template<class Type> forceinline Type* next(Type* entry) { return entry + 1; }
struct ArrayHeader {
AllocatorInfo Allocator;
usize Capacity;
usize Num;
};
#if GEN_SUPPORT_CPP_MEMBER_FEATURES
template<class Type>
struct Array
{
Type* Data;
#if 1
#pragma region Member Mapping
forceinline static Array init(AllocatorInfo allocator) { return GEN_NS array_init<Type>(allocator); }
forceinline static Array init_reserve(AllocatorInfo allocator, ssize capacity) { return GEN_NS array_init_reserve<Type>(allocator, capacity); }
@ -78,12 +96,12 @@ struct Array
forceinline Type* begin() { return Data; }
forceinline Type* end() { return Data + get_header()->Num; }
#pragma endregion Member Mapping
#endif
};
#endif
template<class Type> inline
Array<Type> array_init(AllocatorInfo allocator) {
return array_init_reserve<Type>(allocator, array_grow_formula<Type>(0));
return array_init_reserve<Type>(allocator, array_grow_formula(0));
}
template<class Type> inline
@ -101,7 +119,6 @@ Array<Type> array_init_reserve(AllocatorInfo allocator, ssize capacity)
return {rcast(Type*, header + 1)};
}
template<class Type> inline
usize array_grow_formula(ssize value) {
return 2 * value + 8;
}
@ -123,7 +140,7 @@ bool append(Array<Type>& array, Type value)
header = get_header(array);
}
array.Data[header->Num] = value;
array[header->Num] = value;
header->Num++;
return true;
@ -166,7 +183,7 @@ bool append_at(Array<Type>& array, Type item, usize idx)
header = get_header(array);
}
Type* target = array.Data + idx;
Type* target = array + idx;
mem_move(target + 1, target, (header->Num - idx) * sizeof(Type));
header->Num++;
@ -205,7 +222,7 @@ bool append_at(Array<Type>& array, Type* items, usize item_num, usize idx)
template<class Type> inline
Type& back(Array<Type>& array) {
ArrayHeader* header = get_header(array);
return array.Data[header->Num - 1];
return array[header->Num - 1];
}
template<class Type> inline
@ -224,7 +241,7 @@ bool fill(Array<Type>& array, usize begin, usize end, Type value)
for (ssize idx = ssize(begin); idx < ssize(end); idx++)
{
array.Data[idx] = value;
array[idx] = value;
}
return true;
@ -234,20 +251,22 @@ template<class Type> inline
void free(Array<Type>& array) {
ArrayHeader* header = get_header(array);
gen::free(header->Allocator, header);
array.Data = nullptr;
Type*& Data = rcast(Type*&, array);
Data = nullptr;
}
template<class Type> inline
ArrayHeader* get_header(Array<Type>& array) {
using NonConstType = TRemoveConst<Type>;
return rcast(ArrayHeader*, const_cast<NonConstType*>(array.Data)) - 1;
Type* Data = array; // In C this is just a pointer copy; in C++ it pulls the Data member out of the struct.
return rcast(ArrayHeader*, const_cast<NonConstType*>(Data)) - 1;
}
template<class Type> inline
bool grow(Array<Type>& array, usize min_capacity)
{
ArrayHeader* header = get_header(array);
usize new_capacity = array_grow_formula<Type>(header->Capacity);
usize new_capacity = array_grow_formula(header->Capacity);
if (new_capacity < min_capacity)
new_capacity = min_capacity;
@ -273,7 +292,7 @@ void remove_at(Array<Type>& array, usize idx)
ArrayHeader* header = get_header(array);
GEN_ASSERT(idx < header->Num);
mem_move(array.Data + idx, array.Data + idx + 1, sizeof(Type) * (header->Num - idx - 1));
mem_move(array + idx, array + idx + 1, sizeof(Type) * (header->Num - idx - 1));
header->Num--;
}
@ -329,7 +348,8 @@ bool set_capacity(Array<Type>& array, usize new_capacity)
GEN_NS free(header->Allocator, header);
array.Data = rcast(Type*, new_header + 1);
Type*& Data = rcast(Type*&, array);
Data = rcast(Type*, new_header + 1);
return true;
}
#pragma endregion Array
@ -371,11 +391,11 @@ template<class Type> bool full(HashTable<Type>& table);
template<class Type> void map(HashTable<Type>& table, void (*map_proc)(u64 key, Type value));
template<class Type> void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value));
static constexpr f32 HashTable_CriticalLoadScale = 0.7f;
template<typename Type>
struct HashTable
{
static constexpr f32 CriticalLoadScale = 0.7f;
Array<ssize> Hashes;
Array<HashTableEntry<Type>> Entries;
@ -411,26 +431,26 @@ HashTable<Type> hashtable_init_reserve(AllocatorInfo allocator, usize num)
{
HashTable<Type> result = { { nullptr }, { nullptr } };
result.Hashes = Array<ssize>::init_reserve(allocator, num);
result.Hashes.get_header()->Num = num;
result.Hashes.resize(num);
result.Hashes.fill(0, num, -1);
result.Hashes = array_init_reserve<ssize>(allocator, num);
get_header(result.Hashes)->Num = num;
resize(result.Hashes, num);
fill<ssize>(result.Hashes, 0, num, -1);
result.Entries = Array<HashTableEntry<Type>>::init_reserve(allocator, num);
result.Entries = array_init_reserve<HashTableEntry<Type>>(allocator, num);
return result;
}
template<typename Type> inline
void clear(HashTable<Type>& table) {
table.Entries.clear();
table.Hashes.fill(0, table.Hashes.num(), -1);
clear(table.Entries);
fill<ssize>(table.Hashes, 0, num(table.Hashes), -1);
}
template<typename Type> inline
void destroy(HashTable<Type>& table) {
if (table.Hashes && table.Hashes.get_header()->Capacity) {
table.Hashes.free();
table.Entries.free();
if (table.Hashes && get_header(table.Hashes)->Capacity) {
free(table.Hashes);
free(table.Entries);
}
}
@ -463,7 +483,7 @@ void map_mut(HashTable<Type>& table, void (*map_proc)(u64 key, Type* value)) {
template<typename Type> inline
void grow(HashTable<Type>& table) {
ssize new_num = Array<HashTableEntry<Type>>::grow_formula(table.Entries.num());
ssize new_num = array_grow_formula(num(table.Entries));
rehash(table, new_num);
}
@ -471,9 +491,9 @@ template<typename Type> inline
void rehash(HashTable<Type>& table, ssize new_num)
{
ssize last_added_index;
HashTable<Type> new_ht = hashtable_init_reserve<Type>(table.Hashes.get_header()->Allocator, new_num);
HashTable<Type> new_ht = hashtable_init_reserve<Type>(get_header(table.Hashes)->Allocator, new_num);
for (ssize idx = 0; idx < ssize(table.Entries.num()); ++idx)
for (ssize idx = 0; idx < ssize(num(table.Entries)); ++idx)
{
HashTableFindResult find_result;
HashTableEntry<Type>& entry = table.Entries[idx];
@ -580,8 +600,8 @@ ssize add_entry(HashTable<Type>& table, u64 key) {
ssize idx;
HashTableEntry<Type> entry = { key, -1 };
idx = table.Entries.num();
table.Entries.append(entry);
idx = num(table.Entries);
append(table.Entries, entry);
return idx;
}
@ -590,9 +610,9 @@ HashTableFindResult find(HashTable<Type>& table, u64 key)
{
HashTableFindResult result = { -1, -1, -1 };
if (table.Hashes.num() > 0)
if (num(table.Hashes) > 0)
{
result.HashIndex = key % table.Hashes.num();
result.HashIndex = key % num(table.Hashes);
result.EntryIndex = table.Hashes[result.HashIndex];
while (result.EntryIndex >= 0)
@ -610,8 +630,8 @@ HashTableFindResult find(HashTable<Type>& table, u64 key)
template<typename Type> inline
bool full(HashTable<Type>& table) {
usize critical_load = usize(HashTable<Type>::CriticalLoadScale * f32(table.Hashes.num()));
b32 result = table.Entries.num() > critical_load;
usize critical_load = usize(HashTable_CriticalLoadScale * f32(num(table.Hashes)));
b32 result = num(table.Entries) > critical_load;
return result;
}
#pragma endregion HashTable
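As an aside, a worked example (hypothetical, not part of the commit) of the load check that full() performs with HashTable_CriticalLoadScale = 0.7f; is_full below is a stand-in that takes the two counts directly rather than the whole table.

#include <cstddef>
#include <cstdio>

static constexpr float CriticalLoadScale = 0.7f;  // mirrors HashTable_CriticalLoadScale

// True once the entry count exceeds 70% of the hash-slot count; the insertion
// path can use this to decide when to grow and rehash the table.
static bool is_full(size_t num_hashes, size_t num_entries) {
    size_t critical_load = size_t(CriticalLoadScale * float(num_hashes));
    return num_entries > critical_load;
}

int main() {
    // With 64 hash slots, critical_load is 44: 44 entries is not yet full, 45 is.
    printf("%d %d\n", is_full(64, 44), is_full(64, 45));  // prints: 0 1
    return 0;
}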


@ -505,7 +505,7 @@ b8 file_stream_new( FileInfo* file, AllocatorInfo allocator )
d->allocator = allocator;
d->flags = EFileStream_CLONE_WRITABLE;
d->cap = 0;
d->buf = Array<u8>::init( allocator );
d->buf = array_init<u8>( allocator );
if ( ! d->buf )
return false;
@ -531,7 +531,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
d->flags = flags;
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = Array<u8>::init_reserve( allocator, size );
Array<u8> arr = array_init_reserve<u8>( allocator, size );
d->buf = arr;
if ( ! d->buf )
@ -540,7 +540,7 @@ b8 file_stream_open( FileInfo* file, AllocatorInfo allocator, u8* buffer, ssize
mem_copy( d->buf, buffer, size );
d->cap = size;
arr.get_header()->Num = size;
get_header(arr)->Num = size;
}
else
{
@ -610,9 +610,9 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
{
Array<u8> arr = { d->buf };
if ( arr.get_header()->Capacity < usize(new_cap) )
if ( get_header(arr)->Capacity < usize(new_cap) )
{
if ( ! arr.grow( ( s64 )( new_cap ) ) )
if ( ! grow( arr, ( s64 )( new_cap ) ) )
return false;
d->buf = arr;
}
@ -626,7 +626,7 @@ GEN_FILE_WRITE_AT_PROC( _memory_file_write )
mem_copy( d->buf + offset + rwlen, pointer_add_const( buffer, rwlen ), extralen );
d->cap = new_cap;
arr.get_header()->Capacity = new_cap;
get_header(arr)->Capacity = new_cap;
}
else
{
@ -647,7 +647,7 @@ GEN_FILE_CLOSE_PROC( _memory_file_close )
if ( d->flags & EFileStream_CLONE_WRITABLE )
{
Array<u8> arr = { d->buf };
arr.free();
free(arr);
}
free( allocator, d );


@ -187,7 +187,7 @@
#if !defined(typeof) && (!GEN_COMPILER_C || __STDC_VERSION__ < 202311L)
# if ! GEN_COMPILER_C
# define typeof
# define typeof decltype
# elif defined(_MSC_VER)
# define typeof(x) __typeof(x)
# elif defined(__GNUC__) || defined(__clang__)
@ -197,4 +197,12 @@
# endif
#endif
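A quick hypothetical illustration (not from the commit) of what mapping typeof to decltype buys on the C++ branch above: the same macro-based declaration style compiles in either language.

#include <cstdio>

#define typeof decltype  // what the `! GEN_COMPILER_C` branch above now defines

int main() {
    int value = 42;
    typeof(value) copy = value;  // expands to: decltype(value) copy = value;
    printf("%d\n", copy);
    return 0;
}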
// This is intended mainly for internal use or for the C-library variant (see the usage sketch after this hunk).
// C++ users can just use a range-based for directly.
#if GEN_COMPILER_C
# define foreach(Type, entry_id, iterable) for ( Type entry_id = begin(iterable); entry_id != end(iterable); entry_id = next(entry_id) )
#else
# define foreach(Type, entry_id, iterable) for ( Type entry_id : iterable )
#endif
#pragma endregion Macros
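A short usage sketch (hypothetical, not part of the commit) of how the C-style expansion of foreach reads; IntSlice and its begin/end/next overloads stand in for the library's Array and the free functions declared in the containers header.

#include <cstdio>

struct IntSlice { int* data; int count; };

int* begin(IntSlice& s) { return s.data; }
int* end  (IntSlice& s) { return s.data + s.count; }
int* next (int* entry)  { return entry + 1; }

// Mirrors the GEN_COMPILER_C branch of the macro above.
#define foreach(Type, entry_id, iterable) \
    for ( Type entry_id = begin(iterable); entry_id != end(iterable); entry_id = next(entry_id) )

int main() {
    int storage[] = { 1, 2, 3 };
    IntSlice slice = { storage, 3 };

    foreach(int*, it, slice)
        printf("%d\n", *it);
    return 0;
}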


@ -23,7 +23,7 @@ u8 adt_make_branch( ADT_Node* node, AllocatorInfo backing, char const* name, b32
node->type = type;
node->name = name;
node->parent = parent;
node->nodes = Array<ADT_Node>::init( backing );
node->nodes = array_init<ADT_Node>( backing );
if ( ! node->nodes )
return EADT_ERROR_OUT_OF_MEMORY;
@ -36,12 +36,12 @@ u8 adt_destroy_branch( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node );
if ( ( node->type == EADT_TYPE_OBJECT || node->type == EADT_TYPE_ARRAY ) && node->nodes )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); ++i )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); ++i )
{
adt_destroy_branch( node->nodes + i );
}
node->nodes.free();
free(node->nodes);
}
return 0;
}
@ -66,7 +66,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
return NULL;
}
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -76,7 +76,7 @@ ADT_Node* adt_find( ADT_Node* node, char const* name, b32 deep_search )
if ( deep_search )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* res = adt_find( node->nodes + i, name, deep_search );
@ -132,7 +132,7 @@ internal ADT_Node* _adt_get_value( ADT_Node* node, char const* value )
internal ADT_Node* _adt_get_field( ADT_Node* node, char* name, char* value )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
if ( ! str_compare( node->nodes[ i ].name, name ) )
{
@ -207,7 +207,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* run a value comparison against any child that is an object node */
else if ( node->type == EADT_TYPE_ARRAY )
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( child->type != EADT_TYPE_OBJECT )
@ -225,7 +225,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
/* [value] */
else
{
for ( ssize i = 0; i < scast(ssize, node->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(node->nodes)); i++ )
{
ADT_Node* child = &node->nodes[ i ];
if ( _adt_get_value( child, l_b2 ) )
@ -257,7 +257,7 @@ ADT_Node* adt_query( ADT_Node* node, char const* uri )
else
{
ssize idx = ( ssize )str_to_i64( buf, NULL, 10 );
if ( idx >= 0 && idx < scast(ssize, node->nodes.num()) )
if ( idx >= 0 && idx < scast(ssize, num(node->nodes)) )
{
found_node = &node->nodes[ idx ];
@ -282,12 +282,12 @@ ADT_Node* adt_alloc_at( ADT_Node* parent, ssize index )
if ( ! parent->nodes )
return NULL;
if ( index < 0 || index > scast(ssize, parent->nodes.num()) )
if ( index < 0 || index > scast(ssize, num(parent->nodes)) )
return NULL;
ADT_Node o = { 0 };
o.parent = parent;
if ( ! parent->nodes.append_at( o, index ) )
if ( ! append_at( parent->nodes, o, index ) )
return NULL;
return parent->nodes + index;
@ -303,7 +303,7 @@ ADT_Node* adt_alloc( ADT_Node* parent )
if ( ! parent->nodes )
return NULL;
return adt_alloc_at( parent, parent->nodes.num() );
return adt_alloc_at( parent, num(parent->nodes) );
}
b8 adt_set_obj( ADT_Node* obj, char const* name, AllocatorInfo backing )
@ -357,7 +357,7 @@ ADT_Node* adt_move_node( ADT_Node* node, ADT_Node* new_parent )
GEN_ASSERT_NOT_NULL( node );
GEN_ASSERT_NOT_NULL( new_parent );
GEN_ASSERT( new_parent->type == EADT_TYPE_ARRAY || new_parent->type == EADT_TYPE_OBJECT );
return adt_move_node_at( node, new_parent, new_parent->nodes.num() );
return adt_move_node_at( node, new_parent, num(new_parent->nodes) );
}
void adt_swap_nodes( ADT_Node* node, ADT_Node* other_node )
@ -381,7 +381,7 @@ void adt_remove_node( ADT_Node* node )
GEN_ASSERT_NOT_NULL( node->parent );
ADT_Node* parent = node->parent;
ssize index = ( pointer_diff( parent->nodes, node ) / size_of( ADT_Node ) );
parent->nodes.remove_at( index );
remove_at( parent->nodes, index );
}
ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
@ -389,7 +389,7 @@ ADT_Node* adt_append_obj( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_obj( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_obj( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -402,7 +402,7 @@ ADT_Node* adt_append_arr( ADT_Node* parent, char const* name )
ADT_Node* o = adt_alloc( parent );
if ( ! o )
return NULL;
if ( adt_set_arr( o, name, parent->nodes.get_header()->Allocator ) )
if ( adt_set_arr( o, name, get_header(parent->nodes)->Allocator ) )
{
adt_remove_node( o );
return NULL;
@ -946,12 +946,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
}
if ( columnIndex >= scast(ssize, root->nodes.num()) )
if ( columnIndex >= scast(ssize, num(root->nodes)) )
{
adt_append_arr( root, NULL );
}
root->nodes[ columnIndex ].nodes.append( rowItem );
append(root->nodes[ columnIndex ].nodes, rowItem );
if ( delimiter == delim )
{
@ -979,7 +979,7 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
}
while ( *currentChar );
if ( root->nodes.num() == 0 )
if (num( root->nodes) == 0 )
{
GEN_CSV_ASSERT( "unexpected end of input. stream is empty." );
error = ECSV_Error__UNEXPECTED_END_OF_INPUT;
@ -989,12 +989,12 @@ u8 csv_parse_delimiter( CSV_Object* root, char* text, AllocatorInfo allocator, b
/* consider first row as a header. */
if ( has_header )
{
for ( ssize i = 0; i < scast(ssize, root->nodes.num()); i++ )
for ( ssize i = 0; i < scast(ssize, num(root->nodes)); i++ )
{
CSV_Object* col = root->nodes + i;
CSV_Object* hdr = col->nodes;
col->name = hdr->string;
col->nodes.remove_at( 0 );
remove_at(col->nodes, 0 );
}
}
@ -1057,11 +1057,11 @@ void csv_write_delimiter( FileInfo* file, CSV_Object* obj, char delimiter )
GEN_ASSERT_NOT_NULL( file );
GEN_ASSERT_NOT_NULL( obj );
GEN_ASSERT( obj->nodes );
ssize cols = obj->nodes.num();
ssize cols = num(obj->nodes);
if ( cols == 0 )
return;
ssize rows = obj->nodes[ 0 ].nodes.num();
ssize rows = num(obj->nodes[ 0 ].nodes);
if ( rows == 0 )
return;


@ -1,3 +1,5 @@
#define GEN_SUPPORT_CPP_MEMBER_FEATURES 1
#ifdef GEN_INTELLISENSE_DIRECTIVES
# pragma once
#endif