mirror of https://github.com/Ed94/WATL_Exercise.git
synced 2025-11-02 23:06:17 -08:00
hash64_djb8 -> hash64_fnv1a; kt1l -> ktl and made the populate procedure only for strings + misc changes
For this exercise that doesn't need to be generic, kt1cx does the job.
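The swap replaces a shift-and-add (djb-style) byte fold with 64-bit FNV-1a, which XORs each byte into the hash before multiplying by the FNV prime. A minimal standalone sketch of the core loop, using the same seed (0xcbf29ce484222325) and prime (0x100000001b3) as the hash64__fnv1a added in this diff (the function name here is hypothetical):

    #include <stdint.h>
    #include <stddef.h>
    // 64-bit FNV-1a: xor the byte in first, then multiply by the FNV prime.
    static uint64_t fnv1a_64(void const* data, size_t len, uint64_t seed) {
        uint64_t hash = seed ? seed : 0xcbf29ce484222325ull; // default offset basis
        uint8_t const* bytes = (uint8_t const*)data;
        for (size_t i = 0; i < len; ++i) {
            hash = (hash ^ bytes[i]) * 0x100000001b3ull;
        }
        return hash;
    }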
C/watl.v0.msvc.c (162 additions, 162 deletions)

--- a/C/watl.v0.msvc.c
+++ b/C/watl.v0.msvc.c
@@ -147,32 +147,37 @@ def_struct(tmpl(Slice,type)) { \

 typedef def_Slice(void);
 typedef def_Slice(Byte);
-#define slice_byte(slice) ((Slice_Byte){cast(Byte*, (slice).ptr), (slice).len * size_of_slice_type(slice)})
-#define slice_fmem(mem) ((Slice_Byte){ mem, size_of(mem) })
+#define slice(type, data, len) ((tmpl(Slice,type)){ data, len })
+#define slice_fmem(mem) ((Slice_Byte){ mem, size_of(mem) })
+#define slice_to_bytes(slice) ((Slice_Byte){cast(Byte*, (slice).ptr), (slice).len * size_of_slice_type(slice)})

 void slice__copy(Slice_Byte dest, SSIZE dest_typewidth, Slice_Byte src, SSIZE src_typewidth);
 void slice__zero(Slice_Byte mem, SSIZE typewidth);
 #define slice_copy(dest, src) do { \
     static_assert(typeof_same(dest, src)); \
-    slice__copy(slice_byte(dest), size_of_slice_type(dest), slice_byte(src), size_of_slice_type(src)); \
+    slice__copy(slice_to_bytes(dest), size_of_slice_type(dest), slice_to_bytes(src), size_of_slice_type(src)); \
 } while (0)
-#define slice_zero(slice) slice__zero(slice_byte(slice), size_of_slice_type(slice))
+#define slice_zero(slice) slice__zero(slice_to_bytes(slice), size_of_slice_type(slice))

 #define slice_iter(container, iter) \
+( \
     typeof((container).ptr) iter = (container).ptr; \
     iter != slice_end(container); \
-    ++ iter
+    ++ iter \
+)
 #define slice_arg_from_array(type, ...) & (tmpl(Slice,type)) { \
     .ptr = farray_init(type, __VA_ARGS__), \
     .len = farray_len( farray_init(type, __VA_ARGS__)) \
 }

 #define span_iter(type, iter, m_begin, op, m_end) \
-    tmpl(Iter_Span,type) iter = { \
-        .r = {(m_begin), (m_end)}, \
-        .cursor = (m_begin) }; \
-    iter.cursor op iter.r.end; \
-    ++ iter.cursor
+( \
+    tmpl(Iter_Span,type) iter = { \
+        .r = {(m_begin), (m_end)}, \
+        .cursor = (m_begin) }; \
+    iter.cursor op iter.r.end; \
+    ++ iter.cursor \
+)

 #define def_span(type) \
     def_struct(tmpl( Span,type)) { type begin; type end; }; \
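The parentheses added in this hunk move the for-clause delimiters into the iterator macros themselves, so call sites drop their own (later hunks in this diff update every caller to match). Roughly, assuming a slice of tokens:

    // before: the caller supplies the parentheses
    for (slice_iter(tokens, token)) { /* ... */ }
    // after: the macro supplies them
    for slice_iter(tokens, token) { /* ... */ }
    // both expand to an ordinary for statement:
    for (typeof((tokens).ptr) token = (tokens).ptr; token != slice_end(tokens); ++ token) { /* ... */ }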
@@ -180,6 +185,7 @@ void slice__zero(Slice_Byte mem, SSIZE typewidth);

+typedef def_span(S32);
 typedef def_span(U32);
 typedef def_span(U64);
 typedef def_span(SSIZE);
 #pragma endregion Memory

@@ -241,8 +247,6 @@ struct AllocatorProc_Out {
     SSIZE left; // Contiguous memory left
-    SSIZE max_alloc;
-    SSIZE min_alloc;
     B32 continuity_break; // Whether this allocation broke continuity with the previous (address space wise)
     byte_pad(4);
 };
 typedef def_struct(AllocatorInfo) {
     AllocatorProc* proc;
@@ -255,8 +259,6 @@ typedef def_struct(AllocatorQueryInfo) {
     SSIZE left; // Contiguous memory left
-    SSIZE max_alloc;
-    SSIZE min_alloc;
     B32 continuity_break; // Whether this allocation broke continuity with the previous (address space wise)
     byte_pad(4);
 };
 static_assert(size_of(AllocatorProc_Out) == size_of(AllocatorQueryInfo));

@@ -270,9 +272,9 @@ void mem_rewind (AllocatorInfo ainfo, AllocatorSP save_point);
 AllocatorSP mem_save_point(AllocatorInfo ainfo);

 typedef def_struct(Opts_mem_alloc) { SSIZE alignment; B32 no_zero; byte_pad(4); };
-typedef def_struct(Opts_mem_grow) { SSIZE alignment; B32 no_zero; byte_pad(4); };
+typedef def_struct(Opts_mem_grow) { SSIZE alignment; B32 no_zero; B32 give_actual; };
 typedef def_struct(Opts_mem_shrink) { SSIZE alignment; };
-typedef def_struct(Opts_mem_resize) { SSIZE alignment; B32 no_zero; byte_pad(4); };
+typedef def_struct(Opts_mem_resize) { SSIZE alignment; B32 no_zero; B32 give_actual; };

 Slice_Byte mem__alloc (AllocatorInfo ainfo, SSIZE size, Opts_mem_alloc* opts);
 Slice_Byte mem__grow (AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_grow* opts);
@@ -284,8 +286,8 @@ Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem
 #define mem_resize(ainfo, mem, size, ...) mem__resize(ainfo, mem, size, opt_args(Opts_mem_resize, __VA_ARGS__))
 #define mem_shrink(ainfo, mem, size, ...) mem__shrink(ainfo, mem, size, opt_args(Opts_mem_shrink, __VA_ARGS__))

-#define alloc_type(ainfo, type, ...) (type*) mem__alloc(ainfo, size_of(type), opt_args(Opts_mem_alloc, __VA_ARGS__)).ptr
-#define alloc_slice(ainfo, type, num, ...) (tmpl(Slice,type)){ mem__alloc(ainfo, size_of(type) * num, opt_args(Opts_mem_alloc, __VA_ARGS__)).ptr, num }
+#define alloc_type(ainfo, type, ...) (type*) mem__alloc(ainfo, size_of(type), opt_args(Opts_mem_alloc, __VA_ARGS__)).ptr
+#define alloc_slice(ainfo, type, num, ...) (tmpl(Slice,type)){ (type*) mem__alloc(ainfo, size_of(type) * num, opt_args(Opts_mem_alloc, __VA_ARGS__)).ptr, num }
 #pragma endregion Allocator Interface

 #pragma region FArena (Fixed-Sized Arena)
@@ -412,44 +414,49 @@ cast(type*, arena__push(arena, 1, size_of(type), opt_args(Opts_arena, lit(string
 #pragma endregion Arena

 #pragma region Hashing
+typedef def_struct(Opts_hash64_fnv1a) { U64 seed; };
 inline
-void hash64_djb8(U64* hash, Slice_Byte bytes) {
-    for (U8 const* elem = bytes.ptr; elem != (bytes.ptr + bytes.len); ++ elem) {
-        *hash = (((*hash) << 8) + (*hash)) + (*elem);
-    }
+void hash64__fnv1a(U64* hash, Slice_Byte data, Opts_hash64_fnv1a* opts) {
+    local_persist U64 const default_seed = 0xcbf29ce484222325;
+    assert(opts != nullptr); if (opts->seed == 0) opts->seed = default_seed;
+    * hash = opts->seed; for span_iter(U64, id, 0, <, data.len) { * hash = ((* hash) ^ data.ptr[id.cursor]) * 0x100000001b3; }
 }
+#define hash64_fnv1a(hash, data, ...) hash64__fnv1a(hash, data, opt_args(Opts_hash64_fnv1a, __VA_ARGS__))
 #pragma endregion Hashing

-#pragma region Key Table 1-Layer Linear (KT1L)
-#define def_KT1L_Slot(type) \
-def_struct(tmpl(KT1L_Slot,type)) { \
+#pragma region Key Table Linear (KTL)
+#define def_KTL_Slot(type) \
+def_struct(tmpl(KTL_Slot,type)) { \
     U64 key; \
     type value; \
 }
-#define def_KT1L(type) \
-def_Slice(tmpl(KT1L_Slot,type)); \
-typedef tmpl(Slice_KT1L_Slot,type) tmpl(KT1L,type)
+#define def_KTL(type) \
+def_Slice(tmpl(KTL_Slot,type)); \
+typedef tmpl(Slice_KTL_Slot,type) tmpl(KTL,type)

-typedef Slice_Byte KT1L_Byte;
-typedef def_struct(KT1L_Meta) {
+typedef Slice_Byte KTL_Byte;
+typedef def_struct(KTL_Meta) {
     SSIZE slot_size;
     SSIZE kt_value_offset;
     SSIZE type_width;
     Str8 type_name;
 };
-void kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values );
-#define kt1l_populate_slice_a2(type, kt, ainfo, values) kt1l__populate_slice_a2( \
-    cast(KT1L_Byte*, kt), \
-    ainfo, \
-    (KT1L_Meta){ \
-        .slot_size = size_of(tmpl(KT1L_Slot,type)), \
-        .kt_value_offset = offset_of(tmpl(KT1L_Slot,type), value), \
-        .type_width = size_of(type), \
-        .type_name = lit(stringify(type)) \
-    }, \
-    slice_byte(values), (values).len \
-)
-#pragma endregion KT1L
+typedef def_farray(Str8, 2);
+typedef def_Slice(A2_Str8);
+typedef def_KTL_Slot(Str8);
+typedef def_KTL(Str8);
+inline
+void ktl_populate_slice_a2_str8(KTL_Str8* kt, AllocatorInfo backing, Slice_A2_Str8 values) {
+    assert(kt != nullptr);
+    if (values.len == 0) return;
+    * kt = alloc_slice(backing, KTL_Slot_Str8, values.len);
+    for span_iter(SSIZE, id, 0, <, values.len) {
+        memory_copy(& kt->ptr[id.cursor].value, & values.ptr[id.cursor][1], size_of(Str8));
+        hash64_fnv1a(& kt->ptr[id.cursor].key, slice_to_bytes(values.ptr[id.cursor][0]));
+    }
+}
+#pragma endregion KTL

 #pragma region Key Table 1-Layer Chained-Chunked-Cells (KT1CX)
 #define def_KT1CX_Slot(type) \
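A usage sketch for the new string-only populate procedure, mirroring how str8__fmt wires it up later in this diff (the table is backed by a local fixed arena; the entry contents are illustrative):

    local_persist Byte tbl_mem[kilo(32)];
    FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
    // Each A2_Str8 entry is a {key, value} pair of strings: entry[0] is hashed
    // into slot.key via hash64_fnv1a, entry[1] is copied into slot.value.
    KTL_Str8 kt = {0};
    ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena),
        * slice_arg_from_array(A2_Str8, { lit("name"), lit("Ed") }));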
@@ -525,11 +532,6 @@ inline U8 integer_symbols(U8 value) {
 char* str8_to_cstr_capped(Str8 content, Slice_Byte mem);
 Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 digit_group_separator);

-typedef def_farray(Str8, 2);
-typedef def_Slice(A2_Str8);
-typedef def_KT1L_Slot(Str8);
-typedef def_KT1L(Str8);
-
 Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries);
 #define str8_fmt_backed(tbl_backing, buf_backing, fmt_template, ...) \
     str8__fmt_backed(tbl_backing, buf_backing, lit(fmt_template), slice_arg_from_array(A2_Str8, __VA_ARGS__))
@@ -780,7 +782,7 @@ Slice_Byte mem__grow(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_g
     };
     AllocatorProc_Out out;
     ainfo.proc(in, & out);
-    return out.allocation;
+    return (Slice_Byte){out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
 }
 inline
 Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_resize* opts) {
@@ -795,7 +797,7 @@ Slice_Byte mem__resize(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem
     };
     AllocatorProc_Out out;
     ainfo.proc(in, & out);
-    return out.allocation;
+    return (Slice_Byte){out.allocation.ptr, opts->give_actual ? out.allocation.len : in.requested_size };
 }
 inline
 Slice_Byte mem__shrink(AllocatorInfo ainfo, Slice_Byte mem, SSIZE size, Opts_mem_shrink* opts) {
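The give_actual option added earlier decides which length these wrappers report: by default the returned slice's len echoes the requested size (the old behavior), while give_actual surfaces the allocator's actual contiguous allocation length. A hedged usage sketch, assuming opt_args forwards designated initializers into the Opts struct as at the other call sites in this file:

    Slice_Byte grown = mem_grow(ainfo, mem, new_size);                      // grown.len == new_size (requested)
    Slice_Byte exact = mem_grow(ainfo, mem, new_size, .give_actual = true); // exact.len == actual allocation length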
@@ -1295,7 +1297,6 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
     {
         active->pos += aligned_grow;
         out->allocation = (Slice_Byte){in.old_allocation.ptr, in.requested_size};
-        out->continuity_break = false;
         memory_zero(in.old_allocation.ptr + in.old_allocation.len, grow_amount * (cast(SSIZE, in.op) - AllocatorOp_Grow_NoZero));
         break;
     }
@@ -1309,7 +1310,6 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
         memory_copy(new_alloc.ptr, in.old_allocation.ptr, in.old_allocation.len);
         memory_zero(new_alloc.ptr + in.old_allocation.len, (in.requested_size - in.old_allocation.len) * (cast(SSIZE, in.op) - AllocatorOp_Grow_NoZero) );
         out->allocation = new_alloc;
-        out->continuity_break = true;
     }
     break;

@@ -1354,28 +1354,6 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out)
 }
 #pragma endregion Arena

-#pragma region Key Table 1-Layer Linear (KT1L)
-void kt1l__populate_slice_a2(KT1L_Byte*restrict kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ) {
-    assert(kt != nullptr);
-    if (num_values == 0) { return; }
-    * kt = alloc_slice(backing, Byte, m.slot_size * num_values );
-    slice_assert(* kt);
-    for (span_iter(SSIZE, iter, 0, <, num_values)) {
-        SSIZE slot_offset = iter.cursor * m.slot_size;                             // slot id
-        Byte*restrict slot_cursor = & kt->ptr[slot_offset];                        // slots[id] type: KT1L_<Type>
-        U64*restrict slot_key = (U64*restrict)slot_cursor;                         // slots[id].key type: U64
-        Slice_Byte slot_value = { slot_cursor + m.kt_value_offset, m.type_width }; // slots[id].value type: <Type>
-        SSIZE a2_offset = iter.cursor * m.type_width * 2;                          // a2 entry id
-        Byte*restrict a2_cursor = & values.ptr[a2_offset];                         // a2_entries[id] type: A2_<Type>
-        Slice_Byte a2_key = * cast(Slice_Byte*restrict, a2_cursor);                // a2_entries[id].key type: <Type>
-        Slice_Byte a2_value = { a2_cursor + m.type_width, m.type_width };          // a2_entries[id].value type: <Type>
-        slice_copy(slot_value, a2_value);                                          // slots[id].value = a2_entries[id].value
-        * slot_key = 0; hash64_djb8(slot_key, a2_key);                             // slots[id].key = hash64_djb8(a2_entries[id].key)
-    }
-    kt->len = num_values;
-}
-#pragma endregion KT1L

 #pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX)
 inline
 void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) {
@@ -1564,7 +1542,7 @@ Str8 str8_from_u32(AllocatorInfo ainfo, U32 num, U32 radix, U8 min_digits, U8 di
     }
     return result;
 }
-Str8 str8__fmt_kt1l(AllocatorInfo ainfo, Slice_Byte* _buffer, KT1L_Str8 table, Str8 fmt_template)
+Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str8 fmt_template)
 {
     assert(_buffer != nullptr);
     Slice_Byte buffer = *_buffer;
@@ -1604,9 +1582,9 @@ Str8 str8__fmt_kt1l(AllocatorInfo ainfo, Slice_Byte* _buffer, KT1L_Str8 table, S
         }
         if (fmt_overflow) continue;
         // Hashing the potential token and cross checking it with our token table
-        U64 key = 0; hash64_djb8(& key, (Slice_Byte){ cast(Byte*, potential_token_cursor), potential_token_len});
+        U64 key = 0; hash64_fnv1a(& key, slice(Byte, cast(Byte*, potential_token_cursor), potential_token_len));
         Str8* value = nullptr;
-        for (slice_iter(table, token)) {
+        for slice_iter(table, token) {
             // We do a linear iteration instead of a hash table lookup because the user should never be substituting with more than 100 unique tokens..
             if (token->key == key) {
                 value = & token->value;
@@ -1644,17 +1622,17 @@ Str8 str8__fmt_ktl(AllocatorInfo ainfo, Slice_Byte* _buffer, KTL_Str8 table, Str
 }
 inline
 Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries) {
-    KT1L_Str8 kt; kt1l_populate_slice_a2(Str8, & kt, tbl_backing, *entries );
-    SSIZE buf_size = kilo(64);
-    Slice_Byte buffer = mem_alloc(buf_backing, buf_size);
-    Str8 result = str8__fmt_kt1l(buf_backing, & buffer, kt, fmt_template);
+    KTL_Str8 kt; ktl_populate_slice_a2_str8(& kt, tbl_backing, *entries );
+    SSIZE buf_size = kilo(64);
+    Slice_Byte buffer = mem_alloc(buf_backing, buf_size);
+    Str8 result = str8__fmt_ktl(buf_backing, & buffer, kt, fmt_template);
     return result;
 }
 Str8 str8__fmt(Str8 fmt_template, Slice_A2_Str8* entries) {
     local_persist Byte tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
     local_persist Byte buf_mem[kilo(64)];
-    KT1L_Str8 kt = {0}; kt1l_populate_slice_a2(Str8, & kt, ainfo_farena(tbl_arena), *entries );
-    Str8 result = str8__fmt_kt1l((AllocatorInfo){0}, & slice_fmem(buf_mem), kt, fmt_template);
+    KTL_Str8 kt = {0}; ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena), *entries );
+    Str8 result = str8__fmt_ktl((AllocatorInfo){0}, & slice_fmem(buf_mem), kt, fmt_template);
     return result;
 }
 inline
@@ -1722,7 +1700,7 @@ Str8* str8cache_set(KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserv
     slice_assert(value);
     assert(str_reserve.proc != nullptr);
     assert(backing_cells.proc != nullptr);
-    Byte* entry = kt1cx_set(kt1cx_byte(kt), key, slice_byte(value), backing_cells, (KT1CX_ByteMeta){
+    Byte* entry = kt1cx_set(kt1cx_byte(kt), key, slice_to_bytes(value), backing_cells, (KT1CX_ByteMeta){
         .slot_size = size_of(KT1CX_Slot_Str8),
         .slot_key_offset = offset_of(KT1CX_Slot_Str8, key),
         .cell_next_offset = offset_of(KT1CX_Cell_Str8, next),
@@ -1743,7 +1721,7 @@ Str8* str8cache_set(KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserv
 inline
 Str8 cache_str8(Str8Cache* cache, Str8 str) {
     assert(cache != nullptr);
-    U64 key = 0; hash64_djb8(& key, slice_byte(str));
+    U64 key = 0; hash64_fnv1a(& key, slice_to_bytes(str));
     Str8* result = str8cache_set(cache->kt, key, str, cache->str_reserve, cache->cell_reserve);
     return * result;
 }
@@ -1762,14 +1740,14 @@ void str8gen_append_str8(Str8Gen* gen, Str8 str){
     Slice_Byte result = mem_grow(gen->backing, str8gen_slice_byte(* gen), str.len + gen->len);
     slice_assert(result);
     Slice_Byte to_copy = { result.ptr + gen->len, result.len - gen->len };
-    slice_copy(to_copy, slice_byte(str));
+    slice_copy(to_copy, slice_to_bytes(str));
     gen->ptr = cast(UTF8*, result.ptr);
     gen->len += str.len;
-    gen->cap = max(gen->len , gen->cap); // TODO(Ed): Arenas currently hide total capacity before growth. Problably better todo classic append to actually track this.
+    gen->cap = result.len;
 }
 void str8gen__append_fmt(Str8Gen* gen, Str8 fmt_template, Slice_A2_Str8* entries){
     local_persist Byte tbl_mem[kilo(32)]; FArena tbl_arena = farena_make(slice_fmem(tbl_mem));
-    KT1L_Str8 kt = {0}; kt1l_populate_slice_a2(Str8, & kt, ainfo_farena(tbl_arena), *entries );
+    KTL_Str8 kt = {0}; ktl_populate_slice_a2_str8(& kt, ainfo_farena(tbl_arena), *entries );
     Slice_Byte buffer = { gen->ptr + gen->len, gen->cap - gen->len };
     if (buffer.len < kilo(16)) {
         Slice_Byte result = mem_grow(gen->backing, str8gen_slice_byte(* gen), kilo(16) + gen->cap );
@@ -1778,7 +1756,7 @@ void str8gen__append_fmt(Str8Gen* gen, Str8 fmt_template, Slice_A2_Str8* entries
         gen->cap += kilo(16);
         buffer = (Slice_Byte){ cast(Byte*, gen->ptr + gen->len), gen->cap - gen->len };
     }
-    Str8 result = str8__fmt_kt1l(gen->backing, & buffer, kt, fmt_template);
+    Str8 result = str8__fmt_ktl(gen->backing, & buffer, kt, fmt_template);
     gen->len += result.len;
 }
 #pragma endregion String Operations
@@ -2076,7 +2054,7 @@ void api_watl_parse(WATL_ParseInfo* info, Slice_WATL_Tok tokens, Opts_watl_parse
     * curr = (WATL_Node){0};
     * line = (WATL_Line){ curr, 0 };
     info->lines = (Slice_WATL_Line){ line, 0 };
-    for (slice_iter(tokens, token))
+    for slice_iter(tokens, token)
     {
         switch(* token->ptr)
         {
@@ -2129,7 +2107,7 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)

     Str8Gen result = str8gen_make(buffer);
     U32 line_num = 0;
-    for (slice_iter(lines, line))
+    for slice_iter(lines, line)
     {
     #define fmt_entry(label, value) { lit(label), value }
         ++ line_num;
@@ -2139,7 +2117,7 @@ Str8 watl_dump_listing(AllocatorInfo buffer, Slice_WATL_Line lines)
         , fmt_entry("line_num", str_line_num)
         , fmt_entry("chunk_num", str_chunk_num)
         );
-        for (slice_iter(* line, chunk))
+        for slice_iter(* line, chunk)
         {
             Str8 id;
             switch (* chunk->ptr)