From 3b9fdf56a9202d793ece8a84e4dbe71c973b206c Mon Sep 17 00:00:00 2001 From: Ed_ Date: Fri, 27 Jun 2025 20:30:09 -0400 Subject: [PATCH] progress on odin ver, some adjustments to c version --- C/watl.v0.msvc.c | 62 ++++++----- Odin/watl.v0.odin | 252 +++++++++++++++++++++++++++++++++++++++----- scripts/build.c.ps1 | 4 +- watl_exercise.proj | 14 +++ watl_exercise.user | 154 +++++++++++++++++++++++++++ 5 files changed, 428 insertions(+), 58 deletions(-) create mode 100644 watl_exercise.proj create mode 100644 watl_exercise.user diff --git a/C/watl.v0.msvc.c b/C/watl.v0.msvc.c index 36609d4..55cfab1 100644 --- a/C/watl.v0.msvc.c +++ b/C/watl.v0.msvc.c @@ -445,7 +445,7 @@ typedef def_struct(KT1L_Meta) { SSIZE type_width; Str8 type_name; }; -SSIZE kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ); +void kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ); #define kt1l_populate_slice_a2(type, kt, ainfo, values) kt1l__populate_slice_a2( \ cast(KT1L_Byte*, kt), \ ainfo, \ @@ -513,11 +513,11 @@ typedef def_struct(KT1CX_Info) { AllocatorInfo backing_table; AllocatorInfo backing_cells; }; -void kt1cx__init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result); -void kt1cx__clear (KT1CX_Byte kt, KT1CX_ByteMeta meta); -U64 kt1cx__slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta); -Byte* kt1cx__get (KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta); -Byte* kt1cx__set (KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta); +void kt1cx_init (KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result); +void kt1cx_clear (KT1CX_Byte kt, KT1CX_ByteMeta meta); +U64 kt1cx_slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta); +Byte* kt1cx_get (KT1CX_Byte kt, U64 key, KT1CX_ByteMeta meta); +Byte* kt1cx_set (KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta meta); #define kt1cx_assert(kt) do { \ slice_assert(kt.cell_pool); \ @@ -1367,32 +1367,30 @@ void arena_allocator_proc(AllocatorProc_In in, AllocatorProc_Out* out) #pragma endregion Arena #pragma region Key Table 1-Layer Linear (KT1L) -SSIZE kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta info, Slice_Byte values, SSIZE num_values ) { +void kt1l__populate_slice_a2(KT1L_Byte* kt, AllocatorInfo backing, KT1L_Meta m, Slice_Byte values, SSIZE num_values ) { assert(kt != nullptr); - * kt = alloc_slice(backing, Byte, info.slot_size * num_values ); + if (num_values == 0) { return; } + * kt = alloc_slice(backing, Byte, m.slot_size * num_values ); slice_assert(* kt); - SSIZE num_bytes = 0; for (range_iter(SSIZE, iter, 0, <, num_values)) { - SSIZE slot_offset = iter.cursor * info.slot_size; // slot id - Byte* slot_cursor = & kt->ptr[slot_offset]; // slots[id] type: KT1L_ - U64* slot_key = (U64*)slot_cursor; // slots[id].key type: U64 - Slice_Byte slot_value = { slot_cursor + info.kt_value_offset, info.type_width }; // slots[id].value type: - SSIZE a2_offset = iter.cursor * info.type_width * 2; // a2 entry id - Byte* a2_cursor = & values.ptr[a2_offset]; // a2_entries[id] type: A2_ - Slice_Byte a2_key = * cast(Slice_Byte*, a2_cursor); // a2_entries[id].key type: - Slice_Byte a2_value = { a2_cursor + info.type_width, info.type_width }; // a2_entries[id].value type: - slice_copy(slot_value, a2_value); // slots[id].value = a2_entries[id].value - * slot_key = 0; hash64_djb8(slot_key, a2_key); // slots[id].key = hash64_djb8(a2_entries[id].key) - num_bytes += 
cast(Slice_Byte*, a2_value.ptr)->len; // num_bytes += a2_entries[id].value.len + SSIZE slot_offset = iter.cursor * m.slot_size; // slot id + Byte* slot_cursor = & kt->ptr[slot_offset]; // slots[id] type: KT1L_ + U64* slot_key = (U64*)slot_cursor; // slots[id].key type: U64 + Slice_Byte slot_value = { slot_cursor + m.kt_value_offset, m.type_width }; // slots[id].value type: + SSIZE a2_offset = iter.cursor * m.type_width * 2; // a2 entry id + Byte* a2_cursor = & values.ptr[a2_offset]; // a2_entries[id] type: A2_ + Slice_Byte a2_key = * cast(Slice_Byte*, a2_cursor); // a2_entries[id].key type: + Slice_Byte a2_value = { a2_cursor + m.type_width, m.type_width }; // a2_entries[id].value type: + slice_copy(slot_value, a2_value); // slots[id].value = a2_entries[id].value + * slot_key = 0; hash64_djb8(slot_key, a2_key); // slots[id].key = hash64_djb8(a2_entries[id].key) } kt->len = num_values; - return num_bytes; } #pragma endregion KT1l #pragma region Key Table 1-Layer Chained-Chunked_Cells (KT1CX) inline -void kt1cx__init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) { +void kt1cx_init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) { assert(result != nullptr); assert(info.backing_cells.proc != nullptr); assert(info.backing_table.proc != nullptr); @@ -1404,7 +1402,7 @@ void kt1cx__init(KT1CX_Info info, KT1CX_InfoMeta m, KT1CX_Byte* result) { result->cell_pool = mem_alloc(info.backing_cells, m.cell_size * m.cell_pool_size); result->table.len = m.table_size; // Setting to the table number of elements instead of byte length. } -void kt1cx__clear(KT1CX_Byte kt, KT1CX_ByteMeta m) { +void kt1cx_clear(KT1CX_Byte kt, KT1CX_ByteMeta m) { Byte* cursor = kt.table.ptr; SSIZE num_cells = kt.table.len; kt.table.len *= m.cell_size; // Temporarily convert length to byte size. @@ -1428,12 +1426,12 @@ void kt1cx__clear(KT1CX_Byte kt, KT1CX_ByteMeta m) { kt.table.len = num_cells; // Restore to type-based length. } inline -U64 kt1cx__slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) { +U64 kt1cx_slot_id(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) { U64 hash_index = key % cast(U64, kt.table.len); return hash_index; } Byte* kt1cx__get(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) { - U64 hash_index = kt1cx__slot_id(kt, key, m); + U64 hash_index = kt1cx_slot_id(kt, key, m); SSIZE cell_offset = hash_index * m.cell_size; Slice_Byte cell = { & kt.table.ptr[cell_offset], m.cell_size}; // KT1CX_Cell_ cell = kt.table[hash_index] { @@ -1459,8 +1457,8 @@ Byte* kt1cx__get(KT1CX_Byte kt, U64 key, KT1CX_ByteMeta m) { } } inline -Byte* kt1cx__set(KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta m) { - U64 hash_index = kt1cx__slot_id(kt, key, m); +Byte* kt1cx_set(KT1CX_Byte kt, U64 key, Slice_Byte value, AllocatorInfo backing_cells, KT1CX_ByteMeta m) { + U64 hash_index = kt1cx_slot_id(kt, key, m); SSIZE cell_offset = hash_index * m.cell_size; Slice_Byte cell = { & kt.table.ptr[cell_offset], m.cell_size}; // KT1CX_Cell_ cell = kt.table[hash_index] { @@ -1664,8 +1662,8 @@ Str8 str8__fmt_kt1l(AllocatorInfo ainfo, Slice_Byte buffer, KT1L_Str8 table, Str } inline Str8 str8__fmt_backed(AllocatorInfo tbl_backing, AllocatorInfo buf_backing, Str8 fmt_template, Slice_A2_Str8* entries) { - KT1L_Str8 kt; SSIZE num_bytes = kt1l_populate_slice_a2(Str8, & kt, tbl_backing, *entries ); - SSIZE buf_size = fmt_template.len + num_bytes; buf_size = buf_size > kilo(16) ? 
buf_size : kilo(16); + KT1L_Str8 kt; kt1l_populate_slice_a2(Str8, & kt, tbl_backing, *entries ); + SSIZE buf_size = kilo(32); Slice_Byte buffer = mem_alloc(buf_backing, buf_size); Str8 result = str8__fmt_kt1l(buf_backing, buffer, kt, fmt_template); return result; @@ -1704,14 +1702,14 @@ void str8cache__init(Str8Cache* cache, Opts_str8cache_init* opts) { .type_width = size_of(Str8), .type_name = lit(stringify(Str8)) }; - kt1cx__init(info, m, cast(KT1CX_Byte*, & cache->kt)); + kt1cx_init(info, m, cast(KT1CX_Byte*, & cache->kt)); return; } inline Str8Cache str8cache__make(Opts_str8cache_init* opts) { Str8Cache cache; str8cache__init(& cache, opts); return cache; } inline void str8cache_clear(KT1CX_Str8 kt) { kt1cx_assert(kt); - kt1cx__clear(kt1cx_byte(kt), (KT1CX_ByteMeta){ + kt1cx_clear(kt1cx_byte(kt), (KT1CX_ByteMeta){ .slot_size = size_of(KT1CX_Slot_Str8), .slot_key_offset = offset_of(KT1CX_Slot_Str8, key), .cell_next_offset = offset_of(KT1CX_Cell_Str8, next), @@ -1742,7 +1740,7 @@ Str8* str8cache_set(KT1CX_Str8 kt, U64 key, Str8 value, AllocatorInfo str_reserv slice_assert(value); assert(str_reserve.proc != nullptr); assert(backing_cells.proc != nullptr); - Byte* entry = kt1cx__set(kt1cx_byte(kt), key, slice_byte(value), backing_cells, (KT1CX_ByteMeta){ + Byte* entry = kt1cx_set(kt1cx_byte(kt), key, slice_byte(value), backing_cells, (KT1CX_ByteMeta){ .slot_size = size_of(KT1CX_Slot_Str8), .slot_key_offset = offset_of(KT1CX_Slot_Str8, key), .cell_next_offset = offset_of(KT1CX_Cell_Str8, next), diff --git a/Odin/watl.v0.odin b/Odin/watl.v0.odin index 6368a8d..b7b2e47 100644 --- a/Odin/watl.v0.odin +++ b/Odin/watl.v0.odin @@ -28,6 +28,7 @@ alloc :: proc { copy :: proc { memory_copy, slice_copy, + string_copy, } copy_non_overlapping :: proc { memory_copy_non_overlapping, @@ -106,16 +107,15 @@ slice_assert :: proc (s: $SliceType / []$Type) { assert(len(s) > 0) assert(s != nil) } -slice_end :: proc "contextless" (s : $SliceType / []$Type) -> Type { - return s[len(s) - 1] +slice_end :: proc "contextless" (s : $SliceType / []$Type) -> ^Type { + return & s[len(s) - 1] } @(require_results) slice_to_bytes :: proc "contextless" (s: []$Type) -> []byte { return ([^]byte)(raw_data(s))[:len(s) * size_of(Type)] } -slice_zero :: proc "contextless" (data: $SliceType / []$Type) -> Type { +slice_zero :: proc "contextless" (data: $SliceType / []$Type) { zero(raw_data(data), size_of(Type) * len(data)) - return data } slice_copy :: proc "contextless" (dst, src: $SliceType / []$Type) -> int { n := max(0, min(len(dst), len(src))) @@ -326,6 +326,8 @@ Raw_String :: struct { data: [^]byte, len: int, } +string_copy :: proc(dst, src: string) { slice_copy (transmute([]byte) dst, transmute([]byte) src) } +string_assert :: proc(s: string) { slice_assert(transmute([]byte) s) } //#endregion("Strings") //#region("FArena") @@ -604,7 +606,8 @@ varena_push :: proc(va: ^VArena, $Type: typeid, amount: int, alignment: int = ME next_commit_size: int if reserve_left > 0 { next_commit_size = max(va.commit_size, to_be_used) - } else { + } + else { next_commit_size = align_pow2(reserve_left, os_system_info().target_page_size) } if next_commit_size > 0 @@ -912,25 +915,55 @@ arena_allocator_proc :: proc(input: AllocatorProc_In, output: ^AllocatorProc_Out //#endregion("Arena (Casey-Ryan Composite Arena)") //#region("Hashing") -hash64_djb8 :: proc() {} +hash64_djb8 :: proc(hash: ^u64, bytes: []byte) { + for elem in bytes { + // This hash is a 1:1 translation of the C version's hash. 
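+ // Each step is equivalent to hash^ = hash^ * 257 + u64(elem), since (hash << 8) + hash == hash * 257.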
+ hash^ = ((hash^ << 8) + hash^) + u64(elem) + } +} //#endregion("Hashing") //#region("Key Table 1-Layer Linear (KT1L)") -KT1L_Slot :: struct($type: typeid) { +KT1L_Slot :: struct($Type: typeid) { key: u64, - value: type + value: Type, } KT1L_Meta :: struct { - slot_size: int, - kt_value_offset: int, - type_width: int, - type_name: int, + slot_size: uintptr, + kt_value_offset: uintptr, + type_width: uintptr, + type_name: string, } -kt1l_populate_slice_a2_Slice_Byte :: proc(kt: ^[]KT1L_Slot(byte), m: KT1L_Meta, backing: AllocatorInfo, values: [][2]byte) -> int { - return 0 +kt1l_populate_slice_a2_Slice_Byte :: proc(kt: ^[]byte, backing: AllocatorInfo, values: []byte, num_values: int, m: KT1L_Meta) { + assert(kt != nil) + if num_values == 0 { return } + table_size_bytes := num_values * int(m.slot_size) + kt^ = mem_alloc(backing, table_size_bytes) + slice_assert(kt ^) + kt_raw : Raw_Slice = transmute(Raw_Slice) kt^ + for cursor in 0 ..< cast(uintptr) num_values { + slot_offset := cursor * m.slot_size + slot_cursor := uintptr(kt_raw.data) + slot_offset + slot_key := cast(^u64) slot_cursor + slot_value := transmute([]byte) Raw_Slice { cast([^]byte) (slot_cursor + m.kt_value_offset), int(m.type_width)} + a2_offset := cursor * m.type_width * 2 + a2_cursor := uintptr(& values[a2_offset]) + a2_key := (transmute(^[]byte) a2_cursor) ^ + a2_value := transmute([]byte) Raw_Slice { rawptr(a2_cursor + m.type_width), int(m.type_width) } + copy(slot_value, a2_value) + slot_key^ = 0; hash64_djb8(slot_key, a2_key) + } + kt_raw.len = num_values } -kt1l_populate_slice_a2 :: proc($Type: typeid, kt: ^[]KT1L_Slot(Type), backing: AllocatorInfo, values: [][2]Type) -> int { - return 0 +kt1l_populate_slice_a2 :: proc($Type: typeid, kt: ^[]KT1L_Slot(Type), backing: AllocatorInfo, values: [][2]Type) { + assert(kt != nil) + values_bytes := transmute([]byte) Raw_Slice{data = raw_data(values), len = len(values) * size_of([2]Type)} + kt1l_populate_slice_a2_Slice_Byte(transmute(^[]byte) kt, backing, values_bytes, len(values), { + slot_size = size_of(KT1L_Slot(Type)), + kt_value_offset = offset_of(KT1L_Slot(Type), KT1L_Slot(Type).value), + type_width = size_of(Type), + type_name = #type_string(Type), + }) } //#endregion("Key Table 1-Layer Linear (KT1L)") @@ -961,8 +994,8 @@ KT1CX_Byte :: struct { } KT1CX_ByteMeta :: struct { slot_size: int, - slot_key_offset: int, - cell_next_offset: int, + slot_key_offset: uintptr, + cell_next_offset: uintptr, cell_depth: int, cell_size: int, type_width: int, @@ -972,8 +1005,8 @@ KT1CX_InfoMeta :: struct { cell_pool_size: int, table_size: int, slot_size: int, - slot_key_offset: int, - cell_next_offset: int, + slot_key_offset: uintptr, + cell_next_offset: uintptr, cell_depth: int, cell_size: int, type_width: int, @@ -984,17 +1017,114 @@ KT1CX_Info :: struct { backing_cells: AllocatorInfo, } kt1cx_init :: proc(info: KT1CX_Info, m: KT1CX_InfoMeta, result: ^KT1CX_Byte) { + assert(result != nil) + assert(info.backing_cells.procedure != nil) + assert(info.backing_table.procedure != nil) + assert(m.cell_depth > 0) + assert(m.cell_pool_size >= 4 * Kilo) + assert(m.table_size >= 4 * Kilo) + assert(m.type_width > 0) + table_raw := transmute(Raw_Slice) mem_alloc(info.backing_table, m.table_size * m.cell_size) + result.cell_pool = mem_alloc(info.backing_cells, m.cell_pool_size * m.cell_size) + slice_assert(result.cell_pool) + table_raw.len = m.table_size + result.table = transmute([]byte) table_raw + slice_assert(result.table) } kt1cx_clear :: proc(kt: KT1CX_Byte, m: KT1CX_ByteMeta) { + cursor := 
cast(uintptr) raw_data(kt.table) + num_cells := len(kt.table) + table_len := len(kt.table) * m.cell_size + for ; cursor != cast(uintptr) end(kt.table); cursor += cast(uintptr) m.cell_size + { + cell := Raw_Slice { rawptr(cursor), m.cell_size } + slots := Raw_Slice { cell.data, m.cell_depth * m.slot_size } + slot_cursor := uintptr(slots.data) + for;; { defer{slot_cursor += uintptr(m.slot_size)} { + slot := transmute([]byte) Raw_Slice { rawptr(slot_cursor), m.slot_size } + zero(slot) + if slot_cursor == cast(uintptr) end(transmute([]byte) slots) { + next := cast(rawptr) (slot_cursor + uintptr(m.cell_next_offset)) + if next != nil { + slots.data = next + slot_cursor = uintptr(next) + continue + } + } + }} + } } kt1cx_slot_id :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> u64 { - return 0 + hash_index := key % u64(len(kt.table)) + return hash_index } kt1cx_get :: proc(kt: KT1CX_Byte, key: u64, m: KT1CX_ByteMeta) -> ^byte { - return nil + hash_index := kt1cx_slot_id(kt, key, m) + cell_offset := uintptr(hash_index) * uintptr(m.cell_size) + cell := Raw_Slice {& kt.table[cell_offset], m.cell_size} + { + slots := Raw_Slice {cell.data, m.cell_depth * m.slot_size} + slot_cursor := uintptr(slots.data) + for;; { defer{slot_cursor += uintptr(m.slot_size)} + { + slot := transmute(^KT1CX_Byte_Slot) (slot_cursor + m.slot_key_offset) + if slot.occupied && slot.key == key { + return cast(^byte) slot_cursor + } + if slot_cursor == cast(uintptr) end(transmute([]byte) slots) + { + cell_next := cast(rawptr) (uintptr(cell.data) + m.cell_next_offset) + if cell_next != nil { + slots.data = cell_next + slot_cursor = uintptr(cell_next) + cell.data = cell_next + continue + } + else { + return nil + } + } + }} + } } kt1cx_set :: proc(kt: KT1CX_Byte, key: u64, value: []byte, backing_cells: AllocatorInfo, m: KT1CX_ByteMeta) -> ^byte { - return nil + hash_index := kt1cx_slot_id(kt, key, m) + cell_offset := uintptr(hash_index) * uintptr(m.cell_size) + cell := Raw_Slice{& kt.table[cell_offset], m.cell_size} + { + slots := Raw_Slice {cell.data, m.cell_depth * m.slot_size} + slot_cursor := uintptr(slots.data) + for ;; { defer{slot_cursor += uintptr(m.slot_size)} + { + slot := transmute(^KT1CX_Byte_Slot) rawptr(slot_cursor + m.slot_key_offset) + if slot.occupied == false { + slot.occupied = true + slot.key = key + return cast(^byte) slot_cursor + } + else if slot.key == key { + return cast(^byte) slot_cursor + } + if slot_cursor == uintptr(end(transmute([]byte) slots)) { + curr_cell := transmute(^KT1CX_Byte_Cell) (uintptr(cell.data) + m.cell_next_offset) + if curr_cell != nil { + slots.data = curr_cell.next + slot_cursor = uintptr(curr_cell.next) + cell.data = curr_cell.next + continue + } + else { + new_cell := mem_alloc(backing_cells, m.cell_size) + curr_cell.next = raw_data(new_cell) + slot = transmute(^KT1CX_Byte_Slot) rawptr(uintptr(raw_data(new_cell)) + m.slot_key_offset) + slot.occupied = true + slot.key = key + return raw_data(new_cell) + } + } + }} + return nil + } } kt1cx_assert :: proc(kt: $type / KT1CX) { slice_assert(kt.cell_pool) @@ -1013,9 +1143,83 @@ integer_symbols :: proc(value: u8) -> u8 { } str8_to_cstr_capped :: proc(content: string, mem: []byte) -> cstring { - return nil + copy_len := min(len(content), len(mem) - 1) + if copy_len > 0 { + copy(mem[:copy_len], transmute([]byte) content) + } + mem[copy_len] = 0 + return transmute(cstring) raw_data(mem) } str8_from_u32 :: proc(ainfo: AllocatorInfo, num: u32, radix: u32 = 10, min_digits: u8 = 0, digit_group_separator: u8 = 0) -> string { + prefix: 
string
+ switch radix {
+ case 16: prefix = "0x"
+ case 8: prefix = "0o"
+ case 2: prefix = "0b"
+ }
+ digit_group_size: u32 = 3
+ switch radix {
+ case 2, 8, 16:
+ digit_group_size = 4
+ }
+ needed_digits: u32 = 1
+ if num > 0
+ {
+ needed_digits = 0
+ temp_num := num
+ for temp_num > 0 {
+ temp_num /= radix
+ needed_digits += 1
+ }
+ }
+ needed_leading_zeros: u32
+ if u32(min_digits) > needed_digits {
+ needed_leading_zeros = u32(min_digits) - needed_digits
+ }
+ total_digits := needed_digits + needed_leading_zeros
+ needed_separators: u32
+ if digit_group_separator != 0 && total_digits > digit_group_size {
+ needed_separators = (total_digits - 1) / digit_group_size
+ }
+ total_len := len(prefix) + int(total_digits + needed_separators)
+ result_bytes := mem_alloc(ainfo, total_len)
+ if len(result_bytes) == 0 { return "" }
+ result := transmute(string) result_bytes
+ if len(prefix) > 0 {
+ copy(result, prefix)
+ }
+ // Fill content from right to left
+ write_cursor := total_len - 1
+ num_reduce := num
+ for idx in 0 ..< total_digits {
+ if idx > 0 && idx % digit_group_size == 0 && digit_group_separator != 0 {
+ result_bytes[write_cursor] = digit_group_separator
+ write_cursor -= 1
+ }
+
+ if idx < needed_digits {
+ result_bytes[write_cursor] = char_to_lower(integer_symbols(u8(num_reduce % radix)))
+ num_reduce /= radix
+ }
+ else {
+ result_bytes[write_cursor] = '0'
+ }
+ write_cursor -= 1
+ }
+ return result
+}
+
+str8_fmt_kt1l :: proc(ainfo: AllocatorInfo, buffer: []byte, table: []KT1L_Slot(string), fmt_template: string) -> string {
+ slice_assert(buffer)
+ slice_assert(table)
+ string_assert(fmt_template)
+ if ainfo.procedure != nil {
+ assert(.Grow in allocator_query(ainfo).features)
+ }
+ cursor_buffer := uintptr(raw_data(buffer))
+ buffer_remaining := len(buffer)
+ return {}
 }
diff --git a/scripts/build.c.ps1 b/scripts/build.c.ps1
index 96c00d3..165f4b8 100644
--- a/scripts/build.c.ps1
+++ b/scripts/build.c.ps1
@@ -107,9 +107,9 @@ $compiler_args += $flag_full_src_path
 $compiler_args += $flag_asm_listing_file
 
 # $compiler_args += $flag_optimize_speed_max
-$compiler_args += $flag_optimize_size
+# $compiler_args += $flag_optimize_size
 # $compiler_args += $flag_optimize_intrinsics
-# $compiler_args += $flag_no_optimization
+$compiler_args += $flag_no_optimization
 
 # Debug setup
 $compiler_args += ($flag_define + 'BUILD_DEBUG')
diff --git a/watl_exercise.proj b/watl_exercise.proj
new file mode 100644
index 0000000..1295b03
--- /dev/null
+++ b/watl_exercise.proj
@@ -0,0 +1,14 @@
+// raddbg 0.9.18 project file
+
+recent_file: path: "c/watl.v0.msvc.c"
+target:
+{
+ executable: "build/watl.v0.msvc.exe"
+ working_directory: C
+ enabled: 1
+}
+breakpoint:
+{
+ source_location: "c/watl.v0.msvc.c:2227:1"
+ hit_count: 0
+}
diff --git a/watl_exercise.user b/watl_exercise.user
new file mode 100644
index 0000000..95d765b
--- /dev/null
+++ b/watl_exercise.user
@@ -0,0 +1,154 @@
+// raddbg 0.9.18 user file
+
+recent_project: path: "watl_exercise.proj"
+window:
+{
+ size: 2048.000000 1152.000000
+ pos: 234 234
+ monitor: "\\\\.\\DISPLAY1"
+ maximized
+ panels:
+ {
+ 0.683614: selected getting_started text:
+ {
+ selected
+ expression: "file:\"C:/projects/WATL_Exercise/c/watl.v0.msvc.c\".data"
+ auto: 1
+ query: input: ""
+ cursor_line: 2227
+ cursor_column: 1
+ mark_line: 2227
+ mark_column: 1
+ }
+ 0.316386:
+ {
+ watch: expression: "query:targets"
+ watch:
+ {
+ selected
+ expression: ""
+ watch: "listing.ptr.text()"
+ }
+ }
+ }
+}
+keybindings:
+{
+ { kill_all f5 shift }
+ { step_into_inst f11 alt }
+ { step_over_inst f10 alt }
+ 
{ step_out f11 shift } + { halt x ctrl shift } + { halt pause } + { run f5 } + { restart f5 ctrl shift } + { step_into f11 } + { step_over f10 } + { run_to_line f10 ctrl } + { set_next_statement f10 ctrl shift } + { inc_window_font_size equal alt } + { dec_window_font_size minus alt } + { window n ctrl shift } + { toggle_fullscreen return ctrl } + { new_panel_right p ctrl } + { new_panel_down minus ctrl } + { rotate_panel_columns 2 ctrl } + { next_panel comma ctrl } + { prev_panel comma ctrl shift } + { focus_panel_right right ctrl alt } + { focus_panel_left left ctrl alt } + { focus_panel_up up ctrl alt } + { focus_panel_down down ctrl alt } + { undo z ctrl } + { redo y ctrl } + { go_back left alt } + { go_forward right alt } + { close_panel p ctrl shift alt } + { next_tab page_down ctrl } + { prev_tab page_up ctrl } + { next_tab tab ctrl } + { prev_tab tab ctrl shift } + { move_tab_right page_down ctrl shift } + { move_tab_left page_up ctrl shift } + { close_tab w ctrl } + { tab_bar_top up ctrl shift alt } + { tab_bar_bottom down ctrl shift alt } + { open_tab t ctrl } + { open o ctrl } + { switch i ctrl } + { switch_to_partner_file o alt } + { open_user n ctrl shift alt } + { open_project n ctrl alt } + { open_user o ctrl shift alt } + { open_project o ctrl alt } + { save_user s ctrl shift alt } + { save_project s ctrl shift } + { edit f2 } + { accept return } + { accept space } + { cancel esc } + { move_left left } + { move_right right } + { move_up up } + { move_down down } + { move_left_select left shift } + { move_right_select right shift } + { move_up_select up shift } + { move_down_select down shift } + { move_left_chunk left ctrl } + { move_right_chunk right ctrl } + { move_up_chunk up ctrl } + { move_down_chunk down ctrl } + { move_up_page page_up } + { move_down_page page_down } + { move_up_whole home ctrl } + { move_down_whole end ctrl } + { move_left_chunk_select left ctrl shift } + { move_right_chunk_select right ctrl shift } + { move_up_chunk_select up ctrl shift } + { move_down_chunk_select down ctrl shift } + { move_up_page_select page_up shift } + { move_down_page_select page_down shift } + { move_up_whole_select home ctrl shift } + { move_down_whole_select end ctrl shift } + { move_up_reorder up alt } + { move_down_reorder down alt } + { move_home home } + { move_end end } + { move_home_select home shift } + { move_end_select end shift } + { select_all a ctrl } + { delete_single delete } + { delete_chunk delete ctrl } + { backspace_single backspace } + { backspace_chunk backspace ctrl } + { copy c ctrl } + { copy insert ctrl } + { cut x ctrl } + { paste v ctrl } + { paste insert shift } + { insert_text null } + { move_next tab } + { move_prev tab shift } + { goto_line g ctrl } + { goto_address g alt } + { search f ctrl } + { search_backwards r ctrl } + { find_next f3 } + { find_prev f3 ctrl } + { find_selected_thread f4 } + { goto_name j ctrl } + { goto_name_at_cursor f12 } + { toggle_watch_expr_at_cursor w alt } + { toggle_watch_expr_at_mouse d ctrl } + { toggle_watch_pin f9 ctrl } + { toggle_breakpoint f9 } + { add_address_breakpoint f9 shift } + { add_function_breakpoint f9 ctrl shift } + { attach f6 shift } + { open_palette f1 } + { open_palette p ctrl shift } + { log_marker m ctrl shift alt } + { toggle_dev_menu d ctrl shift alt } +} +current_path: "C:/projects/WATL_Exercise/build"