2026-02-21 13:11:54 -05:00
parent 0a78cd8405
commit 6953e6b9b3


@@ -7,7 +7,8 @@
X(Data, "Data", 0x0094BAA1, "$") \
X(Imm, "Imm", 0x004AA4C2, "^") \
X(Comment, "Comment", 0x00AAAAAA, ".") \
X(Format, "Format", 0x003A2F3B, " ") \
X(Lambda, "Lambda", 0x00D675A4, "%")
typedef Enum_(U4, STag) {
#define X(n, s, c, p) tmpl(STag, n),
@@ -137,6 +138,7 @@ WinAPI B4 ms_close_handle(void* hObject) asm("CloseHandle");
#define PRIM_DUP 11
#define PRIM_DROP 12
#define PRIM_SUB 13
#define PRIM_EXECUTE 14
global const char* prim_names[] = {
"",
@@ -152,7 +154,8 @@ global const char* prim_names[] = {
"RET_IF_S", "RET_IF_S",
"DUP ", "DUP ",
"DROP ", "DROP ",
"SUB " "SUB ",
"EXECUTE "
};
internal U4 resolve_name_to_index(const char* ref_name);
@@ -222,6 +225,12 @@ internal void emit32(U4 val) {
code_arena.used += 4;
}
}
internal void emit64(U8 val) {
if (code_arena.used + 8 <= code_arena.capacity) {
u8_r(code_arena.start + code_arena.used)[0] = val;
code_arena.used += 8;
}
}
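// Note (assumption, not stated in the source): the single u8_r store writes the
// value in the CPU's native byte order, which on x86-64 is the little-endian
// layout the imm64 operands emitted below expect.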
internal void pad32(void) {
while ((code_arena.used % 4) != 0) emit8(0x90);
@@ -274,6 +283,172 @@ internal void relink_tape(void) {
}
}
#pragma region x64 Emission DSL
// ===================================================================================================================
// x64 Emission DSL
// Follows the same bit-field composition pattern as the GP command macros.
// ===================================================================================================================
// --- REX Prefix Composition ---
// REX byte: 0100 W R X B
// W = 64-bit operand width
// R = extends ModRM.reg field to reach R8-R15
// X = extends SIB.index field to reach R8-R15
// B = extends ModRM.r/m field to reach R8-R15
#define x64_rex_offset_W 3
#define x64_rex_offset_R 2
#define x64_rex_offset_X 1
#define x64_rex_offset_B 0
#define x64_rex_base 0x40
#define x64_rex_W (0x1 << x64_rex_offset_W) // 64-bit operand size
#define x64_rex_R (0x1 << x64_rex_offset_R) // Extend reg field
#define x64_rex_X (0x1 << x64_rex_offset_X) // Extend index field
#define x64_rex_B (0x1 << x64_rex_offset_B) // Extend r/m field
#define x64_rex(flags) (x64_rex_base | (flags))
#define x64_REX x64_rex(x64_rex_W) // 0x48 - 64-bit, standard regs
#define x64_REX_R x64_rex(x64_rex_W | x64_rex_R) // 0x4C - 64-bit, extended reg field
#define x64_REX_B x64_rex(x64_rex_W | x64_rex_B) // 0x49 - 64-bit, extended r/m field
#define x64_REX_RB x64_rex(x64_rex_W | x64_rex_R | x64_rex_B) // 0x4D
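// Worked example (sketch, using names defined further down): x64_rex(x64_rex_W)
// = 0x40 | 0x08 = 0x48, so a 64-bit register move such as "mov rax, rdx"
// composes as
//   emit8(x64_REX); emit8(x64_op_MOV_rm_reg); emit8(x64_modrm_RDX_RAX);   // 48 89 D0
// matching the raw 0x48 0x89 0xD0 sequence emitted in compile_action below.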
// --- Register Encoding ---
// These are the 3-bit register IDs used in ModRM and SIB fields.
#define x64_reg_RAX 0x0 // 000
#define x64_reg_RCX 0x1 // 001
#define x64_reg_RDX 0x2 // 010
#define x64_reg_RBX 0x3 // 011
#define x64_reg_RSP 0x4 // 100 (also: SIB follows when in r/m with Mod != 11)
#define x64_reg_RBP 0x5 // 101 (also: disp32 no base when Mod = 00)
#define x64_reg_RSI 0x6 // 110
#define x64_reg_RDI 0x7 // 111
// Extended registers (require REX.R or REX.B)
#define x64_reg_R8 0x0 // 000 + REX.R/B
#define x64_reg_R9 0x1 // 001 + REX.R/B
#define x64_reg_R10 0x2 // 010 + REX.R/B
#define x64_reg_R11 0x3 // 011 + REX.R/B
// --- ModRM Composition ---
// ModRM byte: [Mod:2][Reg:3][R/M:3]
// Mod=11 -> both operands are registers (no memory)
// Mod=00 -> r/m is a memory address, no displacement
// Mod=01 -> r/m is a memory address + 8-bit displacement
// Mod=10 -> r/m is a memory address + 32-bit displacement
#define x64_mod_mem 0x0 // 00 - memory, no displacement
#define x64_mod_mem_disp8 0x1 // 01 - memory + 8-bit displacement
#define x64_mod_mem_disp32 0x2 // 10 - memory + 32-bit displacement
#define x64_mod_reg 0x3 // 11 - register direct (no memory)
#define x64_modrm_offset_mod 6
#define x64_modrm_offset_reg 3
#define x64_modrm_offset_rm 0
#define x64_modrm(mod, reg, rm) \
(((mod) << x64_modrm_offset_mod) | ((reg) << x64_modrm_offset_reg) | ((rm) << x64_modrm_offset_rm))
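// Worked example (sketch): x64_modrm(x64_mod_reg, x64_reg_RAX, x64_reg_RDX)
//   = (0b11 << 6) | (0b000 << 3) | 0b010 = 0xC2 -- the register-direct byte
//   used for "mov rdx, rax" / "xchg rax, rdx" in the composite macros below.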
// Register-to-register ModRM shortcuts (Mod=11, the common case)
#define x64_modrm_rr(reg, rm) x64_modrm(x64_mod_reg, reg, rm)
// Commonly used ModRM bytes in this runtime (reg direct)
#define x64_modrm_RAX_RAX x64_modrm_rr(x64_reg_RAX, x64_reg_RAX) // 0xC0
#define x64_modrm_RAX_RCX x64_modrm_rr(x64_reg_RAX, x64_reg_RCX) // 0xC1
#define x64_modrm_RAX_RDX x64_modrm_rr(x64_reg_RAX, x64_reg_RDX) // 0xC2
#define x64_modrm_RAX_RBX x64_modrm_rr(x64_reg_RAX, x64_reg_RBX) // 0xC3
#define x64_modrm_RCX_RAX x64_modrm_rr(x64_reg_RCX, x64_reg_RAX) // 0xC8
#define x64_modrm_RCX_RBX x64_modrm_rr(x64_reg_RCX, x64_reg_RBX) // 0xCB
#define x64_modrm_RDX_RAX x64_modrm_rr(x64_reg_RDX, x64_reg_RAX) // 0xD0
// Memory + disp8 ModRM shortcuts (Mod=01)
#define x64_modrm_RAX_mem_disp8_RBX x64_modrm(x64_mod_mem_disp8, x64_reg_RAX, x64_reg_RBX) // 0x43
#define x64_modrm_RDX_mem_disp8_RBX x64_modrm(x64_mod_mem_disp8, x64_reg_RDX, x64_reg_RBX) // 0x53
// SIB-addressed ModRM (Mod=00, R/M=RSP signals SIB follows)
#define x64_modrm_RAX_sib x64_modrm(x64_mod_mem, x64_reg_RAX, x64_reg_RSP) // 0x04
#define x64_modrm_RDX_sib x64_modrm(x64_mod_mem, x64_reg_RDX, x64_reg_RSP) // 0x14
// --- SIB Composition ---
// SIB byte: [Scale:2][Index:3][Base:3]
// Scale: 00=*1, 01=*2, 10=*4, 11=*8
#define x64_sib_scale_1 0x0 // 00
#define x64_sib_scale_2 0x1 // 01
#define x64_sib_scale_4 0x2 // 10
#define x64_sib_scale_8 0x3 // 11
#define x64_sib_offset_scale 6
#define x64_sib_offset_index 3
#define x64_sib_offset_base 0
#define x64_sib(scale, index, base) \
(((scale) << x64_sib_offset_scale) | ((index) << x64_sib_offset_index) | ((base) << x64_sib_offset_base))
// Tape drive SIB: [rbx + rax*8]
// Scale=8, Index=RAX, Base=RBX
#define x64_sib_tape x64_sib(x64_sib_scale_8, x64_reg_RAX, x64_reg_RBX) // 0xC3
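// Worked example (sketch): x64_sib(x64_sib_scale_8, x64_reg_RAX, x64_reg_RBX)
//   = (0b11 << 6) | (0b000 << 3) | 0b011 = 0xC3, so pairing x64_modrm_RAX_sib
//   with x64_sib_tape addresses [rbx + rax*8], i.e. global slot RAX off the RBX base.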
// --- Opcodes ---
#define x64_op_MOV_rm_reg 0x89 // mov r/m, reg (store: reg -> memory or register)
#define x64_op_MOV_reg_rm 0x8B // mov reg, r/m (load: memory or register -> reg)
#define x64_op_MOV_rm_imm32 0xC7 // mov r/m, imm32 (sign-extended to 64-bit)
#define x64_op_MOV_rax_imm64 0xB8 // mov rax, imm64 (register baked into opcode)
#define x64_op_MOV_r10_imm64 0xBA // mov r10, imm64 (B8 + r10_id=2, needs REX.B)
#define x64_op_XCHG_rm_reg 0x87 // xchg r/m, reg
#define x64_op_ADD_rm_reg 0x01 // add r/m, reg
#define x64_op_SUB_rm_reg 0x29 // sub r/m, reg
#define x64_op_IMUL_reg_rm 0x0F // imul prefix (followed by 0xAF)
#define x64_op_IMUL_reg_rm2 0xAF // imul reg, r/m (second byte)
#define x64_op_TEST_rm_reg 0x85 // test r/m, reg (sets ZF and SF)
#define x64_op_UNARY 0xFF // inc/dec/call-indirect (Reg field = opcode extension)
#define x64_op_ARITH_imm8 0x83 // add/sub/etc with sign-extended 8-bit immediate (Reg = extension)
#define x64_op_ARITH_imm32 0x81 // add/sub/etc with 32-bit immediate (Reg = extension)
// Opcode extensions (used as the Reg field of ModRM with 0xFF and 0x83)
#define x64_ext_INC 0x0 // /0
#define x64_ext_DEC 0x1 // /1
#define x64_ext_CALL 0x2 // /2 (used with 0xFF for indirect call)
#define x64_ext_ADD 0x0 // /0 (used with 0x83/0x81)
#define x64_ext_SUB 0x5 // /5 (used with 0x83/0x81)
#define x64_op_CALL_rel32 0xE8 // call rel32
#define x64_op_JMP_rel32 0xE9 // jmp rel32
#define x64_op_JNZ_rel8 0x75 // jnz rel8 (jump if Zero Flag not set)
#define x64_op_JNS_rel8 0x79 // jns rel8 (jump if Sign Flag not set)
#define x64_op_RET 0xC3 // ret
#define x64_op_NOP 0x90 // nop (used for padding to 32-bit alignment)
// Push/Pop (opcode encodes register directly, no ModRM)
#define x64_op_PUSH_RBX 0x53 // push rbx (50 + rbx_id=3)
#define x64_op_POP_RBX 0x5B // pop rbx (58 + rbx_id=3)
#define x64_op_PUSH_RDX 0x52 // push rdx (50 + rdx_id=2)
#define x64_op_POP_RDX 0x5A // pop rdx (58 + rdx_id=2)
// --- Composite Instruction Macros ---
// Each maps directly to the emit8/emit32/emit64 calls in compile_action.
// Stack Machine Operations
#define x64_XCHG_RAX_RDX() do { emit8(x64_REX); emit8(x64_op_XCHG_rm_reg); emit8(x64_modrm_RAX_RDX); } while(0)
#define x64_MOV_RDX_RAX() do { emit8(x64_REX); emit8(x64_op_MOV_rm_reg); emit8(x64_modrm_RAX_RDX); } while(0) // DUP
#define x64_MOV_RAX_RDX() do { emit8(x64_REX); emit8(x64_op_MOV_rm_reg); emit8(x64_modrm_RDX_RAX); } while(0) // DROP
// Arithmetic (2-register stack: op RAX with RDX, result in RAX)
#define x64_ADD_RAX_RDX() do { emit8(x64_REX); emit8(x64_op_ADD_rm_reg); emit8(x64_modrm_RAX_RDX); } while(0)
#define x64_SUB_RAX_RDX() do { emit8(x64_REX); emit8(x64_op_SUB_rm_reg); emit8(x64_modrm_RAX_RDX); } while(0)
#define x64_IMUL_RAX_RDX() do { emit8(x64_REX); emit8(x64_op_IMUL_reg_rm); emit8(x64_op_IMUL_reg_rm2); emit8(x64_modrm_RAX_RDX); } while(0)
#define x64_DEC_RAX() do { emit8(x64_REX); emit8(x64_op_UNARY); emit8(x64_modrm(x64_mod_reg, x64_ext_DEC, x64_reg_RAX)); } while(0)
// Flag Operations (for conditional returns)
#define x64_TEST_RAX_RAX() do { emit8(x64_REX); emit8(x64_op_TEST_rm_reg); emit8(x64_modrm_RAX_RAX); } while(0)
// Conditional Returns (TEST must precede these)
// JNZ skips the RET if RAX != 0, so RET only fires when RAX == 0
#define x64_RET_IF_ZERO() do { x64_TEST_RAX_RAX(); emit8(x64_op_JNZ_rel8); emit8(0x01); emit8(x64_op_RET); } while(0)
// JNS skips the RET if RAX >= 0, so RET only fires when RAX < 0
#define x64_RET_IF_SIGN() do { x64_TEST_RAX_RAX(); emit8(x64_op_JNS_rel8); emit8(0x01); emit8(x64_op_RET); } while(0)
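// Byte-level sketch: x64_RET_IF_ZERO() expands to 48 85 C0 (test rax, rax),
// 75 01 (jnz +1), C3 (ret); the rel8 of 0x01 hops over exactly the one-byte RET.
// x64_RET_IF_SIGN() is identical except the 0x75 becomes 0x79 (jns).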
// Tape Drive Memory (Preemptive Scatter via RBX base pointer)
#define x64_FETCH() do { emit8(x64_REX); emit8(x
#pragma endregion x64 Emission DSL
internal void compile_action(U4 val)
{
if (val >= 0x10000) {
@@ -330,7 +505,13 @@ internal void compile_action(U4 val)
emit8(0x48); emit8(0x89); emit8(0xD0); // mov rax, rdx
pad32();
return;
}
else if (p == PRIM_EXECUTE) {
emit8(0xFF); emit8(0xD0); // call rax
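// In DSL terms this is x64_op_UNARY with the x64_ext_CALL extension and RAX in
// r/m (ModRM 0xD0): EXECUTE consumes the address currently in RAX (e.g. a lambda
// pointer FETCHed from a global slot), and the called body's RET hands control
// back to the next compiled token.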
pad32();
return;
}
else if (p == PRIM_PRINT) {
// FFI Dance: Save RDX, Align RSP (32 shadow + 8 align = 40)
emit8(0x52); // push rdx
emit8(0x48); emit8(0x83); emit8(0xEC); emit8(0x28); // sub rsp, 40
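// (Win64 ABI background for the arithmetic above: the callee is owed 32 bytes of
//  shadow space and RSP must be 16-byte aligned at the call site; the extra 8
//  bytes restore that alignment after the push.)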
@@ -371,68 +552,107 @@ IA_ void compile_and_run_tape(void)
log_count = 0;
gdi_log_count = 0;
emit8(0x53); // push rbx (callee-saved; also aligns RSP to 0 mod 16)
emit8(0x48); emit8(0x89); emit8(0xCB); // mov rbx, rcx (stable globals ptr for whole JIT session)
emit8(0x48); emit8(0x8B); emit8(0x43); emit8(0x70); // mov rax, [rbx+0x70]
emit8(0x48); emit8(0x8B); emit8(0x53); emit8(0x78); // mov rdx, [rbx+0x78]
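// Register model (as used throughout the JIT): RAX holds the top of the
// two-register data stack, RDX the second slot; the live pair is persisted at
// [rbx+0x70]/[rbx+0x78] across runs.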
U4*r tape_ptr = u4_r(tape_arena.start);
U8*r anno_ptr = u8_r(anno_arena.start);
B4 in_def = false;
U4 def_jmp_offset = 0;
B4 in_lambda = false;
U4 lambda_jmp_offset = 0;
U8 end_idx = run_full ? (tape_arena.used / sizeof(U4)) : (cursor_idx + 1);
for (U8 i = 0; i < end_idx; i++)
{
U4 tag = unpack_tag(tape_ptr[i]);
U4 val = unpack_val(tape_ptr[i]);
// NUDGE: Define what terminates blocks.
B4 is_terminator = (tag == STag_Define || tag == STag_Imm);
// Terminate lambdas first if needed
if (in_lambda && (is_terminator || tag == STag_Lambda)) {
emit8(0xC3); pad32(); // Terminate lambda with RET
U4 current = code_arena.used;
u4_r(code_arena.start + lambda_jmp_offset)[0] = current - (lambda_jmp_offset + 4);
in_lambda = false;
}
// Terminate definitions
if (in_def && is_terminator) {
emit8(0xC3); pad32(); // Terminate definition with RET
U4 current = code_arena.used;
u4_r(code_arena.start + def_jmp_offset)[0] = current - (def_jmp_offset + 4);
in_def = false;
}
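// (The patched value is a rel32 displacement measured from the end of the
//  4-byte field that holds it, hence "current - (offset + 4)".)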
if (tag == STag_Define)
{
pad32();
emit8(0xE9);
def_jmp_offset = code_arena.used;
emit32(0); // Placeholder for jump distance
pad32();
in_def = true;
tape_to_code_offset[i] = code_arena.used;
emit8(0x48); emit8(0x87); emit8(0xC2); // xchg rax, rdx
pad32();
}
// NUDGE: Handle the new Lambda tag.
else if (tag == STag_Lambda)
{
char* name = (char*)&anno_ptr[i];
char val_hex[9];
u64_to_hex(val, val_hex, 8);
val_hex[8] = '\0';
KTL_Slot_Str8 call_log_table[] = {
{ ktl_str8_key("name"), str8(name) },
{ ktl_str8_key("val"), str8(val_hex) },
};
debug_log(str8("Compiling lambda: <name> (val: <val>)"), ktl_str8_from_arr(call_log_table));
// Outer function: Push lambda address into RAX
emit8(0x48); emit8(0x89); emit8(0xC2); // mov rdx, rax (save old rax)
emit8(0x48); emit8(0xB8); // mov rax, ... (64-bit immediate)
U4 rax_imm_offset = code_arena.used;
emit64(0); // Placeholder for lambda address
pad32();
// Outer function: Jump over lambda body
emit8(0xE9);
lambda_jmp_offset = code_arena.used;
emit32(0); // Placeholder for jump distance
pad32();
in_lambda = true;
// Patch the mov rax, ... with the actual lambda body address
U8 lambda_addr = u8_(code_arena.start + code_arena.used);
u8_r(code_arena.start + rax_imm_offset)[0] = lambda_addr;
}
else if (tag == STag_Call || tag == STag_Imm)
{
char* name = (char*)&anno_ptr[i];
char val_hex[9];
u64_to_hex(val, val_hex, 8);
val_hex[8] = '\0';
KTL_Slot_Str8 call_log_table[] = {
{ ktl_str8_key("name"), str8(name) },
{ ktl_str8_key("val"), str8(val_hex) },
};
debug_log(str8("Compiling call: <name> (val: <val>)"), ktl_str8_from_arr(call_log_table));
if (tag == STag_Imm && in_def) {
emit8(0xC3);
pad32();
U4 current = code_arena.used;
u4_r(code_arena.start + def_jmp_offset)[0] = current - (def_jmp_offset + 4);
in_def = false;
}
compile_action(val);
}
else if (tag == STag_Data)
{
emit8(0x48); emit8(0x89); emit8(0xC2); // mov rdx, rax
emit8(0x48); emit8(0xC7); emit8(0xC0); emit32(val); // mov rax, imm32 (sign-extended)
pad32();
}
}
if (in_lambda) {
emit8(0xC3);
pad32();
U4 current = code_arena.used;
u4_r(code_arena.start + lambda_jmp_offset)[0] = current - (lambda_jmp_offset + 4);
}
if (in_def) {
emit8(0xC3);
pad32();
@@ -442,8 +662,8 @@ IA_ void compile_and_run_tape(void)
emit8(0x48); emit8(0x89); emit8(0x43); emit8(0x70); // mov [rbx+0x70], rax
emit8(0x48); emit8(0x89); emit8(0x53); emit8(0x78); // mov [rbx+0x78], rdx
emit8(0x5B); // pop rbx
emit8(0xC3); // ret
typedef void JIT_Func(U8* globals_ptr);
JIT_Func* func = (JIT_Func*)code_arena.start;
@@ -466,6 +686,7 @@ IA_ void compile_and_run_tape(void)
}
#undef r
#undef v
#undef expect
@@ -956,53 +1177,101 @@ int main(void) {
farena_init(& anno_arena, anno_mem);
farena_init(& code_arena, code_mem);
mu_init(&mu_ctx);
mu_ctx.text_width = text_width_cb;
mu_ctx.text_height = text_height_cb;
{
scatter(pack_token(STag_Comment, 0), "INIT ");
scatter(pack_token(STag_Data, 5), 0);
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Imm, 0), "STORE ");
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Imm, 0), "STORE ");
scatter(pack_token(STag_Format, 0xA), 0);
scatter(pack_token(STag_Define, 0), "F_STEP ");
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Call, 0), "FETCH ");
scatter(pack_token(STag_Call, 0), "RET_IF_Z");
scatter(pack_token(STag_Format, 0xA), 0);
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Call, 0), "FETCH ");
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Call, 0), "FETCH ");
scatter(pack_token(STag_Call, 0), "MULT ");
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Call, 0), "STORE ");
scatter(pack_token(STag_Format, 0xA), 0);
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Call, 0), "FETCH ");
scatter(pack_token(STag_Call, 0), "DEC ");
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Call, 0), "STORE ");
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Call, 0), "FETCH ");
scatter(pack_token(STag_Call, 0), "PRINT ");
scatter(pack_token(STag_Format, 0xA), 0);
scatter(pack_token(STag_Imm, 0), "F_STEP ");
scatter(pack_token(STag_Imm, 0), "F_STEP ");
scatter(pack_token(STag_Imm, 0), "F_STEP ");
scatter(pack_token(STag_Imm, 0), "F_STEP ");
scatter(pack_token(STag_Imm, 0), "F_STEP ");
}
{
scatter(pack_token(STag_Comment, 0), "LAMBDAS ");
scatter(pack_token(STag_Format, 0xA), 0);
// --- Store Lambda 1 (Square) in Global[0] ---
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Lambda, 0), "L_SQUARE");
// Lambda Body:
scatter(pack_token(STag_Call, 0), "SWAP "); // Get the argument into RAX
scatter(pack_token(STag_Call, 0), "DUP ");
scatter(pack_token(STag_Call, 0), "MULT ");
// Terminate Lambda, return to main scope, and prepare for STORE
scatter(pack_token(STag_Imm, 0), "SWAP "); // RAX = 0, RDX = L_SQUARE_addr
scatter(pack_token(STag_Call, 0), "STORE "); // Global[0] = L_SQUARE_addr
scatter(pack_token(STag_Format, 0xA), 0);
// --- Store Lambda 2 (Double) in Global[1] ---
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Lambda, 0), "L_DOUBLE");
// Lambda Body:
scatter(pack_token(STag_Call, 0), "SWAP "); // Get the argument into RAX
scatter(pack_token(STag_Call, 0), "DUP ");
scatter(pack_token(STag_Call, 0), "ADD ");
// Terminate Lambda, return to main scope, and prepare for STORE
scatter(pack_token(STag_Imm, 0), "SWAP "); // RAX = 1, RDX = L_DOUBLE_addr
scatter(pack_token(STag_Call, 0), "STORE "); // Global[1] = L_DOUBLE_addr
scatter(pack_token(STag_Format, 0xA), 0);
// --- Execute Lambda 1 (Square of 5) ---
scatter(pack_token(STag_Comment, 0), "USE L1 ");
scatter(pack_token(STag_Data, 5), 0); // Argument for lambda
scatter(pack_token(STag_Data, 0), 0);
scatter(pack_token(STag_Call, 0), "FETCH "); // RAX = Global[0] (L_SQUARE_addr), RDX = 5
scatter(pack_token(STag_Call, 0), "EXECUTE "); // Calls L_SQUARE. Returns RAX = 25
scatter(pack_token(STag_Call, 0), "PRINT "); // Prints 0x19 (25)
scatter(pack_token(STag_Format, 0xA), 0);
// --- Execute Lambda 2 (Double of 5) ---
scatter(pack_token(STag_Comment, 0), "USE L2 ");
scatter(pack_token(STag_Data, 5), 0); // Argument for lambda
scatter(pack_token(STag_Data, 1), 0);
scatter(pack_token(STag_Call, 0), "FETCH "); // RAX = Global[1] (L_DOUBLE_addr), RDX = 5
scatter(pack_token(STag_Call, 0), "EXECUTE "); // Calls L_DOUBLE. Returns RAX = 10
scatter(pack_token(STag_Call, 0), "PRINT "); // Prints 0xA (10)
}
relink_tape();
run_full = true;