From 81c5f8e750fcda6a2451fb54604130431434f88f Mon Sep 17 00:00:00 2001
From: dec05eba
Date: Sat, 17 Aug 2019 02:57:08 +0200
Subject: Implement more instructions, implement function parameters and arguments

---
 executor/executor.h             |  10 +-
 executor/interpreter/executor.c | 197 ++++++++++++++++++++++++++++++++++++++++
 executor/x86_64/asm.c           |  86 ++++++++++++++----
 executor/x86_64/asm.h           |  18 +++-
 executor/x86_64/executor.c      | 129 ++++++++++++++++++++++----
 5 files changed, 397 insertions(+), 43 deletions(-)
 create mode 100644 executor/interpreter/executor.c

(limited to 'executor')

diff --git a/executor/executor.h b/executor/executor.h
index fdf6e67..5be9abc 100644
--- a/executor/executor.h
+++ b/executor/executor.h
@@ -20,6 +20,10 @@ CHECK_RESULT int amal_executor_init(amal_executor **self);
 void amal_executor_deinit(amal_executor *self);
 CHECK_RESULT int amal_executor_run(amal_executor *self);
 
+/* These functions are called for every file in the program. Every file has its own list of strings, intermediates, functions and external functions */
+CHECK_RESULT int amal_executor_instructions_start(amal_executor *self, u16 num_functions);
+CHECK_RESULT int amal_executor_instructions_end(amal_executor *self);
+
 CHECK_RESULT int amal_exec_nop(amal_executor *self);
 CHECK_RESULT int amal_exec_setz(amal_executor *self, u8 dst_reg);
 CHECK_RESULT int amal_exec_mov(amal_executor *self, u8 dst_reg, u8 src_reg);
@@ -34,12 +38,12 @@ CHECK_RESULT int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8
 CHECK_RESULT int amal_exec_push(amal_executor *self, u8 reg);
 CHECK_RESULT int amal_exec_pushi(amal_executor *self, i64 imm);
 CHECK_RESULT int amal_exec_pushd(amal_executor *self, BufferView data);
-/*CHECK_RESULT int amal_exec_call(u8 dst_reg, BufferView data);
-CHECK_RESULT int amal_exec_callr(u8 dst_reg, BufferView data);*/
+CHECK_RESULT int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args);
+/*CHECK_RESULT int amal_exec_callr(u8 dst_reg, BufferView data);*/
 CHECK_RESULT int amal_exec_cmp(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2);
 CHECK_RESULT int amal_exec_jz(amal_executor *self, u8 dst_reg, i16 offset);
 CHECK_RESULT int amal_exec_jmp(amal_executor *self, i16 offset);
-CHECK_RESULT int amal_exec_ret(amal_executor *self);
+CHECK_RESULT int amal_exec_ret(amal_executor *self, u8 reg);
 CHECK_RESULT int amal_exec_func_start(amal_executor *self, u16 num_regs);
 CHECK_RESULT int amal_exec_func_end(amal_executor *self);
 
diff --git a/executor/interpreter/executor.c b/executor/interpreter/executor.c
new file mode 100644
index 0000000..0180b08
--- /dev/null
+++ b/executor/interpreter/executor.c
@@ -0,0 +1,197 @@
+#include "../executor.h"
+#include "../../include/program.h"
+#include "../../include/std/alloc.h"
+#include <assert.h>
+
+typedef struct {
+    usize *stack;
+    usize stack_size;
+    usize stack_index;
+    isize reg[AMAL_PROGRAM_NUM_REGISTERS];
+} amal_executor_impl;
+
+#define IMPL \
+    amal_executor_impl *impl; \
+    impl = (amal_executor_impl*)self;
+
+#define FOUR_MEGABYTES 1024*1024*4
+
+static int executor_ensure_stack_capacity(amal_executor_impl *self, usize bytes_to_add) {
+    const isize overflow = self->stack_size - self->stack_index * sizeof(isize) + bytes_to_add;
+    if(overflow > 0) {
+        void *new_data;
+        const usize new_stack_size = self->stack_size * 1.5;
+        if(new_stack_size > FOUR_MEGABYTES)
+            return AMAL_PROGRAM_INSTRUCTION_STACK_OVERFLOW;
+        return_if_error(am_realloc(self->stack, new_stack_size, &new_data));
+        self->stack = new_data;
+        self->stack_size = new_stack_size;
+    }
+    return 0;
+}
+
+int amal_executor_init(amal_executor **self) {
+    amal_executor_impl **impl;
+    impl = (amal_executor_impl**)self;
+    *impl = NULL;
+    return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)impl));
+    (*impl)->stack_size = 4096;
+    return_if_error(am_malloc((*impl)->stack_size, (void**)&(*impl)->stack));
+    (*impl)->stack_index = 0;
+    return 0;
+}
+
+void amal_executor_deinit(amal_executor *self) {
+    IMPL
+    am_free(impl->stack);
+    am_free(impl);
+}
+
+int amal_executor_run(amal_executor *self) {
+    (void)self;
+    assert(bool_false && "TODO: Implement!");
+    return 0;
+}
+
+int amal_exec_nop(amal_executor *self) {
+    (void)self;
+    return 0;
+}
+
+int amal_exec_setz(amal_executor *self, u8 dst_reg) {
+    IMPL
+    impl->reg[dst_reg] = 0;
+    return 0;
+}
+
+int amal_exec_mov(amal_executor *self, u8 dst_reg, u8 src_reg) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg];
+    return 0;
+}
+
+int amal_exec_movi(amal_executor *self, u8 dst_reg, i64 imm) {
+    IMPL
+    impl->reg[dst_reg] = imm;
+    return 0;
+}
+
+int amal_exec_movd(amal_executor *self, u8 dst_reg, BufferView data) {
+    IMPL
+    impl->reg[dst_reg] = (uintptr_t)data.data;
+    return 0;
+}
+
+int amal_exec_add(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] + impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_sub(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] - impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_imul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] * impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_mul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] * impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_idiv(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] / impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = impl->reg[src_reg1] / impl->reg[src_reg2];
+    return 0;
+}
+
+int amal_exec_push(amal_executor *self, u8 reg) {
+    IMPL
+    return_if_error(executor_ensure_stack_capacity(impl, sizeof(isize)));
+    impl->stack[impl->stack_index++] = impl->reg[reg];
+    return 0;
+}
+
+int amal_exec_pushi(amal_executor *self, i64 imm) {
+    IMPL
+    return_if_error(executor_ensure_stack_capacity(impl, sizeof(isize)));
+    impl->stack[impl->stack_index++] = imm;
+    return 0;
+}
+
+int amal_exec_pushd(amal_executor *self, BufferView data) {
+    IMPL
+    return_if_error(executor_ensure_stack_capacity(impl, sizeof(isize)));
+    impl->stack[impl->stack_index++] = (uintptr_t)data.data;
+    return 0;
+}
+
+/*int amal_exec_call(u8 dst_reg, BufferView data);
+int amal_exec_callr(u8 dst_reg, BufferView data);*/
+int amal_exec_cmp(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+    IMPL
+    impl->reg[dst_reg] = (impl->reg[src_reg1] == impl->reg[src_reg2]);
+    return 0;
+}
+
+int amal_exec_jz(amal_executor *self, u8 dst_reg, i16 offset) {
+    (void)self;
+    (void)dst_reg;
+    (void)offset;
+    /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
+    return 0;
+}
+
+int amal_exec_jmp(amal_executor *self, i16 offset) {
+    (void)self;
+    (void)offset;
+    /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
+    return 0;
+}
+
+int amal_exec_ret(amal_executor *self) {
+    (void)self;
+    /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement RET. RET needs to restore the stack before returning");
+    return 0;
+}
+
+int amal_exec_func_start(amal_executor *self, u16 num_regs) {
+    /*
+        TODO: Validate stack size, or maybe remove all validation? do we really need validation?
+        If we need security, we could fork the process instead.
+    */
+
+    /*
+        Some registers need to be preserved before entering a function scope and these registers are different on different platforms.
+        32-bit: EBX, ESI, EDI, EBP
+        64-bit Windows: RBX, RSI, RDI, RBP, R12-R15, XMM6-XMM15
+        64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
+    */
+    /* TODO: Preserve registers and stack frame */
+    /*return executor_ensure_stack_capacity(impl, num_regs * sizeof(isize));*/
+    (void)self;
+    (void)num_regs;
+    return 0;
+}
+
+int amal_exec_func_end(amal_executor *self) {
+    (void)self;
+    /* TODO: Restore registers and stack frame and ret */
+    return 0;
+}
diff --git a/executor/x86_64/asm.c b/executor/x86_64/asm.c
index 73ae568..f032538 100644
--- a/executor/x86_64/asm.c
+++ b/executor/x86_64/asm.c
@@ -35,6 +35,10 @@ static void ins_start(Asm *self) {
     asm_debug_str_buffer_index = 0;
 }
 
+static int max(int a, int b) {
+    return a >= b ? a : b;
+}
+
 static void ins_end(Asm *self, const char *fmt, ...) {
     usize ins_end_offset;
     usize i;
@@ -46,7 +50,7 @@ static void ins_end(Asm *self, const char *fmt, ...) {
         fprintf(stderr, "%02x ", ((u8*)self->code)[i]);
     }
     /* Same padding for all instructions, no matter how long they are */
-    for(i = 0; i < 35 - (ins_end_offset - ins_start_offset)*3; ++i) {
+    for(i = 0; i < (usize)max(0, 35 - (ins_end_offset - ins_start_offset)*3); ++i) {
         putc(' ', stderr);
     }
     vfprintf(stderr, fmt, args);
@@ -160,9 +164,9 @@ void asm_ptr_init_index_disp(AsmPtr *self, Reg64 base, Reg64 index, i32 disp) {
 }
 
 int asm_init(Asm *self) {
-    self->size = am_pagesize();
-    amal_log_debug("asm: page size: %u", self->size);
-    self->code = mmap(NULL, self->size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    self->allocated_size = am_pagesize();
+    amal_log_debug("asm: page size: %u", self->allocated_size);
+    self->code = mmap(NULL, self->allocated_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if(self->code == MAP_FAILED)
         return -errno;
     self->code_it = self->code;
@@ -171,12 +175,17 @@ int asm_init(Asm *self) {
 
 void asm_deinit(Asm *self) {
     if(self->code)
-        munmap(self->code, self->size);
+        munmap(self->code, self->allocated_size);
     self->code = NULL;
     self->code_it = NULL;
-    self->size = 0;
+    self->allocated_size = 0;
+}
+
+usize asm_get_size(Asm *self) {
+    return self->code_it - (u8*)self->code;
 }
 
+#if 0
 static void asm_print_code_hex(Asm *self) {
     u8 *ptr;
     int off;
@@ -196,13 +205,14 @@ static void asm_print_code_hex(Asm *self) {
     if(off != 0)
         putc('\n', stdout);
 }
+#endif
 
 int asm_execute(Asm *self) {
     void (*func)();
-    if(mprotect(self->code, self->size, PROT_READ | PROT_EXEC) != 0)
+    if(mprotect(self->code, self->allocated_size, PROT_READ | PROT_EXEC) != 0)
         return -errno;
 
-    asm_print_code_hex(self);
+    /*asm_print_code_hex(self);*/
 
     /* TODO: Verify if this is valid on all platforms. According to ISO C standard it isn't? */
     *(void**)(&func) = self->code;
@@ -214,17 +224,17 @@ int asm_execute(Asm *self) {
 static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
     usize current_offset;
     current_offset = (u8*)self->code_it - (u8*)self->code;
-    if(current_offset + size > self->size) {
+    if(current_offset + size > self->allocated_size) {
         void *new_mem;
         usize new_size;
-        new_size = self->size + am_pagesize();
+        new_size = self->allocated_size + am_pagesize();
         new_mem = mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         if(self->code == MAP_FAILED)
             return -errno;
 
-        am_memcpy(new_mem, self->code, self->size);
+        am_memcpy(new_mem, self->code, self->allocated_size);
         self->code = new_mem;
-        self->size = new_size;
+        self->allocated_size = new_size;
         self->code_it = (u8*)self->code + current_offset;
     }
     return 0;
@@ -232,7 +242,7 @@ static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
 
 #ifdef DEBUG
 static isize asm_get_capacity_left(Asm *self) {
-    return (isize)self->size - (isize)((u8*)self->code_it - (u8*)self->code);
+    return (isize)self->allocated_size - (isize)((u8*)self->code_it - (u8*)self->code);
 }
 #endif
 
@@ -346,21 +356,20 @@ int asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src) {
     return 0;
 }
 
+/* Note: This shows as instruction movabs in intel assembly format */
 int asm_mov_ri(Asm *self, Reg64 dst, i64 immediate) {
     ins_start(self);
-    /* 10 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
     return_if_error(asm_ensure_capacity(self, 10));
     *self->code_it++ = REX_W;
     *self->code_it++ = 0xB8 + dst;
     am_memcpy(self->code_it, &immediate, sizeof(immediate));
     self->code_it += sizeof(immediate);
-    ins_end(self, "mov %s, %ld", reg64_to_str(dst), immediate);
+    ins_end(self, "mov %s, 0x%x", reg64_to_str(dst), immediate);
     return 0;
 }
 
 int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
     ins_start(self);
-    /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
     return_if_error(asm_ensure_capacity(self, 3));
     *self->code_it++ = REX_W;
     *self->code_it++ = 0x89;
@@ -371,7 +380,6 @@ int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
 
 int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
     ins_start(self);
-    /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
     return_if_error(asm_ensure_capacity(self, 3));
     *self->code_it++ = REX_W;
     *self->code_it++ = 0x01;
@@ -382,7 +390,6 @@ int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
 
 int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
     ins_start(self);
-    /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
     return_if_error(asm_ensure_capacity(self, 3));
     *self->code_it++ = REX_W;
     *self->code_it++ = 0x29;
@@ -393,7 +400,6 @@ int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
 
 int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
     ins_start(self);
-    /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
     return_if_error(asm_ensure_capacity(self, 4));
     *self->code_it++ = REX_W;
     *self->code_it++ = 0x0F;
@@ -403,6 +409,25 @@ int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
     return 0;
 }
 
+int asm_cqo(Asm *self) {
+    ins_start(self);
+    return_if_error(asm_ensure_capacity(self, 2));
+    *self->code_it++ = REX_W;
+    *self->code_it++ = 0x99;
+    ins_end(self, "cqo");
+    return 0;
+}
+
+int asm_idiv_rr(Asm *self, Reg64 src) {
+    ins_start(self);
+    return_if_error(asm_ensure_capacity(self, 4));
+    *self->code_it++ = REX_W;
+    *self->code_it++ = 0xF7;
+    asm_rr(self, src, 0x7);
+    ins_end(self, "idiv %s", reg64_to_str(src));
+    return 0;
+}
+
 int asm_pushr(Asm *self, Reg64 reg) {
     ins_start(self);
     return_if_error(asm_ensure_capacity(self, 1));
@@ -419,6 +444,26 @@ int asm_popr(Asm *self, Reg64 reg) {
     return 0;
 }
 
+/*
+    Note: This is sometimes called with @relative 0 (will print call -5), in which case it's most likely a dummy call until the relative position
+    is later changed with @asm_override_call_rel32. TODO: Update the ins_end debug print to take that into account somehow
+*/
+int asm_call_rel32(Asm *self, i32 relative) {
+    ins_start(self);
+    relative -= 5; /* In x86, the relative position starts from the next instruction */
+    return_if_error(asm_ensure_capacity(self, 5));
+    *self->code_it++ = 0xE8;
+    am_memcpy(self->code_it, &relative, sizeof(relative));
+    self->code_it += sizeof(relative);
+    ins_end(self, "call 0x%x", relative);
+    return 0;
+}
+
+void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
+    new_relative -= 5; /* In x86, the relative position starts from the next instruction */
+    am_memcpy((u8*)self->code + asm_index + 1, &new_relative, sizeof(new_relative));
+}
+
 /* TODO: Remove these !*/
 
 /* /r */
@@ -559,11 +604,12 @@ int asm_ret(Asm *self, u16 bytes) {
     if(bytes == 0) {
         return_if_error(asm_ensure_capacity(self, 1));
         *self->code_it++ = 0xC3;
+        ins_end(self, "ret");
     } else {
         return_if_error(asm_ensure_capacity(self, 3));
         *self->code_it++ = 0xC2;
         am_memcpy(self->code_it, &bytes, sizeof(bytes));
+        ins_end(self, "ret 0x%x", bytes);
     }
-    ins_end(self, "ret 0x%x", bytes);
     return 0;
 }
diff --git a/executor/x86_64/asm.h b/executor/x86_64/asm.h
index b374e44..7e5ac67 100644
--- a/executor/x86_64/asm.h
+++ b/executor/x86_64/asm.h
@@ -7,7 +7,7 @@
 typedef struct {
     void *code;
     u8 *code_it;
-    usize size;
+    usize allocated_size;
 } Asm;
 
 typedef enum {
@@ -47,6 +47,8 @@ void asm_ptr_init_index_disp(AsmPtr *self, Reg64 base, Reg64 index, i32 disp);
 CHECK_RESULT int asm_init(Asm *self);
 void asm_deinit(Asm *self);
 
+usize asm_get_size(Asm *self);
+
 CHECK_RESULT int asm_execute(Asm *self);
 
 CHECK_RESULT int asm_nop(Asm *self);
@@ -68,9 +70,23 @@ CHECK_RESULT int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src);
 CHECK_RESULT int asm_add_rr(Asm *self, Reg64 dst, Reg64 src);
 CHECK_RESULT int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src);
 CHECK_RESULT int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src);
+/* Sign extend RAX into RDX, this is needed for some operations, such as idiv */
+CHECK_RESULT int asm_cqo(Asm *self);
+/*
+    Divide RDX:RAX by @src. Store the quotient in RAX and the remainder in RDX.
+    @asm_cqo should be called before this, since RAX needs to be sign extended into RDX
+*/
+CHECK_RESULT int asm_idiv_rr(Asm *self, Reg64 src);
 
 CHECK_RESULT int asm_pushr(Asm *self, Reg64 reg);
 CHECK_RESULT int asm_popr(Asm *self, Reg64 reg);
+/*
+    In x86 assembly, the @relative position starts from the next instruction.
+    This offset shouldn't be calculated by the caller and is instead managed
+    by this asm library itself.
+*/
+CHECK_RESULT int asm_call_rel32(Asm *self, i32 relative);
+void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative);
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
index b53ccea..b7aa91f 100644
--- a/executor/x86_64/executor.c
+++ b/executor/x86_64/executor.c
@@ -1,5 +1,6 @@
 #include "../executor.h"
 #include "../../include/std/alloc.h"
+#include "../../include/std/buffer.h"
 #include "asm.h"
 #include <assert.h>
@@ -10,8 +11,17 @@
     TODO: Operations with memory registers could access outside the stack. Should this be checked?
 */
 
+typedef struct {
+    u32 asm_index;
+    u16 func_index;
+} CallDefer;
+
 typedef struct {
     Asm asm;
+    usize *function_indices;
+    u16 num_functions;
+    u16 func_counter;
+    Buffer/*CallDefer*/ call_defer;
 } amal_executor_impl;
 
 #define IMPL \
@@ -26,13 +36,20 @@ static i64 abs_i64(i64 value) {
 
 int amal_executor_init(amal_executor **self) {
     amal_executor_impl **impl;
-    return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)self));
     impl = (amal_executor_impl**)self;
+    *impl = NULL;
+    return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)impl));
+    (*impl)->function_indices = NULL;
+    (*impl)->num_functions = 0;
+    (*impl)->func_counter = 0;
+    ignore_result_int(buffer_init(&(*impl)->call_defer, NULL));
     return asm_init(&(*impl)->asm);
 }
 
 void amal_executor_deinit(amal_executor *self) {
     IMPL
+    buffer_deinit(&impl->call_defer);
+    am_free(impl->function_indices);
     asm_deinit(&impl->asm);
     am_free(impl);
 }
@@ -42,6 +59,30 @@ int amal_executor_run(amal_executor *self) {
     return asm_execute(&impl->asm);
 }
 
+int amal_executor_instructions_start(amal_executor *self, u16 num_functions) {
+    void *new_data;
+    IMPL
+    return_if_error(am_realloc(impl->function_indices, num_functions * sizeof(*impl->function_indices), &new_data));
+    impl->function_indices = new_data;
+    impl->num_functions = num_functions;
+    impl->func_counter = 0;
+    buffer_clear(&impl->call_defer);
+    return 0;
+}
+
+int amal_executor_instructions_end(amal_executor *self) {
+    CallDefer *call_defer, *call_defer_end;
+    IMPL
+
+    call_defer = buffer_begin(&impl->call_defer);
+    call_defer_end = buffer_end(&impl->call_defer);
+    for(; call_defer != call_defer_end; ++call_defer) {
+        const isize func_offset = (isize)impl->function_indices[call_defer->func_index] - (isize)call_defer->asm_index;
+        asm_override_call_rel32(&impl->asm, call_defer->asm_index, func_offset);
+    }
+    return 0;
+}
+
 int amal_exec_nop(amal_executor *self) {
     IMPL
     return asm_nop(&impl->asm);
@@ -158,16 +199,25 @@ int amal_exec_mul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
     return_if_error(asm_mul_rr(&self->asm, RAX, RCX));
     return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
 #endif
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
 int amal_exec_idiv(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
-    (void)self;
-    (void)dst_reg;
-    (void)src_reg1;
-    (void)src_reg2;
-    /* TODO: Implement! */
-    return 0;
+    AsmPtr dst;
+    AsmPtr reg1;
+    AsmPtr reg2;
+    IMPL
+
+    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+    asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
+    asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+
+    return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
+    return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
+    return_if_error(asm_cqo(&impl->asm));
+    return_if_error(asm_idiv_rr(&impl->asm, RCX));
+    return asm_mov_mr(&impl->asm, &dst, RAX);
 }
 
 int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
@@ -176,20 +226,24 @@
     (void)self;
     (void)dst_reg;
     (void)src_reg1;
    (void)src_reg2;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
 int amal_exec_push(amal_executor *self, u8 reg) {
-    (void)self;
-    (void)reg;
-    /* TODO: Implement! */
-    return 0;
+    AsmPtr reg_ptr;
+    IMPL
+
+    asm_ptr_init_disp(&reg_ptr, RBP, get_register_stack_offset(reg));
+    return_if_error(asm_mov_rm(&impl->asm, RAX, &reg_ptr));
+    return asm_pushr(&impl->asm, RAX);
 }
 
 int amal_exec_pushi(amal_executor *self, i64 imm) {
     (void)self;
     (void)imm;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
@@ -197,17 +251,48 @@ int amal_exec_pushd(amal_executor *self, BufferView data) {
     (void)self;
     (void)data;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
+    return 0;
+}
+
+int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args) {
+    isize asm_size;
+    IMPL
+    /* TODO: Preserve necessary registers before call? */
+    /* TODO: This assumes all arguments are isize */
+    asm_size = asm_get_size(&impl->asm);
+    if(func_index < impl->func_counter) {
+        return_if_error(asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_size));
+    } else {
+        /*
+            The location of the function has not been defined yet. Use call instruction with dummy data and change
+            the location once the location to the function is known
+        */
+        CallDefer call_defer;
+        call_defer.asm_index = asm_size;
+        call_defer.func_index = func_index;
+        return_if_error(buffer_append(&impl->call_defer, &call_defer, sizeof(call_defer)));
+        return_if_error(asm_call_rel32(&impl->asm, 0));
+    }
+
+    if(num_args > 0)
+        return asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
     return 0;
 }
 
-/*int amal_exec_call(u8 dst_reg, BufferView data);
-int amal_exec_callr(u8 dst_reg, BufferView data);*/
+/*
+int amal_exec_callr(u8 dst_reg, BufferView data) {
+
+}
+*/
+
 int amal_exec_cmp(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
     (void)self;
     (void)dst_reg;
     (void)src_reg1;
     (void)src_reg2;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
@@ -216,6 +301,7 @@ int amal_exec_jz(amal_executor *self, u8 dst_reg, i16 offset) {
     (void)dst_reg;
     (void)offset;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
@@ -223,14 +309,18 @@ int amal_exec_jmp(amal_executor *self, i16 offset) {
     (void)self;
     (void)offset;
     /* TODO: Implement! */
+    assert(bool_false && "TODO: Implement!");
     return 0;
 }
 
-int amal_exec_ret(amal_executor *self) {
-    (void)self;
-    /* TODO: Implement! */
-    assert(bool_false && "TODO: Implement RET. RET needs to restore the stack before returning");
-    return 0;
+int amal_exec_ret(amal_executor *self, u8 reg) {
+    AsmPtr ret_reg;
+    IMPL
+
+    asm_ptr_init_disp(&ret_reg, RBP, get_register_stack_offset(reg));
+    /* Result is returned in RAX register. TODO: Make this work for larger data */
+    return_if_error(asm_mov_rm(&impl->asm, RAX, &ret_reg));
+    return amal_exec_func_end(self);
 }
 
 int amal_exec_func_start(amal_executor *self, u16 num_regs) {
@@ -246,10 +336,11 @@ int amal_exec_func_start(amal_executor *self, u16 num_regs) {
         64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
     */
     IMPL
+    impl->function_indices[impl->func_counter++] = asm_get_size(&impl->asm);
     return_if_error(asm_pushr(&impl->asm, RBX));
     return_if_error(asm_pushr(&impl->asm, RBP));
     return_if_error(asm_mov_rr(&impl->asm, RBP, RSP));
-    return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(usize));
+    return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(isize));
 }
 
 int amal_exec_func_end(amal_executor *self) {
-- 
cgit v1.2.3
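
A note on the cqo/idiv pair added in asm.c and used by amal_exec_idiv above: x86-64 idiv divides the 128-bit value RDX:RAX by its operand, so RAX has to be sign-extended into RDX with cqo first, otherwise the quotient is computed from whatever happens to be in RDX. The fragment below is only a rough C analogue of what the emitted mov/cqo/idiv/mov sequence computes; plain int64_t variables stand in for the registers and it is not the project's code.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t reg[3];          /* pretend virtual registers: dst, src1, src2 */
    int64_t rax, rcx, rdx;
    reg[1] = -7;
    reg[2] = 2;
    rax = reg[1];            /* mov rax, [rbp + offset(src1)] */
    rcx = reg[2];            /* mov rcx, [rbp + offset(src2)] */
    rdx = rax < 0 ? -1 : 0;  /* cqo: rdx becomes the sign extension of rax */
    /* idiv rcx: divides rdx:rax, quotient -> rax, remainder -> rdx */
    rdx = rax % rcx;
    rax = rax / rcx;
    reg[0] = rax;            /* mov [rbp + offset(dst)], rax */
    printf("quotient=%lld remainder=%lld\n", (long long)reg[0], (long long)rdx);
    return 0;
}

Like idiv, C's / and % truncate toward zero, so -7 / 2 gives quotient -3 and remainder -1 here.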
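The deferred-call mechanism in this patch (asm_call_rel32 emitted with a dummy relative of 0, the call site recorded in the CallDefer buffer, and asm_override_call_rel32 fixing it up in amal_executor_instructions_end once the callee's offset is known) comes down to rel32 arithmetic: the stored displacement is measured from the end of the 5-byte E8 instruction. The standalone sketch below only illustrates that arithmetic; the buffer, helper name and types are stand-ins, not the project's API.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Emit (or re-emit) an E8 rel32 call at call_offset targeting target_offset,
   both measured from the start of the code buffer. */
static void patch_call_rel32(uint8_t *code, uint32_t call_offset, int32_t target_offset) {
    int32_t rel = target_offset - (int32_t)(call_offset + 5); /* rel32 counts from the next instruction */
    code[call_offset] = 0xE8;
    memcpy(code + call_offset + 1, &rel, sizeof(rel));
}

int main(void) {
    uint8_t code[64] = {0};
    uint32_t call_site = 10;               /* forward call: target not yet known */
    int32_t rel;
    patch_call_rel32(code, call_site, 0);  /* dummy target, like asm_call_rel32(asm, 0) */
    /* ... later, once the callee has been emitted at offset 32 ... */
    patch_call_rel32(code, call_site, 32); /* backpatch, like asm_override_call_rel32 */
    memcpy(&rel, code + call_site + 1, sizeof(rel));
    printf("rel32 = %d, resolved target = %u\n", rel, (unsigned)(call_site + 5 + rel));
    return 0;
}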
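On the stack frame set up by amal_exec_func_start: each virtual register gets a slot below RBP, which is why the emitted prologue is push rbx; push rbp; mov rbp, rsp; sub rsp, num_regs * sizeof(isize), and why the operands above are addressed through asm_ptr_init_disp(..., RBP, get_register_stack_offset(reg)). get_register_stack_offset itself is not part of this diff; the -(reg + 1) * 8 mapping below is only an assumed layout used to illustrate the idea.

#include <stdint.h>
#include <stdio.h>

/* Assumed mapping from virtual register index to an RBP-relative slot
   (the real get_register_stack_offset is not shown in this patch). */
static int32_t register_stack_offset(uint8_t reg) {
    return -(int32_t)(reg + 1) * (int32_t)sizeof(int64_t);
}

int main(void) {
    uint16_t num_regs = 4;
    uint8_t r;
    printf("sub rsp, %u ; reserve %u virtual register slots\n",
           (unsigned)(num_regs * sizeof(int64_t)), (unsigned)num_regs);
    for(r = 0; r < (uint8_t)num_regs; ++r)
        printf("virtual r%u lives at [rbp%d]\n", (unsigned)r, register_stack_offset(r));
    return 0;
}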