author | dec05eba <dec05eba@protonmail.com> | 2019-09-29 23:47:52 +0200
committer | dec05eba <dec05eba@protonmail.com> | 2020-07-25 14:36:46 +0200
commit | f5dc9ad48db4d22e7d6f15e340063dc7cb14c1e1 (patch)
tree | 4465a81a77e936dc2ed6ecd90183ba6af9cc2dae /executor/x86_64
parent | c811a743a1528db1d05970e1aa14162ef7c70b75 (diff)
Implicit cast from str to ?&c_char, fix use of parameters (to use sys v registers)
Diffstat (limited to 'executor/x86_64')
-rw-r--r-- | executor/x86_64/asm.c | 2
-rw-r--r-- | executor/x86_64/asm.h | 2
-rw-r--r-- | executor/x86_64/executor.c | 378
3 files changed, 239 insertions, 143 deletions
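
The bulk of the change in executor.c replaces the old stack-only register model (the removed get_register_stack_offset macro) with an AsmOperand abstraction: a bytecode register flagged as a parameter maps directly to one of the registers in SYS_V_REG_PARAMS when its index is below NUM_REG_PARAMS, and otherwise to a stack slot at rbp + index*8 + 16, per the formula in amal_reg_to_asm_operand, while local registers keep their negative RBP offsets. Note that the array also lists R10 and R11, which the standard System V AMD64 ABI does not use for integer arguments, which suggests it describes the project's internal calling convention rather than the plain C ABI. The standalone sketch below only illustrates that mapping rule; the names used here (describe_param, describe_local, WORD) are not part of the repository.

#include <stdio.h>

/* Illustrative mirror of amal_reg_to_asm_operand (executor.c): indices below
   NUM_PARAM_REGS live in registers, the rest spill above rbp, locals sit below rbp. */
static const char *SYSV_PARAM_REGS[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r10", "r11" };
enum { NUM_PARAM_REGS = 8, WORD = 8 };

static void describe_param(int index) {
    if(index < NUM_PARAM_REGS)
        printf("param %d -> %s\n", index, SYSV_PARAM_REGS[index]);
    else /* spilled parameters follow the diff's formula: index * 8 + 16 bytes above rbp */
        printf("param %d -> [rbp + %d]\n", index, index * WORD + 2 * WORD);
}

static void describe_local(int index) {
    /* locals follow the diff's formula: index * 8 + 8 bytes below rbp */
    printf("local %d -> [rbp - %d]\n", index, index * WORD + WORD);
}

int main(void) {
    describe_param(0); /* rdi */
    describe_param(8); /* first parameter that no longer fits in a register */
    describe_local(0); /* [rbp - 8] */
    return 0;
}

Running the sketch prints param 0 -> rdi, param 8 -> [rbp + 80] and local 0 -> [rbp - 8].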
diff --git a/executor/x86_64/asm.c b/executor/x86_64/asm.c
index 60b1752..f7bc19c 100644
--- a/executor/x86_64/asm.c
+++ b/executor/x86_64/asm.c
@@ -442,7 +442,7 @@ void asm_cqo(Asm *self) {
     ins_end(self, "cqo");
 }
 
-void asm_idiv_rr(Asm *self, Reg64 src) {
+void asm_idiv_rax_r(Asm *self, Reg64 src) {
     ins_start(self);
     *self->code_it++ = rex_rr(src, 0);
     *self->code_it++ = 0xF7;
diff --git a/executor/x86_64/asm.h b/executor/x86_64/asm.h
index b82a63e..dacc248 100644
--- a/executor/x86_64/asm.h
+++ b/executor/x86_64/asm.h
@@ -89,7 +89,7 @@ void asm_cqo(Asm *self);
     Divide RDX:RAX by @src. Store the quotient in RAX and the remainder in RDX.
     @asm_cqo should be called before this, since RAX needs to be sign extended into RDX
 */
-void asm_idiv_rr(Asm *self, Reg64 src);
+void asm_idiv_rax_r(Asm *self, Reg64 src);
 
 void asm_pushr(Asm *self, Reg64 reg);
 void asm_popr(Asm *self, Reg64 reg);
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
index c918c13..6f3c1de 100644
--- a/executor/x86_64/executor.c
+++ b/executor/x86_64/executor.c
@@ -32,32 +32,109 @@ typedef struct {
     int label_counter;
     int num_args;
     int num_pushed_values;
+    int num_saved_params_for_call;
 } amal_executor_impl;
 
 #define ASM_ENSURE_CAPACITY return_if_error(asm_ensure_capacity(&impl->asm, 256));
 
-#define IMPL_START \
-    amal_executor_impl *impl = (amal_executor_impl*)self; \
-    ASM_ENSURE_CAPACITY
-
-/*
-    @reg will be a positive value when accessing local variables, in which case the first
-    local variable is located at -sizeof(usize) and the next one is at -(2 * sizeof(usize)).
-    @reg will be a negative value starting at -1 when accessing parameters.
-    The first parameter is located at 3*sizeof(usize) and the next one is at 4*sizeof(usize).
-    Parameter starts at 3*sizeof(usize) because offset 0 is the return address, offset 1*sizeof(usize) is the
-    saved RBP and 2*sizeof(usize) is saved RBX.
-    TODO: Use different offset when saving more registers, for example on Microsoft Windows.
-*/
-#define get_register_stack_offset(reg) \
-    (reg >= 0 ? (i32)(-reg * (int)sizeof(usize) - sizeof(usize)) : (i32)(-reg * (int)sizeof(usize) + 2*sizeof(usize)))
+const Reg64 SYS_V_REG_PARAMS[] = { RDI, RSI, RDX, RCX, R8, R9, R10, R11 };
+const int NUM_REG_PARAMS = 8;
 
 static i64 abs_i64(i64 value) {
     return value >= 0 ? value : -value;
 }
 
-const Reg64 SYS_V_REG_PARAMS[] = { RDI, RSI, RDX, RCX, R8, R9, R10, R11 };
-const int NUM_REG_PARAMS = 8;
+typedef enum {
+    OPERAND_TYPE_REG,
+    OPERAND_TYPE_MEM
+} AsmOperandType;
+
+typedef struct {
+    AsmOperandType type;
+    union {
+        AsmPtr mem;
+        Reg64 reg;
+    } value;
+} AsmOperand;
+
+static AsmOperand amal_reg_to_asm_operand(AmalReg reg) {
+    AsmOperand result;
+    AmalReg reg_value = AMAL_REG_VALUE(reg);
+    if(reg & REG_FLAG_PARAM) {
+        if(reg_value < NUM_REG_PARAMS) {
+            result.type = OPERAND_TYPE_REG;
+            result.value.reg = SYS_V_REG_PARAMS[reg_value];
+        } else {
+            result.type = OPERAND_TYPE_MEM;
+            asm_ptr_init_disp(&result.value.mem, RBP, (i32)reg_value * sizeof(usize) + 2 * sizeof(usize));
+        }
+    } else {
+        result.type = OPERAND_TYPE_MEM;
+        asm_ptr_init_disp(&result.value.mem, RBP, (i32)-reg_value * sizeof(usize) - sizeof(usize));
+    }
+    return result;
+}
+
+static AsmOperand asm_reg_to_operand(Reg64 reg) {
+    AsmOperand result;
+    result.type = OPERAND_TYPE_REG;
+    result.value.reg = reg;
+    return result;
+}
+
+/* Note: both operands can't be memory operands */
+static void asm_mov(Asm *self, AsmOperand *dst, AsmOperand *src) {
+    switch(dst->type) {
+        case OPERAND_TYPE_REG: {
+            switch(src->type) {
+                case OPERAND_TYPE_REG:
+                    asm_mov_rr(self, dst->value.reg, src->value.reg);
+                    break;
+                case OPERAND_TYPE_MEM:
+                    asm_mov_rm(self, dst->value.reg, &src->value.mem);
+                    break;
+            }
+            break;
+        }
+        case OPERAND_TYPE_MEM: {
+            assert(src->type == OPERAND_TYPE_REG && "Both operands can't be memory operands");
+            asm_mov_mr(self, &dst->value.mem, src->value.reg);
+            break;
+        }
+    }
+}
+
+static void asm_movi(Asm *self, AsmOperand *dst, i64 immediate) {
+    switch(dst->type) {
+        case OPERAND_TYPE_REG:
+            asm_mov_ri(self, dst->value.reg, immediate);
+            break;
+        case OPERAND_TYPE_MEM:
+            asm_mov_mi(self, &dst->value.mem, immediate);
+            break;
+    }
+}
+
+static void asm_cmp(Asm *self, AsmOperand *op1, AsmOperand *op2) {
+    switch(op1->type) {
+        case OPERAND_TYPE_REG: {
+            switch(op2->type) {
+                case OPERAND_TYPE_REG:
+                    asm_cmp_rm64(self, op1->value.reg, op2->value.reg);
+                    break;
+                case OPERAND_TYPE_MEM:
+                    asm_cmp_rm(self, op1->value.reg, &op2->value.mem);
+                    break;
+            }
+            break;
+        }
+        case OPERAND_TYPE_MEM: {
+            assert(op2->type == OPERAND_TYPE_REG && "Both operands can't be memory operands");
+            asm_cmp_rm(self, op2->value.reg, &op1->value.mem);
+            break;
+        }
+    }
+}
 
 int amal_executor_init(amal_executor **self) {
     amal_executor_impl **impl;
@@ -68,6 +145,7 @@ int amal_executor_init(amal_executor **self) {
     (*impl)->label_counter = 0;
     (*impl)->num_args = 0;
     (*impl)->num_pushed_values = 0;
+    (*impl)->num_saved_params_for_call = 0;
     ignore_result_int(buffer_init(&(*impl)->jump_defer, NULL));
     return asm_init(&(*impl)->asm);
 }
@@ -102,109 +180,107 @@ int amal_executor_instructions_end(amal_executor *self) {
 }
 
 int amal_exec_nop(amal_executor *self) {
-    IMPL_START
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
     asm_nop(&impl->asm);
     return 0;
 }
 
-int amal_exec_setz(amal_executor *self, i8 dst_reg) {
-    AsmPtr dst;
-    IMPL_START
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_mov_mi(&impl->asm, &dst, 0);
+int amal_exec_setz(amal_executor *self, AmalReg dst_reg) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
+    asm_movi(&impl->asm, &dst_op, 0);
     return 0;
 }
 
-int amal_exec_mov(amal_executor *self, i8 dst_reg, i8 src_reg) {
-    AsmPtr ptr;
-    IMPL_START
-
-    asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(src_reg));
-    asm_mov_rm(&impl->asm, RAX, &ptr);
+int amal_exec_mov(amal_executor *self, AmalReg dst_reg, AmalReg src_reg) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op = amal_reg_to_asm_operand(src_reg);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(dst_reg));
-    asm_mov_mr(&impl->asm, &ptr, RAX);
+    asm_mov(&impl->asm, &rax_op, &src_op);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     return 0;
 }
 
-int amal_exec_movi(amal_executor *self, i8 dst_reg, i64 imm) {
-    IMPL_START
+int amal_exec_movi(amal_executor *self, AmalReg dst_reg, i64 imm) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
     /* TODO: if @number is a float then use float instructions */
    if(abs_i64(imm) <= INT32_MAX) {
-        AsmPtr dst;
-        asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-        asm_mov_mi(&impl->asm, &dst, imm);
+        asm_movi(&impl->asm, &dst_op, imm);
     } else {
-        AsmPtr dst;
-        asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-        asm_mov_ri(&impl->asm, RAX, imm);
-        asm_mov_mr(&impl->asm, &dst, RAX);
+        AsmOperand rax_op = asm_reg_to_operand(RAX);
+        asm_movi(&impl->asm, &rax_op, imm);
+        asm_mov(&impl->asm, &dst_op, &rax_op);
     }
     return 0;
 }
 
-int amal_exec_movd(amal_executor *self, i8 dst_reg, BufferView data) {
-    AsmPtr dst;
-    IMPL_START
+int amal_exec_movd(amal_executor *self, AmalReg dst_reg, BufferView data) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_mov_ri(&impl->asm, RAX, (uintptr_t)data.data);
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_mov_ri(&impl->asm, rax_op.value.reg, (uintptr_t)data.data);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
    return 0;
 }
 
-int amal_exec_add(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
-    AsmPtr dst;
-    AsmPtr reg1;
-    AsmPtr reg2;
-    IMPL_START
-
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
-    asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+int amal_exec_add(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op1 = amal_reg_to_asm_operand(src_reg1);
+    AsmOperand src_op2 = amal_reg_to_asm_operand(src_reg2);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    AsmOperand rcx_op = asm_reg_to_operand(RCX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_mov_rm(&impl->asm, RAX, &reg1);
-    asm_mov_rm(&impl->asm, RCX, &reg2);
+    asm_mov(&impl->asm, &rax_op, &src_op1);
+    asm_mov(&impl->asm, &rcx_op, &src_op2);
     asm_add_rr(&impl->asm, RAX, RCX);
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     return 0;
 }
 
-int amal_exec_sub(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
-    AsmPtr dst;
-    AsmPtr reg1;
-    AsmPtr reg2;
-    IMPL_START
-
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
-    asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+int amal_exec_sub(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op1 = amal_reg_to_asm_operand(src_reg1);
+    AsmOperand src_op2 = amal_reg_to_asm_operand(src_reg2);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    AsmOperand rcx_op = asm_reg_to_operand(RCX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_mov_rm(&impl->asm, RAX, &reg1);
-    asm_mov_rm(&impl->asm, RCX, &reg2);
+    asm_mov(&impl->asm, &rax_op, &src_op1);
+    asm_mov(&impl->asm, &rcx_op, &src_op2);
     asm_sub_rr(&impl->asm, RAX, RCX);
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     return 0;
 }
 
-int amal_exec_imul(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
-    AsmPtr dst;
-    AsmPtr reg1;
-    AsmPtr reg2;
-    IMPL_START
-
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
-    asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+int amal_exec_imul(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op1 = amal_reg_to_asm_operand(src_reg1);
+    AsmOperand src_op2 = amal_reg_to_asm_operand(src_reg2);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    AsmOperand rcx_op = asm_reg_to_operand(RCX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_mov_rm(&impl->asm, RAX, &reg1);
-    asm_mov_rm(&impl->asm, RCX, &reg2);
+    asm_mov(&impl->asm, &rax_op, &src_op1);
+    asm_mov(&impl->asm, &rcx_op, &src_op2);
     asm_imul_rr(&impl->asm, RAX, RCX);
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     return 0;
 }
 
-int amal_exec_mul(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
+int amal_exec_mul(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
     (void)self;
     (void)dst_reg;
     (void)src_reg1;
@@ -228,25 +304,24 @@ int amal_exec_mul(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
     return 0;
 }
 
-int amal_exec_idiv(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
-    AsmPtr dst;
-    AsmPtr reg1;
-    AsmPtr reg2;
-    IMPL_START
-
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
-    asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+int amal_exec_idiv(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op1 = amal_reg_to_asm_operand(src_reg1);
+    AsmOperand src_op2 = amal_reg_to_asm_operand(src_reg2);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    AsmOperand rcx_op = asm_reg_to_operand(RCX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_mov_rm(&impl->asm, RAX, &reg1);
-    asm_mov_rm(&impl->asm, RCX, &reg2);
+    asm_mov(&impl->asm, &rax_op, &src_op1);
+    asm_mov(&impl->asm, &rcx_op, &src_op2);
     asm_cqo(&impl->asm);
-    asm_idiv_rr(&impl->asm, RCX);
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_idiv_rax_r(&impl->asm, RCX);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     return 0;
 }
 
-int amal_exec_div(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
+int amal_exec_div(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
     (void)self;
     (void)dst_reg;
     (void)src_reg1;
@@ -256,17 +331,32 @@ int amal_exec_div(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
     return 0;
 }
 
-int amal_exec_push(amal_executor *self, i8 reg) {
-    AsmPtr reg_ptr;
-    IMPL_START
+int amal_exec_push(amal_executor *self, AmalReg reg) {
+    AsmOperand op = amal_reg_to_asm_operand(reg);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_ptr_init_disp(&reg_ptr, RBP, get_register_stack_offset(reg));
     if(impl->num_pushed_values < NUM_REG_PARAMS) {
-        asm_mov_rm(&impl->asm, SYS_V_REG_PARAMS[impl->num_pushed_values], &reg_ptr);
+        ++impl->num_saved_params_for_call;
+        /*
+            TODO: If the arguments to the function are taken from the parameter of the current
+            function, then this can be optimized to either swap registers or no-op
+        */
+        AsmOperand dst_reg = asm_reg_to_operand(SYS_V_REG_PARAMS[impl->num_pushed_values]);
+        /*
+            Backup parameter.
+            TODO: Remove this, copy it to a temporary register instead.
+            This should also only be done if the parameter is actually used in the current function
+            and only if it is used after this point
+        */
+        asm_pushr(&impl->asm, dst_reg.value.reg);
+        asm_mov(&impl->asm, &dst_reg, &op);
     } else {
-        asm_mov_rm(&impl->asm, RAX, &reg_ptr);
+        AsmOperand rax_op = asm_reg_to_operand(RAX);
+        asm_mov(&impl->asm, &rax_op, &op);
         asm_pushr(&impl->asm, RAX);
     }
+    ++impl->num_pushed_values;
     return 0;
 }
 
@@ -293,26 +383,27 @@ int amal_exec_call_start(amal_executor *self, u8 num_args) {
     return 0;
 }
 
-int amal_exec_call(amal_executor *self, u32 code_offset, i8 dst_reg) {
+int amal_exec_call(amal_executor *self, u32 code_offset, AmalReg dst_reg) {
     amal_executor_impl *impl = (amal_executor_impl*)self;
     /* TODO: Preserve necessary registers before call? */
     /* TODO: This assumes all arguments are isize */
     /* Do the function call */
     isize asm_offset = asm_get_size(&impl->asm);
-    int num_pushed_stack = impl->num_pushed_values - (int)NUM_REG_PARAMS;
+    int num_pushed_stack = impl->num_pushed_values + impl->num_saved_params_for_call - (int)NUM_REG_PARAMS;
     ASM_ENSURE_CAPACITY
 
     assert((num_pushed_stack <= 0 || num_pushed_stack % 2 == 0) && "TODO: Align stack to 16-bytes before calling functions");
     assert(code_offset < asm_offset);
     asm_call_rel32(&impl->asm, (isize)code_offset - asm_offset);
 
-    /* Handle function result and cleanup */
+    /* Function result */
     {
-        AsmPtr dst;
-        asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+        AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+        AsmOperand rax_op = asm_reg_to_operand(RAX);
        /* TODO: Make this work when result is not stored in RAX (multiple return results) */
-        asm_mov_mr(&impl->asm, &dst, RAX);
+        asm_mov(&impl->asm, &dst_op, &rax_op);
     }
+    /* Function cleanup */
     if(num_pushed_stack > 0)
         asm_add_rm64_imm(&impl->asm, RSP, num_pushed_stack * sizeof(isize));
     impl->num_pushed_values = 0;
@@ -332,10 +423,11 @@ void amal_exec_call_overwrite(amal_executor *self, u32 call_code_offset, i32 new
     The rest are passed in the stack.
 */
 /* TODO: Make this work when function returns something else than a POD */
-int amal_exec_calle(amal_executor *self, void *func, i8 dst_reg) {
-    AsmPtr dst;
+int amal_exec_calle(amal_executor *self, void *func, AmalReg dst_reg) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
     amal_executor_impl *impl = (amal_executor_impl*)self;
-    int num_pushed_stack = impl->num_pushed_values - (int)NUM_REG_PARAMS;
+    int num_pushed_stack = impl->num_pushed_values + impl->num_saved_params_for_call - (int)NUM_REG_PARAMS;
     ASM_ENSURE_CAPACITY
 
     assert((num_pushed_stack <= 0 || num_pushed_stack % 2 == 0) && "TODO: Align stack to 16-bytes before calling functions");
@@ -344,8 +436,7 @@ int amal_exec_calle(amal_executor *self, void *func, i8 dst_reg) {
     /* TODO: This assumes all arguments are isize */
     asm_mov_ri(&impl->asm, RAX, (intptr_t)func);
     asm_callr(&impl->asm, RAX);
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_mov_mr(&impl->asm, &dst, RAX);
+    asm_mov(&impl->asm, &dst_op, &rax_op);
     if(num_pushed_stack > 0)
         asm_add_rm64_imm(&impl->asm, RSP, num_pushed_stack * sizeof(isize));
     impl->num_pushed_values = 0;
@@ -353,37 +444,39 @@ int amal_exec_calle(amal_executor *self, void *func, i8 dst_reg) {
 }
 
 /*
-int amal_exec_callr(i8 dst_reg, BufferView data) {
+int amal_exec_callr(AmalReg dst_reg, BufferView data) {
 
 }
 */
 
-int amal_exec_cmp(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
-    AsmPtr dst, src1, src2;
-    IMPL_START
-
-    asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
-    asm_ptr_init_disp(&src1, RBP, get_register_stack_offset(src_reg1));
-    asm_ptr_init_disp(&src2, RBP, get_register_stack_offset(src_reg2));
+int amal_exec_cmp(amal_executor *self, AmalReg dst_reg, AmalReg src_reg1, AmalReg src_reg2) {
+    AsmOperand dst_op = amal_reg_to_asm_operand(dst_reg);
+    AsmOperand src_op1 = amal_reg_to_asm_operand(src_reg1);
+    AsmOperand src_op2 = amal_reg_to_asm_operand(src_reg2);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    AsmOperand rcx_op = asm_reg_to_operand(RCX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_mov_rm(&impl->asm, RCX, &dst);
-    asm_xor_rm64(&impl->asm, RCX, RCX);
+    asm_mov(&impl->asm, &rcx_op, &dst_op);
+    asm_xor_rm64(&impl->asm, rcx_op.value.reg, rcx_op.value.reg);
 
-    asm_mov_rm(&impl->asm, RAX, &src1);
-    asm_cmp_rm(&impl->asm, RAX, &src2);
-    asm_sete_r(&impl->asm, RCX);
-    asm_mov_mr(&impl->asm, &dst, RCX);
+    asm_mov(&impl->asm, &rax_op, &src_op1);
+    asm_cmp(&impl->asm, &rax_op, &src_op2);
+    asm_sete_r(&impl->asm, rcx_op.value.reg);
+    asm_mov(&impl->asm, &dst_op, &rcx_op);
     return 0;
 }
 
-int amal_exec_jz(amal_executor *self, i8 reg, u16 target_label) {
-    AsmPtr ptr;
+int amal_exec_jz(amal_executor *self, AmalReg reg, u16 target_label) {
+    AsmOperand op = amal_reg_to_asm_operand(reg);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
     u32 asm_offset;
-    IMPL_START
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(reg));
-    asm_mov_rm(&impl->asm, RAX, &ptr);
-    asm_cmp_rm64_imm(&impl->asm, RAX, 0);
+    asm_mov(&impl->asm, &rax_op, &op);
+    asm_cmp_rm64_imm(&impl->asm, rax_op.value.reg, 0);
     asm_offset = asm_get_size(&impl->asm);
 
     if(target_label < impl->label_counter) {
@@ -424,13 +517,14 @@ int amal_exec_jmp(amal_executor *self, u16 target_label) {
     }
 }
 
-int amal_exec_ret(amal_executor *self, i8 reg) {
-    AsmPtr ret_reg;
-    IMPL_START
+int amal_exec_ret(amal_executor *self, AmalReg reg) {
+    AsmOperand op = amal_reg_to_asm_operand(reg);
+    AsmOperand rax_op = asm_reg_to_operand(RAX);
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
 
-    asm_ptr_init_disp(&ret_reg, RBP, get_register_stack_offset(reg));
     /* Result is returned in RAX register. TODO: Make this work when returning more than one result */
-    asm_mov_rm(&impl->asm, RAX, &ret_reg);
+    asm_mov(&impl->asm, &rax_op, &op);
     return amal_exec_func_end(self);
 }
 
@@ -438,7 +532,7 @@ static u32 get_next_uneven_number(u32 value) {
     return value + !(value & 1);
 }
 
-int amal_exec_func_start(amal_executor *self, u16 num_regs) {
+int amal_exec_func_start(amal_executor *self, u8 num_params, u16 num_regs) {
     /*
         TODO: Validate stack size, or maybe remove all validation? do we really need validation?
        If we need security, we could fork the process instead.
@@ -450,7 +544,9 @@ int amal_exec_func_start(amal_executor *self, u16 num_regs) {
        64-bit Windows: RBX, RSI, RDI, RBP, R12-R15, XMM6-XMM15
        64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
     */
-    IMPL_START
+    amal_executor_impl *impl = (amal_executor_impl*)self;
+    ASM_ENSURE_CAPACITY
+    (void)num_params; /* TODO: Allow use of parameter registers that do not need to be preserved since they are not used */
     asm_pushr(&impl->asm, RBX);
     asm_pushr(&impl->asm, RBP);
     asm_mov_rr(&impl->asm, RBP, RSP);