about summary refs log tree commit diff
path: root/executor/x86_64
diff options
context:
space:
mode:
author	dec05eba <dec05eba@protonmail.com>	2019-08-24 23:31:14 +0200
committer	dec05eba <dec05eba@protonmail.com>	2020-07-25 14:36:46 +0200
commitd9f652919961a2947452ad3c4af4659f3d2fb330 (patch)
tree2db541db311a9b5a83d3f2c9b199f6d5c3341555 /executor/x86_64
parent40652d7dbf701eda83fa8323b42a6b5bf0ca6bdd (diff)
Add if/else/elseif/while, including the final assembly
Diffstat (limited to 'executor/x86_64')
-rw-r--r--	executor/x86_64/asm.c	112
-rw-r--r--	executor/x86_64/asm.h	24
-rw-r--r--	executor/x86_64/executor.c	151
3 files changed, 248 insertions, 39 deletions
diff --git a/executor/x86_64/asm.c b/executor/x86_64/asm.c
index f2bb801..a6bf274 100644
--- a/executor/x86_64/asm.c
+++ b/executor/x86_64/asm.c
@@ -480,6 +480,112 @@ int asm_call_rel32(Asm *self, i32 relative) {
}
void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
+ assert(*(u8*)(self->code + asm_index) == 0xE8);
+ new_relative -= 5; /* In x86, the relative position starts from the next instruction */
+ am_memcpy((u8*)self->code + asm_index + 1, &new_relative, sizeof(new_relative));
+}
+
+int asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2) {
+ ins_start(self);
+ /* 8 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 8));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x3B;
+ asm_rm(self, reg2, reg1);
+ ins_end(self, "cmp %s, %s", reg64_to_str(reg1), asm_ptr_to_string(reg2));
+ return 0;
+}
+
+int asm_sete_m(Asm *self, AsmPtr *dst) {
+ assert(dst->base != RSP && dst->base != RBP && dst->base != RSI && dst->base != RDI);
+ ins_start(self);
+ /* 8 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 8));
+ *self->code_it++ = 0x0F;
+ *self->code_it++ = 0x94;
+ asm_rm(self, dst, 0x0); /* the @src bits are not used */
+ ins_end(self, "sete %s", asm_ptr_to_string(dst));
+ return 0;
+}
+
+int asm_sete_r(Asm *self, Reg64 dst) {
+ assert(dst != RSP && dst != RBP && dst != RSI && dst != RDI);
+ ins_start(self);
+ /* 8 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 8));
+ *self->code_it++ = 0x0F;
+ *self->code_it++ = 0x94;
+ asm_rr(self, dst, 0x0); /* the @src bits are not used */
+ ins_end(self, "sete %s", reg64_to_str(dst));
+ return 0;
+}
+
+/*
+ Note: This is sometimes called with @relative INT32_MAX-(2 or 6) (will print jz 0x7ffffff9), in which case it's most likely a dummy
+ jump until the relative position is later changed with @asm_override_jcc_rel32.
+ TODO: Update the ins_end debug print to take that into account somehow
+*/
+int asm_jz(Asm *self, i32 relative) {
+ /*
+ Note: We don't use the 16-bit relative variant, as it would clear the upper two bytes of the EIP register, resulting
+ in a maximum instruction pointer size of 16 bits
+ */
+ ins_start(self);
+ if(abs(relative - 2) <= INT8_MAX) {
+ relative -= 2;
+ return_if_error(asm_ensure_capacity(self, 2));
+ *self->code_it++ = 0x74;
+ *self->code_it++ = (i8)relative;
+ } else {
+ relative -= 6;
+ return_if_error(asm_ensure_capacity(self, 6));
+ *self->code_it++ = 0x0F;
+ *self->code_it++ = 0x84;
+ am_memcpy(self->code_it, &relative, sizeof(relative));
+ self->code_it += sizeof(relative);
+ }
+ ins_end(self, "jz 0x%x", relative);
+ return 0;
+}
+
+void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative) {
+ /* +2 because rel32 variant of the jump instruction opcode is 2 bytes */
+ assert(*(u8*)(self->code + asm_index) == 0x0F);
+ assert(*(u8*)(self->code + asm_index + 1) == 0x84);
+ new_relative -= 6; /* In x86, the relative position starts from the next instruction */
+ am_memcpy((u8*)self->code + asm_index + 2, &new_relative, sizeof(new_relative));
+}
+
+/*
+ Note: This is sometimes called with @relative INT32_MAX-(2 or 5) (will print jmp 0x7ffffffa), in which case it's most likely a dummy
+ jump until the relative position is later changed with @asm_override_jmp_rel32.
+ TODO: Update the ins_end debug print to take that into account somehow
+*/
+int asm_jmp(Asm *self, i32 relative) {
+ /*
+ Note: We don't use the 16-bit relative variant, as it would clear the upper two bytes of the EIP register, resulting
+ in a maximum instruction pointer size of 16 bits
+ */
+ ins_start(self);
+ if(abs(relative - 2) <= INT8_MAX) {
+ relative -= 2;
+ return_if_error(asm_ensure_capacity(self, 2));
+ *self->code_it++ = 0xEB;
+ *self->code_it++ = (i8)relative;
+ } else {
+ relative -= 5;
+ return_if_error(asm_ensure_capacity(self, 5));
+ *self->code_it++ = 0xE9;
+ am_memcpy(self->code_it, &relative, sizeof(relative));
+ self->code_it += sizeof(relative);
+ }
+ ins_end(self, "jmp 0x%x", relative);
+ return 0;
+}
+
+void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative) {
+ /* +1 to skip instruction opcode */
+ assert(*(u8*)(self->code + asm_index) == 0xE9);
new_relative -= 5; /* In x86, the relative position starts from the next instruction */
am_memcpy((u8*)self->code + asm_index + 1, &new_relative, sizeof(new_relative));
}
@@ -488,8 +594,7 @@ void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
/* /r */
#define DEFINE_INS_RM(mnemonic, opcode) \
-int asm_##mnemonic##_rmb(Asm *self, Reg32 dst, Reg32 src) { \
- return_if_error(asm_ensure_capacity(self, 2)); \
+int asm_##mnemonic##_rmb(Asm *self, Reg32 dst, Reg32 src) { \
*self->code_it++ = opcode; \
*self->code_it++ = 0xC0 + 8*dst + src; \
return 0; \
@@ -498,6 +603,7 @@ int asm_##mnemonic##_rmb(Asm *self, Reg32 dst, Reg32 src) { \
int asm_##mnemonic##_rm32(Asm *self, Reg32 dst, Reg32 src) { \
int result; \
ins_start(self); \
+ return_if_error(asm_ensure_capacity(self, 2)); \
result = asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
ins_end(self, #mnemonic" %s, %s", reg32_to_str(dst), reg32_to_str(src)); \
return result; \
@@ -506,7 +612,7 @@ int asm_##mnemonic##_rm32(Asm *self, Reg32 dst, Reg32 src) { \
int asm_##mnemonic##_rm64(Asm *self, Reg64 dst, Reg64 src) { \
int result; \
ins_start(self); \
- return_if_error(asm_ensure_capacity(self, 1)); \
+ return_if_error(asm_ensure_capacity(self, 3)); \
*self->code_it++ = REX_W; \
result = asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
ins_end(self, #mnemonic" %s, %s", reg64_to_str(dst), reg64_to_str(src)); \
diff --git a/executor/x86_64/asm.h b/executor/x86_64/asm.h
index ace1ecf..ac519e9 100644
--- a/executor/x86_64/asm.h
+++ b/executor/x86_64/asm.h
@@ -89,6 +89,30 @@ CHECK_RESULT int asm_callr(Asm *self, Reg64 reg);
CHECK_RESULT int asm_call_rel32(Asm *self, i32 relative);
void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative);
+CHECK_RESULT int asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2);
+/*
+ Sets the 8 bit memory operand to 1 if the last cmp was equals, otherwise set it to 0.
+ Note: this instruction doesn't work with AH (RSP), CH (RBP), DH (RSI) and BH (RDI).
+ TODO: When ST, MM AND XMM registers are implemented, also check for them as they are also invalid
+*/
+CHECK_RESULT int asm_sete_m(Asm *self, AsmPtr *dst);
+CHECK_RESULT int asm_sete_r(Asm *self, Reg64 dst);
+/*
+ In x86 assembly, the @relative position starts from the next instruction.
+ This offset shouldn't be calculated by the caller and is instead managed
+ by this asm library itself.
+*/
+CHECK_RESULT int asm_jz(Asm *self, i32 relative);
+/* Override conditional jump target */
+void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative);
+/*
+ In x86 assembly, the @relative position starts from the next instruction.
+ This offset shouldn't be calculated by the caller and is instead managed
+ by this asm library itself.
+*/
+CHECK_RESULT int asm_jmp(Asm *self, i32 relative);
+void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative);
+
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
index 335790a..d35dbdc 100644
--- a/executor/x86_64/executor.c
+++ b/executor/x86_64/executor.c
@@ -11,22 +11,35 @@
TODO: Operations with memory registers could access outside the stack. Should this be checked?
*/
+/*
+ TODO: Allow this to dynamically change up to 1<<16, to match jump instruction.
+ This is a sane limit for now
+*/
+#define MAX_LABELS 128
+
typedef struct {
u32 asm_index;
u16 func_index;
} CallDefer;
typedef struct {
+ u32 asm_index;
+ u16 target_label;
+ bool condition;
+} JumpDefer;
+
+typedef struct {
Asm asm;
usize *function_indices;
u16 num_functions;
u16 func_counter;
Buffer/*CallDefer*/ call_defer;
+ Buffer/*JumpDefer*/ jump_defer;
+ u32 label_asm_index[MAX_LABELS];
+ int label_counter;
} amal_executor_impl;
-#define IMPL \
- amal_executor_impl *impl; \
- impl = (amal_executor_impl*)self;
+#define IMPL amal_executor_impl *impl = (amal_executor_impl*)self;
/*
@reg will be a positive value when accessing local variables, in which case the first
@@ -53,11 +66,14 @@ int amal_executor_init(amal_executor **self) {
(*impl)->num_functions = 0;
(*impl)->func_counter = 0;
ignore_result_int(buffer_init(&(*impl)->call_defer, NULL));
+ ignore_result_int(buffer_init(&(*impl)->jump_defer, NULL));
+ (*impl)->label_counter = 0;
return asm_init(&(*impl)->asm);
}
void amal_executor_deinit(amal_executor *self) {
IMPL
+ buffer_deinit(&impl->jump_defer);
buffer_deinit(&impl->call_defer);
am_free(impl->function_indices);
asm_deinit(&impl->asm);
@@ -75,26 +91,28 @@ u32 amal_exec_get_code_offset(amal_executor *self) {
}
int amal_executor_instructions_start(amal_executor *self, u16 num_functions) {
- void *new_data;
IMPL
- return_if_error(am_realloc(impl->function_indices, num_functions * sizeof(*impl->function_indices), &new_data));
- impl->function_indices = new_data;
+ return_if_error(am_realloc(impl->function_indices, num_functions * sizeof(usize), (void**)&impl->function_indices));
impl->num_functions = num_functions;
- impl->func_counter = 0;
- buffer_clear(&impl->call_defer);
return 0;
}
int amal_executor_instructions_end(amal_executor *self) {
- CallDefer *call_defer, *call_defer_end;
IMPL
- call_defer = buffer_begin(&impl->call_defer);
- call_defer_end = buffer_end(&impl->call_defer);
+ CallDefer *call_defer = buffer_begin(&impl->call_defer);
+ CallDefer *call_defer_end = buffer_end(&impl->call_defer);
for(; call_defer != call_defer_end; ++call_defer) {
- const isize func_offset = (isize)impl->function_indices[call_defer->func_index] - (isize)call_defer->asm_index;
+ i32 func_offset;
+ if(call_defer->func_index >= impl->num_functions) {
+ amal_log_error("Program attempted to call a function that doesn't exist (index %u, while there are only %u functions)", call_defer->func_index, impl->num_functions);
+ return -1;
+ }
+ func_offset = (isize)impl->function_indices[call_defer->func_index] - (isize)call_defer->asm_index;
asm_override_call_rel32(&impl->asm, call_defer->asm_index, func_offset);
}
+ buffer_clear(&impl->call_defer);
+ impl->func_counter = 0;
return 0;
}
@@ -272,25 +290,26 @@ int amal_exec_pushd(amal_executor *self, BufferView data) {
}
int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args, i8 dst_reg) {
- isize asm_size;
IMPL
/* TODO: Preserve necessary registers before call? */
/* TODO: This assumes all arguments are isize */
- asm_size = asm_get_size(&impl->asm);
+ /* Do the function call */
+ isize asm_offset = asm_get_size(&impl->asm);
if(func_index < impl->func_counter) {
- return_if_error(asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_size));
+ return_if_error(asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_offset));
} else {
/*
The location of the function has not been defined yet. Use call instruction with dummy data and change
the location once the location to the function is known
*/
CallDefer call_defer;
- call_defer.asm_index = asm_size;
+ call_defer.asm_index = asm_offset;
call_defer.func_index = func_index;
return_if_error(buffer_append(&impl->call_defer, &call_defer, sizeof(call_defer)));
return_if_error(asm_call_rel32(&impl->asm, 0));
}
+ /* Handle function result and cleanup */
{
AsmPtr dst;
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
@@ -350,30 +369,65 @@ int amal_exec_callr(i8 dst_reg, BufferView data) {
*/
int amal_exec_cmp(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
- (void)self;
- (void)dst_reg;
- (void)src_reg1;
- (void)src_reg2;
- /* TODO: Implement! */
- assert(bool_false && "TODO: Implement!");
- return 0;
+ IMPL
+
+ AsmPtr dst, src1, src2;
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ asm_ptr_init_disp(&src1, RBP, get_register_stack_offset(src_reg1));
+ asm_ptr_init_disp(&src2, RBP, get_register_stack_offset(src_reg2));
+
+ return_if_error(asm_mov_rm(&impl->asm, RCX, &dst));
+ return_if_error(asm_xor_rm64(&impl->asm, RCX, RCX));
+
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &src1));
+ return_if_error(asm_cmp_rm(&impl->asm, RAX, &src2));
+ return_if_error(asm_sete_r(&impl->asm, RCX));
+ return asm_mov_mr(&impl->asm, &dst, RCX);
}
-int amal_exec_jz(amal_executor *self, i8 dst_reg, i16 offset) {
- (void)self;
- (void)dst_reg;
- (void)offset;
- /* TODO: Implement! */
- assert(bool_false && "TODO: Implement!");
- return 0;
+int amal_exec_jz(amal_executor *self, i8 reg, u16 target_label) {
+ AsmPtr ptr;
+ u32 asm_offset;
+ IMPL
+
+ asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(reg));
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &ptr));
+ return_if_error(asm_cmp_rm64_imm(&impl->asm, RAX, 0));
+
+ asm_offset = asm_get_size(&impl->asm);
+ if(target_label < impl->label_counter) {
+ return asm_jz(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ } else {
+ JumpDefer jump_defer;
+ jump_defer.asm_index = asm_offset;
+ jump_defer.target_label = target_label;
+ jump_defer.condition = bool_true;
+ /*
+ Insert a dummy target. It has to be larger than INT8_MAX so that the rel32 variant of the jump is emitted,
+ allowing the target to be overridden later no matter how large the jump turns out to be
+ */
+ return_if_error(asm_jz(&impl->asm, INT32_MAX));
+ return buffer_append(&impl->jump_defer, &jump_defer, sizeof(jump_defer));
+ }
}
-int amal_exec_jmp(amal_executor *self, i16 offset) {
- (void)self;
- (void)offset;
- /* TODO: Implement! */
- assert(bool_false && "TODO: Implement!");
- return 0;
+int amal_exec_jmp(amal_executor *self, u16 target_label) {
+ IMPL
+ u32 asm_offset = asm_get_size(&impl->asm);
+ if(target_label < impl->label_counter) {
+ return asm_jmp(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ } else {
+ JumpDefer jump_defer;
+ jump_defer.asm_index = asm_offset;
+ jump_defer.target_label = target_label;
+ jump_defer.condition = bool_false;
+ /*
+ Insert a dummy target. It has to be larger than INT8_MAX so that the rel32 variant of the jump is emitted,
+ allowing the target to be overridden later no matter how large the jump turns out to be
+ */
+ return_if_error(asm_jmp(&impl->asm, INT32_MAX));
+ return buffer_append(&impl->jump_defer, &jump_defer, sizeof(jump_defer));
+ }
}
int amal_exec_ret(amal_executor *self, i8 reg) {
@@ -408,8 +462,33 @@ int amal_exec_func_start(amal_executor *self, u16 num_regs) {
int amal_exec_func_end(amal_executor *self) {
IMPL
+
+ JumpDefer *jump_defer = buffer_begin(&impl->jump_defer);
+ JumpDefer *jump_defer_end = buffer_end(&impl->jump_defer);
+ for(; jump_defer != jump_defer_end; ++jump_defer) {
+ i32 jump_offset;
+ if(jump_defer->target_label >= impl->label_counter) {
+ amal_log_error("Program attempted to jump to a label that doesn't exist (label %u, while there are only %u labels)", jump_defer->target_label, impl->label_counter);
+ return -1;
+ }
+ jump_offset = (isize)impl->label_asm_index[jump_defer->target_label] - (isize)jump_defer->asm_index;
+ if(jump_defer->condition)
+ asm_override_jcc_rel32(&impl->asm, jump_defer->asm_index, jump_offset);
+ else
+ asm_override_jmp_rel32(&impl->asm, jump_defer->asm_index, jump_offset);
+ }
+ buffer_clear(&impl->jump_defer);
+ impl->label_counter = 0;
+
return_if_error(asm_mov_rr(&impl->asm, RSP, RBP));
return_if_error(asm_popr(&impl->asm, RBP));
return_if_error(asm_popr(&impl->asm, RBX));
return asm_ret(&impl->asm, 0);
}
+
+int amal_exec_label(amal_executor *self) {
+ IMPL
+ assert(impl->label_counter < MAX_LABELS);
+ impl->label_asm_index[impl->label_counter++] = asm_get_size(&impl->asm);
+ return 0;
+}