aboutsummaryrefslogtreecommitdiff
path: root/executor/x86_64
diff options
context:
space:
mode:
authordec05eba <dec05eba@protonmail.com>2019-08-14 01:30:08 +0200
committerdec05eba <dec05eba@protonmail.com>2020-07-25 14:36:46 +0200
commit664fbc5f5c947aaa04bbbf132d9c935959e34a9c (patch)
treefb25c4d6b8ccc5c6c7d02ad1170947096ff684e9 /executor/x86_64
parentea97370f973374f863e4296c2bb872be8b5235a3 (diff)
Move program code generation and execution out of program (make it generic)
Diffstat (limited to 'executor/x86_64')
-rw-r--r--executor/x86_64/executor.c261
1 files changed, 261 insertions, 0 deletions
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
new file mode 100644
index 0000000..b53ccea
--- /dev/null
+++ b/executor/x86_64/executor.c
@@ -0,0 +1,261 @@
+#include "../executor.h"
+#include "../../include/std/alloc.h"
+#include "asm.h"
+#include <assert.h>
+
+/*
+ TODO: Currently almost all operations are performed on memory. This should be optimized
+ to take advantage of registers.
+
+ TODO: Operations with memory registers could access outside the stack. Should this be checked?
+*/
+
/*
    Private implementation behind the opaque amal_executor handle.
    Currently it only wraps the x86_64 assembler that machine code is
    emitted into and executed from.
*/
typedef struct {
    Asm asm;
} amal_executor_impl;
+
/*
    Common prologue for the executor functions below: downcast the opaque
    amal_executor* parameter (always named `self`) to the private impl type.
    Declaration and assignment are split so the macro remains valid at the
    end of a C89-style declaration list.
*/
#define IMPL \
    amal_executor_impl *impl; \
    impl = (amal_executor_impl*)self;
+
/*
    Byte offset, relative to RBP, of virtual register @reg's 8-byte stack
    slot: register 0 lives at [rbp-8], register 1 at [rbp-16], and so on.
    The argument and the whole expansion are parenthesized so the macro is
    safe with expression arguments and inside larger expressions (the
    original expanded `reg` bare, which would mis-evaluate e.g. `a + b`).
*/
#define get_register_stack_offset(reg) (-(i32)((reg) * (int)sizeof(usize) + (int)sizeof(usize)))
+
+static i64 abs_i64(i64 value) {
+ return value >= 0 ? value : -value;
+}
+
+int amal_executor_init(amal_executor **self) {
+ amal_executor_impl **impl;
+ return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)self));
+ impl = (amal_executor_impl**)self;
+ return asm_init(&(*impl)->asm);
+}
+
+void amal_executor_deinit(amal_executor *self) {
+ IMPL
+ asm_deinit(&impl->asm);
+ am_free(impl);
+}
+
+int amal_executor_run(amal_executor *self) {
+ IMPL
+ return asm_execute(&impl->asm);
+}
+
+int amal_exec_nop(amal_executor *self) {
+ IMPL
+ return asm_nop(&impl->asm);
+}
+
+int amal_exec_setz(amal_executor *self, u8 dst_reg) {
+ AsmPtr dst;
+ IMPL
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ return asm_mov_mi(&impl->asm, &dst, 0);
+}
+
+int amal_exec_mov(amal_executor *self, u8 dst_reg, u8 src_reg) {
+ AsmPtr ptr;
+ IMPL
+
+ asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(src_reg));
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &ptr));
+
+ asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(dst_reg));
+ return asm_mov_mr(&impl->asm, &ptr, RAX);
+}
+
+int amal_exec_movi(amal_executor *self, u8 dst_reg, i64 imm) {
+ IMPL
+ /* TODO: if @number is a float then use float instructions */
+ if(abs_i64(imm) <= INT32_MAX) {
+ AsmPtr dst;
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ return_if_error(asm_mov_mi(&impl->asm, &dst, imm));
+ } else {
+ AsmPtr dst;
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ return_if_error(asm_mov_ri(&impl->asm, RAX, imm));
+ return_if_error(asm_mov_mr(&impl->asm, &dst, RAX));
+ }
+ return 0;
+}
+
+int amal_exec_movd(amal_executor *self, u8 dst_reg, BufferView data) {
+ AsmPtr dst;
+ IMPL
+
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ return_if_error(asm_mov_ri(&impl->asm, RAX, (uintptr_t)data.data));
+ return asm_mov_mr(&impl->asm, &dst, RAX);
+}
+
+int amal_exec_add(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+ IMPL
+
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
+ asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
+ return_if_error(asm_add_rr(&impl->asm, RAX, RCX));
+ return asm_mov_mr(&impl->asm, &dst, RAX);
+}
+
+int amal_exec_sub(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+ IMPL
+
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
+ asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
+ return_if_error(asm_sub_rr(&impl->asm, RAX, RCX));
+ return asm_mov_mr(&impl->asm, &dst, RAX);
+}
+
+int amal_exec_imul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+ IMPL
+
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
+ asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
+ return_if_error(asm_imul_rr(&impl->asm, RAX, RCX));
+ return asm_mov_mr(&impl->asm, &dst, RAX);
+}
+
/*
    dst_reg = src_reg1 * src_reg2 (unsigned multiply). Not implemented yet;
    currently a no-op that reports success.
    NOTE(review): returning 0 for an unimplemented opcode means bytecode using
    MUL silently computes nothing -- consider failing loudly until implemented.
*/
int amal_exec_mul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
    (void)self;
    (void)dst_reg;
    (void)src_reg1;
    (void)src_reg2;
    /* TODO: Implement! */
    /*
        The disabled draft below predates the executor refactor: it accesses
        self->asm directly and calls get_register_at_offset, neither of which
        exists in this file anymore. Rewrite against impl->asm and
        get_register_stack_offset when implementing.
    */
    #if 0
    AsmPtr dst;
    AsmPtr reg1;
    AsmPtr reg2;

    asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
    asm_ptr_init_disp(&reg1, RBP, -(i32)get_register_at_offset(1));
    asm_ptr_init_disp(&reg2, RBP, -(i32)get_register_at_offset(2));

    return_if_error(asm_mov_rm(&self->asm, RAX, &reg1));
    return_if_error(asm_mov_rm(&self->asm, RCX, &reg2));
    return_if_error(asm_mul_rr(&self->asm, RAX, RCX));
    return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
    #endif
    return 0;
}
+
/*
    dst_reg = src_reg1 / src_reg2 (signed divide). Not implemented yet.
    NOTE(review): silently reports success without emitting code.
*/
int amal_exec_idiv(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
    (void)self;
    (void)dst_reg;
    (void)src_reg1;
    (void)src_reg2;
    /* TODO: Implement! */
    return 0;
}
+
/*
    dst_reg = src_reg1 / src_reg2 (unsigned divide). Not implemented yet.
    NOTE(review): silently reports success without emitting code.
*/
int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
    (void)self;
    (void)dst_reg;
    (void)src_reg1;
    (void)src_reg2;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Push the value of virtual register @reg onto the stack (e.g. for a call
    argument -- TODO confirm intended use). Not implemented yet.
    NOTE(review): silently reports success without emitting code.
*/
int amal_exec_push(amal_executor *self, u8 reg) {
    (void)self;
    (void)reg;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Push the immediate value @imm onto the stack. Not implemented yet.
    NOTE(review): silently reports success without emitting code.
*/
int amal_exec_pushi(amal_executor *self, i64 imm) {
    (void)self;
    (void)imm;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Push a pointer to constant data @data onto the stack. Not implemented yet.
    NOTE(review): silently reports success without emitting code.
*/
int amal_exec_pushd(amal_executor *self, BufferView data) {
    (void)self;
    (void)data;
    /* TODO: Implement! */
    return 0;
}
+
/*int amal_exec_call(u8 dst_reg, BufferView data);
int amal_exec_callr(u8 dst_reg, BufferView data);*/
/*
    Compare src_reg1 against src_reg2, storing the result in dst_reg
    (exact comparison semantics TBD -- not implemented yet).
    NOTE(review): silently reports success without emitting code, so
    comparisons in bytecode are currently lost.
*/
int amal_exec_cmp(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
    (void)self;
    (void)dst_reg;
    (void)src_reg1;
    (void)src_reg2;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Jump by @offset if the value in dst_reg is zero (offset units unconfirmed
    from here -- bytecode instructions or bytes). Not implemented yet.
    NOTE(review): silently reports success, so conditional branches are lost.
*/
int amal_exec_jz(amal_executor *self, u8 dst_reg, i16 offset) {
    (void)self;
    (void)dst_reg;
    (void)offset;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Unconditional jump by @offset (offset units unconfirmed from here).
    Not implemented yet.
    NOTE(review): silently reports success, so jumps are currently lost.
*/
int amal_exec_jmp(amal_executor *self, i16 offset) {
    (void)self;
    (void)offset;
    /* TODO: Implement! */
    return 0;
}
+
/*
    Return from the current bytecode function. Not implemented: a bare ret
    here would bypass the stack restore done in amal_exec_func_end, so this
    traps in debug builds instead of emitting anything.
    NOTE(review): with NDEBUG the assert disappears and this reports success
    while emitting nothing -- consider returning an error code instead.
*/
int amal_exec_ret(amal_executor *self) {
    (void)self;
    /* TODO: Implement! */
    assert(bool_false && "TODO: Implement RET. RET needs to restore the stack before returning");
    return 0;
}
+
/*
    Emit the prologue for a bytecode function using @num_regs virtual
    registers: save the callee-saved registers this backend clobbers,
    establish RBP as the frame pointer, and reserve one 8-byte stack slot
    per virtual register (addressed via get_register_stack_offset).
*/
int amal_exec_func_start(amal_executor *self, u16 num_regs) {
    /*
    TODO: Validate stack size, or maybe remove all validation? do we really need validation?
    If we need security, we could fork the process instead.
    */

    /*
    Some registers need to be preserved before entering a function scope and these registers are different on different platforms.
    32-bit: EBX, ESI, EDI, EBP
    64-bit Windows: RBX, RSI, RDI, RBP, R12-R15, XMM6-XMM15
    64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
    */
    /*
        NOTE(review): only RBX and RBP are actually pushed below, so on
        64-bit Windows RSI/RDI are not preserved as the comment above
        requires -- confirm before using this backend there. Also note the
        sub below does not round the frame to 16 bytes, so RSP alignment at
        call sites depends on num_regs -- verify against the call emitter.
    */
    IMPL
    return_if_error(asm_pushr(&impl->asm, RBX));
    return_if_error(asm_pushr(&impl->asm, RBP));
    return_if_error(asm_mov_rr(&impl->asm, RBP, RSP));
    return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(usize));
}
+
+int amal_exec_func_end(amal_executor *self) {
+ IMPL
+ return_if_error(asm_mov_rr(&impl->asm, RSP, RBP));
+ return_if_error(asm_popr(&impl->asm, RBP));
+ return_if_error(asm_popr(&impl->asm, RBX));
+ return asm_ret(&impl->asm, 0);
+}