Diffstat (limited to 'executor/x86_64')
-rw-r--r--  executor/x86_64/asm.c       |  86
-rw-r--r--  executor/x86_64/asm.h       |  18
-rw-r--r--  executor/x86_64/executor.c  | 129
3 files changed, 193 insertions(+), 40 deletions(-)
diff --git a/executor/x86_64/asm.c b/executor/x86_64/asm.c
index 73ae568..f032538 100644
--- a/executor/x86_64/asm.c
+++ b/executor/x86_64/asm.c
@@ -35,6 +35,10 @@ static void ins_start(Asm *self) {
asm_debug_str_buffer_index = 0;
}
+static int max(int a, int b) {
+ return a >= b ? a : b;
+}
+
static void ins_end(Asm *self, const char *fmt, ...) {
usize ins_end_offset;
usize i;
@@ -46,7 +50,7 @@ static void ins_end(Asm *self, const char *fmt, ...) {
fprintf(stderr, "%02x ", ((u8*)self->code)[i]);
}
/* Same padding for all instructions, no matter how long they are */
- for(i = 0; i < 35 - (ins_end_offset - ins_start_offset)*3; ++i) {
+ for(i = 0; i < (usize)max(0, 35 - (ins_end_offset - ins_start_offset)*3); ++i) {
putc(' ', stderr);
}
vfprintf(stderr, fmt, args);
@@ -160,9 +164,9 @@ void asm_ptr_init_index_disp(AsmPtr *self, Reg64 base, Reg64 index, i32 disp) {
}
int asm_init(Asm *self) {
- self->size = am_pagesize();
- amal_log_debug("asm: page size: %u", self->size);
- self->code = mmap(NULL, self->size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ self->allocated_size = am_pagesize();
+ amal_log_debug("asm: page size: %u", self->allocated_size);
+ self->code = mmap(NULL, self->allocated_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if(self->code == MAP_FAILED)
return -errno;
self->code_it = self->code;
@@ -171,12 +175,17 @@ int asm_init(Asm *self) {
void asm_deinit(Asm *self) {
if(self->code)
- munmap(self->code, self->size);
+ munmap(self->code, self->allocated_size);
self->code = NULL;
self->code_it = NULL;
- self->size = 0;
+ self->allocated_size = 0;
+}
+
+usize asm_get_size(Asm *self) {
+ return self->code_it - (u8*)self->code;
}
+#if 0
static void asm_print_code_hex(Asm *self) {
u8 *ptr;
int off;
@@ -196,13 +205,14 @@ static void asm_print_code_hex(Asm *self) {
if(off != 0)
putc('\n', stdout);
}
+#endif
int asm_execute(Asm *self) {
void (*func)();
- if(mprotect(self->code, self->size, PROT_READ | PROT_EXEC) != 0)
+ if(mprotect(self->code, self->allocated_size, PROT_READ | PROT_EXEC) != 0)
return -errno;
- asm_print_code_hex(self);
+ /*asm_print_code_hex(self);*/
/* TODO: Verify if this is valid on all platforms. According to ISO C standard it isn't? */
*(void**)(&func) = self->code;
@@ -214,17 +224,17 @@ int asm_execute(Asm *self) {
static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
usize current_offset;
current_offset = (u8*)self->code_it - (u8*)self->code;
- if(current_offset + size > self->size) {
+ if(current_offset + size > self->allocated_size) {
void *new_mem;
usize new_size;
- new_size = self->size + am_pagesize();
+ new_size = self->allocated_size + am_pagesize();
new_mem = mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if(self->code == MAP_FAILED)
return -errno;
- am_memcpy(new_mem, self->code, self->size);
+ am_memcpy(new_mem, self->code, self->allocated_size);
self->code = new_mem;
- self->size = new_size;
+ self->allocated_size = new_size;
self->code_it = (u8*)self->code + current_offset;
}
return 0;
@@ -232,7 +242,7 @@ static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
#ifdef DEBUG
static isize asm_get_capacity_left(Asm *self) {
- return (isize)self->size - (isize)((u8*)self->code_it - (u8*)self->code);
+ return (isize)self->allocated_size - (isize)((u8*)self->code_it - (u8*)self->code);
}
#endif
@@ -346,21 +356,20 @@ int asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src) {
return 0;
}
+/* Note: this is shown as the movabs instruction in Intel assembly syntax */
int asm_mov_ri(Asm *self, Reg64 dst, i64 immediate) {
ins_start(self);
- /* 10 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
return_if_error(asm_ensure_capacity(self, 10));
*self->code_it++ = REX_W;
*self->code_it++ = 0xB8 + dst;
am_memcpy(self->code_it, &immediate, sizeof(immediate));
self->code_it += sizeof(immediate);
- ins_end(self, "mov %s, %ld", reg64_to_str(dst), immediate);
+ ins_end(self, "mov %s, 0x%x", reg64_to_str(dst), immediate);
return 0;
}
int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x89;
@@ -371,7 +380,6 @@ int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x01;
@@ -382,7 +390,6 @@ int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x29;
@@ -393,7 +400,6 @@ int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- /* 3 bytes is the maximum size of the instruction. We don't how how large it will be so we prepare for the largest size */
return_if_error(asm_ensure_capacity(self, 4));
*self->code_it++ = REX_W;
*self->code_it++ = 0x0F;
@@ -403,6 +409,25 @@ int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
return 0;
}
+int asm_cqo(Asm *self) {
+ ins_start(self);
+ return_if_error(asm_ensure_capacity(self, 2));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x99;
+ ins_end(self, "cqo");
+ return 0;
+}
+
+int asm_idiv_rr(Asm *self, Reg64 src) {
+ ins_start(self);
+ return_if_error(asm_ensure_capacity(self, 4));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0xF7;
+ asm_rr(self, src, 0x7);
+ ins_end(self, "idiv %s", reg64_to_str(src));
+ return 0;
+}
+
int asm_pushr(Asm *self, Reg64 reg) {
ins_start(self);
return_if_error(asm_ensure_capacity(self, 1));
@@ -419,6 +444,26 @@ int asm_popr(Asm *self, Reg64 reg) {
return 0;
}
+/*
+ Note: This is sometimes called with @relative set to 0 (which prints "call -5"); in that case it is most likely a dummy call whose relative offset
+ is patched later with @asm_override_call_rel32. TODO: Update the ins_end debug print to take that into account somehow
+*/
+int asm_call_rel32(Asm *self, i32 relative) {
+ ins_start(self);
+ relative -= 5; /* In x86, the relative position starts from the next instruction */
+ return_if_error(asm_ensure_capacity(self, 5));
+ *self->code_it++ = 0xE8;
+ am_memcpy(self->code_it, &relative, sizeof(relative));
+ self->code_it += sizeof(relative);
+ ins_end(self, "call 0x%x", relative);
+ return 0;
+}
+
+void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
+ new_relative -= 5; /* In x86, the relative position starts from the next instruction */
+ am_memcpy((u8*)self->code + asm_index + 1, &new_relative, sizeof(new_relative));
+}
+
/* TODO: Remove these !*/
/* /r */
@@ -559,11 +604,12 @@ int asm_ret(Asm *self, u16 bytes) {
if(bytes == 0) {
return_if_error(asm_ensure_capacity(self, 1));
*self->code_it++ = 0xC3;
+ ins_end(self, "ret");
} else {
return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = 0xC2;
am_memcpy(self->code_it, &bytes, sizeof(bytes));
+ ins_end(self, "ret 0x%x", bytes);
}
- ins_end(self, "ret 0x%x", bytes);
return 0;
}
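
A minimal usage sketch of the new call-emission and patching pair above (hypothetical helper, not part of this commit; it assumes the Asm API from asm.h and the project's return_if_error macro): asm_call_rel32 takes the distance from the start of the call instruction to the target, a placeholder of 0 is fine for forward calls, and asm_override_call_rel32 rewrites the rel32 once the target offset is known. Both apply the -5 "relative to the next instruction" adjustment internally, so the caller never does.

    /* Hypothetical sketch: emit a forward call and patch it once the target exists. */
    static int emit_forward_call_example(Asm *a) {
        u32 call_site;
        usize target;

        call_site = asm_get_size(a);            /* offset of the call instruction */
        return_if_error(asm_call_rel32(a, 0));  /* dummy rel32, patched below */

        target = asm_get_size(a);               /* the code that follows is the callee */
        return_if_error(asm_ret(a, 0));

        /* Pass the raw distance call site -> target; the -5 next-instruction
           adjustment is applied inside asm_override_call_rel32. */
        asm_override_call_rel32(a, call_site, (i32)(target - call_site));
        return 0;
    }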
diff --git a/executor/x86_64/asm.h b/executor/x86_64/asm.h
index b374e44..7e5ac67 100644
--- a/executor/x86_64/asm.h
+++ b/executor/x86_64/asm.h
@@ -7,7 +7,7 @@
typedef struct {
void *code;
u8 *code_it;
- usize size;
+ usize allocated_size;
} Asm;
typedef enum {
@@ -47,6 +47,8 @@ void asm_ptr_init_index_disp(AsmPtr *self, Reg64 base, Reg64 index, i32 disp);
CHECK_RESULT int asm_init(Asm *self);
void asm_deinit(Asm *self);
+usize asm_get_size(Asm *self);
+
CHECK_RESULT int asm_execute(Asm *self);
CHECK_RESULT int asm_nop(Asm *self);
@@ -68,9 +70,23 @@ CHECK_RESULT int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src);
CHECK_RESULT int asm_add_rr(Asm *self, Reg64 dst, Reg64 src);
CHECK_RESULT int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src);
CHECK_RESULT int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src);
+/* Sign-extend RAX into RDX; this is needed by some instructions, such as idiv */
+CHECK_RESULT int asm_cqo(Asm *self);
+/*
+ Divide RDX:RAX by @src. Store the quotient in RAX and the remainder in RDX.
+ @asm_cqo should be called before this, since RAX needs to be sign extended into RDX
+*/
+CHECK_RESULT int asm_idiv_rr(Asm *self, Reg64 src);
CHECK_RESULT int asm_pushr(Asm *self, Reg64 reg);
CHECK_RESULT int asm_popr(Asm *self, Reg64 reg);
+/*
+ In x86 assembly, the @relative offset of a call is counted from the next instruction.
+ The caller should not apply that adjustment itself; it is handled
+ internally by this asm library.
+*/
+CHECK_RESULT int asm_call_rel32(Asm *self, i32 relative);
+void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative);
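
For context on the cqo/idiv pair declared above, here is a minimal sketch (hypothetical helper, assuming an initialized Asm and the project's return_if_error macro) of the sequence the comments describe, a signed 64-bit division of RAX by RCX:

    /* Sketch only: signed 64-bit division of RAX by RCX;
       the quotient ends up in RAX, the remainder in RDX. */
    static int emit_signed_div_rax_rcx(Asm *a) {
        return_if_error(asm_cqo(a));          /* sign-extend RAX into RDX:RAX */
        return_if_error(asm_idiv_rr(a, RCX)); /* quotient -> RAX, remainder -> RDX */
        return 0;
    }

Skipping asm_cqo (or zeroing RDX instead) produces wrong results or a divide fault for negative dividends, which is why the header insists on the pairing.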
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
index b53ccea..b7aa91f 100644
--- a/executor/x86_64/executor.c
+++ b/executor/x86_64/executor.c
@@ -1,5 +1,6 @@
#include "../executor.h"
#include "../../include/std/alloc.h"
+#include "../../include/std/buffer.h"
#include "asm.h"
#include <assert.h>
@@ -11,7 +12,16 @@
*/
typedef struct {
+ u32 asm_index;
+ u16 func_index;
+} CallDefer;
+
+typedef struct {
Asm asm;
+ usize *function_indices;
+ u16 num_functions;
+ u16 func_counter;
+ Buffer/*CallDefer*/ call_defer;
} amal_executor_impl;
#define IMPL \
@@ -26,13 +36,20 @@ static i64 abs_i64(i64 value) {
int amal_executor_init(amal_executor **self) {
amal_executor_impl **impl;
- return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)self));
impl = (amal_executor_impl**)self;
+ *impl = NULL;
+ return_if_error(am_malloc(sizeof(amal_executor_impl), (void**)impl));
+ (*impl)->function_indices = NULL;
+ (*impl)->num_functions = 0;
+ (*impl)->func_counter = 0;
+ ignore_result_int(buffer_init(&(*impl)->call_defer, NULL));
return asm_init(&(*impl)->asm);
}
void amal_executor_deinit(amal_executor *self) {
IMPL
+ buffer_deinit(&impl->call_defer);
+ am_free(impl->function_indices);
asm_deinit(&impl->asm);
am_free(impl);
}
@@ -42,6 +59,30 @@ int amal_executor_run(amal_executor *self) {
return asm_execute(&impl->asm);
}
+int amal_executor_instructions_start(amal_executor *self, u16 num_functions) {
+ void *new_data;
+ IMPL
+ return_if_error(am_realloc(impl->function_indices, num_functions * sizeof(*impl->function_indices), &new_data));
+ impl->function_indices = new_data;
+ impl->num_functions = num_functions;
+ impl->func_counter = 0;
+ buffer_clear(&impl->call_defer);
+ return 0;
+}
+
+int amal_executor_instructions_end(amal_executor *self) {
+ CallDefer *call_defer, *call_defer_end;
+ IMPL
+
+ call_defer = buffer_begin(&impl->call_defer);
+ call_defer_end = buffer_end(&impl->call_defer);
+ for(; call_defer != call_defer_end; ++call_defer) {
+ const isize func_offset = (isize)impl->function_indices[call_defer->func_index] - (isize)call_defer->asm_index;
+ asm_override_call_rel32(&impl->asm, call_defer->asm_index, func_offset);
+ }
+ return 0;
+}
+
int amal_exec_nop(amal_executor *self) {
IMPL
return asm_nop(&impl->asm);
@@ -158,16 +199,25 @@ int amal_exec_mul(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
return_if_error(asm_mul_rr(&self->asm, RAX, RCX));
return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
#endif
+ assert(bool_false && "TODO: Implement!");
return 0;
}
int amal_exec_idiv(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
- (void)self;
- (void)dst_reg;
- (void)src_reg1;
- (void)src_reg2;
- /* TODO: Implement! */
- return 0;
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+ IMPL
+
+ asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
+ asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
+ asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
+
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
+ return_if_error(asm_cqo(&impl->asm));
+ return_if_error(asm_idiv_rr(&impl->asm, RCX));
+ return asm_mov_mr(&impl->asm, &dst, RAX);
}
int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
@@ -176,20 +226,24 @@ int amal_exec_div(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
(void)src_reg1;
(void)src_reg2;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
return 0;
}
int amal_exec_push(amal_executor *self, u8 reg) {
- (void)self;
- (void)reg;
- /* TODO: Implement! */
- return 0;
+ AsmPtr reg_ptr;
+ IMPL
+
+ asm_ptr_init_disp(&reg_ptr, RBP, get_register_stack_offset(reg));
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &reg_ptr));
+ return asm_pushr(&impl->asm, RAX);
}
int amal_exec_pushi(amal_executor *self, i64 imm) {
(void)self;
(void)imm;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
return 0;
}
@@ -197,17 +251,48 @@ int amal_exec_pushd(amal_executor *self, BufferView data) {
(void)self;
(void)data;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
+ return 0;
+}
+
+int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args) {
+ isize asm_size;
+ IMPL
+ /* TODO: Preserve necessary registers before call? */
+ /* TODO: This assumes all arguments are isize */
+ asm_size = asm_get_size(&impl->asm);
+ if(func_index < impl->func_counter) {
+ return_if_error(asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_size));
+ } else {
+ /*
+ The location of the function has not been defined yet. Emit the call instruction with placeholder data and patch
+ the relative offset once the function's location is known
+ */
+ CallDefer call_defer;
+ call_defer.asm_index = asm_size;
+ call_defer.func_index = func_index;
+ return_if_error(buffer_append(&impl->call_defer, &call_defer, sizeof(call_defer)));
+ return_if_error(asm_call_rel32(&impl->asm, 0));
+ }
+
+ if(num_args > 0)
+ return asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
return 0;
}
-/*int amal_exec_call(u8 dst_reg, BufferView data);
-int amal_exec_callr(u8 dst_reg, BufferView data);*/
+/*
+int amal_exec_callr(u8 dst_reg, BufferView data) {
+
+}
+*/
+
int amal_exec_cmp(amal_executor *self, u8 dst_reg, u8 src_reg1, u8 src_reg2) {
(void)self;
(void)dst_reg;
(void)src_reg1;
(void)src_reg2;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
return 0;
}
@@ -216,6 +301,7 @@ int amal_exec_jz(amal_executor *self, u8 dst_reg, i16 offset) {
(void)dst_reg;
(void)offset;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
return 0;
}
@@ -223,14 +309,18 @@ int amal_exec_jmp(amal_executor *self, i16 offset) {
(void)self;
(void)offset;
/* TODO: Implement! */
+ assert(bool_false && "TODO: Implement!");
return 0;
}
-int amal_exec_ret(amal_executor *self) {
- (void)self;
- /* TODO: Implement! */
- assert(bool_false && "TODO: Implement RET. RET needs to restore the stack before returning");
- return 0;
+int amal_exec_ret(amal_executor *self, u8 reg) {
+ AsmPtr ret_reg;
+ IMPL
+
+ asm_ptr_init_disp(&ret_reg, RBP, get_register_stack_offset(reg));
+ /* The result is returned in the RAX register. TODO: Make this work for larger data */
+ return_if_error(asm_mov_rm(&impl->asm, RAX, &ret_reg));
+ return amal_exec_func_end(self);
}
int amal_exec_func_start(amal_executor *self, u16 num_regs) {
@@ -246,10 +336,11 @@ int amal_exec_func_start(amal_executor *self, u16 num_regs) {
64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
*/
IMPL
+ impl->function_indices[impl->func_counter++] = asm_get_size(&impl->asm);
return_if_error(asm_pushr(&impl->asm, RBX));
return_if_error(asm_pushr(&impl->asm, RBP));
return_if_error(asm_mov_rr(&impl->asm, RBP, RSP));
- return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(usize));
+ return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(isize));
}
int amal_exec_func_end(amal_executor *self) {
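
The executor-side half of the mechanism is the CallDefer list: amal_exec_call emits a placeholder call for any function whose code has not been generated yet, and amal_executor_instructions_end patches every recorded call site once all function offsets are stored in function_indices. A hypothetical two-function program (indices and register counts are made up for illustration; return_if_error and executor creation are assumed to work as elsewhere in this codebase) would exercise that path roughly like this:

    /* Hypothetical sketch: function 0 calls function 1 before function 1 exists,
       so amal_exec_call records a CallDefer instead of a final rel32. */
    static int build_and_run(amal_executor *executor) {
        return_if_error(amal_executor_instructions_start(executor, 2));

        /* function 0 */
        return_if_error(amal_exec_func_start(executor, 2));
        return_if_error(amal_exec_call(executor, 1, 0)); /* forward call, deferred */
        return_if_error(amal_exec_func_end(executor));

        /* function 1: its start offset is stored in function_indices[1] */
        return_if_error(amal_exec_func_start(executor, 1));
        return_if_error(amal_exec_ret(executor, 0));     /* also emits the epilogue */

        /* all offsets known: rewrite every deferred call site, then run */
        return_if_error(amal_executor_instructions_end(executor));
        return amal_executor_run(executor);
    }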