author    dec05eba <dec05eba@protonmail.com>  2019-08-25 11:18:13 +0200
committer dec05eba <dec05eba@protonmail.com>  2020-07-25 14:36:46 +0200
commit    4d8283e14b0365038b01df4fab5dee75035007ed (patch)
tree      758545eeeb9b7eae3b04fdc6a863fd06f3039f6d
parent    d9f652919961a2947452ad3c4af4659f3d2fb330 (diff)
Remove number of branches in executor
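
Every asm_* emitter used to call asm_ensure_capacity() before writing its
bytes and returned a CHECK_RESULT int that every call site had to propagate
with return_if_error. asm_ensure_capacity() is now exported from asm.h
instead, and the executor reserves code memory once per VM instruction: the
IMPL macro becomes IMPL_START, which also reserves 256 bytes up front. With
the capacity check hoisted out, the emitters can write their bytes
unconditionally and return void, removing one branch (and one error path)
per emitted x86 instruction. The commit also adds
amal_compiler_find_tokenizer_by_code_reference(), which maps a pointer into
source code back to the tokenizer that owns it.

A condensed sketch of the before/after pattern, using asm_nop as the
example (simplified: the real emitters also call ins_start/ins_end for
debug output):

    /* before: every emitter could fail, so every call site branched */
    CHECK_RESULT int asm_nop(Asm *self) {
        return_if_error(asm_ensure_capacity(self, 1));
        *self->code_it++ = 0x90;
        return 0;
    }

    /* after: the executor reserves an upper bound once per VM
       instruction, so the emitters themselves can no longer fail */
    #define IMPL_START \
        amal_executor_impl *impl = (amal_executor_impl*)self; \
        return_if_error(asm_ensure_capacity(&impl->asm, 256));

    void asm_nop(Asm *self) {
        *self->code_it++ = 0x90;
    }

256 bytes is a comfortable upper bound: a single x86-64 instruction is at
most 15 bytes, and no VM instruction here expands to more than a handful
of them.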
-rw-r--r--  executor/x86_64/asm.c       | 147
-rw-r--r--  executor/x86_64/asm.h       | 131
-rw-r--r--  executor/x86_64/executor.c  | 183
-rw-r--r--  include/compiler.h          |   6
-rw-r--r--  include/defs.h              |   1
-rw-r--r--  include/tokenizer.h         |   6
-rw-r--r--  src/compiler.c              |  10
-rw-r--r--  src/tokenizer.c             |   4
8 files changed, 226 insertions(+), 262 deletions(-)
diff --git a/executor/x86_64/asm.c b/executor/x86_64/asm.c
index a6bf274..c2b00ef 100644
--- a/executor/x86_64/asm.c
+++ b/executor/x86_64/asm.c
@@ -224,7 +224,7 @@ int asm_execute(Asm *self, u32 offset) {
}
/* TODO: See how this can be optimized */
-static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
+int asm_ensure_capacity(Asm *self, usize size) {
usize current_offset;
current_offset = (u8*)self->code_it - (u8*)self->code;
if(current_offset + size > self->allocated_size) {
@@ -249,12 +249,10 @@ static isize asm_get_capacity_left(Asm *self) {
}
#endif
-int asm_nop(Asm *self) {
+void asm_nop(Asm *self) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 1));
*self->code_it++ = 0x90;
ins_end(self, "nop");
- return 0;
}
static i32 abs_i32(i32 value) {
@@ -331,152 +329,121 @@ static void asm_rr(Asm *self, Reg64 dst, Reg64 src) {
}
/* TODO: Implement 1 and 2 byte immediate? */
-int asm_mov_mi(Asm *self, AsmPtr *dst, i32 immediate) {
+void asm_mov_mi(Asm *self, AsmPtr *dst, i32 immediate) {
ins_start(self);
- /* 12 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 12));
*self->code_it++ = REX_W;
*self->code_it++ = 0xC7;
asm_rm(self, dst, 0);
am_memcpy(self->code_it, &immediate, sizeof(immediate));
self->code_it += sizeof(immediate);
ins_end(self, "mov %s, 0x%x", asm_ptr_to_string(dst), immediate);
- return 0;
}
-int asm_mov_mr(Asm *self, AsmPtr *dst, Reg64 src) {
+void asm_mov_mr(Asm *self, AsmPtr *dst, Reg64 src) {
ins_start(self);
- /* 8 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 8));
*self->code_it++ = REX_W;
*self->code_it++ = 0x89;
asm_rm(self, dst, src);
ins_end(self, "mov %s, %s", asm_ptr_to_string(dst), reg64_to_str(src));
- return 0;
}
-int asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src) {
+void asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src) {
ins_start(self);
- /* 8 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 8));
*self->code_it++ = REX_W;
*self->code_it++ = 0x8B;
asm_rm(self, src, dst);
ins_end(self, "mov %s, %s", reg64_to_str(dst), asm_ptr_to_string(src));
- return 0;
}
/* Note: This is shown as the movabs instruction in Intel assembly format */
-int asm_mov_ri(Asm *self, Reg64 dst, i64 immediate) {
+void asm_mov_ri(Asm *self, Reg64 dst, i64 immediate) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 10));
*self->code_it++ = REX_W;
*self->code_it++ = 0xB8 + dst;
am_memcpy(self->code_it, &immediate, sizeof(immediate));
self->code_it += sizeof(immediate);
ins_end(self, "mov %s, 0x%x", reg64_to_str(dst), immediate);
- return 0;
}
-int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
+void asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x89;
asm_rr(self, dst, src);
ins_end(self, "mov %s, %s", reg64_to_str(dst), reg64_to_str(src));
- return 0;
}
-int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
+void asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x01;
asm_rr(self, dst, src);
ins_end(self, "add %s, %s", reg64_to_str(dst), reg64_to_str(src));
- return 0;
}
-int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
+void asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0x29;
asm_rr(self, dst, src);
ins_end(self, "sub %s, %s", reg64_to_str(dst), reg64_to_str(src));
- return 0;
}
-int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
+void asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 4));
*self->code_it++ = REX_W;
*self->code_it++ = 0x0F;
*self->code_it++ = 0xAF;
asm_rr(self, dst, src);
ins_end(self, "imul %s, %s", reg64_to_str(dst), reg64_to_str(src));
- return 0;
}
-int asm_cqo(Asm *self) {
+void asm_cqo(Asm *self) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 2));
*self->code_it++ = REX_W;
*self->code_it++ = 0x99;
ins_end(self, "cqo");
- return 0;
}
-int asm_idiv_rr(Asm *self, Reg64 src) {
+void asm_idiv_rr(Asm *self, Reg64 src) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 4));
*self->code_it++ = REX_W;
*self->code_it++ = 0xF7;
asm_rr(self, src, 0x7);
ins_end(self, "idiv %s", reg64_to_str(src));
- return 0;
}
-int asm_pushr(Asm *self, Reg64 reg) {
+void asm_pushr(Asm *self, Reg64 reg) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 1));
*self->code_it++ = 0x50 + reg;
ins_end(self, "push %s", reg64_to_str(reg));
- return 0;
}
-int asm_popr(Asm *self, Reg64 reg) {
+void asm_popr(Asm *self, Reg64 reg) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 1));
*self->code_it++ = 0x58 + reg;
ins_end(self, "pop %s", reg64_to_str(reg));
- return 0;
}
-int asm_callr(Asm *self, Reg64 reg) {
+void asm_callr(Asm *self, Reg64 reg) {
ins_start(self);
- return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = REX_W;
*self->code_it++ = 0xFF;
asm_rr(self, reg, 0x2);
ins_end(self, "call %s", reg64_to_str(reg));
- return 0;
}
/*
Note: This is sometimes called with @relative 0 (will print call -5), in which case it's most likely a dummy call until the relative position
is later changed with @asm_override_call_rel32. TODO: Update the ins_end debug print to take that into account somehow
*/
-int asm_call_rel32(Asm *self, i32 relative) {
+void asm_call_rel32(Asm *self, i32 relative) {
ins_start(self);
relative -= 5; /* In x86, the relative position starts from the next instruction */
- return_if_error(asm_ensure_capacity(self, 5));
*self->code_it++ = 0xE8;
am_memcpy(self->code_it, &relative, sizeof(relative));
self->code_it += sizeof(relative);
ins_end(self, "call 0x%x", relative);
- return 0;
}
void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
@@ -485,39 +452,30 @@ void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative) {
am_memcpy((u8*)self->code + asm_index + 1, &new_relative, sizeof(new_relative));
}
-int asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2) {
+void asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2) {
ins_start(self);
- /* 8 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 8));
*self->code_it++ = REX_W;
*self->code_it++ = 0x3B;
asm_rm(self, reg2, reg1);
ins_end(self, "cmp %s, %s", reg64_to_str(reg1), asm_ptr_to_string(reg2));
- return 0;
}
-int asm_sete_m(Asm *self, AsmPtr *dst) {
+void asm_sete_m(Asm *self, AsmPtr *dst) {
assert(dst->base != RSP && dst->base != RBP && dst->base != RSI && dst->base != RDI);
ins_start(self);
- /* 8 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 8));
*self->code_it++ = 0x0F;
*self->code_it++ = 0x94;
asm_rm(self, dst, 0x0); /* the @src bits are not used */
ins_end(self, "sete %s", asm_ptr_to_string(dst));
- return 0;
}
-int asm_sete_r(Asm *self, Reg64 dst) {
+void asm_sete_r(Asm *self, Reg64 dst) {
assert(dst != RSP && dst != RBP && dst != RSI && dst != RDI);
ins_start(self);
- /* 8 bytes is the maximum size of the instruction. We don't know how large it will be, so we prepare for the largest size */
- return_if_error(asm_ensure_capacity(self, 8));
*self->code_it++ = 0x0F;
*self->code_it++ = 0x94;
asm_rr(self, dst, 0x0); /* the @src bits are not used */
ins_end(self, "sete %s", reg64_to_str(dst));
- return 0;
}
/*
@@ -525,7 +483,7 @@ int asm_sete_r(Asm *self, Reg64 dst) {
jump until the relative position is later changed with @asm_override_jcc_rel32.
TODO: Update the ins_end debug print to take that into account somehow
*/
-int asm_jz(Asm *self, i32 relative) {
+void asm_jz(Asm *self, i32 relative) {
/*
Note: We don't use the 16-bit relative variant, as it would clear the upper two bytes of the EIP register, resulting
in a maximum instruction pointer size of 16 bits
@@ -533,19 +491,16 @@ int asm_jz(Asm *self, i32 relative) {
ins_start(self);
if(abs(relative - 2) <= INT8_MAX) {
relative -= 2;
- return_if_error(asm_ensure_capacity(self, 2));
*self->code_it++ = 0x74;
*self->code_it++ = (i8)relative;
} else {
relative -= 6;
- return_if_error(asm_ensure_capacity(self, 6));
*self->code_it++ = 0x0F;
*self->code_it++ = 0x84;
am_memcpy(self->code_it, &relative, sizeof(relative));
self->code_it += sizeof(relative);
}
ins_end(self, "jz 0x%x", relative);
- return 0;
}
void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative) {
@@ -561,7 +516,7 @@ void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative) {
jump until the relative position is later changed with @asm_override_jmp_rel32.
TODO: Update the ins_end debug print to take that into account somehow
*/
-int asm_jmp(Asm *self, i32 relative) {
+void asm_jmp(Asm *self, i32 relative) {
/*
Note: We don't use the 16-bit relative variant, as it would clear the upper two bytes of the EIP register, resulting
in a maximum instruction pointer size of 16 bits
@@ -569,18 +524,15 @@ int asm_jmp(Asm *self, i32 relative) {
ins_start(self);
if(abs(relative - 2) <= INT8_MAX) {
relative -= 2;
- return_if_error(asm_ensure_capacity(self, 2));
*self->code_it++ = 0xEB;
*self->code_it++ = (i8)relative;
} else {
relative -= 5;
- return_if_error(asm_ensure_capacity(self, 5));
*self->code_it++ = 0xE9;
am_memcpy(self->code_it, &relative, sizeof(relative));
self->code_it += sizeof(relative);
}
ins_end(self, "jmp 0x%x", relative);
- return 0;
}
void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative) {
@@ -594,29 +546,22 @@ void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative) {
/* /r */
#define DEFINE_INS_RM(mnemonic, opcode) \
-int asm_##mnemonic##_rmb(Asm *self, Reg32 dst, Reg32 src) { \
+void asm_##mnemonic##_rmb(Asm *self, Reg32 dst, Reg32 src) { \
*self->code_it++ = opcode; \
*self->code_it++ = 0xC0 + 8*dst + src; \
- return 0; \
} \
\
-int asm_##mnemonic##_rm32(Asm *self, Reg32 dst, Reg32 src) { \
- int result; \
+void asm_##mnemonic##_rm32(Asm *self, Reg32 dst, Reg32 src) { \
ins_start(self); \
- return_if_error(asm_ensure_capacity(self, 2)); \
- result = asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
+ asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
ins_end(self, #mnemonic" %s, %s", reg32_to_str(dst), reg32_to_str(src)); \
- return result; \
} \
\
-int asm_##mnemonic##_rm64(Asm *self, Reg64 dst, Reg64 src) { \
- int result; \
+void asm_##mnemonic##_rm64(Asm *self, Reg64 dst, Reg64 src) { \
ins_start(self); \
- return_if_error(asm_ensure_capacity(self, 3)); \
*self->code_it++ = REX_W; \
- result = asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
+ asm_##mnemonic##_rmb(self, (Reg32)dst, (Reg32)src); \
ins_end(self, #mnemonic" %s, %s", reg64_to_str(dst), reg64_to_str(src)); \
- return result; \
}
DEFINE_INS_RM(mov, 0x8B)
@@ -634,38 +579,30 @@ DEFINE_INS_RM(cmp, 0x3B)
one register the other register can be encoded for that.
*/
#define DEFINE_INS_EXT_IMM(mnemonic, extension) \
-int asm_##mnemonic##_rmb_imm(Asm *self, Reg32 reg, i32 immediate) { \
+void asm_##mnemonic##_rmb_imm(Asm *self, Reg32 reg, i32 immediate) { \
if(abs_i32(immediate) <= INT8_MAX) { \
- return_if_error(asm_ensure_capacity(self, 3)); \
*self->code_it++ = 0x83; \
*self->code_it++ = 0xC0 + 8*extension + reg; \
*self->code_it++ = (u8)immediate; \
} else { \
- return_if_error(asm_ensure_capacity(self, 6)); \
*self->code_it++ = 0x81; \
*self->code_it++ = 0xC0 + 8*extension + reg; \
am_memcpy(self->code_it, &immediate, sizeof(immediate)); \
self->code_it += sizeof(immediate); \
} \
- return 0; \
} \
\
-int asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i32 immediate) { \
- int result; \
+void asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i32 immediate) { \
ins_start(self); \
- result = asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
+ asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
ins_end(self, #mnemonic" %s, 0x%x", reg32_to_str(reg), immediate); \
- return result; \
}\
\
-int asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i32 immediate) { \
- int result; \
+void asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i32 immediate) { \
ins_start(self); \
- return_if_error(asm_ensure_capacity(self, 1)); \
*self->code_it++ = REX_W; \
- result = asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
+ asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
ins_end(self, #mnemonic" %s, 0x%x", reg64_to_str(reg), immediate); \
- return result; \
}
DEFINE_INS_EXT_IMM(add, 0)
@@ -684,36 +621,27 @@ DEFINE_INS_EXT_IMM(cmp, 7)
one register the other register can be encoded for that.
*/
#define DEFINE_INS_SHIFT_IMM8(mnemonic, extension) \
-int asm_##mnemonic##_rmb_imm(Asm *self, Reg32 reg, i8 immediate) { \
+void asm_##mnemonic##_rmb_imm(Asm *self, Reg32 reg, i8 immediate) { \
if(immediate == 1) { \
- return_if_error(asm_ensure_capacity(self, 2)); \
*self->code_it++ = 0xD1; \
*self->code_it++ = 0xC0 + 8*extension + reg; \
} else { \
- return_if_error(asm_ensure_capacity(self, 3)); \
*self->code_it++ = 0xC1; \
*self->code_it++ = 0xC0 + 8*extension + reg; \
*self->code_it++ = immediate; \
} \
- return 0; \
} \
\
-int asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i8 immediate) { \
- int result; \
+void asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i8 immediate) { \
ins_start(self); \
- result = asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
+ asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
ins_end(self, #mnemonic" %s, 0x%x", reg32_to_str(reg), immediate); \
- return result; \
} \
\
-int asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i8 immediate) { \
- int result; \
+void asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i8 immediate) { \
ins_start(self); \
- return_if_error(asm_ensure_capacity(self, 1)); \
*self->code_it++ = REX_W; \
- result = asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
+ asm_##mnemonic##_rmb_imm(self, (Reg32)reg, immediate); \
ins_end(self, #mnemonic" %s, 0x%x", reg64_to_str(reg), immediate); \
- return result; \
}
DEFINE_INS_SHIFT_IMM8(rol, 0)
@@ -725,17 +653,14 @@ DEFINE_INS_SHIFT_IMM8(shr, 5)
/*DEFINE_INS_SHIFT_IMM8(shl, 6)*/
DEFINE_INS_SHIFT_IMM8(sar, 7)
-int asm_ret(Asm *self, u16 bytes) {
+void asm_ret(Asm *self, u16 bytes) {
ins_start(self);
if(bytes == 0) {
- return_if_error(asm_ensure_capacity(self, 1));
*self->code_it++ = 0xC3;
ins_end(self, "ret");
} else {
- return_if_error(asm_ensure_capacity(self, 3));
*self->code_it++ = 0xC2;
am_memcpy(self->code_it, &bytes, sizeof(bytes));
self->code_it += sizeof(bytes);
ins_end(self, "ret 0x%x", bytes);
}
- return 0;
}
diff --git a/executor/x86_64/asm.h b/executor/x86_64/asm.h
index ac519e9..a3f1b5a 100644
--- a/executor/x86_64/asm.h
+++ b/executor/x86_64/asm.h
@@ -50,8 +50,9 @@ void asm_deinit(Asm *self);
usize asm_get_size(Asm *self);
CHECK_RESULT int asm_execute(Asm *self, u32 offset);
+CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size);
-CHECK_RESULT int asm_nop(Asm *self);
+void asm_nop(Asm *self);
@@ -61,48 +62,48 @@ CHECK_RESULT int asm_nop(Asm *self);
-CHECK_RESULT int asm_mov_mi(Asm *self, AsmPtr *dst, i32 immediate);
-CHECK_RESULT int asm_mov_mr(Asm *self, AsmPtr *dst, Reg64 src);
-CHECK_RESULT int asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src);
-CHECK_RESULT int asm_mov_ri(Asm *self, Reg64 dst, i64 immediate);
-CHECK_RESULT int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src);
+void asm_mov_mi(Asm *self, AsmPtr *dst, i32 immediate);
+void asm_mov_mr(Asm *self, AsmPtr *dst, Reg64 src);
+void asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src);
+void asm_mov_ri(Asm *self, Reg64 dst, i64 immediate);
+void asm_mov_rr(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_add_rr(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src);
+void asm_add_rr(Asm *self, Reg64 dst, Reg64 src);
+void asm_sub_rr(Asm *self, Reg64 dst, Reg64 src);
+void asm_imul_rr(Asm *self, Reg64 dst, Reg64 src);
/* Sign-extend RAX into RDX; this is needed for some operations, such as idiv */
-CHECK_RESULT int asm_cqo(Asm *self);
+void asm_cqo(Asm *self);
/*
Divide RDX:RAX by @src. Store the quotient in RAX and the remainder in RDX.
@asm_cqo should be called before this, since RAX needs to be sign extended into RDX
*/
-CHECK_RESULT int asm_idiv_rr(Asm *self, Reg64 src);
+void asm_idiv_rr(Asm *self, Reg64 src);
-CHECK_RESULT int asm_pushr(Asm *self, Reg64 reg);
-CHECK_RESULT int asm_popr(Asm *self, Reg64 reg);
-CHECK_RESULT int asm_callr(Asm *self, Reg64 reg);
+void asm_pushr(Asm *self, Reg64 reg);
+void asm_popr(Asm *self, Reg64 reg);
+void asm_callr(Asm *self, Reg64 reg);
/*
In x86 assembly, the @relative position starts from the next instruction.
This offset shouldn't be calculated by the caller and is instead managed
by this asm library itself.
*/
-CHECK_RESULT int asm_call_rel32(Asm *self, i32 relative);
+void asm_call_rel32(Asm *self, i32 relative);
void asm_override_call_rel32(Asm *self, u32 asm_index, i32 new_relative);
-CHECK_RESULT int asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2);
+void asm_cmp_rm(Asm *self, Reg64 reg1, AsmPtr *reg2);
/*
Sets the 8-bit memory operand to 1 if the last cmp was equal, otherwise sets it to 0.
Note: this instruction doesn't work with AH (RSP), CH (RBP), DH (RSI) and BH (RDI).
TODO: When ST, MM and XMM registers are implemented, also check for them as they are also invalid
*/
-CHECK_RESULT int asm_sete_m(Asm *self, AsmPtr *dst);
-CHECK_RESULT int asm_sete_r(Asm *self, Reg64 dst);
+void asm_sete_m(Asm *self, AsmPtr *dst);
+void asm_sete_r(Asm *self, Reg64 dst);
/*
In x86 assembly, the @relative position starts from the next instruction.
This offset shouldn't be calculated by the caller and is instead managed
by this asm library itself.
*/
-CHECK_RESULT int asm_jz(Asm *self, i32 relative);
+void asm_jz(Asm *self, i32 relative);
/* Override conditional jump target */
void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative);
/*
@@ -110,7 +111,7 @@ void asm_override_jcc_rel32(Asm *self, u32 asm_index, i32 new_relative);
This offset shouldn't be calculated by the caller and is instead managed
by this asm library itself.
*/
-CHECK_RESULT int asm_jmp(Asm *self, i32 relative);
+void asm_jmp(Asm *self, i32 relative);
void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative);
@@ -124,52 +125,52 @@ void asm_override_jmp_rel32(Asm *self, u32 asm_index, i32 new_relative);
-CHECK_RESULT int asm_mov_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_add_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_sub_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_and_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_or_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_xor_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_cmp_rm32(Asm *self, Reg32 dst, Reg32 src);
-CHECK_RESULT int asm_add_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_or_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_adc_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_sbb_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_and_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_sub_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_xor_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_cmp_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
-CHECK_RESULT int asm_rol_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_ror_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_rcl_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_rcr_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_shl_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_shr_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_sar_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_mov_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_add_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_sub_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_and_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_or_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_xor_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_cmp_rm32(Asm *self, Reg32 dst, Reg32 src);
+void asm_add_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_or_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_adc_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_sbb_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_and_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_sub_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_xor_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_cmp_rm32_imm(Asm *self, Reg32 reg, i32 immediate);
+void asm_rol_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_ror_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_rcl_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_rcr_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_shl_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_shr_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
+void asm_sar_rm32_imm(Asm *self, Reg32 reg, i8 immediate);
-CHECK_RESULT int asm_mov_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_add_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_sub_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_and_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_or_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_xor_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_cmp_rm64(Asm *self, Reg64 dst, Reg64 src);
-CHECK_RESULT int asm_add_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_or_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_adc_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_sbb_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_and_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_sub_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_xor_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_cmp_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
-CHECK_RESULT int asm_rol_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_ror_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_rcl_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_rcr_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_shl_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_shr_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_sar_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_mov_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_add_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_sub_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_and_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_or_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_xor_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_cmp_rm64(Asm *self, Reg64 dst, Reg64 src);
+void asm_add_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_or_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_adc_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_sbb_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_and_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_sub_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_xor_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_cmp_rm64_imm(Asm *self, Reg64 reg, i32 immediate);
+void asm_rol_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_ror_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_rcl_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_rcr_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_shl_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_shr_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
+void asm_sar_rm64_imm(Asm *self, Reg64 reg, i8 immediate);
-CHECK_RESULT int asm_ret(Asm *self, u16 bytes);
+void asm_ret(Asm *self, u16 bytes);
#endif
diff --git a/executor/x86_64/executor.c b/executor/x86_64/executor.c
index d35dbdc..9083e14 100644
--- a/executor/x86_64/executor.c
+++ b/executor/x86_64/executor.c
@@ -39,7 +39,9 @@ typedef struct {
int label_counter;
} amal_executor_impl;
-#define IMPL amal_executor_impl *impl = (amal_executor_impl*)self;
+#define IMPL_START \
+ amal_executor_impl *impl = (amal_executor_impl*)self; \
+ return_if_error(asm_ensure_capacity(&impl->asm, 256));
/*
@reg will be a positive value when accessing local variables, in which case the first
@@ -72,7 +74,7 @@ int amal_executor_init(amal_executor **self) {
}
void amal_executor_deinit(amal_executor *self) {
- IMPL
+ amal_executor_impl *impl = (amal_executor_impl*)self;
buffer_deinit(&impl->jump_defer);
buffer_deinit(&impl->call_defer);
am_free(impl->function_indices);
@@ -81,25 +83,24 @@ void amal_executor_deinit(amal_executor *self) {
}
int amal_executor_run(amal_executor *self, u32 offset) {
- IMPL
+ amal_executor_impl *impl = (amal_executor_impl*)self;
return asm_execute(&impl->asm, offset);
}
u32 amal_exec_get_code_offset(amal_executor *self) {
- IMPL
+ amal_executor_impl *impl = (amal_executor_impl*)self;
return asm_get_size(&impl->asm);
}
int amal_executor_instructions_start(amal_executor *self, u16 num_functions) {
- IMPL
+ amal_executor_impl *impl = (amal_executor_impl*)self;
return_if_error(am_realloc(impl->function_indices, num_functions * sizeof(usize), (void**)&impl->function_indices));
impl->num_functions = num_functions;
return 0;
}
int amal_executor_instructions_end(amal_executor *self) {
- IMPL
-
+ amal_executor_impl *impl = (amal_executor_impl*)self;
CallDefer *call_defer = buffer_begin(&impl->call_defer);
CallDefer *call_defer_end = buffer_end(&impl->call_defer);
for(; call_defer != call_defer_end; ++call_defer) {
@@ -117,99 +118,106 @@ int amal_executor_instructions_end(amal_executor *self) {
}
int amal_exec_nop(amal_executor *self) {
- IMPL
- return asm_nop(&impl->asm);
+ IMPL_START
+ asm_nop(&impl->asm);
+ return 0;
}
int amal_exec_setz(amal_executor *self, i8 dst_reg) {
AsmPtr dst;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
- return asm_mov_mi(&impl->asm, &dst, 0);
+ asm_mov_mi(&impl->asm, &dst, 0);
+ return 0;
}
int amal_exec_mov(amal_executor *self, i8 dst_reg, i8 src_reg) {
AsmPtr ptr;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(src_reg));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &ptr));
+ asm_mov_rm(&impl->asm, RAX, &ptr);
asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(dst_reg));
- return asm_mov_mr(&impl->asm, &ptr, RAX);
+ asm_mov_mr(&impl->asm, &ptr, RAX);
+ return 0;
}
int amal_exec_movi(amal_executor *self, i8 dst_reg, i64 imm) {
- IMPL
+ IMPL_START
/* TODO: if @imm is a float then use float instructions */
if(abs_i64(imm) <= INT32_MAX) {
AsmPtr dst;
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
- return_if_error(asm_mov_mi(&impl->asm, &dst, imm));
+ asm_mov_mi(&impl->asm, &dst, imm);
} else {
AsmPtr dst;
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
- return_if_error(asm_mov_ri(&impl->asm, RAX, imm));
- return_if_error(asm_mov_mr(&impl->asm, &dst, RAX));
+ asm_mov_ri(&impl->asm, RAX, imm);
+ asm_mov_mr(&impl->asm, &dst, RAX);
}
return 0;
}
int amal_exec_movd(amal_executor *self, i8 dst_reg, BufferView data) {
AsmPtr dst;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
- return_if_error(asm_mov_ri(&impl->asm, RAX, (uintptr_t)data.data));
- return asm_mov_mr(&impl->asm, &dst, RAX);
+ asm_mov_ri(&impl->asm, RAX, (uintptr_t)data.data);
+ asm_mov_mr(&impl->asm, &dst, RAX);
+ return 0;
}
int amal_exec_add(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
AsmPtr dst;
AsmPtr reg1;
AsmPtr reg2;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
- return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
- return_if_error(asm_add_rr(&impl->asm, RAX, RCX));
- return asm_mov_mr(&impl->asm, &dst, RAX);
+ asm_mov_rm(&impl->asm, RAX, &reg1);
+ asm_mov_rm(&impl->asm, RCX, &reg2);
+ asm_add_rr(&impl->asm, RAX, RCX);
+ asm_mov_mr(&impl->asm, &dst, RAX);
+ return 0;
}
int amal_exec_sub(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
AsmPtr dst;
AsmPtr reg1;
AsmPtr reg2;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
- return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
- return_if_error(asm_sub_rr(&impl->asm, RAX, RCX));
- return asm_mov_mr(&impl->asm, &dst, RAX);
+ asm_mov_rm(&impl->asm, RAX, &reg1);
+ asm_mov_rm(&impl->asm, RCX, &reg2);
+ asm_sub_rr(&impl->asm, RAX, RCX);
+ asm_mov_mr(&impl->asm, &dst, RAX);
+ return 0;
}
int amal_exec_imul(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
AsmPtr dst;
AsmPtr reg1;
AsmPtr reg2;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
- return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
- return_if_error(asm_imul_rr(&impl->asm, RAX, RCX));
- return asm_mov_mr(&impl->asm, &dst, RAX);
+ asm_mov_rm(&impl->asm, RAX, &reg1);
+ asm_mov_rm(&impl->asm, RCX, &reg2);
+ asm_imul_rr(&impl->asm, RAX, RCX);
+ asm_mov_mr(&impl->asm, &dst, RAX);
+ return 0;
}
int amal_exec_mul(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
@@ -240,18 +248,19 @@ int amal_exec_idiv(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
AsmPtr dst;
AsmPtr reg1;
AsmPtr reg2;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
asm_ptr_init_disp(&reg1, RBP, get_register_stack_offset(src_reg1));
asm_ptr_init_disp(&reg2, RBP, get_register_stack_offset(src_reg2));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &reg1));
- return_if_error(asm_mov_rm(&impl->asm, RCX, &reg2));
- return_if_error(asm_cqo(&impl->asm));
+ asm_mov_rm(&impl->asm, RAX, &reg1);
+ asm_mov_rm(&impl->asm, RCX, &reg2);
+ asm_cqo(&impl->asm);
/* TODO: Preserve RDX if needed, since it's also used as a parameter in system-v x86_64 abi */
- return_if_error(asm_idiv_rr(&impl->asm, RCX));
- return asm_mov_mr(&impl->asm, &dst, RAX);
+ asm_idiv_rr(&impl->asm, RCX);
+ asm_mov_mr(&impl->asm, &dst, RAX);
+ return 0;
}
int amal_exec_div(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
@@ -266,11 +275,12 @@ int amal_exec_div(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
int amal_exec_push(amal_executor *self, i8 reg) {
AsmPtr reg_ptr;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&reg_ptr, RBP, get_register_stack_offset(reg));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &reg_ptr));
- return asm_pushr(&impl->asm, RAX);
+ asm_mov_rm(&impl->asm, RAX, &reg_ptr);
+ asm_pushr(&impl->asm, RAX);
+ return 0;
}
int amal_exec_pushi(amal_executor *self, i64 imm) {
@@ -290,13 +300,13 @@ int amal_exec_pushd(amal_executor *self, BufferView data) {
}
int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args, i8 dst_reg) {
- IMPL
+ IMPL_START
/* TODO: Preserve necessary registers before call? */
/* TODO: This assumes all arguments are isize */
/* Do the function call */
isize asm_offset = asm_get_size(&impl->asm);
if(func_index < impl->func_counter) {
- return_if_error(asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_offset));
+ asm_call_rel32(&impl->asm, (isize)impl->function_indices[func_index] - asm_offset);
} else {
/*
The location of the function has not been defined yet. Use call instruction with dummy data and change
@@ -306,7 +316,7 @@ int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args, i8 dst_reg)
call_defer.asm_index = asm_offset;
call_defer.func_index = func_index;
return_if_error(buffer_append(&impl->call_defer, &call_defer, sizeof(call_defer)));
- return_if_error(asm_call_rel32(&impl->asm, 0));
+ asm_call_rel32(&impl->asm, 0);
}
/* Handle function result and cleanup */
@@ -314,10 +324,10 @@ int amal_exec_call(amal_executor *self, u16 func_index, u8 num_args, i8 dst_reg)
AsmPtr dst;
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
/* TODO: Make this work when result is not stored in RAX (multiple return results) */
- return_if_error(asm_mov_mr(&impl->asm, &dst, RAX));
+ asm_mov_mr(&impl->asm, &dst, RAX);
}
if(num_args > 0)
- return asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
+ asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
return 0;
}
@@ -333,7 +343,7 @@ const Reg64 SYS_V_PARAM_REGS[] = { RDI, RSI, RDX, RCX };
/* TODO: Make this work when function returns something else than a POD */
int amal_exec_calle(amal_executor *self, void *func, u8 num_args, i8 dst_reg) {
AsmPtr dst;
- IMPL
+ IMPL_START
/* TODO: Support R and XMM registers so more than 4 arguments can be passed to functions */
assert(num_args < 5);
@@ -346,19 +356,19 @@ int amal_exec_calle(amal_executor *self, void *func, u8 num_args, i8 dst_reg) {
AsmPtr src;
asm_ptr_init_disp(&src, RSP, 0);
for(i = num_args - 1; i >= 0; --i) {
- return_if_error(asm_mov_rm(&impl->asm, SYS_V_PARAM_REGS[i], &src));
+ asm_mov_rm(&impl->asm, SYS_V_PARAM_REGS[i], &src);
src.disp += 0x8;
}
}
/* TODO: Preserve necessary registers before call? */
/* TODO: This assumes all arguments are isize */
- return_if_error(asm_mov_ri(&impl->asm, RAX, (intptr_t)func));
- return_if_error(asm_callr(&impl->asm, RAX));
+ asm_mov_ri(&impl->asm, RAX, (intptr_t)func);
+ asm_callr(&impl->asm, RAX);
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
- return_if_error(asm_mov_mr(&impl->asm, &dst, RAX));
+ asm_mov_mr(&impl->asm, &dst, RAX);
if(num_args > 0)
- return asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
+ asm_add_rm64_imm(&impl->asm, RSP, num_args * sizeof(isize));
return 0;
}
@@ -369,34 +379,36 @@ int amal_exec_callr(i8 dst_reg, BufferView data) {
*/
int amal_exec_cmp(amal_executor *self, i8 dst_reg, i8 src_reg1, i8 src_reg2) {
- IMPL
+ IMPL_START
AsmPtr dst, src1, src2;
asm_ptr_init_disp(&dst, RBP, get_register_stack_offset(dst_reg));
asm_ptr_init_disp(&src1, RBP, get_register_stack_offset(src_reg1));
asm_ptr_init_disp(&src2, RBP, get_register_stack_offset(src_reg2));
- return_if_error(asm_mov_rm(&impl->asm, RCX, &dst));
- return_if_error(asm_xor_rm64(&impl->asm, RCX, RCX));
+ asm_mov_rm(&impl->asm, RCX, &dst);
+ asm_xor_rm64(&impl->asm, RCX, RCX);
- return_if_error(asm_mov_rm(&impl->asm, RAX, &src1));
- return_if_error(asm_cmp_rm(&impl->asm, RAX, &src2));
- return_if_error(asm_sete_r(&impl->asm, RCX));
- return asm_mov_mr(&impl->asm, &dst, RCX);
+ asm_mov_rm(&impl->asm, RAX, &src1);
+ asm_cmp_rm(&impl->asm, RAX, &src2);
+ asm_sete_r(&impl->asm, RCX);
+ asm_mov_mr(&impl->asm, &dst, RCX);
+ return 0;
}
int amal_exec_jz(amal_executor *self, i8 reg, u16 target_label) {
AsmPtr ptr;
u32 asm_offset;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&ptr, RBP, get_register_stack_offset(reg));
- return_if_error(asm_mov_rm(&impl->asm, RAX, &ptr));
- return_if_error(asm_cmp_rm64_imm(&impl->asm, RAX, 0));
+ asm_mov_rm(&impl->asm, RAX, &ptr);
+ asm_cmp_rm64_imm(&impl->asm, RAX, 0);
asm_offset = asm_get_size(&impl->asm);
if(target_label < impl->label_counter) {
- return asm_jz(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ asm_jz(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ return 0;
} else {
JumpDefer jump_defer;
jump_defer.asm_index = asm_offset;
@@ -406,16 +418,17 @@ int amal_exec_jz(amal_executor *self, i8 reg, u16 target_label) {
Insert dummy target, but it has to be above INT16_MAX, so the target can be replaced
no matter how large the jump will be
*/
- return_if_error(asm_jz(&impl->asm, INT32_MAX));
+ asm_jz(&impl->asm, INT32_MAX);
return buffer_append(&impl->jump_defer, &jump_defer, sizeof(jump_defer));
}
}
int amal_exec_jmp(amal_executor *self, u16 target_label) {
- IMPL
+ IMPL_START
u32 asm_offset = asm_get_size(&impl->asm);
if(target_label < impl->label_counter) {
- return asm_jmp(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ asm_jmp(&impl->asm, (i32)impl->label_asm_index[target_label] - (i32)asm_offset);
+ return 0;
} else {
JumpDefer jump_defer;
jump_defer.asm_index = asm_offset;
@@ -425,18 +438,18 @@ int amal_exec_jmp(amal_executor *self, u16 target_label) {
Insert dummy target, but it has to be above INT16_MAX, so the target can be replaced
no matter how large the jump will be
*/
- return_if_error(asm_jmp(&impl->asm, INT32_MAX));
+ asm_jmp(&impl->asm, INT32_MAX);
return buffer_append(&impl->jump_defer, &jump_defer, sizeof(jump_defer));
}
}
int amal_exec_ret(amal_executor *self, i8 reg) {
AsmPtr ret_reg;
- IMPL
+ IMPL_START
asm_ptr_init_disp(&ret_reg, RBP, get_register_stack_offset(reg));
/* Result is returned in RAX register. TODO: Make this work when returning more than one result */
- return_if_error(asm_mov_rm(&impl->asm, RAX, &ret_reg));
+ asm_mov_rm(&impl->asm, RAX, &ret_reg);
return amal_exec_func_end(self);
}
@@ -452,16 +465,17 @@ int amal_exec_func_start(amal_executor *self, u16 num_regs) {
64-bit Windows: RBX, RSI, RDI, RBP, R12-R15, XMM6-XMM15
64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
*/
- IMPL
+ IMPL_START
impl->function_indices[impl->func_counter++] = asm_get_size(&impl->asm);
- return_if_error(asm_pushr(&impl->asm, RBX));
- return_if_error(asm_pushr(&impl->asm, RBP));
- return_if_error(asm_mov_rr(&impl->asm, RBP, RSP));
- return asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(isize));
+ asm_pushr(&impl->asm, RBX);
+ asm_pushr(&impl->asm, RBP);
+ asm_mov_rr(&impl->asm, RBP, RSP);
+ asm_sub_rm64_imm(&impl->asm, RSP, num_regs * sizeof(isize));
+ return 0;
}
int amal_exec_func_end(amal_executor *self) {
- IMPL
+ IMPL_START
JumpDefer *jump_defer = buffer_begin(&impl->jump_defer);
JumpDefer *jump_defer_end = buffer_end(&impl->jump_defer);
@@ -480,14 +494,15 @@ int amal_exec_func_end(amal_executor *self) {
buffer_clear(&impl->jump_defer);
impl->label_counter = 0;
- return_if_error(asm_mov_rr(&impl->asm, RSP, RBP));
- return_if_error(asm_popr(&impl->asm, RBP));
- return_if_error(asm_popr(&impl->asm, RBX));
- return asm_ret(&impl->asm, 0);
+ asm_mov_rr(&impl->asm, RSP, RBP);
+ asm_popr(&impl->asm, RBP);
+ asm_popr(&impl->asm, RBX);
+ asm_ret(&impl->asm, 0);
+ return 0;
}
int amal_exec_label(amal_executor *self) {
- IMPL
+ IMPL_START
assert(impl->label_counter < MAX_LABELS);
impl->label_asm_index[impl->label_counter++] = asm_get_size(&impl->asm);
return 0;
diff --git a/include/compiler.h b/include/compiler.h
index ac50d28..3e3cc92 100644
--- a/include/compiler.h
+++ b/include/compiler.h
@@ -67,4 +67,10 @@ void amal_compiler_options_init(amal_compiler_options *self);
CHECK_RESULT int amal_compiler_load_file(amal_compiler_options *options, amal_program *program, const char *filepath);
CHECK_RESULT int amal_compiler_internal_load_file(amal_compiler *self, const char *filepath, FileScopeReference **file_scope);
+/*
+ Returns a reference to the parser's tokenizer that contains the code reference, or NULL.
+ Note: The lifetime of the tokenizer returned is the same as the lifetime of the parser that owns it.
+*/
+Tokenizer* amal_compiler_find_tokenizer_by_code_reference(amal_compiler *self, const char *code_ref);
+
#endif
diff --git a/include/defs.h b/include/defs.h
index c8db820..669d9c9 100644
--- a/include/defs.h
+++ b/include/defs.h
@@ -7,5 +7,6 @@ typedef struct Scope Scope;
typedef struct FileScopeReference FileScopeReference;
typedef struct FunctionDecl FunctionDecl;
typedef struct FunctionSignature FunctionSignature;
+typedef struct Tokenizer Tokenizer;
#endif
diff --git a/include/tokenizer.h b/include/tokenizer.h
index 57ed9de..b6b401b 100644
--- a/include/tokenizer.h
+++ b/include/tokenizer.h
@@ -4,6 +4,7 @@
#include "std/buffer_view.h"
#include "std/misc.h"
#include "std/defs.h"
+#include "defs.h"
#include "binop_type.h"
#include "compiler_options.h"
#include <stdarg.h>
@@ -42,7 +43,7 @@ typedef enum {
TOK_RETURN
} Token;
-typedef struct {
+struct Tokenizer {
BufferView code;
int index;
int prev_index;
@@ -64,7 +65,7 @@ typedef struct {
bool number_is_integer;
ArenaAllocator *allocator; /* borrowed */
const amal_compiler_options *compiler_options; /* borrowed */
-} Tokenizer;
+};
typedef struct {
int index;
@@ -84,5 +85,6 @@ void tokenizer_print_error_object(Tokenizer *self, TokenizerError *error);
TokenizerError tokenizer_create_error(Tokenizer *self, int index, const char *fmt, ...);
int tokenizer_get_error_index(Tokenizer *self);
int tokenizer_get_code_reference_index(Tokenizer *self, const char *ref);
+bool tokenizer_contains_code_reference(Tokenizer *self, const char *code_ref);
#endif
diff --git a/src/compiler.c b/src/compiler.c
index 3ccb86a..dbc1498 100644
--- a/src/compiler.c
+++ b/src/compiler.c
@@ -482,3 +482,13 @@ int amal_compiler_internal_load_file(amal_compiler *self, const char *filepath,
return 0;
}
+
+Tokenizer* amal_compiler_find_tokenizer_by_code_reference(amal_compiler *self, const char *code_ref) {
+ Parser **parser = buffer_begin(&self->parsers);
+ Parser **parser_end = buffer_end(&self->parsers);
+ for(; parser != parser_end; ++parser) {
+ if(tokenizer_contains_code_reference(&(*parser)->tokenizer, code_ref))
+ return &(*parser)->tokenizer;
+ }
+ return NULL;
+}
diff --git a/src/tokenizer.c b/src/tokenizer.c
index 82ea0f6..fd516f6 100644
--- a/src/tokenizer.c
+++ b/src/tokenizer.c
@@ -768,3 +768,7 @@ int tokenizer_get_code_reference_index(Tokenizer *self, const char *ref) {
return -1;
return ref - self->code.data;
}
+
+bool tokenizer_contains_code_reference(Tokenizer *self, const char *code_ref) {
+ return code_ref >= self->code.data && code_ref < self->code.data + self->code.size;
+}
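
The new tokenizer lookup is a plain pointer-range check over every parser
owned by the compiler. A hypothetical call site (report_error_at is
illustrative and not part of this commit; the tokenizer_* helpers are the
ones declared in include/tokenizer.h) could look like this:

    /* illustrative only: map a code pointer back to the file it came
       from and print an error message pointing at that location */
    void report_error_at(amal_compiler *compiler, const char *code_ref) {
        Tokenizer *tokenizer = amal_compiler_find_tokenizer_by_code_reference(compiler, code_ref);
        TokenizerError error;
        if(!tokenizer)
            return; /* @code_ref does not point into any parsed file */
        error = tokenizer_create_error(tokenizer,
            tokenizer_get_code_reference_index(tokenizer, code_ref),
            "error here");
        tokenizer_print_error_object(tokenizer, &error);
    }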