aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/asm/x86_64.c383
-rw-r--r--src/ast.c129
-rw-r--r--src/bytecode/bytecode.c43
-rw-r--r--src/compiler.c90
-rw-r--r--src/parser.c66
-rw-r--r--src/program.c368
-rw-r--r--src/ssa/ssa.c56
-rw-r--r--src/std/arena_allocator.c (renamed from src/std/scoped_allocator.c)36
-rw-r--r--src/std/buffer.c6
-rw-r--r--src/std/hash_map.c6
-rw-r--r--src/std/mem.c6
-rw-r--r--src/tokenizer.c7
12 files changed, 1015 insertions, 181 deletions
diff --git a/src/asm/x86_64.c b/src/asm/x86_64.c
new file mode 100644
index 0000000..2cbeead
--- /dev/null
+++ b/src/asm/x86_64.c
@@ -0,0 +1,383 @@
+#include "../../include/asm/x86_64.h"
+
+#include "../../include/std/mem.h"
+#include "../../include/std/log.h"
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+
+#include <sys/mman.h>
+
+#define REX_W 0x48
+
+void asm_ptr_init(AsmPtr *self, Reg64 base) {
+ self->base = base;
+ self->index = -1;
+ self->disp = 0;
+ self->scale = 0;
+}
+
+void asm_ptr_init_index(AsmPtr *self, Reg64 base, Reg64 index) {
+ self->base = base;
+ self->index = index;
+ self->disp = 0;
+ self->scale = 0;
+}
+
+void asm_ptr_init_disp(AsmPtr *self, Reg64 base, i32 disp) {
+ self->base = base;
+ self->index = -1;
+ self->disp = disp;
+ self->scale = 0;
+}
+
+void asm_ptr_init_index_disp(AsmPtr *self, Reg64 base, Reg64 index, i32 disp) {
+ self->base = base;
+ self->index = index;
+ self->disp = disp;
+ self->scale = 0;
+}
+
+int asm_init(Asm *self) {
+ self->size = am_pagesize();
+ amal_log_debug("asm: page size: %u", self->size);
+ self->code = mmap(NULL, self->size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if(self->code == MAP_FAILED)
+ return -errno;
+ self->code_it = self->code;
+ return 0;
+}
+
+void asm_deinit(Asm *self) {
+ if(self->code)
+ munmap(self->code, self->size);
+ self->code = NULL;
+ self->code_it = NULL;
+ self->size = 0;
+}
+
+static void asm_print_code_hex(Asm *self) {
+ u8 *ptr;
+ int off;
+ ptr = self->code;
+ off = 0;
+ while(ptr != self->code_it) {
+ printf("%02x", *ptr);
+ ++ptr;
+ ++off;
+ if(off == 8) {
+ putc('\n', stdout);
+ off = 0;
+ } else {
+ putc(' ', stdout);
+ }
+ }
+ if(off != 0)
+ putc('\n', stdout);
+}
+
+int asm_execute(Asm *self) {
+ void (*func)();
+ if(mprotect(self->code, self->size, PROT_READ | PROT_EXEC) != 0)
+ return -errno;
+
+ asm_print_code_hex(self);
+
+ /* TODO: Verify if this is valid on all platforms. According to ISO C standard it isn't? */
+ *(void**)(&func) = self->code;
+ func();
+ return 0;
+}
+
+/* TODO: See how this can be optimized */
+static CHECK_RESULT int asm_ensure_capacity(Asm *self, usize size) {
+ usize current_offset;
+ current_offset = (u8*)self->code_it - (u8*)self->code;
+ if(current_offset + size > self->size) {
+ void *new_mem;
+ usize new_size;
+ new_size = self->size + am_pagesize();
+ new_mem = mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if(new_mem == MAP_FAILED)
+ return -errno;
+
+ am_memcpy(new_mem, self->code, self->size); munmap(self->code, self->size);
+ self->code = new_mem;
+ self->size = new_size;
+ self->code_it = (u8*)self->code + current_offset;
+ }
+ return 0;
+}
+
+static isize asm_get_capacity_left(Asm *self) {
+ return (isize)self->size - (isize)((u8*)self->code_it - (u8*)self->code);
+}
+
+int asm_nop(Asm *self) {
+ return_if_error(asm_ensure_capacity(self, 1));
+ *self->code_it++ = 0x90;
+ return 0;
+}
+
+static i32 abs_i32(i32 value) {
+ return value >= 0 ? value : -value;
+}
+
+/*
+TODO: Implement 1 and 2 byte displacement?
+There has to be at least 6 bytes left in the asm buffer before calling this function.
+*/
+static void asm_rm(Asm *self, AsmPtr *mem, Reg64 reg) {
+ u8 rm_byte;
+ u8 disp_bytes;
+ assert(asm_get_capacity_left(self) >= 6);
+ if((int)mem->index != -1) {
+ u8 sib_offset;
+ if(mem->disp == 0) {
+ rm_byte = 0x04;
+ disp_bytes = 0;
+ } else if(abs_i32(mem->disp) <= INT8_MAX) {
+ rm_byte = 0x44;
+ disp_bytes = 1;
+ } else {
+ rm_byte = 0x84;
+ disp_bytes = 4;
+ }
+
+ #ifdef DEBUG
+ if(mem->scale != 0 && mem->scale != 2 && mem->scale != 4 && mem->scale != 8) {
+ amal_log_error("Invalid scale %d, expected 0, 2, 4, or 8", mem->scale);
+ assert(bool_false);
+ }
+ #endif
+ assert(mem->base != RBP && "TODO: Implement RBP base for sib byte. RBP is special and requires different logic");
+ sib_offset = ((u8)(mem->scale == 8 ? 3 : mem->scale >> 1) << 6) + 8*mem->index + mem->base;
+
+ *self->code_it++ = (reg << 3) | rm_byte;
+ *self->code_it++ = sib_offset;
+ } else {
+ if(mem->disp == 0) {
+ if(mem->base == RBP) {
+ rm_byte = 0x45;
+ disp_bytes = 1;
+ } else {
+ rm_byte = mem->base;
+ disp_bytes = 0;
+ }
+ } else if(abs_i32(mem->disp) <= INT8_MAX) {
+ rm_byte = 0x40 + mem->base;
+ disp_bytes = 1;
+ } else {
+ rm_byte = 0x80 + mem->base;
+ disp_bytes = 4;
+ }
+ *self->code_it++ = (reg << 3) | rm_byte;
+ }
+
+ am_memcpy(self->code_it, &mem->disp, disp_bytes);
+ self->code_it += disp_bytes;
+}
+
+/* There has to be at least 1 byte left in the asm buffer before calling this function. */
+static void asm_rr(Asm *self, Reg64 dst, Reg64 src) {
+ assert(asm_get_capacity_left(self) >= 1);
+ *self->code_it++ = 0xC0 + dst + 8*src;
+}
+
+/* TODO: Implement 1 and 2 byte immediate? */
+int asm_mov_mi(Asm *self, AsmPtr *dst, i32 immediate) {
+ /* 12 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 12));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0xC7;
+ asm_rm(self, dst, 0);
+ am_memcpy(self->code_it, &immediate, sizeof(immediate));
+ self->code_it += sizeof(immediate);
+ return 0;
+}
+
+int asm_mov_mr(Asm *self, AsmPtr *dst, Reg64 src) {
+ /* 8 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 8));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x89;
+ asm_rm(self, dst, src);
+ return 0;
+}
+
+int asm_mov_rm(Asm *self, Reg64 dst, AsmPtr *src) {
+ /* 8 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 8));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x8B;
+ asm_rm(self, src, dst);
+ return 0;
+}
+
+int asm_mov_ri(Asm *self, Reg64 dst, i64 immediate) {
+ /* 10 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 10));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0xB8 + dst;
+ am_memcpy(self->code_it, &immediate, sizeof(immediate));
+ self->code_it += sizeof(immediate);
+ return 0;
+}
+
+int asm_mov_rr(Asm *self, Reg64 dst, Reg64 src) {
+ /* 3 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 3));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x89;
+ asm_rr(self, dst, src);
+ return 0;
+}
+
+int asm_add_rr(Asm *self, Reg64 dst, Reg64 src) {
+ /* 3 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 3));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x01;
+ asm_rr(self, dst, src);
+ return 0;
+}
+
+int asm_sub_rr(Asm *self, Reg64 dst, Reg64 src) {
+ /* 3 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 3));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x29;
+ asm_rr(self, dst, src);
+ return 0;
+}
+
+int asm_imul_rr(Asm *self, Reg64 dst, Reg64 src) {
+ /* 4 bytes is the maximum size of the instruction. We don't know how large it will be so we prepare for the largest size */
+ return_if_error(asm_ensure_capacity(self, 4));
+ *self->code_it++ = REX_W;
+ *self->code_it++ = 0x0F;
+ *self->code_it++ = 0xAF;
+ asm_rr(self, dst, src);
+ return 0;
+}
+
+int asm_pushr(Asm *self, Reg64 reg) {
+ return_if_error(asm_ensure_capacity(self, 1));
+ *self->code_it++ = 0x50 + reg;
+ return 0;
+}
+
+int asm_popr(Asm *self, Reg64 reg) {
+ return_if_error(asm_ensure_capacity(self, 1));
+ *self->code_it++ = 0x58 + reg;
+ return 0;
+}
+
+/* /r */
+#define DEFINE_INS_RM(mnemonic, opcode) \
+int asm_##mnemonic##_rm32(Asm *self, Reg32 dst, Reg32 src) { \
+ return_if_error(asm_ensure_capacity(self, 2)); \
+ *self->code_it++ = opcode; \
+ *self->code_it++ = 0xC0 + 8*dst + src; \
+ return 0; \
+} \
+ \
+int asm_##mnemonic##_rm64(Asm *self, Reg64 dst, Reg64 src) { \
+ return_if_error(asm_ensure_capacity(self, 1)); \
+ *self->code_it++ = REX_W; \
+ return asm_##mnemonic##_rm32(self, (Reg32)dst, (Reg32)src); \
+}
+
+DEFINE_INS_RM(mov, 0x8B)
+DEFINE_INS_RM(add, 0x03)
+DEFINE_INS_RM(sub, 0x2B)
+DEFINE_INS_RM(and, 0x23)
+DEFINE_INS_RM(or, 0x0B)
+DEFINE_INS_RM(xor, 0x33)
+DEFINE_INS_RM(cmp, 0x3B)
+
+/*
+ /number
+ The number is called the extension, a number from 0 to 7;
+ It's a number used to extend the opcode type, since the instruction only uses
+ one register the other register can be encoded for that.
+*/
+#define DEFINE_INS_EXT_IMM(mnemonic, extension) \
+int asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i32 immediate) { \
+ if(abs_i32(immediate) <= INT8_MAX) { \
+ return_if_error(asm_ensure_capacity(self, 3)); \
+ *self->code_it++ = 0x83; \
+ *self->code_it++ = 0xC0 + 8*extension + reg; \
+ *self->code_it++ = (u8)immediate; \
+ } else { \
+ return_if_error(asm_ensure_capacity(self, 6)); \
+ *self->code_it++ = 0x81; \
+ *self->code_it++ = 0xC0 + 8*extension + reg; \
+ am_memcpy(self->code_it, &immediate, sizeof(immediate)); \
+ self->code_it += sizeof(immediate); \
+ } \
+ return 0; \
+} \
+ \
+int asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i32 immediate) { \
+ return_if_error(asm_ensure_capacity(self, 1)); \
+ *self->code_it++ = REX_W; \
+ return asm_##mnemonic##_rm32_imm(self, (Reg32)reg, immediate); \
+}
+
+DEFINE_INS_EXT_IMM(add, 0)
+DEFINE_INS_EXT_IMM(or, 1)
+DEFINE_INS_EXT_IMM(adc, 2)
+DEFINE_INS_EXT_IMM(sbb, 3)
+DEFINE_INS_EXT_IMM(and, 4)
+DEFINE_INS_EXT_IMM(sub, 5)
+DEFINE_INS_EXT_IMM(xor, 6)
+DEFINE_INS_EXT_IMM(cmp, 7)
+
+/*
+ /number
+ The number is called the extension, a number from 0 to 7;
+ It's a number used to extend the opcode type, since the instruction only uses
+ one register the other register can be encoded for that.
+*/
+#define DEFINE_INS_SHIFT_IMM8(mnemonic, extension) \
+int asm_##mnemonic##_rm32_imm(Asm *self, Reg32 reg, i8 immediate) { \
+ if(immediate == 1) { \
+ return_if_error(asm_ensure_capacity(self, 2)); \
+ *self->code_it++ = 0xD1; \
+ *self->code_it++ = 0xC0 + 8*extension + reg; \
+ } else { \
+ return_if_error(asm_ensure_capacity(self, 3)); \
+ *self->code_it++ = 0xC1; \
+ *self->code_it++ = 0xC0 + 8*extension + reg; \
+ *self->code_it++ = immediate; \
+ } \
+ return 0; \
+} \
+ \
+int asm_##mnemonic##_rm64_imm(Asm *self, Reg64 reg, i8 immediate) { \
+ return_if_error(asm_ensure_capacity(self, 1)); \
+ *self->code_it++ = REX_W; \
+ return asm_##mnemonic##_rm32_imm(self, (Reg32)reg, immediate); \
+}
+
+DEFINE_INS_SHIFT_IMM8(rol, 0)
+DEFINE_INS_SHIFT_IMM8(ror, 1)
+DEFINE_INS_SHIFT_IMM8(rcl, 2)
+DEFINE_INS_SHIFT_IMM8(rcr, 3)
+DEFINE_INS_SHIFT_IMM8(shl, 4)
+DEFINE_INS_SHIFT_IMM8(shr, 5)
+/*DEFINE_INS_SHIFT_IMM8(shl, 6)*/
+DEFINE_INS_SHIFT_IMM8(sar, 7)
+
+int asm_ret(Asm *self, u16 bytes) {
+ if(bytes == 0) {
+ return_if_error(asm_ensure_capacity(self, 1));
+ *self->code_it++ = 0xC3;
+ } else {
+ return_if_error(asm_ensure_capacity(self, 3));
+ *self->code_it++ = 0xC2;
+ am_memcpy(self->code_it, &bytes, sizeof(bytes)); self->code_it += sizeof(bytes);
+ }
+ return 0;
+} \ No newline at end of file
diff --git a/src/ast.c b/src/ast.c
index 69243f8..35ece2e 100644
--- a/src/ast.c
+++ b/src/ast.c
@@ -23,8 +23,8 @@ static void resolve_data_init(AstResolveData *self) {
self->type = NULL;
}
-int ast_create(ScopedAllocator *allocator, void *value, AstType type, Ast **result) {
- return_if_error(scoped_allocator_alloc(allocator, sizeof(Ast), (void**)result));
+int ast_create(ArenaAllocator *allocator, void *value, AstType type, Ast **result) {
+ return_if_error(arena_allocator_alloc(allocator, sizeof(Ast), (void**)result));
(*result)->value.data = value;
(*result)->type = type;
resolve_data_init(&(*result)->resolve_data);
@@ -75,18 +75,23 @@ static BufferView ast_get_code_reference(Ast *self) {
return ast_get_name(self);
}
-int funcdecl_init(FunctionDecl *self, FunctionSignature *signature, Scope *parent, ScopedAllocator *allocator) {
+void function_signature_init(FunctionSignature *self) {
+ self->params = 0;
+ self->resolved = bool_false;
+}
+
+int funcdecl_init(FunctionDecl *self, FunctionSignature *signature, Scope *parent, ArenaAllocator *allocator) {
self->signature = signature;
self->ssa_func_index = 0;
return scope_init(&self->body, parent, allocator);
}
-int funccall_init(FunctionCall *self, BufferView name, ScopedAllocator *allocator) {
+int funccall_init(FunctionCall *self, BufferView name, ArenaAllocator *allocator) {
variable_init(&self->func, name);
return buffer_init(&self->args, allocator);
}
-int structdecl_init(StructDecl *self, Scope *parent, ScopedAllocator *allocator) {
+int structdecl_init(StructDecl *self, Scope *parent, ArenaAllocator *allocator) {
return scope_init(&self->body, parent, allocator);
}
@@ -139,24 +144,24 @@ void binop_init(Binop *self) {
self->grouped = bool_false;
}
-int if_statement_init(IfStatement *self, Scope *parent, ScopedAllocator *allocator) {
+int if_statement_init(IfStatement *self, Scope *parent, ArenaAllocator *allocator) {
self->condition = NULL;
self->else_if_stmt = NULL;
return scope_init(&self->body, parent, allocator);
}
-int else_if_statement_init(ElseIfStatement *self, Scope *parent, ScopedAllocator *allocator) {
+int else_if_statement_init(ElseIfStatement *self, Scope *parent, ArenaAllocator *allocator) {
self->condition = NULL;
self->next_else_if_stmt = NULL;
return scope_init(&self->body, parent, allocator);
}
-int while_statement_init(WhileStatement *self, Scope *parent, ScopedAllocator *allocator) {
+int while_statement_init(WhileStatement *self, Scope *parent, ArenaAllocator *allocator) {
self->condition = NULL;
return scope_init(&self->body, parent, allocator);
}
-int scope_init(Scope *self, Scope *parent, ScopedAllocator *allocator) {
+int scope_init(Scope *self, Scope *parent, ArenaAllocator *allocator) {
return_if_error(buffer_init(&self->ast_objects, allocator));
return_if_error(hash_map_init(&self->named_objects, allocator, sizeof(Ast*), hash_map_compare_string, amal_hash_string));
self->parent = parent;
@@ -164,7 +169,7 @@ int scope_init(Scope *self, Scope *parent, ScopedAllocator *allocator) {
return 0;
}
-int file_scope_reference_init(FileScopeReference *self, BufferView canonical_path, ScopedAllocator *allocator) {
+int file_scope_reference_init(FileScopeReference *self, BufferView canonical_path, ArenaAllocator *allocator) {
char null_terminator;
null_terminator = '\0';
self->parser = NULL;
@@ -273,15 +278,17 @@ static Ast* scope_get_resolved_variable(Scope *self, AstCompilerContext *context
static void function_signature_resolve(FunctionSignature *self, AstCompilerContext *context) {
/* TODO: Implement */
+ if(self->resolved)
+ return;
+ self->resolved = bool_true;
(void)self;
(void)context;
}
static void variable_resolve(Variable *self, AstCompilerContext *context, AstResolveData *resolve_data) {
- if(!self->resolved_var) {
+ if(!self->resolved_var)
self->resolved_var = scope_get_resolved_variable(context->scope, context, self->name);
- resolve_data->type = self->resolved_var->resolve_data.type;
- }
+ resolve_data->type = self->resolved_var->resolve_data.type;
}
static LhsExpr* variable_get_resolved_type(Variable *self) {
@@ -289,15 +296,18 @@ static LhsExpr* variable_get_resolved_type(Variable *self) {
return self->resolved_var->value.lhs_expr;
}
-static void variable_type_resolve(VariableType *self, AstCompilerContext *context, AstResolveData *resolve_data) {
- switch(self->type) {
+static void variable_type_resolve(LhsExpr *self, AstCompilerContext *context, AstResolveData *resolve_data) {
+ VariableType *lhs_expr_type;
+ lhs_expr_type = &self->type;
+ switch(lhs_expr_type->type) {
case VARIABLE_TYPE_NONE:
return;
case VARIABLE_TYPE_VARIABLE:
- variable_resolve(self->value.variable, context, resolve_data);
+ variable_resolve(lhs_expr_type->value.variable, context, resolve_data);
break;
case VARIABLE_TYPE_SIGNATURE:
- function_signature_resolve(self->value.signature, context);
+ function_signature_resolve(lhs_expr_type->value.signature, context);
+ resolve_data->type = self;
break;
}
}
@@ -333,13 +343,11 @@ static LhsExpr* lhsexpr_resolve_rhs(LhsExpr *self, AstCompilerContext *context)
static void lhsexpr_resolve(Ast *ast, AstCompilerContext *context) {
LhsExpr *self;
- LhsExpr *rhs_resolve_type;
assert(ast->type == AST_LHS);
self = ast->value.lhs_expr;
- rhs_resolve_type = NULL;
- variable_type_resolve(&self->type, context, &ast->resolve_data);
+ variable_type_resolve(self, context, &ast->resolve_data);
/*
TODO: When parameters and return types are implemented, AST_RESOLVE_END should be set after
@@ -347,8 +355,22 @@ static void lhsexpr_resolve(Ast *ast, AstCompilerContext *context) {
be allowed but recursive function calls still require parameters and return types to be known.
*/
if(self->rhs_expr) {
- if(self->rhs_expr->type == AST_FUNCTION_DECL)
+ LhsExpr *rhs_resolve_type;
+ if(self->rhs_expr->type == AST_FUNCTION_DECL) {
+ /*
+ The function declaration itself always resolves the signature, but we also do it here because we
+ want to have the signature solved before setting the lhs expr as solved. Also function signatures can exist
+ without lhs expr (anonymous function).
+ */
+ function_signature_resolve(self->rhs_expr->value.func_decl->signature, context);
ast->resolve_data.status = AST_RESOLVED;
+ /*
+ If rhs is a function declaration then there is no need to wait until it has been resolved before setting the type as the type
+ is @self (the lhs). We still need to continue after this, so rhs can be resolved.
+ */
+ if(!ast->resolve_data.type)
+ ast->resolve_data.type = self;
+ }
rhs_resolve_type = lhsexpr_resolve_rhs(self, context);
@@ -365,10 +387,8 @@ static void lhsexpr_resolve(Ast *ast, AstCompilerContext *context) {
"Variable type and variable assignment type (right-hand side) do not match");
throw(AST_ERR);
}
- }
-
- if(rhs_resolve_type)
ast->resolve_data.type = rhs_resolve_type;
+ }
}
static LhsExpr* binop_get_lhs_expr(Binop *self) {
@@ -443,18 +463,20 @@ static void import_resolve(Ast *ast, AstCompilerContext *context) {
static Scope* lhsexpr_get_scope(LhsExpr *self) {
AstValue value;
- value = self->rhs_expr->value;
- switch(self->rhs_expr->type) {
- case AST_FUNCTION_DECL:
- return &value.func_decl->body;
- case AST_STRUCT_DECL:
- return &value.struct_decl->body;
- case AST_IMPORT:
- return &value.import->file_scope->parser->struct_decl.body;
- default:
- break;
+ if(self->rhs_expr) {
+ value = self->rhs_expr->value;
+ switch(self->rhs_expr->type) {
+ case AST_FUNCTION_DECL:
+ return &value.func_decl->body;
+ case AST_STRUCT_DECL:
+ return &value.struct_decl->body;
+ case AST_IMPORT:
+ return &value.import->file_scope->parser->struct_decl.body;
+ default:
+ break;
+ }
}
- assert(bool_false && "Expected lhsexpr_get_scope to only be called for function decl, struct decl and import");
+ assert(bool_false && "Expected lhsexpr_get_scope to only be called for non-extern function declaration, struct declaration and import");
return NULL;
}
@@ -466,9 +488,20 @@ static Parser* get_resolved_type_parser(Ast *self) {
static void funcdecl_resolve(FunctionDecl *self, AstCompilerContext *context) {
/* TODO: Implement parameters and return types */
+ function_signature_resolve(self->signature, context);
scope_resolve(&self->body, context);
}
+/*
+ Don't need to check if @self is resolved, since it will always be partially resolved when called from @funccall_resolve.
+ Meaning the resolve status won't be set to resolved but the resolve type will be set.
+*/
+static bool is_func_decl(Ast *self) {
+ const LhsExpr *resolved_type = self->resolve_data.type;
+ return (resolved_type->rhs_expr && resolved_type->rhs_expr->type == AST_FUNCTION_DECL) ||
+ resolved_type->type.type == VARIABLE_TYPE_SIGNATURE;
+}
+
static void funccall_resolve(Ast *self, AstCompilerContext *context) {
FunctionCall *func_call;
Ast **ast;
@@ -477,7 +510,7 @@ static void funccall_resolve(Ast *self, AstCompilerContext *context) {
func_call = self->value.func_call;
variable_resolve(&func_call->func, context, &self->resolve_data);
/* Attemping to use call syntax (variable_name ( ) ) with a variable that is not a function */
- if(self->resolve_data.type->rhs_expr->type != AST_FUNCTION_DECL) {
+ if(!is_func_decl(self)) {
Parser *caller_parser;
Parser *callee_parser;
BufferView callee_code_ref;
@@ -514,6 +547,12 @@ static void structfield_resolve(Ast *self, AstCompilerContext *context) {
variable_resolve(&struct_field->type, context, &self->resolve_data);
}
+static bool is_struct_decl(Ast *self) {
+ const LhsExpr *resolved_type = self->resolve_data.type;
+ assert(self->resolve_data.status == AST_RESOLVED);
+ return resolved_type->rhs_expr && resolved_type->rhs_expr->type == AST_STRUCT_DECL;
+}
+
static void binop_resolve_dot_access(Ast *ast, AstCompilerContext *context) {
Binop *self;
Scope *lhs_scope;
@@ -541,7 +580,7 @@ static void binop_resolve_dot_access(Ast *ast, AstCompilerContext *context) {
caller_code_ref = ast_get_code_reference(self->rhs);
callee_code_ref = self->rhs->resolve_data.type->var_name;
- if(self->lhs->resolve_data.type->rhs_expr->type != AST_STRUCT_DECL) {
+ if(!is_struct_decl(self->lhs)) {
parser_print_error(caller_parser, caller_code_ref.data, "Can only access field of structs");
/* TODO: use tokenizer_print_note, once it has been added */
/* TODO: Print type */
@@ -583,6 +622,8 @@ static void binop_resolve(Ast *ast, AstCompilerContext *context) {
/*
TODO: For this first error, only print the line without a reference to code.
This requires change in tokenizer_print_error to be able to take a line as reference.
+
+ TODO: Use note for the additional information instead of error.
*/
Parser *parser;
parser = scope_get_parser(context->scope);
@@ -597,6 +638,14 @@ static void binop_resolve(Ast *ast, AstCompilerContext *context) {
"Right-hand side is of type %.*s",
self->rhs->resolve_data.type->var_name.size, self->rhs->resolve_data.type->var_name.data);
throw(AST_ERR);
+ } else if(!is_arithmetic_type(self->lhs->resolve_data.type, context->compiler)) { /* TODO: Optimize this? store arithmetic type in the LhsExpr itself? */
+ /* TODO: Point the error at the binop instead of LHS */
+ Parser *parser;
+ parser = scope_get_parser(context->scope);
+ parser_print_error(parser, ast_get_code_reference(self->lhs).data,
+ "Arithmetic operation can only be performed with the types i8, u8, i16, u16, i32, u32, i64, u64, isize and usize",
+ self->lhs->resolve_data.type->var_name.size, self->lhs->resolve_data.type->var_name.data);
+ throw(AST_ERR);
}
ast->resolve_data.type = self->lhs->resolve_data.type;
}
@@ -655,9 +704,9 @@ void ast_resolve(Ast *self, AstCompilerContext *context) {
number = self->value.number;
/* TODO: Support other number types */
if(number->is_integer)
- self->resolve_data.type = context->compiler->default_types.i64;
+ self->resolve_data.type = (LhsExpr*)context->compiler->default_types.i64;
else
- self->resolve_data.type = context->compiler->default_types.f64;
+ self->resolve_data.type = (LhsExpr*)context->compiler->default_types.f64;
break;
}
case AST_FUNCTION_DECL:
@@ -684,7 +733,7 @@ void ast_resolve(Ast *self, AstCompilerContext *context) {
break;
case AST_STRING:
/* TODO: Convert special combinations. For example \n to newline */
- self->resolve_data.type = context->compiler->default_types.str;
+ self->resolve_data.type = (LhsExpr*)context->compiler->default_types.str;
break;
case AST_VARIABLE:
variable_resolve(self->value.variable, context, &self->resolve_data);
diff --git a/src/bytecode/bytecode.c b/src/bytecode/bytecode.c
index 9bf5f24..ffcd2e0 100644
--- a/src/bytecode/bytecode.c
+++ b/src/bytecode/bytecode.c
@@ -18,7 +18,7 @@
throw(return_if_result); \
} while(0)
-int bytecode_init(Bytecode *self, ScopedAllocator *allocator) {
+int bytecode_init(Bytecode *self, ArenaAllocator *allocator) {
return buffer_init(&self->data, allocator);
}
@@ -37,8 +37,8 @@ static CHECK_RESULT usize ssa_extract_form2(u8 *instruction_data, SsaInsForm2 *r
static CHECK_RESULT usize ssa_extract_func_start(u8 *instruction_data, SsaInsFuncStart *result) {
am_memcpy(&result->func_index, instruction_data, sizeof(result->func_index));
- am_memcpy(&result->num_args, instruction_data + sizeof(result->func_index), sizeof(result->num_args));
- return sizeof(result->func_index) + sizeof(result->num_args);
+ am_memcpy(&result->num_registers, instruction_data + sizeof(result->func_index), sizeof(result->num_registers));
+ return sizeof(result->func_index) + sizeof(result->num_registers);
}
static CHECK_RESULT usize ssa_extract_func_call(u8 *instruction_data, SsaInsFuncCall *result) {
@@ -109,7 +109,7 @@ void add_strings(BytecodeCompilerContext *self) {
throw_if_error(buffer_append(instructions, &strings_size, sizeof(u32)));
for(; string != strings_end; ++string) {
throw_if_error(buffer_append(instructions, &string->size, sizeof(u16)));
- throw_if_error(buffer_append(instructions, &string->data, string->size));
+ throw_if_error(buffer_append(instructions, string->data, string->size));
}
}
@@ -189,6 +189,20 @@ static void add_ins6(BytecodeCompilerContext *self, AmalOpcode opcode, u8 dst_re
fputc('\n', stderr);
}
+static void add_ins7(BytecodeCompilerContext *self, AmalOpcode opcode, u16 idx, u8 arg, const char *fmt) {
+ Buffer *instructions;
+ size_t index;
+ instructions = &self->bytecode.data;
+ index = instructions->size;
+
+ throw_if_error(buffer_append_empty(instructions, sizeof(AmalOpcodeType) + sizeof(idx) + sizeof(arg)));
+ instructions->data[index] = opcode;
+ memcpy(instructions->data + index + sizeof(AmalOpcodeType), &idx, sizeof(idx));
+ instructions->data[index + sizeof(AmalOpcodeType) + sizeof(idx)] = arg;
+ fprintf(stderr, fmt, idx, arg);
+ fputc('\n', stderr);
+}
+
#if 0
#define NUM_MAX_REGS 256
#define NUM_MAX_FUNC_ARGS 32
@@ -397,11 +411,21 @@ static void add_instructions(BytecodeCompilerContext *self) {
add_ins5(self, AMAL_OP_SUB, ssa_ins_form2.result, ssa_ins_form2.lhs, ssa_ins_form2.rhs, "sub r%d, r%d, r%d");
break;
}
+ case SSA_IMUL: {
+ instruction += ssa_extract_form2(instruction, &ssa_ins_form2);
+ add_ins5(self, AMAL_OP_IMUL, ssa_ins_form2.result, ssa_ins_form2.lhs, ssa_ins_form2.rhs, "imul r%d, r%d, r%d");
+ break;
+ }
case SSA_MUL: {
instruction += ssa_extract_form2(instruction, &ssa_ins_form2);
add_ins5(self, AMAL_OP_MUL, ssa_ins_form2.result, ssa_ins_form2.lhs, ssa_ins_form2.rhs, "mul r%d, r%d, r%d");
break;
}
+ case SSA_IDIV: {
+ instruction += ssa_extract_form2(instruction, &ssa_ins_form2);
+ add_ins5(self, AMAL_OP_IDIV, ssa_ins_form2.result, ssa_ins_form2.lhs, ssa_ins_form2.rhs, "idiv r%d, r%d, r%d");
+ break;
+ }
case SSA_DIV: {
instruction += ssa_extract_form2(instruction, &ssa_ins_form2);
add_ins5(self, AMAL_OP_DIV, ssa_ins_form2.result, ssa_ins_form2.lhs, ssa_ins_form2.rhs, "div r%d, r%d, r%d");
@@ -414,11 +438,11 @@ static void add_instructions(BytecodeCompilerContext *self) {
}
case SSA_FUNC_START: {
instruction += ssa_extract_func_start(instruction, &ssa_ins_func_start);
- add_ins1(self, AMAL_OP_FUNC_START, "func_start");
+ add_ins4(self, AMAL_OP_FUNC_START, ssa_ins_func_start.num_registers, "func_start %u");
break;
}
case SSA_FUNC_END: {
- add_ins1(self, AMAL_OP_FUNC_START, "func_end");
+ add_ins1(self, AMAL_OP_FUNC_END, "func_end");
break;
}
case SSA_PUSH: {
@@ -429,7 +453,6 @@ static void add_instructions(BytecodeCompilerContext *self) {
break;
}
case SSA_CALL: {
- /* TODO: Add args, using number of bytes to pop after function call. */
/*
TODO: Pass return register to function. The register should be a pointer that
has the size of the function return values so the return values can fit in it.
@@ -443,7 +466,8 @@ static void add_instructions(BytecodeCompilerContext *self) {
is defined as the size of all previous files' number of functions.
*/
instruction += ssa_extract_func_call(instruction, &ssa_ins_func_call);
- add_ins4(self, AMAL_OP_CALL, ssa_ins_func_call.func_decl->ssa_func_index, "call %d");
+ /* TODO: Replace 0 with the number of arguments */
+ add_ins7(self, AMAL_OP_CALL, ssa_ins_func_call.func_decl->ssa_func_index, 0, "call %d, %d");
break;
}
case SSA_JUMP_ZERO: {
@@ -456,9 +480,6 @@ static void add_instructions(BytecodeCompilerContext *self) {
add_ins4(self, AMAL_OP_JMP, ssa_ins_jump.jump_offset, "jmp %d");
break;
}
- default:
- amal_log_error("Instruction not yet implemented: %d", ins);
- assert(bool_false && "Instruction not yet implemented");
}
}
diff --git a/src/compiler.c b/src/compiler.c
index 05fae78..8c3266c 100644
--- a/src/compiler.c
+++ b/src/compiler.c
@@ -29,10 +29,10 @@ static CHECK_RESULT int create_default_type(amal_compiler *compiler, const char
StructDecl *struct_decl;
Ast *expr;
- return_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(StructDecl), (void**)&struct_decl));
+ return_if_error(arena_allocator_alloc(&compiler->allocator, sizeof(StructDecl), (void**)&struct_decl));
return_if_error(structdecl_init(struct_decl, &compiler->root_scope, &compiler->allocator));
- return_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(LhsExpr), (void**)lhs_expr));
+ return_if_error(arena_allocator_alloc(&compiler->allocator, sizeof(LhsExpr), (void**)lhs_expr));
lhsexpr_init(*lhs_expr, bool_true, bool_true, bool_true, create_buffer_view(name, strnlen(name, PATH_MAX)));
return_if_error(ast_create(&compiler->allocator, struct_decl, AST_STRUCT_DECL, &(*lhs_expr)->rhs_expr));
return_if_error(ast_create(&compiler->allocator, *lhs_expr, AST_LHS, &expr));
@@ -42,22 +42,59 @@ static CHECK_RESULT int create_default_type(amal_compiler *compiler, const char
}
static CHECK_RESULT int init_default_types(amal_compiler *compiler) {
- return_if_error(create_default_type(compiler, "i8", &compiler->default_types.i8));
- return_if_error(create_default_type(compiler, "i16", &compiler->default_types.i16));
- return_if_error(create_default_type(compiler, "i32", &compiler->default_types.i32));
- return_if_error(create_default_type(compiler, "i64", &compiler->default_types.i64));
- return_if_error(create_default_type(compiler, "u8", &compiler->default_types.u8));
- return_if_error(create_default_type(compiler, "u16", &compiler->default_types.u16));
- return_if_error(create_default_type(compiler, "u32", &compiler->default_types.u32));
- return_if_error(create_default_type(compiler, "u64", &compiler->default_types.u64));
- return_if_error(create_default_type(compiler, "isize", &compiler->default_types.isize));
- return_if_error(create_default_type(compiler, "usize", &compiler->default_types.usize));
- return_if_error(create_default_type(compiler, "f32", &compiler->default_types.f32));
- return_if_error(create_default_type(compiler, "f64", &compiler->default_types.f64));
- return_if_error(create_default_type(compiler, "str", &compiler->default_types.str));
+ return_if_error(create_default_type(compiler, "i8", (LhsExpr**)&compiler->default_types.i8));
+ return_if_error(create_default_type(compiler, "i16", (LhsExpr**)&compiler->default_types.i16));
+ return_if_error(create_default_type(compiler, "i32", (LhsExpr**)&compiler->default_types.i32));
+ return_if_error(create_default_type(compiler, "i64", (LhsExpr**)&compiler->default_types.i64));
+ return_if_error(create_default_type(compiler, "u8", (LhsExpr**)&compiler->default_types.u8));
+ return_if_error(create_default_type(compiler, "u16", (LhsExpr**)&compiler->default_types.u16));
+ return_if_error(create_default_type(compiler, "u32", (LhsExpr**)&compiler->default_types.u32));
+ return_if_error(create_default_type(compiler, "u64", (LhsExpr**)&compiler->default_types.u64));
+ return_if_error(create_default_type(compiler, "isize", (LhsExpr**)&compiler->default_types.isize));
+ return_if_error(create_default_type(compiler, "usize", (LhsExpr**)&compiler->default_types.usize));
+ return_if_error(create_default_type(compiler, "f32", (LhsExpr**)&compiler->default_types.f32));
+ return_if_error(create_default_type(compiler, "f64", (LhsExpr**)&compiler->default_types.f64));
+ return_if_error(create_default_type(compiler, "str", (LhsExpr**)&compiler->default_types.str));
+
+ compiler->default_types.arithmetic_types[0] = compiler->default_types.i8;
+ compiler->default_types.arithmetic_types[1] = compiler->default_types.u8;
+ compiler->default_types.arithmetic_types[2] = compiler->default_types.i16;
+ compiler->default_types.arithmetic_types[3] = compiler->default_types.u16;
+ compiler->default_types.arithmetic_types[4] = compiler->default_types.i32;
+ compiler->default_types.arithmetic_types[5] = compiler->default_types.u32;
+ compiler->default_types.arithmetic_types[6] = compiler->default_types.i64;
+ compiler->default_types.arithmetic_types[7] = compiler->default_types.u64;
+ compiler->default_types.arithmetic_types[8] = compiler->default_types.isize;
+ compiler->default_types.arithmetic_types[9] = compiler->default_types.usize;
+
+ compiler->default_types.i8->is_signed = bool_true;
+ compiler->default_types.u8->is_signed = bool_false;
+ compiler->default_types.i16->is_signed = bool_true;
+ compiler->default_types.u16->is_signed = bool_false;
+ compiler->default_types.i32->is_signed = bool_true;
+ compiler->default_types.u32->is_signed = bool_false;
+ compiler->default_types.i64->is_signed = bool_true;
+ compiler->default_types.u64->is_signed = bool_false;
+ compiler->default_types.isize->is_signed = bool_true;
+ compiler->default_types.usize->is_signed = bool_false;
+ compiler->default_types.f32->is_signed = bool_true;
+ compiler->default_types.f64->is_signed = bool_true;
+ compiler->default_types.str->is_signed = bool_false;
return 0;
}
+bool is_arithmetic_type(LhsExpr *expr, amal_compiler *compiler) {
+ usize i;
+ const amal_default_types *default_types;
+ default_types = &compiler->default_types;
+
+ for(i = 0; i < NUM_ARITHMETIC_TYPES; ++i) {
+ if(expr == (LhsExpr*)default_types->arithmetic_types[i])
+ return bool_true;
+ }
+ return bool_false;
+}
+
void amal_compiler_options_init(amal_compiler_options *self) {
self->error_callback = NULL;
self->error_callback_userdata = NULL;
@@ -89,12 +126,12 @@ static CHECK_RESULT int amal_compiler_init(amal_compiler *self, const amal_compi
self->generic_work_object_index = 0;
amal_mutex_init(&self->mutex);
- return_if_error(scoped_allocator_init(&self->allocator));
+ return_if_error(arena_allocator_init(&self->allocator));
cleanup_if_error(scope_init(&self->root_scope, NULL, &self->allocator));
cleanup_if_error(buffer_init(&self->parsers, &self->allocator));
cleanup_if_error(buffer_init(&self->queued_files, &self->allocator));
cleanup_if_error(hash_map_init(&self->file_scopes, &self->allocator, sizeof(FileScopeReference*), hash_map_compare_string, amal_hash_string));
- cleanup_if_error(scoped_allocator_alloc(&self->allocator,
+ cleanup_if_error(arena_allocator_alloc(&self->allocator,
self->usable_thread_count * sizeof(ParserThreadData),
(void**)&self->threads));
for(i = 0; i < self->usable_thread_count; ++i)
@@ -114,7 +151,7 @@ void amal_compiler_deinit(amal_compiler *self) {
}
amal_mutex_deinit(&self->mutex);
- scoped_allocator_deinit(&self->allocator);
+ arena_allocator_deinit(&self->allocator);
}
typedef enum {
@@ -145,7 +182,7 @@ typedef struct {
ThreadWorkType type;
} ThreadWorkData;
-static CHECK_RESULT int amal_compiler_load_in_this_thread(amal_compiler *compiler, FileScopeReference *file_scope, ScopedAllocator *allocator) {
+static CHECK_RESULT int amal_compiler_load_in_this_thread(amal_compiler *compiler, FileScopeReference *file_scope, ArenaAllocator *allocator) {
Parser *parser;
int result;
BufferView filepath;
@@ -153,7 +190,7 @@ static CHECK_RESULT int amal_compiler_load_in_this_thread(amal_compiler *compile
filepath = create_buffer_view(file_scope->canonical_path.data, file_scope->canonical_path.size);
amal_log_info("Started parsing %.*s", filepath.size, filepath.data);
- return_if_error(scoped_allocator_alloc(allocator, sizeof(Parser), (void**)&parser));
+ return_if_error(arena_allocator_alloc(allocator, sizeof(Parser), (void**)&parser));
return_if_error(parser_init(parser, compiler, allocator));
file_scope->parser = parser;
return_if_error(parser_parse_file(parser, filepath));
@@ -229,7 +266,7 @@ static CHECK_RESULT int thread_generate_ssa(Parser *parser) {
SsaCompilerContext compiler_context;
int result;
- return_if_error(scoped_allocator_alloc(parser->allocator, sizeof(Ssa), (void**)&compiler_context.ssa));
+ return_if_error(arena_allocator_alloc(parser->allocator, sizeof(Ssa), (void**)&compiler_context.ssa));
return_if_error(ssa_init(compiler_context.ssa, parser->allocator));
compiler_context.compiler = parser->compiler;
parser->ssa = compiler_context.ssa;
@@ -446,7 +483,10 @@ static CHECK_RESULT int amal_compiler_dispatch_generic(amal_compiler *self, Thre
}
static CHECK_RESULT int amal_compiler_generate_program(amal_compiler *self) {
- /* TODO: Copying the bytecode to the program can be done using multiple threads */
+ /*
+ TODO: Copying the bytecode to the program can be done using multiple threads.
+ Use self->threads for that.
+ */
Parser **parser;
Parser **parser_end;
parser = buffer_begin(&self->parsers);
@@ -472,7 +512,7 @@ static CHECK_RESULT int try_create_file_scope(amal_compiler *compiler, const cha
path_view = create_buffer_view(result_path, result_path_size);
cleanup_if_error(amal_mutex_lock(&compiler->mutex, "try_create_file_scope"));
if(!hash_map_get(&compiler->file_scopes, path_view, file_scope)) {
- cleanup_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(FileScopeReference), (void**)file_scope));
+ cleanup_if_error(arena_allocator_alloc(&compiler->allocator, sizeof(FileScopeReference), (void**)file_scope));
/* @(*file_scope)->canonical_path won't change after this, so it's fine if allocator belongs to non-thread safe compiler instance */
cleanup_if_error(file_scope_reference_init(*file_scope, path_view, &compiler->allocator));
cleanup_if_error(hash_map_insert(&compiler->file_scopes, path_view, file_scope));
@@ -487,11 +527,11 @@ static CHECK_RESULT int try_create_file_scope(amal_compiler *compiler, const cha
}
int amal_compiler_load_file(amal_compiler_options *options, amal_program *program, const char *filepath) {
- assert(program);
- assert(filepath);
amal_compiler compiler;
FileScopeReference *file_scope;
int result;
+ assert(program);
+ assert(filepath);
return_if_error(amal_compiler_init(&compiler, options, program));
result = amal_compiler_internal_load_file(&compiler, filepath, &file_scope);
amal_compiler_deinit(&compiler);
diff --git a/src/parser.c b/src/parser.c
index b61a968..fdf34ce 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -30,12 +30,12 @@ int parser_thread_data_init(ParserThreadData *self) {
am_memset(&self->allocator, 0, sizeof(self->allocator));
am_memset(&self->thread, 0, sizeof(self->thread));
self->status = PARSER_THREAD_STATUS_NEW;
- return scoped_allocator_init(&self->allocator);
+ return arena_allocator_init(&self->allocator);
}
void parser_thread_data_deinit(ParserThreadData *self) {
ignore_result_int(amal_thread_deinit(&self->thread));
- scoped_allocator_deinit(&self->allocator);
+ arena_allocator_deinit(&self->allocator);
}
int parser_thread_data_start(ParserThreadData *self, AmalThreadCallbackFunc callback_func, void *userdata) {
@@ -51,7 +51,7 @@ int parser_thread_data_join(ParserThreadData *self, void **result) {
return amal_thread_join(&self->thread, result);
}
-int parser_init(Parser *self, amal_compiler *compiler, ScopedAllocator *allocator) {
+int parser_init(Parser *self, amal_compiler *compiler, ArenaAllocator *allocator) {
self->allocator = allocator;
self->compiler = compiler;
self->ssa = NULL;
@@ -151,15 +151,15 @@ static CHECK_RESULT FunctionSignature* parser_parse_function_signature(Parser *s
/* TODO: Parse return types */
}
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(FunctionSignature), (void**)&signature));
- signature->params = 0;
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(FunctionSignature), (void**)&signature));
+ function_signature_init(signature);
return signature;
}
/*
VAR_TYPE_DEF = ':' TOK_IDENTIFIER|FUNC_SIGNATURE
*/
-static CHECK_RESULT int parser_parse_var_type_def(Parser *self, VariableType *result) {
+static void parser_parse_var_type_def(Parser *self, VariableType *result) {
bool match;
result->type = VARIABLE_TYPE_NONE;
@@ -167,14 +167,14 @@ static CHECK_RESULT int parser_parse_var_type_def(Parser *self, VariableType *re
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_COLON, &match));
if(!match)
- return -1;
+ return;
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_IDENTIFIER, &match));
if(match) {
result->type = VARIABLE_TYPE_VARIABLE;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(Variable), (void**)&result->value.variable));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(Variable), (void**)&result->value.variable));
variable_init(result->value.variable, self->tokenizer.value.identifier);
- return 0;
+ return;
}
result->type = VARIABLE_TYPE_SIGNATURE;
@@ -185,7 +185,6 @@ static CHECK_RESULT int parser_parse_var_type_def(Parser *self, VariableType *re
"Expected type or closure signature");
throw(PARSER_UNEXPECTED_TOKEN);
}
- return 0;
}
/*
@@ -232,10 +231,10 @@ static CHECK_RESULT LhsExpr* parser_parse_declaration_lhs(Parser *self) {
throw_if_error(tokenizer_accept(&self->tokenizer, TOK_IDENTIFIER));
var_name = self->tokenizer.value.identifier;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(LhsExpr), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(LhsExpr), (void**)&result));
lhsexpr_init(result, is_extern, is_pub, is_const, var_name);
- ignore_result_int(parser_parse_var_type_def(self, &result->type));
+ parser_parse_var_type_def(self, &result->type);
return result;
}
@@ -252,7 +251,7 @@ static CHECK_RESULT FunctionDecl* parser_parse_closure(Parser *self) {
if(!signature)
return NULL;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(FunctionDecl), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(FunctionDecl), (void**)&result));
throw_if_error(funcdecl_init(result, signature, self->current_scope, self->allocator));
self->current_scope = &result->body;
@@ -277,7 +276,7 @@ static CHECK_RESULT StructDecl* parser_parse_struct_decl(Parser *self) {
if(!match)
return result;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(StructDecl), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(StructDecl), (void**)&result));
throw_if_error(structdecl_init(result, self->current_scope, self->allocator));
self->current_scope = &result->body;
@@ -329,13 +328,13 @@ static CHECK_RESULT Ast* parser_parse_function_call_or_variable(Parser *self) {
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_OPEN_PAREN, &match));
if(!match) {
Variable *variable;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(Variable), (void**)&variable));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(Variable), (void**)&variable));
variable_init(variable, identifier);
throw_if_error(ast_create(self->allocator, variable, AST_VARIABLE, &result));
return result;
}
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(FunctionCall), (void**)&func_call));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(FunctionCall), (void**)&func_call));
throw_if_error(funccall_init(func_call, self->tokenizer.value.identifier, self->allocator));
throw_if_error(ast_create(self->allocator, func_call, AST_FUNCTION_CALL, &result));
/* Ends after TOK_CLOSING_PAREN */
@@ -355,7 +354,7 @@ static CHECK_RESULT Import* parser_parse_import(Parser *self) {
if(!match)
return result;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(Import), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(Import), (void**)&result));
import_init(result, self->tokenizer.value.string);
return result;
}
@@ -371,7 +370,7 @@ static CHECK_RESULT ElseIfStatement* parser_parse_else_if_statement(Parser *self
if(!match)
return NULL;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(ElseIfStatement), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(ElseIfStatement), (void**)&result));
throw_if_error(else_if_statement_init(result, self->current_scope, self->allocator));
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_IF, &match));
@@ -417,7 +416,7 @@ static CHECK_RESULT IfStatement* parser_parse_if_statement(Parser *self) {
if(!match)
return NULL;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(IfStatement), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(IfStatement), (void**)&result));
throw_if_error(if_statement_init(result, self->current_scope, self->allocator));
result->condition = parser_parse_rhs_binop(self);
@@ -446,7 +445,7 @@ static CHECK_RESULT WhileStatement* parser_parse_while_statement(Parser *self) {
if(!match)
return NULL;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(WhileStatement), (void**)&result));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(WhileStatement), (void**)&result));
throw_if_error(while_statement_init(result, self->current_scope, self->allocator));
result->condition = parser_parse_rhs_binop(self);
@@ -473,7 +472,7 @@ static CHECK_RESULT Ast* parser_parse_number(Parser *self) {
if(!match)
return result;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(Number), (void**)&number));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(Number), (void**)&number));
number_init(number, self->tokenizer.value.integer, self->tokenizer.number_is_integer,
create_buffer_view(self->tokenizer.code.data + self->tokenizer.prev_index, self->tokenizer.index - self->tokenizer.prev_index));
throw_if_error(ast_create(self->allocator, number, AST_NUMBER, &result));
@@ -491,7 +490,7 @@ static Ast* parser_parse_rhs_single_expr(Parser *self) {
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_STRING, &match));
if(match) {
String *string;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(String), (void**)&string));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(String), (void**)&string));
throw_if_error(string_init(string, self->tokenizer.value.string));
throw_if_error(ast_create(self->allocator, string, AST_STRING, &result));
return result;
@@ -549,7 +548,7 @@ Ast* parser_parse_rhs_binop(Parser *self) {
binop_type = self->tokenizer.value.binop_type;
rhs = parser_parse_rhs_binop(self);
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(Binop), (void**)&binop));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(Binop), (void**)&binop));
binop_init(binop);
binop->type = binop_type;
binop->lhs = lhs;
@@ -645,11 +644,23 @@ Ast* parser_parse_body(Parser *self) {
throw_if_error(ast_create(self->allocator, lhs_expr, AST_LHS, &result));
if(lhs_expr->is_extern) {
throw_if_error(tokenizer_accept(&self->tokenizer, TOK_SEMICOLON));
+ if (lhs_expr->type.type == VARIABLE_TYPE_NONE) {
+ self->error = tokenizer_create_error(&self->tokenizer, self->tokenizer.prev_index, "A variable can't be declared without a type or assignment");
+ throw(PARSER_UNEXPECTED_TOKEN);
+ }
return result;
} else {
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_SEMICOLON, &match));
- if(match)
+ if(match) {
+ if(lhs_expr->type.type == VARIABLE_TYPE_SIGNATURE) {
+ self->error = tokenizer_create_error(&self->tokenizer, self->tokenizer.prev_index, "Expected function declaration. Only extern functions can have empty declarations.");
+ throw(PARSER_UNEXPECTED_TOKEN);
+ } else if (lhs_expr->type.type == VARIABLE_TYPE_NONE) {
+ self->error = tokenizer_create_error(&self->tokenizer, self->tokenizer.prev_index, "A variable can't be declared without a type or assignment");
+ throw(PARSER_UNEXPECTED_TOKEN);
+ }
return result;
+ }
}
throw_if_error(tokenizer_accept(&self->tokenizer, TOK_EQUALS));
@@ -683,14 +694,16 @@ Ast* parser_parse_body(Parser *self) {
self->error_context = ERROR_CONTEXT_RHS_STANDALONE;
rhs_expr = parser_parse_rhs(self);
self->error_context = ERROR_CONTEXT_NONE;
+ /* Variable declaration with lhs and rhs */
if(lhs_expr) {
lhs_expr->rhs_expr = rhs_expr;
} else {
bool match;
throw_if_error(tokenizer_consume_if(&self->tokenizer, TOK_EQUALS, &match));
+ /* Variable assignment */
if(match) {
AssignmentExpr *assign_expr;
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(AssignmentExpr), (void**)&assign_expr));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(AssignmentExpr), (void**)&assign_expr));
throw_if_error(ast_create(self->allocator, assign_expr, AST_ASSIGN, &result));
assign_expr->lhs_expr = rhs_expr;
@@ -698,6 +711,7 @@ Ast* parser_parse_body(Parser *self) {
assign_expr->rhs_expr = parser_parse_rhs(self);
self->error_context = ERROR_CONTEXT_NONE;
} else {
+ /* Only rhs (for example function call, binop etc...) */
result = rhs_expr;
}
}
@@ -721,7 +735,7 @@ Ast* parser_parse_struct_body(Parser *self) {
throw_if_error(tokenizer_accept(&self->tokenizer, TOK_IDENTIFIER));
type_name = self->tokenizer.value.identifier;
throw_if_error(tokenizer_accept(&self->tokenizer, TOK_SEMICOLON));
- throw_if_error(scoped_allocator_alloc(self->allocator, sizeof(LhsExpr), (void**)&struct_field));
+ throw_if_error(arena_allocator_alloc(self->allocator, sizeof(LhsExpr), (void**)&struct_field));
structfield_init(struct_field, var_name, type_name);
throw_if_error(ast_create(self->allocator, struct_field, AST_STRUCT_FIELD, &result));
return result;
diff --git a/src/program.c b/src/program.c
index 167f4c4..5e63c04 100644
--- a/src/program.c
+++ b/src/program.c
@@ -2,6 +2,7 @@
#include "../include/std/mem.h"
#include "../include/std/alloc.h"
#include "../include/std/log.h"
+#include "../include/std/buffer_view.h"
#include <stdio.h>
#include <errno.h>
#include <assert.h>
@@ -19,13 +20,12 @@ typedef union {
f64 floating;
} NumberUnion;
-int amal_program_init(amal_program *self) {
- ignore_result_int(buffer_init(&self->data, NULL));
- ignore_result_int(buffer_init(&self->string_indices, NULL));
- self->intermediates_start = NULL;
- self->strings_start = NULL;
- self->read_index = 0;
+typedef struct {
+ NumberType type;
+ NumberUnion value;
+} Number;
+static CHECK_RESULT int amal_program_append_header(amal_program *self) {
/*doc(Bytecode header)
# Header layout
|Size|Name |Description |
@@ -50,9 +50,37 @@ int amal_program_init(amal_program *self) {
return 0;
}
/*
 * Initializes @self: zeroes all members, allocates the interpreter stack,
 * initializes the JIT assembler and writes the bytecode header into
 * self->data. Returns 0 on success, -1 on failure. On failure @self is
 * already deinitialized; the caller must not use it further.
 */
int amal_program_init(amal_program *self) {
    /* NOTE(review): result deliberately ignored — presumably buffer_init
       cannot fail when given a NULL allocator; confirm against buffer.c */
    ignore_result_int(buffer_init(&self->data, NULL));
    /* Zero every member BEFORE the first fallible call so that the cleanup
       path can safely run amal_program_deinit on a partially built program */
    self->string_indices = NULL;
    self->intermediates_start = NULL;
    self->strings_start = NULL;
    self->read_index = 0;
    self->num_strings = 0;
    self->num_intermediates = 0;
    am_memset(self->reg, 0, sizeof(self->reg));
    self->stack = NULL;
    /* Initial stack capacity in bytes; grown on demand by
       ensure_stack_capacity_for_push, capped at 4MB there */
    self->stack_size = 4096;
    am_memset(&self->asm, 0, sizeof(self->asm));
    /* cleanup_if_error jumps to the cleanup: label below on failure */
    cleanup_if_error(am_malloc(self->stack_size, (void**)&self->stack));
    cleanup_if_error(asm_init(&self->asm));
    self->stack_index = 0;

    cleanup_if_error(amal_program_append_header(self));
    return 0;

    cleanup:
    /* Specific error code is collapsed to -1; all members are in a state
       amal_program_deinit can handle (NULL pointers / zeroed asm) */
    amal_program_deinit(self);
    return -1;
}
+
/*
 * Releases every resource owned by @self. Called both on normal teardown
 * and from the amal_program_init failure path, where members may still be
 * NULL/zeroed from the up-front memset there.
 */
void amal_program_deinit(amal_program *self) {
    buffer_deinit(&self->data);

    am_free(self->string_indices);
    am_free(self->stack);
    self->string_indices = NULL;
    self->stack = NULL;

    asm_deinit(&self->asm);
}
int amal_program_append_bytecode(amal_program *self, Bytecode *bytecode) {
@@ -113,6 +141,8 @@ static CHECK_RESULT int amal_program_read_intermediates(amal_program *self) {
}
self->intermediates_start = &self->data.data[self->read_index];
+ if(intermediates_size > 0)
+ self->num_intermediates = intermediates_size / (sizeof(u8) + sizeof(u64));
/*
read_end = self->read_index + intermediates_size;
while(self->read_index < read_end) {
@@ -140,10 +170,11 @@ static CHECK_RESULT int amal_program_read_strings(amal_program *self) {
am_memcpy(&num_strings, &self->data.data[self->read_index], sizeof(num_strings));
self->read_index += sizeof(num_strings);
+ self->num_strings = num_strings;
- if(buffer_append_empty(&self->string_indices, num_strings) != 0)
+ if(am_malloc(sizeof(u32) * num_strings, (void**)&self->string_indices) != 0)
return AMAL_PROGRAM_STRING_ALLOC_FAILURE;
- string_index_ptr = (u32*)self->string_indices.data;
+ string_index_ptr = self->string_indices;
if(bytes_left_to_read(self) < sizeof(strings_size))
return AMAL_PROGRAM_INVALID_STRINGS;
@@ -178,9 +209,58 @@ static CHECK_RESULT int amal_program_read_strings(amal_program *self) {
return AMAL_PROGRAM_OK;
}
+static CHECK_RESULT int amal_program_get_intermediate_by_index(amal_program *self, u16 index, Number *result) {
+ if(index >= self->num_intermediates)
+ return AMAL_PROGRAM_INSTRUCTION_INVALID_INTERMEDIATE_INDEX;
+
+ am_memcpy(&result->type, &self->intermediates_start[(sizeof(u8) + sizeof(u64)) * (usize)index], sizeof(u8));
+ am_memcpy(&result->value, &self->intermediates_start[(sizeof(u8) + sizeof(u64)) * (usize)index + sizeof(u8)], sizeof(u64));
+ return 0;
+}
+
+static CHECK_RESULT int amal_program_get_data_by_index(amal_program *self, u16 index, char **result) {
+ char *str_ptr;
+
+ if(index >= self->num_strings) {
+ amal_log_error("Data index %ld is out of range (%ld)", index, self->num_strings);
+ return AMAL_PROGRAM_INSTRUCTION_INVALID_DATA_INDEX;
+ }
+
+ str_ptr = self->strings_start + self->string_indices[index];
+ am_memcpy(result, &str_ptr, sizeof(char**));
+ return 0;
+}
+
+static CHECK_RESULT int ensure_stack_capacity_for_push(amal_program *self) {
+ if(self->stack_index >= self->stack_size) {
+ self->stack_size *= 2;
+ /* 4MB */
+ if(self->stack_size >= (1<<22))
+ return AMAL_PROGRAM_INSTRUCTION_STACK_OVERFLOW;
+ if(am_realloc(self->stack, self->stack_size, (void**)&self->stack) != 0)
+ return AMAL_PROGRAM_INSTRUCTION_STACK_OOM;
+ }
+ return 0;
+}
+
+static i64 abs_i64(i64 value) {
+ return value >= 0 ? value : -value;
+}
+
+static int assert_reg_outside_stack() {
+ assert(bool_false && "Register outside stack!");
+ return 0;
+}
+
static CHECK_RESULT int amal_program_read_instructions(amal_program *self) {
u32 instructions_size;
+ u32 read_start;
u32 read_end;
+ bool inside_func;
+ u16 func_num_registers;
+
+ func_num_registers = 0;
+ inside_func = bool_false;
if(bytes_left_to_read(self) < sizeof(instructions_size))
return AMAL_PROGRAM_INVALID_INSTRUCTIONS_SIZE;
@@ -191,68 +271,296 @@ static CHECK_RESULT int amal_program_read_instructions(amal_program *self) {
if(bytes_left_to_read(self) < instructions_size)
return AMAL_PROGRAM_INVALID_INSTRUCTIONS_SIZE;
- read_end = self->read_index + instructions_size;
+ /*
+ TODO: self->reg should be of type Number and each arithmetic operation should operate
+ on the type of the register.
+
+ TODO: Currently almost all operations are performed on memory. This should be optimized
+ to take advantage of registers.
+
+ TODO: Operations with memory registers could access outside the stack. Should this be checked?
+ */
+
+ #ifdef DEBUG
+ #define get_register_at_offset(offset) \
+ (self->data.data[self->read_index + (offset)] < func_num_registers ? self->data.data[self->read_index + (offset)] * (int)sizeof(usize) + (int)sizeof(usize) : assert_reg_outside_stack())
+ #else
+ #define get_register_at_offset(offset) (self->data.data[self->read_index + (offset)] * (int)sizeof(usize) + (int)sizeof(usize))
+ #endif
+
+ read_start = self->read_index;
+ read_end = read_start + instructions_size;
while(self->read_index < read_end) {
AmalOpcode opcode;
opcode = self->data.data[self->read_index];
self->read_index += sizeof(AmalOpcodeType);
switch(opcode) {
- case AMAL_OP_NOP:
+ case AMAL_OP_NOP: {
+ return_if_error(asm_nop(&self->asm));
break;
- case AMAL_OP_SETZ:
+ }
+ case AMAL_OP_SETZ: {
+ AsmPtr dst;
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ return_if_error(asm_mov_mi(&self->asm, &dst, 0));
+ self->reg[(u8)self->data.data[self->read_index]] = 0;
self->read_index += 1;
break;
- case AMAL_OP_MOV:
+ }
+ case AMAL_OP_MOV: {
+ AsmPtr ptr;
+ asm_ptr_init_disp(&ptr, RBP, -(i32)get_register_at_offset(1));
+ return_if_error(asm_mov_rm(&self->asm, RAX, &ptr));
+
+ asm_ptr_init_disp(&ptr, RBP, -(i32)get_register_at_offset(0));
+ return_if_error(asm_mov_mr(&self->asm, &ptr, RAX));
+
+ self->reg[(u8)self->data.data[self->read_index]] = self->reg[(u8)self->data.data[self->read_index + 1]];
self->read_index += 2;
break;
- case AMAL_OP_MOVI:
+ }
+ case AMAL_OP_MOVI: {
+ u8 dst_reg;
+ u16 intermediate_index;
+ Number number;
+
+ dst_reg = self->reg[(u8)self->data.data[self->read_index]];
+ am_memcpy(&intermediate_index, &self->data.data[self->read_index + sizeof(u8)], sizeof(intermediate_index));
+
+ return_if_error(amal_program_get_intermediate_by_index(self, intermediate_index, &number));
+ self->reg[dst_reg] = number.value.integer;
+
+ /* TODO: if @number is a float then use float instructions */
+ if(abs_i64(number.value.integer) <= INT32_MAX) {
+ AsmPtr dst;
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ return_if_error(asm_mov_mi(&self->asm, &dst, number.value.integer));
+ } else {
+ AsmPtr dst;
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ return_if_error(asm_mov_ri(&self->asm, RAX, number.value.integer));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+ }
+
+ self->read_index += 3;
+ break;
+ }
+ case AMAL_OP_MOVD: {
+ u8 dst_reg;
+ u16 data_index;
+ char *data_ptr;
+ AsmPtr dst;
+
+ dst_reg = self->reg[(u8)self->data.data[self->read_index]];
+ am_memcpy(&data_index, &self->data.data[self->read_index + sizeof(u8)], sizeof(data_index));
+
+ return_if_error(amal_program_get_data_by_index(self, data_index, &data_ptr));
+ self->reg[dst_reg] = (uintptr_t)data_ptr;
+
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ return_if_error(asm_mov_ri(&self->asm, RAX, (uintptr_t)data_ptr));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+
+ self->read_index += 3;
+ break;
+ }
+ case AMAL_OP_ADD: {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ asm_ptr_init_disp(&reg1, RBP, -(i32)get_register_at_offset(1));
+ asm_ptr_init_disp(&reg2, RBP, -(i32)get_register_at_offset(2));
+
+ return_if_error(asm_mov_rm(&self->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&self->asm, RCX, &reg2));
+ return_if_error(asm_add_rr(&self->asm, RAX, RCX));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+
self->read_index += 3;
break;
- case AMAL_OP_MOVD:
+ }
+ case AMAL_OP_SUB: {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ asm_ptr_init_disp(&reg1, RBP, -(i32)get_register_at_offset(1));
+ asm_ptr_init_disp(&reg2, RBP, -(i32)get_register_at_offset(2));
+
+ return_if_error(asm_mov_rm(&self->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&self->asm, RCX, &reg2));
+ return_if_error(asm_sub_rr(&self->asm, RAX, RCX));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+
self->read_index += 3;
break;
- case AMAL_OP_ADD:
+ }
+ case AMAL_OP_IMUL: {
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ asm_ptr_init_disp(&reg1, RBP, -(i32)get_register_at_offset(1));
+ asm_ptr_init_disp(&reg2, RBP, -(i32)get_register_at_offset(2));
+
+ return_if_error(asm_mov_rm(&self->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&self->asm, RCX, &reg2));
+ return_if_error(asm_imul_rr(&self->asm, RAX, RCX));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+
self->read_index += 3;
break;
- case AMAL_OP_SUB:
+ }
+ case AMAL_OP_MUL: {
+ #if 0
+ AsmPtr dst;
+ AsmPtr reg1;
+ AsmPtr reg2;
+
+ asm_ptr_init_disp(&dst, RBP, -(i32)get_register_at_offset(0));
+ asm_ptr_init_disp(&reg1, RBP, -(i32)get_register_at_offset(1));
+ asm_ptr_init_disp(&reg2, RBP, -(i32)get_register_at_offset(2));
+
+ return_if_error(asm_mov_rm(&self->asm, RAX, &reg1));
+ return_if_error(asm_mov_rm(&self->asm, RCX, &reg2));
+ return_if_error(asm_mul_rr(&self->asm, RAX, RCX));
+ return_if_error(asm_mov_mr(&self->asm, &dst, RAX));
+ #endif
+
self->read_index += 3;
break;
- case AMAL_OP_MUL:
+ }
+ case AMAL_OP_IDIV: {
self->read_index += 3;
break;
- case AMAL_OP_DIV:
+ }
+ case AMAL_OP_DIV: {
self->read_index += 3;
break;
- case AMAL_OP_PUSH:
+ }
+ case AMAL_OP_PUSH: {
+ #if 0
+ AsmPtr reg;
+ asm_ptr_init_disp(&reg, RBP, -(i32)get_register_at_offset(0));
+
+ return_if_error(ensure_stack_capacity_for_push(self));
+ self->stack[self->stack_index] = self->reg[(u8)self->data.data[self->read_index]];
+ ++self->stack_index;
+ return_if_error(asm_pushm(&self->asm, &reg));
+ #endif
self->read_index += 1;
break;
- case AMAL_OP_PUSHI:
+ }
+ case AMAL_OP_PUSHI: {
+ u16 intermediate_index;
+ Number number;
+
+ am_memcpy(&intermediate_index, &self->data.data[self->read_index], sizeof(intermediate_index));
+ return_if_error(amal_program_get_intermediate_by_index(self, intermediate_index, &number));
+ return_if_error(ensure_stack_capacity_for_push(self));
+
+ self->stack[self->stack_index] = number.value.integer;
+ ++self->stack_index;
self->read_index += 2;
break;
- case AMAL_OP_PUSHD:
+ }
+ case AMAL_OP_PUSHD: {
+ u16 data_index;
+ char *data_ptr;
+
+ am_memcpy(&data_index, &self->data.data[self->read_index], sizeof(data_index));
+ return_if_error(amal_program_get_data_by_index(self, data_index, &data_ptr));
+ return_if_error(ensure_stack_capacity_for_push(self));
+
+ self->stack[self->stack_index] = (uintptr_t)data_ptr;
+ ++self->stack_index;
self->read_index += 2;
break;
+ }
case AMAL_OP_CALL:
- self->read_index += 2;
+ /*assert(bool_false && "TODO: Implement CALL");*/
+ self->read_index += 3;
break;
case AMAL_OP_CALLR:
- self->read_index += 1;
+ /*assert(bool_false && "TODO: Implement CALLR");*/
+ self->read_index += 2;
break;
- case AMAL_OP_CMP:
+ case AMAL_OP_CMP: {
+ self->reg[(u8)self->data.data[self->read_index]] =
+ self->reg[(u8)self->data.data[self->read_index + 1]] == self->reg[(u8)self->data.data[self->read_index + 2]];
self->read_index += 3;
break;
- case AMAL_OP_JZ:
+ }
+ case AMAL_OP_JZ: {
+ #if 0
+ u8 reg;
+ i16 jump_offset;
+ u32 jump_target;
+
+ reg = (u8)self->data.data[self->read_index];
+ am_memcpy(&jump_offset, &self->data.data[self->read_index + 1], sizeof(jump_offset));
+ jump_target = (isize)self->read_index + jump_offset;
+ if(jump_target < read_start || jump_target >= read_end)
+ return AMAL_PROGRAM_INSTRUCTION_ILLEGAL_JUMP_TARGET;
+ #endif
self->read_index += 3;
break;
- case AMAL_OP_JMP:
+ }
+ case AMAL_OP_JMP: {
+ #if 0
+ i16 jump_offset;
+ u32 jump_target;
+
+ am_memcpy(&jump_offset, &self->data.data[self->read_index], sizeof(jump_offset));
+ jump_target = (isize)self->read_index + jump_offset;
+ if(jump_target < read_start || jump_target >= read_end)
+ return AMAL_PROGRAM_INSTRUCTION_ILLEGAL_JUMP_TARGET;
+ #endif
self->read_index += 2;
break;
+ }
case AMAL_OP_RET:
+ /* return_if_error(asm_ret(&self->asm, 0)); */
+ assert(bool_false && "TODO: Implement RET. RET needs to restore the stack before returning");
break;
- case AMAL_OP_FUNC_START:
+ case AMAL_OP_FUNC_START: {
+ assert(!inside_func);
+ inside_func = bool_true;
+ am_memcpy(&func_num_registers, &self->data.data[self->read_index], sizeof(func_num_registers));
+ /*
+ TODO: Validate stack size, or maybe remove all validation? do we really need validation?
+ If we need security, we could fork the process instead.
+ */
+
+ /*
+ Some registers need to be preserved before entering a function scope and these registers are different on different platforms.
+ 32-bit: EBX, ESI, EDI, EBP
+ 64-bit Windows: RBX, RSI, RDI, RBP, R12-R15, XMM6-XMM15
+ 64-bit Linux,BSD,Mac: RBX, RBP, R12-R15
+ */
+ return_if_error(asm_pushr(&self->asm, RBX));
+ return_if_error(asm_pushr(&self->asm, RBP));
+ return_if_error(asm_mov_rr(&self->asm, RBP, RSP));
+ return_if_error(asm_sub_rm64_imm(&self->asm, RSP, func_num_registers * sizeof(usize)));
+ self->read_index += 2;
break;
- case AMAL_OP_FUNC_END:
+ }
+ case AMAL_OP_FUNC_END: {
+ assert(inside_func);
+ inside_func = bool_false;
+ /*assert(bool_false && "TODO: Implement FUNC_END");*/
+ /* TODO: Validate FUNC_END is called for every FUNC_START, otherwise stack will be corrupted */
+ /* TODO: Use mov_rr(RSP, RBP) instead? why doesn't gcc do this? */
+ return_if_error(asm_mov_rr(&self->asm, RSP, RBP));
+ return_if_error(asm_popr(&self->asm, RBP));
+ return_if_error(asm_popr(&self->asm, RBX));
+ return_if_error(asm_ret(&self->asm, 0));
break;
+ }
}
}
@@ -266,7 +574,7 @@ int amal_program_run(amal_program *self) {
return_if_error(amal_program_read_strings(self));
return_if_error(amal_program_read_instructions(self));
}
- return AMAL_PROGRAM_OK;
+ return asm_execute(&self->asm);
}
int amal_program_save(amal_program *self, const char *filepath) {
diff --git a/src/ssa/ssa.c b/src/ssa/ssa.c
index 6e61bf1..b95cd10 100644
--- a/src/ssa/ssa.c
+++ b/src/ssa/ssa.c
@@ -50,7 +50,7 @@ SsaNumber create_ssa_float(f64 value) {
return result;
}
-int ssa_init(Ssa *self, ScopedAllocator *allocator) {
+int ssa_init(Ssa *self, ArenaAllocator *allocator) {
return_if_error(buffer_init(&self->instructions, allocator));
return_if_error(hash_map_init(&self->intermediates_map, allocator, sizeof(SsaIntermediateIndex), compare_number, hash_number));
return_if_error(buffer_init(&self->intermediates, allocator));
@@ -207,7 +207,7 @@ static CHECK_RESULT int ssa_ins_binop(Ssa *self, SsaInstruction binop_type, SsaR
return ssa_add_ins_form2(self, binop_type, lhs, rhs, result);
}
-static CHECK_RESULT int ssa_ins_func_start(Ssa *self, u8 num_args, SsaFuncIndex *result) {
+static CHECK_RESULT int ssa_ins_func_start(Ssa *self, SsaFuncIndex *result, usize *func_metadata_index) {
usize index;
index = self->instructions.size;
@@ -215,12 +215,13 @@ static CHECK_RESULT int ssa_ins_func_start(Ssa *self, u8 num_args, SsaFuncIndex
if(self->func_counter + 1 < self->func_counter)
return -1;
- return_if_error(buffer_append_empty(&self->instructions, sizeof(u8) + sizeof(SsaFuncIndex) + sizeof(u8)));
+ return_if_error(buffer_append_empty(&self->instructions, sizeof(u8) + sizeof(SsaFuncIndex) + sizeof(u16)));
*result = self->func_counter++;
self->instructions.data[index + 0] = SSA_FUNC_START;
am_memcpy(self->instructions.data + index + 1, result, sizeof(SsaFuncIndex));
- self->instructions.data[index + 1 + sizeof(SsaFuncIndex)] = num_args;
- amal_log_debug("FUNC_START f%u(%u)", *result, num_args);
+ *func_metadata_index = index + 1 + sizeof(SsaFuncIndex);
+ /* No need to add data to instructions.data here, it can contain undefined data until we set it (@ the caller) */
+ amal_log_debug("FUNC_START f%u", *result);
return 0;
}
@@ -363,9 +364,9 @@ static CHECK_RESULT SsaRegister lhsexpr_generate_ssa(Ast *self, SsaCompilerConte
} else {
 /* TODO: Do not assign if we don't want default value */
SsaNumber number;
- if(self->resolve_data.type == context->compiler->default_types.i64)
+ if(self->resolve_data.type == (LhsExpr*)context->compiler->default_types.i64)
number = create_ssa_integer(0);
- else if(self->resolve_data.type == context->compiler->default_types.f64)
+ else if(self->resolve_data.type == (LhsExpr*)context->compiler->default_types.f64)
number = create_ssa_float(0.0);
else
assert(bool_false && "TODO: assign default value to reg depending on LhsExpr type");
@@ -399,23 +400,24 @@ static CHECK_RESULT SsaRegister funcdecl_generate_ssa(FunctionDecl *self, SsaCom
that is reset after function end
*/
SsaRegister prev_reg_counter;
+ usize func_metadata_index;
prev_reg_counter = context->ssa->reg_counter;
context->ssa->reg_counter = 0;
amal_log_debug("SSA funcdecl %p", self);
- throw_if_error(ssa_ins_func_start(context->ssa, 0, &self->ssa_func_index));
+ throw_if_error(ssa_ins_func_start(context->ssa, &self->ssa_func_index, &func_metadata_index));
scope_generate_ssa(&self->body, context);
throw_if_error(ssa_ins_func_end(context->ssa));
+ /* Add the number of registers used to the function metadata (FUNC_START) */
+ am_memcpy(&context->ssa->instructions.data[func_metadata_index], &context->ssa->reg_counter, sizeof(u16));
context->ssa->reg_counter = prev_reg_counter;
- /*assert(bool_false);*/
return 0;
}
static CHECK_RESULT SsaRegister funccall_generate_ssa(Ast *self, SsaCompilerContext *context) {
/* TODO: Implement */
FunctionCall *func_call;
- FunctionDecl *func_to_call;
Ast **ast;
Ast **ast_end;
SsaRegister reg;
@@ -430,14 +432,19 @@ static CHECK_RESULT SsaRegister funccall_generate_ssa(Ast *self, SsaCompilerCont
throw_if_error(ssa_ins_push(context->ssa, arg_reg));
}
- assert(self->resolve_data.type->rhs_expr->type == AST_FUNCTION_DECL);
- func_to_call = self->resolve_data.type->rhs_expr->value.func_decl;
- /*
- TODO: Implement func reference instead of using 0. Perhaps the best way is to use function declaration pointer value?
- then there is no need for mutex locks.
- */
- amal_log_debug("SSA funccall %.*s, func index ptr: %p", func_call->func.name.size, func_call->func.name.data, func_to_call);
- throw_if_error(ssa_ins_call(context->ssa, func_to_call, &reg));
+ assert((self->resolve_data.type->rhs_expr && self->resolve_data.type->rhs_expr->type == AST_FUNCTION_DECL) ||
+ self->resolve_data.type->type.type == VARIABLE_TYPE_SIGNATURE);
+ if(self->resolve_data.type->is_extern) {
+ amal_log_error("TODO: Implement extern function call (extern function %.*s was called)", func_call->func.name.size, func_call->func.name.data);
+ reg = 0;
+ assert(bool_false && "TODO: Implement extern function call!");
+ } else {
+ FunctionDecl *func_to_call;
+ func_to_call = self->resolve_data.type->rhs_expr->value.func_decl;
+ amal_log_debug("SSA funccall %.*s, func index ptr: %p", func_call->func.name.size, func_call->func.name.data, func_to_call);
+ throw_if_error(ssa_ins_call(context->ssa, func_to_call, &reg));
+ }
+
return reg;
}
@@ -470,16 +477,16 @@ static CHECK_RESULT SsaRegister variable_generate_ssa(Variable *self, SsaCompile
return ast_generate_ssa(self->resolved_var, context);
}
-static SsaInstruction binop_type_to_ssa_type(BinopType binop_type) {
+static SsaInstruction binop_type_to_ssa_type(BinopType binop_type, amal_default_type *type) {
switch(binop_type) {
case BINOP_ADD:
return SSA_ADD;
case BINOP_SUB:
return SSA_SUB;
case BINOP_MUL:
- return SSA_MUL;
+ return type->is_signed ? SSA_IMUL : SSA_MUL;
case BINOP_DIV:
- return SSA_DIV;
+ return type->is_signed ? SSA_IDIV : SSA_DIV;
case BINOP_DOT:
assert(bool_false && "Binop dot not valid for arithmetic operation and requires special functionality");
return 0;
@@ -494,18 +501,23 @@ static CHECK_RESULT SsaRegister binop_generate_ssa(Binop *self, SsaCompilerConte
SsaRegister rhs_reg;
SsaRegister reg;
+ /*
+ const std = @import("std.amal");
+ std.printf
+ */
if(self->type == BINOP_DOT && self->rhs->resolve_data.type->rhs_expr->type == AST_FUNCTION_DECL) {
reg = ast_generate_ssa(self->rhs, context);
} else {
lhs_reg = ast_generate_ssa(self->lhs, context);
rhs_reg = ast_generate_ssa(self->rhs, context);
- throw_if_error(ssa_ins_binop(context->ssa, binop_type_to_ssa_type(self->type), lhs_reg, rhs_reg, &reg));
+ throw_if_error(ssa_ins_binop(context->ssa, binop_type_to_ssa_type(self->type, (amal_default_type*)self->lhs->resolve_data.type), lhs_reg, rhs_reg, &reg));
}
return reg;
}
static void else_if_statement_generate_ssa(ElseIfStatement *else_if_stmt, SsaCompilerContext *context) {
usize jump_ins_index;
+ jump_ins_index = 0;
if(else_if_stmt->condition) {
SsaRegister condition_reg;
condition_reg = ast_generate_ssa(else_if_stmt->condition, context);
diff --git a/src/std/scoped_allocator.c b/src/std/arena_allocator.c
index d8acbf6..20f0394 100644
--- a/src/std/scoped_allocator.c
+++ b/src/std/arena_allocator.c
@@ -1,4 +1,4 @@
-#include "../../include/std/scoped_allocator.h"
+#include "../../include/std/arena_allocator.h"
#include "../../include/std/alloc.h"
#include "../../include/std/thread.h"
#include "../../include/std/log.h"
@@ -6,31 +6,31 @@
#define ALLOC_NODE_SIZE 4096
-int scoped_allocator_node_init(ScopedAllocatorNode *self) {
+int arena_allocator_node_init(ArenaAllocatorNode *self) {
self->data = NULL;
self->size = 0;
self->next = NULL;
return am_malloc(ALLOC_NODE_SIZE, (void**)&self->data);
}
-void scoped_allocator_node_deinit(ScopedAllocatorNode *self) {
+void arena_allocator_node_deinit(ArenaAllocatorNode *self) {
am_free(self->data);
self->data = NULL;
self->size = 0;
if(self->next) {
- scoped_allocator_node_deinit(self->next);
+ arena_allocator_node_deinit(self->next);
am_free(self->next);
self->next = NULL;
}
}
-int scoped_allocator_init(ScopedAllocator *self) {
- return_if_error(scoped_allocator_node_init(&self->head));
+int arena_allocator_init(ArenaAllocator *self) {
+ return_if_error(arena_allocator_node_init(&self->head));
self->current = &self->head;
return buffer_init(&self->mems, NULL);
}
-static void scoped_allocator_deinit_buffers(ScopedAllocator *self) {
+static void arena_allocator_deinit_buffers(ArenaAllocator *self) {
void **mem;
void **mems_end;
mem = buffer_begin(&self->mems);
@@ -42,19 +42,19 @@ static void scoped_allocator_deinit_buffers(ScopedAllocator *self) {
buffer_deinit(&self->mems);
}
-void scoped_allocator_deinit(ScopedAllocator *self) {
+void arena_allocator_deinit(ArenaAllocator *self) {
self->current = NULL;
- scoped_allocator_deinit_buffers(self);
- scoped_allocator_node_deinit(&self->head);
+ arena_allocator_deinit_buffers(self);
+ arena_allocator_node_deinit(&self->head);
}
-static CHECK_RESULT int scoped_allocator_ensure_capacity_for(ScopedAllocator *self, usize size) {
+static CHECK_RESULT int arena_allocator_ensure_capacity_for(ArenaAllocator *self, usize size) {
void *new_node;
new_node = NULL;
if(self->current->size + size > ALLOC_NODE_SIZE) {
- return_if_error(am_malloc(sizeof(ScopedAllocatorNode), &new_node));
- cleanup_if_error(scoped_allocator_node_init(new_node));
+ return_if_error(am_malloc(sizeof(ArenaAllocatorNode), &new_node));
+ cleanup_if_error(arena_allocator_node_init(new_node));
self->current->next = new_node;
self->current = new_node;
}
@@ -78,19 +78,19 @@ static usize align_ptr_ceil_offset(void *ptr, uintptr_t alignment) {
#define SCOPED_ALLOC_ALIGNMENT 8
-int scoped_allocator_alloc(ScopedAllocator *self, usize size, void **mem) {
- ScopedAllocatorNode *current;
+int arena_allocator_alloc(ArenaAllocator *self, usize size, void **mem) {
+ ArenaAllocatorNode *current;
usize alloc_size;
assert(self->current);
current = self->current;
if(size >= ALLOC_NODE_SIZE) {
- amal_log_error("scoped_allocator_alloc: tried to alloc memory of size %lu. Max allowed alloc size is %lu", size, ALLOC_NODE_SIZE);
+ amal_log_error("arena_allocator_alloc: tried to alloc memory of size %lu. Max allowed alloc size is %lu", size, ALLOC_NODE_SIZE);
return -1;
}
alloc_size = size + align_ptr_ceil_offset(self->current->data + self->current->size, SCOPED_ALLOC_ALIGNMENT);
- return_if_error(scoped_allocator_ensure_capacity_for(self, alloc_size));
+ return_if_error(arena_allocator_ensure_capacity_for(self, alloc_size));
/* Reallocated (new node created) */
if(self->current != current) {
*mem = self->current->data;
@@ -102,7 +102,7 @@ int scoped_allocator_alloc(ScopedAllocator *self, usize size, void **mem) {
return 0;
}
-int scoped_allocator_add_mem(ScopedAllocator *self, usize *result_index) {
+int arena_allocator_add_mem(ArenaAllocator *self, usize *result_index) {
void *null_data;
null_data = NULL;
*result_index = buffer_get_size(&self->mems, sizeof(void*));
diff --git a/src/std/buffer.c b/src/std/buffer.c
index f4e93e5..0e4ca89 100644
--- a/src/std/buffer.c
+++ b/src/std/buffer.c
@@ -1,16 +1,16 @@
#include "../../include/std/buffer.h"
#include "../../include/std/alloc.h"
#include "../../include/std/mem.h"
-#include "../../include/std/scoped_allocator.h"
+#include "../../include/std/arena_allocator.h"
#include <assert.h>
-int buffer_init(Buffer *self, struct ScopedAllocator *allocator) {
+int buffer_init(Buffer *self, struct ArenaAllocator *allocator) {
self->data = NULL;
self->size = 0;
self->capacity = 0;
self->allocator = allocator;
if(allocator) {
- return scoped_allocator_add_mem(allocator, &self->allocator_index);
+ return arena_allocator_add_mem(allocator, &self->allocator_index);
} else {
self->allocator_index = ~(usize)0;
return 0;
diff --git a/src/std/hash_map.c b/src/std/hash_map.c
index 1ad0dea..bcb43eb 100644
--- a/src/std/hash_map.c
+++ b/src/std/hash_map.c
@@ -1,5 +1,5 @@
#include "../../include/std/hash_map.h"
-#include "../../include/std/scoped_allocator.h"
+#include "../../include/std/arena_allocator.h"
#include "../../include/std/mem.h"
#include <assert.h>
@@ -73,7 +73,7 @@ static void* bucket_node_get_value(HashMapBucketNode *self) {
return value;
}
-int hash_map_init(HashMap *self, ScopedAllocator *allocator, usize value_type_size,
+int hash_map_init(HashMap *self, ArenaAllocator *allocator, usize value_type_size,
HashMapCompare compare_func, HashMapHash hash_func) {
assert(compare_func);
assert(hash_func);
@@ -91,7 +91,7 @@ int hash_map_init(HashMap *self, ScopedAllocator *allocator, usize value_type_si
static CHECK_RESULT int hash_map_bucket_add(HashMap *self, HashMapBucket *bucket, BufferView key, void *value, usize hash) {
HashMapBucketNode *new_bucket_node;
- return_if_error(scoped_allocator_alloc(self->allocator,
+ return_if_error(arena_allocator_alloc(self->allocator,
sizeof(HashMapBucketNode*) + sizeof(hash) + sizeof(u32) + key.size + self->value_type_size,
(void**)&new_bucket_node));
bucket_node_set_next(new_bucket_node, bucket->start);
diff --git a/src/std/mem.c b/src/std/mem.c
index f406176..95edcb9 100644
--- a/src/std/mem.c
+++ b/src/std/mem.c
@@ -1,5 +1,6 @@
#include "../../include/std/mem.h"
#include <string.h>
+#include <unistd.h>
void am_memcpy(void *dest, const void *src, usize size) {
memcpy(dest, src, size);
@@ -16,3 +17,8 @@ bool am_memeql(const void *lhs, const void *rhs, usize size) {
void am_memset(void *dest, int value, usize size) {
memset(dest, value, size);
}
+
+long am_pagesize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
diff --git a/src/tokenizer.c b/src/tokenizer.c
index bce386d..7620fc0 100644
--- a/src/tokenizer.c
+++ b/src/tokenizer.c
@@ -2,7 +2,7 @@
#include "../include/std/mem.h"
#include "../include/std/log.h"
#include "../include/std/thread.h"
-#include "../include/std/scoped_allocator.h"
+#include "../include/std/arena_allocator.h"
#include <assert.h>
#include <limits.h>
#include <stdio.h>
@@ -25,7 +25,7 @@ static int tokenizer_get_end_of_line_from_index(Tokenizer *self, int index);
/* Returns -1 if end of multiline comment was not found */
static int tokenizer_get_end_of_multiline_comment(Tokenizer *self, int index);
-int tokenizer_init(Tokenizer *self, ScopedAllocator *allocator, BufferView code, BufferView code_name, const amal_compiler_options *compiler_options) {
+int tokenizer_init(Tokenizer *self, ArenaAllocator *allocator, BufferView code, BufferView code_name, const amal_compiler_options *compiler_options) {
assert(code.size <= INT_MAX);
assert(compiler_options);
/* Skip UTF-8 BOM */
@@ -415,6 +415,7 @@ static const char* binop_to_string(BinopType binop_type) {
static BufferView tokenizer_expected_token_as_string(Token token) {
const char *str;
+ str = "";
switch(token) {
case TOK_NONE:
str = "none";
@@ -737,7 +738,7 @@ TokenizerError tokenizer_create_error(Tokenizer *self, int index, const char *fm
result.index = index;
result.str = NULL;
- ignore_result_int(scoped_allocator_alloc(self->allocator, bytes_copied + 1, (void**)&result.str));
+ ignore_result_int(arena_allocator_alloc(self->allocator, bytes_copied + 1, (void**)&result.str));
if(result.str && bytes_copied > 0)
am_memcpy(result.str, buffer, bytes_copied + 1);
return result;