#include "../include/compiler.h" #include "../include/parser.h" #include "../include/ssa/ssa.h" #include "../include/bytecode/bytecode.h" #include "../include/std/log.h" #include "../include/std/mem.h" #include "../include/std/hash.h" #include "../include/std/file.h" #include "../include/std/alloc.h" #include #include #include #include static CHECK_RESULT int get_thread_count_env_var(int *thread_count) { char *threads; threads = getenv("THREADS"); if(!threads) return -1; *thread_count = atoi(threads); return 0; } static usize strnlen(const char *str, usize max_length) { usize len; len = 0; while(len < max_length && *str != '\0') { ++len; ++str; } return len; } /* TODO: Allow to specify size and members? */ static CHECK_RESULT int create_default_type(amal_compiler *compiler, const char *name, LhsExpr **lhs_expr) { StructDecl *struct_decl; Ast *expr; return_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(StructDecl), (void**)&struct_decl)); return_if_error(structdecl_init(struct_decl, &compiler->root_scope, &compiler->allocator)); return_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(LhsExpr), (void**)lhs_expr)); return_if_error(lhsexpr_init(*lhs_expr, bool_true, bool_true, create_buffer_view(name, strnlen(name, PATH_MAX)), &compiler->allocator)); return_if_error(ast_create(&compiler->allocator, struct_decl, AST_STRUCT_DECL, &(*lhs_expr)->rhs_expr)); return_if_error(ast_create(&compiler->allocator, *lhs_expr, AST_LHS, &expr)); expr->resolve_data.type = *lhs_expr; expr->resolve_data.status = AST_RESOLVED; return scope_add_child(&compiler->root_scope, expr); } static CHECK_RESULT int init_default_types(amal_compiler *compiler) { return_if_error(create_default_type(compiler, "i8", &compiler->default_types.i8)); return_if_error(create_default_type(compiler, "i16", &compiler->default_types.i16)); return_if_error(create_default_type(compiler, "i32", &compiler->default_types.i32)); return_if_error(create_default_type(compiler, "i64", &compiler->default_types.i64)); return_if_error(create_default_type(compiler, "u8", &compiler->default_types.u8)); return_if_error(create_default_type(compiler, "u16", &compiler->default_types.u16)); return_if_error(create_default_type(compiler, "u32", &compiler->default_types.u32)); return_if_error(create_default_type(compiler, "u64", &compiler->default_types.u64)); return_if_error(create_default_type(compiler, "isize", &compiler->default_types.isize)); return_if_error(create_default_type(compiler, "usize", &compiler->default_types.usize)); return_if_error(create_default_type(compiler, "f32", &compiler->default_types.f32)); return_if_error(create_default_type(compiler, "f64", &compiler->default_types.f64)); return_if_error(create_default_type(compiler, "str", &compiler->default_types.str)); return 0; } void amal_compiler_options_init(amal_compiler_options *self) { self->error_callback = NULL; self->error_callback_userdata = NULL; } int amal_compiler_init(amal_compiler *self, const amal_compiler_options *options) { int i; int result; result = get_thread_count_env_var(&self->usable_thread_count); if(result != 0) { self->usable_thread_count = amal_get_usable_thread_count(); if(self->usable_thread_count == 0) { amal_log_warning("Unable to get the number of threads available on the system, using 1 thread."); amal_log_warning("You can override the number of threads using by setting the environment variable THREADS"); self->usable_thread_count = 1; } } else if(self->usable_thread_count <= 0) { amal_log_error("Environment variable THREADS contains 
    am_memset(&self->allocator, 0, sizeof(self->allocator));
    am_memset(&self->root_scope, 0, sizeof(self->root_scope));
    if(options)
        am_memcpy(&self->options, options, sizeof(self->options));
    else
        am_memset(&self->options, 0, sizeof(self->options));
    self->started = bool_false;
    self->used = bool_false;
    self->generic_work_object_index = 0;
    amal_mutex_init(&self->mutex);

    return_if_error(scoped_allocator_init(&self->allocator));
    cleanup_if_error(scope_init(&self->root_scope, NULL, &self->allocator));
    cleanup_if_error(buffer_init(&self->parsers, &self->allocator));
    cleanup_if_error(buffer_init(&self->queued_files, &self->allocator));
    cleanup_if_error(hash_map_init(&self->file_scopes, &self->allocator, sizeof(FileScopeReference*),
                                   hash_compare_string, amal_hash_string));
    cleanup_if_error(scoped_allocator_alloc(&self->allocator, self->usable_thread_count * sizeof(ParserThreadData),
                                            (void**)&self->threads));
    for(i = 0; i < self->usable_thread_count; ++i)
        cleanup_if_error(parser_thread_data_init(&self->threads[i]));
    cleanup_if_error(init_default_types(self));
    return AMAL_COMPILER_OK;

    cleanup:
    ignore_result_int(amal_compiler_deinit(self));
    return AMAL_COMPILER_ERR;
}

int amal_compiler_deinit(amal_compiler *self) {
    int i;
    int result;
    result = AMAL_COMPILER_OK;

    for(i = 0; i < self->usable_thread_count; ++i) {
        int r;
        r = parser_thread_data_deinit(&self->threads[i]);
        if(r != 0)
            result = r;
    }

    amal_mutex_deinit(&self->mutex);
    scoped_allocator_deinit(&self->allocator);
    return result;
}

typedef enum {
    THREAD_WORK_PARSE,
    THREAD_WORK_RESOLVE_AST,
    THREAD_WORK_GENERATE_SSA,
    THREAD_WORK_GENERATE_BYTECODE
} ThreadWorkType;

typedef struct {
    amal_compiler *compiler;
    ParserThreadData *parser_thread_data;
    FileScopeReference *file_scope;
} CompilerParserThreadUserData;

typedef struct {
    amal_compiler *compiler;
    ParserThreadData *parser_thread_data;
    Parser *parser;
    ThreadWorkType work_type;
} CompilerGenericThreadUserData;

typedef struct {
    union {
        FileScopeReference *file_scope;
        Parser *parser;
    } value;
    ThreadWorkType type;
} ThreadWorkData;

static CHECK_RESULT int amal_compiler_load_in_this_thread(amal_compiler *compiler, FileScopeReference *file_scope, ScopedAllocator *allocator) {
    Parser *parser;
    int result;
    BufferView filepath;

    result = AMAL_COMPILER_ERR;
    filepath = create_buffer_view(file_scope->canonical_path.data, file_scope->canonical_path.size);
    amal_log_info("Started parsing %.*s", filepath.size, filepath.data);

    return_if_error(scoped_allocator_alloc(allocator, sizeof(Parser), (void**)&parser));
    return_if_error(parser_init(parser, compiler, allocator));
    file_scope->parser = parser;
    return_if_error(parser_parse_file(parser, filepath));

    cleanup_if_error(amal_mutex_lock(&compiler->mutex, "amal_compiler_load_in_this_thread"));
    cleanup_if_error(buffer_append(&compiler->parsers, &parser, sizeof(parser)));
    amal_log_info("Finished parsing %.*s", filepath.size, filepath.data);
    result = AMAL_COMPILER_OK;

    cleanup:
    amal_mutex_tryunlock(&compiler->mutex);
    return result;
}

/* TODO: Handle errors (stop parsing in all other threads and report errors/warnings) */
static void* thread_callback_parse_file(void *userdata) {
    FileScopeReference *file_scope;
    CompilerParserThreadUserData compiler_parser_userdata;
    void *result;

    assert(!amal_thread_is_main());
    am_memcpy(&compiler_parser_userdata, userdata, sizeof(compiler_parser_userdata));
    am_free(userdata);
    file_scope = compiler_parser_userdata.file_scope;
    result = (void*)AMAL_COMPILER_ERR;

    for(;;) {
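        /*
            Parse the file assigned to this thread, then keep pulling queued files
            from compiler->queued_files until the queue is empty.
        */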
        int has_next;
        cleanup_if_error(amal_compiler_load_in_this_thread(compiler_parser_userdata.compiler, file_scope,
                                                           &compiler_parser_userdata.parser_thread_data->allocator));
        cleanup_if_error(amal_mutex_lock(&compiler_parser_userdata.compiler->mutex, "thread_callback_parse_file"));
        has_next = buffer_pop(&compiler_parser_userdata.compiler->queued_files, &file_scope, sizeof(FileScopeReference*));
        amal_mutex_tryunlock(&compiler_parser_userdata.compiler->mutex);
        if(has_next != 0)
            break;
    }
    result = NULL;

    cleanup:
    /*
        To stop all other parsers from working cleanly, we simply clear the file queue,
        and the other threads will stop when they are done with the file they are
        currently parsing.
    */
    if(result != NULL) {
        ignore_result_int(amal_mutex_lock(&compiler_parser_userdata.compiler->mutex, "thread_callback_parse_file"));
        buffer_clear(&compiler_parser_userdata.compiler->queued_files);
    }
    compiler_parser_userdata.parser_thread_data->status = PARSER_THREAD_STATUS_IDLE;
    amal_mutex_tryunlock(&compiler_parser_userdata.compiler->mutex);
    return result;
}

static CHECK_RESULT int thread_resolve_ast(amal_compiler *compiler, Parser *parser) {
    AstCompilerContext compiler_context;
    int result;

    compiler_context.compiler = compiler;
    compiler_context.scope = NULL;

    result = setjmp(compiler_context.env);
    if(result == 0) {
        amal_log_debug("Resolving AST for file: %.*s", parser->tokenizer.code_name.size, parser->tokenizer.code_name.data);
        scope_resolve(&parser->struct_decl.body, &compiler_context);
    }
    return result;
}

static CHECK_RESULT int thread_generate_ssa(Parser *parser) {
    SsaCompilerContext compiler_context;
    int result;

    return_if_error(scoped_allocator_alloc(parser->allocator, sizeof(Ssa), (void**)&compiler_context.ssa));
    return_if_error(ssa_init(compiler_context.ssa, parser->allocator));
    parser->ssa = compiler_context.ssa;
    amal_log_debug("Generating SSA for file: %.*s", parser->tokenizer.code_name.size, parser->tokenizer.code_name.data);

    result = setjmp(compiler_context.env);
    if(result == 0)
        scope_generate_ssa(&parser->struct_decl.body, &compiler_context);
    return result;
}

static CHECK_RESULT int thread_generate_bytecode(Parser *parser) {
    BytecodeCompilerContext compiler_context;
    int result;

    return_if_error(scoped_allocator_alloc(parser->allocator, sizeof(Bytecode), (void**)&compiler_context.bytecode));
    return_if_error(bytecode_init(compiler_context.bytecode, parser->allocator));
    compiler_context.parser = parser;
    amal_log_debug("Generating bytecode for file: %.*s", parser->tokenizer.code_name.size, parser->tokenizer.code_name.data);

    result = setjmp(compiler_context.env);
    if(result == 0)
        generate_bytecode_from_ssa(&compiler_context);
    return result;
}

/* TODO: Handle errors (stop work in all other threads and report errors/warnings) */
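/*
    Generic worker: runs one post-parse stage (AST resolution, SSA generation or
    bytecode generation) on its assigned parser, then keeps claiming the next
    parser via compiler->generic_work_object_index until all parsers have been
    processed.
*/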
static void* thread_callback_generic(void *userdata) {
    CompilerGenericThreadUserData compiler_userdata;
    Parser *parser;
    void *result;

    assert(!amal_thread_is_main());
    am_memcpy(&compiler_userdata, userdata, sizeof(compiler_userdata));
    am_free(userdata);
    parser = compiler_userdata.parser;
    result = (void*)AMAL_COMPILER_ERR;

    for(;;) {
        /* TODO: stop work in all other threads on failure */
        switch(compiler_userdata.work_type) {
            case THREAD_WORK_PARSE: {
                assert(bool_false && "Thread work type can't be 'parse' for generic work");
                break;
            }
            case THREAD_WORK_RESOLVE_AST:
                cleanup_if_error(thread_resolve_ast(compiler_userdata.compiler, parser));
                break;
            case THREAD_WORK_GENERATE_SSA:
                cleanup_if_error(thread_generate_ssa(parser));
                break;
            case THREAD_WORK_GENERATE_BYTECODE:
                cleanup_if_error(thread_generate_bytecode(parser));
                break;
        }

        cleanup_if_error(amal_mutex_lock(&compiler_userdata.compiler->mutex, "thread_callback_generic"));
        if(compiler_userdata.compiler->generic_work_object_index + 1 >= (int)buffer_get_size(&compiler_userdata.compiler->parsers, Parser*))
            break;
        ++compiler_userdata.compiler->generic_work_object_index;
        parser = *(Parser**)buffer_get(&compiler_userdata.compiler->parsers, compiler_userdata.compiler->generic_work_object_index, sizeof(parser));
        amal_mutex_tryunlock(&compiler_userdata.compiler->mutex);
    }
    result = NULL;

    cleanup:
    /*
        To stop all other worker threads cleanly, we simply say we are done with all
        the work in the queue, and the other threads will stop when they are done
        with the work they are currently working on.
    */
    if(result != NULL) {
        cleanup_if_error(amal_mutex_lock(&compiler_userdata.compiler->mutex, "thread_callback_generic"));
        compiler_userdata.compiler->generic_work_object_index = (int)buffer_get_size(&compiler_userdata.compiler->parsers, Parser*);
    }
    compiler_userdata.parser_thread_data->status = PARSER_THREAD_STATUS_IDLE;
    amal_mutex_tryunlock(&compiler_userdata.compiler->mutex);
    return result;
}

static CHECK_RESULT int amal_compiler_select_thread_for_work(amal_compiler *self, ThreadWorkData work_data, ParserThreadData **thread_selected) {
    int i;
    int result;
    ParserThreadData *parser_thread_data;
    void *thread_user_data;

    thread_user_data = NULL;
    *thread_selected = NULL;
    result = AMAL_COMPILER_OK;
    cleanup_if_error(amal_mutex_lock(&self->mutex, "amal_compiler_select_thread_for_work"));

    for(i = 0; i < self->usable_thread_count; ++i) {
        parser_thread_data = &self->threads[i];
        if(parser_thread_data->status == PARSER_THREAD_STATUS_RUNNING)
            continue;

        switch(work_data.type) {
            case THREAD_WORK_PARSE: {
                CompilerParserThreadUserData *userdata;
                cleanup_if_error(am_malloc(sizeof(CompilerParserThreadUserData), (void**)&userdata));
                thread_user_data = userdata;
                userdata->compiler = self;
                userdata->parser_thread_data = parser_thread_data;
                userdata->file_scope = work_data.value.file_scope;
                result = parser_thread_data_start(parser_thread_data, thread_callback_parse_file, userdata);
                break;
            }
            case THREAD_WORK_RESOLVE_AST:
            case THREAD_WORK_GENERATE_SSA:
            case THREAD_WORK_GENERATE_BYTECODE: {
                CompilerGenericThreadUserData *userdata;
                cleanup_if_error(am_malloc(sizeof(CompilerGenericThreadUserData), (void**)&userdata));
                thread_user_data = userdata;
                userdata->compiler = self;
                userdata->parser_thread_data = parser_thread_data;
                userdata->parser = work_data.value.parser;
                userdata->work_type = work_data.type;
                ++self->generic_work_object_index;
                result = parser_thread_data_start(parser_thread_data, thread_callback_generic, userdata);
                break;
            }
        }
        *thread_selected = parser_thread_data;
        break;
    }

    cleanup:
    if(result != 0)
        am_free(thread_user_data);
    amal_mutex_tryunlock(&self->mutex);
    return result;
}

static CHECK_RESULT bool amal_compiler_check_all_threads_done(amal_compiler *self) {
    int i;
    bool result;
    result = bool_false;

    cleanup_if_error(amal_mutex_lock(&self->mutex, "amal_compiler_check_all_threads_done"));
    for(i = 0; i < self->usable_thread_count; ++i) {
        ParserThreadData *parser_thread_data;
        parser_thread_data = &self->threads[i];
        if(parser_thread_data->status == PARSER_THREAD_STATUS_RUNNING) {
            goto cleanup;
        }
    }
    result = bool_true;

    cleanup:
    amal_mutex_tryunlock(&self->mutex);
    return result;
}
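
/*
    Joins the worker threads from the main thread. The join loop repeats until
    amal_compiler_check_all_threads_done confirms that nothing is still running,
    because a thread that is being joined may have started new work on another
    thread in the meantime.
*/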
static CHECK_RESULT int amal_compiler_load_file_join_threads(amal_compiler *self) {
    int i;
    int result;
    void *thread_return_data;
    ParserThreadData *parser_thread_data;
    bool work_failed;

    assert(amal_thread_is_main());
    thread_return_data = NULL;
    work_failed = bool_false;

    for(;;) {
        bool done;
        /*
            Join the running threads. After checking one running thread another one might
            start up, so this is mostly a way to wait for threads to finish and to sleep
            without doing work. The check after that (amal_compiler_check_all_threads_done)
            verifies that all threads really are done.
        */
        for(i = 0; i < self->usable_thread_count; ++i) {
            result = amal_mutex_lock(&self->mutex, "amal_compiler_load_file_join_threads, waiting for workers");
            parser_thread_data = &self->threads[i];
            amal_mutex_tryunlock(&self->mutex);
            if(result != 0)
                goto cleanup;

            /* TODO: Cleanup remaining threads if join fails */
            cleanup_if_error(parser_thread_data_join(parser_thread_data, &thread_return_data));
            if(thread_return_data != NULL) {
                /* TODO: Somehow exit running jobs */
                amal_log_error("Failed, waiting for jobs to finish");
                work_failed = bool_true;
            }
        }

        done = amal_compiler_check_all_threads_done(self);
        if(done)
            break;
    }
    result = AMAL_COMPILER_OK;

    cleanup:
    if(work_failed)
        result = AMAL_COMPILER_ERR;
    return result;
}

static CHECK_RESULT int amal_compiler_dispatch_generic(amal_compiler *self, ThreadWorkType work_type) {
    Parser **parser;
    Parser **parser_end;
    parser = buffer_begin(&self->parsers);
    parser_end = buffer_end(&self->parsers);
    self->generic_work_object_index = 0;

    for(; parser != parser_end; ++parser) {
        ParserThreadData *thread_selected;
        ThreadWorkData thread_work_data;
        thread_work_data.type = work_type;
        thread_work_data.value.parser = *parser;
        return_if_error(amal_compiler_select_thread_for_work(self, thread_work_data, &thread_selected));
        /*
            After all threads have been used, they will handle the remaining parsers
            themselves, or stop if there is an error.
        */
        if(!thread_selected)
            break;
    }
    return amal_compiler_load_file_join_threads(self);
}
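
/*
    Resolves @filepath to its canonical path and looks it up in compiler->file_scopes.
    A new FileScopeReference is created and inserted only if the file has not been
    seen before; *new_entry tells the caller which case occurred.
*/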
static CHECK_RESULT int try_create_file_scope(amal_compiler *compiler, const char *filepath, FileScopeReference **file_scope, bool *new_entry) {
    int ret;
    char *result_path;
    usize result_path_size;
    BufferView path_view;

    ret = -1;
    result_path = NULL;
    *new_entry = bool_false;

    /* TODO: Optimize. No need to allocate every time... */
    return_if_error(file_get_canonical_path(filepath, &result_path, &result_path_size));
    path_view = create_buffer_view(result_path, result_path_size);

    cleanup_if_error(amal_mutex_lock(&compiler->mutex, "try_create_file_scope"));
    if(!hash_map_get(&compiler->file_scopes, path_view, file_scope)) {
        cleanup_if_error(scoped_allocator_alloc(&compiler->allocator, sizeof(FileScopeReference), (void**)file_scope));
        /*
            @(*file_scope)->canonical_path won't change after this, so it's fine if the
            allocator belongs to the non-thread-safe compiler instance.
        */
        cleanup_if_error(file_scope_reference_init(*file_scope, path_view, &compiler->allocator));
        cleanup_if_error(hash_map_insert(&compiler->file_scopes, path_view, file_scope));
        *new_entry = bool_true;
    }
    ret = 0;

    cleanup:
    amal_mutex_tryunlock(&compiler->mutex);
    am_free(result_path);
    return ret;
}

int amal_compiler_load_file(amal_compiler *self, const char *filepath) {
    FileScopeReference *file_scope;
    if(self->used)
        return AMAL_COMPILER_ERR;
    self->used = bool_true;
    return amal_compiler_internal_load_file(self, filepath, &file_scope);
}

int amal_compiler_internal_load_file(amal_compiler *self, const char *filepath, FileScopeReference **file_scope) {
    int result;
    BufferView filepath_view;
    ParserThreadData *parser_thread_data;
    ThreadWorkData thread_work_data;
    bool main_job;
    bool new_entry;

    return_if_error(try_create_file_scope(self, filepath, file_scope, &new_entry));
    assert(file_scope && *file_scope && (*file_scope)->canonical_path.data);
    filepath_view = create_buffer_view((*file_scope)->canonical_path.data, (*file_scope)->canonical_path.size);
    if(!new_entry) {
        amal_log_info("amal_compiler_load_file: file already parsed: %.*s", filepath_view.size, filepath_view.data);
        return 0;
    }

    result = AMAL_COMPILER_ERR;
    thread_work_data.type = THREAD_WORK_PARSE;
    thread_work_data.value.file_scope = *file_scope;
    main_job = bool_false;

    /* The first time we get here, this will run single-threaded, so this part doesn't need the mutex */
    if(!self->started) {
        self->started = bool_true;
        main_job = bool_true;
    }
    return_if_error(amal_compiler_select_thread_for_work(self, thread_work_data, &parser_thread_data));

    if(main_job) {
        return_if_error(amal_compiler_load_file_join_threads(self));
        assert(amal_compiler_check_all_threads_done(self));
        amal_log_info("Finished parsing all files, resolving AST");

        return_if_error(amal_compiler_dispatch_generic(self, THREAD_WORK_RESOLVE_AST));
        assert(amal_compiler_check_all_threads_done(self));
        amal_log_info("Finished resolving AST, generating SSA");

        return_if_error(amal_compiler_dispatch_generic(self, THREAD_WORK_GENERATE_SSA));
        assert(amal_compiler_check_all_threads_done(self));
        amal_log_info("Finished generating SSA");

        return_if_error(amal_compiler_dispatch_generic(self, THREAD_WORK_GENERATE_BYTECODE));
        assert(amal_compiler_check_all_threads_done(self));
        amal_log_info("Finished generating bytecode");
        return AMAL_COMPILER_OK;
    }

    if(parser_thread_data)
        return AMAL_COMPILER_OK;

    cleanup_if_error(amal_mutex_lock(&self->mutex, "amal_compiler_load_file"));
    cleanup_if_error(buffer_append(&self->queued_files, file_scope, sizeof(FileScopeReference*)));
    result = AMAL_COMPILER_OK;

    cleanup:
    amal_mutex_tryunlock(&self->mutex);
    return result;
}
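
/*
    Illustrative usage sketch (not part of the compiler itself): the typical call
    sequence for the public entry points defined in this file. The source file name
    is a placeholder and error handling is kept minimal.

        amal_compiler compiler;
        amal_compiler_options options;

        amal_compiler_options_init(&options);
        if(amal_compiler_init(&compiler, &options) != AMAL_COMPILER_OK)
            return AMAL_COMPILER_ERR;
        if(amal_compiler_load_file(&compiler, "main.amal") != AMAL_COMPILER_OK) {
            ignore_result_int(amal_compiler_deinit(&compiler));
            return AMAL_COMPILER_ERR;
        }
        return amal_compiler_deinit(&compiler);
*/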