author    dec05eba <dec05eba@protonmail.com>    2019-03-07 22:19:57 +0100
committer dec05eba <dec05eba@protonmail.com>    2020-07-25 14:36:46 +0200
commit    27718f093689dbd3decd7021eaa97327f578c8f3 (patch)
tree      c41ab4bb5727b22be35c1237279cfdfec0a27561 /src/std
parent    81b6004928015ced29b0b949e35753977aa17606 (diff)
Add hash map
Diffstat (limited to 'src/std')
-rw-r--r--    src/std/buffer.c      7
-rw-r--r--    src/std/hash.c       14
-rw-r--r--    src/std/hash_map.c  191
-rw-r--r--    src/std/mem.c         6
-rw-r--r--    src/std/thread.c      4
5 files changed, 218 insertions, 4 deletions
diff --git a/src/std/buffer.c b/src/std/buffer.c
index 1fdcfd4..e631153 100644
--- a/src/std/buffer.c
+++ b/src/std/buffer.c
@@ -39,9 +39,10 @@ static CHECK_RESULT int buffer_ensure_capacity(Buffer *self, usize new_capacity)
return BUFFER_OK;
}
-int buffer_append(Buffer *self, void *data, usize size) {
+int buffer_append(Buffer *self, const void *data, usize size) {
return_if_error(buffer_ensure_capacity(self, self->size + size));
- am_memcpy(self->data + self->size, data, size);
+ if(data)
+ am_memcpy(self->data + self->size, data, size);
self->size += size;
return BUFFER_OK;
}
@@ -72,4 +73,4 @@ void *buffer_end(Buffer *self) {
usize __buffer_get_size(Buffer *self, usize type_size) {
assert(type_size != 0);
return self->size / type_size;
-} \ No newline at end of file
+}
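
With this change buffer_append accepts a NULL data pointer, which grows the buffer without copying anything; hash_map_init below relies on that to reserve its initial bucket array and then clears the bytes itself. A minimal sketch of the pattern, not part of this commit (the includes and the Buffer setup are assumed to come from the project's headers):

    /* Reserve and zero "bytes" extra bytes at the end of an already initialized Buffer */
    static int reserve_zeroed(Buffer *buffer, usize bytes) {
        usize old_size;
        old_size = buffer->size;
        /* NULL data: the buffer grows by "bytes", but nothing is copied into it */
        return_if_error(buffer_append(buffer, NULL, bytes));
        /* The reserved bytes are uninitialized, so the caller clears them */
        am_memset(buffer->data + old_size, 0, bytes);
        return 0;
    }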
diff --git a/src/std/hash.c b/src/std/hash.c
new file mode 100644
index 0000000..603149a
--- /dev/null
+++ b/src/std/hash.c
@@ -0,0 +1,14 @@
+#include "../../include/std/hash.h"
+
+usize amal_hash_string(const u8 *data, usize size) {
+ usize result;
+ result = 0xdec05eba;
+
+ while(size) {
+ result ^= *data;
+ ++data;
+ --size;
+ }
+
+ return result;
+}
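
Note that the XOR-only fold above changes nothing but the low byte of the 0xdec05eba seed, so at most 256 distinct hash values are possible and any permutation of the same bytes collides; worth keeping in mind next to the "TODO: Improve performance" note in hash_map.c. A common alternative with the same signature is an FNV-1a style fold. Sketch only, not what this commit implements, and the constants assume a 64-bit usize:

    usize hash_fnv1a(const u8 *data, usize size) {
        usize result;
        result = (usize)0xcbf29ce484222325ULL; /* FNV-1a offset basis (64-bit) */
        while(size) {
            result ^= *data;                   /* mix in one byte... */
            result *= (usize)0x100000001b3ULL; /* ...then multiply by the FNV prime */
            ++data;
            --size;
        }
        return result;
    }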
diff --git a/src/std/hash_map.c b/src/std/hash_map.c
new file mode 100644
index 0000000..1ffca90
--- /dev/null
+++ b/src/std/hash_map.c
@@ -0,0 +1,191 @@
+#include "../../include/std/hash_map.h"
+#include "../../include/std/scoped_allocator.h"
+#include "../../include/std/mem.h"
+#include <assert.h>
+
+/*
+Basic hash map implementation. TODO: Improve performance
+*/
+
+#define HASH_MAP_INITIAL_SIZE 16
+
+typedef struct HashMapBucketNode HashMapBucketNode;
+
+typedef struct {
+ HashMapBucketNode *start;
+} HashMapBucket;
+
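+/*
+A bucket node is a raw allocation with the layout:
+[HashMapBucketNode *next][u32 key_size][key bytes][value bytes (type_size)]
+It is only read and written through am_memcpy at the offsets computed below.
+*/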
+static void bucket_node_set_next(HashMapBucketNode *self, HashMapBucketNode *next) {
+ am_memcpy(self, &next, sizeof(next));
+}
+
+static HashMapBucketNode* bucket_node_get_next(HashMapBucketNode *self) {
+ HashMapBucketNode *next;
+ am_memcpy(&next, self, sizeof(HashMapBucketNode*));
+ return next;
+}
+
+static void bucket_node_set_key(HashMapBucketNode *self, BufferView key) {
+ u32 key_size;
+ key_size = (u32)key.size;
+ am_memcpy((char*)self + sizeof(HashMapBucketNode*), &key_size, sizeof(u32));
+ am_memcpy((char*)self + sizeof(HashMapBucketNode*) + sizeof(u32), key.data, key_size);
+}
+
+static BufferView bucket_node_get_key(HashMapBucketNode *self) {
+ BufferView key;
+ u32 key_size;
+ am_memcpy(&key_size, (char*)self + sizeof(HashMapBucketNode*), sizeof(u32));
+ key.size = key_size;
+ key.data = (char*)self + sizeof(HashMapBucketNode*) + sizeof(u32);
+ return key;
+}
+
+static void bucket_node_set_value(HashMapBucketNode *self, void *value, usize type_size) {
+ u32 key_size;
+ am_memcpy(&key_size, (char*)self + sizeof(HashMapBucketNode*), sizeof(key_size));
+ am_memcpy((char*)self + sizeof(HashMapBucketNode*) + sizeof(u32) + key_size, value, type_size);
+}
+
+static void* bucket_node_get_value(HashMapBucketNode *self) {
+ u32 key_size;
+ void *value;
+ am_memcpy(&key_size, (char*)self + sizeof(HashMapBucketNode*), sizeof(key_size));
+ value = (char*)self + sizeof(HashMapBucketNode*) + sizeof(u32) + key_size;
+ return value;
+}
+
+int hash_map_init(HashMap *self, ScopedAllocator *allocator, usize type_size,
+ HashMapCompare compare_func, HashMapHash hash_func) {
+ assert(compare_func);
+ assert(hash_func);
+ self->allocator = allocator;
+ self->type_size = type_size;
+ self->num_elements = 0;
+ self->compare_func = compare_func;
+ self->hash_func = hash_func;
+ return_if_error(buffer_init(&self->buckets, self->allocator));
+ assert(self->buckets.size == 0);
+ return_if_error(buffer_append(&self->buckets, NULL, sizeof(HashMapBucket) * HASH_MAP_INITIAL_SIZE));
+ am_memset(self->buckets.data, 0, self->buckets.size);
+ return 0;
+}
+
+static CHECK_RESULT int hash_map_bucket_add(HashMap *self, HashMapBucket *bucket, BufferView key, void *value) {
+ HashMapBucketNode *new_bucket_node;
+ return_if_error(scoped_allocator_alloc(self->allocator,
+ sizeof(HashMapBucketNode*) + sizeof(u32) + key.size + self->type_size,
+ (void**)&new_bucket_node));
+ bucket_node_set_next(new_bucket_node, bucket->start);
+ bucket_node_set_key(new_bucket_node, key);
+ bucket_node_set_value(new_bucket_node, value, self->type_size);
+ bucket->start = new_bucket_node;
+ return 0;
+}
+
+static void hash_map_reorder_buckets(HashMap *self, usize end_index) {
+ HashMapBucket *bucket;
+ HashMapBucket *bucket_end;
+ usize bucket_size;
+ usize index;
+
+ bucket = (HashMapBucket*)self->buckets.data;
+ bucket_end = ((HashMapBucket*)self->buckets.data) + end_index;
+ bucket_size = buffer_get_size(&self->buckets, HashMapBucket);
+ index = 0;
+
+ for(; bucket != bucket_end; ++bucket, ++index) {
+ HashMapBucketNode *prev_bucket_node;
+ HashMapBucketNode *bucket_node;
+ prev_bucket_node = NULL;
+ bucket_node = bucket->start;
+ while(bucket_node) {
+ HashMapBucketNode *next_bucket_node;
+ BufferView bucket_key;
+ usize bucket_index;
+
+ /* Save the next node first; moving this node overwrites its next pointer */
+ next_bucket_node = bucket_node_get_next(bucket_node);
+ bucket_key = bucket_node_get_key(bucket_node);
+ bucket_index = self->hash_func((const u8*)bucket_key.data, bucket_key.size) % bucket_size;
+ if(bucket_index != index) {
+ /* Unlink the node from its current bucket and prepend it to the bucket it now hashes to */
+ HashMapBucket *new_bucket;
+ new_bucket = ((HashMapBucket*)self->buckets.data) + bucket_index;
+ if(prev_bucket_node)
+ bucket_node_set_next(prev_bucket_node, next_bucket_node);
+ else
+ bucket->start = next_bucket_node;
+ bucket_node_set_next(bucket_node, new_bucket->start);
+ new_bucket->start = bucket_node;
+ } else {
+ prev_bucket_node = bucket_node;
+ }
+ bucket_node = next_bucket_node;
+ }
+ }
+}
+
+static CHECK_RESULT int hash_map_increase_buckets(HashMap *self) {
+ usize prev_count;
+ usize prev_byte_size;
+ usize bytes_to_append;
+ prev_count = buffer_get_size(&self->buckets, HashMapBucket);
+ prev_byte_size = prev_count * sizeof(HashMapBucket);
+ /* Grow the bucket array to 1.5x its current size, in bytes */
+ bytes_to_append = prev_byte_size / 2;
+ return_if_error(buffer_append(&self->buckets, NULL, bytes_to_append));
+ am_memset(self->buckets.data + prev_byte_size, 0, bytes_to_append);
+ hash_map_reorder_buckets(self, prev_count);
+ return 0;
+}
+
+int hash_map_insert(HashMap *self, BufferView key, void *value) {
+ usize bucket_index;
+ usize bucket_size;
+ HashMapBucket *bucket;
+
+ bucket_size = buffer_get_size(&self->buckets, HashMapBucket);
+ if(self->num_elements == bucket_size) {
+ return_if_error(hash_map_increase_buckets(self));
+ bucket_size = buffer_get_size(&self->buckets, HashMapBucket);
+ }
+
+ bucket_index = self->hash_func((const u8*)key.data, key.size) % bucket_size;
+ bucket = ((HashMapBucket*)self->buckets.data) + bucket_index;
+ return_if_error(hash_map_bucket_add(self, bucket, key, value));
+ ++self->num_elements;
+ return 0;
+}
+
+bool hash_map_get(HashMap *self, BufferView key, void *value) {
+ usize bucket_size;
+ usize bucket_index;
+ HashMapBucket *bucket;
+ HashMapBucketNode *bucket_node;
+
+ bucket_size = buffer_get_size(&self->buckets, HashMapBucket);
+ bucket_index = self->hash_func((const u8*)key.data, key.size) % bucket_size;
+
+ bucket = ((HashMapBucket*)self->buckets.data) + bucket_index;
+ for(bucket_node = bucket->start; bucket_node; bucket_node = bucket_node_get_next(bucket_node)) {
+ BufferView bucket_key;
+ bucket_key = bucket_node_get_key(bucket_node);
+ /* Compare keys; on a match copy the stored value out to the caller */
+ if(self->compare_func(&key, &bucket_key) == 0) {
+ if(value)
+ am_memcpy(value, bucket_node_get_value(bucket_node), self->type_size);
+ return bool_true;
+ }
+ }
+
+ return bool_false;
+}
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+int hash_compare_string(const void *a, const void *b) {
+ const BufferView *lhs;
+ const BufferView *rhs;
+ int mem_diff;
+
+ lhs = a;
+ rhs = b;
+ mem_diff = am_memcmp(lhs->data, rhs->data, MIN(lhs->size, rhs->size));
+ if(mem_diff == 0)
+ return (int)lhs->size - (int)rhs->size;
+ else
+ return mem_diff;
+}
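
For context, the intended call pattern of the new map: keys are BufferView values, stored values are copied in and out by type_size, and amal_hash_string / hash_compare_string are the provided callbacks for string keys. A minimal usage sketch, not part of this commit (the ScopedAllocator is assumed to be set up by the caller, and u32 comes from the project's type header):

    static int hash_map_usage_example(ScopedAllocator *allocator) {
        HashMap map;
        BufferView key;
        u32 value_in;
        u32 value_out;

        key.data = "answer";
        key.size = 6;
        value_in = 42;

        return_if_error(hash_map_init(&map, allocator, sizeof(u32), hash_compare_string, amal_hash_string));
        return_if_error(hash_map_insert(&map, key, &value_in));
        if(hash_map_get(&map, key, &value_out)) {
            /* value_out is now 42 */
        }
        return 0;
    }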
diff --git a/src/std/mem.c b/src/std/mem.c
index ec7520c..f406176 100644
--- a/src/std/mem.c
+++ b/src/std/mem.c
@@ -5,8 +5,12 @@ void am_memcpy(void *dest, const void *src, usize size) {
memcpy(dest, src, size);
}
+int am_memcmp(const void *lhs, const void *rhs, usize size) {
+ return memcmp(lhs, rhs, size);
+}
+
bool am_memeql(const void *lhs, const void *rhs, usize size) {
- return memcmp(lhs, rhs, size) == 0;
+ return am_memcmp(lhs, rhs, size) == 0;
}
void am_memset(void *dest, int value, usize size) {
diff --git a/src/std/thread.c b/src/std/thread.c
index c8dba9a..64a7f1b 100644
--- a/src/std/thread.c
+++ b/src/std/thread.c
@@ -89,6 +89,10 @@ void amal_mutex_init(amal_mutex *self) {
self->lock_identifier = NULL;
}
+void amal_mutex_deinit(amal_mutex *self) {
+ pthread_mutex_destroy(&self->mutex);
+}
+
static long amal_process_get_id() {
return getpid();
}
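
The new amal_mutex_deinit is the teardown counterpart to amal_mutex_init shown in the context above, destroying the underlying pthread mutex once it is no longer needed. A trivial pairing sketch, not part of this commit:

    amal_mutex mutex;
    amal_mutex_init(&mutex);
    /* ... lock/unlock and work elsewhere ... */
    amal_mutex_deinit(&mutex); /* releases the pthread_mutex_t resources */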