diff options
-rw-r--r-- | include/Body.hpp | 1 | ||||
-rw-r--r-- | src/Body.cpp | 1 | ||||
-rw-r--r-- | src/QuickMedia.cpp | 43 |
3 files changed, 26 insertions, 19 deletions
diff --git a/include/Body.hpp b/include/Body.hpp index 4bfd36f..525cee5 100644 --- a/include/Body.hpp +++ b/include/Body.hpp @@ -21,6 +21,7 @@ namespace QuickMedia { enum class FetchStatus { NONE, + QUEUED_LOADING, LOADING, FINISHED_LOADING, FAILED_TO_LOAD diff --git a/src/Body.cpp b/src/Body.cpp index 0143fe2..2d429f8 100644 --- a/src/Body.cpp +++ b/src/Body.cpp @@ -665,6 +665,7 @@ namespace QuickMedia { switch(embedded_item_status) { case FetchStatus::NONE: return ""; + case FetchStatus::QUEUED_LOADING: case FetchStatus::LOADING: return "Loading message..."; case FetchStatus::FINISHED_LOADING: diff --git a/src/QuickMedia.cpp b/src/QuickMedia.cpp index aeb719e..03a4d8a 100644 --- a/src/QuickMedia.cpp +++ b/src/QuickMedia.cpp @@ -3206,8 +3206,8 @@ namespace QuickMedia { Messages unreferenced_events; auto set_body_as_deleted = [&current_room](Message *message, BodyItem *body_item) { - body_item->embedded_item = nullptr; - body_item->embedded_item_status = FetchStatus::NONE; + //body_item->embedded_item = nullptr; + //body_item->embedded_item_status = FetchStatus::NONE; message->type = MessageType::REDACTION; //message->related_event_id.clear(); //message->related_event_type = RelatedEventType::NONE; @@ -3611,8 +3611,7 @@ namespace QuickMedia { enum class FetchMessageType { MESSAGE, - USER_UPDATE, - ROOM_USERS + USER_UPDATE }; struct FetchMessageResult { @@ -3621,6 +3620,7 @@ namespace QuickMedia { }; //const int num_fetch_message_threads = 4; + AsyncTask<bool> fetch_users_future; AsyncTask<FetchMessageResult> fetch_message_future; Message *fetch_message = nullptr; BodyItem *fetch_body_item = nullptr; @@ -3628,7 +3628,7 @@ namespace QuickMedia { // TODO: How about instead fetching all messages we have, not only the visible ones? also fetch with multiple threads. 
tabs[PINNED_TAB_INDEX].body->body_item_render_callback = [this, &current_room, &fetch_message_future, &tabs, &fetch_message, &find_body_item_by_event_id, &fetch_body_item, &fetch_message_tab](BodyItem *body_item) { - if(fetch_message_future.valid() || !current_room) + if(fetch_message_future.valid()) return; PinnedEventData *event_data = static_cast<PinnedEventData*>(body_item->userdata); @@ -3651,7 +3651,7 @@ namespace QuickMedia { #endif if(event_data->status == FetchStatus::FINISHED_LOADING && event_data->message) { - if(event_data->message->related_event_id.empty() || body_item->embedded_item_status != FetchStatus::NONE) + if(event_data->message->related_event_id.empty() || (body_item->embedded_item_status != FetchStatus::NONE && body_item->embedded_item_status != FetchStatus::QUEUED_LOADING)) return; // Check if we already have the referenced message as a body item, so we dont create a new one. @@ -3701,13 +3701,9 @@ namespace QuickMedia { // TODO: How about instead fetching all messages we have, not only the visible ones? also fetch with multiple threads. 
tabs[MESSAGES_TAB_INDEX].body->body_item_render_callback = [this, &current_room, &fetch_message_future, &tabs, &fetch_message, &find_body_item_by_event_id, &fetch_body_item, &fetch_message_tab](BodyItem *body_item) { - if(fetch_message_future.valid() || !current_room) - return; - Message *message = static_cast<Message*>(body_item->userdata); if(!message) return; - assert(message); #if 0 if(message->user->resolve_state == UserResolveState::NOT_RESOLVED) { @@ -3724,8 +3720,13 @@ namespace QuickMedia { } #endif - if(message->related_event_id.empty() || body_item->embedded_item_status != FetchStatus::NONE) + if(message->related_event_id.empty() || (body_item->embedded_item_status != FetchStatus::NONE && body_item->embedded_item_status != FetchStatus::QUEUED_LOADING)) + return; + + if(fetch_message_future.valid()) { + body_item->embedded_item_status = FetchStatus::QUEUED_LOADING; return; + } // Check if we already have the referenced message as a body item, so we dont create a new one. // TODO: Optimize from linear search to hash map @@ -4009,9 +4010,10 @@ namespace QuickMedia { } }; - auto cleanup_tasks = [&set_read_marker_future, &fetch_message_future, &typing_state_queue, &typing_state_thread, &post_task_queue, &provisional_message_queue, &fetched_messages_set, &sent_messages, &post_thread, &tabs]() { + auto cleanup_tasks = [&set_read_marker_future, &fetch_message_future, &fetch_users_future, &typing_state_queue, &typing_state_thread, &post_task_queue, &provisional_message_queue, &fetched_messages_set, &sent_messages, &post_thread, &tabs]() { set_read_marker_future.cancel(); fetch_message_future.cancel(); + fetch_users_future.cancel(); typing_state_queue.close(); if(typing_state_thread.joinable()) { program_kill_in_thread(typing_state_thread.get_id()); @@ -4046,9 +4048,9 @@ namespace QuickMedia { //update_ } else { // TODO: Race condition? 
maybe use matrix /members instead which has a since parameter to make the members list match current sync - fetch_message_future = [this, &current_room]() { + fetch_users_future = [this, &current_room]() { matrix->update_room_users(current_room); - return FetchMessageResult{FetchMessageType::ROOM_USERS, nullptr}; + return true; }; } @@ -4491,13 +4493,16 @@ namespace QuickMedia { } } + if(fetch_users_future.ready()) { + fetch_users_future.get(); + current_room->users_fetched = true; + update_pinned_messages_authors(); + update_messages_authors(); + } + if(fetch_message_future.ready()) { FetchMessageResult fetch_message_result = fetch_message_future.get(); - if(fetch_message_result.type == FetchMessageType::ROOM_USERS) { - current_room->users_fetched = true; - update_pinned_messages_authors(); - update_messages_authors(); - } else if(fetch_message_result.type == FetchMessageType::USER_UPDATE) { + if(fetch_message_result.type == FetchMessageType::USER_UPDATE) { update_pinned_messages_author(fetch_message->user); update_messages_author(fetch_message->user); fetch_message = nullptr; |