author      dec05eba <dec05eba@protonmail.com>      2022-02-14 00:38:27 +0100
committer   dec05eba <dec05eba@protonmail.com>      2022-02-14 00:38:27 +0100
commit      0145b7d91c3529415928868f024330ba23d2b9b3 (patch)
tree        20a07de4cb74c7d17cd6fad7d0d56ee759e49fe1
parent      15d635f8e7d4c5f5e12f759a62d1b48ef2e67f70 (diff)
Remove mangadex upgrade
-rw-r--r--    TODO                   1
-rwxr-xr-x    automedia              bin 120760 -> 124920 bytes
-rwxr-xr-x    install.sh             1
-rwxr-xr-x    mangadex-upgrade.py    162
-rw-r--r--    src/main.c             10
5 files changed, 0 insertions, 174 deletions
diff --git a/TODO b/TODO
index 76d6aef..850769b 100644
--- a/TODO
+++ b/TODO
@@ -7,5 +7,4 @@ Verify path lengths. Currently there is limit to 255 characters for remote names
Deal with replacing of / with _.
Handle strdup failure.
Make downloading manga asynchronous, just like torrents. And have timeout for download.
-Remove mangadex legacy id conversion code in 2023.
Detect if a website is very slow (timeout?) and ignore sync for that website for the current sync. This is to prevent a slow website from preventing all syncs.
diff --git a/automedia b/automedia
index d090d81..3cda69a 100755
--- a/automedia
+++ b/automedia
Binary files differ
diff --git a/install.sh b/install.sh
index 19698d9..58b47a6 100755
--- a/install.sh
+++ b/install.sh
@@ -4,7 +4,6 @@ script_dir=$(dirname "$0")
cd "$script_dir"
install -Dm755 "automedia" "/usr/bin/automedia"
-install -Dm755 "mangadex-upgrade.py" "/usr/share/automedia/mangadex-upgrade.py"
for file in plugins/*; do
install -Dm755 "$file" "/usr/share/automedia/$file"
done
diff --git a/mangadex-upgrade.py b/mangadex-upgrade.py
deleted file mode 100755
index b46980d..0000000
--- a/mangadex-upgrade.py
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import json
-import re
-import requests
-
-class Chapter:
-    id = ""
-    title = ""
-    time = ""
-
-class Manga:
-    id = ""
-    updated = ""
-    chapters = None
-    directory = ""
-
-    def __init__(self):
-        self.chapters = []
-
-# Returns None if the manga is not tracked using mangadex or if it has already been migrated
-def manga_data_file_read(filepath):
-    with open(filepath, "rb") as file:
-        data = json.load(file)
-        if data["plugin"] != "mangadex.py":
-            return None
-
-        manga_link = data["link"]
-        title_search = re.search(r"/title/([0-9]+)", manga_link)
-        if not title_search or len(title_search.groups()) == 0:
-            print("Mangadex manga already migrated: %s" % manga_link)
-            return None
-
-        manga_data = Manga()
-        manga_data.id = int(title_search.group(1))
-        manga_data.updated = data["updated"]
-
-        downloaded = data.get("downloaded")
-        if type(downloaded) is list:
-            for downloaded_item in downloaded:
-                chapter_url = downloaded_item["url"]
-                chapter_title_search = re.search(r"chapter/([0-9]+)", chapter_url)
-                if not chapter_title_search:
-                    print("Failed to extract title from manga %s, chapter %s" % (manga_link, chapter_url))
-                    exit(1)
-
-                chapter_data = Chapter()
-                chapter_data.id = int(chapter_title_search.group(1))
-                chapter_data.title = downloaded_item["title"]
-                chapter_data.time = downloaded_item["time"]
-                manga_data.chapters.append(chapter_data)
-        return manga_data
-
-# Returns a list with tuple where the first element is the legacy id and the second element is the new id
-def legacy_id_to_new_id(ids, type):
-    mapping = []
-    id_start = 0
-    while id_start < len(ids):
-        id_end = min(id_start + 1000, len(ids))
-        if id_end - id_start == 0:
-            break
-        response = requests.post("https://api.mangadex.org/legacy/mapping", json={"type": type, "ids": ids[id_start:id_end]})
-        response.raise_for_status()
-        response_json = response.json()
-
-        for response_item in response_json:
-            if response_item["result"] != "ok":
-                print("legacy mapping returned an error")
-                exit(1)
-
-            attributes = response_item["data"]["attributes"]
-            mapping.append((attributes["legacyId"], attributes["newId"]))
-
-        id_start = id_end
-
-    if len(mapping) != len(ids):
-        print("Failed to get the legacy to new id mapping for all ids. Got %d mappings, expected %d" % (len(mapping), len(ids)))
-        exit(1)
-
-    return mapping
-
-def get_manga_by_id(manga_list, manga_id):
-    for manga_data in manga_list:
-        if manga_data.id == manga_id:
-            return manga_data
-
-def get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, id):
-    for chapter_id_mapping in legacy_chapter_ids_to_new_ids:
-        if chapter_id_mapping[0] == id:
-            return chapter_id_mapping
-
-def file_overwrite_atomic(filepath, content):
-    tmp_filepath = filepath + ".tmp"
-    with open(tmp_filepath, "wb") as file:
-        file.write(content.encode())
-        file.flush()
-        os.fsync(file.fileno())
-    os.rename(tmp_filepath, filepath)
-
-if __name__ == "__main__":
-    migrate_finished_filepath = os.path.expanduser("~/.config/automedia/mangadex-upgraded")
-    if os.path.isfile(migrate_finished_filepath):
-        print("Mangadex tracked manga have already migrated, nothing to do")
-        exit(0)
-
-    tracked_dir = os.path.expanduser("~/.config/automedia/html/tracked")
-    if not os.path.isdir(tracked_dir):
-        exit(0)
-
-    manga_list = []
-    for manga_name in os.listdir(tracked_dir):
-        manga_tracked_dir = os.path.join(tracked_dir, manga_name)
-        manga_data_file = os.path.join(manga_tracked_dir, "data")
-        manga_data = manga_data_file_read(manga_data_file)
-        if manga_data:
-            manga_data.directory = os.path.join(tracked_dir, manga_name)
-            manga_list.append(manga_data)
-
-    manga_ids = []
-    chapter_ids = []
-    cc = {}
-    for manga_data in manga_list:
-        manga_ids.append(manga_data.id)
-        for chapter_data in manga_data.chapters:
-            chapter_ids.append(chapter_data.id)
-
-    legancy_manga_ids_to_new_ids = legacy_id_to_new_id(manga_ids, "manga")
-    legacy_chapter_ids_to_new_ids = legacy_id_to_new_id(chapter_ids, "chapter")
-
-    for manga_id in legancy_manga_ids_to_new_ids:
-        manga_data = get_manga_by_id(manga_list, manga_id[0])
-        if not manga_data:
-            print("Failed to get manga by id: %s" % manga_id[0])
-            exit(1)
-
-        new_manga_link = str(manga_id[1])
-        new_manga_data = {}
-        new_manga_data["plugin"] = "mangadex.py"
-        new_manga_data["link"] = new_manga_link
-        new_manga_data["updated"] = manga_data.updated
-        downloaded = []
-        for chapter_data in manga_data.chapters:
-            chapter_id_mapping = get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, chapter_data.id)
-            if not chapter_id_mapping:
-                print("Failed to get new id from manga %d, chapter %d" % (manga_data.id, chapter_data.id))
-                exit(1)
-
-            downloaded_item = {}
-            downloaded_item["title"] = chapter_data.title
-            downloaded_item["time"] = chapter_data.time
-            downloaded_item["url"] = chapter_id_mapping[1]
-            downloaded.append(downloaded_item)
-        new_manga_data["downloaded"] = downloaded
-
-        file_overwrite_atomic(os.path.join(manga_data.directory, "link"), new_manga_link)
-        new_manga_data_str = json.dumps(new_manga_data, indent=4)
-        file_overwrite_atomic(os.path.join(manga_data.directory, "data"), new_manga_data_str)
-
-    file_overwrite_atomic(migrate_finished_filepath, "1")
-    print("Successfully migrated %d manga with a total of %d chapters" % (len(manga_ids), len(chapter_ids)))
- \ No newline at end of file
diff --git a/src/main.c b/src/main.c
index d38ea2d..e5edd19 100644
--- a/src/main.c
+++ b/src/main.c
@@ -591,20 +591,10 @@ static int proc_read_cmdline(const char *pid_str, char *cmdline_data, int cmdlin
    return 0;
}
-static int run_mangadex_migration_script() {
-    const char *args[] = { "/usr/share/automedia/mangadex-upgrade.py", NULL };
-    return program_exec(args, NULL, NULL);
-}
-
static void command_sync(int argc, char **argv, char *rss_config_dir, char *html_config_dir, char *program_dir) {
    if(argc < 1)
        usage_sync();
-    if(run_mangadex_migration_script() != 0) {
-        fprintf(stderr, "Failed to migrade mangadex manga\n");
-        exit(1);
-    }
-
    char *download_dir = argv[0];
    const char automedia_pid_path[] = "/tmp/automedia.pid";