author     dec05eba <dec05eba@protonmail.com>  2024-10-18 12:07:54 +0200
committer  dec05eba <dec05eba@protonmail.com>  2024-10-18 12:07:54 +0200
commit     1c96d1bb874fb7908a0594bcde971a772a80bcfb (patch)
tree       cbf9d0e9bff8b2d880dd7d816874d5789ba10e44
parent     e0c3120b46cc95cd9d0cb9bf300ec4f9cff9572a (diff)
Manganelo: fall back to the second server if the download fails on the first (HEAD, master)
-rw-r--r--  TODO                   4
-rwxr-xr-x  plugins/manganelo.py  41
-rw-r--r--  src/main.c             2
3 files changed, 29 insertions, 18 deletions
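
The change itself is a straightforward retry-with-fallback: the chapter page is first fetched from manganelo's default image server, and on an HTTP error it is fetched again with the content_server=server2 cookie that selects the backup server (the same cookie that was previously sitting commented out in download_file). A minimal standalone sketch of that pattern, with an illustrative helper name that is not part of the patch:

    import requests

    def fetch_chapter_page(url):
        # Try the default image server first; on an HTTP error, retry with
        # the "content_server" cookie that selects manganelo's backup server.
        for cookies in ({}, {"content_server": "server2"}):
            try:
                response = requests.get(url, timeout=30, cookies=cookies)
                response.raise_for_status()
                return response
            except requests.HTTPError:
                continue  # fall through to the backup server
        return None  # both servers failed
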
diff --git a/TODO b/TODO
index 83a584a..a8d1872 100644
--- a/TODO
+++ b/TODO
@@ -12,4 +12,6 @@ Cleanup command should remove torrents from transmission.
 Remove dirname because it breaks automedia.pid, since it modifies /proc/.../cmdline. Should also readlink the first arg of cmdline (which doesn't work if automedia is in /usr/bin???). Use a socket! Look at quickmedia.
 Some mangadex chapters redirect to mangaplus. Those should redirect to the mangaplus plugin. Right now they are simply skipped.
 Remove torrents when running automedia cleanup. This can be done by removing torrents by anime config data "filename" (in the json file).
-Use fallbacks for manga download too.
\ No newline at end of file
+Use fallbacks for manga download too.
+
+Put anime and manga downloads into separate subdirectories in the download directory.
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index 5593fd4..cfc8c9c 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -45,9 +45,6 @@ def download_file(url, save_path):
         "referer": "https://manganelo.com/",
         "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
     }
-    #cookies = {
-    #    "content_server": "server2"
-    #}
     with requests.get(url, stream=True, headers=headers, timeout=30) as response:
         if not response.ok:
             return 0
@@ -138,19 +135,21 @@ def list_chapters(url, chapter_list_input):
     print(json.dumps(chapters))
 
-def download_chapter(url, download_dir):
-    response = requests.get(url, timeout=30)
-    response.raise_for_status()
-
-    in_progress_filepath = os.path.join(download_dir, ".in_progress")
-    with open(in_progress_filepath, "w") as file:
-        file.write(url)
+def download_chapter_images(url, download_dir, use_backup_server):
+    response = ""
+    try:
+        cookies = {}
+        if use_backup_server:
+            cookies = {"content_server": "server2"}
+        response = requests.get(url, timeout=30, cookies=cookies)
+        response.raise_for_status()
+    except requests.HTTPError:
+        return False
 
     tree = etree.HTML(response.text)
     tree = redirect_migrated_url(url, tree, True)
     if tree is None:
-        os.remove(in_progress_filepath)
-        exit(2)
+        return False
 
     img_number = 1
     for image_source in tree.xpath('//div[@class="container-chapter-reader"]/img/@src'):
@@ -161,14 +160,24 @@ def download_chapter(url, download_dir):
         file_size = download_file(image_source, image_path)
         if file_size < 255:
             print("resource temporarily unavailable: %s" % image_source)
-            os.remove(in_progress_filepath)
-            exit(2)
+            return False
         img_number += 1
 
     if img_number == 1:
         print("Failed to find images for chapter")
-        os.remove(in_progress_filepath)
-        exit(2)
+        return False
+
+    return True
+
+def download_chapter(url, download_dir):
+    in_progress_filepath = os.path.join(download_dir, ".in_progress")
+    with open(in_progress_filepath, "w") as file:
+        file.write(url)
+
+    if not download_chapter_images(url, download_dir, False):
+        if not download_chapter_images(url, download_dir, True):
+            os.remove(in_progress_filepath)
+            exit(2)
 
     with open(os.path.join(download_dir, ".finished"), "w") as file:
         file.write("1")
diff --git a/src/main.c b/src/main.c
index de7bebe..52d68ec 100644
--- a/src/main.c
+++ b/src/main.c
@@ -64,7 +64,7 @@ static void usage_sync(void) {
     fprintf(stderr, "OPTIONS\n");
     fprintf(stderr, "  download_dir  The path where media should be downloaded to\n");
     fprintf(stderr, "EXAMPLES\n");
-    fprintf(stderr, "  automedia sync /home/adam/Downloads/automedia\n");
+    fprintf(stderr, "  automedia sync /home/user/Downloads/automedia\n");
     exit(1);
 }