-rw-r--r--  TODO                 |  4
-rwxr-xr-x  build.sh             |  3
-rwxr-xr-x  plugins/manganelo.py | 41
-rwxr-xr-x  release.sh           |  3
-rw-r--r--  src/main.c           |  2
5 files changed, 35 insertions, 18 deletions
diff --git a/TODO b/TODO
--- a/TODO
+++ b/TODO
@@ -12,4 +12,6 @@ Cleanup command should remove torrents from transmission.
 Remove dirname because it breaks automedia.pid because it modified /proc/.../cmdline. Should also do readlink on the first arg of cmdline (which doesn't work if automedia is in /usr/bin???). Use socket! look at quickmedia.
 Some mangadex chapters redirect to mangaplus. Those should redirect to the mangaplus plugin. Right now they are simply skipped.
 Remove torrents when running automedia cleanup. This can be done by removing torrents by anime config data "filename" (in the json file).
-Use fallbacks for manga download too.
\ No newline at end of file
+Use fallbacks for manga download too.
+
+Put anime and manga downloads into separate subdirectories in the download directory.
diff --git a/build.sh b/build.sh
--- a/build.sh
+++ b/build.sh
@@ -1,5 +1,8 @@
 #!/bin/sh
 
+script_dir=$(dirname "$0")
+cd "$script_dir"
+
 CFLAGS="-O3 -s -flto -Wall -Wextra -DNDEBUG"
 [ -z "$RELEASE" ] && CFLAGS="-O0 -g3 -Wall -Wextra";
 CC=cc
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index 5593fd4..cfc8c9c 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -45,9 +45,6 @@ def download_file(url, save_path):
         "referer": "https://manganelo.com/",
         "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
     }
-    #cookies = {
-    #    "content_server": "server2"
-    #}
     with requests.get(url, stream=True, headers=headers, timeout=30) as response:
         if not response.ok:
             return 0
@@ -138,19 +135,21 @@ def list_chapters(url, chapter_list_input):
 
     print(json.dumps(chapters))
 
-def download_chapter(url, download_dir):
-    response = requests.get(url, timeout=30)
-    response.raise_for_status()
-
-    in_progress_filepath = os.path.join(download_dir, ".in_progress")
-    with open(in_progress_filepath, "w") as file:
-        file.write(url)
+def download_chapter_images(url, download_dir, use_backup_server):
+    response = ""
+    try:
+        cookies = {}
+        if use_backup_server:
+            cookies = {"content_server": "server2"}
+        response = requests.get(url, timeout=30, cookies=cookies)
+        response.raise_for_status()
+    except requests.HTTPError:
+        return False
 
     tree = etree.HTML(response.text)
     tree = redirect_migrated_url(url, tree, True)
     if tree is None:
-        os.remove(in_progress_filepath)
-        exit(2)
+        return False
 
     img_number = 1
     for image_source in tree.xpath('//div[@class="container-chapter-reader"]/img/@src'):
@@ -161,14 +160,24 @@ def download_chapter(url, download_dir):
         file_size = download_file(image_source, image_path)
         if file_size < 255:
             print("resource temporary unavailable: %s" % image_source)
-            os.remove(in_progress_filepath)
-            exit(2)
+            return False
         img_number += 1
 
     if img_number == 1:
         print("Failed to find images for chapter")
-        os.remove(in_progress_filepath)
-        exit(2)
+        return False
+
+    return True
+
+def download_chapter(url, download_dir):
+    in_progress_filepath = os.path.join(download_dir, ".in_progress")
+    with open(in_progress_filepath, "w") as file:
+        file.write(url)
+
+    if not download_chapter_images(url, download_dir, False):
+        if not download_chapter_images(url, download_dir, True):
+            os.remove(in_progress_filepath)
+            exit(2)
 
     with open(os.path.join(download_dir, ".finished"), "w") as file:
         file.write("1")
diff --git a/release.sh b/release.sh
--- a/release.sh
+++ b/release.sh
@@ -1,3 +1,6 @@
 #!/bin/sh
 
+script_dir=$(dirname "$0")
+cd "$script_dir"
+
 RELEASE=1 ./build.sh
diff --git a/src/main.c b/src/main.c
--- a/src/main.c
+++ b/src/main.c
@@ -64,7 +64,7 @@ static void usage_sync(void) {
     fprintf(stderr, "OPTIONS\n");
    fprintf(stderr, "  download_dir    The path where media should be downloaded to\n");
     fprintf(stderr, "EXAMPLES\n");
-    fprintf(stderr, "  automedia sync /home/adam/Downloads/automedia\n");
+    fprintf(stderr, "  automedia sync /home/user/Downloads/automedia\n");
     exit(1);
 }
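
The core of this commit is the retry flow in plugins/manganelo.py: download_chapter now writes the .in_progress marker, tries the default image server, and only retries with the "content_server=server2" cookie (manganelo's backup server) if the first pass fails. The sketch below restates that pattern as a standalone script, assuming only the requests library; fetch_chapter_page and fetch_with_fallback are hypothetical stand-ins for the plugin's download_chapter_images and download_chapter, and the real plugin additionally parses the image list with lxml and downloads each image.

import requests

def fetch_chapter_page(url, use_backup_server):
    # Hypothetical helper mirroring download_chapter_images: the backup image
    # server is selected purely by sending the "content_server=server2" cookie.
    cookies = {"content_server": "server2"} if use_backup_server else {}
    try:
        response = requests.get(url, timeout=30, cookies=cookies)
        response.raise_for_status()
    except requests.HTTPError:
        return None
    return response.text

def fetch_with_fallback(url):
    # Same shape as the new download_chapter: try the default server first,
    # then retry once against the backup server before giving up entirely.
    page = fetch_chapter_page(url, False)
    if page is None:
        page = fetch_chapter_page(url, True)
    return page

As in the plugin, only an HTTP error status triggers the fallback; connection errors and timeouts are not caught and still propagate to the caller.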