From 6f1afa9d76e9c2611a8025fb067d7ee14518dc18 Mon Sep 17 00:00:00 2001 From: dec05eba Date: Mon, 20 Apr 2020 00:57:21 +0200 Subject: Remove in_progress file for manga if download fails --- plugins/mangaplus.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'plugins/mangaplus.py') diff --git a/plugins/mangaplus.py b/plugins/mangaplus.py index 01940ff..3797d14 100755 --- a/plugins/mangaplus.py +++ b/plugins/mangaplus.py @@ -182,7 +182,8 @@ def download_file(url, page, save_path): index = 0 with requests.get(url, headers=headers, stream=True) as response: - response.raise_for_status() + if not response.ok: + return False with open(save_path, "wb") as file: for chunk in response.iter_content(chunk_size=8192): if chunk: @@ -191,11 +192,13 @@ def download_file(url, page, save_path): index += len(chunk) else: with requests.get(url, headers=headers, stream=True) as response: - response.raise_for_status() + if not response.ok: + return False with open(save_path, "wb") as file: for chunk in response.iter_content(chunk_size=8192): if chunk: file.write(chunk) + return True def title_url_extract_manga_id(url): result = re.search("mangaplus.shueisha.co.jp/titles/([0-9]+)", url) @@ -253,6 +256,7 @@ def viewer_url_extract_manga_id(url): return result.groups()[0] def download_chapter(url, download_dir): + request_url = url manga_id = viewer_url_extract_manga_id(url) if not manga_id: print("Failed to extract manga id from url: %s. 
Note: url is expected to be in this format: mangaplus.shueisha.co.jp/viewer/" % url) @@ -267,7 +271,7 @@ def download_chapter(url, download_dir): in_progress_filepath = os.path.join(download_dir, ".in_progress") with open(in_progress_filepath, "w") as file: - file.write(url) + file.write(request_url) resp = MangaplusResponse.loads(response.content) if resp.error: @@ -284,11 +288,15 @@ def download_chapter(url, download_dir): image_name = str(img_number) + ext image_path = os.path.join(download_dir, image_name) print("Downloading {} to {}".format(page.page.image_url, image_path)) - download_file(page.page.image_url, page, image_path) + if not download_file(page.page.image_url, page, image_path): + print("Failed to download image: %s" % page.page.image_url) + os.remove(in_progress_filepath) + exit(2) img_number += 1 if img_number == 1: print("Failed to find images for chapter") + os.remove(in_progress_filepath) exit(2) with open(os.path.join(download_dir, ".finished"), "w") as file: -- cgit v1.2.3