author     dec05eba <dec05eba@protonmail.com>       2020-01-01 10:34:21 +0100
committer  dec05eba <dec05eba@protonmail.com>       2020-07-06 07:12:34 +0200
commit     3586c8d003077ee32b541f00d7690ae179448963 (patch)
tree       9befe18f02e89acb7fb0b4d8768221a6374b6656 /plugins
parent     08ad0983f4e5473b08cbf5b51aa8dc7d29b5d862 (diff)
Fix all chapters redownloading if latest chapter changes name
Remove readms, as it's dead.
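
The fix changes the `list` subcommand's interface: instead of receiving the latest downloaded chapter title as a third argument, the plugin now reads the full list of previously downloaded chapters as JSON on stdin and stops listing at the first chapter whose title or URL it has already seen. Below is a minimal sketch of how a caller might drive the new interface; the manga URL and the chapter entries are illustrative, not taken from this repository.

```python
# Sketch: feed previously downloaded chapters to the plugin over stdin
# and read back only the chapters that still need downloading.
# The chapter data and manga URL below are illustrative examples.
import json
import subprocess

previously_downloaded = [
    {"title": "Chapter 12", "url": "https://manganelo.com/chapter/example/chapter_12"},
    {"title": "Chapter 13", "url": "https://manganelo.com/chapter/example/chapter_13"},
]

result = subprocess.run(
    ["plugins/manganelo.py", "list", "https://mangakakalot.com/manga/assassins_pride"],
    input=json.dumps(previously_downloaded),
    capture_output=True,
    text=True,
    check=True,
)

# The plugin prints a JSON array of {"name": ..., "url": ...} objects,
# newest first, stopping at the first already-seen title or URL.
new_chapters = json.loads(result.stdout)
for chapter in new_chapters:
    print(chapter["name"], chapter["url"])
```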
Diffstat (limited to 'plugins')
-rwxr-xr-x  plugins/manganelo.py   36
-rwxr-xr-x  plugins/readms.py     123
2 files changed, 24 insertions, 135 deletions
```diff
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index 12813a1..9d8a644 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -17,16 +17,13 @@ def usage():
     exit(1)
 
 def usage_list():
-    print("manganelo.py list <url> [latest]")
-    print("examples:")
-    print(" manganelo.py list \"https://mangakakalot.com/manga/assassins_pride\"")
-    print(" manganelo.py list \"https://mangakakalot.com/manga/assassins_pride\" \"Chapter 13\"")
+    print("manganelo.py list <url>")
     exit(1)
 
 def usage_download():
     print("manganelo.py download <url> <download_dir>")
     print("examples:")
-    print(" manganelo.py download \"https://mangakakalot.com/chapter/vy918232/chapter_16\" /home/adam/Manga/MangaName")
+    print(" manganelo.py download \"https://manganelo.com/chapter/read_naruto_manga_online_free3/chapter_700.5\" /home/adam/Manga/MangaName")
     print("")
     print("Note: The manga directory has to exist.")
     exit(1)
@@ -42,19 +39,32 @@ def download_file(url, save_path):
         return False
     return True
 
-def list_chapters(url, latest):
+def list_chapters(url, chapter_list_input):
     response = requests.get(url)
     if response.status_code != 200:
         print("Failed to list chapters, server responded with status code %d" % response.status_code)
         exit(2)
 
+    seen_titles = set()
+    for item in chapter_list_input:
+        title = item.get("title")
+        if title and len(title) > 0:
+            seen_titles.add(title.lower().replace(" ", ""))
+
+    seen_urls = set()
+    for item in chapter_list_input:
+        url = item.get("url")
+        if url and len(url) > 0:
+            seen_urls.add(url.replace("mangakakalot", "manganelo"))
+
     tree = etree.HTML(response.text)
     chapters = []
     for element in tree.xpath('//ul[@class="row-content-chapter"]//a'):
         element_text = element.text.strip()
-        if latest and element_text == latest:
+        url = element.attrib.get("href").strip()
+        if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
             break
-        chapters.append({ "name": element_text, "url": element.attrib.get("href").strip() })
+        chapters.append({ "name": element_text, "url": url })
     print(json.dumps(chapters))
 
 def download_chapter(url, download_dir):
@@ -89,10 +99,12 @@ if command == "list":
         usage_list()
 
     url = sys.argv[2].replace("mangakakalot", "manganelo")
-    latest = ""
-    if len(sys.argv) >= 4:
-        latest = sys.argv[3]
-    list_chapters(url, latest)
+    chapter_list_input = sys.stdin.read()
+    if len(chapter_list_input) == 0:
+        chapter_list_input = []
+    else:
+        chapter_list_input = json.loads(chapter_list_input)
+    list_chapters(url, chapter_list_input)
 elif command == "download":
     if len(sys.argv) < 4:
         usage_download()
diff --git a/plugins/readms.py b/plugins/readms.py
deleted file mode 100755
index a5343b8..0000000
--- a/plugins/readms.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import time
-import sys
-import requests
-import json
-import subprocess
-
-from lxml import etree
-
-def usage():
-    print("readms.py command")
-    print("commands:")
-    print(" download")
-    print(" list")
-    exit(1)
-
-def usage_list():
-    print("readms.py list <url> [latest]")
-    print("examples:")
-    print(" readms.py list \"https://readms.net/manga/a_trail_of_blood\"")
-    print(" readms.py list \"https://readms.net/manga/a_trail_of_blood\" \"48 - Blood oath\"")
-    exit(1)
-
-def usage_download():
-    print("readms.py download <url> <download_dir>")
-    print("examples:")
-    print(" readms.py download \"https://readms.net/manga/a_trail_of_blood\" /home/adam/Manga/MangaName")
-    print("")
-    print("Note: The manga directory has to exist.")
-    exit(1)
-
-if len(sys.argv) < 2:
-    usage()
-
-def download_file(url, save_path):
-    process = subprocess.Popen(["wget", "-q", "-o", "/dev/null", "-O", save_path, url], stderr=subprocess.PIPE)
-    _, stderr = process.communicate()
-    if process.returncode != 0:
-        print("Failed to download file: {}, error: {}".format(url, stderr.decode('utf-8')))
-        return False
-    return True
-
-def list_chapters(url, latest):
-    response = requests.get(url)
-    if response.status_code != 200:
-        print("Failed to list chapters, server responded with status code %d" % response.status_code)
-        exit(2)
-
-    tree = etree.HTML(response.text)
-    chapters = []
-    for element in tree.xpath('//table//tr//a'):
-        element_text = element.text.strip()
-        if latest and element_text == latest:
-            break
-        chapters.append({ "name": element_text, "url": "https://readms.net" + element.attrib.get("href").strip() })
-    print(json.dumps(chapters))
-
-def download_chapter(url, download_dir):
-    in_progress_filepath = os.path.join(download_dir, ".in_progress")
-    with open(in_progress_filepath, "w") as file:
-        file.write(url)
-
-    img_number = 1
-    while True:
-        response = requests.get(url)
-        if response.status_code != 200:
-            print("Failed to list chapters, server responded with status code %d" % response.status_code)
-            exit(2)
-
-        tree = etree.HTML(response.text)
-
-        image_sources = tree.xpath('//img[@id="manga-page"]/@src')
-        if len(image_sources) != 1:
-            break
-
-        image_source = "https:" + image_sources[0]
-        ext = image_source[image_source.rfind("."):]
-        image_name = str(img_number) + ext
-        image_path = os.path.join(download_dir, image_name)
-        print("Downloading {} to {}".format(image_source, image_path))
-        if not download_file(image_source, image_path):
-            exit(1)
-
-        next_pages = tree.xpath('//div[@class="page"]//a/@href')
-        if len(next_pages) != 1:
-            break
-
-        next_page = next_pages[0]
-        last_slash = next_page.rfind('/')
-        try:
-            if last_slash != -1 and int(next_page[last_slash+1:]) <= img_number:
-                break
-        except ValueError:
-            pass
-
-        url = "https://readms.net" + next_page
-        img_number += 1
-
-    with open(os.path.join(download_dir, ".finished"), "w") as file:
-        file.write("1")
-
-    os.remove(in_progress_filepath)
-
-command = sys.argv[1]
-if command == "list":
-    if len(sys.argv) < 3:
-        usage_list()
-
-    url = sys.argv[2]
-    latest = ""
-    if len(sys.argv) >= 4:
-        latest = sys.argv[3]
-    list_chapters(url, latest)
-elif command == "download":
-    if len(sys.argv) < 4:
-        usage_download()
-    url = sys.argv[2]
-    download_dir = sys.argv[3]
-    download_chapter(url, download_dir)
-else:
-    usage()
```
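
The behavioural core of the fix is the duplicate check in `list_chapters`: titles are compared case-insensitively with spaces removed, and URLs are compared after mapping mangakakalot to manganelo, so a chapter whose title was edited on the site still matches by URL and no longer causes every chapter to be redownloaded. A small standalone sketch of that check, with illustrative helper names and sample data:

```python
# Standalone sketch of the duplicate-chapter check used by the updated plugin.
# normalize_title/already_downloaded and the sample data are illustrative, not from the repo.
def normalize_title(title):
    return title.lower().replace(" ", "")

def already_downloaded(name, url, downloaded):
    seen_titles = {normalize_title(c["title"]) for c in downloaded if c.get("title")}
    seen_urls = {c["url"].replace("mangakakalot", "manganelo") for c in downloaded if c.get("url")}
    return normalize_title(name) in seen_titles or url in seen_urls

downloaded = [{"title": "Chapter 13", "url": "https://mangakakalot.com/chapter/x/chapter_13"}]

# A renamed chapter still matches by URL, while a genuinely new chapter does not match at all.
print(already_downloaded("Ch. 13 - New Name", "https://manganelo.com/chapter/x/chapter_13", downloaded))  # True
print(already_downloaded("Chapter 14", "https://manganelo.com/chapter/x/chapter_14", downloaded))         # False
```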