Diffstat (limited to 'plugins/lhtranslation.py')
-rwxr-xr-x | plugins/lhtranslation.py | 116 |
1 files changed, 116 insertions, 0 deletions
diff --git a/plugins/lhtranslation.py b/plugins/lhtranslation.py
new file mode 100755
index 0000000..a90f579
--- /dev/null
+++ b/plugins/lhtranslation.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import requests
+import json
+import subprocess
+
+from lxml import etree
+
+def usage():
+    print("lhtranslation.py command")
+    print("commands:")
+    print(" download")
+    print(" list")
+    exit(1)
+
+def usage_list():
+    print("lhtranslation.py list <url>")
+    exit(1)
+
+def usage_download():
+    print("lhtranslation.py download <url> <download_dir>")
+    print("examples:")
+    print(" lhtranslation.py download \"https://lhtranslation.net/manga-kaifuku-jutsushi-no-yarinaoshi.html\" /home/adam/Manga/MangaName")
+    print("")
+    print("Note: The manga directory has to exist.")
+    exit(1)
+
+if len(sys.argv) < 2:
+    usage()
+
+def download_file(url, save_path):
+    process = subprocess.Popen(["wget", "-q", "-o", "/dev/null", "-O", save_path, url], stderr=subprocess.PIPE)
+    _, stderr = process.communicate()
+    if process.returncode != 0:
+        print("Failed to download file: {}, error: {}".format(url, stderr.decode('utf-8')))
+        return False
+    return True
+
+def list_chapters(url, chapter_list_input):
+    response = requests.get(url)
+    if response.status_code != 200:
+        print("Failed to list chapters, server responded with status code %d" % response.status_code)
+        exit(2)
+
+    seen_titles = set()
+    for item in chapter_list_input:
+        title = item.get("title")
+        if title and len(title) > 0:
+            seen_titles.add(title.lower().replace(" ", ""))
+
+    seen_urls = set()
+    for item in chapter_list_input:
+        url = item.get("url")
+        if url and len(url) > 0:
+            seen_urls.add(url)
+
+    tree = etree.HTML(response.text)
+    chapters = []
+    for element in tree.xpath("//div[@class='list-chapters']//a[@class='chapter']"):
+        title = element.attrib.get("title").strip()
+        url = element.attrib.get("href").strip()
+        if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
+            break
+        chapters.append({ "name": title, "url": url })
+    print(json.dumps(chapters))
+
+def download_chapter(url, download_dir):
+    response = requests.get(url)
+    if response.status_code != 200:
+        print("Failed to download chapter, server responded with status code %d" % response.status_code)
+        exit(2)
+
+    in_progress_filepath = os.path.join(download_dir, ".in_progress")
+    with open(in_progress_filepath, "w") as file:
+        file.write(url)
+
+    tree = etree.HTML(response.text)
+    img_number = 1
+    for image_source in tree.xpath("//article[@id='content']//img/@src"):
+        image_source = image_source.strip()
+        ext = image_source[image_source.rfind("."):]
+        image_name = str(img_number) + ext
+        image_path = os.path.join(download_dir, image_name)
+        print("Downloading {} to {}".format(image_source, image_path))
+        if not download_file(image_source, image_path):
+            exit(1)
+        img_number += 1
+
+    with open(os.path.join(download_dir, ".finished"), "w") as file:
+        file.write("1")
+
+    os.remove(in_progress_filepath)
+
+command = sys.argv[1]
+if command == "list":
+    if len(sys.argv) < 3:
+        usage_list()
+
+    url = sys.argv[2]
+    chapter_list_input = sys.stdin.read()
+    if len(chapter_list_input) == 0:
+        chapter_list_input = []
+    else:
+        chapter_list_input = json.loads(chapter_list_input)
+    list_chapters(url, chapter_list_input)
+elif command == "download":
+    if len(sys.argv) < 4:
+        usage_download()
+    url = sys.argv[2]
+    download_dir = sys.argv[3]
+    download_chapter(url, download_dir)
+else:
+    usage()
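
For context, a minimal driver sketch of how a frontend might call this plugin, based on the usage text and the stdin/stdout JSON contract in the script above: "list" reads the already-known chapters as a JSON array on stdin and prints only the chapters that appear before the first known one, while "download" requires the target directory to exist. The plugin path, manga URL, chapter entries, and per-chapter directory layout below are illustrative assumptions, not part of the commit.

#!/usr/bin/env python3
# Hypothetical driver sketch (not part of the commit): "list" takes a JSON array of
# {"title", "url"} objects on stdin and prints newer chapters as a JSON array of
# {"name", "url"} objects; "download" saves one chapter's images into an existing
# directory and drops .in_progress / .finished marker files there.
import json
import os
import subprocess

MANGA_URL = "https://lhtranslation.net/manga-kaifuku-jutsushi-no-yarinaoshi.html"  # example URL from usage_download()
known_chapters = [
    # Chapters already downloaded; the title/url values here are made up for illustration.
    {"title": "Chapter 10", "url": "https://lhtranslation.net/example-chapter-10.html"},
]

result = subprocess.run(
    ["plugins/lhtranslation.py", "list", MANGA_URL],
    input=json.dumps(known_chapters), capture_output=True, text=True, check=True,
)
new_chapters = json.loads(result.stdout)

for chapter in new_chapters:
    # Assumed per-chapter directory layout; the plugin only requires that the
    # directory exists before "download" is called.
    chapter_dir = os.path.join("/home/adam/Manga/MangaName", chapter["name"].replace("/", "_"))
    os.makedirs(chapter_dir, exist_ok=True)
    subprocess.run(["plugins/lhtranslation.py", "download", chapter["url"], chapter_dir], check=True)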