#!/usr/bin/env python3

import os
import sys
import json

import requests
from lxml import etree

headers = {
    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
}

def usage():
    print("mangatown.py command")
    print("commands:")
    print("  download")
    print("  list")
    sys.exit(1)

def usage_list():
    print("mangatown.py list <url>")
    sys.exit(1)

def usage_download():
    print("mangatown.py download <url> <download_dir>")
    print("examples:")
    print("  mangatown.py download \"https://www.mangatown.com/manga/naruto/v63/c700.2/\" /home/user/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    sys.exit(1)

if len(sys.argv) < 2:
    usage()

def download_file(url, save_path):
    # Stream the file to disk and return the number of bytes written (0 on failure).
    file_size = 0
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        if not response.ok:
            return 0
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    file.write(chunk)
                    file_size += len(chunk)
    return file_size

def list_chapters(url, chapter_list_input):
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()

    # Normalized titles and URLs of already-known chapters (from stdin); they
    # mark where to stop when walking the chapter list.
    seen_titles = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title:
            seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))

    seen_urls = set()
    for item in chapter_list_input:
        chapter_url = item.get("url")
        if chapter_url:
            seen_urls.add(chapter_url)

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath('//ul[@class="chapter_list"]//a'):
        element_text = element.text.strip().replace("/", "_")
        chapter_url = element.attrib.get("href").strip()
        if "/manga/" not in chapter_url:
            continue
        chapter_url = "https://www.mangatown.com" + chapter_url
        # Chapters are listed newest first, so stop at the first already-seen one.
        if element_text.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
            break
        chapters.append({"name": element_text, "url": chapter_url})
    print(json.dumps(chapters))

def is_only_num(s):
    # True if s is non-empty and consists solely of ASCII digits.
    return len(s) > 0 and all('0' <= c <= '9' for c in s)

def download_chapter(url, download_dir):
    if not url.endswith('/'):
        url += '/'

    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()

    # Marker file so an interrupted download can be detected later.
    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    tree = etree.HTML(response.text)

    # The page selector has one <option> per page; the last numeric entry is
    # the page count.
    num_pages = 0
    for element in tree.xpath('//div[@class="page_select"]//option'):
        value = element.attrib.get("value", "")
        if "/manga/" in value and is_only_num(element.text):
            num_pages = int(element.text)

    if num_pages == 0:
        print("Failed to find number of pages for chapter")
        os.remove(in_progress_filepath)
        sys.exit(2)

    img_number = 1
    while img_number <= num_pages:
        image_sources = tree.xpath("//img[@id='image']/@src")
        if not image_sources:
            print("Failed to find image %d for chapter" % img_number)
            os.remove(in_progress_filepath)
            sys.exit(2)

        # Image sources are protocol-relative ("//...").
        image_source = "https:" + image_sources[0]
        ext = image_source[image_source.rfind("."):]
        image_name = str(img_number) + ext
        image_path = os.path.join(download_dir, image_name)
        print("Downloading {} to {}".format(image_source, image_path))
        file_size = download_file(image_source, image_path)
        # A tiny response is a placeholder/error page, not a real image.
        if file_size < 255:
            print("resource temporarily unavailable: %s" % image_source)
            os.remove(in_progress_filepath)
            sys.exit(2)

        img_number += 1
        if img_number - 1 == num_pages:
            break

        # The "next page" link is relative to the chapter URL.
        next_page_urls = tree.xpath("//a[@class='next_page']/@href")
        if not next_page_urls:
            break
        page_url = url + next_page_urls[0]
        response = requests.get(page_url, headers=headers, timeout=30)
        if not response.ok:
            print("Failed to get next page for chapter")
            os.remove(in_progress_filepath)
            sys.exit(2)
        tree = etree.HTML(response.text)

    # Bail out if the loop ended before every page was downloaded.
    if img_number == 1 or (img_number - 1 != num_pages):
        print("Failed to find images for chapter")
        os.remove(in_progress_filepath)
        sys.exit(2)

    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")
    os.remove(in_progress_filepath)

command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()
    url = sys.argv[2]
    # stdin carries a JSON array of already-downloaded chapters; an empty
    # stdin means "list everything".
    chapter_list_input = sys.stdin.read()
    if len(chapter_list_input) == 0:
        chapter_list_input = []
    else:
        chapter_list_input = json.loads(chapter_list_input)
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()
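
# A minimal sketch of how the two commands are typically driven; the file and
# directory names below are illustrative, and the download directory must
# already exist:
#
#   # Emit chapters newer than the ones on stdin (a JSON array of
#   # {"title": ..., "url": ...} objects); an empty stdin lists everything:
#   ./mangatown.py list "https://www.mangatown.com/manga/naruto/" < tracked.json
#
#   # Download a single chapter into an existing directory:
#   ./mangatown.py download "https://www.mangatown.com/manga/naruto/v63/c700.2/" /home/user/Manga/MangaName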