#!/usr/bin/env python3
# The page decryption in this file is based on komikku/servers/mangaplus/__init__.py
# available at https://gitlab.com/valos/Komikku/-/blob/master/komikku/servers/mangaplus/__init__.py
# which is licensed under GPL 3.0
import os
import time
import sys
import re
import requests
import json

# Splits a hex-encoded key string into two-character groups (one byte each).
RE_ENCRYPTION_KEY = re.compile('.{1,2}')

# MangaPlus web API endpoints. {0} is the numeric title/chapter id.
api_url = 'https://jumpg-webapi.tokyo-cdn.com/api'
api_manga_url = api_url + '/title_detailV3?title_id={0}&format=json'
api_chapter_url = api_url + '/manga_viewer?chapter_id={0}&split=yes&img_quality=high&format=json'


class Chapter:
    """One chapter entry from the title-detail API response."""
    id = ""     # numeric chapter id (as returned by the API)
    title = ""  # chapter subtitle shown to the user


class MangaPage:
    """One page image of a chapter viewer response."""
    url = ""               # direct image URL
    encryption_key = None  # hex XOR key string, or None for plain images


def usage():
    """Print top-level usage and exit with status 1."""
    print("mangaplus.py command")
    print("commands:")
    print(" download")
    print(" list")
    sys.exit(1)


def usage_list():
    """Print usage for the 'list' command and exit with status 1."""
    print("mangaplus.py list ")
    print("examples:")
    print(" mangaplus.py list \"https://mangaplus.shueisha.co.jp/titles/100056\"")
    sys.exit(1)


def usage_download():
    """Print usage for the 'download' command and exit with status 1."""
    print("mangaplus.py download ")
    print("examples:")
    print(" mangaplus.py download \"https://mangaplus.shueisha.co.jp/viewer/1006611\" /home/user/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    sys.exit(1)


if len(sys.argv) < 2:
    usage()

# Encryption is done with a symmetric key and the key is provided in the json response....
def download_file(url, encryption_key, save_path):
    """Stream *url* to *save_path*, XOR-decrypting it when a key is given.

    encryption_key is a hex string (or None for unencrypted images); the
    decoded bytes form a repeating XOR key stream applied to the payload.
    Returns True on success, False when the HTTP response is not OK.
    """
    with requests.get(url, stream=True, timeout=30) as response:
        if not response.ok:
            return False
        if encryption_key is not None:
            # Decode the hex key into a byte-sized XOR key stream.
            key_stream = [int(v, 16) for v in RE_ENCRYPTION_KEY.findall(encryption_key)]
            block_size_in_bytes = len(key_stream)
            # Running offset into the key stream across chunk boundaries.
            index = 0
            with open(save_path, "wb") as file:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        content = bytes([v ^ key_stream[(index + i) % block_size_in_bytes]
                                         for i, v in enumerate(chunk)])
                        file.write(content)
                        index += len(chunk)
        else:
            with open(save_path, "wb") as file:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        file.write(chunk)
    return True


def title_url_extract_manga_id(url):
    """Return the numeric title id from a mangaplus titles URL, else None."""
    # Dots escaped so '.' cannot match arbitrary characters in the host.
    result = re.search(r"mangaplus\.shueisha\.co\.jp/titles/([0-9]+)", url)
    if result:
        return result.group(1)


def parse_chapters(chapters_json):
    """Convert an API chapter list (may be None) into Chapter objects."""
    result = []
    if not chapters_json:
        return result
    for chapter_json in chapters_json:
        chapter = Chapter()
        chapter.id = chapter_json["chapterId"]
        chapter.title = chapter_json["subTitle"]
        result.append(chapter)
    return result


def list_chapters(url, chapter_list_input):
    """Print (as JSON) the chapters of *url* not yet in *chapter_list_input*.

    chapter_list_input is a list of {"title": ..., "url": ...} dicts of
    already-seen chapters; listing stops at the first seen chapter.
    Exits with status 2 when the manga id cannot be extracted.
    """
    manga_id = title_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s.\nNote: url is expected to be in this format: mangaplus.shueisha.co.jp/titles/" % url)
        sys.exit(2)

    url = api_manga_url.format(manga_id)
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    resp_json = response.json()

    # Chapters are spread over first/mid/last lists per group.
    all_chapters = []
    chapter_list_groups = resp_json["success"]["titleDetailView"]["chapterListGroup"]
    for chapter_list_group in chapter_list_groups:
        first_chapter_list = chapter_list_group.get("firstChapterList")
        mid_chapter_list = chapter_list_group.get("midChapterList")
        last_chapter_list = chapter_list_group.get("lastChapterList")
        all_chapters.extend(parse_chapters(first_chapter_list))
        all_chapters.extend(parse_chapters(mid_chapter_list))
        all_chapters.extend(parse_chapters(last_chapter_list))

    # Normalized titles of already-downloaded chapters. Guard against a
    # missing "title" key (item.get returns None, which len() would reject).
    seen_titles = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title:
            seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))

    seen_urls = set()
    for item in chapter_list_input:
        chapter_url = item.get("url")
        if chapter_url:
            seen_urls.add(chapter_url)

    # API returns oldest-first; walk newest-first and stop at the first
    # chapter we have already seen.
    chapters = []
    for chapter in reversed(all_chapters):
        title = chapter.title.replace("/", "_")
        url = "https://mangaplus.shueisha.co.jp/viewer/{0}".format(chapter.id)
        if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
            break
        chapters.append({"name": title, "url": url})
    print(json.dumps(chapters))


def viewer_url_extract_manga_id(url):
    """Return the numeric id from a mangaplus viewer URL, else None.

    NOTE: despite the name, this is the chapter id used by the viewer API.
    """
    result = re.search(r"mangaplus\.shueisha\.co\.jp/viewer/([0-9]+)", url)
    if result:
        return result.group(1)


def download_chapter(url, download_dir):
    """Download every page of the chapter at *url* into *download_dir*.

    Writes a ".in_progress" marker (containing the request url) while
    downloading and a ".finished" marker on success; images are saved as
    1.<ext>, 2.<ext>, ... Exits with status 2 on any failure.
    Note: download_dir must already exist.
    """
    request_url = url
    manga_id = viewer_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s.\nNote: url is expected to be in this format: mangaplus.shueisha.co.jp/viewer/" % url)
        sys.exit(2)

    url = api_chapter_url.format(manga_id)
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    resp_json = response.json()

    # Collect real manga pages (the response also contains non-page items).
    manga_pages = []
    pages = resp_json["success"]["mangaViewer"]["pages"]
    for page in pages:
        manga_page_json = page.get("mangaPage")
        if manga_page_json:
            manga_page = MangaPage()
            manga_page.url = manga_page_json["imageUrl"]
            manga_page.encryption_key = manga_page_json.get("encryptionKey")
            manga_pages.append(manga_page)

    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(request_url)

    img_number = 1
    for manga_page in manga_pages:
        # Derive the extension from the remote file name (before any query).
        image_name = manga_page.url.split('?')[0].split('/')[-1]
        ext = image_name[image_name.rfind("."):]
        image_name = str(img_number) + ext
        image_path = os.path.join(download_dir, image_name)
        print("Downloading {} to {}".format(manga_page.url, image_path))
        if not download_file(manga_page.url, manga_page.encryption_key, image_path):
            print("Failed to download image: %s" % manga_page.url)
            os.remove(in_progress_filepath)
            sys.exit(2)
        img_number += 1

    if img_number == 1:
        print("Failed to find images for chapter")
        os.remove(in_progress_filepath)
        sys.exit(2)

    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")
    os.remove(in_progress_filepath)


command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()
    url = sys.argv[2]
    # The already-seen chapter list arrives as JSON on stdin (may be empty).
    chapter_list_input = sys.stdin.read()
    if len(chapter_list_input) == 0:
        chapter_list_input = []
    else:
        chapter_list_input = json.loads(chapter_list_input)
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()