aboutsummaryrefslogtreecommitdiff
path: root/plugins/mangaplus.shueisha.py
diff options
context:
space:
mode:
authordec05eba <dec05eba@protonmail.com>2020-04-12 16:21:48 +0200
committerdec05eba <dec05eba@protonmail.com>2020-07-06 07:12:34 +0200
commitd2427a2fb2b8d20ec2c71d11004732cef9452e3f (patch)
treefd87ffc5cc1046bb25b7207f3f8482faa4e5d7bf /plugins/mangaplus.shueisha.py
parent415fc1ff9ec0f158e376e10624814cc2116121ba (diff)
Fix mangaplus name
Diffstat (limited to 'plugins/mangaplus.shueisha.py')
-rwxr-xr-xplugins/mangaplus.shueisha.py314
1 files changed, 0 insertions, 314 deletions
diff --git a/plugins/mangaplus.shueisha.py b/plugins/mangaplus.shueisha.py
deleted file mode 100755
index 5cd9b67..0000000
--- a/plugins/mangaplus.shueisha.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/env python3
-
-# The page decryption in this file is based on komikku/servers/mangaplus.py
-# available at https://gitlab.com/valos/Komikku/-/blob/master/komikku/servers/mangaplus.py
-# which is licensed under GPL 3.0
-
-import os
-import time
-import sys
-import re
-import requests
-import uuid
-import json
-
-from pure_protobuf.dataclasses_ import field, message
-from pure_protobuf.types import int32
-
-from dataclasses import dataclass
-from enum import IntEnum
-from typing import List
-
# Splits a hex key string into 1-byte (2 hex char) groups; used to build the XOR key stream.
RE_ENCRYPTION_KEY = re.compile('.{1,2}')
-
# Browser-like request headers; a fresh SESSION-TOKEN is generated once per run.
headers = {
    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
    'Origin': 'https://mangaplus.shueisha.co.jp',
    'Referer': 'https://mangaplus.shueisha.co.jp',
    'SESSION-TOKEN': repr(uuid.uuid1()),
}

# MANGA Plus web API endpoints; responses are protobuf-encoded (see messages below).
api_url = 'https://jumpg-webapi.tokyo-cdn.com/api'
api_manga_url = api_url + '/title_detail?title_id={0}'
api_chapter_url = api_url + '/manga_viewer?chapter_id={0}&split=yes&img_quality=high'
-
-# Protocol Buffers messages used to deserialize API responses
-# https://gist.github.com/ZaneHannanAU/437531300c4df524bdb5fd8a13fbab50
-
class ActionEnum(IntEnum):
    """Server action codes carried in API error responses (ErrorResult.action)."""
    DEFAULT = 0
    UNAUTHORIZED = 1
    MAINTAINENCE = 2  # sic: misspelling of "maintenance" kept to avoid breaking references
    GEOIP_BLOCKING = 3
-
class LanguageEnum(IntEnum):
    """Title language codes used by the API (Title.language)."""
    ENGLISH = 0
    SPANISH = 1
-
class UpdateTimingEnum(IntEnum):
    """Release-schedule codes for a title (TitleDetail.update_timimg)."""
    NOT_REGULARLY = 0
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
    THURSDAY = 4
    FRIDAY = 5
    SATURDAY = 6
    SUNDAY = 7
    DAY = 8
-
@message
@dataclass
class Popup:
    """User-facing popup message attached to an API error."""
    subject: str = field(1)
    body: str = field(2)
-
@message
@dataclass
class ErrorResult:
    """API error payload; per-language popups carry the displayable message."""
    action: ActionEnum = field(1)
    english_popup: Popup = field(2)
    spanish_popup: Popup = field(3)
    debug_info: str = field(4)
-
@message
@dataclass
class MangaPage:
    """One page image of a chapter.

    encryption_key (a hex string) is only set when the image bytes are
    XOR-obfuscated; see download_file() for the decryption.
    """
    image_url: str = field(1)
    width: int32 = field(2)
    height: int32 = field(3)
    encryption_key: str = field(5, default=None)
-
@message
@dataclass
class Page:
    """Wrapper around MangaPage; page may be None for non-image viewer entries."""
    page: MangaPage = field(1, default=None)
-
@message
@dataclass
class MangaViewer:
    """Chapter viewer payload: the ordered list of pages."""
    pages: List[Page] = field(1, default_factory=list)
-
@message
@dataclass
class Chapter:
    """A single chapter of a title.

    subtitle may be None when the API omits field 4 — callers must guard.
    """
    title_id: int32 = field(1)
    id: int32 = field(2)
    name: str = field(3)
    subtitle: str = field(4, default=None)
    start_timestamp: int32 = field(6, default=None)
    end_timestamp: int32 = field(7, default=None)
-
@message
@dataclass
class Title:
    """A manga series as returned by the API."""
    id: int32 = field(1)
    name: str = field(2)
    author: str = field(3)
    portrait_image_url: str = field(4)
    landscape_image_url: str = field(5)
    view_count: int32 = field(6)
    language: LanguageEnum = field(7, default=LanguageEnum.ENGLISH)
-
@message
@dataclass
class TitleDetail:
    """Detailed title info returned by the title_detail endpoint."""
    title: Title = field(1)
    title_image_url: str = field(2)
    synopsis: str = field(3)
    background_image_url: str = field(4)
    next_timestamp: int32 = field(5, default=0)
    # sic: "timimg" misspelling of "timing" kept so existing attribute readers keep working
    update_timimg: UpdateTimingEnum = field(6, default=UpdateTimingEnum.DAY)
    viewing_period_description: str = field(7, default=None)
    # Bug fix: default_factory must be the builtin list (as on last_chapters below);
    # typing.List cannot be instantiated and raised TypeError whenever field 9 was absent.
    first_chapters: List[Chapter] = field(9, default_factory=list)
    last_chapters: List[Chapter] = field(10, default_factory=list)
    is_simul_related: bool = field(14, default=True)
    chapters_descending: bool = field(17, default=True)
-
@message
@dataclass
class TitlesAll:
    """Full catalogue listing of titles."""
    titles: List[Title] = field(1)
-
@message
@dataclass
class TitlesRanking:
    """Ranked listing of titles."""
    titles: List[Title] = field(1)
-
@message
@dataclass
class SuccessResult:
    """Successful API response; presumably only the payload matching the
    queried endpoint is set, the rest stay None — verify against the API."""
    is_featured_updated: bool = field(1, default=False)
    titles_all: TitlesAll = field(5, default=None)
    titles_ranking: TitlesRanking = field(6, default=None)
    title_detail: TitleDetail = field(8, default=None)
    manga_viewer: MangaViewer = field(10, default=None)
-
@message
@dataclass
class MangaplusResponse:
    """Top-level API envelope: exactly one of success/error is expected to be set."""
    success: SuccessResult = field(1, default=None)
    error: ErrorResult = field(2, default=None)
-
-
def usage():
    """Print top-level usage and exit with status 1."""
    print("mangaplus.shueisha.py command")
    print("commands:")
    print(" download")
    print(" list")
    # sys.exit instead of the site-module exit() builtin, which is not
    # guaranteed to exist in all interpreter setups.
    sys.exit(1)
-
def usage_list():
    """Print usage for the "list" sub-command and exit with status 1."""
    print("mangaplus.shueisha.py list <url>")
    print("examples:")
    print(" mangaplus.shueisha.py list \"https://mangaplus.shueisha.co.jp/titles/100056\"")
    # sys.exit instead of the site-module exit() builtin (see usage()).
    sys.exit(1)
-
def usage_download():
    """Print usage for the "download" sub-command and exit with status 1."""
    print("mangaplus.shueisha.py download <url> <download_dir>")
    print("examples:")
    print(" mangaplus.shueisha.py download \"https://mangaplus.shueisha.co.jp/viewer/1006611\" /home/adam/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    # sys.exit instead of the site-module exit() builtin (see usage()).
    sys.exit(1)
-
# Require at least one CLI argument (the sub-command); otherwise show usage and exit.
if len(sys.argv) < 2:
    usage()
-
def download_file(url, page, save_path):
    """Stream the image at `url` to `save_path`, decrypting on the fly.

    `page` is a Page message; when page.page.encryption_key is set, the
    payload is XOR-obfuscated with a repeating key derived from that hex
    string and is decrypted chunk by chunk. Raises requests.HTTPError on
    a non-2xx response.
    """
    key_stream = None
    if page.page.encryption_key is not None:
        # The key is a hex string; split into byte values for the XOR key stream.
        key_stream = [int(v, 16) for v in RE_ENCRYPTION_KEY.findall(page.page.encryption_key)]

    # Absolute offset into the stream so the key phase survives chunk boundaries.
    index = 0
    with requests.get(url, headers=headers, stream=True) as response:
        response.raise_for_status()
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if not chunk:
                    continue
                if key_stream is not None:
                    block_size_in_bytes = len(key_stream)
                    # Iterating bytes yields ints already, so XOR directly.
                    chunk = bytes(b ^ key_stream[(index + i) % block_size_in_bytes]
                                  for i, b in enumerate(chunk))
                index += len(chunk)
                file.write(chunk)
-
def title_url_extract_manga_id(url):
    """Extract the numeric title id from a mangaplus titles URL, or None.

    Bug fix: the dots in the original pattern were unescaped, so '.'
    matched any character and look-alike hosts could slip through.
    """
    result = re.search(r"mangaplus\.shueisha\.co\.jp/titles/([0-9]+)", url)
    if result:
        return result.group(1)
    return None
-
def list_chapters(url, chapter_list_input):
    """Print, as a JSON array on stdout, chapters of the title at `url` that
    are not already present in `chapter_list_input`.

    chapter_list_input: list of {"title": ..., "url": ...} dicts describing
    already-known chapters; listing stops at the first already-seen chapter.
    Exits with status 2 on a bad url or HTTP failure, 1 on an API error.
    """
    manga_id = title_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangaplus.shueisha.co.jp/titles/<number>" % url)
        sys.exit(2)

    url = api_manga_url.format(manga_id)

    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print("Failed to list chapters, server responded with status code %d" % response.status_code)
        sys.exit(2)

    resp = MangaplusResponse.loads(response.content)
    if resp.error:
        print("Mangaplus response error: %s" % str(resp.error))
        sys.exit(1)

    # Normalized (lowercase, space-stripped) titles and raw urls we already have.
    seen_titles = set()
    seen_urls = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title:
            seen_titles.add(title.lower().replace(" ", ""))
        item_url = item.get("url")
        if item_url:
            seen_urls.add(item_url)

    resp_data = resp.success.title_detail
    all_chapters = list(resp_data.first_chapters) + list(resp_data.last_chapters)

    chapters = []
    # Newest chapters first; stop at the first one we have already seen.
    for chapter in reversed(all_chapters):
        # Bug fix: subtitle defaults to None in the protobuf message; the old
        # code crashed on .lower() when the API omitted it.
        title = chapter.subtitle or ""
        chapter_url = "https://mangaplus.shueisha.co.jp/viewer/{0}".format(chapter.id)
        if title.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
            break
        chapters.append({ "name": title, "url": chapter_url })
    print(json.dumps(chapters))
-
def viewer_url_extract_manga_id(url):
    """Extract the numeric id from a mangaplus viewer URL, or None.

    NOTE(review): despite the name this extracts the viewer/chapter id, not
    a title id — name kept to avoid breaking callers.
    Bug fix: the dots in the original pattern were unescaped, so '.'
    matched any character and look-alike hosts could slip through.
    """
    result = re.search(r"mangaplus\.shueisha\.co\.jp/viewer/([0-9]+)", url)
    if result:
        return result.group(1)
    return None
-
def download_chapter(url, download_dir):
    """Download every page of the chapter at `url` into `download_dir`.

    Writes a ".in_progress" marker while downloading and a ".finished"
    marker on success; images are saved as 1.<ext>, 2.<ext>, ...
    The download directory must already exist. Exits with status 2 on a
    bad url or HTTP failure, 1 on an API error.
    """
    manga_id = viewer_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangaplus.shueisha.co.jp/viewer/<number>" % url)
        sys.exit(2)

    url = api_chapter_url.format(manga_id)

    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        # Bug fix: message previously said "Failed to list chapters" (copy-paste).
        print("Failed to download chapter, server responded with status code %d" % response.status_code)
        sys.exit(2)

    # Marker so other tooling can detect a partially downloaded chapter.
    # NOTE(review): deliberately left in place when the API returns an error below.
    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    resp = MangaplusResponse.loads(response.content)
    if resp.error:
        print("Mangaplus response error: %s" % str(resp.error))
        sys.exit(1)

    img_number = 1
    for page in resp.success.manga_viewer.pages:
        if page.page is None:
            continue

        # Name pages sequentially, keeping the server file's extension.
        image_name = page.page.image_url.split('?')[0].split('/')[-1]
        # Bug fix: rfind(".") returned the last character as the "extension"
        # when no dot was present; splitext yields "" in that case.
        ext = os.path.splitext(image_name)[1]
        image_path = os.path.join(download_dir, str(img_number) + ext)
        print("Downloading {} to {}".format(page.page.image_url, image_path))
        download_file(page.page.image_url, page, image_path)
        img_number += 1

    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")

    os.remove(in_progress_filepath)
-
# CLI entry point: dispatch on the sub-command given as the first argument.
command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()

    url = sys.argv[2]
    # stdin carries a JSON array of already-known chapters; empty input means none.
    raw_stdin = sys.stdin.read()
    chapter_list_input = json.loads(raw_stdin) if len(raw_stdin) > 0 else []
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()