1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
|
#!/usr/bin/env python3
# The page decryption in this file is based on komikku/servers/mangaplus.py
# available at https://gitlab.com/valos/Komikku/-/blob/master/komikku/servers/mangaplus.py
# which is licensed under GPL 3.0
import os
import time
import sys
import re
import requests
import json
from pure_protobuf.dataclasses_ import field, message
from pure_protobuf.types import int32
from dataclasses import dataclass
from enum import IntEnum
from typing import List
# Splits the hex-encoded image encryption key into 2-character (one byte) chunks.
RE_ENCRYPTION_KEY = re.compile('.{1,2}')
# Base URL of the MANGA Plus web API; {0} below is the title id / chapter id.
api_url = 'https://jumpg-webapi.tokyo-cdn.com/api'
api_manga_url = api_url + '/title_detail?title_id={0}'
api_chapter_url = api_url + '/manga_viewer?chapter_id={0}&split=yes&img_quality=high'
# Protocol Buffers messages used to deserialize API responses
# https://gist.github.com/ZaneHannanAU/437531300c4df524bdb5fd8a13fbab50
class ActionEnum(IntEnum):
    """Server status codes carried in API error responses."""
    DEFAULT = 0
    UNAUTHORIZED = 1
    # NOTE: misspelling of MAINTENANCE; the wire value (2) is what matters,
    # so the name is kept as-is for compatibility with existing callers.
    MAINTAINENCE = 2
    GEOIP_BLOCKING = 3
class LanguageEnum(IntEnum):
    """Languages a title can be published in on MANGA Plus."""
    ENGLISH = 0
    SPANISH = 1
class UpdateTimingEnum(IntEnum):
    """Release schedule of a title (day of week, daily, or irregular)."""
    NOT_REGULARLY = 0
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
    THURSDAY = 4
    FRIDAY = 5
    SATURDAY = 6
    SUNDAY = 7
    DAY = 8
@message
@dataclass
class Popup:
    """Protobuf message: popup dialog text shown with an API error."""
    subject: str = field(1)
    body: str = field(2)
@message
@dataclass
class ErrorResult:
    """Protobuf message: error payload of an API response."""
    action: ActionEnum = field(1)
    # Localized popup texts for the two supported languages.
    english_popup: Popup = field(2)
    spanish_popup: Popup = field(3)
    debug_info: str = field(4)
@message
@dataclass
class MangaPage:
    """Protobuf message: a single downloadable page image."""
    image_url: str = field(1)
    width: int32 = field(2)
    height: int32 = field(3)
    # Hex string used to XOR-decrypt the image bytes; None when the image
    # is served unencrypted. (Field number 4 is unused by this client.)
    encryption_key: str = field(5, default=None)
@message
@dataclass
class Page:
    """Protobuf message: wrapper around an optional MangaPage.

    `page` is None for viewer entries that are not images (the download
    loop skips those).
    """
    page: MangaPage = field(1, default=None)
@message
@dataclass
class MangaViewer:
    """Protobuf message: the page list returned by the manga_viewer endpoint."""
    pages: List[Page] = field(1, default_factory=list)
@message
@dataclass
class Chapter:
    """Protobuf message: one chapter of a title."""
    title_id: int32 = field(1)
    id: int32 = field(2)
    name: str = field(3)
    # May be absent in the response; list_chapters uses it as the chapter title.
    subtitle: str = field(4, default=None)
    # Availability window (field number 5 is unused by this client).
    start_timestamp: int32 = field(6, default=None)
    end_timestamp: int32 = field(7, default=None)
@message
@dataclass
class Title:
    """Protobuf message: summary information about a manga title."""
    id: int32 = field(1)
    name: str = field(2)
    author: str = field(3)
    portrait_image_url: str = field(4)
    landscape_image_url: str = field(5)
    view_count: int32 = field(6)
    language: LanguageEnum = field(7, default=LanguageEnum.ENGLISH)
@message
@dataclass
class TitleDetail:
    """Protobuf message: detailed title info, including its chapter lists."""
    title: Title = field(1)
    title_image_url: str = field(2)
    synopsis: str = field(3)
    background_image_url: str = field(4)
    next_timestamp: int32 = field(5, default=0)
    # NOTE: "update_timimg" misspells "update timing" but is kept — renaming
    # would break any external attribute access.
    update_timimg: UpdateTimingEnum = field(6, default=UpdateTimingEnum.DAY)
    viewing_period_description: str = field(7, default=None)
    # Bug fix: default_factory must be the builtin `list`, not `typing.List`,
    # which cannot be instantiated and raises TypeError when the field is
    # missing from a response. (Field numbers 8, 11-13, 15-16 are unused.)
    first_chapters: List[Chapter] = field(9, default_factory=list)
    last_chapters: List[Chapter] = field(10, default_factory=list)
    is_simul_related: bool = field(14, default=True)
    chapters_descending: bool = field(17, default=True)
@message
@dataclass
class TitlesAll:
    """Protobuf message: the full title catalogue."""
    titles: List[Title] = field(1)
@message
@dataclass
class TitlesRanking:
    """Protobuf message: titles ordered by ranking."""
    titles: List[Title] = field(1)
@message
@dataclass
class SuccessResult:
    """Protobuf message: success payload; only the sub-message matching the
    requested endpoint is populated (others stay None)."""
    is_featured_updated: bool = field(1, default=False)
    # Field numbers 2-4, 7, 9 are unused by this client.
    titles_all: TitlesAll = field(5, default=None)
    titles_ranking: TitlesRanking = field(6, default=None)
    title_detail: TitleDetail = field(8, default=None)
    manga_viewer: MangaViewer = field(10, default=None)
@message
@dataclass
class MangaplusResponse:
    """Protobuf message: top-level API response envelope.

    Presumably exactly one of `success`/`error` is set per response — the
    callers below only check `error` before using `success`.
    """
    success: SuccessResult = field(1, default=None)
    error: ErrorResult = field(2, default=None)
def usage():
    """Print top-level command usage and terminate with exit status 1."""
    for line in (
        "mangaplus.py command",
        "commands:",
        "  download",
        "  list",
    ):
        print(line)
    exit(1)
def usage_list():
    """Print usage for the `list` command and terminate with exit status 1."""
    for line in (
        "mangaplus.py list <url>",
        "examples:",
        '  mangaplus.py list "https://mangaplus.shueisha.co.jp/titles/100056"',
    ):
        print(line)
    exit(1)
def usage_download():
    """Print usage for the `download` command and terminate with exit status 1."""
    for line in (
        "mangaplus.py download <url> <download_dir>",
        "examples:",
        '  mangaplus.py download "https://mangaplus.shueisha.co.jp/viewer/1006611" /home/user/Manga/MangaName',
        "",
        "Note: The manga directory has to exist.",
    ):
        print(line)
    exit(1)
# Show top-level usage (and exit) when no subcommand was given.
if len(sys.argv) < 2:
    usage()
def download_file(url, page, save_path):
    """Stream the image at `url` to `save_path`, decrypting on the fly.

    When `page.page.encryption_key` is set (a hex string), every byte is
    XORed against the repeating key stream derived from it; otherwise the
    bytes are written as-is.

    Returns True on success, False when the HTTP response is not OK.

    Improvements over the previous version: the two near-identical
    download loops are merged into one, and the redundant `int(v)`
    conversion is dropped (iterating `bytes` already yields ints).
    """
    key_stream = None
    if page.page.encryption_key is not None:
        # The key is a hex string; split it into byte-sized hex pairs.
        key_stream = [int(v, 16) for v in RE_ENCRYPTION_KEY.findall(page.page.encryption_key)]
        block_size_in_bytes = len(key_stream)
    # Absolute offset into the file so the key stream stays aligned across chunks.
    index = 0
    with requests.get(url, stream=True) as response:
        if not response.ok:
            return False
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if not chunk:
                    continue
                if key_stream is not None:
                    chunk = bytes(
                        b ^ key_stream[(index + i) % block_size_in_bytes]
                        for i, b in enumerate(chunk)
                    )
                index += len(chunk)
                file.write(chunk)
    return True
def title_url_extract_manga_id(url):
    """Extract the numeric title id from a MANGA Plus title URL.

    Returns the id as a string, or None when `url` does not contain
    mangaplus.shueisha.co.jp/titles/<number>.

    Bug fix: the dots in the host name are now escaped — previously they
    matched any character, accepting look-alike hosts.
    """
    result = re.search(r"mangaplus\.shueisha\.co\.jp/titles/([0-9]+)", url)
    if result:
        return result.group(1)
    return None
def list_chapters(url, chapter_list_input):
    """Print, as JSON on stdout, the chapters of the title at `url` that are
    not already present in `chapter_list_input`.

    `chapter_list_input` is a list of {"title": ..., "url": ...} dicts of
    previously downloaded chapters; iteration stops at the first chapter
    that is already known.
    """
    manga_id = title_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangaplus.shueisha.co.jp/titles/<number>" % url)
        exit(2)
    url = api_manga_url.format(manga_id)
    response = requests.get(url)
    response.raise_for_status()
    resp = MangaplusResponse.loads(response.content)
    if resp.error:
        print("Mangaplus response error: %s" % str(resp.error))
        exit(1)
    # Normalize known titles (lowercase, strip spaces, "/" -> "_") so the
    # comparison below is robust against cosmetic differences.
    seen_titles = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title and len(title) > 0:
            seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
    seen_urls = set()
    for item in chapter_list_input:
        chapter_url = item.get("url")
        if chapter_url and len(chapter_url) > 0:
            seen_urls.add(chapter_url)
    resp_data = resp.success.title_detail
    # The API splits the chapter list into "first" and "last" groups;
    # concatenate them in API order.
    all_chapters = []
    for resp_chapters in (resp_data.first_chapters, resp_data.last_chapters):
        for chapter in resp_chapters:
            all_chapters.append(chapter)
    chapters = []
    # Walk newest-first and stop at the first already-seen chapter.
    for chapter in reversed(all_chapters):
        # NOTE(review): Chapter.subtitle defaults to None; a None subtitle
        # would raise AttributeError here — confirm the API always sends a
        # subtitle for listed chapters.
        title = chapter.subtitle.replace("/", "_")
        url = "https://mangaplus.shueisha.co.jp/viewer/{0}".format(chapter.id)
        if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
            break
        chapters.append({ "name": title, "url": url })
    print(json.dumps(chapters))
def viewer_url_extract_manga_id(url):
    """Extract the numeric chapter id from a MANGA Plus viewer URL.

    Returns the id as a string, or None when `url` does not contain
    mangaplus.shueisha.co.jp/viewer/<number>.

    Bug fix: the dots in the host name are now escaped — previously they
    matched any character, accepting look-alike hosts.
    """
    result = re.search(r"mangaplus\.shueisha\.co\.jp/viewer/([0-9]+)", url)
    if result:
        return result.group(1)
    return None
def download_chapter(url, download_dir):
    """Download every page of the chapter at `url` into `download_dir`.

    A ".in_progress" marker file (containing the requested URL) exists while
    the download runs; on success it is replaced by a ".finished" marker.
    Images are saved as 1.<ext>, 2.<ext>, ... Exits the process with a
    non-zero status on any failure. `download_dir` must already exist.
    """
    request_url = url
    manga_id = viewer_url_extract_manga_id(url)
    if not manga_id:
        print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangaplus.shueisha.co.jp/viewer/<number>" % url)
        exit(2)
    url = api_chapter_url.format(manga_id)
    response = requests.get(url)
    response.raise_for_status()
    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(request_url)
    resp = MangaplusResponse.loads(response.content)
    if resp.error:
        print("Mangaplus response error: %s" % str(resp.error))
        # Bug fix: remove the marker on this failure path too, consistent
        # with every other error exit below (it used to be left behind).
        os.remove(in_progress_filepath)
        exit(1)
    img_number = 1
    for page in resp.success.manga_viewer.pages:
        # Skip viewer entries that are not images.
        if page.page is None:
            continue
        # Derive the extension from the URL path (query string stripped).
        image_name = page.page.image_url.split('?')[0].split('/')[-1]
        # Bug fix: use splitext instead of rfind("."), which on names
        # without a dot returned -1 and kept only the last character.
        ext = os.path.splitext(image_name)[1]
        image_name = str(img_number) + ext
        image_path = os.path.join(download_dir, image_name)
        print("Downloading {} to {}".format(page.page.image_url, image_path))
        if not download_file(page.page.image_url, page, image_path):
            print("Failed to download image: %s" % page.page.image_url)
            os.remove(in_progress_filepath)
            exit(2)
        img_number += 1
    if img_number == 1:
        print("Failed to find images for chapter")
        os.remove(in_progress_filepath)
        exit(2)
    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")
    os.remove(in_progress_filepath)
# Command dispatch: the first CLI argument selects the subcommand.
command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()
    url = sys.argv[2]
    # The list of previously-seen chapters arrives as JSON on stdin;
    # an empty stdin means "no chapters seen yet".
    chapter_list_input = sys.stdin.read()
    if len(chapter_list_input) == 0:
        chapter_list_input = []
    else:
        chapter_list_input = json.loads(chapter_list_input)
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()
|