1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
|
#!/usr/bin/env python3
import os
import time
import sys
import requests
import json
from lxml import etree
# Shared HTTP headers for every request in this script: a desktop Chrome
# User-Agent plus the XMLHttpRequest hint — presumably required by the
# site's ajax endpoints (used in list_chapters); verify against the site.
headers = {
'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
'x-requested-with': 'XMLHttpRequest'
}
def usage():
    """Print top-level usage and exit with status 1.

    Uses sys.exit() rather than the site-module builtin exit(), which is
    not guaranteed to exist when the interpreter runs without site.
    """
    print("lhtranslation.py command")
    print("commands:")
    print("  download")
    print("  list")
    sys.exit(1)
def usage_list():
    """Print usage for the 'list' command and exit with status 1.

    sys.exit() replaces the site-module builtin exit(), which may be
    absent when running without site initialization.
    """
    print("lhtranslation.py list <url>")
    sys.exit(1)
def usage_download():
    """Print usage for the 'download' command and exit with status 1.

    sys.exit() replaces the site-module builtin exit(), which may be
    absent when running without site initialization.
    """
    print("lhtranslation.py download <url> <download_dir>")
    print("examples:")
    print("  lhtranslation.py download \"https://lhtranslation.net/manga/kaifuku-jutsushi-no-yarinaoshi/\" /home/user/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    sys.exit(1)
# Bail out early (with usage text) when no command argument was given.
if len(sys.argv) < 2:
    usage()
def download_file(url, save_path):
    """Stream the resource at *url* into *save_path*.

    Returns True on success, False when the server answers with a
    non-OK HTTP status.  The response is consumed in 8 KiB chunks so
    large images never sit fully in memory.
    """
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        if not response.ok:
            return False
        with open(save_path, "wb") as out:
            # Skip keep-alive empty chunks; write everything else.
            for chunk in filter(None, response.iter_content(chunk_size=8192)):
                out.write(chunk)
        return True
def list_chapters(url, chapter_list_input):
    """Print, as a JSON array on stdout, chapters not yet seen.

    chapter_list_input is a list of dicts with optional "title" and "url"
    keys describing previously-known chapters.  Scraping stops at the
    first already-seen chapter (break, not continue) — presumably the
    site lists newest chapters first; verify against the page.
    Raises requests.HTTPError on a failed chapter-list request.
    """
    url = url.rstrip('/')
    # Chapter list comes from the site's ajax endpoint via POST.
    # timeout=30 added for consistency with the other requests here,
    # so a stalled connection cannot hang the script forever.
    response = requests.post(url + "/ajax/chapters/", headers=headers, timeout=30)
    response.raise_for_status()

    # Normalize seen titles exactly the way scraped titles are
    # normalized below (lowercase, spaces stripped, "/" -> "_").
    seen_titles = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title:
            seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))

    seen_urls = set()
    for item in chapter_list_input:
        chapter_url = item.get("url")
        if chapter_url:
            seen_urls.add(chapter_url)

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath("//a[contains(@href, '/manga/')]"):
        # Guard: <a> elements whose content is nested markup have
        # element.text == None; skip them instead of crashing.
        if element.text is None:
            continue
        title = element.text.strip().replace("/", "_")
        chapter_url = element.attrib.get("href").strip()
        if title.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
            break
        chapters.append({ "name": title, "url": chapter_url })
    print(json.dumps(chapters))
def download_chapter(url, download_dir):
    """Download every page image of one chapter into download_dir.

    Marker-file protocol: a ".in_progress" file (containing the chapter
    URL) exists while the download runs and is replaced by a ".finished"
    file on success, so an external caller can detect interrupted
    downloads.  Exits the process with status 2 on any failure.
    download_dir must already exist (see usage_download note).
    """
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)
    tree = etree.HTML(response.text)
    img_number = 1
    # Real image URLs live in data-src (presumably lazy-loaded images —
    # verify against the page markup), filtered to uploaded content.
    for image_source in tree.xpath("//div[@class='reading-content']//img[contains(@data-src, '/uploads/')]/@data-src"):
        image_source = image_source.strip()
        # Keep the source file extension; pages are named 1.ext, 2.ext, ...
        ext = image_source[image_source.rfind("."):]
        image_name = str(img_number) + ext
        image_path = os.path.join(download_dir, image_name)
        print("Downloading {} to {}".format(image_source, image_path))
        if not download_file(image_source, image_path):
            print("Failed to download image: %s" % image_source)
            os.remove(in_progress_filepath)
            exit(2)
        img_number += 1
    # img_number never advanced -> the xpath matched no images at all.
    if img_number == 1:
        print("Failed to find images for chapter")
        os.remove(in_progress_filepath)
        exit(2)
    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")
    os.remove(in_progress_filepath)
# ---- command dispatch (script entry point) ----
command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()
    url = sys.argv[2]
    # Previously-known chapters arrive as a JSON array on stdin;
    # empty stdin means "no chapters seen yet".
    chapter_list_input = sys.stdin.read()
    if len(chapter_list_input) == 0:
        chapter_list_input = []
    else:
        chapter_list_input = json.loads(chapter_list_input)
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()
|