Diffstat (limited to 'plugins')
-rwxr-xr-x  plugins/mangakatana.py |  2
-rwxr-xr-x  plugins/manganelo.py   | 78
2 files changed, 50 insertions(+), 30 deletions(-)
diff --git a/plugins/mangakatana.py b/plugins/mangakatana.py
index 85fd8d0..586c4ee 100755
--- a/plugins/mangakatana.py
+++ b/plugins/mangakatana.py
@@ -110,7 +110,7 @@ def get_javascript_string_arrays(js_source):
return arrays
arr = js_source[start:end].replace("'", "").split(",")
- arrays.extend(list(filter(None, arr)))
+ arrays.extend(list(filter(lambda x: x is not None and ".com" in x, arr)))
start = end + 1
def uniq_str_arr(arr):
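The mangakatana.py change tightens the JavaScript string-array extraction: instead of only dropping empty strings, the filter now keeps just the entries that look like image URLs. A minimal sketch of the difference, using a made-up sample array:

# The sample data here is hypothetical; real input comes from the site's JS.
arr = "https://i1.mangakatana.com/a.jpg,,spinner,https://i2.mangakatana.com/b.jpg".split(",")

old = list(filter(None, arr))                                     # drops only empty strings
new = list(filter(lambda x: x is not None and ".com" in x, arr))  # keeps URL-like entries

print(old)  # ['https://i1.mangakatana.com/a.jpg', 'spinner', 'https://i2.mangakatana.com/b.jpg']
print(new)  # ['https://i1.mangakatana.com/a.jpg', 'https://i2.mangakatana.com/b.jpg']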
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index 5593fd4..0391eec 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -9,7 +9,8 @@ import json
from lxml import etree
headers = {
- 'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
+ 'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
+ 'Referer': "https://www.nelomanga.com/"
}
def usage():
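The new module-level headers (a Firefox user agent plus a nelomanga Referer) are now shared by every request in the plugin, including the image downloads further down, where the old per-request header block is commented out. A minimal sketch of why the Referer matters, with a placeholder image URL:

import requests

headers = {
    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
    'Referer': "https://www.nelomanga.com/",
}

# Image hosts with hotlink protection commonly reject requests that lack a
# matching Referer; sending the site origin makes the request look like an
# in-page <img> load. (The URL below is a placeholder, not a real endpoint.)
response = requests.get("https://img.example.com/page-1.jpg", headers=headers, timeout=30)
print(response.status_code)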
@@ -36,18 +37,15 @@ if len(sys.argv) < 2:
def download_file(url, save_path):
file_size = 0
- headers = {
- "accept-language": "en-US,en;q=0.9",
- "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
- "sec-fetch-site": "cross-site",
- "sec-fetch-mode": "no-cors",
- "sec-fetch-dest": "image",
- "referer": "https://manganelo.com/",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
- }
- #cookies = {
- # "content_server": "server2"
- #}
+ # headers = {
+ # "accept-language": "en-US,en;q=0.9",
+ # "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
+ # "sec-fetch-site": "cross-site",
+ # "sec-fetch-mode": "no-cors",
+ # "sec-fetch-dest": "image",
+ # "referer": "https://manganelo.com/",
+ # "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
+ # }
with requests.get(url, stream=True, headers=headers, timeout=30) as response:
if not response.ok:
return 0
@@ -102,7 +100,11 @@ def redirect_migrated_url(url, tree, is_chapter):
return tree
def list_chapters(url, chapter_list_input):
+ url = url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga")
response = requests.get(url, timeout=30, headers=headers)
+ if not response.ok:
+ url = url.replace("_", "-")
+ response = requests.get(url, timeout=30, headers=headers)
response.raise_for_status()
seen_titles = set()
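list_chapters now maps old mangakakalot/manganelo URLs onto the nelomanga domain before fetching, and falls back to a hyphenated slug when the underscore form fails. A standalone sketch of that logic, assuming only what the hunk above shows (the helper names are mine):

import requests

def canonicalize(url):
    # Both old domains migrated to nelomanga; chaining the replaces maps
    # either a mangakakalot or a manganelo URL in a single pass.
    return url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga")

def fetch_series_page(url, headers):
    url = canonicalize(url)
    response = requests.get(url, timeout=30, headers=headers)
    if not response.ok:
        # Migrated pages use hyphens where the old slugs used underscores.
        response = requests.get(url.replace("_", "-"), timeout=30, headers=headers)
    response.raise_for_status()
    return response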
@@ -115,7 +117,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
chapter_url = item.get("url")
if chapter_url and len(chapter_url) > 0:
- seen_urls.add(chapter_url.replace("mangakakalot", "manganelo"))
+ seen_urls.add(chapter_url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga"))
tree = etree.HTML(response.text)
tree = redirect_migrated_url(url, tree, False)
@@ -138,19 +140,25 @@ def list_chapters(url, chapter_list_input):
print(json.dumps(chapters))
-def download_chapter(url, download_dir):
- response = requests.get(url, timeout=30)
- response.raise_for_status()
+def download_chapter_images(url, download_dir, use_backup_server):
+ url = url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga")
+ cookies = {}
+ try:
+ new_headers = headers.copy()
+ new_headers['referer'] = url
+ response = requests.get('https://nelomanga.com/change_content_s2' if use_backup_server else 'https://nelomanga.com/change_content_s1', headers=new_headers, allow_redirects=False)
+ response.raise_for_status()
+ cookies = response.cookies
+ except requests.HTTPError:
+ return False
- in_progress_filepath = os.path.join(download_dir, ".in_progress")
- with open(in_progress_filepath, "w") as file:
- file.write(url)
+ response = requests.get(url, timeout=30, headers=headers, cookies=cookies)
+ response.raise_for_status()
tree = etree.HTML(response.text)
tree = redirect_migrated_url(url, tree, True)
if tree is None:
- os.remove(in_progress_filepath)
- exit(2)
+ return False
img_number = 1
for image_source in tree.xpath('//div[@class="container-chapter-reader"]/img/@src'):
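Before fetching the chapter page, download_chapter_images now hits the site's server-toggle endpoint and carries the returned cookies into the chapter request, which is how the page gets served from the chosen image server. A trimmed sketch of that handshake, assuming the endpoints behave as the hunk above implies (the helper name is mine):

import requests

def content_server_cookies(chapter_url, headers, use_backup_server):
    # Requesting the toggle endpoint sets a server-selection cookie;
    # redirects are disabled so the cookie jar from this exact response
    # can be reused on the follow-up chapter request.
    toggle = ('https://nelomanga.com/change_content_s2' if use_backup_server
              else 'https://nelomanga.com/change_content_s1')
    new_headers = headers.copy()
    new_headers['referer'] = chapter_url
    response = requests.get(toggle, headers=new_headers, allow_redirects=False, timeout=30)
    response.raise_for_status()
    return response.cookies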
@@ -159,16 +167,26 @@ def download_chapter(url, download_dir):
image_path = os.path.join(download_dir, image_name)
print("Downloading {} to {}".format(image_source, image_path))
file_size = download_file(image_source, image_path)
- if file_size < 255:
+ if file_size < 100:
print("resource temporary unavailable: %s" % image_source)
- os.remove(in_progress_filepath)
- exit(2)
+ return False
img_number += 1
if img_number == 1:
print("Failed to find images for chapter")
- os.remove(in_progress_filepath)
- exit(2)
+ return False
+
+ return True
+
+def download_chapter(url, download_dir):
+ in_progress_filepath = os.path.join(download_dir, ".in_progress")
+ with open(in_progress_filepath, "w") as file:
+ file.write(url)
+
+ if not download_chapter_images(url, download_dir, False):
+ if not download_chapter_images(url, download_dir, True):
+ os.remove(in_progress_filepath)
+ exit(2)
with open(os.path.join(download_dir, ".finished"), "w") as file:
file.write("1")
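Replacing the old exit(2) calls with return False is what enables the fallback: a failure on the primary image server no longer kills the process, so the wrapper can retry the whole chapter on the backup server before cleaning up. A compact equivalent of the two-pass flow (the helper name is mine):

def download_with_fallback(url, download_dir):
    # Try the primary server first, then the backup; only when both
    # passes fail does the caller remove .in_progress and exit non-zero.
    for use_backup_server in (False, True):
        if download_chapter_images(url, download_dir, use_backup_server):
            return True
    return False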
@@ -180,7 +198,8 @@ if command == "list":
if len(sys.argv) < 3:
usage_list()
- url = sys.argv[2].replace("mangakakalot", "manganelo")
+ #url = sys.argv[2].replace("mangakakalot", "manganelo")
+ url = sys.argv[2]
chapter_list_input = sys.stdin.read()
if len(chapter_list_input) == 0:
chapter_list_input = []
@@ -191,7 +210,8 @@ elif command == "download":
if len(sys.argv) < 4:
usage_download()
- url = sys.argv[2].replace("mangakakalot", "manganelo")
+ #url = sys.argv[2].replace("mangakakalot", "manganelo")
+ url = sys.argv[2]
download_dir = sys.argv[3]
download_chapter(url, download_dir)
else:
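A note on the last two hunks: the URL rewriting that used to happen at the CLI layer is commented out because canonicalization now lives inside list_chapters and download_chapter_images, so the list and download entry points share one migration path instead of duplicating the replace.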