about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  TODO                  2
-rwxr-xr-x  plugins/manganelo.py  27
2 files changed, 17 insertions, 12 deletions
diff --git a/TODO b/TODO
index 375c795..ba92af6 100644
--- a/TODO
+++ b/TODO
@@ -16,3 +16,5 @@ Use fallbacks for manga download too.
Put anime and manga downloads into separate subdirectories in the download directory.
Automatically cleanup downloaded (and seeded) torrents in transmission.
+
+Add command to migrate from manganelo/mangakakalot to mangakatana/mangadex.
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index bbbe856..37d2777 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -9,7 +9,8 @@ import json
from lxml import etree
headers = {
- 'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
+ 'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
+ 'Referer': "https://www.nelomanga.com/"
}
def usage():
@@ -36,15 +37,15 @@ if len(sys.argv) < 2:
def download_file(url, save_path):
file_size = 0
- headers = {
- "accept-language": "en-US,en;q=0.9",
- "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
- "sec-fetch-site": "cross-site",
- "sec-fetch-mode": "no-cors",
- "sec-fetch-dest": "image",
- "referer": "https://manganelo.com/",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
- }
+ # headers = {
+ # "accept-language": "en-US,en;q=0.9",
+ # "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
+ # "sec-fetch-site": "cross-site",
+ # "sec-fetch-mode": "no-cors",
+ # "sec-fetch-dest": "image",
+ # "referer": "https://manganelo.com/",
+ # "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
+ # }
with requests.get(url, stream=True, headers=headers, timeout=30) as response:
if not response.ok:
return 0
@@ -99,6 +100,7 @@ def redirect_migrated_url(url, tree, is_chapter):
return tree
def list_chapters(url, chapter_list_input):
+ url = url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga")
response = requests.get(url, timeout=30, headers=headers)
response.raise_for_status()
@@ -112,7 +114,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
chapter_url = item.get("url")
if chapter_url and len(chapter_url) > 0:
- seen_urls.add(chapter_url.replace("mangakakalot", "manganelo"))
+ seen_urls.add(chapter_url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga"))
tree = etree.HTML(response.text)
tree = redirect_migrated_url(url, tree, False)
@@ -136,11 +138,12 @@ def list_chapters(url, chapter_list_input):
print(json.dumps(chapters))
def download_chapter_images(url, download_dir, use_backup_server):
+ url = url.replace("mangakakalot", "manganelo").replace("manganelo", "nelomanga")
cookies = {}
try:
new_headers = headers.copy()
new_headers['referer'] = url
- response = requests.get('https://mangakakalot.com/change_content_s2' if use_backup_server else 'https://mangakakalot.com/change_content_s1', headers=new_headers, allow_redirects=False)
+ response = requests.get('https://nelomanga.com/change_content_s2' if use_backup_server else 'https://nelomanga.com/change_content_s1', headers=new_headers, allow_redirects=False)
response.raise_for_status()
cookies = response.cookies
except requests.HTTPError: