author     dec05eba <dec05eba@protonmail.com>    2021-04-20 15:19:39 +0200
committer  dec05eba <dec05eba@protonmail.com>    2021-04-20 15:19:39 +0200
commit     0c49bfeeefc0b63c2a486948460435c66a36faf9 (patch)
tree       9e948df195802e95a5e49d4db4f02d953a41d247
parent     43cd46e9e7040b4653340f91d3b190831103ca79 (diff)

Add mangakatana.com
-rw-r--r--   README.md                 2
-rwxr-xr-x   plugins/mangakatana.py  143
-rwxr-xr-x   plugins/manganelos.py     3
3 files changed, 147 insertions(+), 1 deletion(-)
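The new mangakatana.py plugin exposes two subcommands: `list` reads already-seen chapters as JSON on stdin and prints any newer chapters as JSON on stdout, and `download` saves one chapter into an existing directory. Below is a rough sketch of driving the `list` command from Python; the manga URL is taken from the plugin's own usage example, while the plugin path and the sample "seen" entry are illustrative placeholders, not something this commit defines.

    #!/usr/bin/env python3
    # Sketch only: drive the new plugin's "list" command the way a tracker might.
    # The plugin path and the sample "seen" chapter below are placeholders.
    import json
    import subprocess

    seen = [{"title": "Chapter 1", "url": "http://mangakatana.com/manga/useless-ponko.22679/c1"}]

    result = subprocess.run(
        ["plugins/mangakatana.py", "list", "http://mangakatana.com/manga/useless-ponko.22679"],
        input=json.dumps(seen), capture_output=True, text=True, check=True)

    # The plugin stops at the first already-seen title/url, so stdout only holds
    # newer chapters, each as {"name": ..., "url": ...}.
    for chapter in json.loads(result.stdout):
        print(chapter["name"], chapter["url"])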
diff --git a/README.md b/README.md
index 4216419..723ea94 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# AutoMedia
-Automatically track new releases of media and download them. Currently works with rss for torrent sites (`nyaa.si`) and for these manga sites: `manganelo.com`, `manganelos.com`, `mangatown.com`, `mangakakalot.com`, `lhtranslation.net`, `mangawindow.net`, `mangaplus.shueisha.co.jp` and `mangadex.org`.
+Automatically track new releases of media and download them. Currently works with rss for torrent sites (`nyaa.si`) and for these manga sites: `manganelo.com`, `manganelos.com`, `mangatown.com`, `mangakakalot.com`, `mangakatana.com`, `lhtranslation.net`, `mangawindow.net`, `mangaplus.shueisha.co.jp` and `mangadex.org`.
A notification is shown on the screen when a download finishes (if notify-send is installed).
## Installation
diff --git a/plugins/mangakatana.py b/plugins/mangakatana.py
new file mode 100755
index 0000000..f2195a4
--- /dev/null
+++ b/plugins/mangakatana.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import requests
+import json
+
+from lxml import etree
+
+headers = {
+    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
+}
+
+def usage():
+    print("mangakatana.py command")
+    print("commands:")
+    print(" download")
+    print(" list")
+    exit(1)
+
+def usage_list():
+    print("mangakatana.py list <url>")
+    exit(1)
+
+def usage_download():
+    print("mangakatana.py download <url> <download_dir>")
+    print("examples:")
+    print(" mangakatana.py download \"http://mangakatana.com/manga/useless-ponko.22679\" /home/adam/Manga/MangaName")
+    print("")
+    print("Note: The manga directory has to exist.")
+    exit(1)
+
+if len(sys.argv) < 2:
+    usage()
+
+def download_file(url, save_path):
+    file_size = 0
+    with requests.get(url, headers=headers, stream=True) as response:
+        if not response.ok:
+            return 0
+        with open(save_path, "wb") as file:
+            for chunk in response.iter_content(chunk_size=8192):
+                if chunk:
+                    file.write(chunk)
+                    file_size += len(chunk)
+    return file_size
+
+def list_chapters(url, chapter_list_input):
+    response = requests.get(url, headers=headers)
+    response.raise_for_status()
+
+    seen_titles = set()
+    for item in chapter_list_input:
+        title = item.get("title")
+        if title and len(title) > 0:
+            seen_titles.add(title.lower().replace(" ", ""))
+
+    seen_urls = set()
+    for item in chapter_list_input:
+        url = item.get("url")
+        if url and len(url) > 0:
+            seen_urls.add(url)
+
+    tree = etree.HTML(response.text)
+    chapters = []
+    for element in tree.xpath('//div[@class="chapters"]//div[@class="chapter"]//a'):
+        element_text = element.text.strip()
+        url = element.attrib.get("href").strip()
+        if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
+            break
+        chapters.append({ "name": element_text, "url": url })
+
+    print(json.dumps(chapters))
+
+def download_chapter(url, download_dir):
+    response = requests.get(url, headers=headers)
+    response.raise_for_status()
+
+    in_progress_filepath = os.path.join(download_dir, ".in_progress")
+    with open(in_progress_filepath, "w") as file:
+        file.write(url)
+
+    response_text = response.text
+    sources_start = response_text.find("ytaw=[")
+    if sources_start == -1:
+        print("Failed to find images for chapter")
+        os.remove(in_progress_filepath)
+        exit(2)
+
+    sources_start += 6
+    sources_end = response_text.find("]", sources_start)
+    if sources_end == -1:
+        print("Failed to find images for chapter")
+        os.remove(in_progress_filepath)
+        exit(2)
+
+    image_sources = response_text[sources_start:sources_end].replace("'", "").split(",")
+    img_number = 1
+    for image_source in image_sources:
+        image_source = image_source.strip()
+        if not image_source:
+            continue
+        ext = image_source[image_source.rfind("."):]
+        image_name = str(img_number) + ext
+        image_path = os.path.join(download_dir, image_name)
+        print("Downloading {} to {}".format(image_source, image_path))
+        if not download_file(image_source, image_path):
+            print("Failed to download image: %s" % image_source)
+            os.remove(in_progress_filepath)
+            exit(2)
+        img_number += 1
+
+    if img_number == 1:
+        print("Failed to find images for chapter")
+        os.remove(in_progress_filepath)
+        exit(2)
+
+    with open(os.path.join(download_dir, ".finished"), "w") as file:
+        file.write("1")
+
+    os.remove(in_progress_filepath)
+
+command = sys.argv[1]
+if command == "list":
+    if len(sys.argv) < 3:
+        usage_list()
+
+    url = sys.argv[2]
+    chapter_list_input = sys.stdin.read()
+    if len(chapter_list_input) == 0:
+        chapter_list_input = []
+    else:
+        chapter_list_input = json.loads(chapter_list_input)
+    list_chapters(url, chapter_list_input)
+elif command == "download":
+    if len(sys.argv) < 4:
+        usage_download()
+    url = sys.argv[2]
+    download_dir = sys.argv[3]
+    download_chapter(url, download_dir)
+else:
+    usage()
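For reference, the scraping step at the heart of download_chapter above pulls the chapter's image URLs out of an inline JavaScript array named ytaw. A self-contained sketch of that extraction, run against a made-up HTML fragment rather than a fetched chapter page (the host.example URLs are placeholders):

    # Sketch of the "ytaw=[...]" extraction used by download_chapter above,
    # applied to a made-up HTML fragment instead of a live chapter page.
    response_text = "...;var ytaw=['https://host.example/001.jpg','https://host.example/002.png',];..."

    sources_start = response_text.find("ytaw=[")
    assert sources_start != -1
    sources_start += len("ytaw=[")  # the plugin hard-codes this as += 6
    sources_end = response_text.find("]", sources_start)

    # Drop the quotes, split on commas and skip the empty entry left by the trailing comma.
    image_sources = [s.strip() for s in response_text[sources_start:sources_end].replace("'", "").split(",") if s.strip()]
    print(image_sources)  # ['https://host.example/001.jpg', 'https://host.example/002.png']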
diff --git a/plugins/manganelos.py b/plugins/manganelos.py
index bb5be66..7e220d5 100755
--- a/plugins/manganelos.py
+++ b/plugins/manganelos.py
@@ -90,6 +90,9 @@ def download_chapter(url, download_dir):
     for image_source_list in tree.xpath('//p[@id="arraydata"]/text()'):
         image_source_list = image_source_list.strip()
         for image_source in image_source_list.split(','):
+            image_source = image_source.strip()
+            if not image_source:
+                continue
             ext = image_source[image_source.rfind("."):]
             image_name = str(img_number) + ext
             image_path = os.path.join(download_dir, image_name)
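The three lines added to manganelos.py guard against blank entries in the scraped image list: if the comma-separated array ends with a trailing comma or contains stray whitespace, split(',') yields an empty element that would otherwise be treated as an image URL. A tiny illustration of the case the new continue skips (the URLs are placeholders):

    # The failure mode the added guard avoids: a trailing comma leaves an empty string after split().
    image_source_list = "https://host.example/001.jpg,https://host.example/002.jpg,"
    print(image_source_list.split(","))
    # ['https://host.example/001.jpg', 'https://host.example/002.jpg', '']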