aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md2
-rwxr-xr-xplugins/mangawindow.py119
2 files changed, 120 insertions, 1 deletions
diff --git a/README.md b/README.md
index 90dba1c..e12e54f 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# AutoMedia
-Automatically track new releases of media and download them. Currently works with rss for torrent sites (nyaa.si) and a few manga websites (see plugins directory).
+Automatically track new releases of media and download them. Currently works with rss for torrent sites (`nyaa.si`) and for these manga sites: `manganelo.com`, `mangakakalot.com`, `lhtranslation.net` and `mangawindow.net`.
A notification is shown on the screen when a download finishes (if notify-send is installed).
## Usage
Run automedia with `sync` option and keep it running to track media. You can then use `add` option to add new media to track.
diff --git a/plugins/mangawindow.py b/plugins/mangawindow.py
new file mode 100755
index 0000000..d55e68f
--- /dev/null
+++ b/plugins/mangawindow.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import requests
+import json
+import re
+
+from lxml import etree
+
def usage():
    """Print the top-level command summary and exit with status 1."""
    for line in ("mangawindow.py command",
                 "commands:",
                 " download",
                 " list"):
        print(line)
    exit(1)
+
def usage_list():
    """Show usage for the `list` subcommand, then exit with status 1."""
    message = "mangawindow.py list <url>"
    print(message)
    exit(1)
+
def usage_download():
    """Show usage and an example for the `download` subcommand, then exit(1)."""
    lines = [
        "mangawindow.py download <url> <download_dir>",
        "examples:",
        " mangawindow.py download \"https://mangawindow.net/chapter/1430298\" /home/adam/Manga/MangaName",
        "",
        "Note: The manga directory has to exist.",
    ]
    print("\n".join(lines))
    exit(1)
+
# Bail out early with the usage text when no subcommand was given.
if len(sys.argv) < 2:
    usage()
+
def download_file(url, save_path):
    """Stream the resource at *url* to *save_path* in 8 KiB chunks.

    Raises requests.HTTPError on a non-2xx response (before the output
    file is created) and requests.Timeout if the server stalls.
    """
    # timeout added: without it a stalled server hangs the sync loop forever.
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    file.write(chunk)
+
def list_chapters(url, chapter_list_input):
    """Print, as a JSON array on stdout, the chapters at *url* that are newer
    than any already-tracked chapter.

    chapter_list_input is a list of {"title": ..., "url": ...} dicts of
    previously seen chapters; scanning stops at the first match (the site
    lists chapters newest-first, so everything after a seen one is older —
    presumed from the original `break`; confirm against the caller).
    Exits with status 2 on any HTTP or scraping failure.
    """
    response = requests.get(url, timeout=30)
    if response.status_code != 200:
        print("Failed to list chapters, server responded with status code %d" % response.status_code)
        exit(2)

    # Titles are compared case-insensitively with spaces removed.
    seen_titles = {
        item["title"].lower().replace(" ", "")
        for item in chapter_list_input
        if item.get("title")
    }
    seen_urls = {item["url"] for item in chapter_list_input if item.get("url")}

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath("//a[@class='chapt']"):
        title = element.findtext('b')
        if title is None:
            print("Failed to get title for chapter")
            exit(2)
        title = title.strip()
        # Guard: a chapter anchor without href would otherwise crash on .strip().
        href = element.attrib.get("href")
        if not href:
            print("Failed to get title for chapter")
            exit(2)
        # Use a fresh name instead of shadowing the `url` parameter.
        chapter_url = "https://mangawindow.net" + href.strip()
        if title.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
            break
        chapters.append({ "name": title, "url": chapter_url })
    print(json.dumps(chapters))
+
def download_chapter(url, download_dir):
    """Download every page image of the chapter at *url* into *download_dir*.

    Writes a ".in_progress" marker file (containing the chapter url) while
    downloading, then a ".finished" marker on success and removes the
    in-progress marker. *download_dir* must already exist.
    Exits with status 2 if the chapter page cannot be fetched.
    """
    response = requests.get(url, timeout=30)
    if response.status_code != 200:
        # Fixed copy-pasted message: this is the download path, not `list`.
        print("Failed to download chapter, server responded with status code %d" % response.status_code)
        exit(2)

    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    image_urls = re.findall(r'mangawindow\.net/images[^"]+', response.text)
    for img_number, image_url in enumerate(image_urls, start=1):
        # NOTE(review): the CDN shard "z-img-04" is hard-coded — assumes one
        # host serves every image; confirm against the site's current markup.
        image_url = "https://z-img-04." + image_url
        ext = image_url[image_url.rfind("."):]
        image_name = str(img_number) + ext
        image_path = os.path.join(download_dir, image_name)
        print("Downloading {} to {}".format(image_url, image_path))
        download_file(image_url, image_path)

    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")

    os.remove(in_progress_filepath)
+
# Dispatch on the subcommand given as the first CLI argument.
command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()

    url = sys.argv[2]
    # Previously-seen chapters arrive as JSON on stdin; empty input means none.
    raw_stdin = sys.stdin.read()
    chapter_list_input = json.loads(raw_stdin) if raw_stdin else []
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()