aboutsummaryrefslogtreecommitdiff
path: root/plugins
diff options
context:
space:
mode:
authordec05eba <dec05eba@protonmail.com>2019-05-31 07:55:31 +0200
committerdec05eba <dec05eba@protonmail.com>2020-07-06 07:12:33 +0200
commit1f623da3b6b056a028c83bd1809b3429b94e1857 (patch)
tree645f71a7f5f7abae5b9110bdd89ebbbb76079eb8 /plugins
Initial commit, support for rss torrent, manganelo and readms
Diffstat (limited to 'plugins')
l---------plugins/mangakakalot1
-rwxr-xr-xplugins/manganelo.py101
-rwxr-xr-xplugins/readms.py113
3 files changed, 215 insertions, 0 deletions
diff --git a/plugins/mangakakalot b/plugins/mangakakalot
new file mode 120000
index 0000000..3f83212
--- /dev/null
+++ b/plugins/mangakakalot
@@ -0,0 +1 @@
+manganelo.py \ No newline at end of file
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
new file mode 100755
index 0000000..1526e6f
--- /dev/null
+++ b/plugins/manganelo.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import requests
+import json
+import subprocess
+
+from lxml import etree
+
def usage():
    """Print the top-level command summary and exit with status 1.

    Uses sys.exit() rather than the site builtin exit(), which is only
    meant for interactive use and may be absent in embedded interpreters.
    """
    print("manganelo.py command")
    print("commands:")
    print(" download")
    print(" list")
    sys.exit(1)
+
def usage_list():
    """Print usage for the "list" subcommand and exit with status 1.

    sys.exit() replaces the interactive-only exit() builtin.
    """
    print("manganelo.py list <url> [latest]")
    print("examples:")
    print("  manganelo.py list \"https://mangakakalot.com/manga/assassins_pride\"")
    print("  manganelo.py list \"https://mangakakalot.com/manga/assassins_pride\" \"Chapter 13\"")
    sys.exit(1)
+
def usage_download():
    """Print usage for the "download" subcommand and exit with status 1.

    sys.exit() replaces the interactive-only exit() builtin.
    """
    print("manganelo.py download <url> <download_dir>")
    print("examples:")
    print("  manganelo.py download \"https://mangakakalot.com/chapter/vy918232/chapter_16\" /home/adam/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    sys.exit(1)
+
# A subcommand (list/download) is mandatory; bail out with usage help otherwise.
if len(sys.argv) < 2:
    usage()
+
def download_file(url, save_path):
    """Fetch |url| and write the response body to |save_path|.

    Returns True on success; prints a diagnostic and returns False on any
    failure. Implemented with the standard library (urllib) instead of
    shelling out to wget, so the plugin works even when wget is not
    installed — and unlike `wget -o /dev/null`, the real error is reported.
    """
    import shutil
    import urllib.request

    try:
        with urllib.request.urlopen(url, timeout=60) as response:
            with open(save_path, "wb") as out_file:
                # Stream in chunks; avoids holding a whole image in memory.
                shutil.copyfileobj(response, out_file)
    except (OSError, ValueError) as error:  # URLError subclasses OSError
        print("Failed to download file: {}, error: {}".format(url, error))
        return False
    return True
+
def list_chapters(url, latest):
    """Print a JSON object {"items": [...]} of chapters found at |url|.

    |latest|: if non-empty, stop before the chapter with that name so only
    newer chapters are emitted. Exits with status 2 on an HTTP error.
    """
    response = requests.get(url)
    if response.status_code != 200:
        print("Failed to list chapters, server responded with status code %d" % response.status_code)
        sys.exit(2)

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath('//div[@class="chapter-list"]//a'):
        # Anchors without text would crash .strip(); skip malformed entries.
        if element.text is None:
            continue
        name = element.text.strip()
        # Compare stripped names: emitted names are stripped below, so the
        # original raw-text comparison could never match a stored "latest"
        # value and the whole history would be re-listed every run.
        if latest and name == latest:
            break
        chapters.append({"name": name, "url": element.attrib.get("href").strip()})
    print(json.dumps({"items": chapters}))
+
def download_chapter(url, download_dir):
    """Download every page image of the chapter at |url| into |download_dir|.

    Writes an "in_progress" marker file while running and a "finished"
    marker only after all images downloaded successfully; images are saved
    as 1.<ext>, 2.<ext>, ... The download directory must already exist.
    Exits with status 2 on an HTTP error or a failed image download.
    """
    response = requests.get(url)
    if response.status_code != 200:
        # Report the operation that actually failed (the original message
        # was copy-pasted from list_chapters).
        print("Failed to download chapter, server responded with status code %d" % response.status_code)
        sys.exit(2)

    in_progress_filepath = os.path.join(download_dir, "in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    tree = etree.HTML(response.text)
    for img_number, image_source in enumerate(tree.xpath('//div[@id="vungdoc"]/img/@src'), start=1):
        # Keep the source extension (e.g. ".jpg") for the numbered file name.
        ext = image_source[image_source.rfind("."):]
        image_path = os.path.join(download_dir, str(img_number) + ext)
        print("Downloading {} to {}".format(image_source, image_path))
        if not download_file(image_source, image_path):
            # Abort and leave "in_progress" in place: a chapter with missing
            # pages must never be marked as finished.
            sys.exit(2)

    with open(os.path.join(download_dir, "finished"), "w") as file:
        file.write("1")

    os.remove(in_progress_filepath)
+
# Dispatch on the subcommand given as the first argument.
command = sys.argv[1]
if command == "list":
    # list <url> [latest]
    if len(sys.argv) < 3:
        usage_list()
    manga_url = sys.argv[2]
    newest_known = sys.argv[3] if len(sys.argv) >= 4 else ""
    list_chapters(manga_url, newest_known)
elif command == "download":
    # download <url> <download_dir>
    if len(sys.argv) < 4:
        usage_download()
    download_chapter(sys.argv[2], sys.argv[3])
else:
    usage()
diff --git a/plugins/readms.py b/plugins/readms.py
new file mode 100755
index 0000000..d88deb1
--- /dev/null
+++ b/plugins/readms.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+
+import os
+import time
+import sys
+import requests
+import json
+import subprocess
+
+from lxml import etree
+
def usage():
    """Print the top-level command summary and exit with status 1.

    Uses sys.exit() rather than the site builtin exit(), which is only
    meant for interactive use and may be absent in embedded interpreters.
    """
    print("readms.py command")
    print("commands:")
    print(" download")
    print(" list")
    sys.exit(1)
+
def usage_list():
    """Print usage for the "list" subcommand and exit with status 1.

    sys.exit() replaces the interactive-only exit() builtin.
    """
    print("readms.py list <url> [latest]")
    print("examples:")
    print("  readms.py list \"https://readms.net/manga/a_trail_of_blood\"")
    print("  readms.py list \"https://readms.net/manga/a_trail_of_blood\" \"48 - Blood oath\"")
    sys.exit(1)
+
def usage_download():
    """Print usage for the "download" subcommand and exit with status 1.

    sys.exit() replaces the interactive-only exit() builtin.
    """
    print("readms.py download <url> <download_dir>")
    print("examples:")
    print("  readms.py download \"https://readms.net/manga/a_trail_of_blood\" /home/adam/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    sys.exit(1)
+
# A subcommand (list/download) is mandatory; bail out with usage help otherwise.
if len(sys.argv) < 2:
    usage()
+
def download_file(url, save_path):
    """Fetch |url| and write the response body to |save_path|.

    Returns True on success; prints a diagnostic and returns False on any
    failure. Implemented with the standard library (urllib) instead of
    shelling out to wget, so the plugin works even when wget is not
    installed — and unlike `wget -o /dev/null`, the real error is reported.
    """
    import shutil
    import urllib.request

    try:
        with urllib.request.urlopen(url, timeout=60) as response:
            with open(save_path, "wb") as out_file:
                # Stream in chunks; avoids holding a whole image in memory.
                shutil.copyfileobj(response, out_file)
    except (OSError, ValueError) as error:  # URLError subclasses OSError
        print("Failed to download file: {}, error: {}".format(url, error))
        return False
    return True
+
def list_chapters(url, latest):
    """Print a JSON object {"items": [...]} of chapters found at |url|.

    |latest|: if non-empty, stop before the chapter with that name so only
    newer chapters are emitted. Chapter URLs are made absolute against
    readms.net. Exits with status 2 on an HTTP error.
    """
    response = requests.get(url)
    if response.status_code != 200:
        print("Failed to list chapters, server responded with status code %d" % response.status_code)
        sys.exit(2)

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath('//table//tr//a'):
        # Anchors without text would crash .strip(); skip malformed entries.
        if element.text is None:
            continue
        name = element.text.strip()
        # Compare stripped names: emitted names are stripped below, so the
        # original raw-text comparison could never match a stored "latest"
        # value and the whole history would be re-listed every run.
        if latest and name == latest:
            break
        chapters.append({"name": name, "url": "https://readms.net" + element.attrib.get("href").strip()})
    print(json.dumps({"items": chapters}))
+
def download_chapter(url, download_dir):
    """Download all pages of the readms chapter starting at |url|.

    readms serves one image per page, so each page is fetched and the
    "next page" link is followed until none remains. Writes an
    "in_progress" marker while running and a "finished" marker only after
    every page downloaded successfully; images are saved as 1.<ext>,
    2.<ext>, ... into |download_dir|, which must already exist.
    Exits with status 2 on an HTTP error or a failed image download.
    """
    in_progress_filepath = os.path.join(download_dir, "in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    img_number = 1
    while True:
        response = requests.get(url)
        if response.status_code != 200:
            # Report the operation that actually failed (the original
            # message was copy-pasted from list_chapters).
            print("Failed to download chapter page, server responded with status code %d" % response.status_code)
            sys.exit(2)

        tree = etree.HTML(response.text)

        # Exactly one page image is expected; anything else means we ran
        # past the last page (or the page layout changed).
        image_sources = tree.xpath('//img[@id="manga-page"]/@src')
        if len(image_sources) != 1:
            break

        image_source = "https:" + image_sources[0]
        # Keep the source extension (e.g. ".jpg") for the numbered file name.
        ext = image_source[image_source.rfind("."):]
        image_path = os.path.join(download_dir, str(img_number) + ext)
        print("Downloading {} to {}".format(image_source, image_path))
        if not download_file(image_source, image_path):
            # Abort and leave "in_progress" in place: a chapter with missing
            # pages must never be marked as finished.
            sys.exit(2)

        next_pages = tree.xpath('//div[@class="page"]//a/@href')
        if len(next_pages) != 1:
            break

        url = "https://readms.net" + next_pages[0]
        img_number += 1

    with open(os.path.join(download_dir, "finished"), "w") as file:
        file.write("1")

    os.remove(in_progress_filepath)
+
# Dispatch on the subcommand given as the first argument.
command = sys.argv[1]
if command == "list":
    # list <url> [latest]
    if len(sys.argv) < 3:
        usage_list()
    manga_url = sys.argv[2]
    newest_known = sys.argv[3] if len(sys.argv) >= 4 else ""
    list_chapters(manga_url, newest_known)
elif command == "download":
    # download <url> <download_dir>
    if len(sys.argv) < 4:
        usage_download()
    download_chapter(sys.argv[2], sys.argv[3])
else:
    usage()