author     dec05eba <dec05eba@protonmail.com>  2023-03-11 13:40:20 +0100
committer  dec05eba <dec05eba@protonmail.com>  2023-03-11 13:40:20 +0100
commit     b78c1d5f90ebf567c5099fd895318ad13bb7905a (patch)
tree       3efa60a196640d1c8c90ecb7477e6be0b354f4a2
parent     a91257864b78e688af9c412fc82e81ab714f3ec8 (diff)
Remove readm as it now has a Google captcha
-rw-r--r--   README.md            2
-rwxr-xr-x   plugins/readm.py   131
2 files changed, 1 insertion, 132 deletions
diff --git a/README.md b/README.md
index b00e6c7..0c78c07 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# AutoMedia
-Automatically track new releases of media and download them. Currently works with rss for torrent sites (`nyaa.si`) and for these manga sites: `manganelo.com`, `manganelos.com`, `mangatown.com`, `mangakakalot.com`, `mangakatana.com`, `lhtranslation.net`, `readm.org`, `mangaplus.shueisha.co.jp` and `mangadex.org`.\
+Automatically track new releases of media and download them. Currently works with rss for torrent sites (`nyaa.si`) and for these manga sites: `manganelo.com`, `manganelos.com`, `mangatown.com`, `mangakakalot.com`, `mangakatana.com`, `lhtranslation.net`, `mangaplus.shueisha.co.jp` and `mangadex.org`.\
A notification is shown on the screen when a download finishes (if notify-send is installed).\
AutoMedia checks and downloads updates every 15 minutes. Torrents stop seeding after a ratio of 2.0.\
## Installation
diff --git a/plugins/readm.py b/plugins/readm.py
deleted file mode 100755
index 2b4dffb..0000000
--- a/plugins/readm.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import time
-import sys
-import requests
-import json
-
-from lxml import etree
-
-headers = {
- 'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
-}
-
-def usage():
- print("readm.py command")
- print("commands:")
- print(" download")
- print(" list")
- exit(1)
-
-def usage_list():
- print("readm.py list <url>")
- exit(1)
-
-def usage_download():
- print("readm.py download <url> <download_dir>")
- print("examples:")
- print(" readm.py download \"https://readm.org/manga/16609/307/all-pages\" /home/user/Manga/MangaName")
- print("")
- print("Note: The manga directory has to exist.")
- exit(1)
-
-if len(sys.argv) < 2:
- usage()
-
-def download_file(url, save_path):
- file_size = 0
- with requests.get(url, headers=headers, stream=True, timeout=30) as response:
- if not response.ok:
- return 0
- with open(save_path, "wb") as file:
- for chunk in response.iter_content(chunk_size=8192):
- if chunk:
- file.write(chunk)
- file_size += len(chunk)
- return file_size
-
-def list_chapters(url, chapter_list_input):
- response = requests.get(url, headers=headers, timeout=30)
- response.raise_for_status()
-
- seen_titles = set()
- for item in chapter_list_input:
- title = item.get("title")
- if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
-
- seen_urls = set()
- for item in chapter_list_input:
- chapter_url = item.get("url")
- if chapter_url and len(chapter_url) > 0:
- seen_urls.add(chapter_url)
-
- tree = etree.HTML(response.text)
- chapters = []
- for element in tree.xpath('//div[@class="episodes-list"]//a'):
- element_text = element.text.strip().replace("/", "_")
- url = element.attrib.get("href").strip()
- url = "https://readm.org" + url
- if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
- break
- chapters.append({ "name": element_text, "url": url })
-
- print(json.dumps(chapters))
-
-def download_chapter(url, download_dir):
- response = requests.get(url, timeout=30)
- response.raise_for_status()
-
- in_progress_filepath = os.path.join(download_dir, ".in_progress")
- with open(in_progress_filepath, "w") as file:
- file.write(url)
-
- tree = etree.HTML(response.text)
- img_number = 1
- for element in tree.xpath('//div[@id="content"]//img[contains(@src, "/chapter_files/")]'):
- image_source = element.attrib.get("src").strip()
- if not image_source:
- continue
- image_source = "https://readm.org" + image_source
- ext = image_source[image_source.rfind("."):]
- image_name = str(img_number) + ext
- image_path = os.path.join(download_dir, image_name)
- print("Downloading {} to {}".format(image_source, image_path))
- if not download_file(image_source, image_path):
- print("Failed to download image: %s" % image_source)
- os.remove(in_progress_filepath)
- exit(2)
- img_number += 1
-
- if img_number == 1:
- print("Failed to find images for chapter")
- os.remove(in_progress_filepath)
- exit(2)
-
- with open(os.path.join(download_dir, ".finished"), "w") as file:
- file.write("1")
-
- os.remove(in_progress_filepath)
-
-command = sys.argv[1]
-if command == "list":
- if len(sys.argv) < 3:
- usage_list()
-
- url = sys.argv[2]
- chapter_list_input = sys.stdin.read()
- if len(chapter_list_input) == 0:
- chapter_list_input = []
- else:
- chapter_list_input = json.loads(chapter_list_input)
- list_chapters(url, chapter_list_input)
-elif command == "download":
- if len(sys.argv) < 4:
- usage_download()
- url = sys.argv[2]
- download_dir = sys.argv[3]
- download_chapter(url, download_dir)
-else:
- usage()