aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authordec05eba <dec05eba@protonmail.com>2020-07-07 05:42:17 +0200
committerdec05eba <dec05eba@protonmail.com>2020-07-07 05:42:17 +0200
commitf40fdb11160c8dc1cc4fa2d68d893637cd598272 (patch)
tree67d27129fc0418ac840abe6a3d99d36caec6eeb3
parentb9267c0ff2b13cb3a3bdd8f28a1a2329ef01f5d6 (diff)
Add option to start tracking anime from filename
-rw-r--r--README.md2
-rwxr-xr-xautomedia.py44
-rwxr-xr-xdomain.py2
-rwxr-xr-xepisode.py73
-rw-r--r--requirements.txt3
5 files changed, 118 insertions, 6 deletions
diff --git a/README.md b/README.md
index f4fd433..c401da6 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ Run automedia without any options to see all options.
## System
transmission-cli, notify-send (optional)
## Python
-feedparser, transmissionrpc, lxml, requests, pure_protobuf (optional, used with mangaplus.shueisha.co.jp)
+feedparser, transmissionrpc, lxml, requests, urllib (standard library, no install needed), pure_protobuf (optional, used with mangaplus.shueisha.co.jp)
# Requirements when using read_manga.py
## System
rofi, sxiv
diff --git a/automedia.py b/automedia.py
index 71142ff..9d05636 100755
--- a/automedia.py
+++ b/automedia.py
@@ -10,8 +10,11 @@ import uuid
import errno
import signal
import transmissionrpc
+import requests
+import urllib.parse
from domain import url_extract_domain
+import episode
from lxml import etree
from datetime import datetime
@@ -233,11 +236,42 @@ def get_matching_html_items_by_name(html_items1, html_items2):
matching_items.append(html_item1.name)
return matching_items
+def get_rss_from_episode_info(episode_name_raw, episode_info):
+ user_failed = False
+
+ group_name_escaped = urllib.parse.quote(episode_info.group_name)
+ user_page_response = requests.head("https://nyaa.si/user/" + group_name_escaped)
+ if not user_page_response.ok:
+ user_failed = True
+ print("Warning: Failed to get user nyaa.si user from '%s'" % episode_name_raw)
+
+ generic_name = episode_info.get_generic_name()
+ while True:
+ response = input('Are you sure you want to track "%s" by "%s" starting from "%s" ? (Y)es/No: ' % (generic_name, "all users" if user_failed else episode_info.group_name, episode_name_raw))
+ if len(response) > 0 and response[0].lower() == 'n':
+ return None
+ elif len(response) == 0 or response[0].lower() == 'y':
+ break
+
+ return 'https://nyaa.si/?page=rss&q={}&c=0_0&f=0&u={}'.format(urllib.parse.quote(generic_name), group_name_escaped)
+
def add_rss(name, url, rss_config_dir, start_after):
feed = feedparser.parse(url)
if feed.bozo == 1:
- print("Failed to add rss, error: {}".format(str(feed.bozo_exception)))
- return False
+ episode_info = episode.episode_extract_info(url)
+ if not episode_info.group_name or not episode_info.anime_name:
+ print("Failed to add rss, error: {}".format(str(feed.bozo_exception)))
+ return False
+
+ start_after = url
+ url = get_rss_from_episode_info(url, episode_info)
+ if not url:
+ return True
+
+ feed = feedparser.parse(url)
+ if feed.bozo == 1:
+ print("Failed to add rss, error: {}".format(str(feed.bozo_exception)))
+ return False
if not name:
name = feed["channel"]["title"].replace("/", "_").strip()
@@ -624,15 +658,17 @@ def usage():
exit(1)
def usage_add():
- print("usage: automedia.py add <type> <url> [--name name] [--start-after start_after]")
+ print("usage: automedia.py add <type> <url|filename> [--name name] [--start-after start_after]")
print("OPTIONS")
print(" type The type should be either rss or html")
print(" url The url to the rss or html")
+ print(" filename The filename of an episode of an existing serie to start track. Currently only works with rss on https://nyaa.si")
print(" --name The display name to be used for the media. Optional for rss, in which case the name will be retries from rss TITLE, required for html")
print(" --start-after The sync should start downloading media after this item. This --start-after value should be the title of the episode/chapter (Optional, default is to start from the first item)")
print("EXAMPLES")
print(" automedia.py add rss 'https://nyaa.si/?page=rss&q=Tejina-senpai+1080p&c=0_0&f=0&u=HorribleSubs'")
print(" automedia.py add html 'https://manganelo.com/manga/read_naruto_manga_online_free3' --name Naruto")
+ print(" automedia.py add rss '[Erai-raws] Saiki Kusuo no Psi Nan - Kanketsu-hen - 01 [1080p][Multiple Subtitle].mkv'")
exit(1)
def usage_sync():
@@ -674,6 +710,8 @@ def command_add(args):
if start_after:
start_after = start_after.replace("/", "_").strip()
+ media_url = media_url.strip()
+
if media_type == "rss":
os.makedirs(rss_config_dir, exist_ok=True)
result = add_rss(media_name, media_url, rss_config_dir, start_after)
diff --git a/domain.py b/domain.py
index 9f39efc..0ec8ac7 100755
--- a/domain.py
+++ b/domain.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
def url_extract_domain(url):
index = 0
diff --git a/episode.py b/episode.py
new file mode 100755
index 0000000..6ac8ab1
--- /dev/null
+++ b/episode.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+
+class EpisodeInfo:
+ group_name = None
+ anime_name = None
+ episode = None
+ resolution = None
+ extension = None
+
+ def get_generic_name(self):
+ if not self.group_name or not self.anime_name:
+ return ""
+ generic_name = "[%s] %s" % (self.group_name, self.anime_name)
+ if self.resolution:
+ generic_name += " [%s]" % self.resolution
+ if self.extension:
+ generic_name += self.extension
+ return generic_name
+
+def episode_extract_info(episode_name):
+ episode_name = episode_name.strip()
+ episode_info = EpisodeInfo()
+
+ if len(episode_name) == 0:
+ return episode_info
+
+ extension_index = episode_name.rfind('.')
+ if extension_index != -1:
+ episode_info.extension = episode_name[extension_index:]
+ episode_name = episode_name[0:extension_index]
+
+ if episode_name[0] != '[':
+ return episode_info
+
+ group_name_end = episode_name.find(']')
+ if group_name_end == -1:
+ return episode_info
+
+ episode_info.group_name = episode_name[1:group_name_end].strip()
+
+ last_dash = episode_name.rfind('-')
+ if last_dash == -1:
+ return episode_info
+
+ episode_info.anime_name = episode_name[group_name_end+1:last_dash].strip()
+
+ resolution_index = episode_name.find('[', last_dash + 1)
+ if resolution_index == -1:
+ episode_info.episode = episode_name[last_dash+1:].strip()
+ else:
+ episode_info.episode = episode_name[last_dash+1:resolution_index].strip()
+ resolution_end = episode_name.find(']', resolution_index + 1)
+ if resolution_end != -1:
+ episode_info.resolution = episode_name[resolution_index+1:resolution_end]
+
+ return episode_info
+
+if __name__ == "__main__":
+ episode_info = episode_extract_info("[Erai-raws] Saiki Kusuo no Psi Nan - Kanketsu-hen - 01 [1080p][Multiple Subtitle].mkv")
+ print("|{}|".format(episode_info.group_name))
+ print("|{}|".format(episode_info.anime_name))
+ print("|{}|".format(episode_info.episode))
+ print("|{}|".format(episode_info.resolution))
+ print("|{}|".format(episode_info.extension))
+ print("|{}|".format(episode_info.get_generic_name()))
+
+ episode_info = episode_extract_info("nyaa.si")
+ print("|{}|".format(episode_info.group_name))
+ print("|{}|".format(episode_info.anime_name))
+ print("|{}|".format(episode_info.episode))
+ print("|{}|".format(episode_info.resolution))
+ print("|{}|".format(episode_info.extension))
+ print("|{}|".format(episode_info.get_generic_name())) \ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 7c9c65a..d9dfe42 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,4 @@ feedparser
transmissionrpc
lxml
requests
-pure_protobuf \ No newline at end of file
+pure_protobuf