-rw-r--r--  README.md                |  15
-rwxr-xr-x  automedia                | Bin 116640 -> 116640 bytes
-rwxr-xr-x  install.sh               |   5
-rwxr-xr-x  mangadex-upgrade.py      | 159
-rwxr-xr-x  open_media.py            |   2
-rwxr-xr-x  plugins/lhtranslation.py |   4
-rwxr-xr-x  plugins/mangadex.py      | 165
-rwxr-xr-x  plugins/mangakatana.py   |   4
-rwxr-xr-x  plugins/manganelo.py     |   4
-rwxr-xr-x  plugins/manganelos.py    |   4
-rwxr-xr-x  plugins/mangaplus.py     |   4
-rwxr-xr-x  plugins/mangatown.py     |   4
-rwxr-xr-x  plugins/mangawindow.py   |   4
-rwxr-xr-x  plugins/readm.py         |   4
-rw-r--r--  src/fileutils.c          |   9
-rw-r--r--  src/fileutils.h          |   3
-rw-r--r--  src/main.c               |  26
17 files changed, 298 insertions(+), 118 deletions(-)
diff --git a/README.md b/README.md
index 519f473..e3ffe9c 100644
--- a/README.md
+++ b/README.md
@@ -10,17 +10,16 @@ AutoMedia checks and downloads updates every 15 minutes.
Run automedia with `sync` option and keep it running to track media. You can then use `add` option to add new media to track.
Run automedia without any options to see all options.
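+For example, `automedia sync ~/Media` keeps running and syncs tracked media into the ~/Media download directory.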
## TODO
-1. Periodically check and remove in_progress files and their directories. This can happen if the computer crashes while adding rss.
-2. Automatically remove torrents that have finished seeding, to reduce memory usage and startup time of transmission.
-3. Cache string lengths (too many strcat).
-4. Convert python plugins to C.
-5. Use torrent add response to track finished torrents. A torrent can finish before it is first checked!
-6. Add rate limiting for downloading manga.
-7. Verify path lengths. Currently there is limit to 255 characters for remote names, but not local + remote names.
+* Automatically remove torrents that have finished seeding, to reduce memory usage and startup time of transmission.
+* Cache string lengths (too many strcat).
+* Convert python plugins to C.
+* Use torrent add response to track finished torrents. A torrent can finish before it is first checked!
+* Add rate limiting for downloading manga.
+* Verify path lengths. Currently there is a limit of 255 characters for remote names, but not for local + remote names.
# Requirements
## System
curl, transmission-cli, notify-send (optional)
-## Python
+## Python 3
lxml, requests, pure_protobuf (optional, used with mangaplus.shueisha.co.jp)
# Requirements when using open_media.py
## System
diff --git a/automedia b/automedia
index cf67880..268884e 100755
--- a/automedia
+++ b/automedia
Binary files differ
diff --git a/install.sh b/install.sh
index d19a84b..19698d9 100755
--- a/install.sh
+++ b/install.sh
@@ -3,7 +3,8 @@
script_dir=$(dirname "$0")
cd "$script_dir"
-install -Dm755 "automedia" "$pkgdir/usr/bin/automedia"
+install -Dm755 "automedia" "/usr/bin/automedia"
+install -Dm755 "mangadex-upgrade.py" "/usr/share/automedia/mangadex-upgrade.py"
for file in plugins/*; do
- install -Dm755 "$file" "$pkgdir/usr/share/automedia/$file"
+ install -Dm755 "$file" "/usr/share/automedia/$file"
done
diff --git a/mangadex-upgrade.py b/mangadex-upgrade.py
new file mode 100755
index 0000000..36533fa
--- /dev/null
+++ b/mangadex-upgrade.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+
+import os
+import json
+import re
+import requests
+
+class Chapter:
+ id = ""
+ title = ""
+ time = ""
+
+class Manga:
+ id = ""
+ updated = ""
+ chapters = None
+ directory = ""
+
+ def __init__(self):
+ self.chapters = []
+
+# Returns None if the manga is not tracked using mangadex or if it has already been migrated
+def manga_data_file_read(filepath):
+ with open(filepath, "rb") as file:
+ data = json.load(file)
+ if data["plugin"] != "mangadex.py":
+ return None
+
+ manga_link = data["link"]
+ manga_id_search = re.search(r"/title/([0-9]+)", manga_link)
+ if not manga_id_search:
+ print("Mangadex manga already migrated: %s" % manga_link)
+ return None
+
+ manga_data = Manga()
+ manga_data.id = int(manga_id_search.group(1))
+ manga_data.updated = data["updated"]
+
+ downloaded = data.get("downloaded")
+ if type(downloaded) is list:
+ for downloaded_item in downloaded:
+ chapter_url = downloaded_item["url"]
+ chapter_id_search = re.search(r"chapter/([0-9]+)", chapter_url)
+ if not chapter_id_search:
+ print("Failed to extract chapter id from manga %s, chapter %s" % (manga_link, chapter_url))
+ exit(1)
+
+ chapter_data = Chapter()
+ chapter_data.id = int(chapter_id_search.group(1))
+ chapter_data.title = downloaded_item["title"]
+ chapter_data.time = downloaded_item["time"]
+ manga_data.chapters.append(chapter_data)
+ return manga_data
+
+# Returns a list of (legacy id, new id) tuples
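+# The ids are sent to the legacy mapping endpoint in batches of up to 1000 per request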
+def legacy_id_to_new_id(ids, id_type):
+ mapping = []
+ id_start = 0
+ while id_start < len(ids):
+ id_end = min(id_start + 1000, len(ids))
+ if id_end - id_start == 0:
+ break
+ response = requests.post("https://api.mangadex.org/legacy/mapping", json={"type": id_type, "ids": ids[id_start:id_end]})
+ response.raise_for_status()
+ response_json = response.json()
+
+ for response_item in response_json:
+ if response_item["result"] != "ok":
+ print("legacy mapping returned an error")
+ exit(1)
+
+ attributes = response_item["data"]["attributes"]
+ mapping.append((attributes["legacyId"], attributes["newId"]))
+
+ id_start = id_end
+
+ if len(mapping) != len(ids):
+ print("Failed to get the legacy to new id mapping for all ids. Got %d mappings, expected %d" % (len(mapping), len(ids)))
+ exit(1)
+
+ return mapping
+
+def get_manga_by_id(manga_list, manga_id):
+ for manga_data in manga_list:
+ if manga_data.id == manga_id:
+ return manga_data
+
+def get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, id):
+ for chapter_id_mapping in legacy_chapter_ids_to_new_ids:
+ if chapter_id_mapping[0] == id:
+ return chapter_id_mapping
+
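+# Write the new content to a temporary file, flush and fsync it, then rename it over the target so the file is never left partially written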
+def file_overwrite_atomic(filepath, content):
+ tmp_filepath = filepath + ".tmp"
+ with open(tmp_filepath, "wb") as file:
+ file.write(content.encode())
+ file.flush()
+ os.fsync(file.fileno())
+ os.rename(tmp_filepath, filepath)
+
+if __name__ == "__main__":
+ migrate_finished_filepath = os.path.expanduser("~/.config/automedia/mangadex-upgraded")
+ if os.path.isfile(migrate_finished_filepath):
+ print("Mangadex tracked manga have already migrated, nothing to do")
+ exit(0)
+
+ tracked_dir = os.path.expanduser("~/.config/automedia/html/tracked")
+ manga_list = []
+ for manga_name in os.listdir(tracked_dir):
+ manga_tracked_dir = os.path.join(tracked_dir, manga_name)
+ manga_data_file = os.path.join(manga_tracked_dir, "data")
+ manga_data = manga_data_file_read(manga_data_file)
+ if manga_data:
+ manga_data.directory = os.path.join(tracked_dir, manga_name)
+ manga_list.append(manga_data)
+
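+ # Collect every legacy (numeric) id so the manga and chapter ids can be converted to the new uuid-based ids in bulk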
+ manga_ids = []
+ chapter_ids = []
+ for manga_data in manga_list:
+ manga_ids.append(manga_data.id)
+ for chapter_data in manga_data.chapters:
+ chapter_ids.append(chapter_data.id)
+
+ legacy_manga_ids_to_new_ids = legacy_id_to_new_id(manga_ids, "manga")
+ legacy_chapter_ids_to_new_ids = legacy_id_to_new_id(chapter_ids, "chapter")
+
+ for manga_id in legacy_manga_ids_to_new_ids:
+ manga_data = get_manga_by_id(manga_list, manga_id[0])
+ if not manga_data:
+ print("Failed to get manga by id: %s" % manga_id[0])
+ exit(1)
+
+ new_manga_link = str(manga_id[1])
+ new_manga_data = {}
+ new_manga_data["plugin"] = "mangadex.py"
+ new_manga_data["link"] = new_manga_link
+ new_manga_data["updated"] = manga_data.updated
+ downloaded = []
+ for chapter_data in manga_data.chapters:
+ chapter_id_mapping = get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, chapter_data.id)
+ if not chapter_id_mapping:
+ print("Failed to get new id from manga %d, chapter %d" % (manga_data.id, chapter_data.id))
+ exit(1)
+
+ downloaded_item = {}
+ downloaded_item["title"] = chapter_data.title
+ downloaded_item["time"] = chapter_data.time
+ downloaded_item["url"] = chapter_id_mapping[1]
+ downloaded.append(downloaded_item)
+ new_manga_data["downloaded"] = downloaded
+
+ file_overwrite_atomic(os.path.join(manga_data.directory, "link"), new_manga_link)
+ new_manga_data_str = json.dumps(new_manga_data, indent=4)
+ file_overwrite_atomic(os.path.join(manga_data.directory, "data"), new_manga_data_str)
+
+ file_overwrite_atomic(migrate_finished_filepath, "1")
+ print("Successfully migrated %d manga with a total of %d chapters" % (len(manga_ids), len(chapter_ids)))
\ No newline at end of file
diff --git a/open_media.py b/open_media.py
index a432c73..1f87975 100755
--- a/open_media.py
+++ b/open_media.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys
diff --git a/plugins/lhtranslation.py b/plugins/lhtranslation.py
index 153f4ed..082b865 100755
--- a/plugins/lhtranslation.py
+++ b/plugins/lhtranslation.py
@@ -50,7 +50,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -61,7 +61,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath("//div[@class='list-chapters']//a[@class='chapter']"):
- title = element.find("b").text.strip()
+ title = element.find("b").text.strip().replace("/", "_")
url = "https://lhtranslation.net/" + element.attrib.get("href").strip()
if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/mangadex.py b/plugins/mangadex.py
index 72b28f3..04bc800 100755
--- a/plugins/mangadex.py
+++ b/plugins/mangadex.py
@@ -21,13 +21,13 @@ def usage():
exit(1)
def usage_list():
- print("mangadex.py list <url>")
+ print("mangadex.py list <manga-id>")
exit(1)
def usage_download():
- print("mangadex.py download <url> <download_dir>")
+ print("mangadex.py download <chapter-id> <download_dir>")
print("examples:")
- print(" mangadex.py download \"https://mangadex.org/title/7139/one-punch-man\" /home/adam/Manga/MangaName")
+ print(" mangadex.py download \"4e4a1ed8-d4a0-4096-86db-ca5e3fc42c5d\" /home/adam/Manga/MangaName")
print("")
print("Note: The manga directory has to exist.")
exit(1)
@@ -35,8 +35,8 @@ def usage_download():
if len(sys.argv) < 2:
usage()
-def download_file(url, cookies, save_path):
- with requests.get(url, headers=headers, cookies=cookies, stream=True) as response:
+def download_file(url, save_path):
+ with requests.get(url, headers=headers, stream=True) as response:
if not response.ok:
return False
with open(save_path, "wb") as file:
@@ -45,11 +45,6 @@ def download_file(url, cookies, save_path):
file.write(chunk)
return True
-def title_url_extract_manga_id(url):
- result = re.search("mangadex.org/title/([0-9]+)", url)
- if result and len(result.groups()) > 0:
- return result.groups()[0]
-
def chapter_sort_func(chapter_data):
return chapter_data[1].get("timestamp", 0)
@@ -58,24 +53,16 @@ def chapter_title_extract_number(chapter_title):
if result and len(result.groups()) > 0:
return result.groups()[0]
-def list_chapters(url, chapter_list_input):
- manga_id = title_url_extract_manga_id(url)
- if not manga_id:
- print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangadex.org/title/<number>/..." % url)
- exit(2)
-
- url = "https://mangadex.org/api/?id=%s&type=manga" % manga_id;
- response = requests.get(url, headers=headers)
- if response.status_code != 200:
- print("Failed to list chapters, server responded with status code %d" % response.status_code)
- exit(2)
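+# Sort key: the chapter's numeric chapter number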
+def sort_chapters(chapter):
+ return chapter["chapter"]
+def list_chapters(url, chapter_list_input):
seen_titles = set()
seen_chapter_numbers = set()
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
chapter_number = chapter_title_extract_number(title)
if chapter_number:
seen_chapter_numbers.add(chapter_number)
@@ -86,93 +73,95 @@ def list_chapters(url, chapter_list_input):
if url and len(url) > 0:
seen_urls.add(url)
- lang = "gb" # english
+ lang = "en"
- json_response = response.json()
- status = json_response["status"]
- if status != "OK":
- print("Expected server response OK, got %s" % status)
- exit(2)
-
- chapter_json = json_response["chapter"]
- time_now = time.time()
+ i = 0
prev_chapter_number = ""
- output_chapters = []
- for chapter_id, chapter in chapter_json.items():
- timestamp = chapter.get("timestamp", 0)
- if timestamp > time_now:
- continue
-
- lang_code = chapter.get("lang_code", "")
- if lang_code != lang:
- continue
-
- chapter_number_str = chapter.get("chapter", "0")
- if chapter_number_str == prev_chapter_number:
- continue
- prev_chapter_number = chapter_number_str
-
- chapter_title = chapter.get("title")
- chapter_url = "https://mangadex.org/chapter/" + chapter_id
- chapter_name = "Ch. " + chapter_number_str
- if chapter_title and len(chapter_title) > 0:
- chapter_name += " - " + chapter_title
-
- if chapter_title.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
+ chapters = []
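+ # Fetch the chapter list 100 chapters at a time, newest first, stopping at the first chapter that has already been downloaded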
+ while True:
+ url = "https://api.mangadex.org/chapter?manga=" + url + "&limit=100&offset=%d&order[publishAt]=desc" % (i * 100)
+ response = requests.get(url, headers=headers)
+ response.raise_for_status()
+
+ if len(response.text) == 0:
break
- if chapter_number_str in seen_chapter_numbers:
+ json_response = response.json()
+ results = json_response["results"]
+ if len(results) == 0:
break
- output_chapters.append({ "name": chapter_name, "url": chapter_url })
- print(json.dumps(output_chapters))
+ for result_item in results:
+ if result_item["result"] != "ok":
+ print("Got item with result != ok")
+ exit(1)
-def chapter_url_extract_manga_id(url):
- result = re.search("mangadex.org/chapter/([0-9]+)", url)
- if result and len(result.groups()) > 0:
- return result.groups()[0]
+ data = result_item["data"]
+ id = data["id"]
+ attributes = data["attributes"]
+ if attributes["translatedLanguage"] != lang:
+ continue
-def download_chapter(url, download_dir):
- request_url = url
- manga_id = chapter_url_extract_manga_id(url)
- if not manga_id:
- print("Failed to extract manga id from url: %s. Note: url is expected to be in this format: mangadex.org/chapter/<number>" % url)
- exit(2)
+ chapter_number_str = attributes["chapter"]
+ if chapter_number_str == prev_chapter_number:
+ continue
+ prev_chapter_number = chapter_number_str
- response = requests.get(request_url, headers=headers)
- if response.status_code != 200:
- print("Failed to list chapter images, server responded with status code %d" % response.status_code)
- exit(2)
+ title = "Ch. " + chapter_number_str
+ title_item = attributes.get("title")
+ if title_item:
+ title += " - " + title_item
+ title = title.replace("/", "_")
- cookies = response.cookies
+ if title.lower().replace(" ", "") in seen_titles or id in seen_urls:
+ break
- url = "https://mangadex.org/api/?id=%s&server=null&type=chapter" % manga_id
+ if chapter_number_str in seen_chapter_numbers:
+ break
- response = requests.get(url, headers=headers)
- if response.status_code != 200:
- print("Failed to list chapter images, server responded with status code %d" % response.status_code)
- exit(2)
+ chapters.append({ "name": title, "url": id, "chapter": int(chapter_number_str) })
+ i += 1
+
+ chapters = sorted(chapters, key=sort_chapters, reverse=True)
+ output_chapters = []
+ for chapter in chapters:
+ output_chapters.append({ "name": chapter["name"], "url": chapter["url"] })
+ print(json.dumps(output_chapters))
+
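+# Ask the MangaDex at-home endpoint which server currently hosts the chapter's images; page URLs are built as <baseUrl>/data/<hash>/<filename>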
+def get_base_url_for_chapter(chapter_id):
+ response = requests.get("https://api.mangadex.org/at-home/server/" + chapter_id, headers=headers)
+ response.raise_for_status()
+ return response.json()["baseUrl"]
+
+def download_chapter(url, download_dir):
+ base_url = get_base_url_for_chapter(url)
+ response = requests.get("https://api.mangadex.org/chapter/" + url, headers=headers)
+ response.raise_for_status()
+
+ image_urls = []
+ json_response = response.json()
+ if json_response["result"] != "ok":
+ print("Got item with result != ok")
+ exit(1)
+
+ data = json_response["data"]
+ attributes = data["attributes"]
+ hash = attributes["hash"]
+ image_names = attributes["data"]
+ for image_name in image_names:
+ image_urls.append(base_url + "/data/" + hash + "/" + image_name)
in_progress_filepath = os.path.join(download_dir, ".in_progress")
with open(in_progress_filepath, "w") as file:
- file.write(request_url)
+ file.write(url)
img_number = 1
- json_response = response.json()
- status = json_response["status"]
- if status != "OK":
- print("Expected server response OK, got %s" % status)
- exit(2)
-
- chapter_hash = json_response["hash"]
- server = json_response.get("server", "https://mangadex.org/data/")
- for image_name in json_response["page_array"]:
- image_url = "%s%s/%s" % (server, chapter_hash, image_name)
+ for image_url in image_urls:
ext = image_url[image_url.rfind("."):]
image_name = str(img_number) + ext
image_path = os.path.join(download_dir, image_name)
print("Downloading {} to {}".format(image_url, image_path))
- if not download_file(image_url, cookies, image_path):
+ if not download_file(image_url, image_path):
print("Failed to download image: %s" % image_url)
os.remove(in_progress_filepath)
exit(2)
diff --git a/plugins/mangakatana.py b/plugins/mangakatana.py
index f2195a4..e90d916 100755
--- a/plugins/mangakatana.py
+++ b/plugins/mangakatana.py
@@ -54,7 +54,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -65,7 +65,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath('//div[@class="chapters"]//div[@class="chapter"]//a'):
- element_text = element.text.strip()
+ element_text = element.text.strip().replace("/", "_")
url = element.attrib.get("href").strip()
if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/manganelo.py b/plugins/manganelo.py
index 1f0882c..57eb0cc 100755
--- a/plugins/manganelo.py
+++ b/plugins/manganelo.py
@@ -64,7 +64,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -75,7 +75,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath('//ul[@class="row-content-chapter"]//a'):
- element_text = element.text.strip()
+ element_text = element.text.strip().replace("/", "_")
url = element.attrib.get("href").strip()
if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/manganelos.py b/plugins/manganelos.py
index 7e220d5..54c2625 100755
--- a/plugins/manganelos.py
+++ b/plugins/manganelos.py
@@ -56,7 +56,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -67,7 +67,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath('//section[@id="examples"]//div[@class="chapter-list"]//a'):
- element_text = element.text.strip()
+ element_text = element.text.strip().replace("/", "_")
url = element.attrib.get("href").strip()
if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/mangaplus.py b/plugins/mangaplus.py
index 7104d80..0d87ddc 100755
--- a/plugins/mangaplus.py
+++ b/plugins/mangaplus.py
@@ -219,7 +219,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -235,7 +235,7 @@ def list_chapters(url, chapter_list_input):
chapters = []
for chapter in reversed(all_chapters):
- title = chapter.subtitle
+ title = chapter.subtitle.replace("/", "_")
url = "https://mangaplus.shueisha.co.jp/viewer/{0}".format(chapter.id)
if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/mangatown.py b/plugins/mangatown.py
index d3a7e04..1a7eae9 100755
--- a/plugins/mangatown.py
+++ b/plugins/mangatown.py
@@ -56,7 +56,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -67,7 +67,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath('//ul[@class="chapter_list"]//a'):
- element_text = element.text.strip()
+ element_text = element.text.strip().replace("/", "_")
url = element.attrib.get("href").strip()
if "/manga/" not in url:
continue
diff --git a/plugins/mangawindow.py b/plugins/mangawindow.py
index b5f5bf8..3a8c30f 100755
--- a/plugins/mangawindow.py
+++ b/plugins/mangawindow.py
@@ -51,7 +51,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -66,7 +66,7 @@ def list_chapters(url, chapter_list_input):
if title is None:
print("Failed to get title for chapter")
exit(2)
- title = title.strip()
+ title = title.strip().replace("/", "_")
url = "https://mangawindow.net" + element.attrib.get("href").strip()
if title.lower().replace(" ", "") in seen_titles or url in seen_urls:
break
diff --git a/plugins/readm.py b/plugins/readm.py
index 3101587..74ae43c 100755
--- a/plugins/readm.py
+++ b/plugins/readm.py
@@ -56,7 +56,7 @@ def list_chapters(url, chapter_list_input):
for item in chapter_list_input:
title = item.get("title")
if title and len(title) > 0:
- seen_titles.add(title.lower().replace(" ", ""))
+ seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
seen_urls = set()
for item in chapter_list_input:
@@ -67,7 +67,7 @@ def list_chapters(url, chapter_list_input):
tree = etree.HTML(response.text)
chapters = []
for element in tree.xpath('//div[@class="episodes-list"]//a'):
- element_text = element.text.strip()
+ element_text = element.text.strip().replace("/", "_")
url = element.attrib.get("href").strip()
url = "https://readm.org" + url
if element_text.lower().replace(" ", "") in seen_titles or url in seen_urls:
diff --git a/src/fileutils.c b/src/fileutils.c
index d7021dc..48cf825 100644
--- a/src/fileutils.c
+++ b/src/fileutils.c
@@ -52,6 +52,15 @@ int file_get_content(const char *filepath, char **data, long *size) {
return result;
}
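+/* Stores the file's modification time in *last_modified. Returns 0 on success, -1 if the file cannot be stat'ed. */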
+int file_get_last_modified_time(const char *path, time_t *last_modified) {
+ struct stat s;
+ if(stat(path, &s) == 0) {
+ *last_modified = s.st_mtim.tv_sec;
+ return 0;
+ }
+ return -1;
+}
+
int create_directory_recursive(char *path) {
int path_len = strlen(path);
char *p = path;
diff --git a/src/fileutils.h b/src/fileutils.h
index 979f656..e243a89 100644
--- a/src/fileutils.h
+++ b/src/fileutils.h
@@ -2,10 +2,13 @@
#define FILEUTILS_H
#include <stddef.h>
+#include <time.h>
const char* get_home_dir();
/* Returns 0 on success */
int file_get_content(const char *filepath, char **data, long *size);
+/* Returns 0 on success */
+int file_get_last_modified_time(const char *path, time_t *last_modified);
/* Returns 0 on success (if the directories are created or if the directories already exists) */
int create_directory_recursive(char *path);
/* Returns 0 on success */
diff --git a/src/main.c b/src/main.c
index 7a1e68e..a49acdb 100644
--- a/src/main.c
+++ b/src/main.c
@@ -26,6 +26,8 @@
#include <unistd.h>
#define NAME_MAX_LEN 250
+/* 30 minutes */
+#define IN_PROGRESS_TIMEOUT_SEC 60L*30L
static void usage(void) {
fprintf(stderr, "usage: automedia COMMAND\n");
@@ -365,6 +367,8 @@ static void iterate_tracked_items(char *config_dir, IterateTrackedItemCallback i
strcat(item_filepath, "/");
int item_filepath_len = strlen(item_filepath);
+ time_t time_now = time(NULL);
+
while((dir = readdir(d)) != NULL && automedia_running) {
int title_len = strlen(dir->d_name);
if((title_len == 1 && dir->d_name[0] == '.') || (title_len == 2 && dir->d_name[0] == '.' && dir->d_name[1] == '.'))
@@ -373,9 +377,15 @@ static void iterate_tracked_items(char *config_dir, IterateTrackedItemCallback i
strcpy(item_filepath + item_filepath_len, dir->d_name);
strcpy(item_filepath + item_filepath_len + title_len, "/.in_progress");
- if(file_exists(item_filepath) == 0) {
- fprintf(stderr, "Skipping in-progress rss %s\n", dir->d_name);
- continue;
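+ /* An .in_progress file that has not been modified within IN_PROGRESS_TIMEOUT_SEC is assumed to be left over from a crash; it is removed so the item can be processed again */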
+ time_t last_modified = 0;
+ if(file_get_last_modified_time(item_filepath, &last_modified) == 0) {
+ if(time_now - last_modified > IN_PROGRESS_TIMEOUT_SEC) {
+ fprintf(stderr, "Removing in-progress item: %s\n", dir->d_name);
+ remove(item_filepath);
+ } else {
+ fprintf(stderr, "Skipping in-progress item: %s\n", dir->d_name);
+ continue;
+ }
}
char *link_file_content = NULL;
@@ -577,10 +587,20 @@ static int proc_read_cmdline(const char *pid_str, char *cmdline_data, int cmdlin
return 0;
}
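+/* The migration script exits immediately if ~/.config/automedia/mangadex-upgraded exists, so it is cheap to run on every sync */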
+static int run_mangadex_migration_script() {
+ const char *args[] = { "/usr/share/automedia/mangadex-upgrade.py", NULL };
+ return program_exec(args, NULL, NULL);
+}
+
static void command_sync(int argc, char **argv, char *rss_config_dir, char *html_config_dir, char *program_dir) {
if(argc < 1)
usage_sync();
+ if(run_mangadex_migration_script() != 0) {
+ fprintf(stderr, "Failed to migrade mangadex manga\n");
+ exit(1);
+ }
+
char *download_dir = argv[0];
const char automedia_pid_path[] = "/tmp/automedia.pid";