author    dec05eba <dec05eba@protonmail.com>  2019-11-18 23:13:21 +0100
committer dec05eba <dec05eba@protonmail.com>  2020-07-06 07:12:34 +0200
commit    718f9feb8de96be91748521ff52d802b34d5dc73 (patch)
tree      5cb4906a60ea01a7540e5ad9322bb01ed849a9b0
parent    606c530f50cc410113aa2807d708b6ce6c84cc83 (diff)
Add script to open media, and track already seen media
-rwxr-xr-x  automedia.py   72
-rwxr-xr-x  open_media.py  95
-rwxr-xr-x  read_manga.py   2
3 files changed, 152 insertions(+), 17 deletions(-)
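
Every path that writes a tracked item's "data" file now captures a single
updated = str(time.time()) value and reuses it for both the top-level
"updated" field and a new "time" field on each entry appended to
"downloaded". A minimal sketch of the resulting file shape, reconstructed
from the hunks below (the link and title values are illustrative, not from
this commit):

    # Sketch of a tracked item's "data" file after this change.
    # The "time" field on downloaded entries is the new addition.
    import json
    import time

    updated = str(time.time())
    data = {
        "link": "https://example.com/feed.rss",  # illustrative URL
        "updated": updated,
        "downloaded": [
            { "title": "Example Episode 01", "time": updated }
        ]
    }
    print(json.dumps(data, indent=4))
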
diff --git a/automedia.py b/automedia.py
index 46c57df..e505ddb 100755
--- a/automedia.py
+++ b/automedia.py
@@ -97,13 +97,14 @@ def get_tracked_rss(rss_tracked_dir, existing_tracked_rss):
         if json_data:
             json_data = json.loads(json_data)
         else:
+            updated = str(time.time())
             json_data = {
                 "link": link,
-                "updated": str(time.time()),
+                "updated": updated,
                 "downloaded": []
             }
             if latest:
-                json_data["downloaded"].append({ "title": latest })
+                json_data["downloaded"].append({ "title": latest, "time": updated })
         if not link or not json_data:
             print("Rss corrupt, link or data missing for rss %s" % title)
             continue
@@ -116,11 +117,12 @@ def rss_update_latest(rss_tracked_dir, rss, latest):
     with open(os.path.join(rss_tracked_dir, rss.title, "latest"), "w") as file:
         file.write(latest)
 
+    updated = str(time.time())
     with open(os.path.join(rss_tracked_dir, rss.title, "updated"), "w") as file:
-        file.write(str(time.time()))
+        file.write(updated)
 
-    rss.json_data["updated"] = str(time.time())
-    rss.json_data["downloaded"].append({ "title": latest })
+    rss.json_data["updated"] = updated
+    rss.json_data["downloaded"].append({ "title": latest, "time": updated })
 
     with open(os.path.join(rss_tracked_dir, rss.title, "data"), "w") as file:
         json.dump(rss.json_data, file, indent=4)
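
Storing the timestamp as str(time.time()) means consumers parse it back with
float(). A self-contained sketch of turning a stored "time" value into a
readable date (the entry here is illustrative):

    # Sketch: converting a stored "time" string back to a readable timestamp.
    from datetime import datetime

    entry = { "title": "Example Episode 01", "time": "1574115201.5" }  # illustrative
    when = datetime.fromtimestamp(float(entry["time"]))
    print("%s downloaded at %s" % (entry["title"], when.isoformat()))
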
@@ -128,11 +130,12 @@ def html_update_latest(html_tracked_dir, html, latest):
     with open(os.path.join(html_tracked_dir, html.title, "latest"), "w") as file:
         file.write(latest)
 
+    updated = str(time.time())
     with open(os.path.join(html_tracked_dir, html.title, "updated"), "w") as file:
-        file.write(str(time.time()))
+        file.write(updated)
 
-    html.json_data["updated"] = str(time.time())
-    html.json_data["downloaded"].append({ "title": latest })
+    html.json_data["updated"] = updated
+    html.json_data["downloaded"].append({ "title": latest, "time": updated })
 
     with open(os.path.join(html_tracked_dir, html.title, "data"), "w") as file:
         json.dump(html.json_data, file, indent=4)
@@ -154,14 +157,15 @@ def get_tracked_html(html_tracked_dir):
         if json_data:
             json_data = json.loads(json_data)
         else:
+            updated = str(time.time())
             json_data = {
                 "plugin": plugin,
                 "link": link,
-                "updated": str(time.time()),
+                "updated": updated,
                 "downloaded": []
             }
             if latest:
-                json_data["downloaded"].append({ "title": latest })
+                json_data["downloaded"].append({ "title": latest, "time": updated })
         if not plugin or not json_data:
             print("html corrupt, plugin or data missing for html %s" % title)
             continue
@@ -301,16 +305,17 @@ def add_rss(name, url, rss_config_dir, start_after):
     with open(os.path.join(rss_dir, "latest"), "w") as file:
         file.write(start_after)
 
+    updated = str(time.time())
     with open(os.path.join(rss_dir, "updated"), "w") as file:
-        file.write(str(time.time()))
+        file.write(updated)
 
     data = {
         "link": url,
-        "updated": str(time.time()),
+        "updated": updated,
         "downloaded": []
     }
 
     if start_after:
-        data["downloaded"].append({ "title": start_after })
+        data["downloaded"].append({ "title": start_after, "time": updated })
     with open(os.path.join(rss_dir, "data"), "w") as file:
         json.dump(data, file, indent=4)
@@ -372,17 +377,18 @@ def add_html(name, url, html_config_dir, start_after):
     with open(os.path.join(html_dir, "latest"), "w") as file:
         file.write(start_after)
 
+    updated = str(time.time())
     with open(os.path.join(html_dir, "updated"), "w") as file:
-        file.write(str(time.time()))
+        file.write(updated)
 
     data = {
         "plugin": os.path.basename(plugin_path),
         "link": url,
-        "updated": str(time.time()),
+        "updated": updated,
         "downloaded": []
     }
 
     if start_after:
-        data["downloaded"].append({ "title": start_after })
+        data["downloaded"].append({ "title": start_after, "time": updated })
     with open(os.path.join(html_dir, "data"), "w") as file:
         json.dump(data, file, indent=4)
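
For reference, add_rss and add_html leave each tracked item as a small
directory of three files; the layout below is inferred from the paths in
these hunks, with <config_dir> standing in for the actual rss/html config
location:

    <config_dir>/tracked/<title>/
        latest    # plain text: title of the newest downloaded entry
        updated   # plain text: str(time.time()) of the last update
        data      # JSON: link (and plugin for html), updated, downloaded[]
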
@@ -702,6 +708,38 @@ def command_sync(args):
     sync_rate_sec = 15 * 60 # every 15 min
     sync(rss_config_dir, html_config_dir, download_dir, sync_rate_sec)
 
+def data_file_get_downloaded(data_filepath):
+    # Return the "downloaded" list of a tracked item's data file,
+    # or an empty list if the file is missing or malformed.
+    downloaded = []
+    try:
+        with open(data_filepath, "r") as file:
+            downloaded.extend(json.loads(file.read())["downloaded"])
+    except (OSError, KeyError, ValueError):
+        pass
+    return downloaded
+
+def get_downloaded_items(tracked_dir):
+    # Collect entries that carry a "time" field (older entries lack it)
+    downloaded_items = []
+    try:
+        for name in os.listdir(tracked_dir):
+            data_filepath = os.path.join(tracked_dir, name, "data")
+            for item in data_file_get_downloaded(data_filepath):
+                if item.get("time"):
+                    downloaded_items.append(item)
+    except OSError:
+        pass
+    return downloaded_items
+
+def command_downloaded():
+    downloaded_items = []
+    downloaded_items.extend(get_downloaded_items(os.path.join(rss_config_dir, "tracked")))
+    downloaded_items.extend(get_downloaded_items(os.path.join(html_config_dir, "tracked")))
+    #downloaded_items = sorted(downloaded_items, key=lambda item: float(item["time"]), reverse=True)
+    for item in downloaded_items:
+        print(item["title"])
+
 def main():
     if len(sys.argv) < 2:
         usage()
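
The new "downloaded" subcommand prints one title per line, in directory
listing order; the newest-first sort is left commented out above. A
self-contained sketch of the ordering that commented line would apply
(item data is illustrative):

    # Sketch: order downloaded items newest-first by their "time" field.
    downloaded_items = [
        { "title": "Old Episode", "time": "1574115201.0" },  # illustrative
        { "title": "New Episode", "time": "1574201601.0" },
    ]
    downloaded_items.sort(key=lambda item: float(item["time"]), reverse=True)
    for item in downloaded_items:
        print(item["title"])  # prints "New Episode" first
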
@@ -711,6 +749,8 @@ def main():
         command_add(sys.argv[2:])
     elif command == "sync":
         command_sync(sys.argv[2:])
+    elif command == "downloaded":
+        command_downloaded()
     else:
         usage()
diff --git a/open_media.py b/open_media.py
new file mode 100755
index 0000000..e373007
--- /dev/null
+++ b/open_media.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import subprocess
+
+script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+
+def run_dmenu(input_text):
+    # Show the entries in rofi's dmenu mode and return the selected line
+    process = subprocess.Popen(["rofi", "-dmenu", "-i", "-p", "Select media"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    stdout, stderr = process.communicate(input_text.encode())
+    if process.returncode == 0:
+        return stdout
+    else:
+        print("Failed to launch rofi, error: {}".format(stderr.decode()))
+        return None
+
+def add_seen(seen_filepath, media_name, seen_list):
+    if media_name in seen_list:
+        return
+    with open(seen_filepath, "a") as seen_file:
+        seen_file.write(media_name + "\n")
+
+def sort_images(filename):
+    # Sort by the number that precedes the file extension ("10.png" -> 10)
+    idx = filename.find(".")
+    if idx != -1:
+        try:
+            return int(filename[0:idx])
+        except ValueError:
+            pass
+    return 0
+
+def get_downloaded_list():
+    process = subprocess.Popen([os.path.join(script_dir, "automedia.py"), "downloaded"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    stdout, stderr = process.communicate()
+    if process.returncode == 0:
+        return stdout.decode().splitlines()
+    else:
+        print("Failed to list downloaded items, error: {}".format(stderr.decode()))
+        return []
+
+def main():
+    if len(sys.argv) < 2:
+        print("usage: open_media.py <download_dir>")
+        print("example: open_media.py /home/user/Downloads/automedia")
+        exit(1)
+
+    download_dir = sys.argv[1]
+    if not os.path.isdir(download_dir):
+        print("No such directory: %s" % download_dir)
+        exit(2)
+
+    downloaded_list = get_downloaded_list()
+
+    seen_filepath = os.path.expanduser("~/.config/automedia/seen")
+    seen_list = []
+    try:
+        with open(seen_filepath, "r") as seen_file:
+            seen_list = seen_file.read().splitlines()
+    except OSError as e:
+        print("Failed to open {}, reason: {}".format(seen_filepath, str(e)))
+
+    print("seen_list: {}".format(str(seen_list)))
+    for seen in seen_list:
+        for i, downloaded in enumerate(downloaded_list):
+            if seen == downloaded:
+                downloaded_list[i] = "✓ {}".format(downloaded)
+
+    selected_media = run_dmenu("\n".join(downloaded_list[::-1]))
+    if not selected_media:
+        exit(0)
+    selected_media = selected_media.decode().replace("✓ ", "").rstrip()
+
+    media_path = os.path.join(download_dir, selected_media)
+    if os.path.isdir(media_path):
+        add_seen(seen_filepath, selected_media, seen_list)
+        files = []
+        for filename in os.listdir(media_path):
+            if filename not in ("finished", "session_id"):
+                files.append(filename)
+
+        files = sorted(files, key=sort_images)
+        process = subprocess.Popen(["sxiv", "-i", "-f"], stdin=subprocess.PIPE)
+        files_fullpath = []
+        for filename in files:
+            files_fullpath.append(os.path.join(media_path, filename))
+        process.communicate("\n".join(files_fullpath).encode())
+    elif os.path.isfile(media_path):
+        add_seen(seen_filepath, selected_media, seen_list)
+        subprocess.Popen(["mpv", "--", media_path])
+
+if __name__ == "__main__":
+    main()
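
open_media.py takes AutoMedia's download directory as its only argument and
prefixes already-opened entries with a check mark in the rofi list. The seen
state is a plain-text file with one media name per line; a minimal sketch of
inspecting it (same path the script uses):

    # Sketch: list which downloads have already been opened.
    import os

    seen_filepath = os.path.expanduser("~/.config/automedia/seen")
    try:
        with open(seen_filepath, "r") as seen_file:
            for name in seen_file.read().splitlines():
                print(name)
    except OSError:
        print("nothing marked as seen yet")
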
diff --git a/read_manga.py b/read_manga.py
index d08127a..b4425cf 100755
--- a/read_manga.py
+++ b/read_manga.py
@@ -77,5 +77,5 @@ for chapter in chapters_by_oldest[start_index:]:
         images_str.append(os.path.join(image_dir, image))
         index += 1
 
-process = subprocess.Popen(["sxiv", "-i", "-s", "f", "-f"], stdin=subprocess.PIPE)
+process = subprocess.Popen(["sxiv", "-i", "-f"], stdin=subprocess.PIPE)
 process.communicate("\n".join(images_str).encode())