#!/usr/bin/env python3

"""
Migrates manga tracked by automedia with the old numeric mangadex ids to the
new uuid based ids, using the https://api.mangadex.org/legacy/mapping endpoint.
The migration only runs once; ~/.config/automedia/mangadex-upgraded is created
when it has finished.
"""

import os
import json
import re
import requests

class Chapter:
    id = ""
    title = ""
    time = ""

class Manga:
    id = ""
    updated = ""
    chapters = None
    directory = ""

    def __init__(self):
        self.chapters = []
# Returns None if the manga is not tracked using mangadex or if it has already been migrated
def manga_data_file_read(filepath):
    with open(filepath, "rb") as file:
        data = json.load(file)
        if data["plugin"] != "mangadex.py":
            return None

        manga_link = data["link"]
        manga_id_search = re.search(r"/title/([0-9]+)", manga_link)
        if not manga_id_search:
            print("Mangadex manga already migrated: %s" % manga_link)
            return None

        manga_data = Manga()
        manga_data.id = int(manga_id_search.group(1))
        manga_data.updated = data["updated"]

        downloaded = data.get("downloaded")
        if isinstance(downloaded, list):
            for downloaded_item in downloaded:
                chapter_url = downloaded_item["url"]
                chapter_id_search = re.search(r"chapter/([0-9]+)", chapter_url)
                if not chapter_id_search:
                    print("Failed to extract the chapter id from manga %s, chapter %s" % (manga_link, chapter_url))
                    exit(1)

                chapter_data = Chapter()
                chapter_data.id = int(chapter_id_search.group(1))
                chapter_data.title = downloaded_item["title"]
                chapter_data.time = downloaded_item["time"]
                manga_data.chapters.append(chapter_data)
        return manga_data
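
# An illustrative sketch of the legacy "data" file that manga_data_file_read() expects.
# The field values here are made up; only the keys and the /title/<id> and chapter/<id>
# link formats are taken from the parsing above:
#
# {
#     "plugin": "mangadex.py",
#     "link": "https://mangadex.org/title/12345/some-manga",
#     "updated": "...",
#     "downloaded": [
#         {"title": "Ch. 1", "time": "...", "url": "https://mangadex.org/chapter/67890"}
#     ]
# }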
# Returns a list of tuples where the first element is the legacy id and the second element is the new id
def legacy_id_to_new_id(ids, id_type):
    mapping = []
    id_start = 0
    while id_start < len(ids):
        # Query the legacy mapping endpoint in batches of at most 1000 ids
        id_end = min(id_start + 1000, len(ids))
        if id_end - id_start == 0:
            break

        response = requests.post("https://api.mangadex.org/legacy/mapping", json={"type": id_type, "ids": ids[id_start:id_end]})
        response.raise_for_status()
        response_json = response.json()
        for response_item in response_json:
            if response_item["result"] != "ok":
                print("legacy mapping returned an error")
                exit(1)
            attributes = response_item["data"]["attributes"]
            mapping.append((attributes["legacyId"], attributes["newId"]))
        id_start = id_end

    if len(mapping) != len(ids):
        print("Failed to get the legacy to new id mapping for all ids. Got %d mappings, expected %d" % (len(mapping), len(ids)))
        exit(1)
    return mapping
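
# Usage sketch for legacy_id_to_new_id (the ids are hypothetical):
# legacy_id_to_new_id([123, 456], "manga") would return something like
# [(123, "a96676e5-..."), (456, "...")], where the new ids are the uuid strings
# mangadex now uses; the main block below stores them directly as the new "link".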

def get_manga_by_id(manga_list, manga_id):
    for manga_data in manga_list:
        if manga_data.id == manga_id:
            return manga_data

def get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, chapter_id):
    for chapter_id_mapping in legacy_chapter_ids_to_new_ids:
        if chapter_id_mapping[0] == chapter_id:
            return chapter_id_mapping

def file_overwrite_atomic(filepath, content):
    tmp_filepath = filepath + ".tmp"
    with open(tmp_filepath, "wb") as file:
        file.write(content.encode())
        file.flush()
        os.fsync(file.fileno())
    os.rename(tmp_filepath, filepath)
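
# Note: the rename-over-target trick above is only atomic because the ".tmp" file is
# created next to the target (same directory, hence same filesystem); os.rename() then
# swaps it in as a single step on POSIX, so an interrupted write leaves either the old
# or the new file in place, never a half-written one.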

if __name__ == "__main__":
    migrate_finished_filepath = os.path.expanduser("~/.config/automedia/mangadex-upgraded")
    if os.path.isfile(migrate_finished_filepath):
        print("Mangadex tracked manga have already been migrated, nothing to do")
        exit(0)

    tracked_dir = os.path.expanduser("~/.config/automedia/html/tracked")
    if not os.path.isdir(tracked_dir):
        exit(0)

    # Collect every tracked manga that still uses a legacy numeric mangadex id
    manga_list = []
    for manga_name in os.listdir(tracked_dir):
        manga_tracked_dir = os.path.join(tracked_dir, manga_name)
        manga_data_file = os.path.join(manga_tracked_dir, "data")
        manga_data = manga_data_file_read(manga_data_file)
        if manga_data:
            manga_data.directory = manga_tracked_dir
            manga_list.append(manga_data)

    manga_ids = []
    chapter_ids = []
    for manga_data in manga_list:
        manga_ids.append(manga_data.id)
        for chapter_data in manga_data.chapters:
            chapter_ids.append(chapter_data.id)

    legacy_manga_ids_to_new_ids = legacy_id_to_new_id(manga_ids, "manga")
    legacy_chapter_ids_to_new_ids = legacy_id_to_new_id(chapter_ids, "chapter")

    for manga_id in legacy_manga_ids_to_new_ids:
        manga_data = get_manga_by_id(manga_list, manga_id[0])
        if not manga_data:
            print("Failed to get manga by id: %s" % manga_id[0])
            exit(1)

        new_manga_link = str(manga_id[1])
        new_manga_data = {}
        new_manga_data["plugin"] = "mangadex.py"
        new_manga_data["link"] = new_manga_link
        new_manga_data["updated"] = manga_data.updated

        downloaded = []
        for chapter_data in manga_data.chapters:
            chapter_id_mapping = get_chapter_by_id_in_legacy_mapping(legacy_chapter_ids_to_new_ids, chapter_data.id)
            if not chapter_id_mapping:
                print("Failed to get the new id for manga %d, chapter %d" % (manga_data.id, chapter_data.id))
                exit(1)

            downloaded_item = {}
            downloaded_item["title"] = chapter_data.title
            downloaded_item["time"] = chapter_data.time
            downloaded_item["url"] = chapter_id_mapping[1]
            downloaded.append(downloaded_item)
        new_manga_data["downloaded"] = downloaded

        file_overwrite_atomic(os.path.join(manga_data.directory, "link"), new_manga_link)
        new_manga_data_str = json.dumps(new_manga_data, indent=4)
        file_overwrite_atomic(os.path.join(manga_data.directory, "data"), new_manga_data_str)

    file_overwrite_atomic(migrate_finished_filepath, "1")
    print("Successfully migrated %d manga with a total of %d chapters" % (len(manga_ids), len(chapter_ids)))