#!/usr/bin/env python3

import os
import time
import sys
import requests
import json

from lxml import etree

# Browser-like User-Agent sent with every request so the site does not
# reject/throttle the scraper as an obvious bot client.
headers = {
    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
}

def usage():
    """Print top-level usage (available subcommands) and exit with status 1."""
    print("mangakatana.py command")
    print("commands:")
    print("  download")
    print("  list")
    # exit() is a site-module convenience that is not always available;
    # sys.exit() is the portable way to terminate a script.
    sys.exit(1)

def usage_list():
    """Print usage for the `list` subcommand and exit with status 1."""
    print("mangakatana.py list <url>")
    # sys.exit instead of the site-module exit() for portability.
    sys.exit(1)

def usage_download():
    """Print usage for the `download` subcommand and exit with status 1."""
    print("mangakatana.py download <url> <download_dir>")
    print("examples:")
    print("  mangakatana.py download \"http://mangakatana.com/manga/useless-ponko.22679\" /home/user/Manga/MangaName")
    print("")
    print("Note: The manga directory has to exist.")
    # sys.exit instead of the site-module exit() for portability.
    sys.exit(1)

# Bail out with usage help when no subcommand was given at all.
if len(sys.argv) < 2:
    usage()

def download_file(url, save_path):
    """Stream the resource at `url` into `save_path`.

    Returns the number of bytes written. Returns 0 when the HTTP response
    is not successful — callers treat a falsy result as failure.
    """
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        if not response.ok:
            return 0
        written = 0
        with open(save_path, "wb") as out:
            for piece in response.iter_content(chunk_size=8192):
                # iter_content can yield empty keep-alive chunks; skip them.
                if not piece:
                    continue
                out.write(piece)
                written += len(piece)
        return written

def list_chapters(url, chapter_list_input):
    """Print, as JSON on stdout, the chapters at `url` not yet seen.

    chapter_list_input is a list of dicts with optional "title"/"url" keys
    describing already-downloaded chapters. The site lists newest chapters
    first, so scanning stops at the first already-seen entry and only the
    newer chapters are emitted.

    Raises requests.HTTPError when the page fetch fails.
    """
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()

    # Build both lookup sets in a single pass over the input
    # (titles are normalized the same way the scraped titles are below).
    seen_titles = set()
    seen_urls = set()
    for item in chapter_list_input:
        title = item.get("title")
        if title:
            seen_titles.add(title.lower().replace(" ", "").replace("/", "_"))
        seen_url = item.get("url")
        if seen_url:
            seen_urls.add(seen_url)

    tree = etree.HTML(response.text)
    chapters = []
    for element in tree.xpath('//div[@class="chapters"]//div[@class="chapter"]//a[1]'):
        # An anchor may have no direct text or no href; treat those as empty
        # instead of crashing on None. "/" is replaced so the name is safe
        # to use as a directory component.
        element_text = (element.text or "").strip().replace("/", "_")
        chapter_url = (element.attrib.get("href") or "").strip()
        if element_text.lower().replace(" ", "") in seen_titles or chapter_url in seen_urls:
            break
        chapters.append({ "name": element_text, "url": chapter_url })

    print(json.dumps(chapters))

def download_chapter(url, download_dir):
    """Download every page image of the chapter at `url` into download_dir.

    Writes a ".in_progress" marker file (containing the chapter url) while
    downloading and a ".finished" marker on success. Exits the process with
    status 2 on any failure. download_dir must already exist.

    Raises requests.HTTPError when the chapter page fetch fails.
    """
    # Send the same browser headers as every other request in this script
    # (the original chapter-page fetch was the only one without them).
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()

    in_progress_filepath = os.path.join(download_dir, ".in_progress")
    with open(in_progress_filepath, "w") as file:
        file.write(url)

    def fail(message):
        # Shared failure path: report, drop the in-progress marker, exit.
        print(message)
        os.remove(in_progress_filepath)
        exit(2)

    response_text = response.text
    # The page embeds the image urls as a JS array literal: ytaw=['u1','u2',...]
    marker = "ytaw=["
    sources_start = response_text.find(marker)
    if sources_start == -1:
        fail("Failed to find images for chapter")

    sources_start += len(marker)
    sources_end = response_text.find("]", sources_start)
    if sources_end == -1:
        fail("Failed to find images for chapter")

    image_sources = response_text[sources_start:sources_end].replace("'", "").split(",")
    img_number = 1
    for image_source in image_sources:
        image_source = image_source.strip()
        if not image_source:
            continue
        # Keep the source url's extension; if the url has no "." at all,
        # fall back to no extension instead of treating the last character
        # as one (rfind would return -1 and slice just the final char).
        dot = image_source.rfind(".")
        ext = image_source[dot:] if dot != -1 else ""
        image_path = os.path.join(download_dir, str(img_number) + ext)
        print("Downloading {} to {}".format(image_source, image_path))
        if not download_file(image_source, image_path):
            fail("Failed to download image: %s" % image_source)
        img_number += 1

    # No image url survived the parse -> nothing was downloaded.
    if img_number == 1:
        fail("Failed to find images for chapter")

    with open(os.path.join(download_dir, ".finished"), "w") as file:
        file.write("1")

    os.remove(in_progress_filepath)

# Command-line dispatch: first argument selects the subcommand.
command = sys.argv[1]
if command == "list":
    if len(sys.argv) < 3:
        usage_list()
    
    url = sys.argv[2]
    # Already-seen chapters arrive as a JSON array on stdin; an empty
    # stdin means "nothing seen yet".
    chapter_list_input = sys.stdin.read()
    if len(chapter_list_input) == 0:
        chapter_list_input = []
    else:
        chapter_list_input = json.loads(chapter_list_input)
    list_chapters(url, chapter_list_input)
elif command == "download":
    if len(sys.argv) < 4:
        usage_download()
    url = sys.argv[2]
    download_dir = sys.argv[3]
    download_chapter(url, download_dir)
else:
    usage()