# -*- coding: utf-8 -*-
# Copyright 2015-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract manga-chapters and entire manga from http://kissmanga.com/"""
from .common import Extractor, Message
from .. import text, cloudflare, aes
import re
import hashlib
# 16-byte initialization vector passed to aes.aes_cbc_decrypt_text()
# when decrypting the obfuscated image URLs (see get_image_urls below).
IV = [
    165, 232, 226, 233, 194, 114, 27, 224,
    168, 74, 214, 96, 196, 114, 193, 243,
]
class KissmangaExtractor(Extractor):
    """Base class for kissmanga extractors"""
    category = "kissmanga"
    directory_fmt = ["{category}", "{manga}",
                     "c{chapter:>03}{chapter-minor} - {title}"]
    filename_fmt = ("{manga}_c{chapter:>03}{chapter-minor}_"
                    "{page:>03}.{extension}")
    root = "http://kissmanga.com"

    def __init__(self, match):
        """Initialize with the full matched URL as the extraction target"""
        Extractor.__init__(self)
        self.url = match.group(0)
        # every session request carries the site root as Referer
        self.session.headers["Referer"] = self.root

    # Bind the cloudflare-aware request function as a class attribute so it
    # overrides Extractor.request for all kissmanga extractors.  As a local
    # assignment inside __init__ it would be dead code and have no effect.
    request = cloudflare.request_func
class KissmangaMangaExtractor(KissmangaExtractor):
    """Extractor for mangas from kissmanga.com"""
    subcategory = "manga"
    pattern = [r"(?:https?://)?(?:www\.)?kissmanga\.com/Manga/[^/]+/?$"]
    test = [("http://kissmanga.com/Manga/Dropout", {
        "url": "992befdd64e178fe5af67de53f8b510860d968ca",
    })]

    def items(self):
        """Queue every chapter of this manga for extraction"""
        yield Message.Version, 1
        for path in self.get_chapters():
            yield Message.Queue, self.root + path

    def get_chapters(self):
        """Return a list of all chapter urls"""
        page = self.request(self.url).text
        links = text.extract_iter(page, '<td>\n<a href="', '"')
        # emit chapters in reverse of the order they appear on the page
        return reversed(list(links))
class KissmangaChapterExtractor(KissmangaExtractor):
    """Extractor for manga-chapters from kissmanga.com"""
    subcategory = "chapter"
    pattern = [r"(?:https?://)?(?:www\.)?kissmanga\.com/Manga/.+/.+\?id=\d+"]
    test = [
        ("http://kissmanga.com/Manga/Dropout/Ch-000---Oneshot-?id=145847", {
            "url": "4136bcd1c6cecbca8cc2bc965d54f33ef0a97cc0",
            "keyword": "ab332093a4f2e473a468235bfd624cbe3b19fd7f",
        }),
        ("http://kissmanga.com/Manga/Urban-Tales/a?id=256717", {
            "url": "de074848f6c1245204bb9214c12bcc3ecfd65019",
            "keyword": "013aad80e578c6ccd2e1fe47cdc27c12a64f6db2",
        })
    ]

    def items(self):
        """Yield a directory message followed by one Url message per image"""
        page = self.request(self.url).text
        data = self.get_job_metadata(page)
        imgs = self.get_image_urls(page)
        data["count"] = len(imgs)
        yield Message.Version, 1
        yield Message.Directory, data
        # enumerate writes the running page number straight into 'data'
        for data["page"], url in enumerate(imgs, 1):
            yield Message.Url, url, text.nameext_from_url(url, data)

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job

        Parses 'Vol.XX Ch.XX.XX: title' style chapter info from the line
        following the 'Read manga' marker; all parts except the chapter
        number are optional.
        NOTE(review): the dots in 'Vol.'/'Ch.' are un-escaped regex
        wildcards, and re.match returns None for lines that do not fit the
        pattern (raising AttributeError below) -- left as-is because
        tightening could change matching and the test keyword hashes.
        """
        manga, pos = text.extract(page, "Read manga\n", "\n")
        cinfo, pos = text.extract(page, "", "\n", pos)
        match = re.match((r"(?:Vol.0*(\d+) )?(?:Ch.)?0*(\d+)"
                          r"(?:\.0*(\d+))?(?:: (.+))?"), cinfo)
        chminor = match.group(3)
        return {
            "manga": manga,
            "volume": match.group(1) or "",
            "chapter": match.group(2),
            "chapter-minor": "."+chminor if chminor else "",
            "title": match.group(4) or "",
            "lang": "en",
            "language": "English",
        }

    def get_image_urls(self, page):
        """Extract list of all image-urls for a manga chapter

        The URLs are embedded as AES-CBC encrypted arguments to
        'wrapKA(...)' calls; returns an empty list if decryption yields
        undecodable bytes (usually a wrong key).
        """
        try:
            key = self.build_aes_key(page)
            return [
                aes.aes_cbc_decrypt_text(data, key, IV)
                for data in text.extract_iter(
                    page, 'lstImages.push(wrapKA("', '"'
                )
            ]
        except UnicodeDecodeError:
            self.log.error("Failed to decrypt image URLs")
            return []

    def build_aes_key(self, page):
        """Get and parse the AES key

        Locates the last '<script type="text/javascript">' block before
        the '; key = ' assignment, reads the '\\x'-separated hex byte
        string from it, and returns the SHA-256 digest of those bytes as a
        list of 32 ints.  Falls back to 32 zero-bytes when the key cannot
        be found or parsed.
        """
        try:
            pos = page.rindex('; key = ')
            pos = page.rindex('<script type="text/javascript">', 0, pos)
        except ValueError:
            self.log.error("Unable to find AES key")
            return [0] * 32
        try:
            key = text.extract(page, ' = ["', '"]', pos)[0]
            # key looks like '\xNN\xNN...'; skip the leading 2 chars and
            # convert each hex pair to a byte value
            data = bytes(int(i, 16) for i in key[2:].split(r"\x"))
        except (ValueError, TypeError):
            self.log.error("Unable to parse AES key: '%s'", key)
            return [0] * 32
        return list(hashlib.sha256(data).digest())