# -*- coding: utf-8 -*-

# Copyright 2015, 2016 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract manga-chapters and entire manga from http://mangapark.me/"""

from .common import Extractor, Message
from .. import text


class MangaparkMangaExtractor(Extractor):
    """Extractor for mangas from mangapark.me"""
    category = "mangapark"
    subcategory = "manga"
    pattern = [r"(?:https?://)?(?:www\.)?mangapark\.me/manga/([^/]+)$"]
    test = [("http://mangapark.me/manga/mushishi", {
        "url": "9902e342af71af19a5ac20fcd01950b165acf119",
    })]
    url_base = "http://mangapark.me"

    def __init__(self, match):
        Extractor.__init__(self)
        self.url_title = match.group(1)

    def items(self):
        yield Message.Version, 1
        for chapter in self.get_chapters():
            yield Message.Queue, self.url_base + chapter

    def get_chapters(self):
        """Return an iterable of all chapter URLs"""
        page = self.request(self.url_base + "/manga/" + self.url_title).text
        needle = '<a class="ch sts sts_1" target="_blank" href="'
        pos = page.index('<div id="list" class="book-list">')
        # the site lists chapters newest-first; reverse them so they
        # are queued in chronological order
        return reversed(list(
            text.extract_iter(page, needle, '"', pos)
        ))
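    # A rough sketch of the ``text.extract_iter`` call above, assuming it
    # yields every substring found between ``needle`` and the closing '"',
    # starting the search at offset ``pos`` (the sample HTML below is
    # hypothetical):
    #
    #     html = (needle + '/manga/mushishi/s1/c1">'
    #             + needle + '/manga/mushishi/s1/c2">')
    #     list(text.extract_iter(html, needle, '"'))
    #     # -> ['/manga/mushishi/s1/c1', '/manga/mushishi/s1/c2']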


class MangaparkChapterExtractor(Extractor):
    """Extractor for manga-chapters from mangapark.me"""
    category = "mangapark"
    subcategory = "chapter"
    directory_fmt = ["{category}", "{manga}",
                     "c{chapter:>03}{chapter-minor} - {title}"]
    filename_fmt = ("{manga}_c{chapter:>03}{chapter-minor}_"
                    "{page:>03}.{extension}")
    pattern = [(r"(?:https?://)?(?:www\.)?mangapark\.me/manga/"
                r"([^/]+/s(\d+)(?:/v([^/]+))?/c(\d+)(?:([^/]+)|/e(\d+))?)")]
    test = [
        ("http://mangapark.me/manga/gosu/s2/c55", {
            "url": "482d4a27c1e7f03cff8afac145d06f3ddeac82bb",
            "keyword": "bd97ca24ef344b44292910384215ef3f1005ea2e",
        }),
        (("http://mangapark.me/manga/"
          "ad-astra-per-aspera-hata-kenjirou/s1/c1.2"), {
            "url": "f325ce264df390c5ba9607c52a7e7b0829672404",
            "keyword": "6e56986610cb2da9917d0d9d3217d700fbc48665",
        }),
        ("http://mangapark.me/manga/gekkan-shoujo-nozaki-kun/s2/c70/e2/1", {
            "url": "8534c8286a18c4db47606f84a4df9f1a42bab291",
            "keyword": "46a332caa65ef646c9405f69947c27f0dbc5430e",
        }),
    ]

    def __init__(self, match):
        Extractor.__init__(self)
        self.part = match.group(1)
        self.version = match.group(2)
        self.volume = match.group(3)
        self.chapter = match.group(4)
        try:
            # group(5) carries a minor-chapter suffix such as ".2"; the
            # "/e<num>" URL form stores its number in group(6) instead
            self.chminor = match.group(5) or "v" + match.group(6)
        except TypeError:
            # neither group matched: no minor-chapter part
            self.chminor = ""

    def items(self):
        page = self.request("http://mangapark.me/manga/" + self.part +
                            "?zoom=2").text
        data = self.get_job_metadata(page)
        yield Message.Version, 1
        yield Message.Directory, data
        for url, image in self.get_images(page):
            data.update(image)
            yield Message.Url, url, text.nameext_from_url(url, data)

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        data = {
            "version": self.version,
            "volume": self.volume or "",
            "chapter": self.chapter,
            "chapter-minor": self.chminor or "",
            "lang": "en",
            "language": "English",
        }
        data = text.extract_all(page, (
            ("manga-id"  , "var _manga_id = '", "'"),
            ("chapter-id", "var _book_id = '", "'"),
            ("manga"     , "<h2>", "</h2>"),
            ("title"     , "</a>", "<"),
            (None        , 'target="_blank" href="', ''),
            ("count"     , 'page 1">1 / ', '<'),
        ), values=data)[0]
        # the <h2> heading ends in a media-type word; split it off
        # into "type" and unescape HTML entities in the manga name
        data["manga"], data["type"] = data["manga"].rsplit(" ", maxsplit=1)
        data["manga"] = text.unescape(data["manga"])
        # keep only the text after ": " as the title, or "" if absent
        pos = data["title"].find(": ")
        data["title"] = data["title"][pos+2:] if pos != -1 else ""
        return data
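    # ``text.extract_all`` above is assumed to scan the page once, pulling
    # out each (key, before, after) tuple in order, storing matches under
    # ``key`` (entries with a ``None`` key only advance the position),
    # merging everything into ``values``, and returning a ``(values, pos)``
    # tuple, hence the trailing ``[0]``.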

    @staticmethod
    def get_images(page):
        """Collect image-urls, -widths and -heights"""
        pos = 0
        num = 0
        while True:
            url, pos = text.extract(page, ' target="_blank" href="', '"', pos)
            if not url:
                return
            num += 1
            width , pos = text.extract(page, ' width="', '"', pos)
            # '_heighth' is presumably the attribute name exactly as it
            # appears in the site's HTML, misspelling included
            height, pos = text.extract(page, ' _heighth="', '"', pos)
            yield url, {
                "page": num,
                "width": width,
                "height": height,
            }
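

# A minimal, hypothetical sketch of how a frontend could drive these
# extractors; the message handling below is an assumption for illustration
# only (gallery-dl's own job classes do the real work):
#
#     import re
#     match = re.match(MangaparkChapterExtractor.pattern[0],
#                      "http://mangapark.me/manga/gosu/s2/c55")
#     extr = MangaparkChapterExtractor(match)
#     for msg in extr.items():
#         if msg[0] == Message.Url:
#             _, url, metadata = msg
#             ...  # download 'url' and name the file from 'metadata'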