[batoto] a few fixes

- removed 'volume' fields from default format strings
- fixed parsing by making volumes optional (see the sketch below)
- removed debug output
- updated docstring
- added a few unescape calls
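
The core of the parsing fix, as a standalone sketch (a simplified stand-in for the pattern introduced in get_page_metadata, not the exact one; the sample titles are invented):

    import re

    # "(?:vol (\d+) )?" wraps the volume part in an optional group, so pages
    # without a volume number still match; the capture is then None and the
    # extractor falls back to an empty string.
    pattern = re.compile(r"(.+) - (?:vol (\d+) )?ch (\d+)")

    for title in ("Some Manga - vol 2 ch 15", "Some Manga - ch 15"):
        match = pattern.match(title)
        print(repr(match.group(2) or ""))  # '2' for the first title, '' for the second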
Mike Fährmann 2015-05-10 15:47:44 +02:00
parent a145a73d05
commit c0530a635c


@@ -10,7 +10,7 @@
 from .common import AsynchronousExtractor
 from .common import Message
-from .common import filename_from_url
+from .common import filename_from_url, unescape
 from urllib.parse import unquote
 import os.path
 import re
 
@@ -18,9 +18,8 @@ import re
 info = {
     "category": "batoto",
     "extractor": "BatotoExtractor",
-    "directory": ["{category}", "{manga}",
-                  "v{volume:>02} c{chapter:>03} - {title}"],
-    "filename": "{manga}_v{volume:>02}_c{chapter:>03}_{page:>03}.{extension}",
+    "directory": ["{category}", "{manga}", "c{chapter:>03} - {title}"],
+    "filename": "{manga}_c{chapter:>03}_{page:>03}.{extension}",
     "pattern": [
         r"(?:https?://)?(?:www\.)?bato\.to/read/_/(\d+).*",
     ],
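
To see what the new defaults produce, here is a rough illustration (not part of the commit; the sample metadata values are invented, and joining the directory parts with "/" is only for display):

    directory_fmt = ["{category}", "{manga}", "c{chapter:>03} - {title}"]
    filename_fmt = "{manga}_c{chapter:>03}_{page:>03}.{extension}"

    # Sample metadata, shaped like the dict returned by get_page_metadata.
    data = {
        "category": "batoto",
        "manga": "Some Manga",
        "chapter": "15",
        "page": "3",
        "title": "Some Title",
        "extension": "jpg",
    }

    print("/".join(part.format(**data) for part in directory_fmt))
    # -> batoto/Some Manga/c015 - Some Title
    print(filename_fmt.format(**data))
    # -> Some Manga_c015_003.jpg

Neither string references {volume} anymore, which is why the parser below can treat it as optional.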
@@ -39,12 +38,11 @@ class BatotoExtractor(AsynchronousExtractor):
         url = self.url_base + self.chapter_id
         while url:
             url, data = self.get_page_metadata(url)
-            print(data)
             yield Message.Directory, data
             yield Message.Url, data["image-url"], data
 
     def get_page_metadata(self, page_url):
-        """Collect metadata for one manga-page"""
+        """Collect next url and metadata for one manga-page"""
         page = self.request(page_url).text
         _    , pos = self.extract(page, 'selected="selected"', '')
         title, pos = self.extract(page, ': ', '<', pos)
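
The chained self.extract calls above rely on a helper that is not part of this diff. Assuming it follows the usual begin/end convention, a hypothetical stand-in could look like this (name, signature, and failure behavior are guesses, not the real implementation):

    # Hypothetical stand-in for the extract() helper used above; the real one
    # lives elsewhere in the code base. It returns the text found between
    # 'begin' and 'end' plus the position right after it, so calls can be
    # chained through 'pos'. With an empty 'end' it returns "" and merely
    # advances the position, which is how the 'selected="selected"' calls
    # above skip forward in the page.
    def extract(txt, begin, end, pos=0):
        try:
            first = txt.index(begin, pos) + len(begin)
            last = txt.index(end, first)
            return txt[first:last], last + len(end)
        except ValueError:
            return None, pos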
@@ -53,20 +51,27 @@ class BatotoExtractor(AsynchronousExtractor):
         _    , pos = self.extract(page, '<div id="full_image"', '', pos)
         image, pos = self.extract(page, '<img src="', '"', pos)
         url  , pos = self.extract(page, '<a href="', '"', pos)
-        mmatch = re.search(r"<title>(.+) - vol (\d+) ch (\d+) Page (\d+)", page)
-        tmatch = re.match(r"(.+) - ([^ ]+)", trans)
+        mmatch = re.search(
+            r"<title>(.+) - (?:vol (\d+) )?"
+            r"ch (\d+)[^ ]+ Page (\d+) | Batoto!</title>",
+            page
+        )
+        tmatch = re.match(
+            r"(.+) - ([^ ]+)",
+            trans
+        )
         filename = unquote(filename_from_url(image))
         name, ext = os.path.splitext(filename)
         return url, {
             "category": info["category"],
             "chapter-id": self.chapter_id,
-            "manga": mmatch.group(1),
-            "volume": mmatch.group(2),
+            "manga": unescape(mmatch.group(1)),
+            "volume": mmatch.group(2) or "",
             "chapter": mmatch.group(3),
             "page": mmatch.group(4),
             "group": tmatch.group(1),
             "language": tmatch.group(2),
-            "title": title,
+            "title": unescape(title),
             "image-url": image,
             "name": name,
             "extension": ext[1:],