gallery-dl/gallery_dl/extractor/spectrumnexus.py

# -*- coding: utf-8 -*-
# Copyright 2015-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract manga pages from http://www.thespectrum.net/manga_scans/"""
from .common import MangaExtractor, AsynchronousExtractor, Message
2015-11-13 00:21:50 +01:00
from .. import text
2017-02-01 00:53:19 +01:00
class SpectrumnexusMangaExtractor(MangaExtractor):
"""Extractor for manga from thespectrum.net"""
    category = "spectrumnexus"
    pattern = [r"(?:https?://)?(view\.thespectrum\.net/series/[^.]+\.html)#?$"]
    reverse = False
    test = [("http://view.thespectrum.net/series/kare-kano-volume-01.html", {
        "url": "b2b175aad5ef1701cc4aee7c24f1ca3a93aba9cb",
    })]

    def chapters(self, page):
        page = text.extract(page, 'class="selectchapter"', '</select>')[0]
        return [
            self.url + "?ch=" + chapter.replace(" ", "+")
            for chapter in text.extract_iter(page, '<option value="', '"')
        ]
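
# Rough sketch (assumption, simplified) of the chapter-select markup that
# chapters() above parses, and the chapter URLs it builds from it:
#
#   <select class="selectchapter">
#       <option value="Chapter 343">Chapter 343</option>
#       <option value="Volume 01">Volume 01</option>
#   </select>
#
#   -> http://view.thespectrum.net/series/<series>.html?ch=Chapter+343
#   -> http://view.thespectrum.net/series/<series>.html?ch=Volume+01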


class SpectrumnexusChapterExtractor(AsynchronousExtractor):
    """Extractor for manga-chapters or -volumes from thespectrum.net"""
    category = "spectrumnexus"
    subcategory = "chapter"
    directory_fmt = ["{category}", "{manga}", "{identifier}"]
    filename_fmt = "{manga} {identifier} {page:>03}.{extension}"
    pattern = [
        # (1) page path, (2) 'Chapter+N' or 'Volume+N',
        # (3) chapter number, (4) volume number
        (r"(?:https?://)?(view\.thespectrum\.net/series/"
         r"[^\.]+\.html)\?ch=(Chapter\+(\d+)|Volume\+(\d+))"),
        # (1) page path of '...-chapter-N.html' style URLs, (2) chapter number
        (r"(?:https?://)?(view\.thespectrum\.net/series/"
         r"[^/]+-chapter-(\d+)\.html)"),
    ]
    test = [(("http://view.thespectrum.net/series/"
              "toriko.html?ch=Chapter+343&page=1"), {
        "url": "c0fc7dc594841217cc622a67edd79f06e9900333",
        "keyword": "8499166b62db0c87e7109cc5f9aa837b4815dd9c",
    })]

    def __init__(self, match):
        AsynchronousExtractor.__init__(self)
        self.url = "http://" + match.group(1)
        self.identifier = match.group(2)
        self.chapter = match.group(3)
        self.volume = match.group(4)

    def items(self):
        params = {
            "ch": self.identifier,
            "page": 1,
        }
        # fetch the first viewer page and read the total page count from it
        page = self.request(self.url, params=params).text
        data = self.get_job_metadata(page)
        yield Message.Version, 1
        yield Message.Directory, data.copy()
        count = int(data["count"])
        for i in range(1, count+1):
            url = self.get_image_url(page)
            text.nameext_from_url(url, data)
            data["page"] = i
            yield Message.Url, url, data.copy()
            if i < count:
                # request the next viewer page for the following iteration
                params["page"] += 1
                page = self.request(self.url, params=params).text

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        data = {
            "chapter": self.chapter or "",
            "volume": self.volume or "",
            "identifier": self.identifier.replace("+", " "),
        }
        return text.extract_all(page, (
            ('manga', '<title>', ' &#183; SPECTRUM NEXUS </title>'),
            ('count', '<div class="viewerLabel"> of ', '<'),
        ), values=data)[0]

    @staticmethod
    def get_image_url(page):
        """Extract url of one manga page"""
        return text.extract(page, '<img id="mainimage" src="', '"')[0]
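

# Minimal sketch (illustration only, not part of the module) of how the first
# chapter pattern above maps a URL onto the match groups read in __init__():
#
#   import re
#   url = "http://view.thespectrum.net/series/toriko.html?ch=Chapter+343"
#   match = re.match(SpectrumnexusChapterExtractor.pattern[0], url)
#   match.group(1)  # 'view.thespectrum.net/series/toriko.html'
#   match.group(2)  # 'Chapter+343'
#   match.group(3)  # '343'
#   match.group(4)  # None (set only for '?ch=Volume+N' URLs)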