# -*- coding: utf-8 -*-
# Copyright 2015-2018 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract manga-chapters from https://dynasty-scans.com/"""
from .common import ChapterExtractor
from .. import text
import re
import json
class DynastyscansChapterExtractor(ChapterExtractor):
    """Extractor for manga-chapters from dynasty-scans.com"""
    category = "dynastyscans"
    pattern = [r"(?:https?://)?(?:www\.)?dynasty-scans\.com/chapters/([^/]+)"]
    test = [
        (("http://dynasty-scans.com/chapters/"
          "hitoribocchi_no_oo_seikatsu_ch33"), {
            "url": "dce64e8c504118f1ab4135c00245ea12413896cb",
            "keyword": "ec5c56bbd5c97aa521d00f2598bba4663fb8ab9f",
        }),
        (("http://dynasty-scans.com/chapters/"
          "new_game_the_spinoff_special_13"), {
            "url": "dbe5bbb74da2edcfb1832895a484e2a40bc8b538",
            "keyword": "1208a102d9a1bb0b0c740a67996d9b26a9357b64",
        }),
    ]
    root = "https://dynasty-scans.com"

    def __init__(self, match):
        # group(1) of 'pattern' is the chapter-name part of the URL
        self.chaptername = match.group(1)
        url = self.root + "/chapters/" + self.chaptername
        ChapterExtractor.__init__(self, url)

    def get_metadata(self, page):
        """Collect metadata for extractor-job

        Pulls the chapter title, author, scanlation group and release
        date out of the chapter's HTML page, then splits the title into
        manga name, chapter number, minor-chapter suffix and title text.
        """
        info, pos = text.extract(page, "<h3 id='chapter-title'><b>", "</b>")
        author, pos = text.extract(page, " by ", "</a>", pos)
        group, pos = text.extract(page, '"icon-print"></i> ', '</span>', pos)
        date, pos = text.extract(page, '"icon-calendar"></i> ', '<', pos)
        match = re.match(
            (r"(?:<a[^>]*>)?([^<]+)(?:</a>)?"  # manga name
             r"(?: ch(\d+)([^:<]*))?"          # chapter info
             r"(?:: (.+))?"),                  # title
            info
        )
        return {
            "manga": text.unescape(match.group(1)),
            "chapter": text.parse_int(match.group(2)),
            "chapter_minor": match.group(3) or "",
            "title": text.unescape(match.group(4) or ""),
            "author": text.remove_html(author),
            # 'group' is either plain text or an <img>; fall back to the
            # image's alt-attribute when remove_html() yields nothing
            "group": (text.remove_html(group) or
                      text.extract(group, ' alt="', '"')[0] or ""),
            "date": date,
            "lang": "en",
            "language": "English",
        }

    def get_images(self, page):
        """Extract list of all image-urls for a manga chapter

        The page embeds its image list as a JavaScript array
        ('var pages = [...]'); decode it as JSON and build
        (url, metadata) tuples as expected by ChapterExtractor.
        """
        data = text.extract(page, "var pages = ", ";\n")[0]
        return [
            (self.root + img["image"], None)
            for img in json.loads(data)
        ]