# -*- coding: utf-8 -*-

# Copyright 2015-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract manga-chapters from https://readms.net/"""

from .common import ChapterExtractor
from .. import text


class MangastreamChapterExtractor(ChapterExtractor):
    """Extractor for manga-chapters from mangastream.com"""
    category = "mangastream"
    archive_fmt = "{chapter_id}_{page}"
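    # both readms.net and mangastream.com URLs are matched; the outer group
    # captures the whole "series/chapter/id" path, the inner groups the
    # chapter label and its numeric id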
    pattern = (r"(?:https?://)?(?:www\.)?(?:readms\.net|mangastream\.com)"
               r"/r(?:ead)?/([^/]*/([^/]+)/(\d+))")
    test = (
        ("https://readms.net/r/onepunch_man/087/4874/1"),
        ("https://mangastream.com/r/onepunch_man/087/4874/1"),
    )
    root = "https://readms.net"

    def __init__(self, match):
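        # group 1 is the full "series/chapter/id" path; re-appending it to
        # the site root yields the reader URL handed to ChapterExtractor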
        self.part, self.chapter, self.chapter_id = match.groups()
        url = "{}/r/{}".format(self.root, self.part)
        ChapterExtractor.__init__(self, match, url)

    def metadata(self, page):
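        # scrape series name, chapter title, and page count from the reader
        # markup, scanning forward from the chapter path (self.part);
        # the page count falls back to 1 if "Last Page (N)" is not found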
        manga, pos = text.extract(
            page, '<span class="hidden-xs hidden-sm">', "<")
        pos = page.find(self.part, pos)
        title, pos = text.extract(page, ' - ', '<', pos)
        count, pos = text.extract(page, 'Last Page (', ')', pos)
        return {
            "manga": manga,
            "chapter": text.unquote(self.chapter),
            "chapter_id": text.parse_int(self.chapter_id),
            "title": title,
            "count": text.parse_int(count, 1),
            "lang": "en",
            "language": "English",
        }

    def images(self, page):
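        # each reader page carries one image and the link to the next page;
        # there is no explicit stop condition here, so the caller is
        # presumably expected to stop after the page count from metadata()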
        while True:
            pos = page.index(' class="page"')
            next_url = text.extract(page, ' href="', '"', pos)[0]
            image_url = text.extract(page, ' src="', '"', pos)[0]
            yield text.urljoin(self.root, image_url), None
            page = self.request(text.urljoin(self.root, next_url)).text