# -*- coding: utf-8 -*-

# Copyright 2015-2018 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract manga-chapters from https://mangastream.com/"""
from .common import ChapterExtractor
from .. import text, util
from urllib.parse import urljoin
class MangastreamChapterExtractor(ChapterExtractor):
    """Extractor for manga-chapters from mangastream.com"""
    category = "mangastream"
    archive_fmt = "{chapter_id}_{page}"
    pattern = [(r"(?:https?://)?(?:www\.)?(?:readms|mangastream)\.(?:com|net)/"
                r"r(?:ead)?/([^/]*/([^/]+)/(\d+))")]
    test = [("https://readms.net/r/onepunch_man/087/4874/1", None)]
    base_url = "https://readms.net/r/"

    def __init__(self, match):
        # group 1: full "<manga>/<chapter>/<id>" path,
        # group 2: chapter label, group 3: numeric chapter id
        self.part, self.chapter, self.ch_id = match.groups()
        ChapterExtractor.__init__(self, self.base_url + self.part)

    def get_metadata(self, page):
        """Extract chapter metadata from the HTML of 'page'."""
        # manga name lives in a breadcrumb span that is hidden on
        # small screens
        manga, cursor = text.extract(
            page, '<span class="hidden-xs hidden-sm">', "<")
        # jump forward to the chapter path before reading title/count,
        # so the markers below match the right occurrences
        cursor = page.find(self.part, cursor)
        title, cursor = text.extract(page, ' - ', '<', cursor)
        count, cursor = text.extract(page, 'Last Page (', ')', cursor)
        return {
            "manga": manga,
            "chapter": text.unquote(self.chapter),
            "chapter_id": util.safe_int(self.ch_id),
            "title": title,
            "count": util.safe_int(count, 1),
            "lang": "en",
            "language": "English",
        }

    def get_images(self, page):
        """Yield (image-url, metadata) pairs, one per chapter page.

        NOTE(review): this generator has no stop condition of its own;
        presumably the caller consumes only 'count' items — confirm
        against the ChapterExtractor driver.
        """
        while True:
            # the ' class="page"' anchor precedes both the link to the
            # next page and the current page's image
            anchor = page.index(' class="page"')
            image_url = text.extract(page, ' src="', '"', anchor)[0]
            next_url = text.extract(page, ' href="', '"', anchor)[0]
            yield urljoin(self.base_url, image_url), None
            page = self.request(urljoin(self.base_url, next_url)).text
|