[mangapark] support .net and .com mirrors

parent c20c0a4820
commit 2eefaa99a3
@@ -15,7 +15,7 @@ from .. import text
 class MangaparkExtractor():
     """Base class for mangapark extractors"""
     category = "mangapark"
-    root = "https://mangapark.me"
+    root_fmt = "https://mangapark.{}"

     @staticmethod
     def parse_chapter_path(path, data):
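The fixed root URL becomes a format template: the mirror TLD captured from the
input URL ("me", "net" or "com") is filled in by each extractor, so all three
mirrors share one code path. A minimal standalone sketch of the idea (not the
actual extractor code):

    root_fmt = "https://mangapark.{}"

    tld = "net"                    # assumed: whatever TLD the URL pattern captured
    root = root_fmt.format(tld)    # -> "https://mangapark.net"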
@@ -37,11 +37,20 @@ class MangaparkExtractor():

 class MangaparkMangaExtractor(MangaparkExtractor, MangaExtractor):
     """Extractor for manga from mangapark.me"""
-    pattern = [r"(?:https?://)?(?:www\.)?(mangapark\.me/manga/[^/]+)/?$"]
-    test = [("https://mangapark.me/manga/aria", {
-        "url": "4cb5606530b4eeacde7a4c9fd38296eb6ff46563",
-        "keyword": "e87ab8e7ad2571bbe587881e7fd422e8f582f818",
-    })]
+    pattern = [r"(?:https?://)?(?:www\.)?mangapark\.(me|net|com)"
+               r"(/manga/[^/?&#]+)/?$"]
+    test = [
+        ("https://mangapark.me/manga/aria", {
+            "url": "4cb5606530b4eeacde7a4c9fd38296eb6ff46563",
+            "keyword": "e87ab8e7ad2571bbe587881e7fd422e8f582f818",
+        }),
+        ("https://mangapark.net/manga/aria", None),
+        ("https://mangapark.com/manga/aria", None),
+    ]
+
+    def __init__(self, match):
+        self.root = self.root_fmt.format(match.group(1))
+        MangaExtractor.__init__(self, match, self.root + match.group(2))

     def chapters(self, page):
         results = []
@@ -69,8 +78,8 @@ class MangaparkMangaExtractor(MangaparkExtractor, MangaExtractor):

 class MangaparkChapterExtractor(MangaparkExtractor, ChapterExtractor):
     """Extractor for manga-chapters from mangapark.me"""
-    pattern = [(r"(?:https?://)?(?:www\.)?mangapark\.me(/manga/[^/]+"
-                r"/s\d+(?:/v\d+)?/c\d+[^/]*(?:/e\d+)?)")]
+    pattern = [(r"(?:https?://)?(?:www\.)?mangapark\.(me|net|com)"
+                r"(/manga/[^/]+/s\d+(?:/v\d+)?/c\d+[^/]*(?:/e\d+)?)")]
     test = [
         ("https://mangapark.me/manga/gosu/s2/c55", {
             "count": 50,
@@ -85,10 +94,13 @@ class MangaparkChapterExtractor(MangaparkExtractor, ChapterExtractor):
             "count": 15,
             "keyword": "8d5d1608d4182495ea43ad665e25b755b6468be2",
         }),
+        ("https://mangapark.net/manga/gosu/s2/c55", None),
+        ("https://mangapark.com/manga/gosu/s2/c55", None),
     ]

     def __init__(self, match):
-        self.path = match.group(1)
+        tld, self.path = match.groups()
+        self.root = self.root_fmt.format(tld)
         url = self.root + self.path + "?zoom=2"
         ChapterExtractor.__init__(self, url)
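Both URL patterns now capture the mirror TLD as group 1 and the manga/chapter
path as group 2, which __init__ recombines into the request URL. A rough
standalone illustration using the chapter pattern above (example URL taken
from the test list):

    import re

    pattern = (r"(?:https?://)?(?:www\.)?mangapark\.(me|net|com)"
               r"(/manga/[^/]+/s\d+(?:/v\d+)?/c\d+[^/]*(?:/e\d+)?)")

    m = re.match(pattern, "https://mangapark.net/manga/gosu/s2/c55")
    tld, path = m.groups()                     # ("net", "/manga/gosu/s2/c55")
    root = "https://mangapark.{}".format(tld)  # -> "https://mangapark.net"
    url = root + path + "?zoom=2"              # page the chapter extractor would fetch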
@@ -111,16 +123,13 @@ class MangaparkChapterExtractor(MangaparkExtractor, ChapterExtractor):

     def get_images(self, page):
         pos = 0
-        num = 0
         while True:
             url, pos = text.extract(page, ' target="_blank" href="', '"', pos)
             if not url:
                 return
-            num += 1
             width , pos = text.extract(page, ' width="', '"', pos)
             height, pos = text.extract(page, ' _heighth="', '"', pos)
             yield text.urljoin(self.root, url), {
-                "page": num,
                 "width": width,
                 "height": height,
             }
@@ -22,8 +22,6 @@ TRAVIS_SKIP = {

 # temporary issues, etc.
 BROKEN = {
-    "imgth",  # "temporary offline"
-    "mangahere",  # lots of 502 Bad Gateway responses
     "whatisthisimnotgoodwithcomputers",
 }