[luscious] get correct image URLs (fixes #33)
Instead of using thumbnail URLs and modifying them, the extractor now goes through every single image page and takes its download URL from there.
parent 6950708e52
commit d443822fdb
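
For context, a minimal standalone sketch of the approach described in the commit message: instead of deriving full-size URLs by stripping the thumbnail size suffix (the re.sub call removed in the last hunk below), each image page is fetched and its download link is read directly. This is an illustration only, not part of the commit: it assumes the requests library and the page markup that the updated extractor parses; extract() is a simplified stand-in for gallery-dl's text.extract, and album_image_urls() is a hypothetical helper name.

# Sketch only -- markup markers are taken from the new get_images() below;
# everything else (requests, function names) is an assumption.
import requests

def extract(txt, begin, end, pos=0):
    """Return the text between 'begin' and 'end', plus the offset after 'end'."""
    try:
        first = txt.index(begin, pos) + len(begin)
        last = txt.index(end, first)
        return txt[first:last], last + len(end)
    except ValueError:
        return None, pos

def album_image_urls(album_url):
    """Yield full-size image URLs by visiting every image page of an album."""
    page = requests.get(album_url).text
    pos = page.find('<div class="album_cover_item">')
    url, _ = extract(page, '<a href="', '"', pos)                       # first image page
    while url and not url.endswith("/more_like_this/"):
        page = requests.get("https://luscious.net" + url).text
        url, pos = extract(page, '<link rel="next" href="', '"')        # next image page
        _, pos = extract(page, '<ul class="image_option_icons">', '', pos)
        iurl, pos = extract(page, '<li><a href="', '"', pos + 100)      # download link
        if iurl:
            yield iurl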
@@ -8,12 +8,11 @@
 
 """Extract images from https://luscious.net/"""
 
-from .common import Extractor, Message
+from .common import AsynchronousExtractor, Message
 from .. import text, util
-import re
 
 
-class LusciousAlbumExtractor(Extractor):
+class LusciousAlbumExtractor(AsynchronousExtractor):
     """Extractor for image albums from luscious.net"""
     category = "luscious"
     subcategory = "album"
@@ -25,32 +24,33 @@ class LusciousAlbumExtractor(Extractor):
         (("https://luscious.net/c/hentai_manga/albums/"
           "okinami-no-koigokoro_277031/view/"), {
             "url": "7e4984a271a1072ac6483e4228a045895aff86f3",
-            "keyword": "3b3d36b355fa6a1a6c24be374ae16e6e9b0c729e",
+            "keyword": "8533c72ff85578240cf7594eb617d907bebf87ab",
             "content": "b3a747a6464509440bd0ff6d1267e6959f8d6ff3",
         }),
-        ("https://luscious.net/albums/okinami-no-koigokoro_277031/", {
-            "url": "7e4984a271a1072ac6483e4228a045895aff86f3",
-            "keyword": "3b3d36b355fa6a1a6c24be374ae16e6e9b0c729e",
+        ("https://luscious.net/albums/virgin-killer-sweater_282582/", {
+            "url": "01e2d7dd6eecea0152610f2446a6b1d60519c8bd",
+            "keyword": "6c8750df7f38ff4e15cabc9a3a2e876b84a328d6",
         }),
+        ("https://luscious.net/albums/okinami-no-koigokoro_277031/", None),
     ]
 
     def __init__(self, match):
-        Extractor.__init__(self)
+        AsynchronousExtractor.__init__(self)
         self.gpart, self.gid = match.groups()
         self.section = "x"
 
     def items(self):
-        data = self.get_job_metadata()
+        url = "https://luscious.net/albums/" + self.gpart + "/"
+        page = self.request(url).text
+        data = self.get_metadata(page)
         yield Message.Version, 1
         yield Message.Directory, data
-        for url, image in self.get_images():
+        for url, image in self.get_images(page):
             image.update(data)
             yield Message.Url, url, image
 
-    def get_job_metadata(self):
+    def get_metadata(self, page):
         """Collect metadata for extractor-job"""
-        url = "https://luscious.net/albums/" + self.gpart + "/"
-        page = self.request(url).text
         data = text.extract_all(page, (
             ("title" , '"og:title" content="', '"'),
             ("tags" , '<meta name="keywords" content="', '"'),
@@ -61,35 +61,30 @@ class LusciousAlbumExtractor(Extractor):
             ("section" , '>', '<'),
             ("language", '<p>Language:', ' '),
         ), values={"gallery-id": self.gid})[0]
-        data["lang"] = util.language_to_code(data["language"])
-        data["artist"] = text.extract(data["tags"], "rtist: ", ",")[0] or ""
+        data["lang"] = util.language_to_code(data["language"] or "", None)
+        data["artist"] = text.extract(data["tags"], "rtist: ", ",")[0] or None
         self.section = data["com"]
         del data["com"]
         return data
 
-    def get_images(self):
+    def get_images(self, page):
         """Collect image-urls and -metadata"""
-        pnum = 1
-        inum = 1
-        apiurl = ("https://luscious.net/c/{}/pictures/album/{}/page/{{}}/.json"
-                  "/?style=default").format(self.section, self.gpart)
-        while True:
-            data = self.request(apiurl.format(pnum)).json()
-            page = data["html"]
-            pos = 0
-            while True:
-                imgid, pos = text.extract(page, 'container" id="', '"', pos)
-                if not imgid:
-                    break
-                url , pos = text.extract(page, 'data-src="', '"', pos)
-                title, pos = text.extract(page, 'alt="', '"', pos)
-                yield re.sub(r"\.\d+x\d+(\.[a-z]+)$", r"\1", url), {
-                    "num": inum,
-                    "name": title,
-                    "extension": url[url.rfind(".")+1:],
-                    "image-id": imgid[8:]
-                }
-                inum += 1
-            if data["paginator_complete"]:
-                return
-            pnum += 1
+        extr = text.extract
+        num = 1
+        pos = page.find('<div class="album_cover_item">')
+        url = extr(page, '<a href="', '"', pos)[0]
+        while not url.endswith("/more_like_this/"):
+            page = self.request("https://luscious.net" + url).text
+            imgid, pos = extr(url , '/id/', '/')
+            url , pos = extr(page, '<link rel="next" href="', '"')
+            name , pos = extr(page, '<h1 id="picture_title">', '</h1>', pos)
+            _ , pos = extr(page, '<ul class="image_option_icons">', '', pos)
+            iurl , pos = extr(page, '<li><a href="', '"', pos+100)
+
+            yield iurl, {
+                "num": num,
+                "name": name,
+                "extension": iurl.rpartition(".")[2],
+                "image-id": imgid,
+            }
+            num += 1