[luacious] get correct image URLs (fixes #33)

Instead of using thumbnail URLs and modifying them, the extractor now
goes through every single image page and gets the download URL from
there.
Mike Fährmann 2017-08-02 19:58:13 +02:00
parent 6950708e52
commit d443822fdb

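For context: the approach this commit removes took each thumbnail URL from the album listing and rewrote it into a full-size URL by stripping a trailing '.<width>x<height>' size suffix. A minimal sketch of that rewrite, with the regex copied from the removed code below and a made-up sample URL (per the commit title, URLs produced this way were not always correct, which is what #33 is about):

    # Sketch of the old thumbnail-URL rewrite; regex taken from the removed
    # code below, sample URL invented for illustration only.
    import re

    def thumbnail_to_full(thumb_url):
        """Strip a trailing '.WIDTHxHEIGHT' size suffix from a thumbnail URL."""
        return re.sub(r"\.\d+x\d+(\.[a-z]+)$", r"\1", thumb_url)

    print(thumbnail_to_full("https://example.org/123/sample.315x0.jpg"))
    # -> https://example.org/123/sample.jpg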

@@ -8,12 +8,11 @@
 
 """Extract images from https://luscious.net/"""
 
-from .common import Extractor, Message
+from .common import AsynchronousExtractor, Message
 from .. import text, util
-import re
 
 
-class LusciousAlbumExtractor(Extractor):
+class LusciousAlbumExtractor(AsynchronousExtractor):
     """Extractor for image albums from luscious.net"""
     category = "luscious"
     subcategory = "album"
@@ -25,32 +24,33 @@ class LusciousAlbumExtractor(Extractor):
         (("https://luscious.net/c/hentai_manga/albums/"
           "okinami-no-koigokoro_277031/view/"), {
             "url": "7e4984a271a1072ac6483e4228a045895aff86f3",
-            "keyword": "3b3d36b355fa6a1a6c24be374ae16e6e9b0c729e",
+            "keyword": "8533c72ff85578240cf7594eb617d907bebf87ab",
             "content": "b3a747a6464509440bd0ff6d1267e6959f8d6ff3",
         }),
-        ("https://luscious.net/albums/okinami-no-koigokoro_277031/", {
-            "url": "7e4984a271a1072ac6483e4228a045895aff86f3",
-            "keyword": "3b3d36b355fa6a1a6c24be374ae16e6e9b0c729e",
+        ("https://luscious.net/albums/virgin-killer-sweater_282582/", {
+            "url": "01e2d7dd6eecea0152610f2446a6b1d60519c8bd",
+            "keyword": "6c8750df7f38ff4e15cabc9a3a2e876b84a328d6",
         }),
+        ("https://luscious.net/albums/okinami-no-koigokoro_277031/", None),
     ]
 
     def __init__(self, match):
-        Extractor.__init__(self)
+        AsynchronousExtractor.__init__(self)
         self.gpart, self.gid = match.groups()
         self.section = "x"
 
     def items(self):
-        data = self.get_job_metadata()
+        url = "https://luscious.net/albums/" + self.gpart + "/"
+        page = self.request(url).text
+        data = self.get_metadata(page)
         yield Message.Version, 1
         yield Message.Directory, data
-        for url, image in self.get_images():
+        for url, image in self.get_images(page):
             image.update(data)
             yield Message.Url, url, image
 
-    def get_job_metadata(self):
+    def get_metadata(self, page):
         """Collect metadata for extractor-job"""
-        url = "https://luscious.net/albums/" + self.gpart + "/"
-        page = self.request(url).text
         data = text.extract_all(page, (
             ("title"   , '"og:title" content="', '"'),
             ("tags"    , '<meta name="keywords" content="', '"'),
@@ -61,35 +61,30 @@ class LusciousAlbumExtractor(Extractor):
             ("section" , '>', '<'),
             ("language", '<p>Language:', ' '),
         ), values={"gallery-id": self.gid})[0]
-        data["lang"] = util.language_to_code(data["language"])
-        data["artist"] = text.extract(data["tags"], "rtist: ", ",")[0] or ""
+        data["lang"] = util.language_to_code(data["language"] or "", None)
+        data["artist"] = text.extract(data["tags"], "rtist: ", ",")[0] or None
         self.section = data["com"]
         del data["com"]
         return data
 
-    def get_images(self):
+    def get_images(self, page):
         """Collect image-urls and -metadata"""
-        pnum = 1
-        inum = 1
-        apiurl = ("https://luscious.net/c/{}/pictures/album/{}/page/{{}}/.json"
-                  "/?style=default").format(self.section, self.gpart)
-        while True:
-            data = self.request(apiurl.format(pnum)).json()
-            page = data["html"]
-            pos = 0
-            while True:
-                imgid, pos = text.extract(page, 'container" id="', '"', pos)
-                if not imgid:
-                    break
-                url  , pos = text.extract(page, 'data-src="', '"', pos)
-                title, pos = text.extract(page, 'alt="', '"', pos)
-                yield re.sub(r"\.\d+x\d+(\.[a-z]+)$", r"\1", url), {
-                    "num": inum,
-                    "name": title,
-                    "extension": url[url.rfind(".")+1:],
-                    "image-id": imgid[8:]
-                }
-                inum += 1
-            if data["paginator_complete"]:
-                return
-            pnum += 1
+        extr = text.extract
+        num = 1
+        pos = page.find('<div class="album_cover_item">')
+        url = extr(page, '<a href="', '"', pos)[0]
+        while not url.endswith("/more_like_this/"):
+            page = self.request("https://luscious.net" + url).text
+            imgid, pos = extr(url , '/id/', '/')
+            url  , pos = extr(page, '<link rel="next" href="', '"')
+            name , pos = extr(page, '<h1 id="picture_title">', '</h1>', pos)
+            _    , pos = extr(page, '<ul class="image_option_icons">', '', pos)
+            iurl , pos = extr(page, '<li><a href="', '"', pos+100)
+
+            yield iurl, {
+                "num": num,
+                "name": name,
+                "extension": iurl.rpartition(".")[2],
+                "image-id": imgid,
+            }
+            num += 1
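
The new get_images() replaces that rewrite with a page walk: it starts at the album_cover_item link, follows each picture page's <link rel="next"> until that link points at the album's /more_like_this/ page, and takes the download link from the image_option_icons list on every page. A rough standalone version of that traversal, assuming the requests library; extract() and walk_album() are illustrative helpers, not gallery-dl APIs, and the HTML markers simply mirror the ones used in the diff above:

    # Standalone sketch of the page-walking strategy introduced above.
    # 'requests', extract() and walk_album() are assumptions for illustration.
    import requests

    def extract(txt, begin, end, pos=0):
        """Return (substring between 'begin' and 'end', position of 'end')."""
        try:
            first = txt.index(begin, pos) + len(begin)
            last = txt.index(end, first)
            return txt[first:last], last
        except ValueError:
            return None, pos

    def walk_album(album_url):
        """Yield (image-id, full-size URL) for every picture page of an album."""
        page = requests.get(album_url).text
        pos = page.find('<div class="album_cover_item">')
        url = extract(page, '<a href="', '"', pos)[0]
        while url and not url.endswith("/more_like_this/"):
            page = requests.get("https://luscious.net" + url).text
            # the image id is part of the current picture-page URL
            imgid = extract(url, '/id/', '/')[0]
            # the rel="next" link leads to the following picture page
            url, pos = extract(page, '<link rel="next" href="', '"')
            # the image_option_icons list links to the download URL
            pos = page.find('<ul class="image_option_icons">', pos)
            iurl = extract(page, '<li><a href="', '"', pos)[0]
            yield imgid, iurl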