# -*- coding: utf-8 -*-
# Copyright 2015-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://hitomi.la/"""
from .common import Extractor, Message
2017-03-28 13:12:44 +02:00
from .. import text, util
2015-10-29 17:53:29 +01:00
import string
2015-10-28 16:24:35 +01:00
2017-02-01 00:53:19 +01:00
class HitomiGalleryExtractor(Extractor):
    """Extractor for image galleries from hitomi.la"""
    category = "hitomi"
    subcategory = "gallery"
    directory_fmt = ["{category}", "{gallery_id} {title}"]
    filename_fmt = "{category}_{gallery_id}_{num:>03}_{name}.{extension}"
    pattern = [r"(?:https?://)?hitomi\.la/(?:galleries|reader)/(\d+)\.html"]
    test = [("https://hitomi.la/galleries/867789.html", {
        "url": "e42a47dfadda93e4bf37e82b1dc9ad29edfa9130",
        "keyword": "c007cd41229d727b2ced3b364350561444738351",
    })]

    def __init__(self, match):
        Extractor.__init__(self)
        self.gid = match.group(1)

    def items(self):
        # fetch the gallery page, then emit one Url message per image
        url = "https://hitomi.la/galleries/" + self.gid + ".html"
        page = self.request(url).text
        data = self.get_job_metadata(page)
        images = self.get_image_urls(page)
        data["count"] = len(images)
        yield Message.Version, 1
        yield Message.Directory, data
        for data["num"], url in enumerate(images, 1):
            yield Message.Url, url, text.nameext_from_url(url, data)
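
    # For a gallery with three images, items() yields a stream of message
    # tuples along these lines (URLs and metadata abbreviated; the values
    # are placeholders, not real data):
    #
    #   (Message.Version, 1)
    #   (Message.Directory, {"gallery_id": ..., "title": ..., "count": 3, ...})
    #   (Message.Url, "https://la.hitomi.la/galleries/...", {..., "num": 1})
    #   (Message.Url, "https://la.hitomi.la/galleries/...", {..., "num": 2})
    #   (Message.Url, "https://la.hitomi.la/galleries/...", {..., "num": 3})
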
    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        # group, type and series are optional; leave them empty by default
        group = ""
        gtype = ""
        series = ""
        # scan the page top to bottom, advancing 'pos' after each match
        _     , pos = text.extract(page, '<h1><a href="/reader/', '')
        title , pos = text.extract(page, '.html">', '</a>', pos)
        _     , pos = text.extract(page, '<li><a href="/artist/', '', pos)
        artist, pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<li><a href="/group/', '', pos)
        if test is not None:
            group , pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<a href="/type/', '', pos)
        if test is not None:
            gtype , pos = text.extract(page, '.html">', '</a>', pos)
        _     , pos = text.extract(page, '<td>Language</td>', '', pos)
        lang  , pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<a href="/series/', '', pos)
        if test is not None:
            series, pos = text.extract(page, '.html">', '</a>', pos)
        lang = lang.capitalize()
        return {
            "gallery_id": self.gid,
            "title": " ".join(title.split()),
            "artist": string.capwords(artist),
            "group": string.capwords(group),
            "type": gtype.strip().capitalize(),
            "lang": util.language_to_code(lang),
            "language": lang,
            "series": string.capwords(series),
        }
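
    # The text.extract(page, begin, end, pos) calls above each return a
    # (value, position) pair: the text found between the two delimiters
    # and the offset to resume scanning from, or (None, pos) when 'begin'
    # does not occur -- which is how the optional group/type/series blocks
    # are detected. A rough sketch of that behaviour (illustrative values,
    # not taken from the helper's actual test suite):
    #
    #   value, pos = text.extract('<b>foo</b>', '<b>', '</b>')
    #   # value == 'foo', pos points just past '</b>'
    #   value, pos = text.extract('<b>foo</b>', '<i>', '</i>')
    #   # value is None, pos is left unchanged
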
    @staticmethod
    def get_image_urls(page):
        """Extract and return a list of all image-urls"""
        return [
            "https://la.hitomi.la/galleries/" + urlpart
            for urlpart in text.extract_iter(
                page, "'//tn.hitomi.la/smalltn/", ".jpg',"
            )
        ]
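
    # The comprehension above rewrites every thumbnail reference of the
    # form '//tn.hitomi.la/smalltn/<part>.jpg' found in the page into the
    # corresponding full-size image URL
    # 'https://la.hitomi.la/galleries/<part>', where <part> stands for
    # whatever path component the page actually contains.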