# -*- coding: utf-8 -*-

# Copyright 2016-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://www.tumblr.com/"""
from .common import Extractor, Message
from .. import text, exception
from ..cache import memcache
import re


class TumblrExtractor(Extractor):
    """Base class for tumblr extractors.

    Fetches blog metadata and posts through :class:`TumblrAPI` and turns
    each post into download/queue messages.  Subclasses only need to
    implement :meth:`posts`.
    """
    category = "tumblr"
    directory_fmt = ["{category}", "{name}"]
    # {offset:?o//} renders as "oN" for photo sets and as "" when offset
    # is None (audio/video posts)
    filename_fmt = "{category}_{blog[name]}_{id}{offset:?o//}.{extension}"

    def __init__(self, match):
        """Initialize with the blog name captured by the URL pattern.

        match.group(1) is the subdomain part of "<user>.tumblr.com".
        """
        Extractor.__init__(self)
        self.user = match.group(1)
        self.api = TumblrAPI(self)

    def items(self):
        """Yield Message tuples for every media item of every post.

        Each post dict is reused and mutated in place while iterating, so
        keys like "extension", "offset" and "photo" reflect the most
        recently yielded item.
        """
        blog = self.api.info(self.user)
        yield Message.Version, 1
        yield Message.Directory, blog

        for post in self.posts():
            # attach blog metadata so it is available in format strings
            post["blog"] = blog

            # "trail" holds reblog history; drop it to keep metadata small
            if "trail" in post:
                del post["trail"]

            if "photos" in post:  # type: "photo"
                photos = post["photos"]
                del post["photos"]

                # offset starts at 1 so single photos get "o1" etc.
                for offset, photo in enumerate(photos, 1):
                    # flatten "original_size" (url/width/height) into the
                    # photo dict itself, then rewrite to the "_raw" URL
                    photo.update(photo["original_size"])
                    photo["url"] = self._original_url(photo["url"])
                    del photo["original_size"]
                    del photo["alt_sizes"]
                    # NOTE: if the URL had no ".", rpartition would yield
                    # the whole URL as extension; media URLs always have one
                    post["extension"] = photo["url"].rpartition(".")[2]
                    post["offset"] = offset
                    post["photo"] = photo
                    yield Message.Url, photo["url"], post

            if "audio_url" in post:  # type: "audio"
                # extension is unknown for audio; downloader determines it
                post["extension"] = None
                post["offset"] = None
                yield Message.Url, post["audio_url"], post

            if "video_url" in post:  # type: "video"
                post["extension"] = post["video_url"].rpartition(".")[2]
                post["offset"] = None
                yield Message.Url, post["video_url"], post

            if "description" in post:
                # queue any embedded media found in the HTML description
                for url in re.findall(
                        r' src="([^"]+)"', post["description"]):
                    yield Message.Queue, url, post

            if "permalink_url" in post:  # external video/audio
                yield Message.Queue, post["permalink_url"], post

            if "url" in post:  # type: "link"
                yield Message.Queue, post["url"], post

    def posts(self):
        """Return an iterable containing all relevant posts"""

    @staticmethod
    def _original_url(url):
        """Rewrite a media URL to its full-resolution "_raw" variant.

        Transforms
        "https://NN.media.tumblr.com/<hash>/tumblr_<id>_<size>.<ext>"
        into "http://data.tumblr.com/<hash>/tumblr_<id>_raw.<ext>";
        non-matching URLs are returned unchanged.
        """
        return re.sub(
            (r"https?://\d+\.media\.tumblr\.com/([0-9a-f]+)"
             r"/tumblr_([^/?&#.]+)_\d+\.([0-9a-z]+)"),
            r"http://data.tumblr.com/\1/tumblr_\2_raw.\3", url
        )


class TumblrUserExtractor(TumblrExtractor):
    """Extractor for all images from a tumblr-user"""
    subcategory = "user"
    pattern = [r"(?:https?://)?([^.]+)\.tumblr\.com(?:/page/\d+)?/?$"]
    test = [("http://demo.tumblr.com/", {
        "pattern": (r"https?://(?:$|"
                    r"\d+\.media\.tumblr\.com/tumblr_[^/_]+_1280\.jpg|"
                    r"w+\.tumblr\.com/audio_file/demo/\d+/tumblr_\w+)"),
        "count": 3,
    })]

    def posts(self):
        # no extra query parameters -> every published post of the blog
        params = {}
        return self.api.posts(self.user, params)


class TumblrPostExtractor(TumblrExtractor):
    """Extractor for images from a single post on tumblr"""
    subcategory = "post"
    pattern = [r"(?:https?://)?([^.]+)\.tumblr\.com/post/(\d+)"]
    test = [("http://demo.tumblr.com/post/459265350", {
        "pattern": r"https://\d+\.media\.tumblr\.com/tumblr_[^/_]+_1280.jpg",
        "count": 1,
    })]

    def __init__(self, match):
        # second capture group of the URL pattern is the numeric post ID
        TumblrExtractor.__init__(self, match)
        self.post_id = match.group(2)

    def posts(self):
        # restrict the API query to exactly one post
        return self.api.posts(self.user, {"id": self.post_id})


class TumblrTagExtractor(TumblrExtractor):
    """Extractor for images from a tumblr-user by tag"""
    subcategory = "tag"
    pattern = [r"(?:https?://)?([^.]+)\.tumblr\.com/tagged/(.+)"]
    test = [("http://demo.tumblr.com/tagged/Times%20Square", {
        "pattern": r"https://\d+\.media\.tumblr\.com/tumblr_[^/_]+_1280.jpg",
        "count": 1,
    })]

    def __init__(self, match):
        # the tag arrives percent-encoded in the URL; store it decoded
        TumblrExtractor.__init__(self, match)
        self.tag = text.unquote(match.group(2))

    def posts(self):
        # only posts carrying the requested tag
        return self.api.posts(self.user, {"tag": self.tag})


class TumblrAPI():
    """Minimal interface for the Tumblr API v2"""
    API_KEY = "O3hU2tMi5e4Qs5t3vezEi6L0qRORJ5y9oUpSGsrWu8iA3UCc3B"

    def __init__(self, extractor):
        # a user-configured key takes precedence over the built-in one
        self.api_key = extractor.config("api-key", TumblrAPI.API_KEY)
        # default pagination window; 50 is the API's maximum page size
        self.params = {"offset": 0, "limit": 50}
        self.extractor = extractor

    @memcache(keyarg=1)
    def info(self, blog):
        """Return general information about a blog"""
        return self._call(blog, "info", {})["blog"]

    def posts(self, blog, params):
        """Retrieve published posts"""
        # merge in offset/limit; these overwrite any caller-given values
        params.update(self.params)
        return self._pagination(blog, "posts", params)

    def _call(self, blog, endpoint, params):
        """Perform one API request and return its 'response' payload."""
        params["api_key"] = self.api_key
        url = "https://api.tumblr.com/v2/blog/{}.tumblr.com/{}".format(
            blog, endpoint)

        data = self.extractor.request(
            url, params=params, fatal=False).json()

        status = data["meta"]["status"]
        if status == 404:
            raise exception.NotFoundError("user")
        if status != 200:
            # unexpected failure: log the raw response and stop
            self.extractor.log.error(data)
            raise exception.StopExtraction()

        return data["response"]

    def _pagination(self, blog, endpoint, params):
        """Yield posts from every result page of 'endpoint'."""
        while True:
            page = self._call(blog, endpoint, params)
            yield from page["posts"]
            # advance the window; stop once we passed the reported total
            params["offset"] += params["limit"]
            if params["offset"] >= page["total_posts"]:
                break
|