# -*- coding: utf-8 -*-

# Copyright 2017-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from subreddits at https://www.reddit.com/"""

from .common import Extractor, Message
from .. import text, util, extractor, exception
from ..cache import cache
import time


class RedditExtractor(Extractor):
    """Base class for reddit extractors"""
    category = "reddit"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.api = RedditAPI(self)
        self.max_depth = int(self.config("recursion", 0))
        self._visited = set()

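    # Queue every extracted link for other extractors; links that point to
    # another reddit submission are instead followed recursively, up to
    # "recursion" levels deep.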
    def items(self):
        subre = RedditSubmissionExtractor.pattern
        submissions = self.submissions()
        depth = 0

        yield Message.Version, 1
        with extractor.blacklist(
                util.SPECIAL_EXTRACTORS, [RedditSubredditExtractor]):
            while True:
                extra = []
                for url, data in self._urls(submissions):
                    if url[0] == "#":
                        continue
                    if url[0] == "/":
                        url = "https://www.reddit.com" + url

                    match = subre.match(url)
                    if match:
                        extra.append(match.group(1))
                    else:
                        yield Message.Queue, text.unescape(url), data

                if not extra or depth == self.max_depth:
                    return
                depth += 1
                submissions = (
                    self.api.submission(sid) for sid in extra
                    if sid not in self._visited
                )

    def submissions(self):
        """Return an iterable containing all (submission, comments) tuples"""

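    # Yield (url, metadata) pairs from a submission's link, its selftext,
    # and the bodies of its comments.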
    def _urls(self, submissions):
        for submission, comments in submissions:
            self._visited.add(submission["id"])

            if not submission["is_self"]:
                yield submission["url"], submission

            for url in text.extract_iter(
                    submission["selftext_html"] or "", ' href="', '"'):
                yield url, submission

            for comment in comments:
                for url in text.extract_iter(
                        comment["body_html"] or "", ' href="', '"'):
                    yield url, comment


class RedditSubredditExtractor(RedditExtractor):
    """Extractor for images from subreddits on reddit.com"""
    subcategory = "subreddit"
    pattern = (r"(?:https?://)?(?:\w+\.)?reddit\.com/r/([^/?&#]+)"
               r"(/[a-z]+)?/?"
               r"(?:\?.*?(?:\bt=([a-z]+))?)?$")
    test = (
        ("https://www.reddit.com/r/lavaporn/"),
        ("https://www.reddit.com/r/lavaporn/top/?sort=top&t=month"),
        ("https://old.reddit.com/r/lavaporn/"),
        ("https://np.reddit.com/r/lavaporn/"),
        ("https://m.reddit.com/r/lavaporn/"),
    )

    def __init__(self, match):
        RedditExtractor.__init__(self, match)
        self.subreddit, self.order, self.timeframe = match.groups()

    def submissions(self):
        subreddit = self.subreddit + (self.order or "")
        params = {"t": self.timeframe} if self.timeframe else {}
        return self.api.submissions_subreddit(subreddit, params)


class RedditSubmissionExtractor(RedditExtractor):
    """Extractor for images from a submission on reddit.com"""
    subcategory = "submission"
    pattern = (r"(?:https?://)?(?:"
               r"(?:\w+\.)?reddit\.com/r/[^/?&#]+/comments|"
               r"redd\.it"
               r")/([a-z0-9]+)")
    test = (
        ("https://www.reddit.com/r/lavaporn/comments/2a00np/", {
            "pattern": r"https?://i\.imgur\.com/AaAUCgy\.jpg",
        }),
        ("https://old.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://np.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://m.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://redd.it/2a00np/"),
    )

    def __init__(self, match):
        RedditExtractor.__init__(self, match)
        self.submission_id = match.group(1)

    def submissions(self):
        return (self.api.submission(self.submission_id),)


class RedditImageExtractor(Extractor):
    """Extractor for reddit-hosted images"""
    category = "reddit"
    subcategory = "image"
    archive_fmt = "{filename}"
    pattern = (r"(?:https?://)?i\.redd(?:\.it|ituploads\.com)"
               r"/[^/?&#]+(?:\?[^#]*)?")
    test = (
        ("https://i.redd.it/upjtjcx2npzz.jpg", {
            "url": "0de614900feef103e580b632190458c0b62b641a",
            "content": "cc9a68cf286708d5ce23c68e79cd9cf7826db6a3",
        }),
        (("https://i.reddituploads.com/0f44f1b1fca2461f957c713d9592617d"
          "?fit=max&h=1536&w=1536&s=e96ce7846b3c8e1f921d2ce2671fb5e2"), {
            "url": "f24f25efcedaddeec802e46c60d77ef975dc52a5",
            "content": "541dbcc3ad77aa01ee21ca49843c5e382371fae7",
        }),
    )

    def items(self):
        data = text.nameext_from_url(self.url)
        yield Message.Version, 1
        yield Message.Directory, data
        yield Message.Url, self.url, data


class RedditAPI():
    """Minimal interface for the reddit API"""
    CLIENT_ID = "6N9uN0krSDE-ig"
    USER_AGENT = "Python:gallery-dl:0.8.4 (by /u/mikf1)"

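    # Configuration options read here: "comments" (max. number of comments
    # to fetch), "morecomments" (resolve "more comments" stubs),
    # "refresh-token", "client-id" and "user-agent".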
    def __init__(self, extractor):
        self.extractor = extractor
        self.comments = extractor.config("comments", 500)
        self.morecomments = extractor.config("morecomments", False)
        self.refresh_token = extractor.config("refresh-token")
        self.log = extractor.log

        client_id = extractor.config("client-id", self.CLIENT_ID)
        user_agent = extractor.config("user-agent", self.USER_AGENT)

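        # 'client-id' and 'user-agent' only work as a pair: if exactly one
        # of them was overridden, unset the client-id and warn instead of
        # sending a mismatched combination to reddit.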
        if (client_id == self.CLIENT_ID) ^ (user_agent == self.USER_AGENT):
            self.client_id = None
            self.log.warning(
                "Conflicting values for 'client-id' and 'user-agent': "
                "override either both or none of them.")
        else:
            self.client_id = client_id
            extractor.session.headers["User-Agent"] = user_agent

    def submission(self, submission_id):
        """Fetch the (submission, comments)-tuple for a submission id"""
        endpoint = "/comments/" + submission_id + "/.json"
        link_id = "t3_" + submission_id if self.morecomments else None
        submission, comments = self._call(endpoint, {"limit": self.comments})
        return (submission["data"]["children"][0]["data"],
                self._flatten(comments, link_id))

    def submissions_subreddit(self, subreddit, params):
        """Collect all (submission, comments)-tuples of a subreddit"""
        endpoint = "/r/" + subreddit + "/.json"
        params["limit"] = 100
        return self._pagination(endpoint, params)

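    # Additional comment IDs are requested in batches of 100; any further
    # "more" stubs returned by the endpoint are appended to the queue.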
    def morechildren(self, link_id, children):
        """Load additional comments from a submission"""
        endpoint = "/api/morechildren"
        params = {"link_id": link_id, "api_type": "json"}
        index, done = 0, False
        while not done:
            if len(children) - index < 100:
                done = True
            params["children"] = ",".join(children[index:index + 100])
            index += 100

            data = self._call(endpoint, params)["json"]
            for thing in data["data"]["things"]:
                if thing["kind"] == "more":
                    children.extend(thing["data"]["children"])
                else:
                    yield thing["data"]

    def authenticate(self):
        """Authenticate the application by requesting an access token"""
        access_token = self._authenticate_impl(self.refresh_token)
        self.extractor.session.headers["Authorization"] = access_token

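    # The access token is cached for one hour (keyed on the refresh token),
    # so _authenticate_impl only hits the token endpoint when it expires.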
    @cache(maxage=3600, keyarg=1)
    def _authenticate_impl(self, refresh_token=None):
        """Actual authenticate implementation"""
        url = "https://www.reddit.com/api/v1/access_token"
        if refresh_token:
            self.log.info("Refreshing private access token")
            data = {"grant_type": "refresh_token",
                    "refresh_token": refresh_token}
        else:
            self.log.info("Requesting public access token")
            data = {"grant_type": ("https://oauth.reddit.com/"
                                   "grants/installed_client"),
                    "device_id": "DO_NOT_TRACK_THIS_DEVICE"}
        response = self.extractor.request(
            url, method="POST", data=data, auth=(self.client_id, ""))
        if response.status_code != 200:
            raise exception.AuthenticationError('"{} ({})"'.format(
                response.json().get("message"), response.status_code))
        return "Bearer " + response.json()["access_token"]

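    # Perform an authenticated request against oauth.reddit.com and honor
    # reddit's rate limit via the x-ratelimit-* response headers.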
    def _call(self, endpoint, params):
        url = "https://oauth.reddit.com" + endpoint
        params["raw_json"] = 1
        self.authenticate()
        response = self.extractor.request(url, params=params, fatal=False)
        remaining = response.headers.get("x-ratelimit-remaining")
        if remaining and float(remaining) < 2:
            wait = int(response.headers["x-ratelimit-reset"])
            self.log.info("Waiting %d seconds for ratelimit reset", wait)
            time.sleep(wait)
        data = response.json()
        if "error" in data:
            if data["error"] == 403:
                raise exception.AuthorizationError()
            if data["error"] == 404:
                raise exception.NotFoundError()
            raise Exception(data["message"])
        return data

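    # Follow a listing endpoint page by page via its "after" cursor and
    # filter submissions by the configured id and date ranges.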
    def _pagination(self, endpoint, params, _empty=()):
        id_min = self._parse_id("id-min", 0)
        id_max = self._parse_id("id-max", 2147483647)
        date_min, date_max = self.extractor._get_date_min_max(0, 253402210800)

        while True:
            data = self._call(endpoint, params)["data"]

            for submission in data["children"]:
                submission = submission["data"]
                if (date_min <= submission["created_utc"] <= date_max and
                        id_min <= self._decode(submission["id"]) <= id_max):
                    if submission["num_comments"] and self.comments:
                        try:
                            yield self.submission(submission["id"])
                        except exception.AuthorizationError:
                            pass
                    else:
                        yield submission, _empty

            if not data["after"]:
                return
            params["after"] = data["after"]

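    # Breadth-first flattening of a comment tree; "more" stubs are
    # collected and, if a link_id is given, resolved through morechildren().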
    def _flatten(self, comments, link_id=None):
        extra = []
        queue = comments["data"]["children"]
        while queue:
            comment = queue.pop(0)
            if comment["kind"] == "more":
                if link_id:
                    extra.extend(comment["data"]["children"])
                continue
            comment = comment["data"]
            yield comment
            if comment["replies"]:
                queue += comment["replies"]["data"]["children"]
        if link_id and extra:
            yield from self.morechildren(link_id, extra)

    def _parse_id(self, key, default):
        sid = self.extractor.config(key)
        return self._decode(sid.rpartition("_")[2].lower()) if sid else default

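    # Reddit IDs are base-36 strings; decode them to integers for the
    # id-min/id-max comparisons in _pagination().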
    @staticmethod
    def _decode(sid):
        return util.bdecode(sid, "0123456789abcdefghijklmnopqrstuvwxyz")