# -*- coding: utf-8 -*-

# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import os
import json
import hashlib
import platform
from . import config, extractor, downloader, text, output, exception
from .extractor.message import Message


class Job():
    """Base class for Job-types.

    A Job drives one extractor: it iterates the extractor's message
    stream and dispatches each message to the matching handle_* method,
    which subclasses override.
    """

    def __init__(self, url):
        """Find a suitable extractor for 'url'.

        Raises:
            exception.NoExtractorError: if no extractor accepts 'url'.
        """
        self.extractor = extractor.find(url)
        if self.extractor is None:
            raise exception.NoExtractorError(url)

    def run(self):
        """Execute or run the job"""
        for msg in self.extractor:
            if msg[0] == Message.Url:
                self.update_kwdict(msg[2])
                self.handle_url(msg[1], msg[2])

            elif msg[0] == Message.Directory:
                self.update_kwdict(msg[1])
                self.handle_directory(msg[1])

            elif msg[0] == Message.Queue:
                self.handle_queue(msg[1])

            elif msg[0] == Message.Headers:
                self.handle_headers(msg[1])

            elif msg[0] == Message.Cookies:
                self.handle_cookies(msg[1])

            elif msg[0] == Message.Version:
                if msg[1] != 1:
                    # bugfix: the original code 'raise'd a plain string,
                    # which is itself a TypeError in Python 3 (exceptions
                    # must derive from BaseException); raise a real one.
                    raise ValueError(
                        "unsupported message-version ({}, {})".format(
                            self.extractor.category, msg[1]
                        )
                    )
                # TODO: support for multiple message versions

    def handle_url(self, url, keywords):
        """Handle Message.Url"""

    def handle_directory(self, keywords):
        """Handle Message.Directory"""

    def handle_queue(self, url):
        """Handle Message.Queue"""

    def handle_headers(self, headers):
        """Handle Message.Headers"""

    def handle_cookies(self, cookies):
        """Handle Message.Cookies"""

    def update_kwdict(self, kwdict):
        """Add 'category' and 'subcategory' keywords"""
        kwdict["category"] = self.extractor.category
        kwdict["subcategory"] = self.extractor.subcategory


class DownloadJob(Job):
    """Download images into appropriate directory/filename locations"""

    def __init__(self, url):
        """Initialize download state and resolve format strings for 'url'."""
        Job.__init__(self, url)
        self.directory = self.get_base_directory()
        self.downloaders = {}
        self.queue = None
        self.printer = output.select()
        # config lookup path: ["extractor", <category>[, <subcategory>]]
        cfgpath = ["extractor", self.extractor.category]
        if self.extractor.subcategory:
            cfgpath.append(self.extractor.subcategory)
        self.filename_fmt = config.interpolate(
            cfgpath + ["filename_fmt"], default=self.extractor.filename_fmt
        )
        self.directory_fmt = config.interpolate(
            cfgpath + ["directory_fmt"], default=self.extractor.directory_fmt
        )

    def run(self):
        """Process all extractor messages, then any queued follow-up URLs."""
        Job.run(self)
        if not self.queue:
            return
        for queued_url in self.queue:
            try:
                DownloadJob(queued_url).run()
            except exception.NoExtractorError:
                # no extractor for this queued URL -> skip it
                pass

    def handle_url(self, url, keywords):
        """Download the resource specified in 'url'"""
        fname = text.clean_path(self.filename_fmt.format(**keywords))
        path = os.path.join(self.directory, fname)
        realpath = self.adjust_path(path)
        # skip files that were already downloaded
        if os.path.exists(realpath):
            self.printer.skip(path)
            return
        dl = self.get_downloader(url)
        self.printer.start(path)
        with open(realpath, "wb") as fileobj:
            tries = dl.download(url, fileobj)
        self.printer.success(path, tries)

    def handle_directory(self, keywords):
        """Set and create the target directory for downloads"""
        parts = []
        for fmt in self.directory_fmt:
            parts.append(text.clean_path(fmt.format(**keywords).strip()))
        self.directory = os.path.join(self.get_base_directory(), *parts)
        os.makedirs(self.adjust_path(self.directory), exist_ok=True)

    def handle_queue(self, url):
        """Add url to work-queue"""
        if self.queue is None:
            # lazily created on first queued URL
            self.queue = []
        self.queue.append(url)

    def handle_headers(self, headers):
        """Forward HTTP headers to the http downloader."""
        self.get_downloader("http:").set_headers(headers)

    def handle_cookies(self, cookies):
        """Forward cookies to the http downloader."""
        self.get_downloader("http:").set_cookies(cookies)

    def get_downloader(self, url):
        """Return, and possibly construct, a downloader suitable for 'url'"""
        scheme, sep, _ = url.partition(":")
        if not sep:
            # no scheme separator present -> assume plain http
            scheme = "http"
        if scheme == "https":
            scheme = "http"
        try:
            return self.downloaders[scheme]
        except KeyError:
            instance = downloader.find(scheme)(self.printer)
            self.downloaders[scheme] = instance
            return instance

    @staticmethod
    def get_base_directory():
        """Return the base-destination-directory for downloads"""
        base = config.get(("base-directory",), default=(".", "gallery-dl"))
        if not isinstance(base, str):
            base = os.path.join(*base)
        return os.path.expanduser(os.path.expandvars(base))

    @staticmethod
    def adjust_path(path, longpaths=platform.system() == "Windows"):
        """Enable longer-than-260-character paths on windows"""
        if not longpaths:
            return path
        return "\\\\?\\" + os.path.abspath(path)


class KeywordJob(Job):
    """Print available keywords"""

    def run(self):
        """Show keywords for the first directory and first file message."""
        for message in self.extractor:
            mtype = message[0]
            if mtype == Message.Url:
                print("Keywords for filenames:")
                self.update_kwdict(message[2])
                self.print_keywords(message[2])
                # one file's keywords are enough; stop here
                return
            elif mtype == Message.Directory:
                print("Keywords for directory names:")
                self.update_kwdict(message[1])
                self.print_keywords(message[1])

    @staticmethod
    def print_keywords(keywords):
        """Print key-value pairs with formatting"""
        # pad values so they all start in the same column
        offset = 1 + max(len(key) for key in keywords)
        for key, value in sorted(keywords.items()):
            print(key + ":" + " " * (offset - len(key)) + str(value))
        print()


class UrlJob(Job):
    """Print download urls"""

    def handle_url(self, url, _):
        """Write 'url' to stdout; keywords are ignored."""
        print(url)

    def handle_queue(self, url):
        """Recurse into a queued 'url' with a fresh UrlJob."""
        try:
            job = UrlJob(url)
            job.run()
        except exception.NoExtractorError:
            # unsupported URL -> silently skip
            pass


class HashJob(DownloadJob):
    """Generate SHA1 hashes for extractor results"""

    class HashIO():
        """Minimal file-like interface"""

        def __init__(self, hashobj):
            # the hashlib object fed by every write()
            self.hashobj = hashobj

        def write(self, content):
            """Update SHA1 hash"""
            self.hashobj.update(content)

    def __init__(self, url, content=False):
        """Set up three independent SHA1 digests for urls/keywords/content.

        'content' enables downloading each file and hashing its bytes.
        """
        DownloadJob.__init__(self, url)
        self.content = content
        self.hash_url = hashlib.sha1()
        self.hash_keyword = hashlib.sha1()
        self.hash_content = hashlib.sha1()
        if content:
            # downloads are written into the hash instead of a real file
            self.fileobj = self.HashIO(self.hash_content)

    def handle_url(self, url, keywords):
        """Fold a file message into the url/keyword/content digests."""
        self.update_url(url)
        self.update_keyword(keywords)
        self.update_content(url)

    def handle_directory(self, keywords):
        """Fold a directory message into the keyword digest."""
        self.update_keyword(keywords)

    def handle_queue(self, url):
        """Fold a queued URL into the url digest."""
        self.update_url(url)

    def update_url(self, url):
        """Update the URL hash"""
        self.hash_url.update(url.encode())

    def update_keyword(self, kwdict):
        """Update the keyword hash"""
        # sort_keys makes the serialization, and thus the hash, deterministic
        serialized = json.dumps(kwdict, sort_keys=True)
        self.hash_keyword.update(serialized.encode())

    def update_content(self, url):
        """Update the content hash"""
        if not self.content:
            return
        self.get_downloader(url).download(url, self.fileobj)