anime-downloader/anime_downloader/util.py

import logging
import sys
import shutil
import click
import subprocess
import platform
import re
import os
import errno
import time
import ast
import math

import coloredlogs
from tabulate import tabulate

from anime_downloader import session
from anime_downloader.sites import get_anime_class
from anime_downloader.const import desktop_headers

logger = logging.getLogger(__name__)

__all__ = [
    'check_in_path',
    'setup_logger',
    'format_search_results',
    'search',
    'split_anime',
    'parse_episode_range',
    'parse_ep_str',
    'print_episodeurl',
    'download_episode',
    'play_episode',
    'print_info',
]


def check_in_path(app):
    """
    Check whether the given app exists on the PATH.

    :param app: app name to look for
    :return: True if the app exists, False otherwise
    """
    return shutil.which(app) is not None


def setup_logger(log_level):
    if log_level == 'DEBUG':
        format = '%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s'

        # Also turn on verbose logging for the underlying HTTP stack.
        from http.client import HTTPConnection
        HTTPConnection.debuglevel = 1
        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
    else:
        format = click.style('anime', fg='green') + ': %(message)s'

    logger = logging.getLogger("anime_downloader")
    coloredlogs.install(level=log_level, fmt=format, logger=logger)


def format_search_results(search_results):
    headers = [
        'SlNo',
        'Title',
        'Meta',
    ]
    table = [(i+1, v.title, v.pretty_metadata)
             for i, v in enumerate(search_results)]
    table = tabulate(table, headers, tablefmt='psql')
    table = '\n'.join(table.split('\n')[::-1])
    return table
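
# Illustrative sketch of the returned table (the concrete titles are made up,
# not taken from this module). Because the line order is reversed, the header
# row and result no. 1 end up at the bottom, right above the selection prompt
# printed by search():
#
#     +--------+----------------+--------+
#     |      3 | Third result   | ...    |
#     |      2 | Second result  | ...    |
#     |      1 | First result   | ...    |
#     |--------+----------------+--------|
#     |   SlNo | Title          | Meta   |
#     +--------+----------------+--------+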


def search(query, provider):
    # Since this function writes to stdout, it should ideally live in cli.
    # But it is used in watch too. :(
    cls = get_anime_class(provider)
    search_results = cls.search(query)
    click.echo(format_search_results(search_results))

    if not search_results:
        logger.error('No such anime found. Please check the spelling.')
        sys.exit(1)

    val = click.prompt('Enter the anime no: ', type=int, default=1)
    try:
        url = search_results[val-1].url
        title = search_results[val-1].title
    except IndexError:
        logger.error('Only {} search results are available. '
                     'Please enter a number between 1 and {}.'.format(
                         len(search_results), len(search_results)))
        sys.exit(1)
    logger.info('Selected {}'.format(title))

    return url


def split_anime(anime, episode_range):
    try:
        start, end = [int(x) for x in episode_range.split(':')]
        anime = anime[start-1:end-1]
    except ValueError:
        # Only one episode specified
        episode = int(episode_range)
        anime = anime[episode-1:episode]

    return anime


def parse_episode_range(anime, episode_range):
    if not episode_range:
        episode_range = '1:'
    if episode_range.endswith(':'):
        episode_range += str(len(anime) + 1)
    if episode_range.startswith(':'):
        episode_range = '1' + episode_range
    return episode_range


def parse_ep_str(anime, grammar):
    episodes = []
    if not grammar:
        return split_anime(anime, parse_episode_range(anime, grammar))
    for episode_grammar in grammar.split(','):
        if ':' in episode_grammar:
            start, end = parse_episode_range(anime, episode_grammar).split(':')
            episode_grammar = '%d:%d' % (int(start), int(end) + 1)
            for episode in split_anime(anime, episode_grammar):
                episodes.append(episode)
        else:
            episodes.append(anime[int(episode_grammar) - 1])
    return episodes
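
# A quick sketch of how the grammar above expands (assuming `anime` is a
# sliceable collection of 12 episode objects; the values are illustrative
# only):
#
#     parse_ep_str(anime, '2:5')    # episodes 2, 3, 4 and 5
#     parse_ep_str(anime, '2:5,8')  # episodes 2-5 plus episode 8
#     parse_ep_str(anime, '3:')     # episode 3 through the last one
#     parse_ep_str(anime, '')       # every episode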


def print_episodeurl(episode):
    # Currently there is no known way to specify the referer in the URL
    # itself, so only the bare stream URL is printed. The alternative kept
    # below for reference would have appended it as a query parameter:
    # if episode.source().referer != '':
    #     print(episode.source().stream_url + "?referer=" + episode.source().referer)
    print(episode.source().stream_url)


def download_episode(episode, **kwargs):
    episode.download(**kwargs)
    print()


def play_episode(episode, *, player):
    p = subprocess.Popen([player, episode.source().stream_url])
    p.wait()


def print_info(version):
    logger.info('anime-downloader {}'.format(version))
    logger.debug('Platform: {}'.format(platform.platform()))
    logger.debug('Python {}'.format(platform.python_version()))


def get_json(url, params=None):
    logger.debug('API call URL: {} with params {!r}'.format(url, params))
    res = session.get_session().get(url, headers=desktop_headers, params=params)
    logger.debug('URL: {}'.format(res.url))
    data = res.json()
    logger.debug('Returned data: {}'.format(data))
    return data


def slugify(file_name):
    file_name = str(file_name).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', file_name)


def format_filename(filename, episode):
    # Width of the zero padding for the episode number, derived from the
    # total episode count (e.g. 3 digits for a 220-episode series).
    zeros_to_fill = math.floor(math.log10(episode._parent._len)) + 1
    rep_dict = {
        'anime_title': slugify(episode._parent.title),
        'ep_no': str(episode.ep_no).zfill(zeros_to_fill),
    }
    filename = filename.format(**rep_dict)
    return filename
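
# Illustrative values (assumed, not taken from this module): for an episode
# whose parent series is titled "Shingeki no Kyojin: Part 2", has 25 episodes
# in total, and whose ep_no is 7,
#
#     slugify('Shingeki no Kyojin: Part 2')
#     # -> 'Shingeki_no_Kyojin_Part_2'
#     format_filename('{anime_title}_{ep_no}', episode)
#     # -> 'Shingeki_no_Kyojin_Part_2_07'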


def format_command(cmd, episode, file_format, path):
    cmd_dict = {
        '{aria2}': 'aria2c {stream_url} -x 12 -s 12 -j 12 -k 10M -o '
                   '{file_format}.mp4 --continue=true --dir={download_dir}'
                   ' --stream-piece-selector=inorder --min-split-size=5M'
                   ' --referer={referer} --check-certificate=false'
    }
    rep_dict = {
        'stream_url': episode.source().stream_url,
        'file_format': file_format,
        'download_dir': os.path.abspath(path),
        'referer': episode.source().referer,
    }
    if cmd in cmd_dict:
        cmd = cmd_dict[cmd]
    cmd = cmd.split(' ')
    cmd = [c.format(**rep_dict) for c in cmd]
    cmd = [format_filename(c, episode) for c in cmd]
    return cmd
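
# Rough sketch of the expansion (the episode, stream URL and paths below are
# placeholders, not real values):
#
#     format_command('{aria2}', episode,
#                    file_format='{anime_title}__{ep_no}', path='downloads')
#     # -> ['aria2c', '<stream url>', '-x', '12', ..., '-o',
#     #     'Some_Title__05.mp4', '--continue=true',
#     #     '--dir=/abs/path/to/downloads', ...]
#
# Any other `cmd` string is treated as a raw template: only the
# {stream_url}/{file_format}/{download_dir}/{referer} placeholders and the
# filename fields are substituted before the tokens are returned.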


def external_download(cmd, episode, file_format, path=''):
    logger.debug('cmd: ' + cmd)
    logger.debug('episode: {!r}'.format(episode))
    logger.debug('file format: ' + file_format)

    cmd = format_command(cmd, episode, file_format, path=path)
    logger.debug('formatted cmd: ' + ' '.join(cmd))

    p = subprocess.Popen(cmd)
    return_code = p.wait()
    if return_code != 0:
        # Sleep for a while to make sure downloader exits correctly
        time.sleep(2)
        sys.exit(1)


def make_dir(path):
    # Roughly equivalent to os.makedirs(path, exist_ok=True): ignore the
    # error if the directory already exists, re-raise anything else.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


class ClickListOption(click.Option):
    def type_cast_value(self, ctx, value):
        try:
            if isinstance(value, list):
                return value
            return ast.literal_eval(value)
        except Exception:
            raise click.BadParameter(value)
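
# A minimal usage sketch (assumed, not part of this module): letting a click
# option accept a Python-literal list from the command line, e.g.
# --quality "['720p', '1080p']".
#
#     @click.command()
#     @click.option('--quality', cls=ClickListOption, default='[]')
#     def cli(quality):
#         print(quality)  # -> ['720p', '1080p']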