chore: logging replaced with logger, style

master
Vishnunarayan K I 2019-03-22 19:17:00 +05:30
parent 875e9753aa
commit d11e89591a
20 changed files with 153 additions and 140 deletions
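The change running through nearly every file below is the switch from calling the root `logging` module directly (`logging.info(...)`, `logging.debug(...)`) to a per-module named logger. A minimal sketch of the pattern being applied, with a hypothetical function for illustration:

```python
import logging

# One logger per module, named after the module it lives in, so output
# can be filtered or silenced per component instead of only at the root.
logger = logging.getLogger(__name__)


def fetch(url):
    # Hypothetical example call; messages now carry the module name
    # whenever the handler's format string includes %(name)s.
    logger.debug('Fetching %s', url)
```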

View File

@@ -4,7 +4,7 @@ verify_ssl = true
name = "pypi"
[packages]
anime-downloader = {editable = true, path = ".", extras = ["cloudflare"]}
anime-downloader = {editable = true, path = "."}
[dev-packages]
twine = "*"

Pipfile.lock (generated)
View File

@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "b9e8edea14fa675cf93739cb8693974b6265707ca166796e71a56c0c724c9471"
"sha256": "01fabf42a826553591227799f7bee2447edf81a6bec93559917576e2b1018c8d"
},
"pipfile-spec": 6,
"requires": {},
@@ -16,9 +16,6 @@
"default": {
"anime-downloader": {
"editable": true,
"extras": [
"cloudflare"
],
"path": "."
},
"beautifulsoup4": {
@@ -105,6 +102,12 @@
],
"version": "==1.8"
},
"tabulate": {
"hashes": [
"sha256:8af07a39377cee1103a5c8b3330a421c2d99b9141e9cc5ddd2e3263fea416943"
],
"version": "==0.8.3"
},
"urllib3": {
"hashes": [
"sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39",

View File

@@ -87,7 +87,7 @@ def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
anime = cls(anime_url, quality=quality,
fallback_qualities=fallback_qualities)
logging.info('Found anime: {}'.format(anime.title))
logger.info('Found anime: {}'.format(anime.title))
animes = util.parse_ep_str(anime, episode_range)
@@ -99,7 +99,7 @@ def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
skip_download = True
if download_dir and not skip_download:
logging.info('Downloading to {}'.format(os.path.abspath(download_dir)))
logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))
for episode in animes:
if url:
@@ -110,7 +110,7 @@ def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
if not skip_download:
if external_downloader:
logging.info('Downloading episode {} of {}'.format(
logger.info('Downloading episode {} of {}'.format(
episode.ep_no, anime.title)
)
util.external_download(external_downloader, episode,

View File

@@ -90,14 +90,14 @@ def command(anime_name, new, update_all, _list, quality, remove,
if anime_name:
anime = watcher.get(anime_name)
if not anime:
logging.error(
logger.error(
"Couldn't find '{}'."
"Use a better search term.".format(anime_name))
sys.exit(1)
anime.quality = quality
logging.info('Found {}'.format(anime.title))
logger.info('Found {}'.format(anime.title))
watch_anime(watcher, anime)
@@ -186,14 +186,14 @@ def list_animes(watcher, quality, download_dir):
def watch_anime(watcher, anime):
to_watch = anime[anime.episodes_done:]
logging.debug('Sliced epiosdes: {}'.format(to_watch._episode_urls))
logger.debug('Sliced epiosdes: {}'.format(to_watch._episode_urls))
while anime.episodes_done < len(anime):
episode = anime[anime.episodes_done]
anime.episodes_done += 1
watcher.update(anime)
for tries in range(5):
logging.info(
logger.info(
'Playing episode {}'.format(episode.ep_no)
)
try:
@@ -201,7 +201,7 @@ def watch_anime(watcher, anime):
except Exception as e:
anime.episodes_done -= 1
watcher.update(anime)
logging.error(str(e))
logger.error(str(e))
sys.exit(1)
returncode = player.play()
@@ -209,7 +209,7 @@ def watch_anime(watcher, anime):
if returncode == player.STOP:
sys.exit(0)
elif returncode == player.CONNECT_ERR:
logging.warning("Couldn't connect. Retrying. "
logger.warning("Couldn't connect. Retrying. "
"Attempt #{}".format(tries+1))
continue
elif returncode == player.PREV:

View File

@@ -6,10 +6,12 @@ import sys
from anime_downloader import util
from anime_downloader import session
logger = logging.getLogger(__name__)
class BaseDownloader:
def __init__(self, source, path, force, range_size=None):
logging.info(path)
logger.info(path)
self.url = source.stream_url
self.referer = source.referer
@@ -21,12 +23,13 @@ class BaseDownloader:
self.chunksize = 16384
# Added Referer Header as kwik needd it.
r = session.get_session().get(self.url, headers={'referer': self.referer}, stream=True)
r = session.get_session().get(
self.url, headers={'referer': self.referer}, stream=True)
self.total_size = int(r.headers['Content-length'])
if os.path.exists(path):
if abs(os.stat(path).st_size - self.total_size) < 10 and not force:
logging.warning('File already downloaded. Skipping download.')
logger.warning('File already downloaded. Skipping download.')
return
else:
os.remove(path)
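The downloader hunk above also keeps the skip-if-already-downloaded check that compares the reported `Content-length` against the file on disk. A rough standalone sketch of that check, assuming a plain `requests` session instead of the project's session wrapper:

```python
import os
import requests

def already_downloaded(url, path, referer, force=False):
    # Kwik (and some other hosts) only serve the stream with a Referer header.
    r = requests.get(url, headers={'referer': referer}, stream=True)
    total_size = int(r.headers['Content-length'])

    if os.path.exists(path):
        # Within ~10 bytes of the expected size: treat the file as complete.
        if abs(os.stat(path).st_size - total_size) < 10 and not force:
            return True
        os.remove(path)  # partial download, start over
    return False
```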

View File

@@ -1,11 +1,9 @@
import logging
import re
from bs4 import BeautifulSoup
import requests
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
from anime_downloader.sites import helpers
session = session.get_session()
logger = logging.getLogger(__name__)
class Kwik(BaseExtractor):
@@ -14,6 +12,7 @@ class Kwik(BaseExtractor):
and the kwik video stream when refered through the corresponding
kwik video page.
'''
def _get_data(self):
# Need a javascript deobsufication api/python, so someone smarter
@@ -28,17 +27,17 @@ class Kwik(BaseExtractor):
download_url = self.url.replace('kwik.cx/e/', 'kwik.cx/f/')
kwik_text = session.get(download_url, headers={'referer': download_url }).text
kwik_text = helpers.get(download_url, referer=download_url).text
post_url, token = source_parts_re.search(kwik_text).group(1, 2)
stream_url = session.post(post_url,
headers = {'referer': download_url},
stream_url = helpers.post(post_url,
referer=download_url,
data={'_token': token},
allow_redirects=False).headers['Location']
title = stream_url.rsplit('/', 1)[-1].rsplit('.', 1)[0]
logging.debug('Stream URL: %s' % stream_url)
logger.debug('Stream URL: %s' % stream_url)
return {
'stream_url': stream_url,
'meta': {
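The rewritten Kwik extractor grabs the stream URL out of the redirect instead of following it. The same idea with plain `requests`, as a hedged sketch (the `_token` field and referer handling come straight from the diff above):

```python
import requests

def resolve_stream(post_url, token, download_url):
    # Don't follow the redirect: the Location header itself is the stream URL.
    resp = requests.post(
        post_url,
        headers={'referer': download_url},
        data={'_token': token},
        allow_redirects=False,
    )
    return resp.headers['Location']
```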

View File

@@ -1,11 +1,10 @@
import logging
import re
from bs4 import BeautifulSoup
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
from anime_downloader.sites import helpers
session = session.get_session()
logger = logging.getLogger(__name__)
class MP4Upload(BaseExtractor):
@@ -14,6 +13,7 @@ class MP4Upload(BaseExtractor):
albeit imperfectly as mp4upload doesn't place full title on the main
page of whichever video you are dealing with.
'''
def _get_data(self):
# Extract the important bits from the embed page, with thanks to the
# code I saw from github user py7hon in his/her mp4upload-direct
@@ -22,26 +22,26 @@ class MP4Upload(BaseExtractor):
r'.*?100\|(.*?)\|.*?\|video\|(.*?)\|(\d+)\|.*?',
re.DOTALL)
mp4u_embed = session.get(self.url).text
mp4u_embed = helpers.get(self.url).text
domain, video_id, protocol = source_parts_re.match(mp4u_embed).groups()
logging.debug('Domain: %s, Video ID: %s, Protocol: %s' %
logger.debug('Domain: %s, Video ID: %s, Protocol: %s' %
(domain, video_id, protocol))
url = self.url.replace('embed-', '')
# Return to non-embed page to collect title
mp4u_page = BeautifulSoup(session.get(url).text, 'html.parser')
mp4u_page = helpers.soupify(helpers.get(url).text)
title = mp4u_page.find('span', {'class': 'dfilename'}).text
title = title[:title.rfind('_')][:title.rfind('.')].replace(' ', '_')
logging.debug('Title is %s' % title)
logger.debug('Title is %s' % title)
# Create the stream url
stream_url = 'https://{}.mp4upload.com:{}/d/{}/{}.mp4'
stream_url = stream_url.format(domain, protocol, video_id, title)
logging.debug('Stream URL: %s' % stream_url)
logger.debug('Stream URL: %s' % stream_url)
return {
'stream_url': stream_url,

View File

@@ -3,32 +3,28 @@ import re
from bs4 import BeautifulSoup
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
from anime_downloader.sites import helpers
session = session.get_session()
logger = logging.getLogger(__name__)
class RapidVideo(BaseExtractor):
def _get_data(self):
url = self.url + '&q=' + self.quality
logging.debug('Calling Rapid url: {}'.format(url))
logger.debug('Calling Rapid url: {}'.format(url))
headers = self.headers
headers['referer'] = url
try:
r = session.get(url, headers=headers)
# This is a fix for new rapidvideo logic
# It will return OK for a get request
# even if there is a click button
# This will make sure a source link is present
soup = BeautifulSoup(r.text, 'html.parser')
get_source(soup)
except:
r = session.post(url, {
'confirm.x': 12,
'confirm.y': 12,
r = helpers.get(url, headers=headers)
except Exception as e:
logger.debug('Exception happened when getting normally')
logger.debug(e)
r = helpers.post(url, {
'cursor.x': 12,
'cursor.y': 12,
'block': 1,
}, headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
soup = helpers.soupify(r)
# TODO: Make these a different function. Can be reused in other classes
# too
@@ -46,7 +42,7 @@ class RapidVideo(BaseExtractor):
except Exception as e:
title = ''
thumbnail = ''
logging.debug(e)
logger.debug(e)
pass
return {
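The RapidVideo change above is a GET-first, POST-on-failure flow: if the plain request fails (for instance when a confirmation page is served instead of the video), it is retried as a POST that simulates the confirm click. A rough `requests`-only sketch of that pattern (field names are copied from the diff; error handling is simplified):

```python
import requests

def fetch_embed(url, headers):
    try:
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        return resp.text
    except requests.RequestException:
        # Fall back to "clicking" the confirm button at (12, 12).
        resp = requests.post(
            url,
            data={'cursor.x': 12, 'cursor.y': 12, 'block': 1},
            headers=headers,
        )
        return resp.text
```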

View File

@@ -4,6 +4,8 @@ import os
import subprocess
import logging
logger = logging.getLogger(__name__)
class BasePlayer(metaclass=ABCMeta):
name = ''
@@ -43,7 +45,7 @@ class BasePlayer(metaclass=ABCMeta):
def play(self):
cmd = [self._get_executable()] + self.args
logging.debug('Command: {}'.format(cmd))
logger.debug('Command: {}'.format(cmd))
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
returncode = self.process.wait()

View File

@@ -1,9 +1,6 @@
"""
anime.py contains the base classes required for other anime classes.
"""
import requests
from bs4 import BeautifulSoup
import os
import logging
import copy
@@ -14,6 +11,8 @@ from anime_downloader import util
from anime_downloader.extractors import get_extractor
from anime_downloader.downloader import get_downloader
logger = logging.getLogger(__name__)
class Anime:
"""
@@ -66,7 +65,8 @@ class Anime:
fallback_qualities=['720p', '480p', '360p'],
_skip_online_data=False):
self.url = url
self._fallback_qualities = [q for q in fallback_qualities if q in self.QUALITIES]
self._fallback_qualities = [
q for q in fallback_qualities if q in self.QUALITIES]
if quality in self.QUALITIES:
self.quality = quality
@@ -75,7 +75,7 @@
'Quality {0} not found in {1}'.format(quality, self.QUALITIES))
if not _skip_online_data:
logging.info('Extracting episode info from page')
logger.info('Extracting episode info from page')
self._episode_urls = self.get_data()
self._len = len(self._episode_urls)
@@ -138,12 +138,12 @@ class Anime:
try:
self._scrape_metadata()
except Exception as e:
logging.debug('Metadata scraping error: {}'.format(e))
logger.debug('Metadata scraping error: {}'.format(e))
self._episode_urls = self._scrape_episodes()
self._len = len(self._episode_urls)
logging.debug('EPISODE IDS: length: {}, ids: {}'.format(
logger.debug('EPISODE IDS: length: {}, ids: {}'.format(
self._len, self._episode_urls))
self._episode_urls = [(no+1, id) for no, id in
@@ -225,7 +225,7 @@ class AnimeEpisode:
self._sources = None
self.pretty_title = '{}-{}'.format(self._parent.title, self.ep_no)
logging.debug("Extracting stream info of id: {}".format(self.url))
logger.debug("Extracting stream info of id: {}".format(self.url))
def try_data():
self.get_data()
@@ -241,7 +241,7 @@ class AnimeEpisode:
except ValueError:
pass
for quality in qualities:
logging.warning('Quality {} not found. Trying {}.'.format(
logger.warning('Quality {} not found. Trying {}.'.format(
self.quality, quality))
self.quality = quality
try:
@@ -275,7 +275,7 @@ class AnimeEpisode:
def get_data(self):
self._sources = self._get_sources()
logging.debug('Sources : '.format(self._sources))
logger.debug('Sources : '.format(self._sources))
def _get_sources(self):
raise NotImplementedError
@@ -283,7 +283,7 @@ class AnimeEpisode:
def download(self, force=False, path=None,
format='{anime_title}_{ep_no}', range_size=None):
# TODO: Remove this shit
logging.info('Downloading {}'.format(self.pretty_title))
logger.info('Downloading {}'.format(self.pretty_title))
if format:
file_name = util.format_filename(format, self)+'.mp4'
@@ -340,3 +340,12 @@ class SearchResult:
def __str__(self):
return self.title
@property
def pretty_metadata(self):
"""
pretty_metadata is the prettified version of metadata
"""
if self.meta:
return ' | '.join(val for _, val in self.meta.items())
return ''
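The new `pretty_metadata` property simply joins the values of the scraped `meta` dict; `util.format_search_results` further down relies on it for the table's Meta column. A quick illustration with made-up values:

```python
meta = {'status': 'Completed', 'eps': '24'}  # invented example metadata
print(' | '.join(val for _, val in meta.items()))
# Completed | 24
```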

View File

@@ -1,13 +1,12 @@
import cfscrape
import logging
import re
from anime_downloader.sites.anime import AnimeEpisode, SearchResult, Anime
from anime_downloader.sites.exceptions import NotFoundError
from anime_downloader.sites import helpers
from anime_downloader import util
from anime_downloader.session import get_session
scraper = get_session(cfscrape.create_scraper())
logger = logging.getLogger(__name__)
class AnimePaheEpisode(AnimeEpisode):
@@ -35,7 +34,7 @@ class AnimePaheEpisode(AnimeEpisode):
supported_servers = ['kwik','mp4upload','rapidvideo']
episode_id = self.url.rsplit('/', 1)[-1]
sourcetext = scraper.get(self.url).text
sourcetext = helpers.get(self.url, cf=True).text
sources = []
serverlist = re.findall(r'data-provider="([^"]+)', sourcetext)
for server in serverlist:
@@ -78,7 +77,7 @@ class AnimePahe(Anime):
poster=search_result['image']
)
logging.debug(search_result_info)
logger.debug(search_result_info)
results.append(search_result_info)
return results
@@ -87,7 +86,7 @@ class AnimePahe(Anime):
# Extract anime id from page, using this shoddy approach as
# I have neglected my regular expression skills to the point of
# disappointment
resp = scraper.get(self.url).text
resp = helpers.get(self.url, cf=True).text
first_search = '$.getJSON(\'/api?m=release&id='
last_search = '&l=\' + limit + \'&sort=\' + sort + \'&page=\' + page'

View File

@@ -3,6 +3,8 @@ import logging
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class GogoanimeEpisode(AnimeEpisode, sitename='gogoanime'):
QUALITIES = ['360p', '480p', '720p']
@@ -21,12 +23,13 @@ class GogoanimeEpisode(AnimeEpisode, sitename='gogoanime'):
extractor_class = 'mp4upload'
elif extractor_class != 'rapidvideo':
continue
logging.debug('%s: %s' % (extractor_class, source_url))
logger.debug('%s: %s' % (extractor_class, source_url))
extractors_url.append((extractor_class, source_url,))
return extractors_url
class GogoAnime(Anime, sitename='gogoanime'):
sitename = 'gogoanime'
QUALITIES = ['360p', '480p', '720p']
_episode_list_url = 'https://www2.gogoanime.se//load-list-episode'
_search_api_url = 'https://api.watchanime.cc/site/loadAjaxSearch'
@@ -50,7 +53,7 @@ class GogoAnime(Anime, sitename='gogoanime'):
url=element.attrs['href'],
poster=''
)
logging.debug(search_result)
logger.debug(search_result)
search_results.append(search_result)
return search_results

View File

@@ -50,7 +50,6 @@ def setup(func):
url,
headers=default_headers,
**kwargs)
res = sess.get(url, headers=default_headers, **kwargs)
res.raise_for_status()
# logger.debug(res.text)
if logger.getEffectiveLevel() == logging.DEBUG:
@@ -70,6 +69,7 @@ def get(url: str,
'''
@setup
def post(url: str,
cf: bool = True,
referer: str = None,
@@ -102,7 +102,7 @@ def soupify(res):
def _log_response_body(res):
import json
file = tempfile.mktemp(dir=temp_dir)
logging.debug(file)
logger.debug(file)
with open(file, 'w') as f:
f.write(res.text)
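These hunks touch the `helpers` request wrappers that the extractor and site changes in this commit migrate to: a decorator-based `get`/`post` pair that injects default headers, accepts `referer` and `cf` keyword arguments (the latter selecting a cloudflare-capable session), calls `raise_for_status()`, and dumps response bodies to a temp file at DEBUG level. The full implementation isn't shown here; a compressed sketch of the shape it appears to have, with everything not visible in the diff treated as an assumption:

```python
import logging
import requests
import cfscrape  # cloudflare-capable session, now a hard dependency
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)
default_headers = {'User-Agent': 'Mozilla/5.0'}  # placeholder value


def _request(method, url, cf=False, referer=None, **kwargs):
    sess = cfscrape.create_scraper() if cf else requests.Session()
    headers = dict(default_headers)
    if referer:
        headers['referer'] = referer
    headers.update(kwargs.pop('headers', {}))
    res = sess.request(method, url, headers=headers, **kwargs)
    res.raise_for_status()
    return res


def get(url, cf=False, referer=None, **kwargs):
    return _request('GET', url, cf=cf, referer=referer, **kwargs)


def post(url, cf=True, referer=None, **kwargs):
    return _request('POST', url, cf=cf, referer=referer, **kwargs)


def soupify(res):
    # The diff calls this with both Response objects and plain text.
    text = res.text if hasattr(res, 'text') else res
    return BeautifulSoup(text, 'html.parser')
```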

View File

@@ -5,6 +5,8 @@ from anime_downloader.sites.anime import AnimeEpisode, SearchResult, Anime
from anime_downloader.sites import helpers
from anime_downloader.sites.exceptions import NotFoundError
logger = logging.getLogger(__name__)
class KissanimeEpisode(AnimeEpisode, sitename='kissanime'):
QUALITIES = ['360p', '480p', '720p', '1080p']
@@ -55,7 +57,7 @@ class KissAnime(Anime, sitename='kissanime'):
url='https://kissanime.ru'+res.find('a').get('href'),
poster='',
)
logging.debug(res)
logger.debug(res)
ret.append(res)
return ret
@@ -64,12 +66,12 @@ class KissAnime(Anime, sitename='kissanime'):
soup = helpers.soupify(helpers.get(self.url, cf=True))
ret = ['http://kissanime.ru'+str(a['href'])
for a in soup.select('table.listing a')]
logging.debug('Unfiltered episodes : {}'.format(ret))
logger.debug('Unfiltered episodes : {}'.format(ret))
filter_list = ['opening', 'ending', 'special', 'recap']
ret = list(filter(
lambda x: not any(s in x.lower() for s in filter_list), ret
))
logging.debug('Filtered episodes : {}'.format(ret))
logger.debug('Filtered episodes : {}'.format(ret))
if ret == []:
err = 'No episodes found in url "{}"'.format(self.url)

View File

@@ -6,6 +6,9 @@ from anime_downloader.sites.exceptions import NotFoundError
import logging
logger = logging.getLogger(__name__)
class KisscartoonEpisode(AnimeEpisode, sitename='kisscartoon'):
_base_url = ''
VERIFY_HUMAN = False
@@ -47,7 +50,7 @@ class KissCartoon(KissAnime, sitename='kisscartoon'):
url=res.get('href'),
poster='',
)
logging.debug(res)
logger.debug(res)
ret.append(res)
return ret

View File

@@ -1,15 +1,11 @@
import json
import cfscrape
import logging
from bs4 import BeautifulSoup
from anime_downloader import util
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
from anime_downloader.const import desktop_headers
from anime_downloader.session import get_session
scraper = get_session(cfscrape.create_scraper())
logger = logging.getLogger(__name__)
class MasteraniEpisode(AnimeEpisode, sitename='masterani'):
@@ -41,7 +37,7 @@ class MasteraniEpisode(AnimeEpisode, sitename='masterani'):
sources = ['stream.moe', 'rapidvideo', 'mp4upload']
ret = [(name, url) for name, url in ret if name in sources]
logging.debug(ret)
logger.debug(ret)
return ret
@@ -65,7 +61,7 @@ class Masterani(Anime, sitename='masterani'):
item['poster']['path'], item['poster']['file']
)
)
logging.debug(s)
logger.debug(s)
ret.append(s)
return ret
@@ -77,7 +73,7 @@ class Masterani(Anime, sitename='masterani'):
try:
res = res.json()
except Exception:
logging.debug('Error with html {}'.format(res.text))
logger.debug('Error with html {}'.format(res.text))
raise
base_url = 'https://www.masterani.me/anime/watch/{}'.format(
res['info']['slug']) + '/'

View File

@@ -1,4 +1,3 @@
from anime_downloader import session
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites.exceptions import NotFoundError, AnimeDLError
from anime_downloader.sites import helpers
@@ -11,7 +10,7 @@ import logging
__all__ = ['NineAnimeEpisode', 'NineAnime']
session = session.get_session()
logger = logging.getLogger(__name__)
class NineAnimeEpisode(AnimeEpisode, sitename='9anime'):
@@ -63,7 +62,7 @@ class NineAnime(Anime, sitename='9anime'):
def search(cls, query):
r = helpers.get('https://www4.9anime.to/search?', params={'keyword': query}, headers=desktop_headers)
logging.debug(r.url)
logger.debug(r.url)
soup = BeautifulSoup(r.text, 'html.parser')
@@ -72,7 +71,7 @@ class NineAnime(Anime, sitename='9anime'):
ret = []
logging.debug('Search results')
logger.debug('Search results')
for item in search_results:
s = SearchResult(
@@ -85,7 +84,7 @@ class NineAnime(Anime, sitename='9anime'):
for item in m.find_all('div'):
meta[item.attrs['class'][0]] = item.text.strip()
s.meta = meta
logging.debug(s)
logger.debug(s)
ret.append(s)
return ret
@@ -94,7 +93,7 @@ class NineAnime(Anime, sitename='9anime'):
soup = helpers.soupify(helpers.get(self.url))
ts = soup.find('html')['data-ts']
NineAnimeEpisode.ts = ts
logging.debug('data-ts: {}'.format(ts))
logger.debug('data-ts: {}'.format(ts))
# TODO: !HACK!
# The below code should be refractored whenever I'm not lazy.

View File

@@ -11,6 +11,7 @@ import time
import ast
import math
import coloredlogs
from tabulate import tabulate
from anime_downloader import session
from anime_downloader.sites import get_anime_class
@@ -53,18 +54,16 @@ def setup_logger(log_level):
def format_search_results(search_results):
_, height = shutil.get_terminal_size()
height -= 4 # Accounting for prompt
ret = ''
for idx, result in enumerate(search_results[:height]):
try:
meta = ' | '.join(val for _, val in result.meta.items())
except AttributeError:
meta = ''
ret += '{:2}: {:40.40}\t{:20.20}\n'.format(idx+1, result.title, meta)
return ret
headers = [
'SlNo',
'Title',
'Meta',
]
table = [(i+1, v.title, v.pretty_metadata)
for i, v in enumerate(search_results)]
table = tabulate(table, headers, tablefmt='psql')
table = '\n'.join(table.split('\n')[::-1])
return table
def search(query, provider):
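`format_search_results` now hands layout off to `tabulate` with the `psql` style and then reverses the lines, so the lowest serial numbers end up closest to the input prompt. A small standalone example of that trick (titles and metadata are invented):

```python
from tabulate import tabulate

rows = [(1, 'Some Anime', 'Completed | 24'),
        (2, 'Another Anime', 'Airing | 12')]
table = tabulate(rows, ['SlNo', 'Title', 'Meta'], tablefmt='psql')

# Print the table upside down so row 1 sits just above the prompt.
print('\n'.join(table.split('\n')[::-1]))
```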
@@ -207,13 +206,13 @@ def format_command(cmd, episode, file_format, path):
def external_download(cmd, episode, file_format, path=''):
logging.debug('cmd: ' + cmd)
logging.debug('episode: {!r}'.format(episode))
logging.debug('file format: ' + file_format)
logger.debug('cmd: ' + cmd)
logger.debug('episode: {!r}'.format(episode))
logger.debug('file format: ' + file_format)
cmd = format_command(cmd, episode, file_format, path=path)
logging.debug('formatted cmd: ' + ' '.join(cmd))
logger.debug('formatted cmd: ' + ' '.join(cmd))
p = subprocess.Popen(cmd)
return_code = p.wait()

View File

@@ -9,6 +9,8 @@ import click
import warnings
from time import time
logger = logging.getLogger(__name__)
# Don't warn if not using fuzzywuzzy[speedup]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@@ -27,7 +29,7 @@ class Watcher:
self._append_to_watch_file(anime)
logging.info('Added {:.50} to watch list.'.format(anime.title))
logger.info('Added {:.50} to watch list.'.format(anime.title))
return anime
def list(self):
@@ -57,7 +59,7 @@ class Watcher:
match = process.extractOne(anime_name, animes, score_cutoff=40)
if match:
anime = match[0]
logging.debug('Anime: {!r}, episodes_done: {}'.format(
logger.debug('Anime: {!r}, episodes_done: {}'.format(
anime, anime.episodes_done))
if (time() - anime._timestamp) > 4*24*60*60:
@@ -67,7 +69,7 @@ class Watcher:
def update_anime(self, anime):
if not hasattr(anime, 'meta') or not anime.meta.get('Status') or \
anime.meta['Status'].lower() == 'airing':
logging.info('Updating anime {}'.format(anime.title))
logger.info('Updating anime {}'.format(anime.title))
AnimeInfo = self._get_anime_info_class(anime.url)
newanime = AnimeInfo(anime.url, episodes_done=anime.episodes_done,
timestamp=time())
@@ -110,7 +112,7 @@ class Watcher:
def _read_from_watch_file(self):
if not os.path.exists(self.WATCH_FILE):
logging.error('Add something to watch list first.')
logger.error('Add something to watch list first.')
sys.exit(1)
with open(self.WATCH_FILE, 'r') as watch_file:
@@ -133,7 +135,7 @@ class Watcher:
cls = get_anime_class(url)
# TODO: Maybe this is better off as a mixin
class AnimeInfo(cls):
class AnimeInfo(cls, sitename=cls.sitename):
def __init__(self, *args, **kwargs):
self.episodes_done = kwargs.pop('episodes_done', 0)
self._timestamp = kwargs.pop('timestamp', 0)
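The `class AnimeInfo(cls, sitename=cls.sitename)` fix matters because the site classes are now declared with a `sitename` class keyword (see the `sitename='gogoanime'` style definitions elsewhere in this commit), which Python passes to an `__init_subclass__` hook, so a dynamically built subclass has to forward it. A minimal sketch of the mechanism, using an invented registry rather than the project's real hook:

```python
class Anime:
    _registry = {}

    def __init_subclass__(cls, sitename=None, **kwargs):
        super().__init_subclass__(**kwargs)
        if sitename is None:
            # The old `class AnimeInfo(cls):` form would land here.
            raise TypeError('subclasses must pass a sitename keyword')
        cls.sitename = sitename
        cls._registry[sitename] = cls


class GogoAnime(Anime, sitename='gogoanime'):
    pass


def make_watch_info(cls):
    # Mirrors the watcher fix: forward the parent's sitename.
    class AnimeInfo(cls, sitename=cls.sitename):
        pass
    return AnimeInfo
```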

View File

@@ -26,15 +26,13 @@ setup(
'Click>=6.7',
'fuzzywuzzy>=0.16.0',
'coloredlogs>=10.0',
'cfscrape>=1.9.5',
'requests-cache>=0.4.13'
'cfscrape>=1.9.7',
'requests-cache>=0.4.13',
'tabulate>=0.8.3',
],
tests_require=[
'pytest',
],
extras_require={
'cloudflare': []
},
long_description=long_description,
long_description_content_type='text/markdown',
entry_points='''