Branch: master
Author: Vishnunarayan K I
Date: 2018-07-06 23:43:10 +05:30
Parent: 2a80424bf4
Commit: 3ef368bf7a
11 changed files with 24 additions and 39 deletions

View File

@@ -5,7 +5,6 @@ import os
 import logging

 from anime_downloader.sites import get_anime_class
 from anime_downloader.sites.exceptions import NotFoundError
-from anime_downloader.players.mpv import mpv
 from anime_downloader.__version__ import __version__
@@ -274,7 +273,8 @@ def list_animes(watcher, quality, download_dir):
             inp = inp.split('download ')[1]
         except IndexError:
             inp = ':'
-        inp = str(anime.episodes_done+1)+inp if inp.startswith(':') else inp
+        inp = str(anime.episodes_done+1) + \
+            inp if inp.startswith(':') else inp
         inp = inp+str(len(anime)) if inp.endswith(':') else inp

         anime = util.split_anime(anime, inp)
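The wrapped expression is the range-expansion step for watch-mode input: an episode slice with a missing bound defaults to the next unwatched episode on the left and the series length on the right, before util.split_anime slices the episode list. A standalone sketch of that logic (the helper name and sample values are made up for illustration):

    # Hypothetical helper mirroring the two conditionals above;
    # episodes_done=3 and total=12 are made-up sample values.
    def expand_range(inp, episodes_done, total):
        if inp.startswith(':'):
            inp = str(episodes_done + 1) + inp   # ':5' -> '4:5'
        if inp.endswith(':'):
            inp = inp + str(total)               # '4:' -> '4:12'
        return inp

    print(expand_range(':', 3, 12))   # '4:12', resume from episode 4
    print(expand_range(':5', 3, 12))  # '4:5'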

View File

@@ -19,5 +19,8 @@ ALL_EXTRACTORS = [
 def get_extractor(name):
     for extractor in ALL_EXTRACTORS:
         if extractor['regex'] in name:
-            module = import_module('anime_downloader.extractors.{}'.format(extractor['modulename']))
+            module = import_module(
+                'anime_downloader.extractors.{}'.format(
+                    extractor['modulename'])
+            )
             return getattr(module, extractor['class'])
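For context, get_extractor resolves a stream URL to an extractor class by substring match, importing the module lazily. The entries of ALL_EXTRACTORS are not shown in this hunk; judging from the keys used ('regex', 'modulename', 'class'), each presumably looks something like this (values are illustrative, not taken from the repository):

    ALL_EXTRACTORS = [
        {
            'regex': 'rapidvideo',       # substring tested against the URL
            'modulename': 'rapidvideo',  # anime_downloader.extractors.<modulename>
            'class': 'RapidVideo',       # attribute fetched with getattr
        },
    ]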

View File

@@ -34,7 +34,7 @@ def get_mpv_configfile():
     with open(conf, 'w') as configfile:
         configfile.write(
             'q quit 50\nCLOSE_WIN quit 50\nSTOP quit 50\nctrl+c quit 50\n'
-            '> quit 51\nNEXT quit 51\n< quit 52\nPREV quit 52\n'
+            '> quit 51\nNEXT quit 51\n< quit 52\nPREV quit 52\ni seek 80\n'
         )

     return conf
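The config written here maps keys to distinct mpv exit codes ('quit 50/51/52'), so the wrapper can tell a plain quit from a skip to the next or previous episode; this hunk also binds i to an 80-second seek. A hedged sketch of how a caller could branch on those codes (not the project's actual player loop; the names here are invented):

    import subprocess

    QUIT, NEXT, PREV = 50, 51, 52  # exit codes set in the input.conf above

    def play(url, conf):
        # mpv exits with the code given to the 'quit' command in input.conf
        return subprocess.call(['mpv', '--input-conf={}'.format(conf), url])

    # code = play(episode_url, get_mpv_configfile())
    # if code == NEXT: advance an episode; if code == PREV: go back one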

View File

@@ -31,7 +31,8 @@ class BaseAnime:
         if quality in self.QUALITIES:
             self.quality = quality
         else:
-            raise AnimeDLError('Quality {0} not found in {1}'.format(quality, self.QUALITIES))
+            raise AnimeDLError(
+                'Quality {0} not found in {1}'.format(quality, self.QUALITIES))

         if not _skip_online_data:
             logging.info('Extracting episode info from page')
@@ -60,7 +61,7 @@ class BaseAnime:
             self._len, self._episode_urls))

         self._episode_urls = [(no+1, id) for no, id in
-                                enumerate(self._episode_urls)]
+                              enumerate(self._episode_urls)]

         return self._episode_urls
@@ -123,7 +124,8 @@ class BaseEpisode:
         qualities = self.QUALITIES
         qualities.remove(self.quality)
         for quality in qualities:
-            logging.warning('Quality {} not found. Trying {}.'.format(self.quality, quality))
+            logging.warning('Quality {} not found. Trying {}.'.format(
+                self.quality, quality))
             self.quality = quality
             try:
                 self.get_data()

View File

@@ -26,6 +26,6 @@ class BaseAnimeCF(BaseAnime):
             self._len, self._episode_urls))

         self._episode_urls = [(no+1, id) for no, id in
-                                enumerate(self._episode_urls)]
+                              enumerate(self._episode_urls)]

         return self._episode_urls

View File

@@ -52,7 +52,8 @@ class GogoAnime(BaseAnime):
         metdata = {}
         for elem in meta.find_all('p'):
             try:
-                key, val = [v.strip(' :') for v in elem.text.strip().split('\n')]
+                key, val = [v.strip(' :')
+                            for v in elem.text.strip().split('\n')]
             except Exception:
                 continue
             metdata[key] = val
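The reflowed comprehension splits each paragraph of the form 'Key:\nValue' into a dict entry, and the surrounding try/except skips paragraphs that don't fit that shape. A self-contained rerun of the same parse on made-up markup:

    from bs4 import BeautifulSoup

    # Made-up HTML in the 'Key:\nValue' shape the loop above expects.
    meta = BeautifulSoup('<p>Type:\nTV Series</p><p>Status:\nOngoing</p>',
                         'html.parser')
    metdata = {}
    for elem in meta.find_all('p'):
        try:
            key, val = [v.strip(' :')
                        for v in elem.text.strip().split('\n')]
        except Exception:
            continue
        metdata[key] = val
    print(metdata)  # {'Type': 'TV Series', 'Status': 'Ongoing'}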

View File

@@ -23,7 +23,9 @@ def get_anime_class(url):
                 raise
             logging.debug("Coudn't import {}, '{}'".format(site[0], e.msg))
             logging.warning("Provider '{}' not used. Make sure you have "
-                            "cfscrape and node-js installed".format(site[0]))
+                            "cfscrape and node-js installed".format(
+                                site[0])
+                            )
             continue

     return getattr(module, site[2])

View File

@@ -6,7 +6,7 @@ import logging
 from anime_downloader.sites.anime import BaseEpisode, SearchResult
 from anime_downloader.sites.baseanimecf import BaseAnimeCF
 from anime_downloader.sites.exceptions import NotFoundError
-from anime_downloader.const import desktop_headers, get_random_header
+from anime_downloader.const import get_random_header

 scraper = cfscrape.create_scraper(delay=10)
@@ -59,7 +59,8 @@ class KissAnime(BaseAnimeCF):
         if soup.title.text.strip().lower() != "find anime":
             return [SearchResult(
                 title=soup.find('a', 'bigChar').text,
-                url='https://kissanime.ru'+soup.find('a', 'bigChar').get('href'),
+                url='https://kissanime.ru' +
+                soup.find('a', 'bigChar').get('href'),
                 poster='',
             )]

View File

@@ -60,33 +60,12 @@ class NineAnime(BaseAnime):
     @classmethod
     def search(cls, query):
         r = requests.get('https://www4.9anime.is/search?',
-                         params={'keyword': query})
+                         params={'keyword': query}, headers=desktop_headers)
         logging.debug(r.url)

         soup = BeautifulSoup(r.text, 'html.parser')

-        # 9anime has search result in
-        # <div class="item">
-        #     <div class="inner">
-        #         <a href="https://www4.9anime.is/watch/dragon-ball-super.7jly"
-        #            class="poster tooltipstered" data-tip="ajax/film/tooltip/7jly?5827f020">
-        #             <img src="http://static.akacdn.ru/static/images/2018/03/43012fe439631a2cecfcf248841e15f7.jpg"
-        #                  alt="Dragon Ball Super">
-        #             <div class="status">
-        #                 <span class="bar">
-        #                 </span>
-        #                 <div class="ep"> Ep 131/131 </div>
-        #             </div>
-        #         </a>
-        #         <a href="https://www4.9anime.is/watch/dragon-ball-super.7jly"
-        #            data-jtitle="Dragon Ball Super"
-        #            class="name">
-        #             Dragon Ball Super
-        #         </a>
-        #     </div>
-        # </div>
-
         search_results = soup.find(
             'div', {'class': 'film-list'}).find_all('div', {'class': 'item'})
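The deleted comment block documented the markup being scraped; the find/find_all pair at the end is equivalent to the CSS selector 'div.film-list div.item'. A minimal check against abbreviated markup (reconstructed from the deleted comment):

    from bs4 import BeautifulSoup

    html = ('<div class="film-list"><div class="item">'
            '<a class="name" href="/watch/dragon-ball-super.7jly">'
            'Dragon Ball Super</a></div></div>')
    soup = BeautifulSoup(html, 'html.parser')
    for item in soup.select('div.film-list div.item'):
        print(item.find('a', {'class': 'name'}).text)  # Dragon Ball Super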

View File

@@ -10,11 +10,7 @@ import os
 import errno
 import time

-from bs4 import BeautifulSoup
-from anime_downloader.sites import get_anime_class
-from anime_downloader.sites.exceptions import NotFoundError
-from anime_downloader.const import desktop_headers

View File

@@ -57,7 +57,8 @@ class Watcher:
         match = process.extractOne(anime_name, animes, score_cutoff=40)
         if match:
             anime = match[0]
-            logging.debug('Anime: {!r}, episodes_done: {}'.format(anime, anime.episodes_done))
+            logging.debug('Anime: {!r}, episodes_done: {}'.format(
+                anime, anime.episodes_done))

             if (time() - anime._timestamp) > 4*24*60*60:
                 anime = self.update_anime(anime)
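process.extractOne here is presumably fuzzywuzzy's: it returns the best-scoring (choice, score) pair, or None when no choice reaches score_cutoff, which is why the result is guarded with 'if match:'. A quick standalone illustration with made-up titles:

    from fuzzywuzzy import process

    animes = ['one punch man', 'one piece', 'overlord']
    match = process.extractOne('onepunch', animes, score_cutoff=40)
    if match:
        print(match[0], match[1])  # best match and its similarity score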