Subclass BaseAnime for Cloudflare support

master
Vishnunarayan K I 2018-06-02 00:44:57 +05:30
parent 26e7bccaef
commit 5d2b9ce66a
5 changed files with 42 additions and 21 deletions

View File

@@ -1,3 +1 @@
from anime_downloader.sites import get_anime_class
from anime_downloader.sites.nineanime import NineAnime
from anime_downloader.sites.kissanime import Kissanime

View File

@@ -111,6 +111,9 @@ def dl(ctx, anime_url, episode_range, playlist, url, player, no_download, qualit
type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR']),
help='Sets the level of logger', default='INFO')
def watch(anime_name, new, _list, player, log_level):
"""
WORK IN PROGRESS: MAY NOT WORK
"""
util.setup_logger(log_level)
watcher = _watch.Watcher()

View File

@@ -0,0 +1,36 @@
import cfscrape
from anime_downloader.sites.anime import BaseAnime
from bs4 import BeautifulSoup
import logging
scraper = cfscrape.create_scraper()
mobile_headers = {
'user-agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_1 like Mac OS X) \
AppleWebKit/604.1.38 (KHTML, like Gecko) \
Version/11.0 Mobile/15A402 Safari/604.1"
}
desktop_headers = {
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 \
Firefox/56.0"
}
scraper = cfscrape.create_scraper()
class BaseAnimeCF(BaseAnime):
def getEpisodes(self):
    """Fetch the anime page via the Cloudflare-capable scraper and
    collect its episode ids.

    Populates ``self._episodeIds`` and ``self._len`` from the page at
    ``self.url`` and returns the list of episode ids.
    """
    # NOTE(review): `scraper` and `desktop_headers` are module-level;
    # `_getMetadata` / `_getEpisodeUrls` are expected from BaseAnime
    # subclasses — confirm against the base class.
    r = scraper.get(self.url, headers=desktop_headers)
    soup = BeautifulSoup(r.text, 'html.parser')

    self._getMetadata(soup)

    # Fix: dropped the original `self._episodeIds = []` pre-assignment —
    # it was a dead store, immediately overwritten on the next line.
    self._episodeIds = self._getEpisodeUrls(soup)
    self._len = len(self._episodeIds)

    logging.debug('EPISODE IDS: length: {}, ids: {}'.format(
        self._len, self._episodeIds))

    return self._episodeIds

View File

@@ -1,9 +1,9 @@
import cfscrape
from anime_downloader.sites.anime import BaseAnime, BaseEpisode
from anime_downloader.sites.anime import BaseEpisode
from anime_downloader.sites.baseanimecf import BaseAnimeCF
from anime_downloader.sites.exceptions import NotFoundError
from anime_downloader.sites import util
from bs4 import BeautifulSoup
import logging
import re
scraper = cfscrape.create_scraper()
@@ -49,26 +49,11 @@ class KissanimeEpisode(BaseEpisode):
self.image = data['image']
class Kissanime(BaseAnime):
class Kissanime(BaseAnimeCF):
sitename = 'kissanime'
QUALITIES = ['360p', '480p', '720p']
_episodeClass = KissanimeEpisode
def getEpisodes(self):
self._episodeIds = []
r = scraper.get(self.url, headers=desktop_headers)
soup = BeautifulSoup(r.text, 'html.parser')
self._getMetadata(soup)
self._episodeIds = self._getEpisodeUrls(soup)
self._len = len(self._episodeIds)
logging.debug('EPISODE IDS: length: {}, ids: {}'.format(
self._len, self._episodeIds))
return self._episodeIds
def _getEpisodeUrls(self, soup):
ret = soup.find('table', {'class': 'listing'}).find_all('a')
ret = [str(a['href']) for a in ret]

View File

@@ -97,7 +97,6 @@ class NineAnime(BaseAnime):
return ret
def _getEpisodeUrls(self, soup):
self.soup = soup
ts = soup.find('html')['data-ts']
self._episodeClass.ts = ts
logging.debug('data-ts: {}'.format(ts))