Animeflix's _scrape_metadata fixed, default server set to AUEngine (#293)

Co-authored-by: Prayag Jain <offpjain@gmai.com>
master
Prayag Jain 2020-03-18 16:21:13 +05:30 committed by GitHub
parent 849e62d5c4
commit 134c1f6f68
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed files with 9 additions and 3 deletions

View File

@ -1,6 +1,8 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import logging
logger = logging.getLogger(__name__)
class AnimeFlix(Anime, sitename='animeflix'):
"""
@ -30,15 +32,16 @@ class AnimeFlix(Anime, sitename='animeflix'):
def _scrape_episodes(self):
    """Return the full URL of every episode of this anime.

    Also caches the slug (the last path segment of the anime page URL)
    on the instance for reuse by other scraping helpers.
    """
    # TODO: find a better way to do splits
    # find a way to pass some values within the class
    self.slug = self.url.strip('/').split('/')[-1]
    listing = helpers.get(self.episodeList_url,
                          params={'slug': self.slug}).json()
    return [self.anime_url + ep['url'] for ep in listing['episodes']]
def _scrape_metadata(self):
    """Fetch this anime's metadata from the site's meta API and set self.title.

    The API slug is the last non-empty path segment of the anime page URL.
    NOTE(review): the diff hunk ends right after logger.debug — the method
    may set further fields below this view.
    """
    self.slug = self.url.strip('/').split('/')[-1]
    # Meta endpoint returns JSON; the title lives under data.title.
    meta = helpers.get(self.meta_url,
                       params={'slug': self.slug}).json()
    self.title = meta['data']['title']
    logger.debug(self.title)
@ -52,5 +55,8 @@ class AnimeFlixEpisode(AnimeEpisode, sitename='animeflix'):
params={'episode_num': self.ep_no, 'slug': self.url.strip('/').split('/')[-2]}).json()
id = episode['data']['current']['id']
download_link = helpers.get(
f'{self.stream_url}={id}').json()[0]['file']
return [('no_extractor',download_link)]
f'{self.stream_url}={id}').json()
i = 0
while download_link[i]['provider'] != 'AUEngine' :
i = i + 1
return [('no_extractor',download_link[i]['file'])]