Animeflix's _scrape_metadata fixed, default server set to AUEngine (#293)

Co-authored-by: Prayag Jain <offpjain@gmail.com>
master
Prayag Jain 2020-03-18 16:21:13 +05:30 committed by GitHub
parent 849e62d5c4
commit 134c1f6f68
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 9 additions and 3 deletions

View File

@ -1,6 +1,8 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers from anime_downloader.sites import helpers
import logging
logger = logging.getLogger(__name__)
class AnimeFlix(Anime, sitename='animeflix'): class AnimeFlix(Anime, sitename='animeflix'):
""" """
@ -30,15 +32,16 @@ class AnimeFlix(Anime, sitename='animeflix'):
def _scrape_episodes(self):
    """Fetch the episode listing for this anime and return full episode URLs."""
    # TODO: find a better way to do splits
    # find a way to pass some values within the class
    self.slug = self.url.strip('/').split('/')[-1]
    listing = helpers.get(self.episodeList_url,
                          params={'slug': self.slug}).json()
    links = []
    for ep in listing['episodes']:
        links.append(self.anime_url + ep['url'])
    return links
def _scrape_metadata(self):
    """Fetch metadata for this anime and record its title."""
    # Slug is the last path segment of the anime page URL; it must be set
    # here because _scrape_metadata can run before _scrape_episodes.
    self.slug = self.url.strip('/').split('/')[-1]
    response = helpers.get(self.meta_url, params={'slug': self.slug})
    meta = response.json()
    self.title = meta['data']['title']
    logger.debug(self.title)
@ -52,5 +55,8 @@ class AnimeFlixEpisode(AnimeEpisode, sitename='animeflix'):
params={'episode_num': self.ep_no, 'slug': self.url.strip('/').split('/')[-2]}).json() params={'episode_num': self.ep_no, 'slug': self.url.strip('/').split('/')[-2]}).json()
id = episode['data']['current']['id'] id = episode['data']['current']['id']
download_link = helpers.get( download_link = helpers.get(
f'{self.stream_url}={id}').json()[0]['file'] f'{self.stream_url}={id}').json()
return [('no_extractor',download_link)] i = 0
while download_link[i]['provider'] != 'AUEngine' :
i = i + 1
return [('no_extractor',download_link[i]['file'])]