anime-downloader/anime_downloader/sites/shiro.py

92 lines
3.2 KiB
Python
Raw Normal View History

2021-01-27 06:27:49 -08:00
import logging
2021-02-10 02:42:00 -08:00
import re
2021-01-27 06:13:18 -08:00
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
def get_token():
    """Scrape the shiro.is API token out of the site's main JS bundle.

    The homepage references a hashed ``/static/js/main.*.js`` bundle; the
    token is embedded in that script as a ``token:"..."`` literal.
    """
    homepage = helpers.get('https://shiro.is').text
    bundle_path = re.search(r'src\=\"(\/static\/js\/main\..*?)\"', homepage).group(1)  # noqa
    bundle = helpers.get('https://shiro.is' + bundle_path).text
    return re.search(r'token\:\"(.*?)\"', bundle).group(1)
def get_api_url():
    """Return the base URL of the shiro.is JSON API."""
    api_base = "https://tapi.shiro.is"
    return api_base
class Shiro(Anime, sitename='shiro'):
    """Anime scraper for shiro.is.

    All data comes from the site's JSON API (base URL from
    ``get_api_url()``); every request must carry the token scraped from
    the site's JS bundle by ``get_token()``.
    """
    sitename = 'shiro'

    @classmethod
    def search(cls, query):
        """Search the API for *query*.

        Returns a list of ``SearchResult`` sorted by year (ascending),
        or an empty list when the API returns no ``nav`` payload.
        """
        cls.token = get_token()
        cls.api_url = get_api_url()
        params = {
            'search': query,
            'token': cls.token
        }
        results = helpers.get(f'{cls.api_url}/advanced', params=params).json()['data']  # noqa
        # Guard clause: no 'nav' key means the API found nothing.
        if 'nav' not in results:
            return []
        items = results['nav']['currentPage']['items']
        search_results = [
            SearchResult(
                title=i['name'],
                url='https://shiro.is/anime/' + i['slug'],
                poster=f'{cls.api_url}/' + i['image'],
                meta={'year': i['year']},
                meta_info={
                    'version_key_dubbed': '(Sub)' if i['language'] == 'subbed' else '(Dub)'  # noqa
                }
            )
            for i in items
        ]

        def year_key(result):
            # One entry with a missing/non-numeric year must not crash
            # the whole search; sort such entries first.
            try:
                return int(result.meta['year'])
            except (TypeError, ValueError):
                return 0

        return sorted(search_results, key=year_key)

    def _fetch_anime_data(self):
        """Fetch and return the API JSON payload for this anime.

        Shared by ``_scrape_episodes`` and ``_scrape_metadata`` (their
        slug-resolution logic was previously duplicated). ``self.url``
        may point at either an anime page or an episode page; an episode
        slug is first resolved to its parent anime slug via the
        ``anime-episode`` endpoint.
        """
        self.token = get_token()
        self.api_url = get_api_url()
        slug = self.url.split('/')[-1]
        if 'episode' in slug:
            api_link = f'{self.api_url}/anime-episode/slug/' + slug
            r = helpers.get(api_link, params={'token': self.token}).json()
            slug = r['data']['anime_slug']
        api_link = f'{self.api_url}/anime/slug/' + slug
        return helpers.get(api_link, params={'token': self.token}).json()

    def _scrape_episodes(self):
        """Return the list of direct episode video URLs ([] if not found)."""
        r = self._fetch_anime_data()
        if r['status'] != 'Found':
            return []
        return [
            "https://cherry.subsplea.se/" + x['videos'][0]['video_id']  # noqa
            for x in r['data']['episodes']
        ]

    def _scrape_metadata(self):
        """Populate ``self.title`` from the API payload."""
        r = self._fetch_anime_data()
        self.title = r['data']['name']
class ShiroEpisode(AnimeEpisode, sitename='shiro'):
    """Episode scraper for shiro.is; the direct link sits in a <source> tag."""

    def _get_sources(self):
        """Return ``[('no_extractor', url)]`` with the page's video URL."""
        page = helpers.get(self.url, referer="https://shiro.is/").text
        video_url = re.search(r'source\s+src=\"(.*?)\"', page).group(1)
        return [('no_extractor', video_url)]