Merge pull request #643 from ArjixWasTaken/patch-29

master
Arjix 2021-10-16 13:43:22 +03:00 committed by GitHub
commit 22a114074e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 124 additions and 81 deletions

View File

@@ -21,12 +21,13 @@ class Anime4(Anime, sitename='4anime'):
"options": "qtranslate_lang=0&set_intitle=None&customset%5B%5D=anime" "options": "qtranslate_lang=0&set_intitle=None&customset%5B%5D=anime"
} }
soup = helpers.soupify(helpers.post( soup = helpers.soupify(helpers.post(
"https://4anime.to/wp-admin/admin-ajax.php", data=data)).select('div.info > a') "https://4anime.to/wp-admin/admin-ajax.php", data=data)).select('.item')
search_results = [ search_results = [
SearchResult( SearchResult(
title=i.text, title=i.select_one('.info > a').text,
url=i['href'] url=i.select_one('.info > a').get('href', ''),
poster="https://4anime.to" + i.find('img').get('src', '')
) )
for i in soup for i in soup
] ]
@@ -43,6 +44,19 @@ class Anime4(Anime, sitename='4anime'): class Anime4(Anime, sitename='4anime'):
for i in soup.select('.detail > a'): for i in soup.select('.detail > a'):
if 'year' in i.get('href', ''): if 'year' in i.get('href', ''):
self.meta['year'] = int(i.text) if i.text.isnumeric() else None self.meta['year'] = int(i.text) if i.text.isnumeric() else None
elif 'status' in i.get('href', ''):
self.meta['airing_status'] = i.text.strip()
desc_soup = soup.select_one("#description-mob")
if "READ MORE" in str(desc_soup):
desc = desc_soup.select('#fullcontent p')
self.meta['description'] = "\n".join([x.text for x in desc])
else:
self.meta['description'] = desc_soup.select_one('p:nth-child(2)').text
self.meta['poster'] = "https://4anime.to" + soup.select_one("#details > div.cover > img").get('src', '')
self.meta['total_eps'] = len(soup.select('ul.episodes.range.active > li > a'))
self.meta['cover'] = "https://4anime.to/static/Dr1FzAv.jpg"
class Anime4Episode(AnimeEpisode, sitename='4anime'): class Anime4Episode(AnimeEpisode, sitename='4anime'):

View File

@@ -1,78 +1,84 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers from anime_downloader.sites import helpers
import re
def parse_search_page(soup):
results = soup.select('ul.thumb > li > a') def parse_search_page(soup):
return [ results = soup.select('ul.thumb > li > a')
SearchResult( return [
title=x['title'], SearchResult(
url=x['href'], title=x['title'],
poster=x.find('img')['src'] url=x['href'],
) poster=x.find('img')['src']
for x in results )
] for x in results
]
class TenshiMoe(Anime, sitename='tenshi.moe'):
class TenshiMoe(Anime, sitename='tenshi.moe'):
sitename = 'tenshi.moe'
sitename = 'tenshi.moe'
@classmethod
def search(cls, query): @classmethod
soup = helpers.soupify( def search(cls, query):
helpers.get( soup = helpers.soupify(
'https://tenshi.moe/anime', helpers.get(
params={'q': query}, 'https://tenshi.moe/anime',
cookies={'loop-view': 'thumb'} params={'q': query},
) cookies={'loop-view': 'thumb'}
) )
)
results = parse_search_page(soup)
results = parse_search_page(soup)
while soup.select_one(".pagination"):
link = soup.select_one('a.page-link[rel="next"]') while soup.select_one(".pagination"):
if link: link = soup.select_one('a.page-link[rel="next"]')
soup = helpers.soupify( if link:
helpers.get( soup = helpers.soupify(
link['href'], helpers.get(
cookies={'loop-view': 'thumb'} link['href'],
) cookies={'loop-view': 'thumb'}
) )
results.extend(parse_search_page(soup)) )
else: results.extend(parse_search_page(soup))
break else:
break
return results
return results
def _scrape_episodes(self):
soup = helpers.soupify(helpers.get(self.url)) def _scrape_episodes(self):
eps = soup.select( soup = helpers.soupify(helpers.get(self.url))
'li[class*="episode"] > a' eps = soup.select(
) 'li[class*="episode"] > a'
eps = [x['href'] for x in eps] )
return eps eps = [x['href'] for x in eps]
return eps
def _scrape_metadata(self):
soup = helpers.soupify(helpers.get(self.url).text) def _scrape_metadata(self):
self.title = soup.title.text.split('—')[0].strip() soup = helpers.soupify(helpers.get(self.url).text)
self.title = soup.select_one('span.value > span[title="English"]').parent.text.strip()
self.meta['year'] = int(re.findall(r"(\d{4})", soup.select_one('li.release-date .value').text)[0])
class TenshiMoeEpisode(AnimeEpisode, sitename='tenshi.moe'): self.meta['airing_status'] = soup.select_one('li.status > .value').text.strip()
QUALITIES = ['360p', '480p', '720p', '1080p'] self.meta['total_eps'] = int(soup.select_one('.entry-episodes > h2 > span').text.strip())
self.meta['desc'] = soup.select_one('.entry-description > .card-body').text.strip()
def _get_sources(self): self.meta['poster'] = soup.select_one('img.cover-image').get('src', '')
soup = helpers.soupify(helpers.get(self.url)) self.meta['cover'] = ''
soup = soup.select_one('.embed-responsive > iframe')
mp4moe = helpers.soupify(helpers.get(soup.get('src'), referer=self.url)) class TenshiMoeEpisode(AnimeEpisode, sitename='tenshi.moe'):
mp4moe = mp4moe.select_one('video#player') QUALITIES = ['360p', '480p', '720p', '1080p']
qualities_ = [x.get("title") for x in mp4moe.select('source')]
sources = [ def _get_sources(self):
('no_extractor', x.get('src')) soup = helpers.soupify(helpers.get(self.url))
for x in mp4moe.select('source') soup = soup.select_one('.embed-responsive > iframe')
]
mp4moe = helpers.soupify(helpers.get(soup.get('src'), referer=self.url))
if self.quality in qualities_: mp4moe = mp4moe.select_one('video#player')
return [sources[qualities_.index(self.quality)]] qualities_ = [x.get("title") for x in mp4moe.select('source')]
sources = [
('no_extractor', x.get('src'))
for x in mp4moe.select('source')
]
if self.quality in qualities_:
return [sources[qualities_.index(self.quality)]]

View File

@@ -55,6 +55,7 @@ class TwistMoe(Anime, sitename='twist.moe'):
animes.append(SearchResult( animes.append(SearchResult(
title=anime['title'], title=anime['title'],
url='https://twist.moe/a/' + anime['slug']['slug'] + '/', url='https://twist.moe/a/' + anime['slug']['slug'] + '/',
poster=f"https://media.kitsu.io/anime/poster_images/{anime['hb_id']}/large.jpg"
)) ))
animes = [ani[0] for ani in process.extract(query, animes)] animes = [ani[0] for ani in process.extract(query, animes)]
return animes return animes
@@ -81,6 +82,28 @@ class TwistMoe(Anime, sitename='twist.moe'):
return self._episode_urls return self._episode_urls
def _scrape_metadata(self):
slug = self.url.split('a/')[-1][:-1]
api_url = "https://api.twist.moe/api/anime/" + slug
res = helpers.get(
api_url,
headers={
'x-access-token': '0df14814b9e590a1f26d3071a4ed7974'
}
).json()
if 'hb_id' in res:
kitsu_api_url = "https://kitsu.io/api/edge/anime/" + str(res['hb_id'])
kitsu_data = helpers.get(kitsu_api_url).json()
attributes = kitsu_data['data']['attributes']
self.meta['title'] = attributes['canonicalTitle']
self.meta['year'] = attributes['startDate'].split('-')[0]
self.meta['airing_status'] = attributes['status']
self.meta['poster'] = attributes['posterImage']['original']
self.meta['cover'] = attributes['coverImage']['original']
self.meta['total_eps'] = attributes['episodeCount']
self.meta['desc'] = attributes['description']
# From stackoverflow https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras # From stackoverflow https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
def pad(data): def pad(data):
length = BLOCK_SIZE - (len(data) % BLOCK_SIZE) length = BLOCK_SIZE - (len(data) % BLOCK_SIZE)