Merge pull request #643 from ArjixWasTaken/patch-29

master
Arjix 2021-10-16 13:43:22 +03:00 committed by GitHub
commit 22a114074e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 124 additions and 81 deletions

View File

@ -21,12 +21,13 @@ class Anime4(Anime, sitename='4anime'):
"options": "qtranslate_lang=0&set_intitle=None&customset%5B%5D=anime"
}
soup = helpers.soupify(helpers.post(
"https://4anime.to/wp-admin/admin-ajax.php", data=data)).select('div.info > a')
"https://4anime.to/wp-admin/admin-ajax.php", data=data)).select('.item')
search_results = [
SearchResult(
title=i.text,
url=i['href']
title=i.select_one('.info > a').text,
url=i.select_one('.info > a').get('href', ''),
poster="https://4anime.to" + i.find('img').get('src', '')
)
for i in soup
]
@ -43,6 +44,19 @@ class Anime4(Anime, sitename='4anime'):
for i in soup.select('.detail > a'):
if 'year' in i.get('href', ''):
self.meta['year'] = int(i.text) if i.text.isnumeric() else None
elif 'status' in i.get('href', ''):
self.meta['airing_status'] = i.text.strip()
desc_soup = soup.select_one("#description-mob")
if "READ MORE" in str(desc_soup):
desc = desc_soup.select('#fullcontent p')
self.meta['description'] = "\n".join([x.text for x in desc])
else:
self.meta['description'] = desc_soup.select_one('p:nth-child(2)').text
self.meta['poster'] = "https://4anime.to" + soup.select_one("#details > div.cover > img").get('src', '')
self.meta['total_eps'] = len(soup.select('ul.episodes.range.active > li > a'))
self.meta['cover'] = "https://4anime.to/static/Dr1FzAv.jpg"
class Anime4Episode(AnimeEpisode, sitename='4anime'):

View File

@ -1,78 +1,84 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
def parse_search_page(soup):
    """Build SearchResult entries from one tenshi.moe thumbnail-view page."""
    entries = []
    for anchor in soup.select('ul.thumb > li > a'):
        entries.append(
            SearchResult(
                title=anchor['title'],
                url=anchor['href'],
                poster=anchor.find('img')['src']
            )
        )
    return entries
class TenshiMoe(Anime, sitename='tenshi.moe'):
    """Provider for tenshi.moe: search, episode listing and title metadata."""

    sitename = 'tenshi.moe'

    @classmethod
    def search(cls, query):
        """Search tenshi.moe for *query*, following pagination to the end.

        The ``loop-view: thumb`` cookie forces the thumbnail layout that
        ``parse_search_page``'s CSS selectors expect.
        """
        soup = helpers.soupify(
            helpers.get(
                'https://tenshi.moe/anime',
                params={'q': query},
                cookies={'loop-view': 'thumb'}
            )
        )
        results = parse_search_page(soup)
        # Keep following rel="next" while a pagination widget is present.
        while soup.select_one(".pagination"):
            link = soup.select_one('a.page-link[rel="next"]')
            if not link:
                break
            soup = helpers.soupify(
                helpers.get(link['href'], cookies={'loop-view': 'thumb'})
            )
            results.extend(parse_search_page(soup))
        return results

    def _scrape_episodes(self):
        """Return the episode page URLs listed on the anime page."""
        soup = helpers.soupify(helpers.get(self.url))
        return [a['href'] for a in soup.select('li[class*="episode"] > a')]

    def _scrape_metadata(self):
        """Set ``self.title`` from the page's ``<title>`` element.

        BUG FIX: the original called ``str.split('')`` with an empty
        separator, which unconditionally raises ``ValueError`` — the real
        delimiter character was evidently lost in transit.  Split on an
        em dash instead; if the delimiter is absent, ``split`` returns the
        whole string in element 0, so this degrades gracefully.
        """
        soup = helpers.soupify(helpers.get(self.url).text)
        # NOTE(review): assumes the <title> is "Name — extra"; confirm the
        # exact delimiter against the live site.
        self.title = soup.title.text.split('—')[0].strip()
class TenshiMoeEpisode(AnimeEpisode, sitename='tenshi.moe'):
    # Qualities offered by the site's player, ordered lowest to highest.
    QUALITIES = ['360p', '480p', '720p', '1080p']

    def _get_sources(self):
        # The episode page embeds the player in an iframe; fetch the iframe
        # document with the episode URL as referer.
        soup = helpers.soupify(helpers.get(self.url))
        soup = soup.select_one('.embed-responsive > iframe')
        mp4moe = helpers.soupify(helpers.get(soup.get('src'), referer=self.url))
        mp4moe = mp4moe.select_one('video#player')
        # Each <source> tag carries its quality label in the title attribute;
        # qualities_ and sources are index-aligned over the same tag list.
        qualities_ = [x.get("title") for x in mp4moe.select('source')]
        sources = [
            ('no_extractor', x.get('src'))
            for x in mp4moe.select('source')
        ]
        if self.quality in qualities_:
            return [sources[qualities_.index(self.quality)]]
        # NOTE(review): implicitly returns None when the requested quality
        # is not offered — confirm callers tolerate that.
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import re
def parse_search_page(soup):
    """Build SearchResult entries from one tenshi.moe thumbnail-view page.

    BUG FIX: the original indexed ``x['title']`` / ``x['href']`` and
    ``x.find('img')['src']`` directly, so a malformed listing entry (a
    missing attribute or an <a> without an <img>) raised KeyError or
    TypeError and aborted the whole search.  Use defensive lookups with
    empty-string defaults instead.
    """
    results = []
    for anchor in soup.select('ul.thumb > li > a'):
        img = anchor.find('img')
        results.append(
            SearchResult(
                title=anchor.get('title', ''),
                url=anchor.get('href', ''),
                poster=img.get('src', '') if img else ''
            )
        )
    return results
class TenshiMoe(Anime, sitename='tenshi.moe'):
    """Provider for tenshi.moe: search, episode listing and rich metadata."""

    sitename = 'tenshi.moe'

    @classmethod
    def search(cls, query):
        """Search tenshi.moe for *query*, following pagination to the end.

        The ``loop-view: thumb`` cookie forces the thumbnail layout that
        ``parse_search_page``'s CSS selectors expect.
        """
        soup = helpers.soupify(
            helpers.get(
                'https://tenshi.moe/anime',
                params={'q': query},
                cookies={'loop-view': 'thumb'}
            )
        )
        results = parse_search_page(soup)
        # Keep following rel="next" while a pagination widget is present.
        while soup.select_one(".pagination"):
            link = soup.select_one('a.page-link[rel="next"]')
            if not link:
                break
            soup = helpers.soupify(
                helpers.get(link['href'], cookies={'loop-view': 'thumb'})
            )
            results.extend(parse_search_page(soup))
        return results

    def _scrape_episodes(self):
        """Return the episode page URLs listed on the anime page."""
        soup = helpers.soupify(helpers.get(self.url))
        return [a['href'] for a in soup.select('li[class*="episode"] > a')]

    def _scrape_metadata(self):
        """Populate title and ``self.meta`` from the anime page.

        BUG FIX: the original did ``re.findall(...)[0]`` and
        ``int(...text...)`` unguarded, so a page with no 4-digit year or a
        non-numeric episode counter raised IndexError/ValueError and killed
        metadata scraping.  Both lookups are now guarded; missing values
        become ``None``.
        """
        soup = helpers.soupify(helpers.get(self.url).text)
        self.title = soup.select_one('span.value > span[title="English"]').parent.text.strip()
        years = re.findall(r"(\d{4})", soup.select_one('li.release-date .value').text)
        self.meta['year'] = int(years[0]) if years else None
        self.meta['airing_status'] = soup.select_one('li.status > .value').text.strip()
        eps_text = soup.select_one('.entry-episodes > h2 > span').text.strip()
        self.meta['total_eps'] = int(eps_text) if eps_text.isdigit() else None
        self.meta['desc'] = soup.select_one('.entry-description > .card-body').text.strip()
        self.meta['poster'] = soup.select_one('img.cover-image').get('src', '')
        # The site exposes no banner/cover artwork.
        self.meta['cover'] = ''
class TenshiMoeEpisode(AnimeEpisode, sitename='tenshi.moe'):
    # Qualities offered by the site's player, ordered lowest to highest.
    QUALITIES = ['360p', '480p', '720p', '1080p']

    def _get_sources(self):
        """Return [(extractor_name, stream_url)] for this episode.

        BUG FIX: the original implicitly returned ``None`` when the
        requested quality was not offered, which callers iterating the
        result would trip over.  Fall back to the best available source
        (the site lists <source> tags lowest-to-highest) and return an
        empty list when the player exposes no sources at all.
        """
        # The episode page embeds the player in an iframe; fetch the iframe
        # document with the episode URL as referer.
        page = helpers.soupify(helpers.get(self.url))
        iframe = page.select_one('.embed-responsive > iframe')
        player = helpers.soupify(helpers.get(iframe.get('src'), referer=self.url))
        video = player.select_one('video#player')
        # qualities_ and sources are index-aligned over the same tag list.
        qualities_ = [tag.get("title") for tag in video.select('source')]
        sources = [
            ('no_extractor', tag.get('src'))
            for tag in video.select('source')
        ]
        if self.quality in qualities_:
            return [sources[qualities_.index(self.quality)]]
        # Requested quality unavailable: best available (last listed) source.
        return [sources[-1]] if sources else []

View File

@ -55,6 +55,7 @@ class TwistMoe(Anime, sitename='twist.moe'):
animes.append(SearchResult(
title=anime['title'],
url='https://twist.moe/a/' + anime['slug']['slug'] + '/',
poster=f"https://media.kitsu.io/anime/poster_images/{anime['hb_id']}/large.jpg"
))
animes = [ani[0] for ani in process.extract(query, animes)]
return animes
@ -81,6 +82,28 @@ class TwistMoe(Anime, sitename='twist.moe'):
return self._episode_urls
def _scrape_metadata(self):
    """Populate ``self.meta`` via the twist.moe API, enriched from kitsu.io.

    BUG FIX: several kitsu attributes (``startDate``, ``posterImage``,
    ``coverImage``) are null for unaired or sparsely-catalogued shows; the
    original dereferenced them unguarded and raised AttributeError /
    TypeError.  Missing values now degrade to ``None`` / empty string.
    """
    # URL shape is https://twist.moe/a/<slug>/ — take the slug, drop the
    # trailing slash.
    slug = self.url.split('a/')[-1][:-1]
    api_url = "https://api.twist.moe/api/anime/" + slug
    res = helpers.get(
        api_url,
        headers={
            # Static public token used by the twist.moe frontend.
            'x-access-token': '0df14814b9e590a1f26d3071a4ed7974'
        }
    ).json()
    if 'hb_id' not in res:
        return
    kitsu_api_url = "https://kitsu.io/api/edge/anime/" + str(res['hb_id'])
    attributes = helpers.get(kitsu_api_url).json()['data']['attributes']
    self.meta['title'] = attributes['canonicalTitle']
    # startDate is "YYYY-MM-DD" or null; keep the year as a string to
    # preserve the original interface.
    start = attributes.get('startDate')
    self.meta['year'] = start.split('-')[0] if start else None
    self.meta['airing_status'] = attributes['status']
    self.meta['poster'] = (attributes.get('posterImage') or {}).get('original', '')
    self.meta['cover'] = (attributes.get('coverImage') or {}).get('original', '')
    self.meta['total_eps'] = attributes['episodeCount']
    self.meta['desc'] = attributes['description']
# From stackoverflow https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
def pad(data):
length = BLOCK_SIZE - (len(data) % BLOCK_SIZE)