Merge pull request #588 from ArjixGamer/patch-27

Added arabic provider EgyAnime
master
AbdullahM0hamed 2020-12-28 22:34:11 +00:00 committed by GitHub
commit c54c9d8927
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 347 additions and 246 deletions

View File

@ -74,6 +74,7 @@ Yeah. Me too! That's why this tool exists.
- Darkanime
- Dbanimes
- EraiRaws
- EgyAnime - usually m3u8 (good for streaming, not so much for downloading)
- FastAni
- GurminderBoparai (AnimeChameleon)
- itsaturday

View File

@ -48,6 +48,13 @@ DEFAULT_CONFIG = {
'animefrenzy': {
'version': 'subbed'
},
'egyanime': {
'version': 'subbed',
'servers': [
'clipwatching',
'streamtape'
]
},
'animebinge': {
'version': 'subbed',
'servers': [

View File

@ -0,0 +1,18 @@
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
from anime_downloader.util import deobfuscate_packed_js
import re
import json
class clipwatching(BaseExtractor):
    """Extractor for clipwatching video pages.

    Scrapes the JWPlayer-style ``sources: [...]`` array embedded in the
    page's inline JavaScript and returns the first stream URL.
    """

    def _get_data(self):
        """Return ``{'stream_url': ..., 'referer': ...}`` for this page.

        The sources array is raw JavaScript with unquoted object keys,
        so the known keys are quoted before parsing the text as JSON.
        """
        page = helpers.get(self.url).text
        # re.DOTALL: the sources array frequently spans multiple lines
        # inside the embedded player script; without it the non-greedy
        # [.*?] stops at the first newline and the match fails.
        sources = re.search(r"sources:\s*(\[.*?\])", page, re.DOTALL).group(1)
        # Quote the bare JS identifiers so json.loads accepts the text.
        for js_key, json_key in [('src:', '"src":'), ('type:', '"type":')]:
            sources = sources.replace(js_key, json_key)
        sources = json.loads(sources)
        return {
            'stream_url': sources[0]['src'],
            'referer': self.url
        }

View File

@ -7,6 +7,12 @@ ALL_EXTRACTORS = [
'regex': 'rapidvideo',
'class': 'RapidVideo'
},
{
'sitename': 'clipwatching',
'modulename': 'clipwatching',
'regex': 'clipwatching',
'class': 'clipwatching'
},
{
'sitename': 'no_extractor',
'modulename': 'fake_extractor',

View File

@ -0,0 +1,68 @@
import logging
import re
import urllib.parse
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class EgyAnime(Anime, sitename='egyanime'):
    """Scraper for egyanime.com (Arabic anime provider)."""

    sitename = 'egyanime'

    @classmethod
    def search(cls, query):
        """Search egyanime.com and return a list of SearchResult.

        The search endpoint (a.php) returns an HTML fragment of anchor
        tags; every anchor carrying an href is treated as a result.
        """
        soup = helpers.soupify(
            helpers.get('https://www.egyanime.com/a.php',
                        params={'term': query}).text
        )
        return [
            SearchResult(
                title=anchor.text,
                url='https://www.egyanime.com/' + anchor['href']
            )
            for anchor in soup.find_all('a', href=True)
        ]

    def _scrape_episodes(self):
        """Return the list of episode-page URLs for this anime.

        Falls back to this page's own watch URL when no episode list is
        present (e.g. movies / single-episode entries).
        """
        soup = helpers.soupify(helpers.get(self.url).text)
        episodes = [
            'https://www.egyanime.com/' + tag['href']
            for tag in soup.select('a.tag.is-dark.is-medium.m-5')
        ]
        if not episodes:
            # NOTE(review): rewrites the first occurrence of 'do' in the
            # URL to 'w' (presumably do.php -> w.php); fragile if 'do'
            # appears elsewhere in the URL — verify against site URLs.
            return [self.url.replace('do', 'w')]
        return episodes

    def _scrape_metadata(self):
        # Page <title> is "<name> مشاهدة ..."; keep only the name part.
        soup = helpers.soupify(helpers.get(self.url).text)
        self.title = soup.title.text.split('مشاهدة')[0].strip()
class EgyAnimeEpisode(AnimeEpisode, sitename='egyanime'):
    """Episode scraper for egyanime.com watch pages."""

    # Hosts we have extractors for, in priority order: the first name
    # found in a link's URL decides both the extractor and the server.
    _SUPPORTED_HOSTS = ('clipwatching', 'streamtape')

    def _get_sources(self):
        """Return sorted source dicts for supported hosts on this page.

        Server links are read from the ``data-link`` attributes of the
        anchors inside ``#server-watch`` (or, failing that, from the
        ``panel-block`` anchors). Returns None when no supported host
        is found.
        """
        soup = helpers.soupify(helpers.get(self.url).text)
        servers = soup.select('div.server-watch#server-watch > a')
        if servers:
            servers = [x['data-link'] for x in servers]
            logger.debug('Hosts: ' + str(
                [urllib.parse.urlparse(x).netloc for x in servers]))
        else:
            servers = [
                x['data-link']
                for x in soup.find_all('a', {'data-link': True,
                                             'class': 'panel-block'})
            ]

        sources = []
        for link in servers:
            # First supported host named in the URL wins, mirroring the
            # extractor/server pairing used by the config defaults.
            host = next((h for h in self._SUPPORTED_HOSTS if h in link), None)
            if host:
                sources.append({
                    'extractor': host,
                    'url': link,
                    'server': host,
                    'version': '1'
                })

        if sources:
            return self.sort_sources(sources)
        logger.error('No episode source was found, file might have been deleted.')

View File

@ -25,6 +25,7 @@ ALL_ANIME_SITES = [
('darkanime', 'darkanime', 'DarkAnime'),
('dbanimes', 'dbanimes', 'DBAnimes'),
('erairaws', 'erai-raws', 'EraiRaws'),
('egyanime', 'egyanime', 'EgyAnime'),
('fastani', 'fastani', 'FastAni'),
('itsaturday', 'itsaturday', 'Itsaturday'),
('justdubs', 'justdubs', 'JustDubs'),