Merge pull request #588 from ArjixGamer/patch-27

Added Arabic provider EgyAnime
master
AbdullahM0hamed 2020-12-28 22:34:11 +00:00 committed by GitHub
commit c54c9d8927
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 347 additions and 246 deletions

View File

@ -74,6 +74,7 @@ Yeah. Me too! That's why this tool exists.
- Darkanime
- Dbanimes
- EraiRaws
- EgyAnime - usually m3u8 (good for streaming, not so much for downloading)
- FastAni
- GurminderBoparai (AnimeChameleon)
- itsaturday

View File

@ -48,6 +48,13 @@ DEFAULT_CONFIG = {
'animefrenzy': {
'version': 'subbed'
},
'egyanime': {
'version': 'subbed',
'servers': [
'clipwatching',
'streamtape'
]
},
'animebinge': {
'version': 'subbed',
'servers': [

View File

@ -0,0 +1,18 @@
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
from anime_downloader.util import deobfuscate_packed_js
import re
import json
class clipwatching(BaseExtractor):
    """Extractor for clipwatching embed pages.

    Scrapes the JavaScript ``sources: [...]`` array out of the embed
    page, coerces the JS object literal into valid JSON, and returns
    the first listed stream.
    """

    def _get_data(self):
        page = helpers.get(self.url).text
        raw = re.search(r"sources:\s*(\[.*?\])", page).group(1)
        # The page ships a JS object literal with bare keys; quote the
        # keys so json.loads will accept it.
        for bare, quoted in (('src:', '"src":'), ('type:', '"type":')):
            raw = raw.replace(bare, quoted)
        streams = json.loads(raw)
        return {
            'stream_url': streams[0]['src'],
            'referer': self.url
        }

View File

@ -1,176 +1,182 @@
from importlib import import_module

# (sitename, modulename, regex, class) for every supported extractor.
# Most entries share one name for all three lookup fields; the dicts in
# ALL_EXTRACTORS below are generated from these compact tuples.
_EXTRACTOR_SPECS = [
    ('rapidvideo', 'rapidvideo', 'rapidvideo', 'RapidVideo'),
    ('no_extractor', 'fake_extractor', 'no_extractor', 'AnimeVideo'),
    ('animeonline360', 'animeonline360', 'animeonline360', 'AnimeOnline360'),
    ('stream.moe', 'moe', 'stream.moe', 'StreamMoe'),
    ('streamango', 'streamango', 'streamango', 'Streamango'),
    ('mp4upload', 'mp4upload', 'mp4upload', 'MP4Upload'),
    ('kwik', 'kwik', 'kwik', 'Kwik'),
    ('trollvid', 'trollvid', 'trollvid', 'Trollvid'),
    ('mp4sh', 'mp4sh', 'mp4sh', 'MP4Sh'),
    ('yourupload', 'yourupload', 'yourupload', 'Yourupload'),
    ('vidstream', 'vidstream', 'vidstream', 'VidStream'),
    ('haloani', 'haloani', 'haloani', 'Haloani'),
    ('gcloud', 'gcloud', 'gcloud', 'Gcloud'),
    ('xstreamcdn', 'xstreamcdn', 'xstreamcdn', 'XStreamCDN'),
    ('cloud9', 'cloud9', 'cloud9', 'Cloud9'),
    ('hydrax', 'hydrax', 'hydrax', 'Hydrax'),
    ('streamx', 'streamx', 'streamx', 'StreamX'),
    ('3rdparty', '3rdparty', '3rdparty', 'Thirdparty'),
    ('yify', 'yify', 'yify', 'Yify'),
    ('mixdrop', 'mixdrop', 'mixdrop', 'Mixdrop'),
    ('sendvid', 'sendvid', 'sendvid', 'SendVid'),
    ('sibnet', 'sibnet', 'sibnet', 'SibNet'),
    ('uqload', 'uqload', 'uqload', 'Uqload'),
    ('vudeo', 'vudeo', 'vudeo', 'Vudeo'),
    ('eplay', 'eplay', 'eplay', 'EPlay'),
    ('streamtape', 'streamtape', 'streamtape', 'StreamTape'),
    ('streamium', 'streamium', 'streamium', 'Streamium'),
]

ALL_EXTRACTORS = [
    {'sitename': s, 'modulename': m, 'regex': r, 'class': c}
    for s, m, r, c in _EXTRACTOR_SPECS
]


def get_extractor(name):
    """Return the extractor class whose 'regex' key occurs in *name*.

    Despite the key name, the match is a plain case-insensitive
    substring test. The matching extractor module is imported lazily;
    returns None when no extractor matches.
    """
    lowered = name.lower()
    for spec in ALL_EXTRACTORS:
        if spec['regex'] not in lowered:
            continue
        module = import_module(
            'anime_downloader.extractors.{}'.format(spec['modulename'])
        )
        return getattr(module, spec['class'])
from importlib import import_module

# (sitename, modulename, regex, class) for every supported extractor.
# Most entries share one name for all three lookup fields; the dicts in
# ALL_EXTRACTORS below are generated from these compact tuples.
_EXTRACTOR_SPECS = [
    ('rapidvideo', 'rapidvideo', 'rapidvideo', 'RapidVideo'),
    ('clipwatching', 'clipwatching', 'clipwatching', 'clipwatching'),
    ('no_extractor', 'fake_extractor', 'no_extractor', 'AnimeVideo'),
    ('animeonline360', 'animeonline360', 'animeonline360', 'AnimeOnline360'),
    ('stream.moe', 'moe', 'stream.moe', 'StreamMoe'),
    ('streamango', 'streamango', 'streamango', 'Streamango'),
    ('mp4upload', 'mp4upload', 'mp4upload', 'MP4Upload'),
    ('kwik', 'kwik', 'kwik', 'Kwik'),
    ('trollvid', 'trollvid', 'trollvid', 'Trollvid'),
    ('mp4sh', 'mp4sh', 'mp4sh', 'MP4Sh'),
    ('yourupload', 'yourupload', 'yourupload', 'Yourupload'),
    ('vidstream', 'vidstream', 'vidstream', 'VidStream'),
    ('haloani', 'haloani', 'haloani', 'Haloani'),
    ('gcloud', 'gcloud', 'gcloud', 'Gcloud'),
    ('xstreamcdn', 'xstreamcdn', 'xstreamcdn', 'XStreamCDN'),
    ('cloud9', 'cloud9', 'cloud9', 'Cloud9'),
    ('hydrax', 'hydrax', 'hydrax', 'Hydrax'),
    ('streamx', 'streamx', 'streamx', 'StreamX'),
    ('3rdparty', '3rdparty', '3rdparty', 'Thirdparty'),
    ('yify', 'yify', 'yify', 'Yify'),
    ('mixdrop', 'mixdrop', 'mixdrop', 'Mixdrop'),
    ('sendvid', 'sendvid', 'sendvid', 'SendVid'),
    ('sibnet', 'sibnet', 'sibnet', 'SibNet'),
    ('uqload', 'uqload', 'uqload', 'Uqload'),
    ('vudeo', 'vudeo', 'vudeo', 'Vudeo'),
    ('eplay', 'eplay', 'eplay', 'EPlay'),
    ('streamtape', 'streamtape', 'streamtape', 'StreamTape'),
    ('streamium', 'streamium', 'streamium', 'Streamium'),
]

ALL_EXTRACTORS = [
    {'sitename': s, 'modulename': m, 'regex': r, 'class': c}
    for s, m, r, c in _EXTRACTOR_SPECS
]


def get_extractor(name):
    """Return the extractor class whose 'regex' key occurs in *name*.

    Despite the key name, the match is a plain case-insensitive
    substring test. The matching extractor module is imported lazily;
    returns None when no extractor matches.
    """
    lowered = name.lower()
    for spec in ALL_EXTRACTORS:
        if spec['regex'] not in lowered:
            continue
        module = import_module(
            'anime_downloader.extractors.{}'.format(spec['modulename'])
        )
        return getattr(module, spec['class'])

View File

@ -0,0 +1,68 @@
import logging
import re
import urllib.parse
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class EgyAnime(Anime, sitename='egyanime'):
    """Anime provider for egyanime.com (Arabic streaming site)."""

    sitename = 'egyanime'

    @classmethod
    def search(cls, query):
        # a.php appears to be the site's search/autocomplete endpoint:
        # it returns a fragment of <a> tags, one per matching show, with
        # site-relative hrefs.
        soup = helpers.soupify(helpers.get('https://www.egyanime.com/a.php', params={'term': query}).text)
        search_results = [
            SearchResult(
                title = i.text,
                url= "https://www.egyanime.com/" + i['href']
            )
            for i in soup.find_all('a', href=True)
        ]
        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url).text)
        # Episode links are rendered as "tag" pills on the show page.
        eps = ["https://www.egyanime.com/" + x['href'] for x in soup.select('a.tag.is-dark.is-medium.m-5')]
        if len(eps) == 0:
            # No episode list: treat the page itself as a single episode.
            # NOTE(review): str.replace rewrites EVERY occurrence of 'do'
            # in the URL (presumably meant to turn a /do/ "details" path
            # into /w/ "watch") -- confirm slugs can never contain 'do'.
            return [self.url.replace('do', 'w')]
        return eps

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url).text)
        # Page titles look like "<name> مشاهدة ..." ("watch" in Arabic);
        # keep only the show name before that marker.
        self.title = soup.title.text.split('مشاهدة')[0].strip()
class EgyAnimeEpisode(AnimeEpisode, sitename='egyanime'):
    """Episode scraper for egyanime.com watch pages."""

    # Hosts we have extractors for, matched by substring against each
    # embed URL. Tuple order preserves the original clipwatching-first
    # priority; 'extractor' and 'server' share the host name.
    SUPPORTED_SERVERS = ('clipwatching', 'streamtape')

    def _get_sources(self):
        """Collect playable sources for this episode.

        Scrapes the watch page for server embed links, keeps only hosts
        listed in SUPPORTED_SERVERS, and returns the sorted source list.
        Returns None (after logging an error) when no usable link is
        found, e.g. when the file has been deleted.
        """
        soup = helpers.soupify(helpers.get(self.url).text)
        # Newer pages put the embed links inside #server-watch ...
        servers = soup.select('div.server-watch#server-watch > a')
        if servers:
            servers = [x['data-link'] for x in servers]
            logger.debug('Hosts: ' + str([urllib.parse.urlparse(x).netloc for x in servers]))
        else:
            # ... older pages list them as panel blocks instead.
            servers = soup.find_all('a', {'data-link': True, 'class': 'panel-block'})
            servers = [x['data-link'] for x in servers]

        sources = []
        for link in servers:
            for host in self.SUPPORTED_SERVERS:
                if host in link:
                    sources.append({
                        'extractor': host,
                        'url': link,
                        'server': host,
                        'version': '1'
                    })
                    break

        if sources:
            return self.sort_sources(sources)
        logger.error('No episode source was found, file might have been deleted.')
        return

View File

@ -1,70 +1,71 @@
from importlib import import_module

# (filename, sitename, classname) for every supported site.
ALL_ANIME_SITES = [
    ('_4anime', '4anime', 'Anime4'),
    ('anitube', 'anitube', 'AniTube'),
    ('anime8', 'anime8', 'Anime8'),
    ('animebinge', 'animebinge', 'AnimeBinge'),
    ('animechameleon', 'gurminder', 'AnimeChameleon'),
    ('animedaisuki', 'animedaisuki', 'Animedaisuki'),
    ('animeflix', 'animeflix', 'AnimeFlix'),
    ('animeflv', 'animeflv', 'Animeflv'),
    ('animefreak', 'animefreak', 'AnimeFreak'),
    ('animefree', 'animefree', 'AnimeFree'),
    ('animefrenzy', 'animefrenzy', 'AnimeFrenzy'),
    ('animekisa', 'animekisa', 'AnimeKisa'),
    ('animetake', 'animetake', 'AnimeTake'),
    ('animeonline', 'animeonline360', 'AnimeOnline'),
    ('animeout', 'animeout', 'AnimeOut'),
    ('animerush', 'animerush', 'AnimeRush'),
    ('animesimple', 'animesimple', 'AnimeSimple'),
    ('animesuge', 'animesuge', 'AnimeSuge'),
    ('animevibe', 'animevibe', 'AnimeVibe'),
    ('animixplay', 'animixplay', 'AniMixPlay'),
    ('darkanime', 'darkanime', 'DarkAnime'),
    ('dbanimes', 'dbanimes', 'DBAnimes'),
    ('erairaws', 'erai-raws', 'EraiRaws'),
    ('fastani', 'fastani', 'FastAni'),
    ('itsaturday', 'itsaturday', 'Itsaturday'),
    ('justdubs', 'justdubs', 'JustDubs'),
    ('kickass', 'kickass', 'KickAss'),
    ('kissanimex', 'kissanimex', 'KissAnimeX'),
    ('kisscartoon', 'kisscartoon', 'KissCartoon'),
    ('nineanime', '9anime', 'NineAnime'),
    ('nyaa', 'nyaa', 'Nyaa'),
    ('putlockers', 'putlockers', 'PutLockers'),
    ('ryuanime', 'ryuanime', 'RyuAnime'),
    ('subsplease', 'subsplease', 'SubsPlease'),
    ('twistmoe', 'twist.moe', 'TwistMoe'),
    ('tenshimoe', 'tenshi.moe', 'TenshiMoe'),
    ('vidstream', 'vidstream', 'VidStream'),
    ('voiranime', 'voiranime', 'VoirAnime'),
    ('vostfree', 'vostfree', 'VostFree'),
]


def get_anime_class(url):
    """
    Get anime class corresponding to url or name.

    See :py:data:`anime_downloader.sites.ALL_ANIME_SITES` to get the possible anime sites.

    Parameters
    ----------
    url: string
        URL of the anime.

    Returns
    -------
    :py:class:`anime_downloader.sites.anime.Anime`
        Concrete implementation of :py:class:`anime_downloader.sites.anime.Anime`,
        or None when no known site name occurs in *url*.
    """
    for site in ALL_ANIME_SITES:
        if site[1] in url:
            # Import lazily so unused site modules carry no startup cost.
            # (The previous ``try: ... except ImportError: raise`` was a
            # no-op re-raise and has been dropped.)
            module = import_module(
                'anime_downloader.sites.{}'.format(site[0])
            )
            return getattr(module, site[2])
from importlib import import_module

# (filename, sitename, classname) for every supported site.
ALL_ANIME_SITES = [
    ('_4anime', '4anime', 'Anime4'),
    ('anitube', 'anitube', 'AniTube'),
    ('anime8', 'anime8', 'Anime8'),
    ('animebinge', 'animebinge', 'AnimeBinge'),
    ('animechameleon', 'gurminder', 'AnimeChameleon'),
    ('animedaisuki', 'animedaisuki', 'Animedaisuki'),
    ('animeflix', 'animeflix', 'AnimeFlix'),
    ('animeflv', 'animeflv', 'Animeflv'),
    ('animefreak', 'animefreak', 'AnimeFreak'),
    ('animefree', 'animefree', 'AnimeFree'),
    ('animefrenzy', 'animefrenzy', 'AnimeFrenzy'),
    ('animekisa', 'animekisa', 'AnimeKisa'),
    ('animetake', 'animetake', 'AnimeTake'),
    ('animeonline', 'animeonline360', 'AnimeOnline'),
    ('animeout', 'animeout', 'AnimeOut'),
    ('animerush', 'animerush', 'AnimeRush'),
    ('animesimple', 'animesimple', 'AnimeSimple'),
    ('animesuge', 'animesuge', 'AnimeSuge'),
    ('animevibe', 'animevibe', 'AnimeVibe'),
    ('animixplay', 'animixplay', 'AniMixPlay'),
    ('darkanime', 'darkanime', 'DarkAnime'),
    ('dbanimes', 'dbanimes', 'DBAnimes'),
    ('erairaws', 'erai-raws', 'EraiRaws'),
    ('egyanime', 'egyanime', 'EgyAnime'),
    ('fastani', 'fastani', 'FastAni'),
    ('itsaturday', 'itsaturday', 'Itsaturday'),
    ('justdubs', 'justdubs', 'JustDubs'),
    ('kickass', 'kickass', 'KickAss'),
    ('kissanimex', 'kissanimex', 'KissAnimeX'),
    ('kisscartoon', 'kisscartoon', 'KissCartoon'),
    ('nineanime', '9anime', 'NineAnime'),
    ('nyaa', 'nyaa', 'Nyaa'),
    ('putlockers', 'putlockers', 'PutLockers'),
    ('ryuanime', 'ryuanime', 'RyuAnime'),
    ('subsplease', 'subsplease', 'SubsPlease'),
    ('twistmoe', 'twist.moe', 'TwistMoe'),
    ('tenshimoe', 'tenshi.moe', 'TenshiMoe'),
    ('vidstream', 'vidstream', 'VidStream'),
    ('voiranime', 'voiranime', 'VoirAnime'),
    ('vostfree', 'vostfree', 'VostFree'),
]


def get_anime_class(url):
    """
    Get anime class corresponding to url or name.

    See :py:data:`anime_downloader.sites.ALL_ANIME_SITES` to get the possible anime sites.

    Parameters
    ----------
    url: string
        URL of the anime.

    Returns
    -------
    :py:class:`anime_downloader.sites.anime.Anime`
        Concrete implementation of :py:class:`anime_downloader.sites.anime.Anime`,
        or None when no known site name occurs in *url*.
    """
    for site in ALL_ANIME_SITES:
        if site[1] in url:
            # Import lazily so unused site modules carry no startup cost.
            # (The previous ``try: ... except ImportError: raise`` was a
            # no-op re-raise and has been dropped.)
            module = import_module(
                'anime_downloader.sites.{}'.format(site[0])
            )
            return getattr(module, site[2])