Merge pull request #649 from AbdullahM0hamed/ryuanime

Update ryuanime
Branch: master
Authored by Red on 2021-03-21 20:23:23 +00:00, committed by GitHub
commit a47a5249e6
3 changed files with 39 additions and 18 deletions


@@ -117,7 +117,12 @@ DEFAULT_CONFIG = {
         },
         'ryuanime': {
             'version': 'subbed',
-            'server': 'trollvid',
+            'servers': [
+                'vidstream',
+                'mp4upload',
+                'xstreamcdn',
+                'trollvid'
+            ]
         },
         'animekisa': {
             'server': 'gcloud',
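The config change above replaces the single 'server' key with an ordered 'servers' list. As a minimal sketch of how such a preference list could be consumed, the snippet below assumes a hypothetical pick_stream() helper operating on the source dicts that RyuAnimeEpisode._get_sources() builds later in this diff; the real selection logic in anime-downloader is not shown here.

    servers = ['vidstream', 'mp4upload', 'xstreamcdn', 'trollvid']

    def pick_stream(sources):
        # sources: list of {'extractor': name, 'url': link} dicts, as built
        # by _get_sources() further down in this diff.
        by_name = {s['extractor']: s for s in sources}
        for name in servers:          # honour the configured preference order
            if name in by_name:
                return by_name[name]  # first configured server with a link
        return None

    sources = [{'extractor': 'trollvid', 'url': 'https://trollvid.net/embed/abc'},
               {'extractor': 'mp4upload', 'url': 'https://mp4upload.com/embed-abc.html'}]
    print(pick_stream(sources))
    # {'extractor': 'mp4upload', 'url': 'https://mp4upload.com/embed-abc.html'}

Keeping the list ordered means the first entry acts as the preferred server and the remaining ones serve as fallbacks, at least in this sketch.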


@@ -28,7 +28,8 @@ class VidStream(BaseExtractor):
         }
         url = self.url.replace('https:////', 'https://')
-        url = url.replace('https://gogo-stream.com/download', 'https://gogo-stream.com/server.php')
+        url = url.replace('https://gogo-stream.com/download',
+                          'https://gogo-stream.com/server.php')
         soup = helpers.soupify(helpers.get(url))
         linkserver = soup.select('li.linkserver')
         logger.debug('Linkserver: {}'.format(linkserver))
@@ -64,7 +65,11 @@ class VidStream(BaseExtractor):
         # <input type="hidden" id="title" value="Yakusoku+no+Neverland">
         # <input type="hidden" id="typesub" value="SUB">
         # Used to create a download url.
-        soup_id = soup.select('input#id')[0]['value']
+        try:
+            soup_id = soup.select('input#id')[0]['value']
+        except IndexError:
+            return self._get_link_new(soup)
         soup_title = soup.select('input#title')[0]['value']
         soup_typesub = soup.select('input#typesub')[0].get('value', 'SUB')
@@ -103,6 +108,11 @@ class VidStream(BaseExtractor):
         return {'stream_url': ''}
 
+    def _get_link_new(self, soup):
+        link_buttons = soup.select('div.mirror_link')[
+            0].select('div.dowload > a[href]')
+        return {'stream_url': link_buttons[0].get('href')}
+
 
 class Extractor:
     """dummy class to prevent changing self"""
@@ -110,4 +120,3 @@ class Extractor:
     def __init__(self, dictionary):
         for k, v in dictionary.items():
             setattr(self, k, v)
-
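The new _get_link_new fallback only runs when the download page has no input#id field (the IndexError branch added above). Below is a small self-contained sketch of the markup shape the method appears to expect; the HTML fragment is invented for illustration and real pages may differ. Note the selector deliberately spells the class 'dowload', matching the selector used in the diff.

    from bs4 import BeautifulSoup

    # Invented stand-in for a gogo-stream download page.
    html = '''
    <div class="mirror_link">
      <div class="dowload">
        <a href="https://example.invalid/ep1.mp4">Download (360P - mp4)</a>
      </div>
    </div>
    '''

    soup = BeautifulSoup(html, 'html.parser')
    link_buttons = soup.select('div.mirror_link')[
        0].select('div.dowload > a[href]')
    print({'stream_url': link_buttons[0].get('href')})
    # {'stream_url': 'https://example.invalid/ep1.mp4'}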


@@ -22,13 +22,16 @@ class RyuAnime(Anime, sitename='ryuanime'):
     @classmethod
     def search(cls, query):
-        soup = helpers.soupify(helpers.get("https://ryuanime.com/browse-anime", params={"search": query}))
-        result_data = soup.select("li.list-inline-item:has(p.anime-name):has(a.ani-link)")
+        soup = helpers.soupify(helpers.get(
+            "https://ryuanime.com/browse-anime", params={"search": query}))
+        result_data = soup.select(
+            "li.list-inline-item:has(p.anime-name):has(a.ani-link)")
 
         search_results = [
             SearchResult(
                 title=result.select("p.anime-name")[0].text,
-                url='https://ryuanime.com' + result.select("a.ani-link")[0].get("href")
+                url='https://ryuanime.com' +
+                result.select("a.ani-link")[0].get("href")
             )
             for result in result_data
         ]
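The reformatted search selector above leans on the :has() pseudo-class, which BeautifulSoup's select() supports through soupsieve, so only list items containing both an anime name and a link are kept. A self-contained illustration on invented markup follows; the real listing page may look different.

    from bs4 import BeautifulSoup  # select() delegates CSS matching to soupsieve

    html = '''
    <li class="list-inline-item">
      <p class="anime-name">Show A</p><a class="ani-link" href="/anime/1">watch</a>
    </li>
    <li class="list-inline-item"><p class="anime-name">No link here</p></li>
    '''

    soup = BeautifulSoup(html, 'html.parser')
    items = soup.select("li.list-inline-item:has(p.anime-name):has(a.ani-link)")
    for item in items:
        print(item.select("p.anime-name")[0].text,
              'https://ryuanime.com' + item.select("a.ani-link")[0].get("href"))
    # Show A https://ryuanime.com/anime/1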
@@ -36,7 +39,8 @@ class RyuAnime(Anime, sitename='ryuanime'):
     def _scrape_episodes(self):
         soup = helpers.soupify(helpers.get(self.url))
-        episodes = ['https://ryuanime.com' + x.get("href") for x in soup.select("li.jt-di > a")]
+        episodes = ['https://ryuanime.com' +
+                    x.get("href") for x in soup.select("li.jt-di > a")]
 
         if len(episodes) == 0:
             logger.warning("No episodes found")
@@ -49,17 +53,16 @@ class RyuAnime(Anime, sitename='ryuanime'):
 class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
-    def getLink(self, name, _id):
-        if name == "trollvid":
-            return "https://trollvid.net/embed/" + _id
-        elif name == "mp4upload":
-            return f"https://mp4upload.com/embed-{_id}.html"
-        elif name == "xstreamcdn":
-            return f"https://xstreamcdn.com/v/" + _id
-
     def _get_sources(self):
         page = helpers.get(self.url).text
 
+        server_links = {
+            'trollvid': 'https://trollvid.net/embed/{}',
+            'mp4upload': 'https://mp4upload.com/embed-{}.html',
+            'xstreamcdn': 'https://xstreamcdn.com/v/{}',
+            'vidstreaming': 'https://vidstreaming.io/download?id={}'
+        }
+
         # Example:
         """
         [
@@ -69,16 +72,20 @@ class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
             }
         ]
         """
-        hosts = json.loads(re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))
+        hosts = json.loads(
+            re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))
 
         sources_list = []
 
         for host in hosts:
             name = host.get("host")
             _id = host.get("id")
-            link = self.getLink(name, _id)
+            link = server_links[name].format(_id)
 
             if link:
+                if name == 'vidstreaming':
+                    name = 'vidstream'
+
                 sources_list.append({
                     "extractor": name,
                     "url": link,
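To make the reworked _get_sources flow concrete, here is a self-contained sketch. The page excerpt and episode id are invented, but the regex, the server_links formatting and the 'vidstreaming' to 'vidstream' rename mirror the lines added above.

    import json
    import re

    # Invented excerpt of an episode page; real markup may differ.
    page = 'let episode = { videos: [{"host": "vidstreaming", "id": "MTIzNDU="}] };'

    server_links = {
        'trollvid': 'https://trollvid.net/embed/{}',
        'mp4upload': 'https://mp4upload.com/embed-{}.html',
        'xstreamcdn': 'https://xstreamcdn.com/v/{}',
        'vidstreaming': 'https://vidstreaming.io/download?id={}'
    }

    hosts = json.loads(
        re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))

    sources_list = []
    for host in hosts:
        name, _id = host.get("host"), host.get("id")
        link = server_links[name].format(_id)
        if link:
            if name == 'vidstreaming':   # rename to match the 'servers' config key
                name = 'vidstream'
            sources_list.append({"extractor": name, "url": link})

    print(sources_list)
    # [{'extractor': 'vidstream', 'url': 'https://vidstreaming.io/download?id=MTIzNDU='}]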