Update ryuanime

master
AbdullahM0hamed 2021-03-21 20:20:31 +00:00
parent e8ed14f193
commit 2c7c37bc12
3 changed files with 39 additions and 18 deletions

Changed file 1 of 3

@@ -117,7 +117,12 @@ DEFAULT_CONFIG = {
         },
         'ryuanime': {
             'version': 'subbed',
-            'server': 'trollvid',
+            'servers': [
+                'vidstream',
+                'mp4upload',
+                'xstreamcdn',
+                'trollvid'
+            ]
         },
         'animekisa': {
             'server': 'gcloud',
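Background note, not part of the commit: the single 'server' key becomes an ordered 'servers' list, but this diff does not show how that list is consumed. A minimal sketch of one plausible priority-order fallback, where pick_source and the sources data are invented for illustration:

# Sketch: try the configured servers in priority order and keep the
# first one that actually produced a stream URL. `sources` is invented
# example data; the real consumption logic lives elsewhere in the
# codebase and is not shown by this diff.
servers = ['vidstream', 'mp4upload', 'xstreamcdn', 'trollvid']

sources = {                      # hypothetical extractor results
    'mp4upload': 'https://mp4upload.com/embed-abc123.html',
    'trollvid': 'https://trollvid.net/embed/xyz789',
}

def pick_source(servers, sources):
    for server in servers:
        url = sources.get(server)
        if url:
            return server, url
    return None, None

print(pick_source(servers, sources))
# ('mp4upload', 'https://mp4upload.com/embed-abc123.html')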

Changed file 2 of 3

@@ -28,7 +28,8 @@ class VidStream(BaseExtractor):
         }
         url = self.url.replace('https:////', 'https://')
-        url = url.replace('https://gogo-stream.com/download', 'https://gogo-stream.com/server.php')
+        url = url.replace('https://gogo-stream.com/download',
+                          'https://gogo-stream.com/server.php')
         soup = helpers.soupify(helpers.get(url))
         linkserver = soup.select('li.linkserver')
         logger.debug('Linkserver: {}'.format(linkserver))
@@ -64,7 +65,11 @@ class VidStream(BaseExtractor):
         # <input type="hidden" id="title" value="Yakusoku+no+Neverland">
         # <input type="hidden" id="typesub" value="SUB">
         # Used to create a download url.
-        soup_id = soup.select('input#id')[0]['value']
+        try:
+            soup_id = soup.select('input#id')[0]['value']
+        except IndexError:
+            return self._get_link_new(soup)
         soup_title = soup.select('input#title')[0]['value']
         soup_typesub = soup.select('input#typesub')[0].get('value', 'SUB')
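Why the try/except works (background, not part of the diff): BeautifulSoup's select() returns an empty list when nothing matches, so on pages that lack the hidden input#id field the [0] index raises IndexError and control falls through to _get_link_new. A tiny self-contained check, with made-up markup:

from bs4 import BeautifulSoup

# Made-up markup resembling the newer layout, which has no hidden
# <input id="id"> element.
soup = BeautifulSoup('<div class="mirror_link"></div>', 'html.parser')

print(soup.select('input#id'))     # [] -- no match
try:
    soup.select('input#id')[0]     # indexing the empty list...
except IndexError:
    print('falls back to _get_link_new()')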
@@ -103,6 +108,11 @@ class VidStream(BaseExtractor):
         return {'stream_url': ''}

+    def _get_link_new(self, soup):
+        link_buttons = soup.select('div.mirror_link')[
+            0].select('div.dowload > a[href]')
+        return {'stream_url': link_buttons[0].get('href')}
+

 class Extractor:
     """dummy class to prevent changing self"""
@@ -110,4 +120,3 @@ class Extractor:
     def __init__(self, dictionary):
         for k, v in dictionary.items():
             setattr(self, k, v)
-
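For reference, what the new _get_link_new helper selects: the first download button inside the mirror_link block (the 'dowload' spelling is copied verbatim from the selector above). A sketch against invented markup approximating such a page:

from bs4 import BeautifulSoup

# Invented markup approximating the download page the helper targets;
# the class names mirror the selectors used in _get_link_new above.
html = '''
<div class="mirror_link">
  <div class="dowload"><a href="https://example.com/ep1-360p.mp4">Download (360P - mp4)</a></div>
  <div class="dowload"><a href="https://example.com/ep1-720p.mp4">Download (720P - mp4)</a></div>
</div>
'''
soup = BeautifulSoup(html, 'html.parser')
link_buttons = soup.select('div.mirror_link')[0].select('div.dowload > a[href]')
print({'stream_url': link_buttons[0].get('href')})
# {'stream_url': 'https://example.com/ep1-360p.mp4'}  -- first button wins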

Changed file 3 of 3

@@ -22,13 +22,16 @@ class RyuAnime(Anime, sitename='ryuanime'):
     @classmethod
     def search(cls, query):
-        soup = helpers.soupify(helpers.get("https://ryuanime.com/browse-anime", params={"search": query}))
-        result_data = soup.select("li.list-inline-item:has(p.anime-name):has(a.ani-link)")
+        soup = helpers.soupify(helpers.get(
+            "https://ryuanime.com/browse-anime", params={"search": query}))
+        result_data = soup.select(
+            "li.list-inline-item:has(p.anime-name):has(a.ani-link)")
         search_results = [
             SearchResult(
                 title=result.select("p.anime-name")[0].text,
-                url='https://ryuanime.com' + result.select("a.ani-link")[0].get("href")
+                url='https://ryuanime.com' +
+                result.select("a.ani-link")[0].get("href")
             )
             for result in result_data
         ]
@@ -36,7 +39,8 @@ class RyuAnime(Anime, sitename='ryuanime'):
     def _scrape_episodes(self):
         soup = helpers.soupify(helpers.get(self.url))
-        episodes = ['https://ryuanime.com' + x.get("href") for x in soup.select("li.jt-di > a")]
+        episodes = ['https://ryuanime.com' +
+                    x.get("href") for x in soup.select("li.jt-di > a")]

         if len(episodes) == 0:
             logger.warning("No episodes found")
@@ -49,17 +53,16 @@ class RyuAnime(Anime, sitename='ryuanime'):

 class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
-    def getLink(self, name, _id):
-        if name == "trollvid":
-            return "https://trollvid.net/embed/" + _id
-        elif name == "mp4upload":
-            return f"https://mp4upload.com/embed-{_id}.html"
-        elif name == "xstreamcdn":
-            return f"https://xstreamcdn.com/v/" + _id
-
     def _get_sources(self):
         page = helpers.get(self.url).text

+        server_links = {
+            'trollvid': 'https://trollvid.net/embed/{}',
+            'mp4upload': 'https://mp4upload.com/embed-{}.html',
+            'xstreamcdn': 'https://xstreamcdn.com/v/{}',
+            'vidstreaming': 'https://vidstreaming.io/download?id={}'
+        }
+
         # Example:
         """
         [
@@ -69,16 +72,20 @@ class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
             }
         ]
         """
-        hosts = json.loads(re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))
+        hosts = json.loads(
+            re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))

         sources_list = []

         for host in hosts:
             name = host.get("host")
             _id = host.get("id")
-            link = self.getLink(name, _id)
+            link = server_links[name].format(_id)

             if link:
+                if name == 'vidstreaming':
+                    name = 'vidstream'
                 sources_list.append({
                     "extractor": name,
                     "url": link,