Merge branch 'anime-dl:master' into KwikFix

master
nate-moo 2021-05-26 12:42:09 -04:00
commit 294a0aa157
7 changed files with 129 additions and 2 deletions

View File

@@ -93,6 +93,7 @@ Yeah. Me too! That's why this tool exists.
- Vidstream
- Voiranime
- Vostfree
- Wcostream
Sites that require Selenium **DO NOT** and **WILL NOT** work on mobile operating systems

View File

@@ -133,6 +133,10 @@ DEFAULT_CONFIG = {
'servers': ['vidstream', 'gcloud', 'yourupload', 'hydrax'],
'version': 'subbed',
},
'wcostream': {
'servers': ['vidstreampro', 'mcloud'],
'version': 'subbed',
},
'animeflix': {
'server': 'AUEngine',
'fallback_servers': ['FastStream'],

View File

@@ -67,6 +67,12 @@ ALL_EXTRACTORS = [
'regex': 'yourupload',
'class': 'Yourupload'
},
{
'sitename': 'wcostream',
'modulename': 'wcostream',
'regex': 'wcostream',
'class': 'WcoStream'
},
{
'sitename': 'vidstream',
'modulename': 'vidstream',

View File

@@ -0,0 +1,37 @@
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
import re
class WcoStream(BaseExtractor):
    """Extractor for wcostream.cc embeds hosted on vidstream.pro / mcloud.to.

    Resolves an embed URL to the host's JSON ``/info`` endpoint and returns
    the advertised stream sources.
    """

    def _get_data(self):
        """Return a list of ``{'stream_url': ...}`` dicts; empty on failure.

        Callers iterate the result and index each element with
        ``x['stream_url']``, so every path must return a list.
        """
        try:
            # Only these two embed hosts are supported.
            if self.url.startswith('https://vidstream.pro/e'):
                base_url = 'https://vidstream.pro'
            elif self.url.startswith('https://mcloud.to/e/'):
                base_url = 'https://mcloud.to'
            else:
                return []

            html = helpers.get(self.url, referer='https://wcostream.cc/')
            # The embed id sits between "/e/" and "?domain" in the URL.
            id_ = re.findall(r"/e/(.*?)\?domain", self.url)[0]
            # "skey" is a session token embedded in the page's JavaScript.
            skey = re.findall(r"skey\s=\s['\"](.*?)['\"];", html.text)[0]
            apiLink = f"{base_url}/info/{id_}?domain=wcostream.cc&skey={skey}"
            referer = f"{base_url}/e/{id_}?domain=wcostream.cc"
            response = helpers.get(apiLink, referer=referer).json()
            if response['success'] is True:
                return [
                    {'stream_url': x['file']}
                    for x in response['media']['sources']
                ]
            return []
        except Exception:
            # Best-effort extractor: swallow network/parse errors.
            # BUG FIX: previously returned {"stream_url": ''} here, which is
            # a dict, not the list every other path returns; iterating it
            # yields the key string and x['stream_url'] then raises
            # TypeError in the caller. Return an empty list instead.
            return []

View File

@@ -1,7 +1,7 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import re
class GenoAnime(Anime, sitename="genoanime"):
sitename = "genoanime"
@@ -38,4 +38,11 @@ class GenoAnimeEpisode(AnimeEpisode, sitename='genoanime'):
def _get_sources(self):
    """Return ``(extractor, url)`` pairs for this episode's video sources.

    Fetches the episode page, follows its iframe to the player page,
    extracts the player id from the embedded JavaScript, then queries the
    genovids.php endpoint for the actual video URLs.
    """
    soup = helpers.soupify(helpers.get(self.url))
    soup = helpers.soupify(helpers.get(soup.iframe.get("src")))
    # BUG FIX: removed a stale early `return [("no_extractor",
    # soup.source.get("src"))]` that predated the genovids.php API call
    # below and made the remainder of this method unreachable dead code.
    id_ = re.findall(r"data: {id: [\"'](.*?)[\"']}", str(soup))[0]
    response = helpers.post('https://genoanime.com/player/genovids.php', data={"id": id_}).json()  # noqa
    return [
        ("no_extractor", x['src'])
        for x in response['url']
    ]

View File

@@ -45,6 +45,7 @@ ALL_ANIME_SITES = [
('vidstream', 'vidstream', 'VidStream'),
# ('voiranime', 'voiranime', 'VoirAnime'),
('vostfree', 'vostfree', 'VostFree'),
('wcostream', 'wcostream', 'WcoStream'),
]

View File

@@ -0,0 +1,71 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.extractors import get_extractor
from anime_downloader.sites import helpers
import re
class WcoStream(Anime, sitename='wcostream'):
    """Site handler for wcostream.cc: search, episode list, metadata."""
    sitename = 'wcostream'

    @classmethod
    def search(cls, query):
        """Search wcostream.cc and return a list of SearchResult objects."""
        page = helpers.soupify(helpers.get(
            'https://wcostream.cc/search',
            params={'keyword': query}
        ))
        matches = []
        for item in page.select('.film_list-wrap > .flw-item'):
            year = item.select_one('.fd-infor > .fdi-item').text.strip()
            matches.append(SearchResult(
                title=item.find('img')['alt'],
                url=item.find('a')['href'],
                meta={'year': year},
                meta_info={
                    'version_key_dubbed': '(Dub)'
                }
            ))
        return matches

    def _scrape_episodes(self):
        """Collect episode watch-page URLs from the series page."""
        page = helpers.soupify(helpers.get(self.url))
        items = page.select_one('#content-episodes').select('ul.nav > li.nav-item')  # noqa
        links = []
        for item in items:
            href = item.find('a')['href']
            # Keep only real watch links; the list also holds nav entries.
            if 'https://wcostream.cc/watch' in href:
                links.append(href)
        return links

    def _scrape_metadata(self):
        """Derive the series title from the page's og:title meta tag."""
        page = helpers.soupify(helpers.get(self.url))
        og_title = page.select_one('meta[property="og:title"]')['content']
        self.title = og_title.split('Episode')[0].strip()
class WcoStreamEpisode(AnimeEpisode, sitename='wcostream'):
    """Episode handler for wcostream.cc; resolves each listed server
    through the 'wcostream' extractor."""

    def _get_sources(self):
        """Return ('no_extractor', stream_url) pairs for every server,
        with servers matching the configured preference tried first."""
        page = helpers.soupify(helpers.get(self.url))
        entries = []
        for item in page.select("#servers-list > ul > li"):
            entries.append({
                "name": item.find('span').text.strip(),
                "link": item.find('a')['data-embed']
            })
        # Stable sort on the preference flag, then full reversal: preferred
        # servers move to the front (tie order is reversed, matching the
        # original behaviour exactly).
        entries = sorted(entries, key=lambda e: e['name'].lower() in self.config['servers'][0].lower())[::-1]  # noqa
        streams = []
        for entry in entries:
            extractor = get_extractor('wcostream')(
                entry['link'],
                quality=self.quality,
                headers={}
            )
            streams.extend(
                ('no_extractor', src['stream_url'])
                for src in extractor._get_data()
            )  # noqa
        return streams