Merge pull request #681 from ArjixWasTaken/patch-32

Added provider wcostream
master
Red 2021-05-23 23:34:38 +01:00 committed by GitHub
commit e3889d0e15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 120 additions and 0 deletions

View File

@ -93,6 +93,7 @@ Yeah. Me too! That's why this tool exists.
- Vidstream
- Voiranime
- Vostfree
- Wcostream
Sites that require Selenium **DO NOT** and **WILL NOT** work on mobile operating systems.

View File

@ -133,6 +133,10 @@ DEFAULT_CONFIG = {
'servers': ['vidstream', 'gcloud', 'yourupload', 'hydrax'],
'version': 'subbed',
},
'wcostream': {
'servers': ['vidstreampro', 'mcloud'],
'version': 'subbed',
},
'animeflix': {
'server': 'AUEngine',
'fallback_servers': ['FastStream'],

View File

@ -67,6 +67,12 @@ ALL_EXTRACTORS = [
'regex': 'yourupload',
'class': 'Yourupload'
},
{
'sitename': 'wcostream',
'modulename': 'wcostream',
'regex': 'wcostream',
'class': 'WcoStream'
},
{
'sitename': 'vidstream',
'modulename': 'vidstream',

View File

@ -0,0 +1,37 @@
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
import re
class WcoStream(BaseExtractor):
    """Extractor for wcostream.cc embeds hosted on vidstream.pro / mcloud.to."""

    def _get_data(self):
        """Return a list of ``{'stream_url': ...}`` dicts for the embed at self.url.

        Always returns a list (possibly empty) so callers can iterate the
        result unconditionally: unsupported hosts, API failures, and any
        scraping exception all yield ``[]``.
        """
        try:
            # Only two embed hosts are supported; anything else has no sources.
            if self.url.startswith('https://vidstream.pro/e'):
                base_url = 'https://vidstream.pro'
            elif self.url.startswith('https://mcloud.to/e/'):
                base_url = 'https://mcloud.to'
            else:
                return []

            html = helpers.get(self.url, referer='https://wcostream.cc/')
            # The embed id (from the URL) and session key (from the embed
            # page's JS) are both required to build the info API request.
            id_ = re.findall(r"/e/(.*?)\?domain", self.url)[0]
            skey = re.findall(r"skey\s=\s['\"](.*?)['\"];", html.text)[0]

            api_link = f"{base_url}/info/{id_}?domain=wcostream.cc&skey={skey}"
            referer = f"{base_url}/e/{id_}?domain=wcostream.cc"
            response = helpers.get(api_link, referer=referer).json()

            if response.get('success') is True:
                return [
                    {'stream_url': source['file']}
                    for source in response['media']['sources']
                ]
            return []
        except Exception:
            # BUG FIX: this previously returned a bare dict
            # ({"stream_url": ''}), but callers iterate the return value as a
            # list of source dicts and would crash indexing the string key.
            # Best-effort behavior is preserved by returning an empty list.
            return []

View File

@ -44,6 +44,7 @@ ALL_ANIME_SITES = [
('vidstream', 'vidstream', 'VidStream'),
# ('voiranime', 'voiranime', 'VoirAnime'),
('vostfree', 'vostfree', 'VostFree'),
('wcostream', 'wcostream', 'WcoStream'),
]

View File

@ -0,0 +1,71 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.extractors import get_extractor
from anime_downloader.sites import helpers
import re
class WcoStream(Anime, sitename='wcostream'):
    """Anime scraper for wcostream.cc."""
    sitename = 'wcostream'

    @classmethod
    def search(cls, query):
        """Search wcostream.cc for *query* and return SearchResult objects."""
        page = helpers.get(
            'https://wcostream.cc/search',
            params={'keyword': query}
        )
        soup = helpers.soupify(page)

        matches = []
        for item in soup.select('.film_list-wrap > .flw-item'):
            year = item.select_one('.fd-infor > .fdi-item').text.strip()
            matches.append(SearchResult(
                title=item.find('img')['alt'],
                url=item.find('a')['href'],
                meta={'year': year},
                meta_info={
                    'version_key_dubbed': '(Dub)'
                }
            ))
        return matches

    def _scrape_episodes(self):
        """Collect episode watch-page URLs from the series page."""
        soup = helpers.soupify(helpers.get(self.url))
        items = soup.select_one('#content-episodes').select('ul.nav > li.nav-item')  # noqa

        links = []
        for item in items:
            href = item.find('a')['href']
            # Only keep entries that point at actual watch pages.
            if 'https://wcostream.cc/watch' in href:
                links.append(href)
        return links

    def _scrape_metadata(self):
        """Derive the series title from the page's og:title meta tag."""
        soup = helpers.soupify(helpers.get(self.url))
        og_title = soup.select_one('meta[property="og:title"]')['content']
        self.title = og_title.split('Episode')[0].strip()
class WcoStreamEpisode(AnimeEpisode, sitename='wcostream'):
    """Episode scraper: resolves stream URLs from the episode's server list."""

    def _get_sources(self):
        """Return a list of ``('no_extractor', stream_url)`` tuples."""
        soup = helpers.soupify(helpers.get(self.url))

        server_entries = []
        for node in soup.select("#servers-list > ul > li"):
            server_entries.append({
                "name": node.find('span').text.strip(),
                "link": node.find('a')['data-embed']
            })

        # Servers whose name appears in the first configured server string
        # sort last, then the slice reversal puts them first. Kept as
        # sorted(...)[::-1] rather than reverse=True so that tie ordering
        # matches the original implementation exactly (stable sort + reverse
        # flips ties; reverse=True does not).
        preferred = self.config['servers'][0].lower()
        server_entries = sorted(
            server_entries,
            key=lambda entry: entry['name'].lower() in preferred
        )[::-1]  # noqa

        sources = []
        for entry in server_entries:
            extractor = get_extractor('wcostream')(
                entry['link'],
                quality=self.quality,
                headers={}
            )
            for item in extractor._get_data():
                sources.append(('no_extractor', item['stream_url']))  # noqa
        return sources