Added yify.mx and support for vidcloud9 (#411)

* updated config

* added yify site

* added yify to init

* added extractors to init

* added yify extractor

* added mixdrop extractor

for vidstream currently

* vidstream support for vidcloud 

* readme fixes

Looks good on clean install, merging
master
Blatzar 2020-06-15 11:20:16 +00:00 committed by GitHub
parent da091b7869
commit 6793c56b1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 145 additions and 8 deletions

View File

@ -44,7 +44,6 @@ Yeah. Me too! That's why this tool exists.
## Supported Sites
**Details about the sites can be found in [FAQ](https://github.com/vn-ki/anime-downloader/wiki/FAQ)**
- Animepahe
- Anistream
- Animeflix
- Animefreak
@ -64,9 +63,12 @@ Yeah. Me too! That's why this tool exists.
- Nyaa.si
- Animedaisuki
- Justdubs
- Animevibe
- Yify
- Animepahe - requires Node.js
- twist.moe - requires Node.js
- Kissanime - requires Node.js
- Kisscartoon - requires Node.js
- Kissanime - requires Selenium
- Kisscartoon - requires Selenium
## Installation

View File

@ -91,7 +91,7 @@ DEFAULT_CONFIG = {
"category": "English-translated"
},
'vidstream': {
"servers": ["vidstream","gcloud","mp4upload","cloud9","hydrax"]
"servers": ["vidstream","gcloud","mp4upload","cloud9","hydrax","mixdrop"]
},
'justdubs': {
"servers": ["mp4upload","gcloud"]
@ -115,6 +115,12 @@ DEFAULT_CONFIG = {
"fembed"
]
},
'yify': {
"servers": [
"vidstream",
"yify"
]
},
}
}

View File

@ -102,6 +102,18 @@ ALL_EXTRACTORS = [
'modulename': '3rdparty',
'regex': '3rdparty',
'class': 'Thirdparty'
},
{
'sitename': 'yify',
'modulename': 'yify',
'regex': 'yify',
'class': 'Yify'
},
{
'sitename': 'mixdrop',
'modulename': 'mixdrop',
'regex': 'mixdrop',
'class': 'Mixdrop'
}
]

View File

@ -0,0 +1,19 @@
import re
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
from anime_downloader import util
import logging
logger = logging.getLogger(__name__)
class Mixdrop(BaseExtractor):
    """Extractor for mixdrop embeds: unpacks the obfuscated player JS and pulls the stream url."""

    def _get_data(self):
        page = helpers.get(self.url).text
        # The player hides its config in a packed (p,a,c,k,e,d) eval() blob; unpack it first.
        packed = re.search(r'eval\(.*\)', page).group()
        unpacked = util.deobfuscate_packed_js(packed)
        logger.debug('Deobfuscated JS: {}'.format(unpacked))
        # The direct stream url is assigned to a "wurl" variable inside the unpacked JS.
        stream = re.search(r'wurl.*?=.*?"(.*?)";', unpacked).group(1)
        logger.debug('Url: {}'.format(stream))
        # Protocol-relative urls ("//host/...") need an explicit scheme.
        if stream.startswith('//'):
            stream = 'https:' + stream
        return {'stream_url': stream}

View File

@ -23,7 +23,8 @@ class VidStream(BaseExtractor):
"gcloud":"https://gcloud.live/",
"mp4upload":"https://www.mp4upload.com/",
"cloud9":"https://cloud9.to",
"hydrax":"https://hydrax.net"
"hydrax":"https://hydrax.net",
"mixdrop":"https://mixdrop.co"
}
url = self.url.replace('https:////','https://')
@ -32,8 +33,10 @@ class VidStream(BaseExtractor):
servers = Config._read_config()['siteconfig']['vidstream']['servers']
linkserver = soup.select('li.linkserver')
logger.debug('Linkserver: {}'.format(linkserver))
for a in servers:
if a == 'vidstream':
if a == 'vidstream' and 'vidstream' in self.url:
return self._get_link(soup)
for b in linkserver:
if b.get('data-video').startswith(links.get(a,'None')):
@ -45,7 +48,8 @@ class VidStream(BaseExtractor):
info['url'] = b.get('data-video')
_self = Extractor(info)
return extractors.get_extractor(a)._get_data(_self)
def _get_link(self,soup):
QUALITIES = {
"360":[],

View File

@ -0,0 +1,19 @@
import re
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
import logging
logger = logging.getLogger(__name__)
class Yify(BaseExtractor):
    """Extractor for yify.mx embeds: queries the backup API and picks a matching quality."""

    def _get_data(self):
        # The embed url carries the video id as a query parameter; the API takes it directly.
        video_id = re.search(r'id=([^&]*)', self.url).group(1)
        sources = helpers.get(f'https://api.streammp4.net/api/backup.php?id={video_id}').json()
        logger.debug('Data: {}'.format(sources))
        # First source whose label contains the requested quality wins; fall back to ''.
        stream = next(
            (src['file'] for src in sources if self.quality in src.get('label', '')),
            ''
        )
        return {'stream_url': stream}

View File

@ -24,7 +24,8 @@ ALL_ANIME_SITES = [
('nyaa','nyaa','Nyaa'),
('animedaisuki','animedaisuki','Animedaisuki'),
('justdubs','justdubs','JustDubs'),
('animevibe','animevibe','AnimeVibe')
('animevibe','animevibe','AnimeVibe'),
('yify','yify','Yify'),
]

View File

@ -0,0 +1,74 @@
import logging
import re
import json
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class Yify(Anime, sitename='yify'):
    """Site scraper for yify.mx movies."""
    sitename = 'yify'
    url = f'https://{sitename}.mx/search'

    @classmethod
    def search(cls, query):
        """Search yify.mx and return one SearchResult per hit, pointing at its watch page."""
        soup = helpers.soupify(helpers.get(cls.url, params={'keyword': query}))
        results = []
        for anchor in soup.select('div.ml-item > a'):
            results.append(
                SearchResult(
                    title=anchor.get('title'),
                    url=anchor.get('href') + '/watching.html'))
        return results

    def _scrape_episodes(self):
        """Collect the embed urls served for this movie's player sources."""
        page = str(helpers.soupify(helpers.get(self.url)))
        # The internal movie id sits in an inline JS object on the watch page.
        movie_id = re.search(r'id:.*?\"([0-9]*?)\"', page).group(1)
        episodes_url = f'https://yify.mx/ajax/v2_get_episodes/{movie_id}'
        embed_url = 'https://yify.mx/ajax/load_embed/{}'
        links = []
        for anchor in helpers.soupify(helpers.get(episodes_url)).select('div.les-content > a'):
            episode_id = anchor.get('episode-id', None)
            if not episode_id:
                continue
            embed = helpers.get(embed_url.format(episode_id)).json()
            logger.debug('Embed: {}'.format(embed))
            if embed.get('embed_url', ''):
                links.append(embed['embed_url'])
        return links

    def _scrape_metadata(self):
        """Derive the movie title from the page <title>, dropping the site's suffix text."""
        page = helpers.soupify(helpers.get(self.url))
        self.title = page.select('title')[0].text.replace('Full Movie Free Yify', '')
class YifyEpisode(AnimeEpisode, sitename='yify'):
    """Episode for yify.mx: resolves the embed id into a sorted list of stream sources."""

    def _get_sources(self):
        # _scrape_episodes appends the embed id after a '#'; recover it from the url.
        episode_id = self.url.split('#')[-1]
        embed_info = helpers.get(f'https://yify.mx/ajax/load_embed_url/{episode_id}').json()
        logger.debug(embed_info)
        embed_url = embed_info['url']
        video_id = re.search(r'id=([^&]*)', embed_url).group(1)
        servers = helpers.get(f'https://watch.yify.mx/api/?id={video_id}').json()
        logger.debug(servers)
        # Map known host fragments to their (extractor, server) pair.
        known_hosts = {
            'yify.mx/embed/': ['yify', 'yify'],
            'vidcloud9.com/': ['vidstream', 'vidstream']
        }
        collected = []
        for entry in servers:
            for fragment, (extractor_name, server_name) in known_hosts.items():
                if fragment in entry['link']:
                    collected.append({
                        'extractor': extractor_name,
                        'url': entry['link'],
                        'server': server_name,
                        'version': 'subbed'
                    })
        return self.sort_sources(collected)