added watchmovie provider (#323)
* Add files via upload * Add files via upload * Add files via upload * Update init.py * Update config.py * Update init.py * Update README.md
parent
11ea5c877d
commit
4ff51adc10
|
@ -61,6 +61,7 @@ Yeah. Me too! That's why this tool exists.
|
|||
- Erai-Raws
|
||||
- Animesimple
|
||||
- Animerush
|
||||
- Watchmovie
|
||||
- Kissanime - requires Node.js
|
||||
- Kisscartoon - requires Node.js
|
||||
|
||||
|
|
|
@ -61,6 +61,10 @@ DEFAULT_CONFIG = {
|
|||
"version": "subbed",
|
||||
"server": "trollvid",
|
||||
},
|
||||
'watchmovie': {
|
||||
"server": "gcloud",
|
||||
"fallback_servers": ["fembed","yourupload","mp4upload"],
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,25 @@
|
|||
import re
|
||||
import json
|
||||
import sys
|
||||
from anime_downloader.extractors.base_extractor import BaseExtractor
|
||||
from anime_downloader.sites import helpers
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class Gcloud(BaseExtractor):
    """Extractor for gcloud.live hosted videos (also used for fembed links)."""

    def _get_data(self):
        # Drop any '#fragment' and keep only the trailing path segment,
        # which is the video id the gcloud api expects.
        url = self.url.split('#')[0]
        video_id = url[url.rfind('/') + 1:]

        data = helpers.post(
            f'https://gcloud.live/api/source/{video_id}').json()['data']

        # The api returns this sentinel string instead of a source list
        # when the id no longer resolves to a video.
        if data == 'Video not found or has been removed':
            logger.warning('File not found (Most likely deleted)')
            return {'stream_url': ''}

        # Pick the source whose quality label matches the requested quality.
        stream = next(
            (source['file'] for source in data
             if source['label'] == self.quality),
            None)
        if stream is not None:
            return {'stream_url': stream}

        return {'stream_url': ''}
|
|
@ -67,6 +67,12 @@ ALL_EXTRACTORS = [
|
|||
'regex': 'haloani',
|
||||
'class': 'Haloani'
|
||||
},
|
||||
{
|
||||
'sitename': 'gcloud',
|
||||
'modulename': 'gcloud',
|
||||
'regex': 'gcloud',
|
||||
'class': 'Gcloud'
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ ALL_ANIME_SITES = [
|
|||
('dreamanime', 'dreamanime', 'DreamAnime'),
|
||||
('ryuanime', 'ryuanime', 'RyuAnime'),
|
||||
('erairaws', 'erai-raws', 'EraiRaws'),
|
||||
('watchmovie','watchmovie','WatchMovie'),
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
import logging
|
||||
import re
|
||||
import sys
|
||||
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class WatchMovie(Anime, sitename='watchmovie'):
    """
    Nice things
    Siteconfig
    ----------
    server: Primary server to use (Default: gcloud)
    fallback_servers: Recorded working servers which is used if the primary server cannot be found
    """
    sitename = 'watchmovie'
    url = f'https://{sitename}.movie'

    @classmethod
    def search(cls, query):
        # Site search returns one <a class="videoHname"> per result.
        soup = helpers.soupify(
            helpers.get(cls.url + '/search.html', params={'keyword': query}))
        return [
            SearchResult(
                title=anchor.get('title'),
                url=cls.url + anchor.get('href'))
            for anchor in soup.select('a.videoHname')
        ]

    def _scrape_episodes(self):
        # The '/season' page lists episodes newest-first; reverse so the
        # returned list runs from the first episode to the latest.
        page = helpers.soupify(helpers.get(self.url + '/season'))
        anchors = page.select('a.videoHname')
        return [
            'https://watchmovie.movie' + anchor.get('href')
            for anchor in reversed(anchors)
        ]

    def _scrape_metadata(self):
        # Title lives in the page heading.
        page = helpers.soupify(helpers.get(self.url))
        self.title = page.select('div.page-title > h1')[0].text
|
||||
|
||||
class WatchMovieEpisode(AnimeEpisode, sitename='watchmovie'):
    def _get_sources(self):
        """Return a single ``[(extractor, url)]`` pair for this episode.

        Tries the configured primary server first, then each configured
        fallback server in order, and finally hands the first listed
        server to ``no_extractor`` as a last resort.
        """
        server = self.config['server']
        fallback = self.config['fallback_servers']

        soup = helpers.soupify(helpers.get(self.url))
        sources = soup.select('div.anime_muti_link > ul > li > a')

        def _extractor_for(name):
            # fembed links are served through the gcloud api, so they
            # share the gcloud extractor.
            return 'gcloud' if name == 'fembed' else name

        # Primary server.
        for anchor in sources:
            url = anchor.get('data-video')
            if server in url:
                return [(_extractor_for(server), url)]

        logger.debug(
            'Preferred server "%s" not found. Trying all supported servers.',
            server)

        # Fallback servers, in configured priority order per source link.
        for anchor in sources:
            url = anchor.get('data-video')
            for candidate in fallback:
                if candidate in url:
                    # BUGFIX: the extractor must be chosen from the matched
                    # fallback server, not from the primary server — the old
                    # code set `extractor = server`, dispatching e.g. a
                    # yourupload URL to the wrong extractor.
                    return [(_extractor_for(candidate), url)]

        logger.warning('No supported servers found. Trying all servers. This will most likely not work')
        return [('no_extractor', sources[0].get('data-video'),)]
|
Loading…
Reference in New Issue