added animevibe (#405)

* Create animevibe.py

* added to init

* added animevibe to config

* added 3rdparty extractor

* added extractor to init
master
Blatzar 2020-06-12 07:35:18 +00:00 committed by GitHub
parent 458d6d18e4
commit 8b2654c9e7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 111 additions and 1 deletions

View File

@ -105,6 +105,16 @@ DEFAULT_CONFIG = {
"ptserver"
]
},
'animevibe': {
"servers": [
"vidstream",
"3rdparty",
"mp4upload",
"hydrax",
"gcloud",
"fembed"
]
},
}
}

View File

@ -0,0 +1,18 @@
import re
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
from anime_downloader import util
import logging
logger = logging.getLogger(__name__)
class Thirdparty(BaseExtractor):
    """Extractor for animevibe's '3rdparty' player pages.

    The player page hides the direct stream url inside a packed
    (obfuscated) javascript blob; we unpack it and pull the url out.
    """
    def _get_data(self):
        # Patterns kept byte-identical: the first finds the eval(...)
        # packed-js blob, the second the file:'http...' entry inside
        # the deobfuscated source.
        packed_pattern = r'eval\(.*\)'
        stream_pattern = r"file('|\"|):*.'(http.*?),"
        page_text = str(helpers.soupify(helpers.get(self.url)))
        packed_js = re.search(packed_pattern, page_text).group()
        logger.debug('Packed javascript: {}'.format(packed_js))
        unpacked_js = util.deobfuscate_packed_js(packed_js)
        stream_url = re.search(stream_pattern, unpacked_js).group(2)
        return {'stream_url': stream_url}

View File

@ -96,6 +96,12 @@ ALL_EXTRACTORS = [
'modulename': 'streamx',
'regex': 'streamx',
'class': 'StreamX'
},
{
'sitename': '3rdparty',
'modulename': '3rdparty',
'regex': '3rdparty',
'class': 'Thirdparty'
}
]

View File

@ -0,0 +1,75 @@
import re
from anime_downloader.extractors import get_extractor
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import logging
logger = logging.getLogger(__name__)
class AnimeVibe(Anime, sitename='animevibe'):
    """Site provider for animevibe.tv."""
    sitename = 'animevibe'
    url = f'https://{sitename}.tv'

    @classmethod
    def search(cls, query):
        # The site's search endpoint is the homepage with an `s` query param.
        page = helpers.soupify(helpers.get(cls.url, params={'s': query}))
        return [
            SearchResult(title=link.text, url=link.get('href'))
            for link in page.select('h5.title-av-search-res > a')
        ]

    def _scrape_episodes(self):
        # The anime's landing page *is* episode one; the remaining
        # episodes are linked from the episode-list widget.
        page = helpers.soupify(helpers.get(self.url))
        rest = [a.get('href') for a in page.select('div.wrap-episode-list > a')]
        return [self.url] + rest

    def _scrape_metadata(self):
        page = helpers.soupify(helpers.get(self.url))
        self.title = page.select('h3.av-episode-title')[0].text
class AnimeVibeEpisode(AnimeEpisode, sitename='animevibe'):
    def _get_sources(self):
        """Return the sorted list of stream sources for this episode.

        The episode page embeds a player iframe; that iframe's page lists
        the available mirrors as <option> elements, each mapped here to
        one of the supported extractors.
        """
        soup = helpers.soupify(helpers.get(self.url))
        iframe = soup.select('iframe')[0]
        # Bugfix: the original logged the literal string 'iframe' — log
        # the actual tag so the debug output is useful.
        logger.debug('iframe: {}'.format(iframe))
        embed = 'https://animevibe.tv' + str(iframe.get('src'))
        sources = helpers.soupify(helpers.get(embed)).select('option')
        logger.debug('Sources: {}'.format(sources))
        sources_list = []
        extractors = [
            '3rdparty',
            'mp4upload',
            'fembed',
            'gcloud',
            'vidstream',
            'hydrax'
        ]
        prefix = 'https://animevibe.tv/players/'
        for i in sources:
            source = None
            url = i.get('value').replace('iframe.php?vid=','')
            # 3rdparty option values are relative paths; make them absolute.
            url = prefix + url if url.startswith('3rdparty') else url
            #Choosing 3rd-party link is not implemented yet
            for j in extractors:
                # A 3rdparty url can contain other extractor names, so a
                # 3rdparty url is only ever matched to the 3rdparty extractor.
                if j in url and not ('3rdparty' in url and j != '3rdparty'):
                    extractor = 'gcloud' if j == 'fembed' else j  # fembed gets passed to gcloud too
                    source = {
                        'extractor': extractor,
                        'server': j,
                        'url': url,
                        'version': 'subbed'
                    }
            if source:
                sources_list.append(source)
        logger.debug('sources_list: {}'.format(sources_list))
        return self.sort_sources(sources_list)

View File

@ -23,7 +23,8 @@ ALL_ANIME_SITES = [
('animekisa','animekisa','AnimeKisa'),
('nyaa','nyaa','Nyaa'),
('animedaisuki','animedaisuki','Animedaisuki'),
('justdubs','justdubs','JustDubs')
('justdubs','justdubs','JustDubs'),
('animevibe','animevibe','AnimeVibe')
]