Adding Justdubs.org (#379)

Added justdubs and fixed gcloud extractor
master
nate-moo 2020-06-03 16:28:54 -04:00 committed by GitHub
parent 8ef4d7d74a
commit 2aecc5f2e7
4 changed files with 74 additions and 2 deletions

@@ -28,7 +28,7 @@ DEFAULT_CONFIG = {
         'provider': 'twist.moe',
     },
     "siteconfig": {
-        "nineanime": {
+        'nineanime': {
            "server": "mp4upload",
        },
        'anistream.xyz': {
@@ -85,6 +85,9 @@ DEFAULT_CONFIG = {
        },
        'vidstream': {
            "servers": ["vidstream","gcloud","mp4upload","cloud9","hydrax"]
        },
+       'justdubs': {
+           "servers": ["mp4upload","gcloud"]
+       }
    }
}
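
For illustration, a minimal sketch of how a siteconfig entry like the 'justdubs' block above can drive server selection; the helper logic and link data here are made up, not the project's actual plumbing:

servers = ["mp4upload", "gcloud"]  # from the 'justdubs' entry above
links = {"gcloud": "https://gcloud.live/v/abc123"}  # hypothetical scraped links
# Try servers in configured order; the first one with a link wins.
source = next(((s, links[s]) for s in servers if s in links), None)
print(source)  # -> ('gcloud', 'https://gcloud.live/v/abc123')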

@@ -10,6 +10,11 @@ logger = logging.getLogger(__name__)

class Gcloud(BaseExtractor):
    def _get_data(self):
        url = self.url
+       # gcloud.live uses the same video ID as fembed.com and feurl.com
+       url = url.replace('fembed.com', 'gcloud.live')
+       url = url.replace('feurl.com', 'gcloud.live')
+       url = url.replace('gcloud.live/v/', 'gcloud.live/api/source/')
+       url = url.split('#')[0]  # drop any '#' fragment
        url = url[-url[::-1].find('/'):]  # keep only the ID after the last '/'
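
To make the rewrite concrete, here is the same sequence applied to a hypothetical fembed embed link (the URL and video ID are invented):

url = 'https://www.fembed.com/v/abc123#720p'
url = url.replace('fembed.com', 'gcloud.live')
url = url.replace('feurl.com', 'gcloud.live')
url = url.replace('gcloud.live/v/', 'gcloud.live/api/source/')
url = url.split('#')[0]
# url is now 'https://www.gcloud.live/api/source/abc123'
print(url[-url[::-1].find('/'):])  # -> 'abc123'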

@@ -22,7 +22,8 @@ ALL_ANIME_SITES = [
    ('watchmovie','watchmovie','WatchMovie'),
    ('animekisa','animekisa','AnimeKisa'),
    ('nyaa','nyaa','Nyaa'),
-   ('animedaisuki','animedaisuki','Animedaisuki')
+   ('animedaisuki','animedaisuki','Animedaisuki'),
+   ('justdubs','justdubs','JustDubs')
]
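
A sketch of how a registry triple like ('justdubs', 'justdubs', 'JustDubs') can be resolved to a site class, assuming the entries are (module name, CLI name, class name); the project's actual loader may differ:

import importlib

def get_anime_class(module_name, class_name):
    # Import anime_downloader.sites.<module> and pull the site class off it.
    module = importlib.import_module(f'anime_downloader.sites.{module_name}')
    return getattr(module, class_name)

JustDubs = get_anime_class('justdubs', 'JustDubs')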

@@ -0,0 +1,63 @@
import logging

from anime_downloader.sites.exceptions import NotFoundError
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class JustDubs(Anime, sitename='justdubs'):
    sitename = 'justdubs'

    @classmethod
    def search(cls, query):
        results = helpers.get(f"http://justdubs.org/search/node/{query}").text
        soup = helpers.soupify(results)
        # Only anchors pointing at watch pages are actual search hits.
        results_data = soup.select("li.search-result a[href*='http://justdubs.org/watch-']")
        logger.debug(results_data)
        search_results = [
            SearchResult(
                title=result.text,
                url=result.get("href")
            )
            for result in results_data
        ]
        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        ret = [str(a['href'])
               for a in soup.find_all('a', {'class': 'list-group-item'})]
        if not ret:
            err = 'No episodes found in url "{}"'.format(self.url)
            raise NotFoundError(err, self.url)
        # Reverse the list so episodes are returned in ascending order.
        return list(reversed(ret))

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('h1.page-header')[0].text
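
A hypothetical usage of the scraper above (it hits justdubs.org, so a network connection is required; the query is made up):

results = JustDubs.search('fullmetal')
for r in results:
    print(r.title, r.url)  # SearchResult fields populated in search() above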
class JustDubsEpisode(AnimeEpisode, sitename='justdubs'):
    def _get_sources(self):
        servers = self.config['servers']
        # Map each extractor to the hosts it handles. A plain dict with
        # 'gcloud' as a key twice would silently drop one of the hosts,
        # so each extractor maps to a list: gcloud handles both
        # gcloud.live and fembed.com links.
        server_links = {
            'mp4upload': ['mp4upload.com'],
            'gcloud': ['gcloud.live', 'fembed.com'],
        }
        soup = helpers.soupify(helpers.get(self.url)).select('iframe')
        # Try servers in the user's configured order; return the first
        # iframe whose src matches a host that server's extractor handles.
        for server in servers:
            hosts = server_links.get(server, [])
            for iframe in soup:
                src = iframe.get('src')
                if src and any(host in src for host in hosts):
                    return [(server, src)]
        logger.warning("Unsupported URL")
        return []