Added Provider: DreamAnime (#318)

so many commits in one
master
AbdullahM0hamed 2020-03-31 19:37:43 +01:00 committed by GitHub
parent 8542cb7c08
commit 792c8337ea
4 changed files with 79 additions and 6 deletions

@@ -56,6 +56,7 @@ Yeah. Me too! That's why this tool exists.
- itsaturday
- Animeflv
- Kickassanime
- DreamAnime
- Kissanime - requires Node.js
- Kisscartoon - requires Node.js

@@ -13,7 +13,7 @@ DEFAULT_CONFIG = {
        'skip_download': False,
        'download_dir': '.',
        'quality': '1080p',
        'chunk_size': '10',
        'chunk_size': '10',
        'fallback_qualities': ['720p', '480p', '360p'],
        'force_download': False,
        'file_format': '{anime_title}/{anime_title}_{ep_no}',
@@ -39,7 +39,11 @@ DEFAULT_CONFIG = {
        },
        'gogoanime': {
            "server": "cdn",
        }
        },
        'dreamanime': {
            "version": "subbed",
            "server": "trollvid",
        },
    }
}
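
The new 'dreamanime' block above follows the same siteconfig pattern that providers read back through self.config.get(). A minimal sketch of that lookup, with a plain dict standing in for the project's config machinery (the stand-in is an assumption; the .get() fallbacks mirror the provider code below):

# Minimal sketch: how a site-specific block like the 'dreamanime' entry above
# is consumed. A plain dict stands in for the project's config machinery.
siteconfig = {
    'dreamanime': {
        "version": "subbed",
        "server": "trollvid",
    },
}

site = siteconfig['dreamanime']
version = site.get("version", "subbed")   # 'subbed' or 'dubbed'
server = site.get("server", "trollvid")   # 'trollvid' or 'mp4upload'
print(version, server)                    # -> subbed trollvid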

@@ -0,0 +1,67 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import json
import re


class DreamAnime(Anime, sitename='dreamanime'):
    """
    Site: http://dreamanime.fun

    Config
    ------
    version: One of ['subbed', 'dubbed']
        Selects the audio version of the anime.
    server: One of ['mp4upload', 'trollvid']
        Selects the server to download from.
    """
    sitename = 'dreamanime'

    @classmethod
    def search(cls, query):
        results = helpers.get("https://dreamanime.fun/search", params={"term": query}).text
        soup = helpers.soupify(results)
        result_data = soup.find_all("a", {"id": "epilink"})
        search_results = [
            SearchResult(
                title=result.text,
                url=result.get("href")
            )
            for result in result_data
        ]
        return search_results

    def _scrape_episodes(self):
        version = self.config.get("version", "subbed")
        soup = helpers.soupify(helpers.get(self.url))
        subbed = []
        dubbed = []
        _all = soup.find_all("div", {"class": "episode-wrap"})
        for i in _all:
            # Each episode row is tagged 'Sub' or 'Dub'; collect both lists
            # and return the one matching the configured version.
            ep_type = i.find("div", {"class": re.compile("ep-type type-.* dscd")}).text
            if ep_type == 'Sub':
                subbed.append(i.find("a").get("data-src"))
            elif ep_type == 'Dub':
                dubbed.append(i.find("a").get("href"))
        return {"subbed": subbed, "dubbed": dubbed}[version]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.find("div", {"class": "contingo"}).find("p").text


class DreamAnimeEpisode(AnimeEpisode, sitename='dreamanime'):
    def _get_sources(self):
        server = self.config.get("server", "trollvid")
        soup = helpers.soupify(helpers.get(self.url))
        # The mirror list is embedded in a script tag as JSON; the slice strips
        # the surrounding JavaScript assignment to leave the bare object.
        hosts = json.loads(soup.find("div", {"class": "spatry"}).previous_sibling.previous_sibling.text[21:-2])["videos"]
        _type = hosts[0]["type"]
        host = list(filter(lambda video: video["host"] == server and video["type"] == _type, hosts))[0]
        name = host["host"]
        _id = host["id"]
        if name == "trollvid":
            link = "https://trollvid.net/embed/" + _id
        elif name == "mp4upload":
            link = f"https://mp4upload.com/embed-{_id}.html"
        return [(name, link)]
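
The least obvious part of the new provider is host selection in _get_sources(): the page embeds a JSON object of mirrors in a script tag, and the code keeps the first entry whose host matches the configured server and whose type matches that of the first mirror. A standalone sketch of that filtering with made-up sample data (the hosts, ids, and type values are illustrative; the wrapper-stripping slice is omitted since the exact script prefix isn't shown here):

import json

# Hypothetical payload shaped like the {"videos": [...]} object that
# _get_sources() extracts from the page; hosts and ids are made up.
payload = json.loads(
    '{"videos": [{"host": "trollvid", "id": "abc123", "type": "sub"},'
    ' {"host": "mp4upload", "id": "xyz789", "type": "sub"}]}'
)
hosts = payload["videos"]

server = "mp4upload"            # the 'server' config option
wanted = hosts[0]["type"]       # same heuristic as _get_sources()
host = next(v for v in hosts if v["host"] == server and v["type"] == wanted)

# URL construction mirrors the two branches in _get_sources().
_id = host["id"]
link = ("https://trollvid.net/embed/" + _id if host["host"] == "trollvid"
        else f"https://mp4upload.com/embed-{_id}.html")
print(link)                     # -> https://mp4upload.com/embed-xyz789.html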

@@ -12,11 +12,12 @@ ALL_ANIME_SITES = [
    ('itsaturday', 'itsaturday', 'Itsaturday'),
    ('animefreak', 'animefreak', 'AnimeFreak'),
    ('animeflix', 'animeflix', 'AnimeFlix'),
    ('dubbedanime','dubbedanime','Dubbedanime'),
    ('a2zanime','a2zanime','A2zanime'),
    ('dubbedanime', 'dubbedanime', 'Dubbedanime'),
    ('a2zanime', 'a2zanime', 'A2zanime'),
    ('animeout', 'animeout', 'AnimeOut'),
    ('animesimple','animesimple','AnimeSimple'),
    ('kickass','kickass','KickAss'),
    ('animesimple', 'animesimple', 'AnimeSimple'),
    ('kickass', 'kickass', 'KickAss'),
    ('dreamanime', 'dreamanime', 'DreamAnime'),
]
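
Each registry entry is a (module name, site name, class name) tuple. As an illustration of how such an entry can be resolved to its provider class (this sketch is not the project's actual loader, and calling it assumes the anime_downloader.sites package is importable):

import importlib

# Illustration of the registry pattern, not the project's actual loader.
# Each entry is (module name, site name, class name), as in ALL_ANIME_SITES.
REGISTRY = [
    ('dreamanime', 'dreamanime', 'DreamAnime'),
]

def get_site_class(sitename, registry=REGISTRY, package="anime_downloader.sites"):
    for module_name, name, class_name in registry:
        if name == sitename:
            module = importlib.import_module(f"{package}.{module_name}")
            return getattr(module, class_name)
    raise KeyError(f"unknown site: {sitename}")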