parent c65d265a55
commit f13f13bc36

@@ -12,6 +12,7 @@ with warnings.catch_warnings():
logger = logging.getLogger(__name__)
class AnimeInfo:
|
||||
"""
|
||||
Attributes
@@ -27,7 +28,8 @@ class AnimeInfo:
episodes: int
|
||||
Max amount of episodes
|
||||
"""
|
||||
def __init__(self, url, episodes,title=None, jp_title=None, metadata={}):
def __init__(self, url, episodes, title=None, jp_title=None, metadata={}):
|
||||
self.url = url
|
||||
self.episodes = episodes
|
||||
self.title = title
@@ -47,22 +49,24 @@ class MatchObject:
A number between 0-100 describing the similarities between SearchResult and AnimeInfo.
|
||||
Higher number = more similar.
|
||||
"""
|
||||
def __init__(self, AnimeInfo, SearchResult, ratio = 100):
def __init__(self, AnimeInfo, SearchResult, ratio=100):
|
||||
self.AnimeInfo = AnimeInfo
|
||||
self.SearchResult = SearchResult
|
||||
self.ratio = ratio
# Not used
def search_mal(query):
def search(query):
|
||||
soup = helpers.soupify(helpers.get('https://myanimelist.net/anime.php', params = {'q':query}))
|
||||
soup = helpers.soupify(helpers.get('https://myanimelist.net/anime.php', params={'q': query}))
|
||||
search_results = soup.select("a.hoverinfo_trigger.fw-b.fl-l")
|
||||
return [SearchResult(
|
||||
url = i.get('href'),
|
||||
title = i.select('strong')[0].text
|
||||
) for i in search_results]
url=i.get('href'),
|
||||
title=i.select('strong')[0].text
|
||||
) for i in search_results]
def scrape_metadata(url):
|
||||
soup = helpers.soupify(helpers.get(url))
@@ -77,20 +81,20 @@ def search_mal(query):
'jp_title': '約束のネバーランド 第2期'}]
|
||||
"""
|
||||
info_dict = {
|
||||
'url':url
|
||||
'url': url
|
||||
}
# Maps specified info in sidebar to variables in info_dict
|
||||
name_dict = {
|
||||
'Japanese:':'jp_title',
|
||||
'English:':'title',
|
||||
'synonyms:':'synonyms',
|
||||
'Episodes:':'episodes'
|
||||
'Japanese:': 'jp_title',
|
||||
'English:': 'title',
|
||||
'synonyms:': 'synonyms',
|
||||
'Episodes:': 'episodes'
|
||||
}
|
||||
info = soup.select('span.dark_text')
|
||||
extra_info = [i.parent.text.strip() for i in info]
|
||||
for i in extra_info:
|
||||
text = i.replace('\n','').strip()
|
||||
text = i.replace('\n', '').strip()
|
||||
for j in name_dict:
|
||||
if text.startswith(j):
|
||||
info_dict[name_dict[j]] = text[len(j):].strip()
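For illustration, a minimal worked example of the prefix matching above; the sidebar strings are made up:

name_dict = {'Japanese:': 'jp_title', 'English:': 'title', 'Episodes:': 'episodes'}
info_dict = {}
for text in ['English: The Promised Neverland', 'Episodes: 12']:
    for label, key in name_dict.items():
        if text.startswith(label):
            # Strip the matched label and keep the remainder as the value.
            info_dict[key] = text[len(label):].strip()
# info_dict == {'title': 'The Promised Neverland', 'episodes': '12'}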
@@ -106,8 +110,8 @@ def search_mal(query):
# TODO error message when this stuff is not correctly scraped
|
||||
# Can happen if MAL is down or something similar
|
||||
return AnimeInfo(url = info_dict['url'], title = info_dict.get('title'),
|
||||
jp_title = info_dict.get('jp_title'), episodes = int(info_dict['episodes']))
|
||||
return AnimeInfo(url=info_dict['url'], title=info_dict.get('title'),
|
||||
jp_title=info_dict.get('jp_title'), episodes=int(info_dict['episodes']))
|
||||
|
||||
search_results = search(query)
|
||||
season_info = []
@@ -118,14 +122,16 @@ def search_mal(query):
season_info.append(anime_info)
|
||||
|
||||
# Code below uses the first result to compare
|
||||
#season_info = [scrape_metadata(search_results[0].url)]
|
||||
#return season_info
|
||||
#season_info = [scrape_metadata(search_results[0].url)]
|
||||
# return season_info
|
||||
|
||||
# Prompts the user for selection
|
||||
return primitive_search(season_info)
|
||||
|
||||
# Choice allows the user to preselect, used to download from a list overnight.
|
||||
# None prompts the user.
|
||||
|
||||
|
||||
def search_anilist(query, choice=None):
|
||||
|
||||
def search(query):
@@ -154,7 +160,7 @@ def search_anilist(query, choice=None):
}
|
||||
"""
|
||||
url = 'https://graphql.anilist.co'
|
||||
|
||||
|
||||
# TODO check in case there's no results
|
||||
# It seems to error on no results (anime -ll DEBUG dl "nev")
|
||||
results = helpers.post(url, json={'query': ani_query, 'variables': {'search': query, 'page': 1, 'type': 'ANIME'}}).json()['data']['Page']['media']
@@ -162,8 +168,8 @@ def search_anilist(query, choice=None):
logger.error('No results found in anilist')
|
||||
raise NameError
|
||||
|
||||
search_results = [AnimeInfo(url = 'https://anilist.co/anime/' + str(i['id']), title = i['title']['romaji'],
|
||||
jp_title = i['title']['native'], episodes=int(i['episodes']), metadata=i) for i in results if i['episodes'] != None]
|
||||
search_results = [AnimeInfo(url='https://anilist.co/anime/' + str(i['id']), title=i['title']['romaji'],
|
||||
jp_title=i['title']['native'], episodes=int(i['episodes']), metadata=i) for i in results if i['episodes'] != None]
|
||||
return search_results
|
||||
|
||||
search_results = search(query)
@@ -171,7 +177,7 @@ def search_anilist(query, choice=None):
# This can also be fuzzied, but too many options.
|
||||
if choice != None:
|
||||
# Fixes too low or high to get a real value.
|
||||
fixed_choice = ((choice-1)%len(search_results))
|
||||
fixed_choice = ((choice - 1) % len(search_results))
|
||||
return search_results[fixed_choice]
|
||||
else:
|
||||
# Prompts the user for selection
@@ -193,11 +199,11 @@ def fuzzy_match_metadata(seasons_info, search_results):
# Essentially adds the chosen key to the query if the version is in use
|
||||
# Dirty solution, but should work pretty well
|
||||
|
||||
config = Config['siteconfig'].get(get_anime_class(j.url).sitename,{})
|
||||
config = Config['siteconfig'].get(get_anime_class(j.url).sitename, {})
|
||||
version = config.get('version')
|
||||
version_use = version == 'dubbed'
|
||||
# Adds something like (Sub) or (Dub) to the title
|
||||
key_used = j.meta_info.get('version_key_dubbed','') if version_use else j.meta_info.get('version_key_subbed','')
|
||||
key_used = j.meta_info.get('version_key_dubbed', '') if version_use else j.meta_info.get('version_key_subbed', '')
|
||||
title_info += ' ' + key_used
|
||||
title_info = title_info.strip()
@@ -205,9 +211,9 @@ def fuzzy_match_metadata(seasons_info, search_results):
# 0 if there's no japanese name
|
||||
jap_ratio = fuzz.ratio(i.jp_title, j.meta_info['jp_title']) if j.meta_info.get('jp_title') else 0
|
||||
# Outputs the max ratio for japanese or english name (0-100)
|
||||
ratio = max(fuzz.ratio(title_info,title_provider), jap_ratio)
|
||||
ratio = max(fuzz.ratio(title_info, title_provider), jap_ratio)
|
||||
logger.debug('Ratio: {}, Info title: {}, Provider Title: {}, Key used: {}'.format(ratio, title_info, title_provider, key_used))
|
||||
results.append(MatchObject(i, j, ratio))
|
||||
|
||||
# Returns the result with highest ratio
|
||||
return max(results, key=lambda item:item.ratio)
|
||||
return max(results, key=lambda item: item.ratio)
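A minimal sketch of the ratio-based matching used above, assuming fuzz.ratio comes from the fuzzywuzzy package; the titles are illustrative only:

from fuzzywuzzy import fuzz

info_title = 'Yakusoku no Neverland 2nd Season'
provider_titles = ['Yakusoku no Neverland (Dub)', 'Yakusoku no Neverland 2nd Season']

# Score every candidate 0-100 and keep the most similar one,
# mirroring max(results, key=lambda item: item.ratio) above.
scored = [(title, fuzz.ratio(info_title, title)) for title in provider_titles]
best_title, best_ratio = max(scored, key=lambda item: item[1])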
@@ -5,31 +5,33 @@ from anime_downloader.config import APP_DIR, Config
data = Config._CONFIG
|
||||
|
||||
|
||||
def create_table(_list, previous):
|
||||
newList = list(enumerate(_list, 1))
|
||||
headers = ['SlNo', f'{previous} settings'.strip()]
|
||||
table = tabulate(newList, headers, tablefmt = "psql")
|
||||
table = tabulate(newList, headers, tablefmt="psql")
|
||||
table = "\n".join(table.split("\n")[::-1])
|
||||
return table
|
||||
|
||||
|
||||
def traverse_json(data, previous=''):
|
||||
click.clear()
|
||||
keys = list(data.keys())
|
||||
click.echo(create_table(keys, previous))
|
||||
val = click.prompt("Select Option", type = int, default = 1) - 1
|
||||
|
||||
val = click.prompt("Select Option", type=int, default=1) - 1
|
||||
|
||||
if type(data[keys[val]]) == dict:
|
||||
traverse_json(data[keys[val]], keys[val])
|
||||
else:
|
||||
click.echo(f"Current value: {data[keys[val]]}")
|
||||
newVal = click.prompt(f"Input new value for {keys[val]}", type = str)
|
||||
newVal = click.prompt(f"Input new value for {keys[val]}", type=str)
|
||||
|
||||
#Normal strings cause an error
|
||||
# Normal strings cause an error
|
||||
try:
|
||||
newVal = eval(newVal)
|
||||
except (SyntaxError, NameError) as e:
|
||||
pass
|
||||
|
||||
|
||||
if type(newVal) != type(data[keys[val]]):
|
||||
choice = click.confirm(f"{newVal} appears to be of an incorrect type. Continue")
|
||||
|
||||
|
@@ -44,12 +46,14 @@ def traverse_json(data, previous=''):
|
|||
|
||||
data[keys[val]] = newVal
|
||||
|
||||
|
||||
def remove_config():
|
||||
os.remove(os.path.join(APP_DIR, 'config.json'))
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option(
|
||||
'--remove',
|
||||
'--remove',
|
||||
'-r',
|
||||
is_flag=True,
|
||||
help='Delete the config file if this flag is set'
|
||||
|
|
|
@@ -50,8 +50,10 @@ def command(test_query):
|
|||
threads.append(t)
|
||||
|
||||
for thread in threads:
|
||||
if os.name == 'nt':p, f = 'Works: ', "Doesn't work: " #Emojis doesn't work in cmd
|
||||
else:p, f = '✅ ', '❌ '
|
||||
if os.name == 'nt':
|
||||
p, f = 'Works: ', "Doesn't work: "  # Emojis don't work in cmd
|
||||
else:
|
||||
p, f = '✅ ', '❌ '
|
||||
thread.join(timeout=10)
|
||||
if not thread.is_alive():
|
||||
if not thread.exception:
|
||||
|
|
|
@@ -14,13 +14,14 @@ logger = logging.Logger(__name__)
|
|||
echo = click.echo
|
||||
sitenames = [v[1] for v in ALL_ANIME_SITES]
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.argument('anime_name', required=False)
|
||||
@click.option(
|
||||
'--new', '-n', type=bool, is_flag=True,
|
||||
help="Create a new anime to watch")
|
||||
@click.option(
|
||||
'--list', '-l', '_list', type=click.Choice(['all','watching','completed','planned','dropped']), help="List all animes in watch list")
|
||||
'--list', '-l', '_list', type=click.Choice(['all', 'watching', 'completed', 'planned', 'dropped']), help="List all animes in watch list")
|
||||
@click.option(
|
||||
'--remove', '-r', 'remove', type=bool, is_flag=True,
|
||||
help="Remove the specified anime")
|
||||
|
@@ -41,12 +42,11 @@ sitenames = [v[1] for v in ALL_ANIME_SITES]
|
|||
@click.option(
|
||||
'--mal-import',
|
||||
help='Import xml file from MAL export.',
|
||||
type = bool,
|
||||
is_flag = True
|
||||
)
|
||||
|
||||
type=bool,
|
||||
is_flag=True
|
||||
)
|
||||
def command(anime_name, new, update_all, _list, quality, remove,
|
||||
download_dir,mal_import, provider):
|
||||
download_dir, mal_import, provider):
|
||||
"""
|
||||
With watch you can keep track of any anime you watch.
|
||||
Available Commands after selection of an anime:\n
|
||||
|
@@ -90,7 +90,7 @@ def command(anime_name, new, update_all, _list, quality, remove,
|
|||
watcher.update_anime(anime)
|
||||
|
||||
if mal_import:
|
||||
PATH = anime_name # Hack, but needed to prompt the user. Uses the anime name as parameter.
|
||||
PATH = anime_name # Hack, but needed to prompt the user. Uses the anime name as parameter.
|
||||
if PATH:
|
||||
query = PATH
|
||||
else:
|
||||
|
@@ -102,14 +102,14 @@ def command(anime_name, new, update_all, _list, quality, remove,
|
|||
else:
|
||||
logging.error("Either the file selected was not an .xml or no file was selected.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Defaults the command to anime watch -l all.
|
||||
# It's a bit of a hack to use sys.argv, but doesn't break
|
||||
# if new commands are added (provided you used a bunch of and statements)
|
||||
_list = 'all' if sys.argv[-1] == 'watch' else _list
|
||||
if _list:
|
||||
filt = _list
|
||||
list_animes(watcher, quality, download_dir, None, _filter = filt)
|
||||
list_animes(watcher, quality, download_dir, None, _filter=filt)
|
||||
sys.exit(0)
|
||||
|
||||
if anime_name:
|
||||
|
@@ -123,7 +123,8 @@ def command(anime_name, new, update_all, _list, quality, remove,
|
|||
anime.quality = quality
|
||||
|
||||
logger.info('Found {}'.format(anime.title))
|
||||
watch_anime(watcher, anime,quality,download_dir)
|
||||
watch_anime(watcher, anime, quality, download_dir)
|
||||
|
||||
|
||||
def command_parser(command):
|
||||
# Returns a list of the commands
|
||||
|
@@ -132,14 +133,15 @@ def command_parser(command):
|
|||
# Better than split(' ') because it accounts for quotes.
|
||||
# Group 3 for quoted command
|
||||
command_regex = r'(("|\')(.*?)("|\')|.*?\s)'
|
||||
matches = re.findall(command_regex,command + " ")
|
||||
matches = re.findall(command_regex, command + " ")
|
||||
commands = [i[0].strip('"').strip("'").strip() for i in matches if i[0].strip()]
|
||||
return commands
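A quick check of the quote-aware splitting in command_parser above, with an illustrative input:

import re

command_regex = r'(("|\')(.*?)("|\')|.*?\s)'
matches = re.findall(command_regex, 'download "one piece" 3:10' + " ")
commands = [i[0].strip('"').strip("'").strip() for i in matches if i[0].strip()]
# commands == ['download', 'one piece', '3:10'] -- the quoted title stays one token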
def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
||||
|
||||
def list_animes(watcher, quality, download_dir, imp=None, _filter=None):
|
||||
|
||||
click.echo('Available Commands: swap, new')
|
||||
watcher.list(filt= _filter)
|
||||
watcher.list(filt=_filter)
|
||||
inp = click.prompt('Select an anime', default="1") if not imp else imp
|
||||
provider = Config['watch']['provider']
|
||||
# Not a number as input and command
|
||||
|
@@ -157,7 +159,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
|||
watcher.new(url)
|
||||
|
||||
if key == 'swap':
|
||||
if vals[0] in ['all','watching','completed','planned','dropped','hold']:
|
||||
if vals[0] in ['all', 'watching', 'completed', 'planned', 'dropped', 'hold']:
|
||||
return list_animes(watcher, quality, download_dir, imp=imp, _filter=vals[0])
|
||||
|
||||
return list_animes(watcher, quality, download_dir, imp=imp)
|
||||
|
@@ -166,7 +168,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
|||
sys.exit(0)
|
||||
|
||||
try:
|
||||
anime = watcher.get(int(inp)-1)
|
||||
anime = watcher.get(int(inp) - 1)
|
||||
except IndexError:
|
||||
sys.exit(0)
|
||||
|
||||
|
@@ -205,7 +207,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
|||
watcher.update_anime(anime)
|
||||
elif inp == 'watch':
|
||||
anime.quality = quality
|
||||
watch_anime(watcher, anime,quality, download_dir)
|
||||
watch_anime(watcher, anime, quality, download_dir)
|
||||
|
||||
elif inp.startswith('download'):
|
||||
# You can use download 3:10 for selected episodes
|
||||
|
@@ -242,7 +244,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
|||
if not val.isnumeric():
|
||||
# Uncomment this if you want to let the user know.
|
||||
#logger.error("Invalid integer")
|
||||
#input()
|
||||
# input()
|
||||
continue
|
||||
# Prevents setting length above max amount of episodes.
|
||||
val = val if int(val) <= len(anime) else len(anime)
|
||||
|
@@ -270,16 +272,16 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
|
|||
watcher.update(anime)
|
||||
|
||||
elif key == 'watch_status':
|
||||
if val in ['watching','completed','dropped','planned','all']:
|
||||
if val in ['watching', 'completed', 'dropped', 'planned', 'all']:
|
||||
colours = {
|
||||
'watching':'cyan',
|
||||
'completed':'green',
|
||||
'dropped':'red',
|
||||
'planned':'yellow',
|
||||
'hold':'white'
|
||||
'watching': 'cyan',
|
||||
'completed': 'green',
|
||||
'dropped': 'red',
|
||||
'planned': 'yellow',
|
||||
'hold': 'white'
|
||||
}
|
||||
anime.watch_status = val
|
||||
anime.colours = colours.get(anime.watch_status,'yellow')
|
||||
anime.colours = colours.get(anime.watch_status, 'yellow')
|
||||
watcher.update(anime)
|
||||
|
||||
|
||||
|
@@ -311,7 +313,7 @@ def watch_anime(watcher, anime, quality, download_dir):
|
|||
|
||||
elif returncode == player.CONNECT_ERR:
|
||||
logger.warning("Couldn't connect. Retrying. "
|
||||
"Attempt #{}".format(tries+1))
|
||||
"Attempt #{}".format(tries + 1))
|
||||
continue
|
||||
|
||||
elif returncode == player.PREV:
|
||||
|
|
|
@@ -21,23 +21,23 @@ DEFAULT_CONFIG = {
|
|||
'external_downloader': '',
|
||||
'aria2c_for_torrents': False,
|
||||
'selescrape_browser': None,
|
||||
'selescrape_browser_executable_path' : None,
|
||||
'selescrape_driver_binary_path' : None,
|
||||
'speed_limit' : 0,
|
||||
'selescrape_browser_executable_path': None,
|
||||
'selescrape_driver_binary_path': None,
|
||||
'speed_limit': 0,
|
||||
},
|
||||
'ezdl': {
|
||||
'file_format':'{animeinfo_anime_title}/{animeinfo_anime_title}_{provider}_{ep_no}',
|
||||
'provider':'twist.moe',
|
||||
'ratio':50,
|
||||
'fallback_providers':['vidstream','anime8'],
|
||||
'download_metadata':False,
|
||||
'file_format': '{animeinfo_anime_title}/{animeinfo_anime_title}_{provider}_{ep_no}',
|
||||
'provider': 'twist.moe',
|
||||
'ratio': 50,
|
||||
'fallback_providers': ['vidstream', 'anime8'],
|
||||
'download_metadata': False,
|
||||
},
|
||||
'watch': {
|
||||
'quality': '1080p',
|
||||
'fallback_qualities': ['720p', '480p', '360p'],
|
||||
'log_level': 'INFO',
|
||||
'provider': 'twist.moe',
|
||||
'autoplay_next':True
|
||||
'autoplay_next': True
|
||||
},
|
||||
'siteconfig': {
|
||||
'animefrenzy': {
|
||||
|
@@ -57,8 +57,8 @@ DEFAULT_CONFIG = {
|
|||
},
|
||||
'9anime': {
|
||||
'server': 'mp4upload',
|
||||
'version':'subbed',
|
||||
'domain_extension':'to'
|
||||
'version': 'subbed',
|
||||
'domain_extension': 'to'
|
||||
},
|
||||
'anistream.xyz': {
|
||||
'version': 'subbed',
|
||||
|
@@ -68,7 +68,7 @@ DEFAULT_CONFIG = {
|
|||
'server': 'natsuki',
|
||||
},
|
||||
'anime8': {
|
||||
'version':'subbed',
|
||||
'version': 'subbed',
|
||||
'servers': ['fserver', 'fdserver', 'oserver'],
|
||||
'include_special_eps': False
|
||||
},
|
||||
|
@@ -76,24 +76,24 @@ DEFAULT_CONFIG = {
|
|||
'server': 'cdn',
|
||||
'version': 'subbed'
|
||||
},
|
||||
'animerush':{
|
||||
'servers': ['Mp4uploadHD Video','MP4Upload', 'Mp4upload Video', 'Youruploads Video']
|
||||
'animerush': {
|
||||
'servers': ['Mp4uploadHD Video', 'MP4Upload', 'Mp4upload Video', 'Youruploads Video']
|
||||
},
|
||||
'kickass': {
|
||||
'server': 'A-KICKASSANIME',
|
||||
'fallback_servers': ['ORIGINAL-QUALITY-V2','HTML5-HQ','HTML5','A-KICKASSANIME','BETAPLAYER','KICKASSANIME','DEVSTREAM'],
|
||||
'ext_fallback_servers': ['Mp4Upload','Vidcdn','Vidstreaming'],
|
||||
'fallback_servers': ['ORIGINAL-QUALITY-V2', 'HTML5-HQ', 'HTML5', 'A-KICKASSANIME', 'BETAPLAYER', 'KICKASSANIME', 'DEVSTREAM'],
|
||||
'ext_fallback_servers': ['Mp4Upload', 'Vidcdn', 'Vidstreaming'],
|
||||
},
|
||||
'kissanimex': {
|
||||
'version': 'subbed',
|
||||
},
|
||||
'animesimple': {
|
||||
'version': 'subbed',
|
||||
'servers': ['vidstreaming','trollvid','mp4upload','xstreamcdn']
|
||||
'servers': ['vidstreaming', 'trollvid', 'mp4upload', 'xstreamcdn']
|
||||
},
|
||||
'darkanime': {
|
||||
'version': 'subbed',
|
||||
'servers': ['mp4upload','trollvid'],
|
||||
'servers': ['mp4upload', 'trollvid'],
|
||||
},
|
||||
'dreamanime': {
|
||||
'version': 'subbed',
|
||||
|
@@ -105,11 +105,11 @@ DEFAULT_CONFIG = {
|
|||
},
|
||||
'animekisa': {
|
||||
'server': 'gcloud',
|
||||
'fallback_servers': ['mp4upload','vidstream']
|
||||
'fallback_servers': ['mp4upload', 'vidstream']
|
||||
},
|
||||
|
||||
|
||||
'watchmovie': {
|
||||
'servers': ['vidstream','gcloud','yourupload','hydrax'],
|
||||
'servers': ['vidstream', 'gcloud', 'yourupload', 'hydrax'],
|
||||
'version': 'subbed',
|
||||
},
|
||||
'animeflix': {
|
||||
|
@@ -118,7 +118,7 @@ DEFAULT_CONFIG = {
|
|||
'version': 'sub',
|
||||
},
|
||||
'dubbedanime': {
|
||||
'servers': ['vidstream','mp4upload','trollvid'],
|
||||
'servers': ['vidstream', 'mp4upload', 'trollvid'],
|
||||
'version': 'dubbed',
|
||||
},
|
||||
'animedaisuki': {
|
||||
|
@@ -129,11 +129,11 @@ DEFAULT_CONFIG = {
|
|||
'category': 'English-translated'
|
||||
},
|
||||
'vidstream': {
|
||||
'servers': ['vidstream','vidstream_bk','gcloud','mp4upload','cloud9','hydrax','mixdrop'],
|
||||
'servers': ['vidstream', 'vidstream_bk', 'gcloud', 'mp4upload', 'cloud9', 'hydrax', 'mixdrop'],
|
||||
'version': 'subbed'
|
||||
},
|
||||
'justdubs': {
|
||||
'servers': ['mp4upload','gcloud']
|
||||
'servers': ['mp4upload', 'gcloud']
|
||||
},
|
||||
'kisscartoon': {
|
||||
'servers': [
|
||||
|
@@ -164,18 +164,18 @@ DEFAULT_CONFIG = {
|
|||
'server': 'sibnet'
|
||||
},
|
||||
'voiranime': {
|
||||
'servers':[
|
||||
'servers': [
|
||||
'gounlimited'
|
||||
]
|
||||
},
|
||||
'dbanimes': {
|
||||
'servers':['mixdrop','gounlimited','vudeo','fembed', 'sendvid']
|
||||
'servers': ['mixdrop', 'gounlimited', 'vudeo', 'fembed', 'sendvid']
|
||||
},
|
||||
'kissanime': {
|
||||
'version':'subbed'
|
||||
'version': 'subbed'
|
||||
},
|
||||
'animeonline360': {
|
||||
'version':'subbed'
|
||||
'version': 'subbed'
|
||||
}
|
||||
}
|
||||
}
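For illustration, how one site section of the config above is read back, following the lookup pattern used in fuzzy_match_metadata; the literal is trimmed to a single site:

siteconfig = {'9anime': {'server': 'mp4upload', 'version': 'subbed', 'domain_extension': 'to'}}

config = siteconfig.get('9anime', {})
version = config.get('version')      # 'subbed'
version_use = version == 'dubbed'    # False, so a '(Sub)' style key would be appended to the title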
@@ -2,6 +2,7 @@ from anime_downloader.downloader.http_downloader import HTTPDownloader
from anime_downloader.downloader.external_downloader import ExternalDownloader
|
||||
from anime_downloader.downloader.SmartDL import pySmartDL
|
||||
|
||||
|
||||
def get_downloader(downloader):
|
||||
"""get_downloader returns the proper downloader class
|
||||
|
||||
|
|
|
@@ -6,13 +6,14 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Thirdparty(BaseExtractor):
|
||||
def _get_data(self):
|
||||
eval_regex = r'eval\(.*\)'
|
||||
file_regex = r"file('|\"|):*.'(http.*?),"
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
packed_js = r'{}'.format(re.search(eval_regex,str(soup)).group())
|
||||
packed_js = r'{}'.format(re.search(eval_regex, str(soup)).group())
|
||||
logger.debug('Packed javascript: {}'.format(packed_js))
|
||||
js = util.deobfuscate_packed_js(packed_js)
|
||||
file = re.search(file_regex,js).group(2)
|
||||
file = re.search(file_regex, js).group(2)
|
||||
return {'stream_url': file}
|
||||
|
|
|
@@ -1,6 +1,8 @@
|
|||
from anime_downloader.extractors.base_extractor import BaseExtractor
|
||||
from anime_downloader.sites import helpers
|
||||
import urllib.parse
|
||||
|
||||
|
||||
class AnimeOnline360(BaseExtractor):
|
||||
def _get_data(self):
|
||||
try:
|
||||
|
|
|
@@ -7,12 +7,13 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Cloud9(BaseExtractor):
|
||||
def _get_data(self):
|
||||
url = self.url.replace('https://cloud9.to/embed/','https://api.cloud9.to/stream/')
|
||||
url = self.url.replace('https://cloud9.to/embed/', 'https://api.cloud9.to/stream/')
|
||||
data = helpers.get(url).json()['data']
|
||||
if data == 'Video not found or has been removed':
|
||||
logger.warning('File not found (Most likely deleted)')
|
||||
return {'stream_url': ''}
|
||||
|
||||
|
||||
return {'stream_url': data['sources'][0]['file']}
|
||||
|
|
|
@@ -7,25 +7,26 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Gcloud(BaseExtractor):
|
||||
def _get_data(self):
|
||||
logger.debug('Gcloud url: {}'.format(self.url)) #Surprisingly not debug printed in anime.py
|
||||
logger.debug('Gcloud url: {}'.format(self.url)) # Surprisingly not debug printed in anime.py
|
||||
"""gcloud uses the same video ID as other sites"""
|
||||
id_regex = r'(gcloud\.live|fembed\.com|feurl\.com)/(v|api/source)/([^(?|#)]*)' #Group 3 for id
|
||||
gcloud_id = re.search(id_regex,self.url)
|
||||
id_regex = r'(gcloud\.live|fembed\.com|feurl\.com)/(v|api/source)/([^(?|#)]*)' # Group 3 for id
|
||||
gcloud_id = re.search(id_regex, self.url)
|
||||
if not gcloud_id:
|
||||
logger.error('Unable to get ID for url "{}"'.format(self.url))
|
||||
logger.error('Unable to get ID for url "{}"'.format(self.url))
|
||||
return {'stream_url': ''}
|
||||
|
||||
gcloud_id = gcloud_id.group(3)
|
||||
data = helpers.post(f'https://gcloud.live/api/source/{gcloud_id}').json()['data']
|
||||
|
||||
|
||||
if data == 'Video not found or has been removed':
|
||||
logger.warning('File not found (Most likely deleted)')
|
||||
return {'stream_url': ''}
|
||||
|
||||
|
||||
for a in data:
|
||||
if a['label'] == self.quality:
|
||||
return {'stream_url': a['file']}
|
||||
|
||||
return {'stream_url': ''}
|
||||
return {'stream_url': ''}
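An illustrative use of the ID regex above; the URL is a made-up example:

import re

id_regex = r'(gcloud\.live|fembed\.com|feurl\.com)/(v|api/source)/([^(?|#)]*)'
match = re.search(id_regex, 'https://fembed.com/v/example123')
gcloud_id = match.group(3) if match else ''   # 'example123' -- group 3 carries the video ID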
@@ -8,20 +8,21 @@ from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Haloani(BaseExtractor):
|
||||
def _get_data(self):
|
||||
url = self.url
|
||||
for a in range(10):
|
||||
soup = helpers.soupify(helpers.get(url))
|
||||
regex = r"(PHNjcmlwd[^\"]{90,})|Base64\.decode\(\"([^\"]{90,})"
|
||||
decode = re.search(regex,str(soup))
|
||||
decode = re.search(regex, str(soup))
|
||||
if decode:
|
||||
decoded = base64.b64decode(decode.groups()[-1]+'==')
|
||||
decoded = base64.b64decode(decode.groups()[-1] + '==')
|
||||
break
|
||||
|
||||
regex = r"window\.location = '(https://haloani\.ru/[^']*)"
|
||||
window = re.search(regex,str(soup))
|
||||
|
||||
window = re.search(regex, str(soup))
|
||||
|
||||
if window:
|
||||
url = window.group(1)
|
||||
continue
|
||||
|
@@ -29,24 +30,24 @@ class Haloani(BaseExtractor):
|
|||
decoded = str(soup)
|
||||
break
|
||||
else:
|
||||
url = url[:url[19:].find('/')+20] #gets the base url
|
||||
url = url[:url[19:].find('/') + 20] # gets the base url
|
||||
url = url + soup.select('iframe')[0].get('src')
|
||||
|
||||
if 'file' not in str(decoded) and 'src=' not in str(decoded):
|
||||
return {'stream_url': '',}
|
||||
return {'stream_url': '', }
|
||||
if decoded[:6] == b'<video':
|
||||
regex = r"src=\"([^\"]*)"
|
||||
link = re.search(regex,str(decoded)).group(1)
|
||||
link = re.search(regex, str(decoded)).group(1)
|
||||
|
||||
else:
|
||||
regex = r'\[{[^\]]*\]'
|
||||
links = re.search(regex,str(decoded)).group()
|
||||
links = re.search(regex, str(decoded)).group()
|
||||
regex = r"[{|,][\n]*?[ ]*?[\t]*?[A-z]*?[^\"]:"
|
||||
for a in re.findall(regex,links): #Because sometimes it's not valid json
|
||||
links = links.replace(a,f'{a[:1]}"{a[1:-1]}"{a[-1:]}') #replaces file: with "file":
|
||||
for a in re.findall(regex, links): # Because sometimes it's not valid json
|
||||
links = links.replace(a, f'{a[:1]}"{a[1:-1]}"{a[-1:]}') # replaces file: with "file":
|
||||
|
||||
links = json.loads(links)
|
||||
link = links[0]['file'].replace('\\','')
|
||||
link = links[0]['file'].replace('\\', '')
|
||||
return {
|
||||
'stream_url': link,
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -5,19 +5,20 @@ import base64
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Hydrax(BaseExtractor):
|
||||
def _get_data(self):
|
||||
url = self.url
|
||||
end = url[url.find('=')+1:]
|
||||
end = url[url.find('=') + 1:]
|
||||
obfuscated_url = helpers.post('https://ping.idocdn.com/',
|
||||
data={'slug':end},
|
||||
referer=url,
|
||||
).json()['url']
|
||||
data={'slug': end},
|
||||
referer=url,
|
||||
).json()['url']
|
||||
|
||||
decoded_url = base64.b64decode(obfuscated_url[-1] + obfuscated_url[:-1]).decode('utf-8')
|
||||
|
||||
# HydraX uses www.url for high quality and url for low quality
|
||||
quality = '' if self.quality in ['360p','480p'] else 'www.'
|
||||
quality = '' if self.quality in ['360p', '480p'] else 'www.'
|
||||
|
||||
return {
|
||||
'stream_url': f'https://{quality}{decoded_url}',
|
||||
|
|
|
@@ -9,6 +9,7 @@ from subprocess import CalledProcessError
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Kwik(BaseExtractor):
|
||||
'''Extracts video url from kwik pages, Kwik has some `security`
|
||||
which allows to access kwik pages when only refered by something
|
||||
|
@@ -21,7 +22,7 @@ class Kwik(BaseExtractor):
|
|||
# from somewhere, I will just use the url itself. We then
|
||||
# have to rebuild the url. Hopefully kwik doesn't block this too
|
||||
|
||||
#Necessary
|
||||
# Necessary
|
||||
self.url = self.url.replace(".cx/e/", ".cx/f/")
|
||||
self.headers.update({"referer": self.url})
|
||||
|
||||
|
@@ -30,7 +31,7 @@ class Kwik(BaseExtractor):
|
|||
if not cookies:
|
||||
resp = util.bypass_hcaptcha(self.url)
|
||||
else:
|
||||
resp = requests.get(self.url, cookies = cookies)
|
||||
resp = requests.get(self.url, cookies=cookies)
|
||||
|
||||
title_re = re.compile(r'title>(.*)<')
|
||||
|
||||
|
@@ -47,18 +48,16 @@ class Kwik(BaseExtractor):
|
|||
kwik_text = resp.text
|
||||
|
||||
if type(e) == CalledProcessError:
|
||||
resp = requests.get(self.url, cookies = cookies)
|
||||
resp = requests.get(self.url, cookies=cookies)
|
||||
finally:
|
||||
cookies = resp.cookies
|
||||
title = title_re.search(kwik_text).group(1)
|
||||
loops += 1
|
||||
|
||||
|
||||
|
||||
post_url = deobfuscated.form["action"]
|
||||
token = deobfuscated.input["value"]
|
||||
|
||||
resp = helpers.post(post_url, headers = self.headers, params={"_token": token}, cookies = cookies, allow_redirects = False)
|
||||
resp = helpers.post(post_url, headers=self.headers, params={"_token": token}, cookies=cookies, allow_redirects=False)
|
||||
stream_url = resp.headers["Location"]
|
||||
|
||||
logger.debug('Stream URL: %s' % stream_url)
|
||||
|
|
|
@@ -6,6 +6,7 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Mixdrop(BaseExtractor):
|
||||
def _get_data(self):
|
||||
eval_regex = r'eval\(.*\)'
|
||||
|
@@ -15,7 +16,7 @@ class Mixdrop(BaseExtractor):
|
|||
redirect_regex = r"\s*window\.location\s*=\s*('|\")(.*?)('|\")"
|
||||
# allow_redirects=True doesn't seem to be working
|
||||
soup = helpers.get(self.url, allow_redirects=True).text
|
||||
redirect = re.search(redirect_regex,soup)
|
||||
redirect = re.search(redirect_regex, soup)
|
||||
|
||||
if redirect:
|
||||
url = 'https://mixdrop.to' + redirect.group(2)
|
||||
|
@@ -24,9 +25,9 @@ class Mixdrop(BaseExtractor):
|
|||
if 'WE ARE SORRY' in soup:
|
||||
return ''
|
||||
|
||||
deobfuscated_js = util.deobfuscate_packed_js(re.search(eval_regex,soup).group())
|
||||
deobfuscated_js = util.deobfuscate_packed_js(re.search(eval_regex, soup).group())
|
||||
logger.debug('Deobfuscated JS: {}'.format(deobfuscated_js))
|
||||
url = re.search(wurl_regex,deobfuscated_js).group(1)
|
||||
url = re.search(wurl_regex, deobfuscated_js).group(1)
|
||||
logger.debug('Url: {}'.format(url))
|
||||
url = f'https:{url}' if url.startswith('//') else url
|
||||
return {'stream_url': url}
|
||||
|
|
|
@@ -6,9 +6,10 @@ from anime_downloader.sites import helpers
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MP4Sh(BaseExtractor):
|
||||
def _get_data(self):
|
||||
referer = 'https://ww5.dubbedanime.net/'
|
||||
soup = helpers.get(self.url, referer=referer).text
|
||||
url = re.search(r'source src="[^"]*',soup).group().replace('source src="','')
|
||||
return {'stream_url': url}
|
||||
url = re.search(r'source src="[^"]*', soup).group().replace('source src="', '')
|
||||
return {'stream_url': url}
|
||||
|
|
|
@@ -7,22 +7,23 @@ from anime_downloader import util
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MP4Upload(BaseExtractor):
|
||||
def _get_data(self):
|
||||
soup = str(helpers.get(self.url).text)
|
||||
if 'File was deleted' in soup:
|
||||
logger.warning('File not found (Most likely deleted)')
|
||||
return {'stream_url':''}
|
||||
return {'stream_url': ''}
|
||||
|
||||
regex = r">\s*(eval\(function[\W\w]*?)</script>"
|
||||
script = re.search(regex,soup).group(1)
|
||||
script = re.search(regex, soup).group(1)
|
||||
script = util.deobfuscate_packed_js(script)
|
||||
|
||||
url = ''
|
||||
if re.search(r'player\.src\("([^"]*)',script):
|
||||
url = re.search(r'player\.src\("([^"]*)',script).group(1)
|
||||
elif re.search(r'src:"([^"]*)',script):
|
||||
url = re.search(r'src:"([^"]*)',script).group(1)
|
||||
if re.search(r'player\.src\("([^"]*)', script):
|
||||
url = re.search(r'player\.src\("([^"]*)', script).group(1)
|
||||
elif re.search(r'src:"([^"]*)', script):
|
||||
url = re.search(r'src:"([^"]*)', script).group(1)
|
||||
return {
|
||||
'stream_url': url,
|
||||
'referer': self.url
|
||||
|
|
|
@@ -1,6 +1,7 @@
|
|||
from anime_downloader.extractors.base_extractor import BaseExtractor
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
|
||||
class SendVid(BaseExtractor):
|
||||
def _get_data(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
|
|
|
@@ -3,6 +3,7 @@ from anime_downloader.extractors.base_extractor import BaseExtractor
|
|||
from anime_downloader.sites import helpers
|
||||
import re
|
||||
|
||||
|
||||
class SibNet(BaseExtractor):
|
||||
def _get_data(self):
|
||||
resp = helpers.get(self.url).text
|
||||
|
|
|
@@ -5,20 +5,21 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StreamX(BaseExtractor):
|
||||
def _get_data(self):
|
||||
url = self.url
|
||||
referer = 'https://kisscartoon.nz/'
|
||||
res = helpers.get(url, referer=referer).text
|
||||
file_regex = r'"file":"(http[^"]*?)"'
|
||||
file = re.search(file_regex,res)
|
||||
file = re.search(file_regex, res)
|
||||
if file:
|
||||
file = file.group(1).replace('\\','')
|
||||
file = file.group(1).replace('\\', '')
|
||||
else:
|
||||
logger.warning('File not found (Most likely deleted)')
|
||||
return {'stream_url': ''}
|
||||
|
||||
return {
|
||||
'stream_url': file,
|
||||
'referer': file
|
||||
'stream_url': file,
|
||||
'referer': file
|
||||
}
|
||||
|
|
|
@@ -3,13 +3,13 @@ from anime_downloader.extractors.base_extractor import BaseExtractor
|
|||
from anime_downloader.sites import helpers
|
||||
import re
|
||||
|
||||
|
||||
class Uqload(BaseExtractor):
|
||||
def _get_data(self):
|
||||
resp = helpers.get(self.url).text
|
||||
link = re.search('sources:\s+?\["(.*?mp4)"\]', resp).group(1)
|
||||
|
||||
|
||||
return {
|
||||
'stream_url': link,
|
||||
'referer': self.url
|
||||
}
|
||||
|
||||
|
|
|
@@ -3,6 +3,7 @@ import re
|
|||
from anime_downloader.extractors.base_extractor import BaseExtractor
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
|
||||
class Vudeo(BaseExtractor):
|
||||
def _get_data(self):
|
||||
soup = str(helpers.get(self.url).text)
|
||||
|
|
|
@@ -3,12 +3,13 @@
|
|||
from anime_downloader.extractors.base_extractor import BaseExtractor
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
|
||||
class XStreamCDN(BaseExtractor):
|
||||
def _get_data(self):
|
||||
post_data = helpers.post("https://www.xstreamcdn.com/api/source/" + self.url.split("/")[-1]).json()
|
||||
data = post_data["data"]
|
||||
link = data[-1]["file"]
|
||||
|
||||
|
||||
return {
|
||||
'stream_url': link,
|
||||
'stream_url': link,
|
||||
}
|
||||
|
|
|
@@ -5,15 +5,16 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Yify(BaseExtractor):
|
||||
def _get_data(self):
|
||||
api_id = re.search(r'id=([^&]*)',self.url).group(1)
|
||||
api_id = re.search(r'id=([^&]*)', self.url).group(1)
|
||||
api = f'https://api.streammp4.net/api/backup.php?id={api_id}'
|
||||
data = helpers.get(api).json()
|
||||
logger.debug('Data: {}'.format(data))
|
||||
|
||||
|
||||
for i in data:
|
||||
if self.quality in i.get('label',''):
|
||||
if self.quality in i.get('label', ''):
|
||||
return {'stream_url': i['file']}
|
||||
|
||||
return {'stream_url': ''}
|
||||
|
|
|
@@ -6,11 +6,12 @@ from anime_downloader.sites import helpers
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Yourupload(BaseExtractor):
|
||||
def _get_data(self):
|
||||
regex = r"file: '([^']*)"
|
||||
file = re.search(regex,helpers.get(self.url).text).group(1)
|
||||
file = re.search(regex, helpers.get(self.url).text).group(1)
|
||||
return {
|
||||
'stream_url': file,
|
||||
'referer': self.url
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -21,13 +21,14 @@ class mpv(BasePlayer):
|
|||
def args(self):
|
||||
# Doesn't use the referer if it's None
|
||||
if self.episode.source().referer:
|
||||
return ['--input-conf='+get_mpv_configfile(),
|
||||
'--http-header-fields=referer: '+str(self.episode.source().referer),
|
||||
return ['--input-conf=' + get_mpv_configfile(),
|
||||
'--http-header-fields=referer: ' + str(self.episode.source().referer),
|
||||
self.episode.source().stream_url]
|
||||
else:
|
||||
return ['--input-conf='+get_mpv_configfile(),
|
||||
return ['--input-conf=' + get_mpv_configfile(),
|
||||
self.episode.source().stream_url]
|
||||
|
||||
|
||||
def get_mpv_home():
|
||||
if 'MPV_HOME' in os.environ:
|
||||
return os.environ.get('MPV_HOME')
|
||||
|
|
|
@@ -16,12 +16,15 @@ cachefile = os.path.join(tempfile.gettempdir(), 'anime-cache')
|
|||
|
||||
_session = requests_cache.CachedSession(cachefile, backend='sqlite', expire_after=3600)
|
||||
|
||||
|
||||
def cacheinfo_hook(response, *args, **kwargs):
|
||||
if not getattr(response, 'from_cache', False):
|
||||
logger.debug('uncached request')
|
||||
else:
|
||||
logger.debug('cached request')
|
||||
return response
|
||||
|
||||
|
||||
_session.hooks = {'response': cacheinfo_hook}
|
||||
|
||||
# _session = requests.Session()
|
||||
|
|
|
@@ -5,49 +5,48 @@ from anime_downloader.const import HEADERS
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class Anime4(Anime, sitename = '4anime'):
|
||||
|
||||
class Anime4(Anime, sitename='4anime'):
|
||||
sitename = '4anime'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
data = {
|
||||
"action": "ajaxsearchlite_search",
|
||||
"aslp": query,
|
||||
"asid": 1,
|
||||
"action": "ajaxsearchlite_search",
|
||||
"aslp": query,
|
||||
"asid": 1,
|
||||
"options": "qtranslate_lang=0&set_intitle=None&customset%5B%5D=anime"
|
||||
}
|
||||
}
|
||||
soup = helpers.soupify(helpers.post("https://4anime.to/wp-admin/admin-ajax.php", data=data)).select('div.info > a')
|
||||
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title = i.text,
|
||||
url = i['href']
|
||||
)
|
||||
title=i.text,
|
||||
url=i['href']
|
||||
)
|
||||
for i in soup
|
||||
]
|
||||
]
|
||||
return search_results
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url)).select('ul.episodes.range.active > li > a')
|
||||
return [x['href'] for x in soup]
|
||||
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url).text)
|
||||
self.title = soup.title.text
|
||||
for i in soup.select('.detail > a'):
|
||||
if 'year' in i.get('href',''):
|
||||
if 'year' in i.get('href', ''):
|
||||
self.meta['year'] = int(i.text) if i.text.isnumeric() else None
|
||||
|
||||
|
||||
class Anime4Episode(AnimeEpisode, sitename='4anime'):
|
||||
def _get_sources(self):
|
||||
self.headers = {'user-agent':HEADERS[self.hash_url(self.url, len(HEADERS))]}
|
||||
self.headers = {'user-agent': HEADERS[self.hash_url(self.url, len(HEADERS))]}
|
||||
resp = helpers.get(self.url, headers=self.headers)
|
||||
stream_url = helpers.soupify(resp).find('div', class_='videojs-desktop').find('source')['src']
|
||||
return [('no_extractor', stream_url)]
|
||||
|
||||
|
||||
"""
|
||||
Let's say the user generates link A with user agent X.
|
||||
Upon retry of command it'd normally use Link A (cached), but with user agent Y
|
||||
|
@@ -55,8 +54,9 @@ class Anime4Episode(AnimeEpisode, sitename='4anime'):
|
|||
|
||||
This 'hashes' the url to generate a 'random' header which is consistent throughout multiple commands.
|
||||
"""
|
||||
|
||||
def hash_url(self, url, length):
|
||||
total = 0
|
||||
for i in url:
|
||||
total += ord(i)
|
||||
return total%length
|
||||
return total % length
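A small sketch of why hash_url keeps the chosen user agent stable across runs; HEADERS and the URL here are illustrative stand-ins:

HEADERS = ['agent-a', 'agent-b', 'agent-c']

def hash_url(url, length):
    # Same URL -> same character sum -> same index, so a cached link is
    # re-requested with the same user agent that generated it.
    return sum(ord(c) for c in url) % length

agent = HEADERS[hash_url('https://4anime.to/some-episode-1', len(HEADERS))]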
@@ -1,42 +0,0 @@
|
|||
import logging
|
||||
import re
|
||||
|
||||
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
class A2zanime(Anime, sitename='a2zanime'):
|
||||
sitename = 'a2zanime'
|
||||
url = f'https://{sitename}.com'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.soupify(helpers.get(f'{cls.url}/search?url=search&q={query}')).select('div.main-con > a')
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=search_results[a].get('title'),
|
||||
url=cls.url + search_results[a].get('href'))
|
||||
for a in range(len(search_results))
|
||||
]
|
||||
return(search_results)
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
elements = soup.select('div.card-bodyu > a')
|
||||
return [('https://a2zanime.com' + a.get('href')) for a in elements[::-1]]
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
self.title = soup.select('h1.title')[0].text
|
||||
|
||||
class A2zanimeEpisode(AnimeEpisode, sitename='a2zanime'):
|
||||
def _get_sources(self):
|
||||
#You can get multiple sources from this
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
regex = r"data-video-link=\"(//[^\"]*)"
|
||||
url = 'https:' + re.search(regex,str(soup)).group(1)
|
||||
|
||||
soup = helpers.soupify(helpers.get(url))
|
||||
url = (soup.select('div > iframe')[0].get('src'))
|
||||
|
||||
return [('vidstream', url ,)]
|
||||
|
|
@@ -169,8 +169,8 @@ class Anime:
|
|||
self._len, self._episode_urls))
|
||||
|
||||
if not isinstance(self._episode_urls[0], tuple):
|
||||
self._episode_urls = [(no+1, id) for no, id in
|
||||
enumerate(self._episode_urls)]
|
||||
self._episode_urls = [(no + 1, id) for no, id in
|
||||
enumerate(self._episode_urls)]
|
||||
|
||||
return self._episode_urls
|
||||
|
||||
|
@@ -354,11 +354,10 @@ class AnimeEpisode:
|
|||
def _get_sources(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def sort_sources(self, data):
|
||||
"""
|
||||
Formatted data should look something like this
|
||||
|
||||
|
||||
[
|
||||
{'extractor': 'mp4upload', 'url': 'https://twist.moe/mp4upload/...', 'server': 'mp4upload', 'version': 'subbed'},
|
||||
{'extractor': 'vidstream', 'url': 'https://twist.moe/vidstream/...', 'server': 'vidstream', 'version': 'dubbed'},
|
||||
|
@@ -371,7 +370,7 @@ class AnimeEpisode:
|
|||
version = subbed/dubbed
|
||||
|
||||
The config should consist of a list with servers in preferred order and a preferred language, eg
|
||||
|
||||
|
||||
"servers":["vidstream","default","mp4upload"],
|
||||
"version":"subbed"
|
||||
|
||||
|
@@ -379,22 +378,21 @@ class AnimeEpisode:
|
|||
as it prioritizes preferred language over preferred server
|
||||
"""
|
||||
|
||||
version = self.config.get('version','subbed') #TODO add a flag for this
|
||||
servers = self.config.get('servers',[''])
|
||||
version = self.config.get('version', 'subbed') # TODO add a flag for this
|
||||
servers = self.config.get('servers', [''])
|
||||
|
||||
logger.debug('Data : {}'.format(data))
|
||||
|
||||
#Sorts the dicts by preferred server in config
|
||||
# Sorts the dicts by preferred server in config
|
||||
sorted_by_server = sorted(data, key=lambda x: servers.index(x['server']) if x['server'] in servers else len(data))
|
||||
|
||||
#Sorts the above by preferred language
|
||||
#resulting in a list with the dicts sorted by language and server
|
||||
#with language being prioritized over server
|
||||
# Sorts the above by preferred language
|
||||
# resulting in a list with the dicts sorted by language and server
|
||||
# with language being prioritized over server
|
||||
sorted_by_lang = list(sorted(sorted_by_server, key=lambda x: x['version'] == version, reverse=True))
|
||||
logger.debug('Sorted sources : {}'.format(sorted_by_lang))
|
||||
|
||||
return '' if not sorted_by_lang else [(sorted_by_lang[0]['extractor'],sorted_by_lang[0]['url'])]
|
||||
|
||||
return '' if not sorted_by_lang else [(sorted_by_lang[0]['extractor'], sorted_by_lang[0]['url'])]
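A minimal sketch of the two-pass sort above, with made-up source entries:

data = [
    {'extractor': 'vidstream', 'url': 'u1', 'server': 'vidstream', 'version': 'dubbed'},
    {'extractor': 'mp4upload', 'url': 'u2', 'server': 'mp4upload', 'version': 'subbed'},
]
servers = ['vidstream', 'mp4upload']
version = 'subbed'

by_server = sorted(data, key=lambda x: servers.index(x['server']) if x['server'] in servers else len(data))
by_lang = sorted(by_server, key=lambda x: x['version'] == version, reverse=True)
# by_lang[0] is the mp4upload entry: the preferred language outranks the preferred server.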
def download(self, force=False, path=None,
|
||||
format='{anime_title}_{ep_no}', range_size=None):
|
||||
|
@@ -413,7 +411,7 @@ class AnimeEpisode:
|
|||
# TODO: Remove this shit
|
||||
logger.info('Downloading {}'.format(self.pretty_title))
|
||||
if format:
|
||||
file_name = util.format_filename(format, self)+'.mp4'
|
||||
file_name = util.format_filename(format, self) + '.mp4'
|
||||
|
||||
if path is None:
|
||||
path = './' + file_name
|
||||
|
|
|
@@ -1,91 +1,91 @@
|
|||
import logging
|
||||
import re
|
||||
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class Anime8(Anime, sitename = 'anime8'):
|
||||
sitename = 'anime8'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
soup = helpers.soupify(helpers.get('https://anime8.ru/Search/', params={'s': query}).text)
|
||||
results = soup.select('div.ml-item > a')
|
||||
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title = i.find('h2').text,
|
||||
url = i['href'],
|
||||
meta_info = {
|
||||
'version_key_subbed':'(Sub)',
|
||||
'version_key_dubbed':'(Dub)'
|
||||
})
|
||||
for i in results
|
||||
]
|
||||
return search_results
|
||||
|
||||
|
||||
def _scrape_episodes(self):
|
||||
"""
|
||||
Because of how the website is built,
|
||||
the only way to access the episodes is by going to the last episode available
|
||||
thats why im making two requests here.
|
||||
"""
|
||||
link = helpers.soupify(helpers.get(self.url).text).select_one('div#mv-info > a')['href']
|
||||
soup = helpers.soupify(helpers.get(link).text)
|
||||
eps = soup.select('a[class*="btn-eps first-ep last-ep"]')
|
||||
eps = [x.get('href') for x in eps]
|
||||
|
||||
#Seperating normal episodes from the special episodes
|
||||
correct_eps = []
|
||||
special_eps = []
|
||||
special_seperator = ['-Preview', '-Special']
|
||||
|
||||
for episode in eps:
|
||||
ep_text = episode.split('/')[-1].split('?')[0] #Getting the episode type from the url
|
||||
|
||||
#Only "The God of High School" has a sneak peak episode and it is broken in the 1st 10 seconds
|
||||
if '-Sneak-Peak' in ep_text:
|
||||
continue
|
||||
|
||||
# Here i add the special episodes to a seperate list
|
||||
if ep_text in special_seperator:
|
||||
special_eps.append(episode)
|
||||
|
||||
# Here i add the normal episodes to the correct_eps list
|
||||
else:
|
||||
correct_eps.append(episode)
|
||||
|
||||
# If configured to do so it will add all the special eps to the end of the list
|
||||
if self.config['include_special_eps']:
|
||||
correct_eps.extend(special_eps)
|
||||
return correct_eps
|
||||
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
self.title = soup.select('div.thumb.mvic-thumb > img')[0]['alt']
|
||||
|
||||
|
||||
class Anime8Episode(AnimeEpisode, sitename='anime8'):
|
||||
def _get_sources(self):
|
||||
resp = helpers.get(self.url)
|
||||
# Gets the ctk and id from the page used for a post request.
|
||||
ctk = re.search(r"ctk\s+=\s+'(.*)?';", resp.text).group(1)
|
||||
_id = re.search(r"episode_id\s*=\s*([^;]*)", resp.text).group(1)
|
||||
|
||||
logger.info('ctk: {}'.format(ctk))
|
||||
logger.info('id: {}'.format(_id))
|
||||
|
||||
for server in self.config['servers']:
|
||||
# The post request returns an embed.
|
||||
logger.info('server: {}'.format(server))
|
||||
resp = helpers.post("https://anime8.ru/ajax/anime/load_episodes_v2?s={}".format(server), data = {"episode_id": _id, "ctk": ctk})
|
||||
# Gets the real embed url. Json could be used on the post request, but this is probably more reliable.
|
||||
# Skips if no episode found.
|
||||
if not resp.json().get('status'):
|
||||
continue
|
||||
embed = re.search(r"iframe\s*src.*?\"([^\"]*)", resp.text).group(1).replace('\\','')
|
||||
return [('streamx', embed)]
|
||||
return ''
|
||||
import logging
|
||||
import re
|
||||
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
|
||||
from anime_downloader.sites import helpers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Anime8(Anime, sitename='anime8'):
|
||||
sitename = 'anime8'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
soup = helpers.soupify(helpers.get('https://anime8.ru/Search/', params={'s': query}).text)
|
||||
results = soup.select('div.ml-item > a')
|
||||
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=i.find('h2').text,
|
||||
url=i['href'],
|
||||
meta_info={
|
||||
'version_key_subbed': '(Sub)',
|
||||
'version_key_dubbed': '(Dub)'
|
||||
})
|
||||
for i in results
|
||||
]
|
||||
return search_results
|
||||
|
||||
def _scrape_episodes(self):
|
||||
"""
|
||||
Because of how the website is built,
|
||||
the only way to access the episodes is by going to the last episode available
|
||||
that's why I'm making two requests here.
|
||||
"""
|
||||
link = helpers.soupify(helpers.get(self.url).text).select_one('div#mv-info > a')['href']
|
||||
soup = helpers.soupify(helpers.get(link).text)
|
||||
eps = soup.select('a[class*="btn-eps first-ep last-ep"]')
|
||||
eps = [x.get('href') for x in eps]
|
||||
|
||||
# Separating normal episodes from the special episodes
|
||||
correct_eps = []
|
||||
special_eps = []
|
||||
special_seperator = ['-Preview', '-Special']
|
||||
|
||||
for episode in eps:
|
||||
ep_text = episode.split('/')[-1].split('?')[0] # Getting the episode type from the url
|
||||
|
||||
# Only "The God of High School" has a sneak peek episode and it is broken in the 1st 10 seconds
|
||||
if '-Sneak-Peak' in ep_text:
|
||||
continue
|
||||
|
||||
# Here I add the special episodes to a separate list
|
||||
if ep_text in special_seperator:
|
||||
special_eps.append(episode)
|
||||
|
||||
# Here I add the normal episodes to the correct_eps list
|
||||
else:
|
||||
correct_eps.append(episode)
|
||||
|
||||
# If configured to do so it will add all the special eps to the end of the list
|
||||
if self.config['include_special_eps']:
|
||||
correct_eps.extend(special_eps)
|
||||
return correct_eps
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
self.title = soup.select('div.thumb.mvic-thumb > img')[0]['alt']
|
||||
|
||||
|
||||
class Anime8Episode(AnimeEpisode, sitename='anime8'):
|
||||
def _get_sources(self):
|
||||
resp = helpers.get(self.url)
|
||||
# Gets the ctk and id from the page used for a post request.
|
||||
ctk = re.search(r"ctk\s+=\s+'(.*)?';", resp.text).group(1)
|
||||
_id = re.search(r"episode_id\s*=\s*([^;]*)", resp.text).group(1)
|
||||
|
||||
logger.info('ctk: {}'.format(ctk))
|
||||
logger.info('id: {}'.format(_id))
|
||||
|
||||
for server in self.config['servers']:
|
||||
# The post request returns an embed.
|
||||
logger.info('server: {}'.format(server))
|
||||
resp = helpers.post("https://anime8.ru/ajax/anime/load_episodes_v2?s={}".format(server),
|
||||
data={"episode_id": _id, "ctk": ctk})
|
||||
# Gets the real embed url. Json could be used on the post request, but this is probably more reliable.
|
||||
# Skips if no episode found.
|
||||
if not resp.json().get('status'):
|
||||
continue
|
||||
embed = re.search(r"iframe\s*src.*?\"([^\"]*)", resp.text).group(1).replace('\\', '')
|
||||
return [('streamx', embed)]
|
||||
return ''
|
||||
|
|
|
@@ -2,20 +2,21 @@ from anime_downloader.sites import helpers
|
|||
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
|
||||
|
||||
|
||||
class AnimeChameleon(Anime, sitename = 'gurminder'):
|
||||
class AnimeChameleon(Anime, sitename='gurminder'):
|
||||
sitename = "gurminder"
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
soup = helpers.soupify(helpers.get('http://anime.gurminderboparai.com/search/{}'.format(query)).text).find('div', class_='panel-body').find_all('a')
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title = x.text,
|
||||
url = x['href']
|
||||
)
|
||||
title=x.text,
|
||||
url=x['href']
|
||||
)
|
||||
for x in soup
|
||||
]
|
||||
]
|
||||
return search_results
|
||||
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url).text).find('ul', id='episodes-list').find_all('li')
|
||||
eps = [x.a['href'] for x in soup]
|
||||
|
|
|
@@ -6,56 +6,59 @@ from anime_downloader.sites import helpers
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Animedaisuki(Anime, sitename='animedaisuki'):
|
||||
sitename = 'animedaisuki'
|
||||
url = f'https://{sitename}.moe/browse'
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.soupify(helpers.get(cls.url, params={'q': query})).select('article > a')
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=a.select('h3')[0].text,
|
||||
url='https://animedaisuki.moe' + a.get('href'))
|
||||
for a in search_results
|
||||
]
|
||||
return(search_results)
|
||||
sitename = 'animedaisuki'
|
||||
url = f'https://{sitename}.moe/browse'
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
elements = soup.select('li.fa-play-circle > a')[::-1]
|
||||
return ['https://animedaisuki.moe' + a.get('href') for a in elements if a.get('href').startswith('/watch/')]
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.soupify(helpers.get(cls.url, params={'q': query})).select('article > a')
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=a.select('h3')[0].text,
|
||||
url='https://animedaisuki.moe' + a.get('href'))
|
||||
for a in search_results
|
||||
]
|
||||
return(search_results)
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
elements = soup.select('li.fa-play-circle > a')[::-1]
|
||||
return ['https://animedaisuki.moe' + a.get('href') for a in elements if a.get('href').startswith('/watch/')]
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
self.title = soup.select('h2.Title')[0].text
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url))
|
||||
self.title = soup.select('h2.Title')[0].text
|
||||
|
||||
class AnimedaisukiEpisode(AnimeEpisode, sitename='animedaisuki'):
|
||||
def _get_sources(self):
|
||||
servers = self.config.get('servers',['no_extractor'])
|
||||
def _get_sources(self):
|
||||
servers = self.config.get('servers', ['no_extractor'])
|
||||
|
||||
soup = helpers.soupify(helpers.get(self.url)).select('tbody a')
|
||||
captcha_regex = r'https://.*?s=(https.*)' #removes captcha redirect from link
|
||||
links = []
|
||||
soup = helpers.soupify(helpers.get(self.url)).select('tbody a')
|
||||
captcha_regex = r'https://.*?s=(https.*)' # removes captcha redirect from link
|
||||
links = []
|
||||
|
||||
website_extractors = [
|
||||
['official','https://animedaisuki.moe/','no_extractor'],
|
||||
#['streamango','https://streamango.com/','streamango'],
|
||||
#['openload','https://openload.co/','no_extractor'], both streamango and openload are dead
|
||||
]
|
||||
website_extractors = [
|
||||
['official', 'https://animedaisuki.moe/', 'no_extractor'],
|
||||
# ['streamango','https://streamango.com/','streamango'],
|
||||
# ['openload','https://openload.co/','no_extractor'], both streamango and openload are dead
|
||||
]
|
||||
|
||||
for a in soup: #removes capcha from link
|
||||
if re.search(captcha_regex,a.get('href')):
|
||||
links.append(re.search(captcha_regex,a.get('href')).group(1))
|
||||
else:
|
||||
links.append(a.get('href'))
|
||||
|
||||
logger.debug(links)
|
||||
for a in soup:  # removes captcha from link
|
||||
if re.search(captcha_regex, a.get('href')):
|
||||
links.append(re.search(captcha_regex, a.get('href')).group(1))
|
||||
else:
|
||||
links.append(a.get('href'))
|
||||
|
||||
for a in servers:
|
||||
for b in links:
|
||||
for c in website_extractors:
|
||||
if b.startswith(c[1]) and a == c[0]:
|
||||
return [(c[2], b,)]
|
||||
|
||||
logger.debug('No supported servers found')
|
||||
return ''
|
||||
logger.debug(links)
|
||||
|
||||
for a in servers:
|
||||
for b in links:
|
||||
for c in website_extractors:
|
||||
if b.startswith(c[1]) and a == c[0]:
|
||||
return [(c[2], b,)]
|
||||
|
||||
logger.debug('No supported servers found')
|
||||
return ''
|
||||
|
|
|
@@ -4,80 +4,81 @@ import logging
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AnimeFlix(Anime, sitename='animeflix'):
|
||||
"""
|
||||
Nice things
|
||||
Siteconfig
|
||||
----------
|
||||
server: Primary server to use (Default: AUEngine)
|
||||
fallback_servers: Recorded working servers which is used if the primary server cannot be found (FastStream works, but downloads m3u8 files)
|
||||
version: sub/dub, language
|
||||
"""
|
||||
sitename = 'animeflix'
|
||||
search_url = 'https://animeflix.io/api/search'
|
||||
anime_url = 'https://animeflix.io/shows'
|
||||
episodeList_url = 'https://animeflix.io/api/anime-schema'
|
||||
meta_url = 'https://animeflix.io/api/anime/detail'
|
||||
QUALITIES = ['360p', '480p', '720p', '1080p']
|
||||
"""
|
||||
Nice things
|
||||
Siteconfig
|
||||
----------
|
||||
server: Primary server to use (Default: AUEngine)
|
||||
fallback_servers: Recorded working servers which is used if the primary server cannot be found (FastStream works, but downloads m3u8 files)
|
||||
version: sub/dub, language
|
||||
"""
|
||||
sitename = 'animeflix'
|
||||
search_url = 'https://animeflix.io/api/search'
|
||||
anime_url = 'https://animeflix.io/shows'
|
||||
episodeList_url = 'https://animeflix.io/api/anime-schema'
|
||||
meta_url = 'https://animeflix.io/api/anime/detail'
|
||||
QUALITIES = ['360p', '480p', '720p', '1080p']
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.get(cls.search_url,
|
||||
params={'q' : query}).json()
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=result['title'],
|
||||
url=f'{cls.anime_url}/{result["slug"]}',
|
||||
)
|
||||
for result in search_results.get('data',[])
|
||||
]
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.get(cls.search_url,
|
||||
params={'q': query}).json()
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=result['title'],
|
||||
url=f'{cls.anime_url}/{result["slug"]}',
|
||||
)
|
||||
for result in search_results.get('data', [])
|
||||
]
|
||||
|
||||
return search_results
|
||||
|
||||
def _scrape_episodes(self):
|
||||
# TODO: find a better way to do splits
|
||||
# find a way to pass some values within the class
|
||||
episodes = helpers.get(self.episodeList_url,
|
||||
params={'slug': self.slug}).json()
|
||||
return search_results
|
||||
|
||||
if episodes.get('@type','') == 'Movie': #different response if movies
|
||||
return [episodes['potentialAction']['target']]
|
||||
return [ self.anime_url + episode['url'] for episode in episodes['episodes'] ]
|
||||
|
||||
def _scrape_metadata(self):
|
||||
self.slug = self.url.strip('/').split('/')[-1]
|
||||
meta = helpers.get(self.meta_url,
|
||||
def _scrape_episodes(self):
|
||||
# TODO: find a better way to do splits
|
||||
# find a way to pass some values within the class
|
||||
episodes = helpers.get(self.episodeList_url,
|
||||
params={'slug': self.slug}).json()
|
||||
self.title = meta['data']['title']
|
||||
logger.debug(self.title)
|
||||
|
||||
if episodes.get('@type', '') == 'Movie': # different response if movies
|
||||
return [episodes['potentialAction']['target']]
|
||||
return [self.anime_url + episode['url'] for episode in episodes['episodes']]
|
||||
|
||||
def _scrape_metadata(self):
|
||||
self.slug = self.url.strip('/').split('/')[-1]
|
||||
meta = helpers.get(self.meta_url,
|
||||
params={'slug': self.slug}).json()
|
||||
self.title = meta['data']['title']
|
||||
logger.debug(self.title)
|
||||
|
||||
|
||||
class AnimeFlixEpisode(AnimeEpisode, sitename='animeflix'):
|
||||
episodeId_url = 'https://animeflix.io/api/episode'
|
||||
stream_url = 'https://animeflix.io/api/videos?episode_id'
|
||||
anime_url = 'https://www.animeflix.io/shows'
|
||||
episodeId_url = 'https://animeflix.io/api/episode'
|
||||
stream_url = 'https://animeflix.io/api/videos?episode_id'
|
||||
anime_url = 'https://www.animeflix.io/shows'
|
||||
|
||||
def _get_sources(self):
|
||||
version = self.config['version']
|
||||
server = self.config['server']
|
||||
fallback = self.config['fallback_servers']
|
||||
def _get_sources(self):
|
||||
version = self.config['version']
|
||||
server = self.config['server']
|
||||
fallback = self.config['fallback_servers']
|
||||
|
||||
episode = helpers.get(self.episodeId_url,
|
||||
params={'episode_num': self.ep_no, 'slug': self.url.strip('/').split('/')[-2]}).json()
|
||||
_id = episode['data']['current']['id']
|
||||
download_link = helpers.get(
|
||||
f'{self.stream_url}={_id}').json()
|
||||
episode = helpers.get(self.episodeId_url,
|
||||
params={'episode_num': self.ep_no, 'slug': self.url.strip('/').split('/')[-2]}).json()
|
||||
_id = episode['data']['current']['id']
|
||||
download_link = helpers.get(
|
||||
f'{self.stream_url}={_id}').json()
|
||||
|
||||
for a in download_link: #Testing sources with selected language and provider
|
||||
if a['lang'] == self.config['version']:
|
||||
if a['provider'] == self.config['server']:
|
||||
return [('no_extractor', a['file'],)]
|
||||
|
||||
logger.debug('Preferred server %s not found. Trying all supported servers in selected language.',server)
|
||||
|
||||
for a in download_link: #Testing sources with selected language
|
||||
if a['lang'] == self.config['version']:
|
||||
for a in download_link: # Testing sources with selected language and provider
|
||||
if a['lang'] == self.config['version']:
|
||||
if a['provider'] == self.config['server']:
|
||||
return [('no_extractor', a['file'],)]
|
||||
|
||||
logger.debug('No %s servers found, trying all servers',self.config['version'])
|
||||
return[('no_extractor', download_link[0]['file'],)]
|
||||
logger.debug('Preferred server %s not found. Trying all supported servers in selected language.', server)
|
||||
|
||||
for a in download_link: # Testing sources with selected language
|
||||
if a['lang'] == self.config['version']:
|
||||
return [('no_extractor', a['file'],)]
|
||||
|
||||
logger.debug('No %s servers found, trying all servers', self.config['version'])
|
||||
return[('no_extractor', download_link[0]['file'],)]
|
||||
|
|
|
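The _get_sources above falls back in three stages: exact language-plus-provider match, then language only, then the first link available. A toy sketch of that cascade with made-up entries:

links = [
    {'lang': 'dub', 'provider': 'AUEngine', 'file': 'a.mp4'},
    {'lang': 'sub', 'provider': 'FastStream', 'file': 'b.mp4'},
]

def pick(links, version='sub', server='AUEngine'):
    # exact language + provider match first
    for a in links:
        if a['lang'] == version and a['provider'] == server:
            return a['file']
    # then any provider in the right language
    for a in links:
        if a['lang'] == version:
            return a['file']
    # finally, anything at all
    return links[0]['file']

print(pick(links))  # b.mp4: language matched, preferred server did not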
@@ -6,42 +6,42 @@ from anime_downloader.sites import helpers

class AnimeFreak(Anime, sitename='animefreak'):
    sitename = 'animefreak'
    search_url = f'https://www.{sitename}.tv/search/topSearch'
    anime_url = 'https://www.animefreak.tv/watch'
    QUALITIES = ['360p', '480p', '720p', '1080p']

    @classmethod
    def search(cls, query):
        search_results = helpers.get(cls.search_url,
                                     params={'q': query}).json()
        search_results = [
            SearchResult(
                title=result['name'],
                url=f'{cls.anime_url}/{result["seo_name"]}')
            for result in search_results.get('data', [])
        ]

        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        # Negative index for episode links in cases where the full episode
        # list is available; if not, default to the usual episode list
        episode_links = soup.select('ul.check-list')[-1].select('li a')
        return [a.get('href') for a in episode_links][::-1]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select_one('.anime-title').text


class AnimeFreakEpisode(AnimeEpisode, sitename='animefreak'):
    def _get_sources(self):
        page = helpers.get(self.url).text
        source_re = re.compile(r'loadVideo.+file: "([^"]+)', re.DOTALL)
        match = source_re.findall(page)

        if not match:
            raise NotFoundError(f'Failed to find video url for {self.url}')
        return [('no_extractor', match[0],)]
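To see what the loadVideo regex above actually captures, here it is run against an invented page snippet:

import re

page = '''<script>
loadVideo({
    file: "https://cdn.example.test/ep1.mp4"
});
</script>'''  # hypothetical markup

source_re = re.compile(r'loadVideo.+file: "([^"]+)', re.DOTALL)
print(source_re.findall(page))  # ['https://cdn.example.test/ep1.mp4']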
@@ -3,75 +3,75 @@ import re
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers


class AnimeFree(Anime, sitename='animefree'):
    sitename = 'kissanimefree'
    url = f'https://{sitename}.xyz/'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'s': query})).select('div.movie-poster')

        # i.select("a")[0].get('href').replace("kissanime","_anime") +"," + i.select("span")[0].get('data-id') )
        # ^ this will pass all data required directly to _scrape_episodes if you want to get rid of one or two get requests

        # The feature of passing links directly to anime-downloader makes passing data between functions convoluted
        # and creates a bunch of unnecessary get requests; because of this you have to do at least 2 extra get requests.
        # This also creates bugs when sitenames are similar.
        search_results = [
            SearchResult(
                title=i.select("a > img")[0].get("alt"),
                url=i.select("a")[0].get('href').replace("kissanime", "_anime"))
            for i in search_results
        ]
        return search_results

    def _scrape_episodes(self):
        # This is awkward: you need to replace the url, otherwise it will go to the kissanime site because the links are similar
        _referer = self.url.replace("_anime", "kissanime")
        _id = helpers.soupify(helpers.get(_referer)).select("li.addto-later")[0].get("data-id")

        #data = self.url.split(",")
        #_id = data[1]
        #_referer = data[0].replace("_anime", "kissanime")
        for i in range(1, 100):
            d = helpers.get("https://kissanimefree.xyz/load-list-episode/", params={"pstart": i, "id": _id, "ide": ""})
            if not d.text:  # MOVIES
                maxEp = 1
                break
            maxEp = int(helpers.soupify(d).select("li")[0].text)
            if not maxEp == i * 100:
                break
        return [f"{i},{_id},{_referer}" for i in range(1, maxEp + 1)]
        # you need to know all three: the id of the episode, the id of the movie, and the referer

    def _scrape_metadata(self):
        realUrl = self.url.replace("_anime", "kissanime")
        soup = helpers.soupify(helpers.get(realUrl)).select('div.film > h1')
        self.title = soup[0].text


class AnimeFreeEpisode(AnimeEpisode, sitename='kissanimefree'):
    def _get_sources(self):
        ids = self.url.split(",")
        ep = ids[0]
        realId = int(ids[0]) + int(ids[1]) + 2
        _referer = ids[2]

        realUrl = helpers.post("https://kissanimefree.xyz/wp-admin/admin-ajax.php",
                               referer=f"https://kissanimefree.xyz/episode/{_referer}-episode-{realId}/",
                               data={"action": "kiss_player_ajax", "server": "vidcdn", "filmId": realId}).text

        realUrl = realUrl if realUrl.startswith('http') else "https:" + realUrl

        txt = helpers.get(realUrl).text
        # Group 2 and/or 3 is the vidstreaming link without https://
        # Not used because I've yet to test if goto always leads to mp4
        # vidstream_regex = r"window\.location\s=\s(\"|').*?(vidstreaming\.io/[^(\"|')]*?)\"|(vidstreaming\.io/goto\.php[^(\"|')]*?)(\"|')"

        vidstream_regex = r"window\.location\s=\s(\"|').*?(vidstreaming\.io/[^(\"|')]*?)\""
        surl = re.search(vidstream_regex, txt)
        if surl:
            if surl.group(2):
                return [('vidstreaming', surl.group(2),)]
        return ''
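The class above smuggles three values through each episode "url" as a comma-joined string; a minimal round trip with invented values:

packed = "4,1234,some-show"           # ep number, anime id, referer slug (invented)
ep, _id, _referer = packed.split(",")
realId = int(ep) + int(_id) + 2       # same arithmetic as _get_sources above
print(ep, _id, _referer, realId)      # 4 1234 some-show 1240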
@@ -6,17 +6,19 @@ import logging

logger = logging.getLogger(__name__)


class AnimeFrenzy(Anime, sitename='animefrenzy'):
    sitename = 'animefrenzy'

    @classmethod
    def search(cls, query):
        r = helpers.get("https://animefrenzy.net/search", params={"q": query})
        soup = helpers.soupify(r)
        titleName = soup.select("div.conm > a.cona")
        search_results = [
            SearchResult(
                title=a.text,
                url='https://animefrenzy.net/' + a.get('href')
            )
            for a in titleName
        ]

@@ -43,7 +45,8 @@ class AnimeFrenzy(Anime, sitename='animefrenzy'):
    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select_one("div.infodes > h1").text


class AnimeFrenzyEpisode(AnimeEpisode, sitename='animefrenzy'):
    def _get_sources(self):
@@ -7,53 +7,56 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class AnimeKisa(Anime, sitename='animekisa'):
    sitename = 'animekisa'
    url = f'https://animekisa.tv/'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get("https://animekisa.tv/search", params={"q": query}))
        search_results = search_results.select('div.similarbox > a.an')
        search_results = [
            SearchResult(
                title=i.select('div > div > div > div > div.similardd')[0].text,
                url='https://www.animekisa.tv' + i.get('href'))
            for i in search_results if i.get('href') != '/'
        ]

        return search_results

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('h1.infodes')[0].text

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        episode_links = soup.select('a.infovan')
        episodes = [
            'https://animekisa.tv' + '/' + i.get('href')
            for i in episode_links[::-1]]
        return episodes


class AnimeKisaEpisode(AnimeEpisode, sitename='animekisa'):
    def _get_sources(self):
        soup = helpers.get(self.url).text
        server = self.config['server']
        fallback = self.config['fallback_servers']
        regex = {
            'mp4upload': r'(https://www.mp4upload.com/)+[^"]*',
            'vidstream': r'(https://vidstreaming.io/)+[^"]*',
            'gcloud': r'(https://gcloud.live/)+[^"]*',
        }
        if re.search(regex[server], soup):  # Testing sources with selected provider
            link = re.search(regex[server], soup).group()
            return [(server, link,)]

        logger.debug('Preferred server %s not found. Trying all supported servers', server)

        for a in fallback:  # Testing fallback providers
            if re.search(regex[a], soup):
                link = re.search(regex[a], soup).group()
                return [(a, link,)]

        logger.debug('No supported servers found')
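Isolated, the per-server regex scan above looks like this; the page text and preference order are made up:

import re

regex = {
    'mp4upload': r'(https://www.mp4upload.com/)+[^"]*',
    'vidstream': r'(https://vidstreaming.io/)+[^"]*',
}
page = '"https://vidstreaming.io/v/abc123"'  # hypothetical embed markup

for server in ['mp4upload', 'vidstream']:    # preferred order
    m = re.search(regex[server], page)
    if m:
        print(server, m.group())             # vidstream https://vidstreaming.io/v/abc123
        break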
@@ -1,28 +1,30 @@
import json
import requests

from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers


class AnimeOnline(Anime, sitename='animeonline360'):

    sitename = 'animeonline360'

    @classmethod
    def search(cls, query):
        try:
            r = helpers.soupify(helpers.get('https://animeonline360.me/', params={'s': query})).select('div.title')
            results = [{"title": x.text, "url": x.a['href']} for x in r]
            search_results = [
                SearchResult(
                    title=i['title'],
                    url=i['url'],
                    meta_info={
                        'version_key_dubbed': 'Dubbed',
                        'version_key_subbed': 'Subbed',
                    }
                )
                for i in results
            ]

            return search_results
        except:

@@ -33,8 +35,9 @@ class AnimeOnline(Anime, sitename = 'animeonline360'):
        return [i.get('href') for i in data[::-1]]

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).title.text.split('|')[0].strip().title()


class AnimeOnlineEpisode(AnimeEpisode, sitename='animeonline360'):
    def _get_sources(self):
        return [('animeonline360', self.url)]
@@ -21,7 +21,7 @@ class AnimeOut(Anime, sitename='animeout'):
            SearchResult(
                title=i.text,
                url=i.get('href'),
                meta_info={
                    'title_cleaned': re.sub(clean_title_regex, "", i.text).strip()
                })
            for i in search_results
@@ -5,27 +5,26 @@ import logging

logger = logging.getLogger(__name__)


class AnimeRush(Anime, sitename='animerush'):
    sitename = 'animerush'
    url = f'https://www.{sitename}.tv/search.php'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'searchquery': query}))
        title = search_results.select('h3')  # Stored in another place
        results = search_results.select('a.highlightit')
        return [
            SearchResult(
                title=title[i].text,
                url='https:' + results[i].get('href'))
            for i in range(1, len(results))]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url)).select('div.episode_list > a')
        return ['https:' + i.get('href') for i in soup[::-1]]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('div.amin_week_box_up1 > h1')[0].text

@@ -35,7 +34,7 @@ class AnimeRushEpisode(AnimeEpisode, sitename='animerush'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        sources = ([[self._get_url('https:' + i.get('href')), i.text] for i in soup.select('div.episode_mirrors > div > h3 > a')])
        sources.append([self._get_url(self.url), soup.select('iframe')[-1].get('title')])

        logger.debug('Sources: {}'.format(sources))

@@ -47,15 +46,14 @@ class AnimeRushEpisode(AnimeEpisode, sitename='animerush'):
            # If more advanced sources need to be added, look at watchmovie or darkanime
            server = 'yourupload' if 'yourupload' in i[0] else 'mp4upload'
            sources_list.append({
                'extractor': server,
                'url': i[0],
                'server': i[1],
                'version': 'subbed'
            })

        return self.sort_sources(sources_list)

    def _get_url(self, url):  # The links are hidden on other pages
        soup = helpers.soupify(helpers.get(url))
        return (soup.select('iframe')[-1].get('src'))
@@ -8,9 +8,11 @@ from anime_downloader.extractors import get_extractor

logger = logging.getLogger(__name__)


class AnimeSimple(Anime, sitename='animesimple'):
    sitename = 'animesimple'
    url = f'https://{sitename}.com/search'

    @classmethod
    def search(cls, query):
        # Be aware of CSS selectors changing.

@@ -22,20 +24,18 @@ class AnimeSimple(Anime, sitename='animesimple'):
            for i in search_results
        ]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        anime_id = soup.find(id='animeid').get('value')
        elements = helpers.soupify(helpers.get('https://animesimple.com/request',
                                               params={
                                                   'anime-id': anime_id,
                                                   'epi-page': '1',
                                                   'top': 10000,  # max 10 000 episodes
                                                   'bottom': 0,
                                               }))
        return [i.get('href') for i in elements]

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('li.breadcrumb-item.active')[0].text

@@ -44,19 +44,19 @@ class AnimeSimpleEpisode(AnimeEpisode, sitename='animesimple'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        regex = r'var json = ([^;]*)'
        sources = json.loads(re.search(regex, str(soup)).group(1))  # Lots of sources can be found here

        logger.debug('Sources: {}'.format(sources))

        sources_list = []
        for i in sources:
            extractor = 'no_extractor' if not get_extractor(i['host']) else i['host']
            embed = re.search(r"src=['|\"]([^\'|^\"]*)", str(i['player']), re.IGNORECASE).group(1)
            sources_list.append({
                'extractor': extractor,
                'url': embed,
                'server': i['host'],
                'version': i.get('type', 'subbed')
            })

        return self.sort_sources(sources_list)
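How the two regexes in _get_sources cooperate, shown on an invented page fragment:

import json
import re

page = """var json = [{"host": "vidstreaming", "type": "subbed",
"player": "<iframe src='https://vidstreaming.io/e/xyz'></iframe>"}];"""

sources = json.loads(re.search(r'var json = ([^;]*)', page).group(1))
embed = re.search(r"src=['|\"]([^\'|^\"]*)", str(sources[0]['player']), re.IGNORECASE).group(1)
print(embed)  # https://vidstreaming.io/e/xyz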
@@ -6,70 +6,70 @@ import logging

logger = logging.getLogger(__name__)


class AnimeVibe(Anime, sitename='animevibe'):
    sitename = 'animevibe'
    url = f'https://{sitename}.tv'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'s': query})).select('h5.title-av-search-res > a')
        return [
            SearchResult(
                title=a.text,
                url=a.get('href'))
            for a in search_results
        ]

    def _scrape_episodes(self):
        # First episode
        episodes = [self.url]
        soup = helpers.soupify(helpers.get(self.url))
        episodes.extend([x.get('href') for x in soup.select('div.wrap-episode-list > a')])
        return episodes

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('h3.av-episode-title')[0].text


class AnimeVibeEpisode(AnimeEpisode, sitename='animevibe'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        iframe = soup.select('iframe')[0]
        logger.debug('iframe: {}'.format(iframe))
        embed = 'https://animevibe.tv' + str(iframe.get('src'))
        sources = helpers.soupify(helpers.get(embed)).select('option')
        logger.debug('Sources: {}'.format(sources))
        sources_list = []
        extractors = [
            '3rdparty',
            'mp4upload',
            'fembed',
            'gcloud',
            'vidstream',
            'hydrax'
        ]

        prefix = 'https://animevibe.tv/players/'
        for i in sources:
            source = None
            url = i.get('value').replace('iframe.php?vid=', '')
            url = prefix + url if url.startswith('3rdparty') else url
            # Choosing the 3rd-party link is not implemented yet
            for j in extractors:
                # the 3rd-party url can contain other extractors
                if j in url and not ('3rdparty' in url and j != '3rdparty'):
                    extractor = 'gcloud' if j == 'fembed' else j  # fembed gets passed to gcloud too
                    source = {
                        'extractor': extractor,
                        'server': j,
                        'url': url,
                        'version': 'subbed'
                    }

            if source:
                sources_list.append(source)

        logger.debug('sources_list: {}'.format(sources_list))
        return self.sort_sources(sources_list)
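The extractor-matching guard above, isolated with made-up urls; note how the 3rdparty check prevents a 3rd-party link from matching the extractors it wraps:

extractors = ['3rdparty', 'mp4upload', 'gcloud']

def match(url):
    for j in extractors:
        if j in url and not ('3rdparty' in url and j != '3rdparty'):
            return j

print(match('https://animevibe.tv/players/3rdparty/gcloud.php'))  # 3rdparty
print(match('https://gcloud.live/v/abc'))                          # gcloud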
@@ -15,9 +15,9 @@ class AniMixPlay(Anime, sitename='animixplay'):
    def search(cls, query):
        # V3 not supported
        v1 = helpers.soupify(helpers.post("https://animixplay.com/api/search/v1",
                                          data={"q2": query}, verify=False).json()['result']).select('p.name > a')
        v2 = helpers.soupify(helpers.post("https://animixplay.com/api/search/",
                                          data={"qfast2": query}, verify=False).json()['result']).select('p.name > a')
        # v3 = helpers.soupify(helpers.post("https://animixplay.com/api/search/v3",
        # data = {"q3": query}, verify = False).json()['result'])

@@ -25,7 +25,7 @@ class AniMixPlay(Anime, sitename='animixplay'):
        # HTTPError doesn't seem to play along with helpers, hence why it's not expected.
        try:
            v4 = helpers.soupify(helpers.post("https://animixplay.com/api/search/v4",
                                              data={"q": query}, verify=False).json()['result']).select('p.name > a')
        except:
            v4 = []

@@ -62,15 +62,15 @@ class AniMixPlay(Anime, sitename='animixplay'):
        # In extremely rare cases the anime isn't loaded and must be generated by the server first
        try:
            data = (helpers.post('https://animixplay.com/raw/2ENCwGVubdvzrQ2eu4hBH',
                                 data={data_id: post_id}).json())
        # 400 HTTPError here
        except:
            if '/v4/' in self.url:
                data = (helpers.post('https://animixplay.com/e4/5SkyXQULLrn9OhR',
                                     data={'id': url.split('/')[-1]}).json())['epstream']
            if '/v2' in self.url:
                data = (helpers.post('https://animixplay.com/e2/T23nBBj3NfRzTQx',
                                     data={'id': url.split('/')[-1]}).json())['epstream']

        logger.debug(data)
        if '/v4/' in self.url:

@@ -100,7 +100,7 @@ class AniMixPlay(Anime, sitename='animixplay'):
        except json.decoder.JSONDecodeError:
            # Link generation
            data = (helpers.post('https://animixplay.com/e1/9DYiGVLD7ASqZ5p',
                                 data={'id': url.split('/')[-1]}).json())['epstream']
        logger.debug('Data: {}'.format(data))
        return [data[i] for i in data if i != 'eptotal']
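For reference, the epstream payload consumed by the comprehension above appears to have roughly this shape (values invented, inferred from the 'eptotal' filter):

data = {'1': 'https://host.test/ep1', '2': 'https://host.test/ep2', 'eptotal': 2}
print([data[i] for i in data if i != 'eptotal'])
# ['https://host.test/ep1', 'https://host.test/ep2']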
@@ -30,6 +30,7 @@ class Anistream(Anime, sitename='anistream.xyz'):
        version = self.config.get('version', 'subbed')
        soup = helpers.soupify(helpers.get(self.url))
        versions = soup.select_one('.card-body').select('ul')

        def get_links(version):
            links = [v.attrs['href'] for v in version.select('a')][::-1]
            return links
@@ -7,7 +7,8 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class DarkAnime(Anime, sitename='darkanime'):
    sitename = 'darkanime'

    @classmethod

@@ -16,12 +17,11 @@ class DarkAnime(Anime, sitename = 'darkanime'):
        soup = soup.find_all('a', href=True)
        return [
            SearchResult(
                title=x.find('h3').text.strip(),
                url='https://app.darkanime.stream' + x['href'],
            )
            for x in soup
        ]

    def _scrape_episodes(self):
        html = helpers.soupify(helpers.get(self.url).text)

@@ -30,7 +30,6 @@ class DarkAnime(Anime, sitename = 'darkanime'):
        eps.reverse()
        return eps

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url).text).find_all('h2')[0].text.strip()

@@ -39,11 +38,11 @@ class DarkAnimeEpisode(AnimeEpisode, sitename='darkanime'):
    def _get_sources(self):

        server_links = {
            'mp4upload': 'https://www.mp4upload.com/embed-{}.html',
            'trollvid': 'https://trollvid.net/embed/{}',
        }

        resp = helpers.soupify(helpers.get(self.url).text).find_all('script')  # [-3].string
        for i in resp:
            if i.string:
                if 'sources' in i.string:

@@ -57,10 +56,10 @@ class DarkAnimeEpisode(AnimeEpisode, sitename='darkanime'):
                    for j in server_links:
                        if i.get('host') in j and i.get('source'):
                            sources_list.append({
                                'extractor': j,
                                'url': server_links[j].format(i['source']),
                                'server': j,
                                'version': i['source']
                            })

        return self.sort_sources(sources_list)
@@ -7,26 +7,25 @@ import re

logger = logging.getLogger(__name__)


class DBAnimes(Anime, sitename='dbanimes'):
    sitename = 'dbanimes'

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.get("https://dbanimes.com", params={'s': query, 'post_type': 'anime'}))
        return [
            SearchResult(
                title=x['title'].strip(),
                url=x['href']
            )
            for x in soup.select('h6.fet > a')
        ]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        return [x['href'] for x in soup.select('a.btn.btn-default.mb-2')]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select("li[aria-current=page]")[0].text
@@ -34,16 +33,16 @@ class DBAnimes(Anime, sitename='dbanimes'):

class DBAnimesEpisode(AnimeEpisode, sitename='dbanimes'):
    def check_server(self, extractor, url):
        # Sendvid returns 404
        try:
            soup = helpers.soupify(helpers.get(url, allow_redirects=True))
        except HTTPError:
            return False

        if extractor == 'mixdrop':
            # Checks redirects in mixdrop.
            redirect_regex = r"\s*window\.location\s*=\s*('|\")(.*?)('|\")"
            redirect = re.search(redirect_regex, str(soup))
            if redirect:
                url = 'https://mixdrop.to' + redirect.group(2)
                soup = helpers.soupify(helpers.get(url))
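The mixdrop redirect regex above against an invented page snippet:

import re

page = 'window.location = "/e/abc123"'  # hypothetical
redirect_regex = r"\s*window\.location\s*=\s*('|\")(.*?)('|\")"
m = re.search(redirect_regex, page)
if m:
    print('https://mixdrop.to' + m.group(2))  # https://mixdrop.to/e/abc123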
@@ -77,20 +76,20 @@ class DBAnimesEpisode(AnimeEpisode, sitename='dbanimes'):
        # Exceptions to domain -> extractor
        extractor_dict = {
            'fembed': 'gcloud',
            'gounlimited': 'mp4upload'
        }

        sources_list = []
        for i in range(len(sources)):
            if domains[i] in servers:
                extractor = extractor_dict.get(domains[i], domains[i])
                if self.check_server(extractor, sources[i]):
                    sources_list.append({
                        'extractor': extractor,
                        'url': sources[i],
                        'server': domains[i],
                        'version': 'subbed'
                    })

        return self.sort_sources(sources_list)
@@ -1,86 +0,0 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
import json
import re
import logging

logger = logging.getLogger(__name__)


class DreamAnime(Anime, sitename='dreamanime'):
    """
    Site: http://dreamanime.fun
    Config
    ------
    version: One of ['subbed', 'dubbed']
        Selects the version of audio of anime.
    server: One of ['mp4upload', 'trollvid']
        Selects the server to download from.
    """

    sitename = 'dreamanime'

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.get("https://dreamanime.fun/search", params={"term": query}))
        result_data = soup.select("a#epilink")

        search_results = [
            SearchResult(
                title=result.text,
                url=result.get("href")
            )
            for result in result_data
        ]

        return search_results

    def _scrape_episodes(self):
        version = self.config.get("version", "subbed")
        soup = helpers.soupify(helpers.get(self.url))

        episodes = []

        _all = soup.select("div.episode-wrap")
        for i in _all:
            ep_type = i.find("div", {"class": re.compile("ep-type type-.* dscd")}).text
            if ep_type == 'Sub':
                episodes.append(i.find("a").get("data-src"))
            elif ep_type == 'Dub':
                episodes.append(i.find("a").get("href"))

        if len(episodes) == 0:
            logger.warning("No episodes found")

        return episodes[::-1]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.find("div", {"class": "contingo"}).find("p").text


class DreamAnimeEpisode(AnimeEpisode, sitename='dreamanime'):
    def getLink(self, name, _id):
        if name == "trollvid":
            return "https://trollvid.net/embed/" + _id
        elif name == "mp4upload":
            return f"https://mp4upload.com/embed-{_id}.html"
        elif name == "xstreamcdn":
            return "https://www.xstreamcdn.com/v/" + _id

    def _get_sources(self):
        server = self.config.get("server", "trollvid")
        resp = helpers.get(self.url).text
        hosts = json.loads(re.search(r"var\s+episode\s+=\s+({.*})", resp).group(1))["videos"]
        _type = hosts[0]["type"]
        try:
            host = list(filter(lambda video: video["host"] == server and video["type"] == _type, hosts))[0]
        except IndexError:
            host = hosts[0]
        if host["host"] == "mp4upload" and len(hosts) > 1:
            host = hosts[1]

        name = host["host"]
        _id = host["id"]
        link = self.getLink(name, _id)

        return [(name, link)]
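The host-selection filter from _get_sources above, run over made-up entries:

hosts = [
    {"host": "mp4upload", "type": "subbed", "id": "a1"},
    {"host": "trollvid", "type": "subbed", "id": "b2"},
]
server, _type = "trollvid", hosts[0]["type"]
try:
    host = list(filter(lambda v: v["host"] == server and v["type"] == _type, hosts))[0]
except IndexError:
    host = hosts[0]
print(host["id"])  # b2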
@@ -7,93 +7,96 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class Dubbedanime(Anime, sitename='dubbedanime'):
    sitename = 'dubbedanime'
    url = f'https://{sitename}.net'

    @classmethod
    def search(cls, query):
        search_results = helpers.post(f'https://ww5.dubbedanime.net/ajax/paginate',
                                      data={
                                          'query[search]': query,
                                          'what': 'query',
                                          'model': 'Anime',
                                          'size': 30,
                                          'letter': 'all',
                                      }).json()
        search_results = [
            SearchResult(
                title=search_results['results'][a]['title'],
                url=cls.url + search_results['results'][a]['url'])
            for a in range(len(search_results['results']))
        ]
        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        elements = soup.find("ul", {"id": "episodes-grid"}).select('li > div > a')
        return [('https://dubbedanime.net' + a.get('href')) for a in elements[::-1]]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('h1.h3')[0].text


class DubbedanimeEpisode(AnimeEpisode, sitename='dubbedanime'):
    def _get_sources(self):
        version = self.config['version']
        servers = self.config['servers']

        server_links = {
            'mp4upload': 'https://www.mp4upload.com/embed-{}.html',
            'trollvid': 'https://trollvid.net/embed/{}',
            'mp4sh': 'https://mp4.sh/embed/{0}{1}',
            'vidstreaming': 'https://vidstreaming.io/download?id={}'
        }

        soup = str(helpers.soupify(helpers.get(self.url)))
        x = re.search(r"xuath = '([^']*)", soup).group(1)
        episode_regex = r'var episode = (.*?});'
        api = json.loads(re.search(episode_regex, soup).group(1))
        slug = api['slug']
        sources = api['videos']

        try:  # Continues even if the vidstream api fails
            vidstream = helpers.get(f'https://vid.xngine.com/api/episode/{slug}', referer=self.url).json()
        except:
            vidstream = []

        for a in vidstream:
            if a['host'] == 'vidstreaming' and 'id' in a and 'type' in a:
                sources.append(a)

        for a in servers:  # trying all supported servers in order using the correct language
            for b in sources:
                if b['type'] == version:
                    if b['host'] == a:
                        if get_extractor(a) == None:
                            continue
                        else:
                            provider = a[:]
                            embed = server_links.get(provider, '{}').format(b['id'], x)
                            return [(provider, embed,)]

        logger.debug('No servers found in selected language. Trying all supported servers')

        for a in servers:  # trying all supported servers in order
            for b in sources:
                if b['host'] == a:
                    if get_extractor(a) == None:
                        continue
                    else:
                        provider = a[:]
                        embed = server_links.get(provider, '{}').format(b['id'], x)
                        return [(provider, embed,)]

        logger.debug('No supported servers found, trying mp4sh')

        if re.search(r'"trollvid","id":"([^"]*)', soup):
            token = re.search(r'"trollvid","id":"([^"]*)', soup).group(1)
            embed = server_links.get('mp4sh', '{}').format(token, x)
            return [('mp4sh', embed,)]
        else:
            logger.debug('No servers found')
            return [('no_extractor', '',)]
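A note on the server_links templates above: every template is formatted with both the id and the token, and str.format simply ignores extra positional arguments, which is why the single-placeholder templates still work. With invented values:

server_links = {
    'mp4upload': 'https://www.mp4upload.com/embed-{}.html',
    'mp4sh': 'https://mp4.sh/embed/{0}{1}',
}
print(server_links['mp4upload'].format('abc123', 'tok'))  # extra arg ignored
print(server_links['mp4sh'].format('abc123', 'tok'))      # https://mp4.sh/embed/abc123tok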
@@ -4,11 +4,12 @@ from anime_downloader.sites import helpers
from difflib import get_close_matches
import re


class EraiRaws(Anime, sitename='erai-raws'):
    sitename = 'erai-raws'
    QUALITIES = ['720p', '1080p']

    # Bypass DDosGuard
    def bypass(self):
        host = "https://erai-raws.info"
        resp = helpers.get("https://check.ddos-guard.net/check.js").text

@@ -30,9 +31,9 @@ class EraiRaws(Anime, sitename='erai-raws'):
            folder = helpers.get(url + "index.php" + row.parent.get("href"))
            folder = helpers.soupify(folder)

            # Append all episodes in folder - folders are also separated by quality
            # So everything in a folder can be taken in one go
            [episodes.append(url + x.parent.get("href")) for x in folder.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})]
        else:
            episodes.append(url + row.parent.get("href"))

@@ -42,7 +43,7 @@ class EraiRaws(Anime, sitename='erai-raws'):
        if rows[0].parent.get("href")[-3:] != "mkv":
            url = f"{url}index.php" if url[-1] == "/" else f"{url}/index.php"
            folder = helpers.soupify(helpers.get(url + rows[0].parent.get("href")))
            episodes = [url + x.parent.get("href") for x in folder.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})]
        else:
            episodes = [url + rows[0].parent["href"]]
@@ -52,32 +53,32 @@ class EraiRaws(Anime, sitename='erai-raws'):
    def search(cls, query):
        cls.bypass(cls)
        soup = helpers.soupify(helpers.get("https://erai-raws.info/anime-list/"))
        result_data = soup.find("div", {"class": "shows-wrapper"}).find_all("a")
        titles = [x.text.strip() for x in result_data]

        # Erai-raws doesn't have a search that I could find - so I've opted to implement it myself
        titles = get_close_matches(query, titles, cutoff=0.2)
        result_data = [x for x in result_data if x.text.strip() in titles]

        search_results = [
            SearchResult(
                title=result.text.strip(),
                url="https://erai-raws.info/anime-list/" + result.get("href")
            )
            for result in result_data
        ]
        return search_results

    def _scrape_episodes(self):
        self.bypass()
        soup = helpers.soupify(helpers.get(self.url))
        files = soup.find("div", {"class": "ddmega"}).find("a").get("href")
        if files[-1] != '/':
            files = files + '/'
        index = files + "index.php"
        html = helpers.get(index, headers={"Referer": files})
        soup = helpers.soupify(html)
        rows = soup.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})
        episodes = self.parse(rows, files)
        return episodes
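The difflib-based "search" above, on a tiny made-up title list:

from difflib import get_close_matches

titles = ['One Piece', 'One Punch Man', 'Black Clover']
print(get_close_matches('one punch', titles, cutoff=0.2))
# closest titles first, e.g. ['One Punch Man', 'One Piece']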
@@ -85,6 +86,7 @@ class EraiRaws(Anime, sitename='erai-raws'):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.find("h1").find("span").text


class EraiRawsEpisode(AnimeEpisode, sitename='erai-raws'):
    def _get_sources(self):
        return [("no_extractor", self.url)]
|
@ -60,19 +60,20 @@ class GogoAnime(Anime, sitename='gogoanime'):
|
|||
_base_url = 'https://gogoanime.io/'
|
||||
_episode_list_url = 'https://gogoanime.io/load-list-episode'
|
||||
_search_url = 'https://gogoanime.io/search.html'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
search_results = helpers.soupify(helpers.get(cls._search_url, params = {'keyword': query}))
|
||||
search_results = helpers.soupify(helpers.get(cls._search_url, params={'keyword': query}))
|
||||
search_results = search_results.select('ul.items > li > p > a')
|
||||
|
||||
search_results = [
|
||||
SearchResult(
|
||||
title=i.get('title'),
|
||||
url='https://gogoanime.io' + i.get('href'),
|
||||
meta_info = {
|
||||
'version_key_dubbed':'(Dub)'
|
||||
}
|
||||
)
|
||||
meta_info={
|
||||
'version_key_dubbed': '(Dub)'
|
||||
}
|
||||
)
|
||||
for i in search_results
|
||||
]
|
||||
return search_results
|
||||
|
|
|
@@ -60,8 +60,8 @@ def setup(func):
        except ImportError:
            sess = cf_session
            logger.warning("This provider may not work correctly because it requires selenium to work.\nIf you want to install it then run: 'pip install selenium'.")
    else:
        sess = req_session

    if headers:
        default_headers.update(headers)

@@ -79,7 +79,7 @@
                   headers=default_headers,
                   **kwargs)

        if sess != selescrape:  # TODO fix this for selescrape too
            res.raise_for_status()
        logger.debug(res.url)
        # logger.debug(res.text)
@@ -40,14 +40,14 @@ def get_browser_config():
    '''
    Decides what browser selescrape will use.
    '''
    os_browser = {  # maps os to a browser
        'linux': 'firefox',
        'darwin': 'chrome',
        'win32': 'chrome'
    }
    for a in os_browser:
        if platform.startswith(a):
            browser = os_browser[a]
        else:
            browser = 'chrome'
    value = data['dl']['selescrape_browser']

@@ -73,7 +73,7 @@ def add_url_params(url, params):
    return url if not params else url + '?' + urlencode(params)


def driver_select():
    '''
    It configures what each browser should do
    and gives the driver variable that is used

@@ -88,7 +88,7 @@ def driver_select():
        fireFoxOptions = webdriver.FirefoxOptions()
        fireFoxOptions.headless = True
        fireFoxOptions.add_argument('--log fatal')
        if binary == None:
            driver = webdriver.Firefox(options=fireFoxOptions, service_log_path=os.path.devnull)
        else:
            try:

@@ -172,23 +172,23 @@ def cloudflare_wait(driver):
        title = driver.title
        if not title == "Just a moment...":
            break
    time.sleep(1)  # This is necessary to make sure everything has loaded fine.


def request(request_type, url, **kwargs):  # Headers not yet supported, headers={}
    params = kwargs.get('params', {})
    new_url = add_url_params(url, params)
    driver = driver_select()
    status = status_select(driver, new_url, 'hide')
    try:
        cloudflare_wait(driver)
        user_agent = driver.execute_script("return navigator.userAgent;")  # dirty, but allows for all sorts of things above
        cookies = driver.get_cookies()
        text = driver.page_source
        driver.close()
        return SeleResponse(url, request_type, text, cookies, user_agent)
    except:
        driver.save_screenshot(f"{get_data_dir()}/screenshot.png")
        driver.close()
        logger.error(f'There was a problem getting the page: {new_url}. \
See the screenshot for more info:\n{get_data_dir()}/screenshot.png')
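What add_url_params (called at the top of request above) produces, with invented values:

from urllib.parse import urlencode

url, params = 'https://example.test/search', {'q': 'naruto'}
new_url = url if not params else url + '?' + urlencode(params)
print(new_url)  # https://example.test/search?q=naruto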
@@ -211,6 +211,7 @@ class SeleResponse:
    user_agent: string
        User agent used on the webpage
    """

    def __init__(self, url, method, text, cookies, user_agent):
        self.url = url
        self.method = method
@@ -4,11 +4,14 @@ import logging
def not_working(message):
    orig_message = message
    message += " You can use `anime -ll DEBUG` to use it."

    def wrapper(cls):
        class NotWorking:
            """Site is not working"""

            def __init__(self, *args, **kwargs):
                raise RuntimeError(message)

            def search(cls, *args, **kwargs):
                raise RuntimeError(message)
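A hedged sketch of how the not_working decorator above is presumably applied, assuming the truncated wrapper goes on to return the NotWorking class and wrapper itself is returned; SomeSite and the message are stand-ins:

@not_working("example.com is down.")
class SomeSite:
    pass

try:
    SomeSite()
except RuntimeError as e:
    print(e)  # example.com is down. You can use `anime -ll DEBUG` to use it.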
@@ -6,18 +6,19 @@ import logging

logger = logging.getLogger(__name__)


class HorribleSubs(Anime, sitename='horriblesubs'):
    sitename = 'horriblesubs'

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.get("https://horriblesubs.info/api.php", params={"method": "search", "value": query}))
        titlesDict = dict([(re.search('(.*)-', x.find(text=True, recursive=False)).group(1).strip(), x['href']) for x in soup.select('li > a')])

        return [
            SearchResult(
                title=x[0],
                url='https://horriblesubs.info' + x[1]
            )
            for x in titlesDict.items()
        ]

@@ -29,11 +30,11 @@ class HorribleSubs(Anime, sitename='horriblesubs'):
        episodes = []

        while True:
            resp = helpers.get('https://horriblesubs.info/api.php', params={'method': 'getshows', 'type': 'show', 'showid': show_id, 'nextid': next_id})

            if resp.text == "DONE":
                if next_id == 1:
                    resp = helpers.get('https://horriblesubs.info/api.php', params={'method': 'getshows', 'type': 'show', 'showid': show_id})
                else:
                    break

@@ -47,6 +48,7 @@ class HorribleSubs(Anime, sitename='horriblesubs'):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.h1.text


class HorribleSubsEpisode(AnimeEpisode, sitename='horriblesubs'):
    def _get_sources(self):
        return [('no_extractor', self.url)]
@@ -2,42 +2,45 @@ from importlib import import_module

ALL_ANIME_SITES = [
    # ('filename', 'sitename', 'classname')
    ('_4anime','4anime','Anime4'),
    ('anime8','anime8','Anime8'),
    ('animebinge','animebinge','AnimeBinge'),
    ('_4anime', '4anime', 'Anime4'),
    ('anime8', 'anime8', 'Anime8'),
    ('animebinge', 'animebinge', 'AnimeBinge'),
    ('animechameleon', 'gurminder', 'AnimeChameleon'),
    ('animedaisuki','animedaisuki','Animedaisuki'),
    ('animedaisuki', 'animedaisuki', 'Animedaisuki'),
    ('animeflix', 'animeflix', 'AnimeFlix'),
    ('animeflv', 'animeflv', 'Animeflv'),
    ('animefreak', 'animefreak', 'AnimeFreak'),
    ('animefree','animefree','AnimeFree'),
    ('animefrenzy','animefrenzy','AnimeFrenzy'),
    ('animekisa','animekisa','AnimeKisa'),
    ('animeonline','animeonline360','AnimeOnline'),
    ('animefree', 'animefree', 'AnimeFree'),
    ('animefrenzy', 'animefrenzy', 'AnimeFrenzy'),
    ('animekisa', 'animekisa', 'AnimeKisa'),
    ('animeonline', 'animeonline360', 'AnimeOnline'),
    ('animeout', 'animeout', 'AnimeOut'),
    ('animerush','animerush','AnimeRush'),
    ('animerush', 'animerush', 'AnimeRush'),
    ('animesimple', 'animesimple', 'AnimeSimple'),
    ('animevibe','animevibe','AnimeVibe'),
    ('animixplay','animixplay','AniMixPlay'),
    ('animevibe', 'animevibe', 'AnimeVibe'),
    ('animixplay', 'animixplay', 'AniMixPlay'),
    ('darkanime', 'darkanime', 'DarkAnime'),
    ('dbanimes', 'dbanimes', 'DBAnimes'),
    # ('erairaws', 'erai-raws', 'EraiRaws'),
    ('fastani', 'fastani', 'FastAni'),
    ('horriblesubs', 'horriblesubs', 'HorribleSubs'),
    ('itsaturday', 'itsaturday', 'Itsaturday'),
    ('justdubs','justdubs','JustDubs'),
    ('justdubs', 'justdubs', 'JustDubs'),
    ('kickass', 'kickass', 'KickAss'),
    ('kissanimex', 'kissanimex', 'KissAnimeX'),
    ('kisscartoon', 'kisscartoon', 'KissCartoon'),
    ('nyaa','nyaa','Nyaa'),
    ('nyaa', 'nyaa', 'Nyaa'),
    ('ryuanime', 'ryuanime', 'RyuAnime'),
    ('twistmoe', 'twist.moe', 'TwistMoe'),
    ('tenshimoe','tenshi.moe','TenshiMoe'),
    ('vidstream','vidstream','VidStream'),
    ('voiranime','voiranime','VoirAnime'),
    ('tenshimoe', 'tenshi.moe', 'TenshiMoe'),
    ('vidstream', 'vidstream', 'VidStream'),
    ('voiranime', 'voiranime', 'VoirAnime'),
    ('vostfree', 'vostfree', 'VostFree'),
]


def get_anime_class(url):
    """
    Get anime class corresponding to url or name.
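
The tuple list exists so sites can be loaded lazily by name; a minimal sketch of the lookup it enables (mechanics assumed from the import_module context above, helper name invented):

    from importlib import import_module

    def load_site(sitename):
        for filename, name, classname in ALL_ANIME_SITES:
            if name == sitename:
                module = import_module('anime_downloader.sites.' + filename)
                return getattr(module, classname)

    Nyaa = load_site('nyaa')  # resolves to anime_downloader.sites.nyaa.Nyaa
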
@@ -1,6 +1,7 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers


class Itsaturday(Anime, sitename='itsaturday'):
    sitename = 'itsaturday'
    DOMAIN = 'http://www.itsaturday.com'

@@ -33,4 +34,3 @@ class ItsaturdayEpisode(AnimeEpisode, sitename='itsaturday'):
            ('no_extractor',
             self._parent.DOMAIN + helpers.soupify(helpers.get(self.url)).select_one('source').attrs['src'])
        ]

@@ -9,8 +9,10 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class JustDubs(Anime, sitename='justdubs'):
    sitename = 'justdubs'

    @classmethod
    def search(cls, query):
        results = helpers.get(f"http://justdubs.org/search/node/{query}").text

@@ -19,8 +21,8 @@ class JustDubs(Anime, sitename='justdubs'):
        logger.debug(results_data)
        search_results = [
            SearchResult(
                title = result.text,
                url = result.get("href")
                title=result.text,
                url=result.get("href")
            )
            for result in results_data
        ]

@@ -29,7 +31,7 @@ class JustDubs(Anime, sitename='justdubs'):
    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        ret = [str(a['href'])
               for a in soup.find_all('a', {'class' : 'list-group-item'})]
               for a in soup.find_all('a', {'class': 'list-group-item'})]
        if ret == []:
            err = 'No Episodes Found in url "{}"'.format(self.url)
            args = [self.url]

@@ -42,24 +44,25 @@ class JustDubs(Anime, sitename='justdubs'):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('h1.page-header')[0].text


class JustDubsEpisode(AnimeEpisode, sitename='justdubs'):
    def _get_sources(self):
        servers = self.config['servers']

        """maps urls to extractors"""
        server_links = {
            'mp4upload':'mp4upload.com',
            'gcloud':'gcloud.live',
            'gcloud':'fembed.com'
        server_links = {
            'mp4upload': 'mp4upload.com',
            'gcloud': 'gcloud.live',
            'gcloud': 'fembed.com'
        }

        soup = helpers.soupify(helpers.get(self.url)).select('iframe')

        for a in servers:
            for b in soup:
                for c in server_links:
                    if server_links[c] in b.get('src') and a == c:
                        return [(c, b.get('src'))]

        logger.warn("Unsupported URL")
        return ""
@@ -7,96 +7,98 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class KickAss(Anime, sitename='kickass'):
    """
    Nice things
    Siteconfig
    ----------
    server: Primary server to use (Default: A-KICKASSANIME)
    fallback_servers: Recorded working servers which are used if the primary server cannot be found
    ext_fallback_servers: Recorded working ext_servers (second video player) which are used if the first video player fails
    """
    sitename = 'kickass'
    url = f'https://kickassanime.rs/search'
    """
    Nice things
    Siteconfig
    ----------
    server: Primary server to use (Default: A-KICKASSANIME)
    fallback_servers: Recorded working servers which are used if the primary server cannot be found
    ext_fallback_servers: Recorded working ext_servers (second video player) which are used if the first video player fails
    """
    sitename = 'kickass'
    url = f'https://kickassanime.rs/search'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url,
                                                     params={'q': query}))
        regex = r'\[{[\W\w]*?}]'
        search_results = json.loads(re.search(regex,str(search_results)).group()) if re.search(regex,str(search_results)) else ''
        search_results = [
            SearchResult(
                title=a['name'],
                url=f'https://kickassanime.rs{a["slug"]}')
            for a in search_results
        ]
        return(search_results)
    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url,
                                                     params={'q': query}))
        regex = r'\[{[\W\w]*?}]'
        search_results = json.loads(re.search(regex, str(search_results)).group()) if re.search(regex, str(search_results)) else ''
        search_results = [
            SearchResult(
                title=a['name'],
                url=f'https://kickassanime.rs{a["slug"]}')
            for a in search_results
        ]
        return(search_results)

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))

        regex = r'\[{[\W\w]*?}]'
        episodes = json.loads(re.search(regex,str(soup)).group())
    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))

        regex = r'\[{[\W\w]*?}]'
        episodes = json.loads(re.search(regex, str(soup)).group())

        return [f'https://kickassanime.rs{a["slug"]}' for a in episodes[::-1]]

    def _scrape_metadata(self):
        soup = helpers.get(self.url).text

        regex = r'{"name"[^}]*}'
        info = json.loads(re.search(regex,str(soup)).group()+']}')
        self.title = info['name']

        return [f'https://kickassanime.rs{a["slug"]}' for a in episodes[::-1]]

    def _scrape_metadata(self):
        soup = helpers.get(self.url).text

        regex = r'{"name"[^}]*}'
        info = json.loads(re.search(regex, str(soup)).group() + ']}')
        self.title = info['name']


class KickAssEpisode(AnimeEpisode, sitename='kickass'):
    #TODO sitename should be handled at a higher level, allowing shorter names
    def _get_sources(self):
        server = self.config['server']
        fallback = self.config['fallback_servers']
        ext_fallback = self.config['ext_fallback_servers']
    # TODO sitename should be handled at a higher level, allowing shorter names
    def _get_sources(self):
        server = self.config['server']
        fallback = self.config['fallback_servers']
        ext_fallback = self.config['ext_fallback_servers']

        soup = helpers.soupify(helpers.get(self.url))
        regex = r'{"clip[\w\W]*?}\]} '
        elements = json.loads(re.search(regex,str(soup)).group())
        links = ['link1','link2','link3','link4']
        sources_list = [] #Primary sources which links to more sources
        ext_servers = []
        for a in links:
            if len((elements['episode'][a]).replace(' ','')) != 0:
                sources_list.append(elements['episode'][a])
        if elements['ext_servers']:
            for a in elements['ext_servers']:
                ext_servers.append(a)
        soup = helpers.get(sources_list[0]).text
        regex = r'\[{[\W\w]*?}\]'
        sources = re.search(regex,str(soup))
        soup = helpers.soupify(helpers.get(self.url))
        regex = r'{"clip[\w\W]*?}\]} '
        elements = json.loads(re.search(regex, str(soup)).group())
        links = ['link1', 'link2', 'link3', 'link4']
        sources_list = []  # Primary sources which links to more sources
        ext_servers = []
        for a in links:
            if len((elements['episode'][a]).replace(' ', '')) != 0:
                sources_list.append(elements['episode'][a])
        if elements['ext_servers']:
            for a in elements['ext_servers']:
                ext_servers.append(a)
        soup = helpers.get(sources_list[0]).text
        regex = r'\[{[\W\w]*?}\]'
        sources = re.search(regex, str(soup))

        if not sources:
            regex = r"[^/]window\.location = '([^']*)"
            sources = re.search(regex,str(soup))
            if sources:
                return [('vidstream', sources.group(1),)]
            else:
                if len(ext_servers) == 0:
                    return ''
                for i in range(2):
                    for a in ext_servers:
                        if a in ext_fallback or i == 1:
                            if a['name'] == 'Vidstreaming' or a['name'] == 'Vidcdn':
                                return [('vidstream', a['link'],)]
                            else:
                                return [('haloani', a['link'],)]
        if not sources:
            regex = r"[^/]window\.location = '([^']*)"
            sources = re.search(regex, str(soup))
            if sources:
                return [('vidstream', sources.group(1),)]
            else:
                if len(ext_servers) == 0:
                    return ''
                for i in range(2):
                    for a in ext_servers:
                        if a in ext_fallback or i == 1:
                            if a['name'] == 'Vidstreaming' or a['name'] == 'Vidcdn':
                                return [('vidstream', a['link'],)]
                            else:
                                return [('haloani', a['link'],)]

        sources = json.loads(sources.group())
        for a in sources:
            if a['name'] == self.config['server']:
                return [('haloani', a['src'],)]

        logger.debug('Preferred server "%s" not found. Trying all supported servers.',self.config['server'])
        for a in fallback:
            for b in sources:
                if b['name'] == a:
                    return [('haloani', b['src'],)]

        logger.warning('No supported servers found. Trying all servers. This will most likely not work')
        return [('haloani', a['src'],)]
        sources = json.loads(sources.group())
        for a in sources:
            if a['name'] == self.config['server']:
                return [('haloani', a['src'],)]

        logger.debug('Preferred server "%s" not found. Trying all supported servers.', self.config['server'])
        for a in fallback:
            for b in sources:
                if b['name'] == a:
                    return [('haloani', b['src'],)]

        logger.warning('No supported servers found. Trying all servers. This will most likely not work')
        return [('haloani', a['src'],)]
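
KickAss embeds its data as a JSON array inside the page, so both search and episode scraping pull it out with the same non-greedy regex. A hedged standalone sketch (the HTML snippet is invented for illustration):

    import json
    import re

    html = 'window.appData = [{"name": "Example Show", "slug": "/anime/example-show"}];'
    match = re.search(r'\[{[\W\w]*?}]', html)
    data = json.loads(match.group()) if match else []
    # data[0]['slug'] -> '/anime/example-show'
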
@@ -13,14 +13,13 @@ class KissanimeEpisode(AnimeEpisode, sitename='kissanime'):
    _base_url = 'https://kissanime.ru'

    def _get_sources(self):
        ret = helpers.get(self.url+'&s=hydrax', sel=True).text
        ret = helpers.get(self.url + '&s=hydrax', sel=True).text
        data = self._scrape_episode(ret)
        return data

    def _scrape_episode(self, response):
        regex = r'iframe.*src="(https://.*?)"'
        url = (re.search(regex,response).group(1))
        url = (re.search(regex, response).group(1))
        return [('hydrax', url)]


@@ -33,14 +32,14 @@ class KissAnime(Anime, sitename='kissanime'):

    @classmethod
    def search(cls, query):
        sel = helpers.get("https://kissanime.ru",sel=True)
        sel = helpers.get("https://kissanime.ru", sel=True)
        cookies = sel.cookies
        agent = sel.user_agent # Note that the user agent must be the same as the one which generated the cookies
        agent = sel.user_agent  # Note that the user agent must be the same as the one which generated the cookies
        cookies = {c['name']: c['value'] for c in cookies}
        soup = helpers.soupify((helpers.post("https://kissanime.ru/Search/Anime", headers = {
        soup = helpers.soupify((helpers.post("https://kissanime.ru/Search/Anime", headers={
            "User-Agent": agent,
            "Referer": "https://kissanime.ru/Search/Anime"
        },data = {"keyword": query},cookies=cookies)))
        }, data={"keyword": query}, cookies=cookies)))

        # If only one anime found, kissanime redirects to anime page.
        # We don't want that

@@ -49,10 +48,10 @@ class KissAnime(Anime, sitename='kissanime'):
                title=soup.find('a', 'bigChar').text,
                url=cls.domain +
                soup.find('a', 'bigChar').get('href'),
                poster='',
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':'(Sub)'
                poster='',
                meta_info={
                    'version_key_dubbed': '(Dub)',
                    'version_key_subbed': '(Sub)'
                }
            )]

@@ -64,9 +63,9 @@ class KissAnime(Anime, sitename='kissanime'):
                title=res.text.strip(),
                url=cls.domain + res.find('a').get('href'),
                poster='',
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':'(Sub)'
                meta_info={
                    'version_key_dubbed': '(Dub)',
                    'version_key_subbed': '(Sub)'
                }
            )
            logger.debug(res)

@@ -74,7 +73,6 @@ class KissAnime(Anime, sitename='kissanime'):

        return ret

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url, sel=True).text)
        ret = [self.domain + str(a['href'])

@@ -94,7 +92,6 @@ class KissAnime(Anime, sitename='kissanime'):
        ret = ret[::-1]
        return ret

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url, sel=True).text)
        info_div = soup.select_one('.barContent')

@@ -4,7 +4,8 @@ import logging

logger = logging.getLogger(__name__)

class KissAnimeX(Anime, sitename = 'kissanimex'):

class KissAnimeX(Anime, sitename='kissanimex'):
    sitename = "kissanimex"

    @classmethod

@@ -13,12 +14,12 @@ class KissAnimeX(Anime, sitename='kissanimex'):
        soup = helpers.soupify(helpers.get(url, params={'q': query}))
        items = soup.select('td > a')
        search_results = [
            SearchResult(
                title = x.text,
                url = 'https://kissanimex.com' + x['href']
            )
            for x in items
        ]
            SearchResult(
                title=x.text,
                url='https://kissanimex.com' + x['href']
            )
            for x in items
        ]
        return search_results

    def _scrape_episodes(self):

@@ -29,8 +30,8 @@ class KissAnimeX(Anime, sitename='kissanimex'):
        # This makes it possible to download pokemon (for example) without having to change config.
        subbed = self.config['version'] != 'dubbed'
        subbed_converter = {
            True:'div#episodes-sub',
            False:'div#episodes-dub',
            True: 'div#episodes-sub',
            False: 'div#episodes-dub',
        }

        eps = soup.select_one(subbed_converter.get(subbed)).select('td > a')

@@ -47,6 +48,7 @@ class KissAnimeX(Anime, sitename='kissanimex'):
    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url).text).select_one('a.bigChar').text


class KissAnimeXEpisode(AnimeEpisode, sitename='kissanimex'):
    def _get_sources(self):
        r = helpers.get(self.url).text

|
@ -22,32 +22,32 @@ class KisscartoonEpisode(AnimeEpisode, sitename='kisscartoon'):
|
|||
'episode_id': self.url.split('id=')[-1],
|
||||
}
|
||||
api = helpers.post(self._episode_list_url,
|
||||
params=params,
|
||||
referer=self.url).json()
|
||||
if api.get('status',False):
|
||||
params=params,
|
||||
referer=self.url).json()
|
||||
if api.get('status', False):
|
||||
iframe_regex = r'<iframe src="([^"]*?)"'
|
||||
url = re.search(iframe_regex,api['value']).group(1)
|
||||
url = re.search(iframe_regex, api['value']).group(1)
|
||||
if url.startswith('//'):
|
||||
url = 'https:' + url
|
||||
if url.endswith('mp4upload.com/embed-.html') or url.endswith('yourupload.com/embed/'): #Sometimes returns empty link
|
||||
if url.endswith('mp4upload.com/embed-.html') or url.endswith('yourupload.com/embed/'): # Sometimes returns empty link
|
||||
url = ''
|
||||
continue
|
||||
break
|
||||
|
||||
extractor = 'streamx' #defaut extractor
|
||||
extractor_urls = { #dumb, but easily expandable, maps urls to extractors
|
||||
"mp4upload.com":"mp4upload",
|
||||
"yourupload.com":"yourupload"
|
||||
extractor = 'streamx' # defaut extractor
|
||||
extractor_urls = { # dumb, but easily expandable, maps urls to extractors
|
||||
"mp4upload.com": "mp4upload",
|
||||
"yourupload.com": "yourupload"
|
||||
}
|
||||
for i in extractor_urls:
|
||||
if i in url:
|
||||
extractor = extractor_urls[i]
|
||||
|
||||
return [(extractor,url)]
|
||||
return [(extractor, url)]
|
||||
|
||||
|
||||
class KissCartoon(KissAnime, sitename='kisscartoon'):
|
||||
sitename='kisscartoon'
|
||||
sitename = 'kisscartoon'
|
||||
|
||||
@classmethod
|
||||
def search(cls, query):
|
||||
|
@ -69,7 +69,6 @@ class KissCartoon(KissAnime, sitename='kisscartoon'):
|
|||
|
||||
return ret
|
||||
|
||||
|
||||
def _scrape_episodes(self):
|
||||
soup = helpers.soupify(helpers.get(self.url, sel=True).text)
|
||||
ret = [str(a['href'])
|
||||
|
@ -82,7 +81,6 @@ class KissCartoon(KissAnime, sitename='kisscartoon'):
|
|||
|
||||
return list(reversed(ret))
|
||||
|
||||
|
||||
def _scrape_metadata(self):
|
||||
soup = helpers.soupify(helpers.get(self.url, sel=True).text)
|
||||
self.title = soup.select("a.bigChar")[0].text
|
||||
|
|
|
@@ -7,96 +7,98 @@ from anime_downloader.sites import helpers
from anime_downloader.config import Config

logger = logging.getLogger(__name__)


class NineAnime(Anime, sitename='nineanime'):
    sitename = '9anime'
    extension = Config['siteconfig'][sitename]['domain_extension']
    url = f'https://{sitename}.{extension}/search'
    @classmethod
    def search(cls, query):
        # Only uses the first page of search results, but it's sufficient.
        search_results = helpers.soupify(helpers.get(cls.url, params={'keyword': query})).select('a.name')
        return [
            SearchResult(
                title = i.text,
                url = i.get('href'),
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':''
                }
            )
            for i in search_results
        ]
    sitename = '9anime'
    extension = Config['siteconfig'][sitename]['domain_extension']
    url = f'https://{sitename}.{extension}/search'

    @classmethod
    def search(cls, query):
        # Only uses the first page of search results, but it's sufficient.
        search_results = helpers.soupify(helpers.get(cls.url, params={'keyword': query})).select('a.name')
        return [
            SearchResult(
                title=i.text,
                url=i.get('href'),
                meta_info={
                    'version_key_dubbed': '(Dub)',
                    'version_key_subbed': ''
                }
            )
            for i in search_results
        ]

    def _scrape_episodes(self):
        self.extension = self.config['domain_extension']
        soup = helpers.soupify(helpers.get(self.url))
        # Assumptions can cause errors, but if this fails it's better to get issues on github.
        title_id = soup.select("div#player")[0]
        title_id = title_id.get('data-id')
        episode_html = helpers.get(f"https://9anime.{self.extension}/ajax/film/servers?id={title_id}").text
        # Only using streamtape, MyCloud can get added, but it uses m3u8.
        streamtape_regex = r'data-id=\\"40\\".*?(data-name|$)'
        streamtape_episodes = re.search(streamtape_regex, episode_html)
        if not streamtape_episodes:
            logger.error('Unable to find streamtape server')
            return ['']
    def _scrape_episodes(self):
        self.extension = self.config['domain_extension']
        soup = helpers.soupify(helpers.get(self.url))
        # Assumptions can cause errors, but if this fails it's better to get issues on github.
        title_id = soup.select("div#player")[0]
        title_id = title_id.get('data-id')
        episode_html = helpers.get(f"https://9anime.{self.extension}/ajax/film/servers?id={title_id}").text
        # Only using streamtape, MyCloud can get added, but it uses m3u8.
        streamtape_regex = r'data-id=\\"40\\".*?(data-name|$)'
        streamtape_episodes = re.search(streamtape_regex, episode_html)
        if not streamtape_episodes:
            logger.error('Unable to find streamtape server')
            return ['']

        streamtape_episodes = streamtape_episodes.group()
        # You can use helpers.soupify on all this, but it's resource intensive, unreliable
        # and can give recursion errors on large series like naruto.
        streamtape_episodes = streamtape_episodes.group()
        # You can use helpers.soupify on all this, but it's resource intensive, unreliable
        # and can give recursion errors on large series like naruto.

        # Group 3 is the actual id.
        # Matches stuff like: id=\"a9cfdbd2029a467d2b2c9f156cbedc02b25a23199812ad4dbe5a25afa9edb140\"
        episode_regex = r'id=(\\|)(\'|")(.{64}?)(\\|)(\'|")'
        episodes = re.findall(episode_regex, streamtape_episodes)
        if not episodes:
            logger.error('Unable to find any episodes')
            return ['']
        # Group 3 is the actual id.
        # Matches stuff like: id=\"a9cfdbd2029a467d2b2c9f156cbedc02b25a23199812ad4dbe5a25afa9edb140\"
        episode_regex = r'id=(\\|)(\'|")(.{64}?)(\\|)(\'|")'
        episodes = re.findall(episode_regex, streamtape_episodes)
        if not episodes:
            logger.error('Unable to find any episodes')
            return ['']

        # Returns an ID instead of actual URL
        return [i[2] for i in episodes]
        # Returns an ID instead of actual URL
        return [i[2] for i in episodes]

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('h1.title')[0].text
    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('h1.title')[0].text


class NineAnimeEpisode(AnimeEpisode, sitename='9anime'):
    def _get_sources(self):
        self.extension = self.config['domain_extension']
        if not self.url:
            return ''
    def _get_sources(self):
        self.extension = self.config['domain_extension']
        if not self.url:
            return ''

        # Arbitrary timeout to prevent spamming the server which will result in an error.
        time.sleep(0.3)
        # Server 40 is streamtape, change this if you want to add other servers
        episode_ajax = f"https://9anime.{self.extension}/ajax/episode/info?id={self.url}&server=40"
        target = helpers.get(episode_ajax).json().get('target','')
        logger.debug('Videolink: {}'.format(target))
        if not target:
            return ''
        # Arbitrary timeout to prevent spamming the server which will result in an error.
        time.sleep(0.3)
        # Server 40 is streamtape, change this if you want to add other servers
        episode_ajax = f"https://9anime.{self.extension}/ajax/episode/info?id={self.url}&server=40"
        target = helpers.get(episode_ajax).json().get('target', '')
        logger.debug('Videolink: {}'.format(target))
        if not target:
            return ''

        videolink = helpers.soupify(helpers.get(target)).select('div#videolink')
        logger.debug('Videolink: {}'.format(videolink))
        if not videolink:
            return ''
        videolink = helpers.soupify(helpers.get(target)).select('div#videolink')
        logger.debug('Videolink: {}'.format(videolink))
        if not videolink:
            return ''

        videolink = videolink[0].text
        videolink = videolink[0].text

        # Appends https
        videolink = 'https:' + videolink if videolink.startswith('//') else videolink
        return [('no_extractor', videolink,)]
        # Appends https
        videolink = 'https:' + videolink if videolink.startswith('//') else videolink
        return [('no_extractor', videolink,)]
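
The episode regex above deliberately avoids soupifying the whole server blob; a quick check of what group 3 captures (the sample string is constructed for illustration):

    import re

    sample = 'id=\\"' + 'a' * 64 + '\\"'
    episode_regex = r'id=(\\|)(\'|")(.{64}?)(\\|)(\'|")'
    ids = [m[2] for m in re.findall(episode_regex, sample)]
    # ids == ['aaaa...a']  -- the 64-character server-side episode id
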
@@ -3,7 +3,8 @@ import re
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers

class Nyaa(Anime, sitename = 'nyaa'):

class Nyaa(Anime, sitename='nyaa'):
    """
    Site: https://nyaa.si

@@ -25,23 +26,24 @@ class Nyaa(Anime, sitename = 'nyaa'):
        self = cls()

        parameters = {"f": filters[self.config["filter"]], "c": categories[self.config["category"]], "q": query, "s": "size", "o": "desc"}
        search_results = helpers.soupify(helpers.get(f"https://nyaa.si/", params = parameters))
        search_results = helpers.soupify(helpers.get(f"https://nyaa.si/", params=parameters))

        search_results = [
            SearchResult(
                title = i.select("a:not(.comments)")[1].get("title"),
                url = i.find_all('a',{'href':re.compile(rex)})[0].get('href'),
                meta= {'peers':i.find_all('td',class_ = 'text-center')[3].text + ' peers','size':i.find_all('td',class_ = 'text-center')[1].text})
                title=i.select("a:not(.comments)")[1].get("title"),
                url=i.find_all('a', {'href': re.compile(rex)})[0].get('href'),
                meta={'peers': i.find_all('td', class_='text-center')[3].text + ' peers', 'size':i.find_all('td', class_='text-center')[1].text})

            for i in search_results.select("tr.default, tr.success")
        ]
        ]

        return search_results

    def _scrape_episodes(self):
        #the magnet has all episodes making this redundant
        # the magnet has all episodes making this redundant
        return [self.url]


class NyaaEpisode(AnimeEpisode, sitename='nyaa'):
    def _get_sources(self):
        return [('no_extractor', self.url)]

@@ -7,6 +7,7 @@ import logging

logger = logging.getLogger(__name__)


class RyuAnime(Anime, sitename='ryuanime'):
    """
    Site: http://www4.ryuanime.com

@@ -18,20 +19,20 @@ class RyuAnime(Anime, sitename='ryuanime'):
        Selects the server to download from.
    """

    sitename='ryuanime'
    sitename = 'ryuanime'

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.get("https://www4.ryuanime.com/search", params = {"term" : query}))
        soup = helpers.soupify(helpers.get("https://www4.ryuanime.com/search", params={"term": query}))
        result_data = soup.select("ul.list-inline")[0].select("a")

        search_results = [
            SearchResult(
                title = result.text,
                url = result.get("href")
            )
                title=result.text,
                url=result.get("href")
            )
            for result in result_data
        ]
        ]
        return search_results

    def _scrape_episodes(self):

@@ -49,6 +50,7 @@ class RyuAnime(Anime, sitename='ryuanime'):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select("div.card-header")[0].find("h1").text


class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
    def getLink(self, name, _id):
        if name == "trollvid":

@@ -61,7 +63,7 @@ class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
    def _get_sources(self):
        server = self.config.get("server", "trollvid")
        soup = helpers.soupify(helpers.get(self.url))

        hosts = json.loads(re.search("\[.*?\]", soup.select("div.col-sm-9")[0].select("script")[0].text).group())

        _type = hosts[0]["type"]

@@ -69,7 +71,7 @@ class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
            host = list(filter(lambda video: video["host"] == server and video["type"] == _type, hosts))[0]
        except IndexError:
            host = hosts[0]
        #I will try to avoid mp4upload since it mostly doesn't work
        # I will try to avoid mp4upload since it mostly doesn't work
        if host["host"] == "mp4upload" and len(hosts) > 1:
            host = hosts[1]

|
@ -37,8 +37,8 @@ class TwistMoe(Anime, sitename='twist.moe'):
|
|||
@classmethod
|
||||
def search(self, query):
|
||||
headers = {
|
||||
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.46 Safari/537.36',
|
||||
'x-access-token': '1rj2vRtegS8Y60B3w3qNZm5T2Q0TN2NR'
|
||||
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.46 Safari/537.36',
|
||||
'x-access-token': '1rj2vRtegS8Y60B3w3qNZm5T2Q0TN2NR'
|
||||
}
|
||||
# soup = helpers.soupify(helpers.get('https://twist.moe/', allow_redirects=True, headers=headers))
|
||||
req = helpers.get('https://twist.moe/api/anime', headers=headers)
|
||||
|
@ -75,7 +75,7 @@ class TwistMoe(Anime, sitename='twist.moe'):
|
|||
decrypt(episode['source'].encode('utf-8'), KEY).decode('utf-8')
|
||||
for episode in episodes]
|
||||
|
||||
self._episode_urls = [(i+1, episode_url)
|
||||
self._episode_urls = [(i + 1, episode_url)
|
||||
for i, episode_url in enumerate(episode_urls)]
|
||||
self._len = len(self._episode_urls)
|
||||
|
||||
|
@ -85,7 +85,7 @@ class TwistMoe(Anime, sitename='twist.moe'):
|
|||
# From stackoverflow https://stackoverflow.com/questions/36762098/how-to-decrypt-password-from-javascript-cryptojs-aes-encryptpassword-passphras
|
||||
def pad(data):
|
||||
length = BLOCK_SIZE - (len(data) % BLOCK_SIZE)
|
||||
return data + (chr(length)*length).encode()
|
||||
return data + (chr(length) * length).encode()
|
||||
|
||||
|
||||
def unpad(data):
|
||||
|
@ -108,7 +108,7 @@ def decrypt(encrypted, passphrase):
|
|||
encrypted = base64.b64decode(encrypted)
|
||||
assert encrypted[0:8] == b"Salted__"
|
||||
salt = encrypted[8:16]
|
||||
key_iv = bytes_to_key(passphrase, salt, 32+16)
|
||||
key_iv = bytes_to_key(passphrase, salt, 32 + 16)
|
||||
key = key_iv[:32]
|
||||
iv = key_iv[32:]
|
||||
aes = AES.new(key, AES.MODE_CBC, iv)
|
||||
|
|
|
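
These helpers mirror OpenSSL's passphrase key derivation, which is what CryptoJS uses when encrypting with a password. A short annotated sketch of the blob layout decrypt() expects (the KEY constant itself lives elsewhere in this module and is not shown here):

    blob = base64.b64decode(encrypted_source)  # encrypted_source comes from the twist.moe API
    # blob == b"Salted__" + 8-byte salt + AES-256-CBC ciphertext
    # bytes_to_key(passphrase, salt, 32 + 16) re-derives the key (32 bytes) and IV (16 bytes)
    # via the MD5-based EVP_BytesToKey scheme, so decrypt() can undo CryptoJS output.
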
@@ -5,65 +5,65 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class VidStream(Anime, sitename='vidstream'):
    sitename = 'vidstream'
    @classmethod
    def search(cls, query):
        """
        #Use below code for live ajax search.
        #Will show max 10 search results

        search_results = helpers.get('https://vidstreaming.io/ajax-search.html',
                                     params = {'keyword': query},
                                     headers = {
                                         'X-Requested-With':'XMLHttpRequest',
                                     }
                                     ).json()
        search_results = helpers.soupify(search_results['content']).select('li > a')
        return [
            SearchResult(
                title=i.text,
                url=f"https://vidstreaming.io{i.get('href')}")
            for i in search_results
        ]
        """
        # Only using page 1, resulting in max 30 results
        # Very few shows will get impacted by this
        search_results = helpers.soupify(helpers.get('https://vidstreaming.io/search.html',
                                                     params = {'keyword':query})
                                         ).select('ul.listing > li.video-block > a')
        # Regex to cut out the "Episode xxx"

        return [
            SearchResult(
                title=re.sub(r"(E|e)pisode\s*[0-9]*", '', i.select('div.name')[0].text.strip()),
                url=f"https://vidstreaming.io{i.get('href')}",
                meta_info = {
                    'version_key_dubbed':'(Dub)'
                })
            for i in search_results
        ]
    sitename = 'vidstream'

    @classmethod
    def search(cls, query):
        """
        #Use below code for live ajax search.
        #Will show max 10 search results

        search_results = helpers.get('https://vidstreaming.io/ajax-search.html',
                                     params = {'keyword': query},
                                     headers = {
                                         'X-Requested-With':'XMLHttpRequest',
                                     }
                                     ).json()
        search_results = helpers.soupify(search_results['content']).select('li > a')
        return [
            SearchResult(
                title=i.text,
                url=f"https://vidstreaming.io{i.get('href')}")
            for i in search_results
        ]
        """
        # Only using page 1, resulting in max 30 results
        # Very few shows will get impacted by this
        search_results = helpers.soupify(helpers.get('https://vidstreaming.io/search.html',
                                                     params={'keyword': query})
                                         ).select('ul.listing > li.video-block > a')
        # Regex to cut out the "Episode xxx"

        return [
            SearchResult(
                title=re.sub(r"(E|e)pisode\s*[0-9]*", '', i.select('div.name')[0].text.strip()),
                url=f"https://vidstreaming.io{i.get('href')}",
                meta_info={
                    'version_key_dubbed': '(Dub)'
                })
            for i in search_results
        ]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        elements = soup.select('div.video-info-left > ul.listing > li.video-block > a')
        return [f"https://vidstreaming.io{i.get('href')}" for i in elements[::-1]]
    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        elements = soup.select('div.video-info-left > ul.listing > li.video-block > a')
        return [f"https://vidstreaming.io{i.get('href')}" for i in elements[::-1]]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('span.date')[0].text.strip()
    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('span.date')[0].text.strip()


class VidStreamEpisode(AnimeEpisode, sitename='vidstream'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        iframes = soup.select('iframe')
        logger.debug('Iframes: {}'.format(iframes))
        for i in iframes:
            # Simple check in case there's advertising iframes.
            if 'streaming.php' in i.get('src'):
                return [('vidstream', i.get('src'),)]

        return ''
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        iframes = soup.select('iframe')
        logger.debug('Iframes: {}'.format(iframes))
        for i in iframes:
            # Simple check in case there's advertising iframes.
            if 'streaming.php' in i.get('src'):
                return [('vidstream', i.get('src'),)]

        return ''
@@ -6,60 +6,60 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class VoirAnime(Anime, sitename='voiranime'):
    sitename = 'voiranime'
    url = f'https://{sitename}.com/'
    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'s': query})).select('div.item-head > h3 > a')
        search_results = [
            SearchResult(
                title=i.text,
                url=i.get('href'))
            for i in search_results
        ]
        return search_results
    sitename = 'voiranime'
    url = f'https://{sitename}.com/'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'s': query})).select('div.item-head > h3 > a')
        search_results = [
            SearchResult(
                title=i.text,
                url=i.get('href'))
            for i in search_results
        ]
        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        next_page = soup.select('a.ct-btn')[0].get('href')
        soup = helpers.soupify(helpers.get(next_page))
        episodes = soup.select('ul.video-series-list > li > a.btn-default')
        return [i.get('href') for i in episodes]
    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        next_page = soup.select('a.ct-btn')[0].get('href')
        soup = helpers.soupify(helpers.get(next_page))
        episodes = soup.select('ul.video-series-list > li > a.btn-default')
        return [i.get('href') for i in episodes]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('div.container > h1')[0].text
    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('div.container > h1')[0].text


class VoirAnimeEpisode(AnimeEpisode, sitename='voiranime'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        """These could probably be condensed down to one, but would look too spooky"""
        multilinks_regex = r'var\s*multilinks\s*=\s*\[\[{(.*?)}]];'
        mutilinks_iframe_regex = r"iframe\s*src=\\(\"|')([^(\"|')]*)"
        multilinks = re.search(multilinks_regex, str(soup)).group(1)
        logger.debug('Multilinks: {}'.format(multilinks))
        iframes = re.findall(mutilinks_iframe_regex,multilinks)
        logger.debug('Iframes: {}'.format(iframes))
        sources = [i[-1].replace('\\','') for i in iframes]

        extractors = {
            #url #Extractor #Server in config
            'https://gounlimited.to/embed':['mp4upload','gounlimited'],
        }
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        """These could probably be condensed down to one, but would look too spooky"""
        multilinks_regex = r'var\s*multilinks\s*=\s*\[\[{(.*?)}]];'
        mutilinks_iframe_regex = r"iframe\s*src=\\(\"|')([^(\"|')]*)"
        multilinks = re.search(multilinks_regex, str(soup)).group(1)
        logger.debug('Multilinks: {}'.format(multilinks))
        iframes = re.findall(mutilinks_iframe_regex, multilinks)
        logger.debug('Iframes: {}'.format(iframes))
        sources = [i[-1].replace('\\', '') for i in iframes]

        sources_list = []
        for i in sources:
            for j in extractors:
                if j in i:
                    sources_list.append({
                        'extractor':extractors[j][0],
                        'url':i,
                        'server':extractors[j][1],
                        'version':'subbed'
                    })
        extractors = {
            # url #Extractor #Server in config
            'https://gounlimited.to/embed': ['mp4upload', 'gounlimited'],
        }

        return self.sort_sources(sources_list)
        sources_list = []
        for i in sources:
            for j in extractors:
                if j in i:
                    sources_list.append({
                        'extractor': extractors[j][0],
                        'url': i,
                        'server': extractors[j][1],
                        'version': 'subbed'
                    })

        return self.sort_sources(sources_list)
@@ -6,6 +6,7 @@ import logging

logger = logging.getLogger(__name__)


class VostFree(Anime, sitename='vostfree'):
    """
    Site: https://vostfree.com

@@ -18,11 +19,11 @@ class VostFree(Anime, sitename='vostfree'):

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.post('https://vostfree.com', data = {'do': 'search', 'subaction': 'search', 'story': query}))
        soup = helpers.soupify(helpers.post('https://vostfree.com', data={'do': 'search', 'subaction': 'search', 'story': query}))
        return [
            SearchResult(
                title = re.sub('\s+?FRENCH(\s+)?$', '', x.text.strip()),
                url = x['href']
                title=re.sub('\s+?FRENCH(\s+)?$', '', x.text.strip()),
                url=x['href']
            )
            for x in soup.select('div.title > a')
        ]

@@ -52,13 +53,13 @@ class VostFree(Anime, sitename='vostfree'):
            if current:
                links.append(self.getLink(soup.find('div', {'id': f'content_{current[0]["id"]}'}).text, alternate_server))
                continue

        return links

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = re.sub('\s+?FRENCH(\s+)?$', '', soup.select('meta[property=og\:title]')[0]['content'])
        soup = helpers.soupify(helpers.get(self.url))
        self.title = re.sub('\s+?FRENCH(\s+)?$', '', soup.select('meta[property=og\:title]')[0]['content'])


class VostFreeEpisode(AnimeEpisode, sitename='vostfree'):
    def _get_sources(self):

@@ -6,69 +6,69 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class WatchMovie(Anime, sitename='watchmovie'):
    """
    Nice things
    Siteconfig
    ----------
    servers: servers used in order
    """
    sitename = 'watchmovie'
    url = f'https://{sitename}.movie'
    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url+'/search.html',params={'keyword': query})).select('a.videoHname')

        search_results = [
            SearchResult(
                title=i.get('title'),
                url=cls.url+i.get('href'),
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                }
            )
            for i in search_results
        ]
        return search_results
    """
    Nice things
    Siteconfig
    ----------
    servers: servers used in order
    """
    sitename = 'watchmovie'
    url = f'https://{sitename}.movie'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url + '/search.html', params={'keyword': query})).select('a.videoHname')

        search_results = [
            SearchResult(
                title=i.get('title'),
                url=cls.url + i.get('href'),
                meta_info={
                    'version_key_dubbed': '(Dub)',
                }
            )
            for i in search_results
        ]
        return search_results

    def _scrape_episodes(self):
        if 'anime-info' in self.url:
            url = self.url.replace('anime-info','anime') + '/all'
        else:
            url = self.url+'/season'
        soup = helpers.soupify(helpers.get(url)).select('a.videoHname')
        return ['https://watchmovie.movie'+a.get('href') for a in soup[::-1]]
    def _scrape_episodes(self):
        if 'anime-info' in self.url:
            url = self.url.replace('anime-info', 'anime') + '/all'
        else:
            url = self.url + '/season'
        soup = helpers.soupify(helpers.get(url)).select('a.videoHname')
        return ['https://watchmovie.movie' + a.get('href') for a in soup[::-1]]

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('div.page-title > h1')[0].text
    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('div.page-title > h1')[0].text


class WatchMovieEpisode(AnimeEpisode, sitename='watchmovie'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        sources = soup.select('div.anime_muti_link > ul > li > a')
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        sources = soup.select('div.anime_muti_link > ul > li > a')

        #logger.debug('Sources: {}'.format([i.get('data-video') for i in sources]))
        #logger.debug('Sources: {}'.format([i.get('data-video') for i in sources]))

        extractors = {
            #url #Extractor #Server in config
            'vidcloud9.com/':['vidstream','vidstream'],
            'hydrax.net/':['hydrax','hydrax'],
            'gcloud.live/v/':['gcloud','gcloud'],
            'yourupload.com/':['yourupload','yourupload'],
        }
        extractors = {
            # url #Extractor #Server in config
            'vidcloud9.com/': ['vidstream', 'vidstream'],
            'hydrax.net/': ['hydrax', 'hydrax'],
            'gcloud.live/v/': ['gcloud', 'gcloud'],
            'yourupload.com/': ['yourupload', 'yourupload'],
        }

        sources_list = []
        for i in sources:
            for j in extractors:
                if j in i.get('data-video'):
                    sources_list.append({
                        'extractor':extractors[j][0],
                        'url':i.get('data-video'),
                        'server':extractors[j][1],
                        'version':'subbed'
                    })
        sources_list = []
        for i in sources:
            for j in extractors:
                if j in i.get('data-video'):
                    sources_list.append({
                        'extractor': extractors[j][0],
                        'url': i.get('data-video'),
                        'server': extractors[j][1],
                        'version': 'subbed'
                    })

        return self.sort_sources(sources_list)
        return self.sort_sources(sources_list)
@@ -5,71 +5,71 @@ from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class Yify(Anime, sitename='yify'):
    sitename = 'yify'
    url = f'https://{sitename}.mx/search'
    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'keyword': query})).select('div.ml-item > a')
        return [
            SearchResult(
                title=i.get('title'),
                url=i.get('href')+'/watching.html')
            for i in search_results
        ]
    sitename = 'yify'
    url = f'https://{sitename}.mx/search'

    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'keyword': query})).select('div.ml-item > a')
        return [
            SearchResult(
                title=i.get('title'),
                url=i.get('href') + '/watching.html')
            for i in search_results
        ]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        regex = r'id:.*?\"([0-9]*?)\"'
        movie_id = re.search(regex,str(soup)).group(1)
        load_episodes = f'https://yify.mx/ajax/v2_get_episodes/{movie_id}'
        elements = helpers.soupify(helpers.get(load_episodes)).select('div.les-content > a')
        # Doesn't really return urls, rather ID:s. This is to prevent loading all episodes with separate
        # requests if the user only wants one episode
        return [i.get('episode-id','') for i in elements]
    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        regex = r'id:.*?\"([0-9]*?)\"'
        movie_id = re.search(regex, str(soup)).group(1)
        load_episodes = f'https://yify.mx/ajax/v2_get_episodes/{movie_id}'
        elements = helpers.soupify(helpers.get(load_episodes)).select('div.les-content > a')
        # Doesn't really return urls, rather ID:s. This is to prevent loading all episodes with separate
        # requests if the user only wants one episode
        return [i.get('episode-id', '') for i in elements]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('title')[0].text.replace('Full Movie Free Yify','')
    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select('title')[0].text.replace('Full Movie Free Yify', '')


class YifyEpisode(AnimeEpisode, sitename='yify'):
    def _get_sources(self):
        if not self.url:
            return ''
    def _get_sources(self):
        if not self.url:
            return ''

        load_embed = 'https://yify.mx/ajax/load_embed/{}'
        embed = helpers.get(load_embed.format(self.url)).json()
        logger.debug('Embed: {}'.format(embed))
        embed_url = embed['embed_url']
        load_embed = 'https://yify.mx/ajax/load_embed/{}'
        embed = helpers.get(load_embed.format(self.url)).json()
        logger.debug('Embed: {}'.format(embed))
        embed_url = embed['embed_url']

        episode_id = embed_url.split('#')[-1]
        load_embed = f'https://yify.mx/ajax/load_embed_url/{episode_id}'
        episode_info = helpers.get(load_embed).json()
        logger.debug(episode_info)
        episode_id = embed_url.split('#')[-1]
        load_embed = f'https://yify.mx/ajax/load_embed_url/{episode_id}'
        episode_info = helpers.get(load_embed).json()
        logger.debug(episode_info)

        url = episode_info['url']
        api_id = re.search(r'id=([^&]*)',url).group(1)
        api = f'https://watch.yify.mx/api/?id={api_id}'
        sources = helpers.get(api).json()
        logger.debug(sources)
        url = episode_info['url']
        api_id = re.search(r'id=([^&]*)', url).group(1)
        api = f'https://watch.yify.mx/api/?id={api_id}'
        sources = helpers.get(api).json()
        logger.debug(sources)

        sources_list = []
        extractors = {
            'yify.mx/embed/':['yify','yify'],
            'vidcloud9.com/':['vidstream','vidstream']
        }
        sources_list = []
        extractors = {
            'yify.mx/embed/': ['yify', 'yify'],
            'vidcloud9.com/': ['vidstream', 'vidstream']
        }

        for i in sources:
            for j in extractors:
                if j in i['link']:
                    sources_list.append({
                        'extractor':extractors[j][0],
                        'url':i['link'],
                        'server':extractors[j][1],
                        'version':'subbed'
                    })
        for i in sources:
            for j in extractors:
                if j in i['link']:
                    sources_list.append({
                        'extractor': extractors[j][0],
                        'url': i['link'],
                        'server': extractors[j][1],
                        'version': 'subbed'
                    })

        return self.sort_sources(sources_list)
        return self.sort_sources(sources_list)
@@ -72,7 +72,7 @@ def format_search_results(search_results):
        'Title',
        'Meta',
    ]
    table = [(i+1, v.title, v.pretty_metadata)
    table = [(i + 1, v.title, v.pretty_metadata)
             for i, v in enumerate(search_results)]
    table = tabulate(table, headers, tablefmt='psql')
    table = '\n'.join(table.split('\n')[::-1])
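
tabulate renders the psql-style grid, and reversing the lines puts row 1 at the bottom, next to the input prompt. A standalone sketch (the row values are invented):

    from tabulate import tabulate

    table = tabulate([(1, 'Example Title', '9 peers')], ['SlNo', 'Title', 'Meta'], tablefmt='psql')
    print('\n'.join(table.split('\n')[::-1]))
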
@@ -105,15 +105,15 @@ def search(query, provider, val=None, season_info=None, ratio=50):
    # Makes it harder to unintentionally exit the anime command if it's automated
    while True:
        if val == None:
            val = click.prompt('Enter the anime no{}:'. format(' (0 to switch provider)'*(season_info != None)),
                               type=int, default=1, err=True)
            val = click.prompt('Enter the anime no{}:'. format(' (0 to switch provider)' * (season_info != None)),
                               type=int, default=1, err=True)
        try:
            url = search_results[val-1].url
            title = search_results[val-1].title
            url = search_results[val - 1].url
            title = search_results[val - 1].title
        except IndexError:
            logger.error('Only maximum of {} search results are allowed.'
                         ' Please input a number less than {}'.format(
                             len(search_results), len(search_results)+1))
                             len(search_results), len(search_results) + 1))
            val = False
            continue
        break
@@ -128,7 +128,7 @@ def primitive_search(search_results):
        'SlNo',
        'Title',
    ]
    table = [(i+1, v.title)
    table = [(i + 1, v.title)
             for i, v in enumerate(search_results)]
    table = tabulate(table, headers, tablefmt='psql')
    table = '\n'.join(table.split('\n')[::-1])

@@ -137,11 +137,11 @@ def primitive_search(search_results):
    while True:
        val = click.prompt('Enter the anime no: ', type=int, default=1, err=True)
        try:
            return search_results[val-1]
            return search_results[val - 1]
        except IndexError:
            logger.error('Only maximum of {} search results are allowed.'
                         ' Please input a number less than {}'.format(
                             len(search_results), len(search_results)+1))
                             len(search_results), len(search_results) + 1))


def download_metadata(file_format, metdata, episode, filename='metdata.json'):
@@ -161,14 +161,15 @@ def download_metadata(file_format, metdata, episode, filename='metdata.json'):
    logger.debug('Downloaded metadata to "{}".'.format(location_metadata))
    return location_metadata


def split_anime(anime, episode_range):
    try:
        start, end = [int(x) for x in episode_range.split(':')]
        anime = anime[start-1:end-1]
        anime = anime[start - 1:end - 1]
    except ValueError:
        # Only one episode specified
        episode = int(episode_range)
        anime = anime[episode-1:episode]
        anime = anime[episode - 1:episode]

    return anime
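
split_anime treats the range string as 1-based with an exclusive end; illustrative behaviour, assuming anime is an indexable episode sequence:

    split_anime(anime, '3:5')  # episodes 3 and 4 (anime[2:4])
    split_anime(anime, '7')    # just episode 7 (anime[6:7])
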
@@ -220,10 +221,10 @@ def play_episode(episode, *, player, title):
            '--title={}'.format(title),
            '--referrer="{}"'.format(episode.source().referer),
            episode.source().stream_url
            ])
        ])
    else:
        p = subprocess.Popen([ player, episode.source().stream_url
        ])
        p = subprocess.Popen([player, episode.source().stream_url
                              ])
    p.wait()

@ -264,13 +265,13 @@ def format_filename(filename, episode):
|
|||
def format_command(cmd, episode, file_format, speed_limit, path):
|
||||
from anime_downloader.config import Config
|
||||
if not Config._CONFIG['dl']['aria2c_for_torrents'] and episode.url.startswith('magnet:?xt=urn:btih:'):
|
||||
return ['open',episode.url]
|
||||
return ['open', episode.url]
|
||||
|
||||
cmd_dict = {
|
||||
'{aria2}': 'aria2c {stream_url} -x 12 -s 12 -j 12 -k 10M -o '
|
||||
'{file_format}.mp4 --continue=true --dir={download_dir}'
|
||||
' --stream-piece-selector=inorder --min-split-size=5M --referer={referer} --check-certificate=false --user-agent={useragent} --max-overall-download-limit={speed_limit}',
|
||||
'{idm}' : 'idman.exe /n /d {stream_url} /p {download_dir} /f {file_format}.mp4'
|
||||
'{idm}': 'idman.exe /n /d {stream_url} /p {download_dir} /f {file_format}.mp4'
|
||||
}
|
||||
|
||||
# Allows for passing the user agent with self.headers in the site.
|
||||
|
@ -290,7 +291,7 @@ def format_command(cmd, episode, file_format, speed_limit, path):
|
|||
}
|
||||
|
||||
if cmd == "{idm}":
|
||||
rep_dict['file_format'] = rep_dict['file_format'].replace('/','\\')
|
||||
rep_dict['file_format'] = rep_dict['file_format'].replace('/', '\\')
|
||||
|
||||
if cmd in cmd_dict:
|
||||
cmd = cmd_dict[cmd]
|
||||
|
@ -301,7 +302,7 @@ def format_command(cmd, episode, file_format, speed_limit, path):
|
|||
return cmd
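# The substitution step itself is elided by the hunks above. As a rough sketch
# under that assumption, each {placeholder} in the chosen template would be
# filled from rep_dict, roughly like this (all values are stand-ins):
rep_dict = {'stream_url': 'https://example.com/ep1.mp4', 'download_dir': '.',
            'file_format': 'ep1', 'referer': '', 'useragent': 'Mozilla/5.0',
            'speed_limit': '0'}
cmd = 'aria2c {stream_url} -o {file_format}.mp4 --dir={download_dir}'
for key, val in rep_dict.items():
    cmd = cmd.replace('{' + key + '}', val)
print(cmd.split(' '))  # an argument list, ready for subprocess.Popen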


#Credits to: https://github.com/Futei/SineCaptcha
# Credits to: https://github.com/Futei/SineCaptcha
def bypass_hcaptcha(url):
    """
    :param url: url to page which gives hcaptcha

@@ -317,18 +318,18 @@ def bypass_hcaptcha(url):
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko)',
            'Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
            ))
        }
        ))
    }

    logger.info("Bypassing captcha...")

    #Retry until success
    # Retry until success
    while not bypassed:
        site_key = str(uuid4())
        response = session.post('https://hcaptcha.com/getcaptcha', headers = headers, data = {
        response = session.post('https://hcaptcha.com/getcaptcha', headers=headers, data={
            'sitekey': site_key,
            'host': host
        }).json()
        }).json()

        try:
            key = response['key']

@@ -353,10 +354,10 @@ def bypass_hcaptcha(url):
                    'st': timestamp,
                    'dct': timestamp,
                    'mm': mouse_movements
                    }
                }
            }

            response = session.post(f'https://hcaptcha.com/checkcaptcha/{key}', json = json)
            response = session.post(f'https://hcaptcha.com/checkcaptcha/{key}', json=json)

            response = response.json()
            bypassed = response['pass']

@@ -372,12 +373,12 @@ def bypass_hcaptcha(url):
    data = dict((x.get('name'), x.get('value')) for x in resp.select('form > input'))
    data.update({'id': resp.strong.text, 'g-recaptcha-response': token, 'h-captcha-response': token})

    resp = session.post(bypass_url, data = data)
    resp = session.post(bypass_url, data=data)

    if resp.status_code == 200:
        pickle.dump(resp.cookies, open(f'{tempfile.gettempdir()}/{host}', 'wb'))
        logger.info("Successfully bypassed captcha!")

        return resp
    else:
        bypassed = False

@@ -394,6 +395,7 @@ def get_hcaptcha_cookies(url):
    if os.path.isfile(COOKIE_FILE):
        return pickle.load(open(COOKIE_FILE, 'rb'))
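# A minimal sketch of the cookie round-trip used above: the bypass dumps the
# session's cookie jar to a temp file keyed by host, and later runs reload it.
# The host and cookie values are stand-ins.
import pickle
import tempfile
import requests

host = 'example.com'
cookie_file = f'{tempfile.gettempdir()}/{host}'
jar = requests.cookies.RequestsCookieJar()
jar.set('session', 'abc123', domain=host)
pickle.dump(jar, open(cookie_file, 'wb'))
restored = pickle.load(open(cookie_file, 'rb'))
print(restored.get('session'))  # 'abc123'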


def deobfuscate_packed_js(packedjs):
    return eval_in_node('eval=console.log; ' + packedjs)

@@ -403,6 +405,7 @@ def eval_in_node(js: str):
    output = subprocess.check_output(['node', '-e', js])
    return output.decode('utf-8')
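# A small usage sketch (requires node on PATH). Reassigning eval to console.log
# makes a packed "eval(...)" wrapper print its payload instead of executing it.
print(eval_in_node('console.log(6 * 7)'))      # '42\n'
print(deobfuscate_packed_js('eval("1 + 1")'))  # prints the string '1 + 1', not 2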


def open_magnet(magnet):
    if sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
        os.startfile(magnet)

@@ -423,7 +426,7 @@ def external_download(cmd, episode, file_format, speed_limit, path=''):

    logger.debug('formatted cmd: ' + ' '.join(cmd))

    if cmd[0] == 'open': #for torrents
    if cmd[0] == 'open': # for torrents
        open_magnet(cmd[1])
    else:
        p = subprocess.Popen(cmd)

@@ -444,9 +447,9 @@ def make_dir(path):


def get_filler_episodes(query):
    def search_filler_episodes(query,page):
    def search_filler_episodes(query, page):
        url = 'https://animefillerlist.com/search/node/'
        search_results = helpers.soupify(helpers.get(url+query, params={'page': page})).select('h3.title > a')
        search_results = helpers.soupify(helpers.get(url + query, params={'page': page})).select('h3.title > a')
        urls = [a.get('href') for a in search_results if a.get('href').split('/')[-2] == 'shows']
        search_results = [
            [

@@ -455,13 +458,12 @@ def get_filler_episodes(query):
        ]
        return search_results, urls

    results_list, urls_list = [],[]
    results_list, urls_list = [], []
    prev = ['']

    for a in range(5): #Max 5 pages, could be done using the pager element
        search_results, urls = search_filler_episodes(query,a)
        if urls == prev and not (len(urls) == 0 or a == 0): #stops the loop if the same site is visited twice
    for a in range(5): # Max 5 pages, could be done using the pager element
        search_results, urls = search_filler_episodes(query, a)
        if urls == prev and not (len(urls) == 0 or a == 0): # stops the loop if the same site is visited twice
            break
        prev = urls[:]

@@ -469,19 +471,19 @@ def get_filler_episodes(query):
            results_list.append(b)
        for c in urls:
            urls_list.append(c)

    [results_list[a].insert(0,a+1)for a in range(len(results_list))] #inserts numbers

    [results_list[a].insert(0, a + 1)for a in range(len(results_list))] # inserts numbers

    headers = ["SlNo", "Title"]
    table = tabulate(results_list, headers, tablefmt='psql')
    table = '\n'.join(table.split('\n')[::-1])

    click.echo(table)
    val = click.prompt('Enter the filler-anime no (0 to cancel): ', type=int, default=1, err=True)
    if val == 0:
        return False

    url = urls_list[val-1]
    url = urls_list[val - 1]

    try:
        logger.info("Fetching filler episodes...")

@@ -495,14 +497,14 @@ def get_filler_episodes(query):
            txt = filler_episode.text.strip()
            if '-' in txt:
                split = txt.split('-')
                for a in range(int(split[0]),int(split[1])+1):
                for a in range(int(split[0]), int(split[1]) + 1):
                    episodes.append(a)
            else:
                episodes.append(int(txt))

        logger.debug("Found {} filler episodes.".format(len(episodes)))
        return episodes

    except:
        logger.warn("Can't get filler episodes. Will download all specified episodes.")
        return False
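# A minimal sketch of the range expansion above: animefillerlist renders filler
# episodes either as single numbers ('12') or spans ('10-13'), and both collapse
# into one flat list of ints. The helper name and inputs are illustrative only.
def expand_filler(entries):
    episodes = []
    for txt in entries:
        if '-' in txt:
            lo, hi = txt.split('-')
            episodes.extend(range(int(lo), int(hi) + 1))
        else:
            episodes.append(int(txt))
    return episodes

print(expand_filler(['3', '10-13']))  # [3, 10, 11, 12, 13]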


@@ -32,26 +32,26 @@ class Watcher:
        logger.info('Added {:.50} to watch list.'.format(anime.title))
        return anime

    def list(self, filt = None):
    def list(self, filt=None):
        animes = self._read_from_watch_file()
        if filt in [None, 'all']:
            animes = self._sorting_for_list(animes)
            self.sorted = True
        click.echo('{:>5} | {:^35} | {:^8} | {} | {:^10}'.format(
            'SlNo', 'Name', 'Eps','Score', 'Status'
            'SlNo', 'Name', 'Eps', 'Score', 'Status'
        ))
        click.echo('-'*65)
        click.echo('-' * 65)
        fmt_str = '{:5} | {:35.35} | {:3}/{:<3} | {:^5} | {}'
        if not filt in [ None, 'all' ]:
            animes = [ i for i in animes if i.watch_status == filt ]
        if not filt in [None, 'all']:
            animes = [i for i in animes if i.watch_status == filt]

        for idx, anime in enumerate(animes):
            meta = anime.meta
            click.echo(click.style(fmt_str.format(idx+1,
                                                  anime.title,
                                                  *anime.progress(),
                                                  anime.score,
                                                  anime.watch_status),fg=anime.colours))
            click.echo(click.style(fmt_str.format(idx + 1,
                                                  anime.title,
                                                  *anime.progress(),
                                                  anime.score,
                                                  anime.watch_status), fg=anime.colours))

    def anime_list(self):
        return self._read_from_watch_file()
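# A minimal sketch of the row format used by list(): '{:35.35}' both pads and
# truncates the title to 35 characters, and '{:3}/{:<3}' prints progress as
# done/total. The sample values are stand-ins.
fmt_str = '{:5} | {:35.35} | {:3}/{:<3} | {:^5} | {}'
print(fmt_str.format(1, 'Fullmetal Alchemist: Brotherhood', 12, 64, 9, 'watching'))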

@@ -70,20 +70,20 @@ class Watcher:
        logger.debug('Anime: {!r}, episodes_done: {}'.format(
            anime, anime.episodes_done))

        if (time() - anime._timestamp) > 4*24*60*60:
        if (time() - anime._timestamp) > 4 * 24 * 60 * 60:
            anime = self.update_anime(anime)
        return anime

    def update_anime(self, anime):
        if not hasattr(anime,'colours'):
        if not hasattr(anime, 'colours'):
            colours = {
                'watching':'blue',
                'completed':'green',
                'dropped':'red',
                'planned':'yellow',
                'hold' : 'white'
                'watching': 'blue',
                'completed': 'green',
                'dropped': 'red',
                'planned': 'yellow',
                'hold': 'white'
            }
            anime.colours = colours.get(anime.watch_status,'yellow')
            anime.colours = colours.get(anime.watch_status, 'yellow')

        if not hasattr(anime, 'meta') or not anime.meta.get('Status') or \
                anime.meta['Status'].lower() == 'airing':

@@ -123,23 +123,23 @@ class Watcher:

        self._write_to_watch_file(data)

    def _write_to_watch_file(self, animes, MAL_import = False):
    def _write_to_watch_file(self, animes, MAL_import=False):
        if not MAL_import:
            animes = [anime.__dict__ for anime in animes]

        with open(self.WATCH_FILE, 'w') as watch_file:
            json.dump(animes, watch_file)

    def _import_from_MAL(self,PATH):
        import xml.etree.ElementTree as ET #Standard Library import, conditional as it only needs to be imported for this line
    def _import_from_MAL(self, PATH):
        import xml.etree.ElementTree as ET # Standard Library import, conditional as it only needs to be imported for this line
        root = ET.parse(PATH).getroot()
        list_to_dict = []
        values = { 'Plan to Watch' : { 'planned' : 'yellow' },
                   'Completed' : { 'completed' : 'green' },
                   'Watching' : { 'watching' : 'cyan' },
                   'Dropped' : { 'dropped' : 'red' },
                   'On-Hold' : { 'hold' : 'white' }
                 }
        values = {'Plan to Watch': {'planned': 'yellow'},
                  'Completed': {'completed': 'green'},
                  'Watching': {'watching': 'cyan'},
                  'Dropped': {'dropped': 'red'},
                  'On-Hold': {'hold': 'white'}
                  }
        for type_tag in root.findall('anime'):
            mal_watched_episodes = type_tag.find('my_watched_episodes').text
            mal_score = type_tag.find('my_score').text

@@ -148,7 +148,7 @@ class Watcher:
            mal_watch_status = str(list(values[mal_watch_status].keys())[0])
            mal_title = type_tag.find('series_title').text
            mal_episodes = type_tag.find('series_episodes').text
            list_to_dict.append( {
            list_to_dict.append({
                "episodes_done": int(mal_watched_episodes),
                "_timestamp": time(),
                "score": int(mal_score),

@@ -160,8 +160,8 @@ class Watcher:
                "title": mal_title,
                "_episode_urls": [[1, "https://twist.moe/anime/"]],
                "_len": int(mal_episodes)
                })
            })
        self._write_to_watch_file(list_to_dict, MAL_import = True)
        self._write_to_watch_file(list_to_dict, MAL_import=True)
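# A minimal sketch of the ElementTree calls above, run against a stand-in MAL
# export (a real file comes from myanimelist.net's XML export feature).
import xml.etree.ElementTree as ET

xml_data = '''<myanimelist>
  <anime>
    <series_title>Cowboy Bebop</series_title>
    <series_episodes>26</series_episodes>
    <my_watched_episodes>26</my_watched_episodes>
    <my_score>10</my_score>
  </anime>
</myanimelist>'''
root = ET.fromstring(xml_data)
for type_tag in root.findall('anime'):
    print(type_tag.find('series_title').text,
          type_tag.find('my_watched_episodes').text)  # Cowboy Bebop 26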

    def _read_from_watch_file(self):
        if not os.path.exists(self.WATCH_FILE):

@@ -184,8 +184,8 @@ class Watcher:

        return ret

    def _sorting_for_list(self,animes):
        status_index = ['watching','completed','dropped','planned','hold','all']
    def _sorting_for_list(self, animes):
        status_index = ['watching', 'completed', 'dropped', 'planned', 'hold', 'all']
        animes = sorted(animes, key=lambda x: status_index.index(x.watch_status))
        return animes
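# A minimal sketch of sorting by a fixed category order, as _sorting_for_list
# does: list.index() turns each status into its rank, so 'watching' sorts first.
# The dicts stand in for the real anime objects.
status_index = ['watching', 'completed', 'dropped', 'planned', 'hold', 'all']
animes = [{'watch_status': 'dropped'}, {'watch_status': 'watching'}]
animes = sorted(animes, key=lambda x: status_index.index(x['watch_status']))
print([a['watch_status'] for a in animes])  # ['watching', 'dropped']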

@@ -201,6 +201,7 @@ class Watcher:
            self.watch_status = 'watching'
            self.colours = 'blue'
            super(cls, self).__init__(*args, **kwargs)

        def progress(self):
            return (self.episodes_done, len(self))