Rebased ezdl command with more sites supported (#466)

* added the ezdl command
* added support for more sites
* fixed the config
* added a metadata provider
Co-authored-by: Arjix <53124886+ArjixGamer@users.noreply.github.com>
Blatzar 2020-08-08 19:17:30 +02:00 committed by GitHub
parent bbbe9db067
commit 8e099e34e0
16 changed files with 579 additions and 92 deletions
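
The headline change is the new ezdl command, which chains AniList metadata search, fuzzy provider matching and downloading into a single step. A minimal sketch of an invocation, assuming the package's usual `anime` entry point (the flags are taken from the ezdl diff below):

    anime ezdl 'yakusoku no neverland' --episodes 1:3 --provider gogoanime --ratio 60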

View File

@@ -1 +1 @@
__version__ = '4.5.1'
__version__ = '4.6.0'

View File

@@ -0,0 +1,205 @@
from anime_downloader.sites import helpers
import logging

from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import get_anime_class
from anime_downloader.config import Config
from anime_downloader.util import primitive_search

import warnings
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    from fuzzywuzzy import fuzz

logger = logging.getLogger(__name__)


class AnimeInfo:
    """
    Attributes
    ----------
    url: string
        URL for the info page
    title: string
        English name of the show.
    jp_title: string
        Japanese name of the show.
    metadata: dict
        Data not critical for core functions
    episodes: int
        Maximum number of episodes
    """

    def __init__(self, url, episodes, title=None, jp_title=None, metadata={}):
        self.url = url
        self.episodes = episodes
        self.title = title
        self.jp_title = jp_title
        self.metadata = metadata


class MatchObject:
    """
    Attributes
    ----------
    AnimeInfo: object
        Metadata object from the MAL search.
    SearchResult: object
        Metadata object from the provider search.
    ratio: int
        A number between 0 and 100 describing the similarity between
        SearchResult and AnimeInfo. A higher number means more similar.
    """

    def __init__(self, AnimeInfo, SearchResult, ratio = 100):
        self.AnimeInfo = AnimeInfo
        self.SearchResult = SearchResult
        self.ratio = ratio


# Not used
def search_mal(query):
    def search(query):
        soup = helpers.soupify(helpers.get('https://myanimelist.net/anime.php', params = {'q':query}))
        search_results = soup.select("a.hoverinfo_trigger.fw-b.fl-l")
        return [SearchResult(
            url = i.get('href'),
            title = i.select('strong')[0].text
        ) for i in search_results]

    def scrape_metadata(url):
        soup = helpers.soupify(helpers.get(url))
        """
        info_dict contains something like this: [{
            'url': 'https://myanimelist.net/anime/37779/Yakusoku_no_Neverland',
            'title': 'The Promised Neverland',
            'jp_title': '約束のネバーランド'
        },{
            'url': 'https://myanimelist.net/anime/39617/Yakusoku_no_Neverland_2nd_Season',
            'title': 'The Promised Neverland 2nd Season',
            'jp_title': '約束のネバーランド 第2期'}]
        """
        info_dict = {
            'url':url
        }
        # Maps specified info in the sidebar to variables in info_dict
        name_dict = {
            'Japanese:':'jp_title',
            'English:':'title',
            'synonyms:':'synonyms',
            'Episodes:':'episodes'
        }
        info = soup.select('span.dark_text')
        extra_info = [i.parent.text.strip() for i in info]
        for i in extra_info:
            text = i.replace('\n','').strip()
            for j in name_dict:
                if text.startswith(j):
                    info_dict[name_dict[j]] = text[len(j):].strip()

        # Backup name if no English name is registered in the sidebar
        if not info_dict.get('title'):
            name = soup.select('span[itemprop=name]')
            info_dict['title'] = name[0].text if name else None

        # Always sets episodes
        if not info_dict.get('episodes') or info_dict.get('episodes') == 'Unknown':
            info_dict['episodes'] = 0

        # TODO: error message when this stuff is not correctly scraped.
        # Can happen if MAL is down or something similar.
        return AnimeInfo(url = info_dict['url'], title = info_dict.get('title'),
                         jp_title = info_dict.get('jp_title'), episodes = int(info_dict['episodes']))

    search_results = search(query)
    season_info = []
    # Max 10 results
    for i in range(min(len(search_results), 10)):
        anime_info = scrape_metadata(search_results[i].url)
        if anime_info.episodes:
            season_info.append(anime_info)

    # Code below uses the first result to compare
    # season_info = [scrape_metadata(search_results[0].url)]
    # return season_info

    # Prompts the user for selection
    return primitive_search(season_info)


def search_anilist(query):
    def search(query):
        ani_query = """
        query ($id: Int, $page: Int, $search: String, $type: MediaType) {
            Page (page: $page, perPage: 10) {
                media (id: $id, search: $search, type: $type) {
                    id
                    idMal
                    description(asHtml: false)
                    seasonYear
                    title {
                        english
                        romaji
                        native
                    }
                    coverImage {
                        extraLarge
                    }
                    bannerImage
                    averageScore
                    status
                    episodes
                }
            }
        }
        """
        url = 'https://graphql.anilist.co'

        # TODO: check in case there are no results.
        # It seems to error on no results (anime -ll DEBUG dl "nev").
        results = helpers.post(url, json={'query': ani_query, 'variables': {'search': query, 'page': 1, 'type': 'ANIME'}}).json()['data']['Page']['media']
        if not results:
            logger.error('No results found in anilist')
            raise NameError

        search_results = [AnimeInfo(url = 'https://anilist.co/anime/' + str(i['id']), title = i['title']['romaji'],
                                    jp_title = i['title']['native'], episodes = int(i['episodes'])) for i in results if i['episodes'] != None]
        return search_results

    search_results = search(query)
    # Prompts the user for selection
    return primitive_search(search_results)


def fuzzy_match_metadata(seasons_info, search_results):
    # Gets the SearchResult object with the greatest title similarity to the first MAL/Anilist result
    results = []
    for i in seasons_info:
        for j in search_results:
            # Allows the provider to return a cleaned title via 'title_cleaned'
            # in meta_info, to improve fuzzy matching.
            title_provider = j.title.strip() if not j.meta_info.get('title_cleaned') else j.meta_info.get('title_cleaned').strip()
            # On some titles this will be None, causing errors below.
            title_info = i.title
            # Essentially adds the chosen key to the query if the version is in use.
            # Dirty solution, but should work pretty well.
            config = Config['siteconfig'].get(get_anime_class(j.url).sitename,{})
            version = config.get('version')
            version_use = version == 'dubbed'
            # Adds something like (Sub) or (Dub) to the title
            key_used = j.meta_info.get('version_key_dubbed','') if version_use else j.meta_info.get('version_key_subbed','')
            title_info += ' ' + key_used
            title_info = title_info.strip()

            # TODO: add synonyms
            # 0 if there's no Japanese name
            jap_ratio = fuzz.ratio(i.jp_title, j.meta_info['jp_title']) if j.meta_info.get('jp_title') else 0
            # Outputs the max ratio for the Japanese or English name (0-100)
            ratio = max(fuzz.ratio(title_info,title_provider), jap_ratio)
            logger.debug('Ratio: {}, Info title: {}, Provider Title: {}, Key used: {}'.format(ratio, title_info, title_provider, key_used))
            results.append(MatchObject(i, j, ratio))

    # Returns the result with the highest ratio
    return max(results, key=lambda item:item.ratio)
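
To make the matching above concrete, here is a rough, self-contained sketch of the comparison fuzzy_match_metadata performs; fuzz.ratio returns an int from 0 to 100 (100 = exact match), and the titles are illustrative:

    from fuzzywuzzy import fuzz

    # The appended version key lets a '(Dub)' provider entry match cleanly:
    fuzz.ratio('The Promised Neverland (Dub)', 'The Promised Neverland (Dub)')  # 100
    # Without it the score drops, but may still clear the default threshold of 50:
    fuzz.ratio('The Promised Neverland', 'The Promised Neverland (Dub)')        # roughly 88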

View File

@@ -95,7 +95,7 @@ def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url = util.search(anime_url, provider, choice)
        anime_url, _ = util.search(anime_url, provider, choice)
        cls = get_anime_class(anime_url)

    anime = cls(anime_url, quality=quality,
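
With this commit, util.search returns a (url, choice) tuple instead of a bare URL (see the util.py hunk near the end of the commit); callers such as dl that don't need the remembered choice simply discard it. A hedged sketch of the new contract:

    # Hypothetical call; the second element is the user's menu choice,
    # which ezdl caches per provider and other callers throw away.
    anime_url, _ = util.search('one piece', 'gogoanime')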

View File

@@ -0,0 +1,186 @@
import logging
import os

import click
import requests_cache

from anime_downloader import session, util
from anime_downloader.__version__ import __version__
from anime_downloader.sites import get_anime_class, ALL_ANIME_SITES
from anime_downloader import animeinfo
from anime_downloader.config import Config

logger = logging.getLogger(__name__)
echo = click.echo
sitenames = [v[1] for v in ALL_ANIME_SITES]


# NOTE: Don't put defaults here. Add them to the dict in config.
@click.command()
@click.argument('anime_url')
@click.option(
    '--episodes', '-e', 'episode_range', metavar='<int>:<int>',
    help="Range of anime you want to download in the form <start>:<end>")
@click.option(
    '--play', 'player', metavar='PLAYER',
    help="Streams in the specified player")
@click.option(
    '--force-download', '-f', is_flag=True,
    help='Force downloads even if file exists')
@click.option(
    '--provider',
    help='The anime provider (website) for search.',
    type=click.Choice(sitenames)
)
@click.option(
    '--ratio', '-r', type=int,
    help='Ratio used for the auto select in search. 100 means it only auto selects on complete matches. 0 auto selects regardless of how similar the result is.',
    default=50
)
@click.option(
    '--url', '-u', type=bool, is_flag=True,
    help="If flag is set, prints the stream url instead of downloading")
@click.option("--skip-fillers", is_flag=True, help="Skip downloading of fillers.")
@click.pass_context
def command(ctx, anime_url, episode_range, player,
            force_download, provider,
            skip_fillers, ratio, url):
    # Borrows some config from the original dl command.
    # This can all be flags, but ezdl is made to be easy.
    fallback_qualities = Config['dl']['fallback_qualities']
    download_dir = Config['dl']['download_dir']
    quality = Config['dl']['quality']
    url = Config['dl']['url'] if not url else url
    external_downloader = Config['dl']['external_downloader']
    skip_download = Config['dl']['skip_download']
    chunk_size = Config['dl']['chunk_size']
    speed_limit = Config['dl']['speed_limit']

    fallback_providers = Config['ezdl']['fallback_providers']
    file_format = Config['ezdl']['file_format']

    query = anime_url[:]

    util.print_info(__version__)

    fallback_providers.insert(0, provider)
    # Eliminates duplicates while keeping order
    providers = sorted(set(fallback_providers), key=fallback_providers.index)
    info = animeinfo.search_anilist(query)

    episode_count = info.episodes - 1
    # Interprets the episode range for use in a for loop.
    # 1:3 -> for _episode in range(1, 4):
    episode_range = util.parse_episode_range(episode_count, episode_range)
    episode_range_split = episode_range.split(':')

    # Stores the choice for each provider, to prevent re-prompting on search.
    # Since the current setup runs episode-wise, a 12-episode series would
    # otherwise give 12+ prompts.
    choice_dict = {}

    # Doesn't work on nyaa since it only returns one episode.
    for episode_range in range(int(episode_range_split[0]), int(episode_range_split[-1])+1):
        # Exits if all providers are skipped.
        if [choice_dict[i] for i in choice_dict] == [0]*len(providers):
            logger.info('All providers skipped, exiting')
            exit()

        for provider in providers:
            if not get_anime_class(provider):
                logger.info('"{}" is an invalid provider'.format(provider))
                continue

            logger.debug('Current provider: {}'.format(provider))
            # TODO: Replace by factory
            cls = get_anime_class(anime_url)

            # To make the downloads use the correct name if URLs are used.
            real_provider = cls.sitename if cls else provider
            # This allows for animeinfo metadata in the filename and one filename for multiple providers.
            rep_dict = {
                'animeinfo_anime_title': util.slugify(info.title),
                'provider': util.slugify(real_provider),
                'anime_title':'{anime_title}',
                'ep_no':'{ep_no}'
            }
            fixed_file_format = file_format.format(**rep_dict)

            # Keeping this as I don't know the impact of removing it.
            # It's False by default in normal dl.
            disable_ssl = False
            session.get_session().verify = not disable_ssl

            # This is just to make choices in providers persistent between searches.
            choice_provider = choice_dict.get(provider)

            if not cls:
                _anime_url, choice_provider = util.search(anime_url, provider, val=choice_provider, season_info=info, ratio=ratio)
                choice_dict[provider] = choice_provider
                if choice_provider == 0:
                    continue
                cls = get_anime_class(_anime_url)

            try:
                anime = cls(_anime_url, quality=quality,
                            fallback_qualities=fallback_qualities)
            # I have yet to investigate all the errors this can output.
            # "No sources found" gives an error which exits the script.
            except:
                continue

            logger.debug('Found anime: {}'.format(anime.title))

            try:
                animes = util.parse_ep_str(anime, str(episode_range))
            except RuntimeError:
                logger.error('No episode found with index {}'.format(episode_range))
                continue
            except:
                logger.error('Unknown provider error')
                continue

            # TODO:
            # Two types of plugins:
            # - Anime plugin: Pass the whole anime
            # - Ep plugin: Pass each episode

            if url or player:
                skip_download = True

            if download_dir and not skip_download:
                logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))

            if skip_fillers:
                fillers = util.get_filler_episodes(query)

            for episode in animes:
                if skip_fillers and fillers:
                    if episode.ep_no in fillers:
                        logger.info("Skipping episode {} because it is a filler.".format(episode.ep_no))
                        continue

                if url:
                    util.print_episodeurl(episode)

                if player:
                    util.play_episode(episode, player=player, title=f'{anime.title} - Episode {episode.ep_no}')

                if not skip_download:
                    if external_downloader:
                        logging.info('Downloading episode {} of {}'.format(
                            episode.ep_no, anime.title)
                        )
                        util.external_download(external_downloader, episode,
                                               fixed_file_format, path=download_dir, speed_limit=speed_limit)
                        continue

                    if chunk_size is not None:
                        chunk_size = int(chunk_size)
                        chunk_size *= 1e6

                    with requests_cache.disabled():
                        episode.download(force=force_download,
                                         path=download_dir,
                                         format=fixed_file_format,
                                         range_size=chunk_size)
                    print()

            # If everything succeeded, proceed to the next episode instead of
            # trying the remaining providers.
            break
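
A minimal sketch of the range handling above, outside the command: '1:3' is split on the colon and fed to range(), whose exclusive upper bound explains the +1, so episodes 1 through 3 inclusive are covered:

    episode_range = '1:3'
    start, end = episode_range.split(':')
    for episode_no in range(int(start), int(end) + 1):
        print(episode_no)  # 1, 2, 3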

View File

@@ -67,7 +67,7 @@ def command(anime_name, new, update_all, _list, quality, remove,
        else:
            query = click.prompt('Enter a anime name or url', type=str)

        url = util.search(query, provider)
        url, _ = util.search(query, provider)
        watcher.new(url)
        sys.exit(0)

@@ -153,7 +153,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
            if '--provider' in vals:
                if vals.index('--provider') + 1 < len(vals):
                    provider = vals[vals.index('--provider') + 1]

            url = util.search(query, provider)
            url, _ = util.search(query, provider)
            watcher.new(url)

        if key == 'swap':

@@ -255,7 +255,7 @@ def list_animes(watcher, quality, download_dir, imp = None, _filter = None):
                # Probably good to list providers here before looping.
                continue

            # Watch can quit if no anime is found, not ideal.
            url = util.search(anime.title, val)
            url, _ = util.search(anime.title, val)
            watcher.remove(anime)
            newanime = watcher.new(url)
            newanime.episodes_done = anime.episodes_done

View File

@@ -25,6 +25,12 @@ DEFAULT_CONFIG = {
        'selescrape_driver_binary_path' : None,
        'speed_limit' : 0,
    },
    'ezdl': {
        'file_format':'{animeinfo_anime_title}/{animeinfo_anime_title}_{provider}_{ep_no}',
        'provider':'twist.moe',
        'ratio':50,
        'fallback_providers':['9anime','vidstream','watchmovie']
    },
    'watch': {
        'quality': '1080p',
        'fallback_qualities': ['720p', '480p', '360p'],
@@ -32,113 +38,124 @@ DEFAULT_CONFIG = {
        'provider': 'twist.moe',
        'autoplay_next':True
    },
    "siteconfig": {
    'siteconfig': {
        'animefrenzy': {
            "version": "subbed"
            'version': 'subbed'
        },
        'animixplay': {
            "server": "vidstream",
            'server': 'vidstream',
            'version': 'subbed'
        },
        'nineanime': {
            "server": "mp4upload",
        '9anime': {
            'server': 'mp4upload',
            'version':'subbed',
        },
        'anistream.xyz': {
            "version": "subbed",
            'version': 'subbed',
        },
        'animeflv': {
            "version": "subbed",
            "server": "natsuki",
            'version': 'subbed',
            'server': 'natsuki',
        },
        'gogoanime': {
            "server": "cdn",
            'server': 'cdn',
            'version': 'subbed'
        },
        'animerush':{
            "servers": ["Mp4uploadHD Video","MP4Upload", "Mp4upload Video", "Youruploads Video"]
            'servers': ['Mp4uploadHD Video','MP4Upload', 'Mp4upload Video', 'Youruploads Video']
        },
        'kickass': {
            "server": "A-KICKASSANIME",
            "fallback_servers": ["ORIGINAL-QUALITY-V2","HTML5-HQ","HTML5","A-KICKASSANIME","BETAPLAYER","KICKASSANIME","DEVSTREAM"],
            "ext_fallback_servers": ["Mp4Upload","Vidcdn","Vidstreaming"],
            'server': 'A-KICKASSANIME',
            'fallback_servers': ['ORIGINAL-QUALITY-V2','HTML5-HQ','HTML5','A-KICKASSANIME','BETAPLAYER','KICKASSANIME','DEVSTREAM'],
            'ext_fallback_servers': ['Mp4Upload','Vidcdn','Vidstreaming'],
        },
        'animesimple': {
            "version": "subbed",
            "servers": ["vidstreaming","trollvid","mp4upload","xstreamcdn"]
            'version': 'subbed',
            'servers': ['vidstreaming','trollvid','mp4upload','xstreamcdn']
        },
        'darkanime': {
            "version": "subbed",
            "servers": ["mp4upload","trollvid"],
            'version': 'subbed',
            'servers': ['mp4upload','trollvid'],
        },
        'dreamanime': {
            "version": "subbed",
            "server": "trollvid",
            'version': 'subbed',
            'server': 'trollvid',
        },
        'ryuanime': {
            "version": "subbed",
            "server": "trollvid",
            'version': 'subbed',
            'server': 'trollvid',
        },
        'animekisa': {
            "server": "gcloud",
            "fallback_servers": ["mp4upload","vidstream"]
            'server': 'gcloud',
            'fallback_servers': ['mp4upload','vidstream']
        },
        'watchmovie': {
            "servers": ["vidstream",'gcloud','yourupload','hydrax']
            'servers': ['vidstream','gcloud','yourupload','hydrax'],
            'version': 'subbed',
        },
        'animeflix': {
            "server": "AUEngine",
            "fallback_servers": ["FastStream"],
            "version": "sub",
            'server': 'AUEngine',
            'fallback_servers': ['FastStream'],
            'version': 'sub',
        },
        'dubbedanime': {
            "servers": ["vidstream","mp4upload","trollvid"],
            "version": "dubbed",
            'servers': ['vidstream','mp4upload','trollvid'],
            'version': 'dubbed',
        },
        'animedaisuki': {
            "servers": ["official"]
            'servers': ['official']
        },
        'nyaa': {
            "filter": "Trusted only",
            "category": "English-translated"
            'filter': 'Trusted only',
            'category': 'English-translated'
        },
        'vidstream': {
            "servers": ["vidstream","vidstream_bk","gcloud","mp4upload","cloud9","hydrax","mixdrop"]
            'servers': ['vidstream','vidstream_bk','gcloud','mp4upload','cloud9','hydrax','mixdrop'],
            'version': 'subbed'
        },
        'justdubs': {
            "servers": ["mp4upload","gcloud"]
            'servers': ['mp4upload','gcloud']
        },
        'kisscartoon': {
            "servers": [
                "mpserver",
                "yuserver",
                "oserver",
                "xserver",
                "ptserver"
            'servers': [
                'mpserver',
                'yuserver',
                'oserver',
                'xserver',
                'ptserver'
            ]
        },
        'animevibe': {
            "servers": [
                "vidstream",
                "3rdparty",
                "mp4upload",
                "hydrax",
                "gcloud",
                "fembed"
            'servers': [
                'vidstream',
                '3rdparty',
                'mp4upload',
                'hydrax',
                'gcloud',
                'fembed'
            ]
        },
        'yify': {
            "servers": [
                "vidstream",
                "yify"
            'servers': [
                'vidstream',
                'yify'
            ]
        },
        'vostfree': {
            'server': 'sibnet'
        },
        'voiranime': {
            "servers":[
                "gounlimited"
            'servers':[
                'gounlimited'
            ]
        },
        'kissanime': {
            'version':'subbed'
        },
        'animeonline360': {
            'version':'subbed'
        }
    }
}
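
For context, these defaults are exactly what the matching and ezdl code above read back at runtime; a short sketch of the lookups (dict-style access mirrors the Config usage in animeinfo.py and ezdl.py):

    from anime_downloader.config import Config

    Config['ezdl']['fallback_providers']                      # ['9anime', 'vidstream', 'watchmovie']
    Config['siteconfig'].get('gogoanime', {}).get('version')  # 'subbed'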

View File

@@ -453,13 +453,16 @@ class SearchResult:
        URL for the poster of the anime.
    meta: dict
        Additional metadata regarding the anime.
    meta_info: dict
        Metadata regarding the anime. Not shown in the results, used to match with MAL.
    """

    def __init__(self, title, url, poster='', meta=''):
    def __init__(self, title, url, poster='', meta='', meta_info={}):
        self.title = title
        self.url = url
        self.poster = poster
        self.meta = meta
        self.meta_info = meta_info

    def __repr__(self):
        return '<SearchResult Title: {} URL: {}>'.format(self.title, self.url)
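
Illustrative only: a provider attaching the new matching hints via meta_info (the values mirror the gogoanime diff later in this commit; the title and URL are made up):

    result = SearchResult(
        title='Black Clover (Dub)',
        url='https://gogoanime.io/category/black-clover-dub',
        meta_info={'version_key_dubbed': '(Dub)'}
    )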

View File

@@ -16,6 +16,10 @@ class AnimeOnline(Anime, sitename = 'animeonline360'):
            SearchResult(
                title = i['title'],
                url = i['url'],
                meta_info = {
                    'version_key_dubbed':'Dubbed',
                    'version_key_subbed':'Subbed',
                }
            )
            for i in results
        ]

View File

@@ -12,10 +12,16 @@ class AnimeOut(Anime, sitename='animeout'):
    @classmethod
    def search(cls, query):
        search_results = helpers.soupify(helpers.get(cls.url, params={'s': query})).select('h3.post-title > a')
        # Removes the unneeded metadata from the title.
        # Used by the MAL matcher.
        clean_title_regex = r'\(.*?\)'
        return [
            SearchResult(
                title = i.text,
                url = i.get('href'))
                url = i.get('href'),
                meta_info = {
                    'title_cleaned':re.sub(clean_title_regex,"",i.text).strip()
                })
            for i in search_results
        ]
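
Roughly what the title cleaning above does: the regex strips any parenthesised chunks, which is where AnimeOut tends to put release metadata (the title here is made up):

    import re

    re.sub(r'\(.*?\)', '', 'Death Note (BD) (Complete Batch)').strip()
    # -> 'Death Note'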

View File

@@ -35,7 +35,12 @@ class AniMixPlay(Anime, sitename='animixplay'):
                data.append(SearchResult(
                    title = j.text,
                    url = 'https://animixplay.com' + j.get('href'),
                    meta = {'version': i}))
                    meta = {'version': i},
                    meta_info = {
                        'version_key_dubbed':'(Dub)',
                    }
                )
                )
        return data

View File

@@ -67,11 +67,15 @@ class GogoAnime(Anime, sitename='gogoanime'):
        search_results = [
            SearchResult(
                title=a.get('title'),
                url='https://gogoanime.io' + a.get('href'))
            for a in search_results
                title=i.get('title'),
                url='https://gogoanime.io' + i.get('href'),
                meta_info = {
                    'version_key_dubbed':'(Dub)'
                }
            )
            for i in search_results
        ]
        return(search_results)
        return search_results

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))

@@ -87,8 +91,8 @@ class GogoAnime(Anime, sitename='gogoanime'):
                                           params=params))
        epurls = list(
            reversed([self._base_url + a.get('href').strip()
                      for a in soup.select('li a')])
            reversed([self._base_url + i.get('href').strip()
                      for i in soup.select('li a')])
        )

        return epurls

View File

@@ -50,6 +50,10 @@ class KissAnime(Anime, sitename='kissanime'):
                url=cls.domain +
                soup.find('a', 'bigChar').get('href'),
                poster='',
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':'(Sub)'
                }
            )]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

@@ -60,6 +64,10 @@ class KissAnime(Anime, sitename='kissanime'):
                title=res.text.strip(),
                url=cls.domain + res.find('a').get('href'),
                poster='',
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':'(Sub)'
                }
            )
            logger.debug(res)
            ret.append(res)

View File

@@ -7,7 +7,7 @@ from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)


class NineAnime(Anime, sitename='9anime'):
class NineAnime(Anime, sitename='nineanime'):
    sitename = '9anime'
    url = f'https://{sitename}.to/search'

    @classmethod

@@ -17,7 +17,11 @@ class NineAnime(Anime, sitename='9anime'):
        return [
            SearchResult(
                title = i.text,
                url = i.get('href')
                url = i.get('href'),
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                    'version_key_subbed':''
                }
            )
            for i in search_results
        ]

View File

@@ -33,10 +33,14 @@ class VidStream(Anime, sitename='vidstream'):
                                     params = {'keyword':query})
        ).select('ul.listing > li.video-block > a')

        # Regex to cut out the "Episode xxx"
        return [
            SearchResult(
                title=re.sub(r"(E|e)pisode\s*[0-9]*", '', i.select('div.name')[0].text.strip()),
                url=f"https://vidstreaming.io{i.get('href')}")
                url=f"https://vidstreaming.io{i.get('href')}",
                meta_info = {
                    'version_key_dubbed':'(Dub)'
                })
            for i in search_results
        ]
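
And roughly what the vidstream title regex removes (illustrative title; the site lists results as 'Name Episode N'):

    import re

    re.sub(r"(E|e)pisode\s*[0-9]*", '', 'One Piece Episode 937').strip()
    # -> 'One Piece'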

View File

@@ -21,11 +21,16 @@ class WatchMovie(Anime, sitename='watchmovie'):
        search_results = [
            SearchResult(
                title=a.get('title'),
                url=cls.url+a.get('href'))
            for a in search_results
                title=i.get('title'),
                url=cls.url+i.get('href'),
                meta_info = {
                    'version_key_dubbed':'(Dub)',
                }
            )
            for i in search_results
        ]
        return(search_results)
        return search_results

    def _scrape_episodes(self):
        if 'anime-info' in self.url:

@@ -35,6 +40,7 @@ class WatchMovie(Anime, sitename='watchmovie'):
            soup = helpers.soupify(helpers.get(url)).select('a.videoHname')
        return ['https://watchmovie.movie'+a.get('href') for a in soup[::-1]]

    def _scrape_metadata(self):
        self.title = helpers.soupify(helpers.get(self.url)).select('div.page-title > h1')[0].text

View File

@@ -78,35 +78,69 @@ def format_search_results(search_results):
    return table


def search(query, provider, choice=None):
def search(query, provider, val=None, season_info=None, ratio=50):
    # Will use animeinfo sync if season_info is provided.
    # Since this function outputs to stdout it should ideally be in
    # cli. But it is used in watch too. :(
    cls = get_anime_class(provider)

    search_results = cls.search(query)
    click.echo(format_search_results(search_results), err=True)
    if not search_results:
        logger.error('No such Anime found. Please ensure correct spelling.')
        sys.exit(1)
        return None, None

    if choice:
        val = choice
    else:
        val = click.prompt('Enter the anime no: ', type=int, default=1, err=True)

    try:
        url = search_results[val-1].url
        title = search_results[val-1].title
    except IndexError:
        logger.error('Only maximum of {} search results are allowed.'
                     ' Please input a number less than {}'.format(
                         len(search_results), len(search_results)+1))
        sys.exit(1)

    if season_info:
        from anime_downloader import animeinfo
        match = animeinfo.fuzzy_match_metadata([season_info], search_results)
        logger.debug('Match ratio: {}'.format(match.ratio))
        # Ratios are in the range 0-100, where 100 means a 100% match.
        if match.ratio >= ratio and not val:
            logger.debug('Selected {}'.format(match.SearchResult.title))
            return match.SearchResult.url, None

    click.echo(format_search_results(search_results), err=True)
    # Loop to allow re-prompting if the user chooses incorrectly.
    # Makes it harder to unintentionally exit the anime command if it's automated.
    while True:
        if val == None:
            val = click.prompt('Enter the anime no{}:'.format(' (0 to switch provider)'*(season_info != None)),
                               type=int, default=1, err=True)
        try:
            url = search_results[val-1].url
            title = search_results[val-1].title
        except IndexError:
            logger.error('Only maximum of {} search results are allowed.'
                         ' Please input a number less than {}'.format(
                             len(search_results), len(search_results)+1))
            val = False
            continue
        break

    logger.info('Selected {}'.format(title))
    return url
    return url, val


def primitive_search(search_results):
    headers = [
        'SlNo',
        'Title',
    ]
    table = [(i+1, v.title)
             for i, v in enumerate(search_results)]
    table = tabulate(table, headers, tablefmt='psql')
    table = '\n'.join(table.split('\n')[::-1])
    click.echo(table, err=True)

    while True:
        val = click.prompt('Enter the anime no: ', type=int, default=1, err=True)
        try:
            return search_results[val-1]
        except IndexError:
            logger.error('Only maximum of {} search results are allowed.'
                         ' Please input a number less than {}'.format(
                             len(search_results), len(search_results)+1))


def split_anime(anime, episode_range):
@@ -121,11 +155,12 @@ def split_anime(anime, episode_range):
    return anime


def parse_episode_range(anime, episode_range):
def parse_episode_range(max_range, episode_range):
    if not episode_range:
        episode_range = '1:'
    if episode_range.endswith(':'):
        episode_range += str(len(anime) + 1)
        length = max_range if type(max_range) == int else len(max_range)
        episode_range += str(length + 1)
    if episode_range.startswith(':'):
        episode_range = '1' + episode_range
    return episode_range
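
A few worked examples of the new behaviour: max_range may now be an int (such as the episode count derived from AniList metadata in ezdl) as well as a sized object like the anime itself:

    parse_episode_range(11, None)  # -> '1:12'
    parse_episode_range(11, '4:')  # -> '4:12'
    parse_episode_range(11, ':6')  # -> '1:6'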