From 5bca30752137ad8f2b2b94cb4006cb2a1110e0b2 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Wed, 11 Mar 2020 17:47:23 +0100
Subject: [PATCH 1/7] added animeout as a provider

---
 anime_downloader/animeout.py | 55 ++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 anime_downloader/animeout.py

diff --git a/anime_downloader/animeout.py b/anime_downloader/animeout.py
new file mode 100644
index 0000000..a6593a8
--- /dev/null
+++ b/anime_downloader/animeout.py
@@ -0,0 +1,55 @@
+import logging
+import re
+
+from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
+from anime_downloader.sites import helpers
+
+class AnimeOut(Anime, sitename='animeout'):
+    sitename = 'animeout'
+    url = f'https://{sitename}.xyz/'
+    @classmethod
+    def search(cls, query):
+        search_results = helpers.soupify(helpers.get(cls.url,
+            params={'s': query})).select('h3.post-title > a')
+
+        title_data = {
+            'data' : []
+        }
+        for a in range(len(search_results)):
+            url = search_results[a].get('href')
+            title = search_results[a].text
+            data = {
+                'url' : url,
+                'title' : title,
+            }
+            title_data['data'].append(data)
+
+        search_results = [
+            SearchResult(
+                title=result["title"],
+                url=result["url"])
+            for result in title_data.get('data', [])
+        ]
+        return(search_results)
+
+    def _scrape_episodes(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        elements = soup.select('div.article-content > p > a')
+
+        episode_links = []
+        for a in elements:
+            if 'Direct Download' in a.text:
+                episode_links.append(a.get('href'))
+        return [a for a in episode_links]
+
+    def _scrape_metadata(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        self.title = soup.select('h1.page-title')[0].text
+
+class AnimeOutEpisode(AnimeEpisode, sitename='animeout'):
+    def _get_sources(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        link = soup.select('div.Center > p > h2 > a')[0].get('href')
+        script = helpers.soupify(helpers.get(link)).select('script')[2].text
+        url = re.search(r'http[^"]*',script).group()
+        return [('no_extractor', url,)]
\ No newline at end of file

From 4e1d4994487a8d99e32243aa4428596f7de7d312 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Wed, 11 Mar 2020 17:48:32 +0100
Subject: [PATCH 2/7] updated __init__ to include animeout

---
 anime_downloader/__init__.py | 42 ++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 anime_downloader/__init__.py

diff --git a/anime_downloader/__init__.py b/anime_downloader/__init__.py
new file mode 100644
index 0000000..77e1f36
--- /dev/null
+++ b/anime_downloader/__init__.py
@@ -0,0 +1,42 @@
+from importlib import import_module
+
+ALL_ANIME_SITES = [
+    # ('filename', 'sitename', 'classname')
+    ('nineanime', '9anime', 'NineAnime'),
+    ('gogoanime', 'gogoanime', 'GogoAnime'),
+    ('kissanime', 'kissanime', 'KissAnime'),
+    ('kisscartoon', 'kisscartoon', 'KissCartoon'),
+    ('twistmoe', 'twist.moe', 'TwistMoe'),
+    ('animepahe', 'animepahe', 'AnimePahe'),
+    ('animeflv', 'animeflv', 'Animeflv'),
+    ('itsaturday', 'itsaturday', 'Itsaturday'),
+    ('animefreak', 'animefreak', 'AnimeFreak'),
+    ('animeflix', 'animeflix', 'AnimeFlix'),
+    ('animeout', 'animeout', 'AnimeOut'),
+]
+
+
+def get_anime_class(url):
+    """
+    Get anime class corresponding to url or name.
+    See :py:data:`anime_downloader.sites.ALL_ANIME_SITES` to get the possible anime sites.
+
+    Parameters
+    ----------
+    url: string
+        URL of the anime.
+
+    Returns
+    -------
+    :py:class:`anime_downloader.sites.anime.Anime`
+        Concrete implementation of :py:class:`anime_downloader.sites.anime.Anime`
+    """
+    for site in ALL_ANIME_SITES:
+        if site[1] in url:
+            try:
+                module = import_module(
+                    'anime_downloader.sites.{}'.format(site[0])
+                )
+            except ImportError:
+                raise
+            return getattr(module, site[2])
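A word on the lookup that PATCH 2 adds: get_anime_class does plain substring matching, taking the first registry entry whose sitename occurs in the given string, and only then imports the matching module. A minimal sketch of that resolution, assuming the registry exactly as in this patch (the import path matches this patch's file location — PATCH 3 moves it under anime_downloader.sites — and the URLs are invented examples):

    from anime_downloader import get_anime_class

    # Substring match on the sitename column, so a bare site name and a
    # full URL containing it resolve to the same class object.
    by_name = get_anime_class('animeout')
    by_url = get_anime_class('https://animeout.xyz/some-show/')
    assert by_name is by_url  # both are AnimeOut

    # No matching entry simply falls through the loop, returning None.
    assert get_anime_class('unknown-site') is None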
From 9598c87df4d8bd2b07da44fe26af3fa9cef84796 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Sat, 14 Mar 2020 14:58:29 +0000
Subject: [PATCH 3/7] accidentally placed __init__ in the wrong folder

---
 anime_downloader/sites/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/anime_downloader/sites/__init__.py b/anime_downloader/sites/__init__.py
index 749bec0..edc28d4 100644
--- a/anime_downloader/sites/__init__.py
+++ b/anime_downloader/sites/__init__.py
@@ -14,6 +14,7 @@ ALL_ANIME_SITES = [
     ('animeflix', 'animeflix', 'AnimeFlix'),
     ('dubbedanime','dubbedanime','Dubbedanime'),
     ('a2zanime','a2zanime','A2zanime'),
+    ('animeout', 'animeout', 'AnimeOut'),
 ]

From a275a0b970ef4bbe7d850ca1bc743d33836b7ad7 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Sat, 14 Mar 2020 14:58:44 +0000
Subject: [PATCH 4/7] Delete __init__.py

---
 anime_downloader/__init__.py | 42 ----------------------------------------
 1 file changed, 42 deletions(-)
 delete mode 100644 anime_downloader/__init__.py

diff --git a/anime_downloader/__init__.py b/anime_downloader/__init__.py
deleted file mode 100644
index 77e1f36..0000000
--- a/anime_downloader/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from importlib import import_module
-
-ALL_ANIME_SITES = [
-    # ('filename', 'sitename', 'classname')
-    ('nineanime', '9anime', 'NineAnime'),
-    ('gogoanime', 'gogoanime', 'GogoAnime'),
-    ('kissanime', 'kissanime', 'KissAnime'),
-    ('kisscartoon', 'kisscartoon', 'KissCartoon'),
-    ('twistmoe', 'twist.moe', 'TwistMoe'),
-    ('animepahe', 'animepahe', 'AnimePahe'),
-    ('animeflv', 'animeflv', 'Animeflv'),
-    ('itsaturday', 'itsaturday', 'Itsaturday'),
-    ('animefreak', 'animefreak', 'AnimeFreak'),
-    ('animeflix', 'animeflix', 'AnimeFlix'),
-    ('animeout', 'animeout', 'AnimeOut'),
-]
-
-
-def get_anime_class(url):
-    """
-    Get anime class corresponding to url or name.
-    See :py:data:`anime_downloader.sites.ALL_ANIME_SITES` to get the possible anime sites.
-
-    Parameters
-    ----------
-    url: string
-        URL of the anime.
-
-    Returns
-    -------
-    :py:class:`anime_downloader.sites.anime.Anime`
-        Concrete implementation of :py:class:`anime_downloader.sites.anime.Anime`
-    """
-    for site in ALL_ANIME_SITES:
-        if site[1] in url:
-            try:
-                module = import_module(
-                    'anime_downloader.sites.{}'.format(site[0])
-                )
-            except ImportError:
-                raise
-            return getattr(module, site[2])
From 205334367a1447ecba4eca9264fa24e5ff201fb7 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Sat, 14 Mar 2020 15:07:50 +0000
Subject: [PATCH 5/7] placed in the wrong directory

---
 anime_downloader/animeout.py | 55 ------------------------------------
 1 file changed, 55 deletions(-)
 delete mode 100644 anime_downloader/animeout.py

diff --git a/anime_downloader/animeout.py b/anime_downloader/animeout.py
deleted file mode 100644
index a6593a8..0000000
--- a/anime_downloader/animeout.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import logging
-import re
-
-from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
-from anime_downloader.sites import helpers
-
-class AnimeOut(Anime, sitename='animeout'):
-    sitename = 'animeout'
-    url = f'https://{sitename}.xyz/'
-    @classmethod
-    def search(cls, query):
-        search_results = helpers.soupify(helpers.get(cls.url,
-            params={'s': query})).select('h3.post-title > a')
-
-        title_data = {
-            'data' : []
-        }
-        for a in range(len(search_results)):
-            url = search_results[a].get('href')
-            title = search_results[a].text
-            data = {
-                'url' : url,
-                'title' : title,
-            }
-            title_data['data'].append(data)
-
-        search_results = [
-            SearchResult(
-                title=result["title"],
-                url=result["url"])
-            for result in title_data.get('data', [])
-        ]
-        return(search_results)
-
-    def _scrape_episodes(self):
-        soup = helpers.soupify(helpers.get(self.url))
-        elements = soup.select('div.article-content > p > a')
-
-        episode_links = []
-        for a in elements:
-            if 'Direct Download' in a.text:
-                episode_links.append(a.get('href'))
-        return [a for a in episode_links]
-
-    def _scrape_metadata(self):
-        soup = helpers.soupify(helpers.get(self.url))
-        self.title = soup.select('h1.page-title')[0].text
-
-class AnimeOutEpisode(AnimeEpisode, sitename='animeout'):
-    def _get_sources(self):
-        soup = helpers.soupify(helpers.get(self.url))
-        link = soup.select('div.Center > p > h2 > a')[0].get('href')
-        script = helpers.soupify(helpers.get(link)).select('script')[2].text
-        url = re.search(r'http[^"]*',script).group()
-        return [('no_extractor', url,)]
\ No newline at end of file

From bcdf8eaffde10ed5ff55f2a6a80b70c3f91da759 Mon Sep 17 00:00:00 2001
From: Blatzar <46196380+Blatzar@users.noreply.github.com>
Date: Sat, 14 Mar 2020 15:08:49 +0000
Subject: [PATCH 6/7] added animeout.py in the correct directory

---
 anime_downloader/sites/animeout.py | 55 ++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 anime_downloader/sites/animeout.py

diff --git a/anime_downloader/sites/animeout.py b/anime_downloader/sites/animeout.py
new file mode 100644
index 0000000..bcb9fdd
--- /dev/null
+++ b/anime_downloader/sites/animeout.py
@@ -0,0 +1,55 @@
+import logging
+import re
+
+from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
+from anime_downloader.sites import helpers
+
+class AnimeOut(Anime, sitename='animeout'):
+    sitename = 'animeout'
+    url = f'https://{sitename}.xyz/'
+    @classmethod
+    def search(cls, query):
+        search_results = helpers.soupify(helpers.get(cls.url,
+            params={'s': query})).select('h3.post-title > a')
+
+        title_data = {
+            'data' : []
+        }
+        for a in range(len(search_results)):
+            url = search_results[a].get('href')
+            title = search_results[a].text
+            data = {
+                'url' : url,
+                'title' : title,
+            }
+            title_data['data'].append(data)
+
+        search_results = [
+            SearchResult(
+                title=result["title"],
+                url=result["url"])
+            for result in title_data.get('data', [])
+        ]
+        return(search_results)
+
+    def _scrape_episodes(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        elements = soup.select('div.article-content > p > a')
+
+        episode_links = []
+        for a in elements:
+            if 'Direct Download' in a.text:
+                episode_links.append(a.get('href'))
+        return [a for a in episode_links]
+
+    def _scrape_metadata(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        self.title = soup.select('h1.page-title')[0].text
+
+class AnimeOutEpisode(AnimeEpisode, sitename='animeout'):
+    def _get_sources(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        link = soup.select('div.Center > p > h2 > a')[0].get('href')
+        script = helpers.soupify(helpers.get(link)).select('script')[2].text
+        url = re.search(r'http[^"]*',script).group()
+        return [('no_extractor', url,)]
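The _get_sources method in PATCH 6 leans on two assumptions about the site: the episode page's 'div.Center > p > h2 > a' anchor points at a download page, and that page's third <script> tag embeds the file URL inside a double-quoted string. The r'http[^"]*' pattern then grabs everything from the first "http" up to the next quote. A standalone sketch of just that extraction step, where the script text is an invented stand-in for what soup.select('script')[2].text would return on a real page:

    import re

    # Invented example of the download page's script contents.
    script = 'var link = "https://cdn.example.org/Anime/ep01.mkv"; play(link);'

    # Same pattern as _get_sources: from 'http' up to the closing quote.
    url = re.search(r'http[^"]*', script).group()
    print(url)  # https://cdn.example.org/Anime/ep01.mkv

Because the pattern stops only at a double quote, it silently assumes the URL itself contains none; a page that single-quotes its strings would make the match run past the URL.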
From 09bf423e2c241820ba268788ccb89b51d77557f5 Mon Sep 17 00:00:00 2001
From: Vishnunarayan K I
Date: Fri, 20 Mar 2020 23:19:41 +0530
Subject: [PATCH 7/7] chore: add to readme and style

---
 README.md                          | 1 +
 anime_downloader/sites/animeout.py | 8 ++++----
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index e3642b4..a1890dd 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,7 @@ Yeah. Me too! That's why this tool exists.
 - Gogoanime
 - Dubbedanime
 - a2zanime
+- animeout
 - itsaturday
 - Animeflv
 - Kissanime - requires Node.js

diff --git a/anime_downloader/sites/animeout.py b/anime_downloader/sites/animeout.py
index bcb9fdd..5191167 100644
--- a/anime_downloader/sites/animeout.py
+++ b/anime_downloader/sites/animeout.py
@@ -13,7 +13,7 @@ class AnimeOut(Anime, sitename='animeout'):
             params={'s': query})).select('h3.post-title > a')

         title_data = {
-            'data' : [] 
+            'data' : []
         }
         for a in range(len(search_results)):
             url = search_results[a].get('href')
             title = search_results[a].text
@@ -23,7 +23,7 @@ class AnimeOut(Anime, sitename='animeout'):
             'title' : title,
         }
         title_data['data'].append(data)
- 
+
         search_results = [
             SearchResult(
                 title=result["title"],
@@ -36,7 +36,7 @@ class AnimeOut(Anime, sitename='animeout'):
         soup = helpers.soupify(helpers.get(self.url))
         elements = soup.select('div.article-content > p > a')

-        episode_links = [] 
+        episode_links = []
         for a in elements:
             if 'Direct Download' in a.text:
                 episode_links.append(a.get('href'))
@@ -47,7 +47,7 @@ class AnimeOut(Anime, sitename='animeout'):
         self.title = soup.select('h1.page-title')[0].text

 class AnimeOutEpisode(AnimeEpisode, sitename='animeout'):
-    def _get_sources(self): 
+    def _get_sources(self):
         soup = helpers.soupify(helpers.get(self.url))
         link = soup.select('div.Center > p > h2 > a')[0].get('href')
         script = helpers.soupify(helpers.get(link)).select('script')[2].text
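Taken together, the series registers AnimeOut in ALL_ANIME_SITES, so the provider is reachable through the same lookup as every other site. A short end-to-end sketch under stated assumptions: search() and the SearchResult fields are exactly as defined in PATCH 6; that constructing the Anime subclass drives _scrape_metadata and _scrape_episodes is assumed from how the base class serves the other providers, not shown in these patches; the query string is an invented example.

    from anime_downloader.sites import get_anime_class

    # After PATCH 3 the registry lives in anime_downloader.sites, so the
    # new provider resolves like any other.
    AnimeOut = get_anime_class('animeout')

    # search() scrapes 'h3.post-title > a' from the site's search page and
    # wraps each hit in a SearchResult(title=..., url=...).
    results = AnimeOut.search('one punch man')
    for result in results:
        print(result.title, result.url)

    # Instantiating the class is assumed to trigger _scrape_metadata and
    # _scrape_episodes (base-class behaviour); each episode's _get_sources
    # then regex-extracts the direct link from its download page.
    anime = AnimeOut(results[0].url)
    print(anime.title)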