more docs

master
Vishnunarayan K I 2018-10-15 22:40:35 +05:30
parent 1b197136dc
commit 757dbb941c
6 changed files with 114 additions and 12 deletions

View File

@ -4,7 +4,7 @@ verify_ssl = true
name = "pypi"
[packages]
anime-downloader = {editable = true, path = "."}
anime-downloader = {editable = true, path = ".", extras = ["cloudflare"]}
[dev-packages]
twine = "*"
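The new :code:`extras = ["cloudflare"]` entry makes pipenv install the package's optional cloudflare dependencies along with the editable install. A minimal sketch of the :code:`extras_require` section a :code:`setup.py` would need for that extra to exist; the exact dependency list is an assumption, though the lockfile below shows :code:`cfscrape` being pulled in::

    # Hypothetical excerpt from setup.py; the real file may differ.
    from setuptools import setup, find_packages

    setup(
        name='anime-downloader',
        packages=find_packages(),
        extras_require={
            # installed only when the extra is requested,
            # e.g.  pip install -e ".[cloudflare]"
            'cloudflare': ['cfscrape'],
        },
    )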

Pipfile.lock generated
View File

@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "3c226d0611904abe8b253ddaa90b12046c1c6363b5920e508f067e2d5830e130"
"sha256": "b39a35608ce09e57197e0564221771979dcb36be0f7866a5c73298da5a98b4d5"
},
"pipfile-spec": 6,
"requires": {},
@ -16,6 +16,9 @@
"default": {
"anime-downloader": {
"editable": true,
"extras": [
"cloudflare"
],
"path": "."
},
"beautifulsoup4": {
@ -33,6 +36,12 @@
],
"version": "==2019.3.9"
},
"cfscrape": {
"hashes": [
"sha256:cb9159955d0e6e82cf4ad8cc9b19413e68ebfed1ce98a26e51f62e66d45146f1"
],
"version": "==1.9.5"
},
"chardet": {
"hashes": [
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",

View File

@ -29,9 +29,9 @@ class Anime:
Attributes
----------
sitename: string
sitename: str
name of the site
title: string
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
@ -51,8 +51,15 @@ class Anime:
"""
Search searches for the anime using the query given.
query :
query is
Parameters
----------
query: str
query is the keyword to be searched for.
Returns
-------
list
List of :py:class:`~anime_downloader.sites.anime.SearchResult`
"""
return
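A hedged sketch of a site class filling in this contract; the URL, selectors and class name are made up for illustration, and only the return type (a list of :py:class:`~anime_downloader.sites.anime.SearchResult`) comes from the docstring above::

    import requests
    from bs4 import BeautifulSoup

    from anime_downloader.sites.anime import BaseAnime, SearchResult

    class MySite(BaseAnime):
        sitename = 'mysite'

        @classmethod
        def search(cls, query):
            # 'https://mysite.example/search' is a placeholder URL
            resp = requests.get('https://mysite.example/search',
                                params={'keyword': query})
            soup = BeautifulSoup(resp.text, 'html.parser')
            results = []
            for item in soup.select('div.film-list div.item'):
                results.append(SearchResult(
                    title=item.select_one('a.name').text,
                    url=item.select_one('a.name')['href'],
                    poster=item.select_one('img')['src'],
                ))
            return results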
@ -70,7 +77,8 @@ class Anime:
if not _skip_online_data:
logging.info('Extracting episode info from page')
self.get_data()
self._episode_urls = self.get_data()
self._len = len(self._episode_urls)
@classmethod
def verify_url(self, url):
@ -87,6 +95,28 @@ class Anime:
return cls.subclasses[sitename]
def get_data(self):
"""
get_data is called inside the :code:`__init__` of
:py:class:`~anime_downloader.sites.anime.BaseAnime`. It is used to get
the necessary data about the anime and its episodes.
This function calls
:py:meth:`~anime_downloader.sites.anime.BaseAnime._scarpe_episodes`
and
:py:meth:`~anime_downloader.sites.anime.BaseAnime._scrape_metadata`.
TODO: Refactor this so that classes which need not be soupified don't
have to override this function.
Returns
-------
list
A list of tuples of episodes containing episode name and
episode url.
Ex::
[('1', 'https://9anime.is/.../...', ...)]
"""
self._episode_urls = []
try:
self._scrape_metadata()
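A rough, hypothetical sketch of the flow the docstring describes; the real method handles errors and site-specific details differently, and :code:`self.url` as well as the pairing of episode numbers with urls are assumptions::

    import requests
    from bs4 import BeautifulSoup

    def get_data(self):
        # soupify the anime page, then delegate to the two scraping hooks
        soup = BeautifulSoup(requests.get(self.url).text, 'html.parser')
        self._scrape_metadata(soup)                 # fills title, meta, ...
        episode_urls = self._scarpe_episodes(soup)  # plain list of urls
        # pair each url with an episode name/number, as in the Ex above
        self._episode_urls = [(str(i + 1), url)
                              for i, url in enumerate(episode_urls)]
        return self._episode_urls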
@ -128,10 +158,35 @@ Episode count: {length}
def __str__(self):
return self.title
def _scrape_episodes(self):
def _scarpe_episodes(self, soup):
"""
_scarpe_episodes is a function which has to be overridden by the site
classes to scrape the episode urls from the web page.
Parameters
----------
soup: :py:class:`bs4.BeautifulSoup`
soup is the HTML of the anime page parsed by BeautifulSoup.
Returns
-------
:code:`list` of :code:`str`
A list of episode urls.
"""
return
def _scrape_metadata(self):
def _scrape_metadata(self, soup):
"""
_scrape_metadata is a function which has to be overridden by the site
classes to scrape the metadata of the anime from the web page.
Parameters
----------
soup: :py:class:`bs4.BeautifulSoup`
soup is the HTML of the anime page parsed by BeautifulSoup.
"""
return
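Continuing the hypothetical :code:`MySite` from the search sketch above, the two hooks together might look like this; all selectors and metadata keys are placeholders::

    from anime_downloader.sites.anime import BaseAnime

    class MySite(BaseAnime):
        sitename = 'mysite'

        def _scarpe_episodes(self, soup):
            # return a plain list of episode urls found on the page
            return [a['href'] for a in soup.select('ul.episodes a')]

        def _scrape_metadata(self, soup):
            # set attributes on self; nothing is returned
            self.title = soup.select_one('h1.title').text.strip()
            self.meta = {'year': soup.select_one('span.year').text}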
@ -234,11 +289,38 @@ class AnimeEpisode:
class SearchResult:
def __init__(self, title, url, poster):
"""
The SearchResult class holds one result of a search done by an Anime
class.
Parameters
----------
title: str
Title of the anime.
url: str
URL of the anime.
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
Attributes
----------
title: str
Title of the anime.
url: str
URL of the anime.
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
"""
def __init__(self, title, url, poster, meta=''):
self.title = title
self.url = url
self.poster = poster
self.meta = ''
self.meta = meta
def __repr__(self):
return '<SearchResult Title: {} URL: {}>'.format(self.title, self.url)
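For illustration, constructing one by hand (all values are made up)::

    from anime_downloader.sites.anime import SearchResult

    result = SearchResult(
        title='Some Anime',
        url='https://example.com/anime/some-anime',
        poster='https://example.com/posters/some-anime.jpg',
        meta={'year': '2018'},
    )
    print(result)
    # <SearchResult Title: Some Anime URL: https://example.com/anime/some-anime>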

View File

@ -1,3 +1,11 @@
Writing your own custom site class
**********************************
:code:`anime_downloader` is built with easy extensibility in mind.
Each of the sites (in the tool) can roughly be classified into one of two categories:
- Sites which don't use cloudflare DDoS protection. Ex: :py:class:`~anime_downloader.sites.nineanime.NineAnime`
- Sites which use cloudflare DDoS protection. Ex: :py:class:`~anime_downloader.sites.kissanime.KissAnime`
Sites which don't use cloudflare have the base class :py:class:`~anime_downloader.sites.anime.BaseAnime`. Sites which do use cloudflare have the base class :py:class:`~anime_downloader.sites.baseanimecf.BaseAnimeCF`.
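Concretely, choosing the base class is the only cloudflare-specific step when writing a new site class. A hypothetical skeleton for each case; the class names are placeholders, and the cloudflare base presumably drives :code:`cfscrape`, which the cloudflare extra installs::

    # a site without cloudflare protection
    from anime_downloader.sites.anime import BaseAnime

    class PlainSite(BaseAnime):
        sitename = 'plainsite'

    # a site behind cloudflare's DDoS protection
    from anime_downloader.sites.baseanimecf import BaseAnimeCF

    class CloudflareSite(BaseAnimeCF):
        sitename = 'cfsite'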

View File

@ -4,4 +4,6 @@ Base classes
.. automodule:: anime_downloader.sites.anime
.. autoclass:: anime_downloader.sites.anime.BaseAnime
:members: search
:members: search, get_data, _scarpe_episodes, _scrape_metadata
.. autoclass:: anime_downloader.sites.anime.SearchResult

View File

@ -42,6 +42,7 @@ extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
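:code:`sphinx.ext.napoleon` is the stock Sphinx extension that understands the NumPy-style :code:`Parameters` / :code:`Returns` / :code:`Attributes` sections used in the docstrings above, so they render as proper field lists without raw reST markup. A small illustrative docstring it would parse (the function body is made up)::

    def verify_url(url):
        """
        Check whether a url is handled by this site.

        Parameters
        ----------
        url: str
            URL to be checked.

        Returns
        -------
        bool
            True if the url belongs to this site.
        """
        return url.startswith('https://example.com')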