Merge pull request #311 from Blatzar/kickass

Added Kickassanime provider.
master
Vishnunarayan K I 2020-03-27 17:55:07 +05:30 committed by GitHub
commit 1cfb9233ad
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 132 additions and 0 deletions

View File

@ -55,6 +55,7 @@ Yeah. Me too! That's why this tool exists.
- animeout
- itsaturday
- Animeflv
- Kickassanime
- Kissanime - requires Node.js
- Kisscartoon - requires Node.js

View File

@ -0,0 +1,25 @@
import logging
import re
import json
import base64
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class Haloani(BaseExtractor):
    """Extractor for streams hosted on haloani.ru (used by Kickassanime).

    The player page passes its source list to ``Base64.decode("...")``
    in inline JavaScript; we decode that payload and pull the first
    file URL out of the embedded JSON array.
    """

    def _get_data(self):
        url = self.url
        soup = helpers.soupify(helpers.get(url))
        # The iframe src is relative to the KickAssAnime player root.
        src = 'https://haloani.ru/KickAssAnime/' + (soup.select('iframe')[0].get('src'))
        page = helpers.get(src).text
        # Capture ONLY the Base64 payload via group(1). The previous
        # pattern's match included the literal `Base64.decode("` prefix,
        # whose 12 alphanumeric characters are themselves valid Base64
        # and were silently decoded into nine junk bytes — harmless only
        # because 12 is a multiple of 4, keeping the rest of the stream
        # aligned. The unescaped '.' is also escaped now.
        regex = r'Base64\.decode\("([^"]*)'
        decoded = base64.b64decode(re.search(regex, page).group(1))
        # Locate the JSON array of sources inside the decoded blob.
        # NOTE: str(decoded) is the bytes repr (b'...'); the regex and
        # the backslash-strip below are tuned to that representation.
        regex = r'\[{[^\]]*\]'
        links = json.loads(re.search(regex, str(decoded)).group())
        # URLs in the blob are backslash-escaped (e.g. "\/").
        link = links[0]['file'].replace('\\', '')
        return {
            'stream_url': link,
            'referer': src,
        }

View File

@ -49,6 +49,18 @@ ALL_EXTRACTORS = [
'regex': 'mp4sh',
'class': 'MP4Sh'
},
{
'sitename': 'vidstream',
'modulename': 'vidstream',
'regex': 'vidstream',
'class': 'VidStream'
},
{
'sitename': 'haloani',
'modulename': 'haloani',
'regex': 'haloani',
'class': 'Haloani'
},
]

View File

@ -0,0 +1,22 @@
import logging
import re
import sys
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class VidStream(BaseExtractor):
    """Extractor for vidstreaming.io embeds.

    Finds the download-page URL inside the embed page, then returns
    the first direct link listed on that download page.
    """

    def _get_data(self):
        # Some callers hand us a malformed scheme with four slashes.
        embed_url = self.url.replace('https:////', 'https://')
        embed_page = helpers.get(embed_url).text
        download_re = r'https://vidstreaming\.io/download\?[^"]*'
        download_url = re.search(download_re, embed_page).group()
        download_soup = helpers.soupify(helpers.get(download_url))
        # 'dowload' (sic) is the site's own class name — keep as-is.
        first_anchor = download_soup.select('div.dowload > a')[0]
        return {
            'stream_url': first_anchor.get('href'),
            'referer': download_url
        }

View File

@ -16,6 +16,7 @@ ALL_ANIME_SITES = [
('a2zanime','a2zanime','A2zanime'),
('animeout', 'animeout', 'AnimeOut'),
('animesimple','animesimple','AnimeSimple'),
('kickass','kickass','KickAss'),
]

View File

@ -0,0 +1,71 @@
import logging
import re
import json
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
class KickAss(Anime, sitename='kickassanime'):
    """Site scraper for kickassanime.rs.

    The site embeds its data as JSON inside inline scripts, so every
    method locates the relevant JSON blob with a regex and parses it.
    """
    sitename = 'kickassanime'
    url = f'https://{sitename}.rs/search'

    @classmethod
    def search(cls, query):
        # The search page inlines its result list as a JSON array of
        # objects carrying 'name' and 'slug' fields.
        page = helpers.soupify(helpers.get(cls.url,
                                           params={'q': query}))
        regex = r'\[{[\W\w]*?}]'
        entries = json.loads(re.search(regex, str(page)).group())
        return [
            SearchResult(
                title=entry['name'],
                url=f'https://kickassanime.rs{entry["slug"]}')
            for entry in entries
        ]

    def _scrape_episodes(self):
        # Episode list is embedded as a JSON array, newest first;
        # reverse it so episode 1 comes first.
        page = helpers.soupify(helpers.get(self.url))
        regex = r'\[{[\W\w]*?}]'
        episodes = json.loads(re.search(regex, str(page)).group())
        return [f'https://kickassanime.rs{ep["slug"]}' for ep in episodes[::-1]]

    def _scrape_metadata(self):
        # The '{"name"...}' match stops at the FIRST '}', which lands
        # inside an open JSON array in the page data, so appending ']}'
        # deliberately closes it back into a parseable object.
        page = helpers.get(self.url).text
        regex = r'{"name"[^}]*}'
        info = json.loads(re.search(regex, str(page)).group() + ']}')
        self.title = info['name']
class KickAssEpisode(AnimeEpisode, sitename='kickassanime'):
    """Episode scraper for kickassanime.rs.

    The episode page embeds player data as JSON; the first usable
    primary link leads either to a haloani source list or to a
    vidstream redirect page.
    """
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        # JSON blob holding the episode's player data. The trailing
        # space in the pattern anchors the end of the blob.
        regex = r'{"clip[\w\W]*?}\]} '
        elements = json.loads(re.search(regex,str(soup)).group())
        links = ['link1','link2','link3','link4']
        sources_list = [] #Primary sources which links to more sources
        for a in links:
            # Keep only links that are non-empty after removing spaces.
            if len((elements['episode'][a]).replace(' ','')) != 0:
                sources_list.append(elements['episode'][a])
        # NOTE(review): raises IndexError if every linkN field is blank —
        # confirm whether an all-empty source list can occur in practice.
        soup = helpers.get(sources_list[0]).text
        regex = r'\[{[\W\w]*?}\]'
        sources = re.search(regex,str(soup))
        if not sources: #Either vidstream or haloani
            # No JSON array means a vidstream redirect page; the URL sits
            # in a `window.location = '...'` assignment. [20:] strips the
            # single [^/] char plus the 19-char "window.location = '".
            regex = r"[^/]window\.location = '[^']*"
            sources = re.search(regex,str(soup)).group()[20:]
            return [('vidstream', sources,)]
        sources = json.loads(sources.group())
        return [('haloani', sources[0]['src'],)]