diff --git a/anime_downloader/extractors/kwik.py b/anime_downloader/extractors/kwik.py
index 37bdaf0..0e157e1 100644
--- a/anime_downloader/extractors/kwik.py
+++ b/anime_downloader/extractors/kwik.py
@@ -1,10 +1,15 @@
 import logging
+from platform import node
 import re
+import subprocess
 import requests
+import tempfile
 from anime_downloader.extractors.base_extractor import BaseExtractor
+from anime_downloader.sites.helpers.request import temp_dir
 from anime_downloader.sites import helpers
 from anime_downloader import util
+from anime_downloader.util import eval_in_node
 from subprocess import CalledProcessError
 
 logger = logging.getLogger(__name__)
@@ -18,55 +23,112 @@ class Kwik(BaseExtractor):
     '''
 
     def _get_data(self):
+        ld = logger.debug
         # Kwik servers don't have direct link access you need to be referred
         # from somewhere, I will just use the url itself. We then
         # have to rebuild the url. Hopefully kwik doesn't block this too
 
         # Necessary
-        self.url = self.url.replace(".cx/e/", ".cx/f/")
-        self.headers.update({"referer": self.url})
+        #ld(self.url)
+        #self.url = self.url.replace(".cx/e/", ".cx/f/")
+        #self.headers.update({"referer": self.url})
 
-        cookies = util.get_hcaptcha_cookies(self.url)
+        headers = {"Referer": "https://kwik.cx/"}
 
-        if not cookies:
-            resp = util.bypass_hcaptcha(self.url)
-        else:
-            resp = requests.get(self.url, cookies=cookies)
+        res = requests.get(self.url, headers=headers)
 
-        title_re = re.compile(r'title>(.*)<')
+        #ld(res.text)
 
-        kwik_text = resp.text
-        deobfuscated = None
+        evalText = helpers.soupify(res.text)
 
-        loops = 0
-        while not deobfuscated and loops < 6:
-            try:
-                deobfuscated = helpers.soupify(util.deobfuscate_packed_js(re.search(r'<(script).*(var\s+_.*escape.*?)(?s)', kwik_text).group(2)))
-            except (AttributeError, CalledProcessError) as e:
-                if type(e) == AttributeError:
-                    resp = util.bypass_hcaptcha(self.url)
-                    kwik_text = resp.text
+        scripts = evalText.select("script")
 
-                if type(e) == CalledProcessError:
-                    resp = requests.get(self.url, cookies=cookies)
-            finally:
-                cookies = resp.cookies
-                title = title_re.search(kwik_text).group(1)
-                loops += 1
+        for i in scripts:
+            rexd = re.compile("", "")
+            break
 
-        post_url = deobfuscated.form["action"]
-        token = deobfuscated.input["value"]
+        tf = tempfile.mktemp(dir=temp_dir)
 
-        resp = helpers.post(post_url, headers=self.headers, params={"_token": token}, cookies=cookies, allow_redirects=False)
-        stream_url = resp.headers["Location"]
+        with open(tf, 'w', encoding="utf-8") as f:
+            f.write(rexd)
+
+        #print(tf)
 
-        logger.debug('Stream URL: %s' % stream_url)
+        #ld(nodeRes)
+
+        nodeRes = str(subprocess.getoutput(f"node {tf}"))
+
+        ld(nodeRes)
+
+        stream_url = re.search(r"source='([^;]*)';", nodeRes).group().replace("source='", "").replace("';", "")
+        #reg = re.compile("[\s\S]*")
+
+        ld(stream_url)
+
+        #kwik_text = resp.text
+
+        #title_re = re.compile(r'title>(.*)<')
+        #title = title_re.search(kwik_text).group(1)
 
         return {
             'stream_url': stream_url,
-            'meta': {
-                'title': title,
-                'thumbnail': ''
-            },
-            'referer': None
+#            'meta': {
+#                'title': title,
+#                'thumbnail': ''
+#            },
+            'referer': "https://kwik.cx/"
         }
+
+
+        #cookies = util.get_hcaptcha_cookies(self.url)
+
+        #if not cookies:
+        #    resp = util.bypass_hcaptcha(self.url)
+        #else:
+        #    resp = requests.get(self.url, cookies=cookies)
+
+        #
+        #deobfuscated = None
+
+        #loops = 0
+        #while not deobfuscated and loops < 6:
+        #    try:
+        #        deobfuscated = helpers.soupify(util.deobfuscate_packed_js(re.search(r'<(script).*(var\s+_.*escape.*?)(?s)', kwik_text).group(2)))
+        #    except (AttributeError, CalledProcessError) as e:
+        #        if type(e) == AttributeError:
+        #            resp = util.bypass_hcaptcha(self.url)
+        #            kwik_text = resp.text
+
+        #        if type(e) == CalledProcessError:
+        #            resp = requests.get(self.url, cookies=cookies)
+        #    finally:
+        #        cookies = resp.cookies
+        #
+        #        loops += 1
+
+        #post_url = deobfuscated.form["action"]
+        #token = deobfuscated.input["value"]
+
+        #resp = helpers.post(post_url, headers=self.headers, params={"_token": token}, cookies=cookies, allow_redirects=False)
+        #stream_url = resp.headers["Location"]
+
+        #logger.debug('Stream URL: %s' % stream_url)
+
+        #return {
+        #    'stream_url': stream_url,
+        #    'meta': {
+        #        'title': title,
+        #        'thumbnail': ''
+        #    },
+        #    'referer': None
+        #}
diff --git a/anime_downloader/sites/animepahe.py b/anime_downloader/sites/animepahe.py
index 97ddb6b..9f09cb0 100644
--- a/anime_downloader/sites/animepahe.py
+++ b/anime_downloader/sites/animepahe.py
@@ -74,7 +74,7 @@ class AnimePahe(Anime, sitename='animepahe'):
         for search_result in search_results['data']:
             search_result_info = SearchResult(
                 title=search_result['title'],
-                url=cls.base_anime_url + search_result['slug'],
+                url=cls.base_anime_url + search_result['session'],
                 poster=search_result['poster']
             )
 
diff --git a/anime_downloader/sites/__init__.py b/anime_downloader/sites/__init__.py
index 054d83b..0e8d2c8 100644
--- a/anime_downloader/sites/__init__.py
+++ b/anime_downloader/sites/__init__.py
@@ -18,6 +18,7 @@ ALL_ANIME_SITES = [
    ('animetake','animetake','AnimeTake'),
    ('animeonline','animeonline360','AnimeOnline'),
    ('animeout', 'animeout', 'AnimeOut'),
+    ('animepahe', 'animepahe', 'AnimePahe'),
    ('animerush', 'animerush', 'AnimeRush'),
    ('animesimple', 'animesimple', 'AnimeSimple'),
    ('animesuge', 'animesuge', 'AnimeSuge'),
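For context on what the rewritten `_get_data` in kwik.py is doing: it fetches the embed page with a `Referer: https://kwik.cx/` header, pulls the obfuscated `<script>` out of the HTML, runs it through a local `node` binary, and regexes the `source='...'` assignment out of Node's output. Below is a minimal standalone sketch of that flow using `requests`, `beautifulsoup4`, and the standard library; `fetch_stream_url` and `KWIK_REFERER` are illustrative names rather than project helpers, and the `eval(` to `console.log(` substitution is an assumption, since the exact script-extraction step is not visible in this diff.

```python
# Hedged sketch of the kwik extraction flow, assuming requests, bs4, and a
# `node` binary on PATH. Names here are illustrative, not the project's API.
import os
import re
import subprocess
import tempfile

import requests
from bs4 import BeautifulSoup

KWIK_REFERER = "https://kwik.cx/"


def fetch_stream_url(embed_url: str) -> str:
    # kwik rejects direct hits; it expects a kwik.cx Referer.
    page = requests.get(embed_url, headers={"Referer": KWIK_REFERER}).text

    # Find the packed/obfuscated script that rebuilds the player config.
    soup = BeautifulSoup(page, "html.parser")
    packed = next(
        (s.string for s in soup.find_all("script") if s.string and "eval(" in s.string),
        None,
    )
    if packed is None:
        raise ValueError("no packed script found on the embed page")

    # Print the unpacked code instead of executing it, then hand it to Node.
    # (Assumption: the patch's stripped replace() calls do something similar.)
    js = packed.replace("eval(", "console.log(", 1)
    with tempfile.NamedTemporaryFile("w", suffix=".js", delete=False) as f:
        f.write(js)
        path = f.name
    try:
        out = subprocess.run(
            ["node", path], capture_output=True, text=True, check=True
        ).stdout
    finally:
        os.remove(path)

    # The unpacked output assigns the HLS URL to `source`.
    match = re.search(r"source='([^']+)'", out)
    if not match:
        raise ValueError("source= not found in node output")
    return match.group(1)
```

Relative to the patch itself, this sketch swaps `tempfile.mktemp` (deprecated and race-prone) for `tempfile.NamedTemporaryFile`, and `subprocess.getoutput(f"node {tf}")` for `subprocess.run(["node", path], ...)`, which avoids routing the temp path through a shell and raises if `node` exits non-zero.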