Fixes for review

master
Dmitriy 2018-10-16 13:59:53 +04:00
parent ff2c1916ec
commit e7c4d0e54b
No known key found for this signature in database
GPG Key ID: F7E6393A426A5BBE
7 changed files with 29 additions and 22 deletions

View File

@@ -3,6 +3,8 @@ import os
from anime_downloader.downloader.base_downloader import BaseDownloader
from anime_downloader import session
session = session.get_session()
class HTTPDownloader(BaseDownloader):
def _download(self):
@@ -21,12 +23,11 @@ class HTTPDownloader(BaseDownloader):
with open(self.path, 'w'):
pass
s = session.get_session()
r = s.get(self.url, stream=True)
r = session.get(self.url, stream=True)
while self.downloaded < self.total_size:
r = s.get(self.url,
headers=set_range(range_start, range_end),
stream=True)
r = session.get(self.url,
headers=set_range(range_start, range_end),
stream=True)
if r.status_code == 206:
with open(self.path, 'ab') as f:
for chunk in r.iter_content(chunk_size=self.chunksize):
@@ -42,7 +43,7 @@ class HTTPDownloader(BaseDownloader):
range_end = ''
def _non_range_download(self):
r = session.get_session().get(self.url, stream=True)
r = session.get(self.url, stream=True)
if r.status_code == 200:
with open(self.path, 'wb') as f:

View File

@@ -1,11 +1,12 @@
import logging
import re
import requests
from bs4 import BeautifulSoup
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
session = session.get_session()
class MP4Upload(BaseExtractor):
'''Extracts video url from mp4upload embed pages, performs a request
@@ -21,8 +22,7 @@ class MP4Upload(BaseExtractor):
r'.*?(www\d+).*?\|video\|(.*?)\|(\d+)\|.*?',
re.DOTALL)
s = session.get_session()
mp4u_embed = s.get(self.url).text
mp4u_embed = session.get(self.url).text
domain, video_id, protocol = source_parts_re.match(mp4u_embed).groups()
logging.debug('Domain: %s, Video ID: %s, Protocol: %s' %
@@ -30,7 +30,7 @@ class MP4Upload(BaseExtractor):
url = self.url.replace('embed-', '')
# Return to non-embed page to collect title
mp4u_page = BeautifulSoup(s.get(url).text, 'html.parser')
mp4u_page = BeautifulSoup(session.get(url).text, 'html.parser')
title = mp4u_page.find('span', {'class': 'dfilename'}).text
title = title[:title.rfind('_')][:title.rfind('.')].replace(' ', '_')

View File

@@ -5,18 +5,19 @@ from bs4 import BeautifulSoup
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
session = session.get_session()
class RapidVideo(BaseExtractor):
def _get_data(self):
s = session.get_session()
url = self.url + '&q=' + self.quality
logging.debug('Calling Rapid url: {}'.format(url))
headers = self.headers
headers['referer'] = url
try:
r = s.get(url, headers=headers)
r = session.get(url, headers=headers)
except:
r = s.post(url, {
r = session.post(url, {
'cursor.x': 12,
'cursor.y': 12,
'block': 1,

View File

@@ -4,13 +4,15 @@ import requests
import re
from bs4 import BeautifulSoup
session = session.get_session()
class GogoanimeEpisode(BaseEpisode):
QUALITIES = ['360p', '480p', '720p']
_base_url = 'https://www2.gogoanime.se'
def _get_sources(self):
soup = BeautifulSoup(session.get_session().get(self.url).text, 'html.parser')
soup = BeautifulSoup(session.get(self.url).text, 'html.parser')
url = 'https:'+soup.select_one('li.anime a').get('data-video')
res = requests.get(url)
@@ -35,7 +37,7 @@ class GogoAnime(BaseAnime):
'id': anime_id,
}
res = session.get_session().get(self._episode_list_url, params=params)
res = session.get(self._episode_list_url, params=params)
soup = BeautifulSoup(res.text, 'html.parser')
epurls = list(

View File

@@ -9,6 +9,7 @@ import cfscrape
import logging
scraper = cfscrape.create_scraper()
session = session.get_session()
class KisscartoonEpisode(BaseEpisode):
@@ -18,19 +19,18 @@ class KisscartoonEpisode(BaseEpisode):
QUALITIES = ['720p']
def _get_sources(self):
s = session.get_session()
params = {
'v': '1.1',
'episode_id': self.url.split('id=')[-1],
}
headers = desktop_headers
headers['referer'] = self.url
res = s.get(self._episode_list_url, params=params, headers=headers)
res = session.get(self._episode_list_url, params=params, headers=headers)
url = res.json()['value']
headers = desktop_headers
headers['referer'] = self.url
res = s.get('https:' + url, headers=headers)
res = session.get('https:' + url, headers=headers)
return [(
'no_extractor',

View File

@@ -10,6 +10,8 @@ import logging
__all__ = ['NineAnimeEpisode', 'NineAnime']
session = session.get_session()
class NineAnimeEpisode(BaseEpisode):
QUALITIES = ['360p', '480p', '720p', '1080p']
@@ -59,7 +61,7 @@ class NineAnime(BaseAnime):
@classmethod
def search(cls, query):
r = session.get_session().get('https://www4.9anime.is/search?', params={'keyword': query}, headers=desktop_headers)
r = session.get('https://www4.9anime.is/search?', params={'keyword': query}, headers=desktop_headers)
logging.debug(r.url)
@@ -103,7 +105,7 @@ class NineAnime(BaseAnime):
params = {}
params['_'] = int(generate_(params))
params['_'] = 648
soup = BeautifulSoup(session.get_session().get(api_url, params=params).json()['html'], 'html.parser')
soup = BeautifulSoup(session.get(api_url, params=params).json()['html'], 'html.parser')
episodes = soup.find('div', {'class': 'server', 'data-name': 33})
episodes = episodes.find_all('li')

View File

@@ -16,6 +16,7 @@ with warnings.catch_warnings():
BLOCK_SIZE = 16
KEY = b"k8B$B@0L8D$tDYHGmRg98sQ7!%GOEGOX27T"
session = session.get_session()
class TwistMoeEpisode(BaseEpisode):
@@ -33,7 +34,7 @@ class TwistMoe(BaseAnime):
@classmethod
def search(self, query):
r = session.get_session().get('https://twist.moe')
r = session.get('https://twist.moe')
soup = BeautifulSoup(r.text, 'html.parser')
all_anime = soup.select_one('nav.series').select('li')
animes = []
@@ -49,7 +50,7 @@ class TwistMoe(BaseAnime):
def get_data(self):
anime_name = self.url.split('/a/')[-1].split('/')[0]
url = self._api_url.format(anime_name)
episodes = session.get_session().get(
episodes = session.get(
url,
headers={
'x-access-token': '1rj2vRtegS8Y60B3w3qNZm5T2Q0TN2NR'