Add "Disable SSL cert verifying" via options for requests

master
Dmitriy 2018-10-04 17:00:29 +04:00
parent 29cc13a57e
commit 6d71d8a899
No known key found for this signature in database
GPG Key ID: F7E6393A426A5BBE
12 changed files with 57 additions and 32 deletions

View File

@@ -77,10 +77,15 @@ def cli():
          'experience throttling.',
     type=int
 )
+@click.option(
+    '--disable-ssl',
+    is_flag=True,
+    help='Disable verifying the SSL certificate, if flag is set'
+)
 @click.pass_context
 def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
        force_download, log_level, download_dir, file_format, provider,
-       external_downloader, chunk_size, fallback_qualities):
+       external_downloader, chunk_size, disable_ssl, fallback_qualities):
     """ Download the anime using the url or search for it.
     """
@@ -89,6 +94,12 @@ def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
     cls = get_anime_class(anime_url)
 
+    disable_ssl = cls and cls.__name__ == 'Masterani' or disable_ssl
+    util.set_default_requests_options(
+        # Set global options for requests here. For example: timeout=N, proxies={}
+        verify=not disable_ssl
+    )
+
     if not cls:
         anime_url = util.search(anime_url, provider)
         cls = get_anime_class(anime_url)
@@ -139,14 +150,7 @@ def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
         if chunk_size is not None:
             chunk_size *= 1e6
             chunk_size = int(chunk_size)
-        if cls.__name__ == 'Masterani':
-            episode.download(force=force_download,
-                             path=download_dir,
-                             format=file_format,
-                             range_size=chunk_size,
-                             ssl=False)
-        else:
-            episode.download(force=force_download,
+        episode.download(force=force_download,
                          path=download_dir,
                          format=file_format,
                          range_size=chunk_size)
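
A quick end-to-end sketch of what the new flag does, not part of the commit itself (it assumes the anime_downloader package from this diff is importable). Note the operator precedence in the added line: `cls and cls.__name__ == 'Masterani' or disable_ssl` parses as `(cls and cls.__name__ == 'Masterani') or disable_ssl`, so Masterani always forces verification off and --disable-ssl can only widen that, never re-enable it.

# Sketch: how dl() now seeds the global requests options (names from the diff).
from anime_downloader import util

disable_ssl = False                                    # --disable-ssl not passed
cls = None                                             # no provider matched the URL yet
disable_ssl = cls and cls.__name__ == 'Masterani' or disable_ssl
util.set_default_requests_options(verify=not disable_ssl)
print(util.get_requests_options())                     # -> {'verify': True}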

View File

@@ -8,20 +8,19 @@ from anime_downloader import util
 
 class BaseDownloader:
-    def __init__(self, source, path, force, range_size=None, ssl=True):
+    def __init__(self, source, path, force, range_size=None):
         logging.info(path)
 
         self.url = source.stream_url
         self.referer = source.referer
         self.path = path
         self.range_size = range_size
-        self.ssl = ssl
 
         util.make_dir(path.rsplit('/', 1)[0])
 
         self.chunksize = 16384
 
-        r = requests.get(self.url, stream=True, verify=ssl)
+        r = requests.get(self.url, stream=True, **util.get_requests_options())
         self.total_size = int(r.headers['Content-length'])
 
         if os.path.exists(path):
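
The `**util.get_requests_options()` idiom used throughout this commit is ordinary keyword-argument unpacking. A minimal standalone illustration (the URL and the options dict are placeholders, not values from the code):

import requests

options = {'verify': False}        # stand-in for what get_requests_options() returns
# The two calls below are equivalent:
r1 = requests.get('https://example.com', stream=True, **options)
r2 = requests.get('https://example.com', stream=True, verify=False)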

View File

@@ -1,6 +1,7 @@
 import requests
 import os
+from anime_downloader import util
 
 from anime_downloader.downloader.base_downloader import BaseDownloader
@@ -21,11 +22,12 @@ class HTTPDownloader(BaseDownloader):
             with open(self.path, 'w'):
                 pass
 
-        r = requests.get(self.url, stream=True, verify=self.ssl)
+        r = requests.get(self.url, stream=True, **util.get_requests_options())
         while self.downloaded < self.total_size:
             r = requests.get(self.url,
                              headers=set_range(range_start, range_end),
-                             stream=True)
+                             stream=True,
+                             **util.get_requests_options())
             if r.status_code == 206:
                 with open(self.path, 'ab') as f:
                     for chunk in r.iter_content(chunk_size=self.chunksize):
@@ -41,7 +43,7 @@ class HTTPDownloader(BaseDownloader):
                 range_end = ''
 
     def _non_range_download(self):
-        r = requests.get(self.url, stream=True, verify=self.ssl)
+        r = requests.get(self.url, stream=True, **util.get_requests_options())
 
         if r.status_code == 200:
             with open(self.path, 'wb') as f:
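
For reference, the 206 check above is the standard HTTP range-request pattern: ask for a byte slice and accept it only when the server answers 206 Partial Content. A hedged sketch (the URL and byte range are illustrative; `set_range` in the diff builds the same kind of header):

import requests

url = 'https://example.com/file.bin'                  # hypothetical file
r = requests.get(url, headers={'Range': 'bytes=0-1023'}, stream=True)
if r.status_code == 206:                              # server honoured the byte slice
    first_kib = next(r.iter_content(chunk_size=1024))
elif r.status_code == 200:                            # range ignored, full body returned
    pass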

View File

@@ -2,14 +2,14 @@ import requests
 import re
 import base64
 
+from anime_downloader import util
 from anime_downloader.extractors.base_extractor import BaseExtractor
 
 
 class StreamMoe(BaseExtractor):
     def _get_data(self):
         url = self.url
-        res = requests.get(url)
+        res = requests.get(url, **util.get_requests_options())
         content_re = re.compile(r"= atob\('(.*?)'\)")
         source_re = re.compile(r'source src="(.*?)"')
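
The two regexes above drive the whole extractor: JavaScript's atob() is plain Base64, so the encoded player markup decodes with base64.b64decode. A self-contained sketch with made-up page content:

import base64
import re

page = "= atob('PHNvdXJjZSBzcmM9Imh0dHBzOi8vZXhhbXBsZS5jb20vdi5tcDQiPg==')"
content_re = re.compile(r"= atob\('(.*?)'\)")
source_re = re.compile(r'source src="(.*?)"')

content = base64.b64decode(content_re.search(page).group(1)).decode()
print(source_re.search(content).group(1))             # https://example.com/v.mp4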

View File

@@ -2,6 +2,8 @@ import logging
 import re
 
 import requests
 from bs4 import BeautifulSoup
+
+from anime_downloader import util
 
 from anime_downloader.extractors.base_extractor import BaseExtractor
@@ -19,7 +21,7 @@ class MP4Upload(BaseExtractor):
             r'.*?(www\d+).*?\|video\|(.*?)\|(\d+)\|.*?',
             re.DOTALL)
 
-        mp4u_embed = requests.get(self.url).text
+        mp4u_embed = requests.get(self.url, **util.get_requests_options()).text
         domain, video_id, protocol = source_parts_re.match(mp4u_embed).groups()
         logging.debug('Domain: %s, Video ID: %s, Protocol: %s' %
@@ -27,7 +29,7 @@ class MP4Upload(BaseExtractor):
         url = self.url.replace('embed-', '')
 
         # Return to non-embed page to collect title
-        mp4u_page = BeautifulSoup(requests.get(url).text, 'html.parser')
+        mp4u_page = BeautifulSoup(requests.get(url, **util.get_requests_options()).text, 'html.parser')
         title = mp4u_page.find('span', {'class': 'dfilename'}).text
         title = title[:title.rfind('_')][:title.rfind('.')].replace(' ', '_')

View File

@@ -3,6 +3,7 @@ import requests
 import re
 
 from bs4 import BeautifulSoup
+from anime_downloader import util
 
 from anime_downloader.extractors.base_extractor import BaseExtractor
@@ -13,13 +14,13 @@ class RapidVideo(BaseExtractor):
         headers = self.headers
         headers['referer'] = url
         try:
-            r = requests.get(url, headers=headers)
+            r = requests.get(url, headers=headers, **util.get_requests_options())
         except:
             r = requests.post(url, {
                 'cursor.x': 12,
                 'cursor.y': 12,
                 'block': 1,
-            }, headers=headers)
+            }, headers=headers, **util.get_requests_options())
         soup = BeautifulSoup(r.text, 'html.parser')
         # TODO: Make these a different function. Can be reused in other classes

View File

@@ -49,7 +49,7 @@ class BaseAnime:
     def get_data(self):
         self._episode_urls = []
-        r = requests.get(self.url, headers=desktop_headers)
+        r = requests.get(self.url, headers=desktop_headers, **util.get_requests_options())
         soup = BeautifulSoup(r.text, 'html.parser')
 
         try:
@@ -104,7 +104,7 @@ class BaseEpisode:
     stream_url = ''
 
     def __init__(self, url, quality='720p', parent=None,
-                 ep_no=None):
+                 ep_no=None, requests_options=None):
         if quality not in self.QUALITIES:
             raise AnimeDLError('Incorrect quality: "{}"'.format(quality))
@@ -114,6 +114,7 @@ class BaseEpisode:
         self._parent = parent
         self._sources = None
         self.pretty_title = '{}-{}'.format(self._parent.title, self.ep_no)
+        self.requests_options = requests_options or {}
 
         logging.debug("Extracting stream info of id: {}".format(self.url))
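
The `requests_options or {}` line is the usual guard against sharing mutable state: the parameter defaults to None and is normalised to a fresh dict per instance, so a later `**self.requests_options` expansion is always safe. A tiny standalone illustration of the idiom (the function name is made up):

def make_options(requests_options=None):
    return requests_options or {}          # fresh dict when nothing is passed

a, b = make_options(), make_options()
assert a == {} and a is not b              # no dict shared between instances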

View File

@@ -1,3 +1,4 @@
+from anime_downloader import util
 from anime_downloader.sites.anime import BaseAnime, BaseEpisode
 import requests
 import re
@@ -9,7 +10,7 @@ class GogoanimeEpisode(BaseEpisode):
     _base_url = 'https://www2.gogoanime.se'
 
     def _get_sources(self):
-        soup = BeautifulSoup(requests.get(self.url).text, 'html.parser')
+        soup = BeautifulSoup(requests.get(self.url, **util.get_requests_options()).text, 'html.parser')
         url = 'https:'+soup.select_one('li.anime a').get('data-video')
         res = requests.get(url)
@@ -34,7 +35,7 @@ class GogoAnime(BaseAnime):
             'id': anime_id,
         }
 
-        res = requests.get(self._episode_list_url, params=params)
+        res = requests.get(self._episode_list_url, params=params, **util.get_requests_options())
         soup = BeautifulSoup(res.text, 'html.parser')
 
         epurls = list(

View File

@@ -1,3 +1,4 @@
+from anime_downloader import util
 from anime_downloader.sites.kissanime import KissAnime
 from anime_downloader.sites.anime import BaseEpisode, SearchResult
 from anime_downloader.sites.exceptions import NotFoundError
@@ -25,12 +26,12 @@ class KisscartoonEpisode(BaseEpisode):
         headers = desktop_headers
         headers['referer'] = self.url
         res = requests.get(self._episode_list_url,
-                           params=params, headers=headers)
+                           params=params, headers=headers, **util.get_requests_options())
         url = res.json()['value']
 
         headers = desktop_headers
         headers['referer'] = self.url
-        res = requests.get('https:' + url, headers=headers)
+        res = requests.get('https:' + url, headers=headers, **util.get_requests_options())
 
         return [(
             'no_extractor',
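
One thing worth flagging in the context lines above (pre-existing, not introduced by this commit): `headers = desktop_headers` binds a second name to the shared module-level dict, so `headers['referer'] = self.url` mutates desktop_headers for every later caller. A standalone sketch of the aliasing and the copy that would avoid it:

desktop_headers = {'user-agent': 'Mozilla/5.0'}   # stand-in for the shared constant

headers = desktop_headers                  # aliases, does not copy
headers['referer'] = 'https://example.com'
assert 'referer' in desktop_headers        # the shared dict was mutated

safe = desktop_headers.copy()              # a shallow copy keeps callers isolated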

View File

@@ -60,7 +60,7 @@ class NineAnime(BaseAnime):
     @classmethod
     def search(cls, query):
         r = requests.get('https://www4.9anime.is/search?',
-                         params={'keyword': query}, headers=desktop_headers)
+                         params={'keyword': query}, headers=desktop_headers, **util.get_requests_options())
 
         logging.debug(r.url)
@@ -104,7 +104,7 @@ class NineAnime(BaseAnime):
         params = {}
         params['_'] = int(generate_(params))
         params['_'] = 648
-        soup = BeautifulSoup(requests.get(api_url,params=params).json()['html'], 'html.parser')
+        soup = BeautifulSoup(requests.get(api_url, params=params, **util.get_requests_options()).json()['html'], 'html.parser')
 
         episodes = soup.find('div', {'class': 'server', 'data-name': 33})
         episodes = episodes.find_all('li')

View File

@@ -6,6 +6,7 @@ import requests
 from bs4 import BeautifulSoup
 import warnings
 
+from anime_downloader import util
 from anime_downloader.sites.anime import BaseAnime, BaseEpisode, SearchResult
@@ -33,7 +34,7 @@ class TwistMoe(BaseAnime):
     @classmethod
     def search(self, query):
-        r = requests.get('https://twist.moe')
+        r = requests.get('https://twist.moe', **util.get_requests_options())
         soup = BeautifulSoup(r.text, 'html.parser')
         all_anime = soup.select_one('nav.series').select('li')
         animes = []
@@ -53,7 +54,8 @@ class TwistMoe(BaseAnime):
             url,
             headers={
                 'x-access-token': '1rj2vRtegS8Y60B3w3qNZm5T2Q0TN2NR'
-            }
+            },
+            **util.get_requests_options()
         )
         episodes = episodes.json()
         self.title = anime_name

View File

@@ -10,10 +10,22 @@ import os
 import errno
 import time
 import ast
+from copy import deepcopy
 
 from anime_downloader.sites import get_anime_class
 from anime_downloader.const import desktop_headers
 
+_requests_options = dict()
+
+
+def set_default_requests_options(**options):
+    _requests_options.clear()
+    _requests_options.update(options)
+
+
+def get_requests_options():
+    return deepcopy(_requests_options)
+
 
 def setup_logger(log_level):
     if log_level == 'DEBUG':
@@ -110,7 +122,7 @@ def print_info(version):
 def get_json(url, params=None):
     logging.debug('API call URL: {} with params {!r}'.format(url, params))
-    res = requests.get(url, headers=desktop_headers, params=params)
+    res = requests.get(url, headers=desktop_headers, params=params, **get_requests_options())
     logging.debug('URL: {}'.format(res.url))
     data = res.json()
     logging.debug('Returned data: {}'.format(data))
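
A round-trip sketch of the two new helpers, runnable against this diff's util.py (the option values are illustrative): set_default_requests_options replaces the module-level dict's contents in place, and get_requests_options hands back a deepcopy, so callers cannot mutate the shared defaults by accident.

from anime_downloader import util

util.set_default_requests_options(verify=False, timeout=10)
opts = util.get_requests_options()
opts['verify'] = True                                  # mutating the copy...
assert util.get_requests_options()['verify'] is False  # ...leaves the defaults alone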