yohoo. Downloader refactor

master
Vishnunarayan K I 2018-07-28 00:22:21 +05:30
parent 027288563e
commit a4c9d6899f
5 changed files with 151 additions and 30 deletions

View File

@@ -69,10 +69,16 @@ def cli():
    'Use "{aria2}" to use aria2 as downloader. See github wiki.',
    metavar='DOWNLOAD COMMAND'
)
@click.option(
    '--chunk-size',
    help='Chunk size (in MB) for downloading in chunks. Use this if you '
    'experience throttling.',
    type=int
)
@click.pass_context
def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
       force_download, log_level, download_dir, file_format, provider,
       external_downloader):
       external_downloader, chunk_size):
    """ Download the anime using the url or search for it.
    """
@@ -127,10 +133,13 @@ def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
                util.external_download(external_downloader, episode,
                                       file_format, path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            episode.download(force=force_download,
                             path=download_dir,
                             format=file_format)
                             format=file_format,
                             range_size=chunk_size)
            print()
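Note: the new --chunk-size value is taken in MB on the command line and converted to bytes before being handed to episode.download() as range_size. A minimal sketch of that conversion, assuming a hypothetical value of 10 MB:

    chunk_size = 10                      # value parsed by click, in MB (hypothetical)
    chunk_size = int(chunk_size * 1e6)   # 10 MB -> 10000000 bytes
    # later forwarded as episode.download(..., range_size=chunk_size)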
@@ -160,6 +169,7 @@ def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
    help='The anime provider (website) for search.',
    type=click.Choice(['9anime', 'kissanime', 'twist.moe'])
)
@click.option(
    '--log-level', '-ll', 'log_level',
    type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR']),

View File

@@ -0,0 +1,5 @@
from anime_downloader.downloader.http_downloader import HTTPDownloader


def get_downloader(downloader):
    return HTTPDownloader
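get_downloader() is a stub for now: it ignores its argument and always returns HTTPDownloader, leaving room for other backends later. Its intended use mirrors the call site in base.py below (the source, path and force values here are illustrative):

    from anime_downloader.downloader import get_downloader

    Downloader = get_downloader('http')   # currently always HTTPDownloader
    downloader = Downloader(source, path, force=False, range_size=None)
    downloader.download()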

View File

@@ -0,0 +1,66 @@
import os
import requests
import time
import logging
import sys

from anime_downloader import util


class BaseDownloader:
    def __init__(self, source, path, force, range_size=None):
        logging.info(path)

        self.url = source.stream_url
        self.referer = source.referer
        self.path = path
        self.range_size = range_size

        util.make_dir(path.rsplit('/', 1)[0])

        self.chunksize = 16384

        r = requests.get(self.url, stream=True)
        self.total_size = int(r.headers['Content-length'])

        if os.path.exists(path):
            # treat a file whose size is within a few bytes of Content-length as already downloaded
            if abs(os.stat(path).st_size - self.total_size) < 10 and not force:
                logging.warning('File already downloaded. Skipping download.')
                return
            else:
                os.remove(path)

    def download(self):
        self.pre_process()
        self.start_time = time.time()
        self.downloaded = 0
        self._download()
        self.post_process()

    def _download(self):
        raise NotImplementedError

    def pre_process(self):
        pass

    def post_process(self):
        pass

    def report_chunk_downloaded(self):
        self.downloaded += self.chunksize
        write_status(self.downloaded, self.total_size, self.start_time)


def write_status(downloaded, total_size, start_time):
    elapsed_time = time.time() - start_time
    rate = (downloaded / 1024) / elapsed_time if elapsed_time else 0
    downloaded = float(downloaded) / 1048576
    total_size = float(total_size) / 1048576

    status = 'Downloaded: {0:.2f}MB/{1:.2f}MB, Rate: {2:.2f}KB/s'.format(
        downloaded, total_size, rate)

    sys.stdout.write("\r" + status + " " * 5 + "\r")
    sys.stdout.flush()
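BaseDownloader handles path setup, size probing and progress reporting, and leaves the actual transfer to _download(), with pre_process()/post_process() as optional hooks. A minimal sketch of a custom backend built on those assumptions (DummyDownloader is hypothetical and not part of this commit):

    import requests

    from anime_downloader.downloader.base_downloader import BaseDownloader


    class DummyDownloader(BaseDownloader):
        def _download(self):
            # single plain GET, reusing the base class chunk size and progress reporting
            r = requests.get(self.url, stream=True)
            with open(self.path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=self.chunksize):
                    if chunk:
                        f.write(chunk)
                        self.report_chunk_downloaded()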

View File

@@ -0,0 +1,61 @@
import requests
import os

from anime_downloader.downloader.base_downloader import BaseDownloader


class HTTPDownloader(BaseDownloader):
    def _download(self):
        if self.range_size is None:
            self._non_range_download()
        else:
            self._ranged_download()

    def _ranged_download(self):
        http_chunksize = self.range_size

        range_start = 0
        range_end = http_chunksize

        # Make a new file, maybe not the best way
        with open(self.path, 'w'):
            pass

        r = requests.get(self.url, stream=True)
        while self.downloaded < self.total_size:
            r = requests.get(self.url,
                             headers=set_range(range_start, range_end),
                             stream=True)
            if r.status_code == 206:
                with open(self.path, 'ab') as f:
                    for chunk in r.iter_content(chunk_size=self.chunksize):
                        if chunk:
                            f.write(chunk)
                            self.report_chunk_downloaded()

            if range_end == '':
                break
            range_start = os.stat(self.path).st_size
            range_end += http_chunksize
            if range_end > self.total_size:
                range_end = ''

    def _non_range_download(self):
        r = requests.get(self.url, stream=True)

        if r.status_code == 200:
            with open(self.path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=self.chunksize):
                    if chunk:
                        f.write(chunk)
                        self.report_chunk_downloaded()


def set_range(start=0, end=''):
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 "
                      "Firefox/56.0"
    }
    headers['Range'] = 'bytes={}-{}'.format(start, end)
    return headers
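The ranged path issues repeated HTTP Range requests, each expecting a 206 Partial Content reply, and blanks range_end once it overshoots total_size so the final request is open-ended. For a hypothetical 25 MB file fetched with a range_size of 10 MB the header sequence would look roughly like:

    set_range(0, 10000000)['Range']          # 'bytes=0-10000000'
    set_range(10000000, 20000000)['Range']   # 'bytes=10000000-20000000'
    set_range(20000000, '')['Range']         # 'bytes=20000000-'  (final, open-ended)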

View File

@@ -11,7 +11,7 @@ from anime_downloader.sites.exceptions import AnimeDLError, NotFoundError
from anime_downloader import util
from anime_downloader.const import desktop_headers
from anime_downloader.extractors import get_extractor
from anime_downloader.downloader import get_downloader


class BaseAnime:
    sitename = ''
@@ -159,7 +159,7 @@ class BaseEpisode:
        raise NotImplementedError

    def download(self, force=False, path=None,
                 format='{anime_title}_{ep_no}'):
                 format='{anime_title}_{ep_no}', range_size=None):
        logging.info('Downloading {}'.format(self.pretty_title))
        if format:
            file_name = util.format_filename(format, self)+'.mp4'
@@ -171,32 +171,11 @@ class BaseEpisode:
        else:
            path = os.path.join(path, file_name)

        logging.info(path)
        r = requests.get(self.source().stream_url, stream=True)
        util.make_dir(path.rsplit('/', 1)[0])

        total_size = int(r.headers['Content-length'])
        downloaded, chunksize = 0, 16384
        start_time = time.time()
        if os.path.exists(path):
            if os.stat(path).st_size == total_size and not force:
                logging.warning('File already downloaded. Skipping download.')
                return
            else:
                os.remove(path)

        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=chunksize):
                    if chunk:
                        f.write(chunk)
                        downloaded += chunksize
                        write_status((downloaded), (total_size),
                                     start_time)

        Downloader = get_downloader('http')
        downloader = Downloader(self.source(),
                                path, force, range_size=range_size)
        downloader.download()


class SearchResult:
    def __init__(self, title, url, poster):
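With this refactor BaseEpisode.download() only builds the target path and delegates the transfer to the downloader returned by get_downloader('http'); chunked downloading is opted into per call through the new range_size argument. A hedged usage sketch (the episode object and values here are illustrative):

    # download in ~10 MB ranges instead of a single continuous GET
    episode.download(force=False, path='downloads', range_size=int(10 * 1e6))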