PRs Merged
Merged old PRs and started working on ac.qq.com
This commit is contained in:
parent
aa88e468af
commit
4045972c5d
@ -112,4 +112,6 @@
|
||||
- PR Merge for #161
|
||||
- PR Merge for #162
|
||||
- PR Merge for #167
|
||||
- PR Merge for #172 [2018.09.24]
|
||||
- PR Merge for #172 [2018.09.24]
|
||||
- PR Merge for #189 [2018.11.05]
|
||||
- PR Merge for #199 [2018.11.27]
|
@ -7,10 +7,12 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -yq \
|
||||
build-essential chrpath libssl-dev libxft-dev \
|
||||
libfreetype6 libfreetype6-dev libfontconfig1 libfontconfig1-dev \
|
||||
wget nodejs-legacy
|
||||
|
||||
# We're not using PhantomJS anymore. So, this step should be removed for now.
|
||||
# install phantomjs and symlink to /usr/local/bin/
|
||||
RUN wget -q https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-2.1.1-linux-x86_64.tar.bz2 && \
|
||||
tar xvjf phantomjs-2.1.1-linux-x86_64.tar.bz2 -C /usr/local/share/ && \
|
||||
ln -s /usr/local/share/phantomjs-2.1.1-linux-x86_64/bin/phantomjs /usr/local/bin/
|
||||
#RUN wget -q https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-2.1.1-linux-x86_64.tar.bz2 && \
|
||||
# tar xvjf phantomjs-2.1.1-linux-x86_64.tar.bz2 -C /usr/local/share/ && \
|
||||
# ln -s /usr/local/share/phantomjs-2.1.1-linux-x86_64/bin/phantomjs /usr/local/bin/
|
||||
|
||||
# This install comic-dl and symlink to comic_dl command
|
||||
FROM base
|
||||
|
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
__version__ = "2018.09.24"
|
||||
__version__ = "2019.01.26"
|
||||
|
@ -6,6 +6,7 @@ import globalFunctions
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
import base64
|
||||
|
||||
"""A HUGE thanks to @abcfy2 for his amazing implementation of the ac.qq.com APIs.
|
||||
Original code for ac.qq.com : https://github.com/abcfy2/getComic/
|
||||
@ -43,6 +44,8 @@ class AcQq(object):
|
||||
source, cookies_main = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)
|
||||
|
||||
base64data = re.findall(r"DATA\s*=\s*'(.+?)'", str(source))[0][1:]
|
||||
data = re.findall(r"data:\s*'(.+?)',", str(source))
|
||||
nonce = re.findall(r'data-mpmvr="(.+?)"', str(source))[0]
|
||||
logging.debug("base64data : %s" % base64data)
|
||||
# print(base64data)
|
||||
# import sys
|
||||
@ -88,21 +91,30 @@ class AcQq(object):
|
||||
|
||||
def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
|
||||
# TODO fix, broken, doesn't return a json anymore
|
||||
chapter_list = "http://m.ac.qq.com/GetData/getChapterList?id=" + str(comic_name)
|
||||
chapter_list = "https://ac.qq.com/Comic/comicInfo/id/" + str(comic_name)
|
||||
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=chapter_list)
|
||||
content_json = json.loads(str(source))
|
||||
logging.debug("content_json : %s" % content_json)
|
||||
last = int(content_json['last'])
|
||||
first = int(content_json['first'])
|
||||
logging.debug("first : %s" % first)
|
||||
logging.debug("last : %s" % last)
|
||||
|
||||
all_links = []
|
||||
|
||||
for chapter_number in range(first, last + 1):
|
||||
"http://ac.qq.com/ComicView/index/id/538359/cid/114"
|
||||
chapter_url = "http://ac.qq.com/ComicView/index/id/%s/cid/%s" % (comic_name, chapter_number)
|
||||
all_links.append(chapter_url)
|
||||
raw_chapters_table = source.find_all('ol', {'class': 'chapter-page-all works-chapter-list'})
|
||||
for table_data in raw_chapters_table:
|
||||
x = table_data.findAll('a')
|
||||
for a in x:
|
||||
if "/ComicView/" in str(a['href']):
|
||||
all_links.append("https://ac.qq.com" + str(a['href']).strip())
|
||||
# import sys
|
||||
# sys.exit()
|
||||
# content_json = json.loads(str(source))
|
||||
# logging.debug("content_json : %s" % content_json)
|
||||
# last = int(content_json['last'])
|
||||
# first = int(content_json['first'])
|
||||
# logging.debug("first : %s" % first)
|
||||
# logging.debug("last : %s" % last)
|
||||
#
|
||||
# all_links = []
|
||||
#
|
||||
# for chapter_number in range(first, last + 1):
|
||||
# "http://ac.qq.com/ComicView/index/id/538359/cid/114"
|
||||
# chapter_url = "http://ac.qq.com/ComicView/index/id/%s/cid/%s" % (comic_name, chapter_number)
|
||||
# all_links.append(chapter_url)
|
||||
|
||||
logging.debug("all_links : %s" % all_links)
|
||||
if chapter_range != "All":
|
||||
@ -160,6 +172,20 @@ class AcQq(object):
|
||||
|
||||
return 0
|
||||
|
||||
def __decode_data(data, nonce):
    """De-obfuscate the scraped ``DATA`` payload and parse it as JSON.

    ac.qq.com serves chapter metadata as a base64 string with junk
    substrings spliced into it.  *nonce* (the page's ``data-mpmvr``
    attribute) encodes those splices as ``<digits><letters>`` pairs:
    the digits (masked to a byte) give the insertion offset and the
    letters are the junk inserted there.  Removing the pairs from the
    last to the first restores valid base64, which decodes to a JSON
    document.

    NOTE(review): defined without ``self`` — presumably invoked through
    the class rather than an instance; confirm against the callers.

    :param data: obfuscated base64 payload scraped from the page.
    :param nonce: splice descriptor string taken from ``data-mpmvr``.
    :return: the decoded payload as a Python object (``json.loads``).
    """
    chars = list(data)
    splices = re.findall(r'(\d+)([a-zA-Z]+)', nonce)
    # Delete the junk back-to-front so earlier offsets remain valid
    # while the character list shrinks.
    for offset_text, junk in reversed(splices):
        start = int(offset_text) & 255  # offsets are byte-sized
        del chars[start:start + len(junk)]

    json_str = base64.b64decode(''.join(chars)).decode('utf-8')
    return json.loads(json_str)
|
||||
|
||||
def __decode_base64_data(self, base64data):
|
||||
base64DecodeChars = [- 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
|
||||
-1,
|
||||
|
@ -112,4 +112,6 @@
|
||||
- PR Merge for #161
|
||||
- PR Merge for #162
|
||||
- PR Merge for #167
|
||||
- PR Merge for #172 [2018.09.24]
|
||||
- PR Merge for #172 [2018.09.24]
|
||||
- PR Merge for #189 [2018.11.05]
|
||||
- PR Merge for #199 [2018.11.27]
|
Loading…
x
Reference in New Issue
Block a user