Check the "Changelog" for more info.
master
Xonshiz 2017-02-16 08:37:05 +05:30
parent be22e9c072
commit 38a13dfeae
20 changed files with 598 additions and 418 deletions

.idea/workspace.xml

@@ -2,7 +2,26 @@
 <project version="4">
 <component name="ChangeListManager">
 <list default="true" id="52845b00-4ab8-4409-b4b9-d104838212e1" name="Default" comment="">
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/Changelog.md" afterPath="$PROJECT_DIR$/Changelog.md" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/ReadMe.md" afterPath="$PROJECT_DIR$/ReadMe.md" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/docs/Changelog.md" afterPath="$PROJECT_DIR$/docs/Changelog.md" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/docs/index.md" afterPath="$PROJECT_DIR$/docs/index.md" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/.idea/workspace.xml" afterPath="$PROJECT_DIR$/.idea/workspace.xml" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/MANIFEST" afterPath="$PROJECT_DIR$/MANIFEST" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/__init__.py" afterPath="$PROJECT_DIR$/comic_dl/__init__.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/downloader/cookies_required.py" afterPath="$PROJECT_DIR$/comic_dl/downloader/cookies_required.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/downloader/universal.py" afterPath="$PROJECT_DIR$/comic_dl/downloader/universal.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/batoto.py" afterPath="$PROJECT_DIR$/comic_dl/sites/batoto.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/comic_naver.py" afterPath="$PROJECT_DIR$/comic_dl/sites/comic_naver.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/gomanga.py" afterPath="$PROJECT_DIR$/comic_dl/sites/gomanga.py" />
 <change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/kisscomicus.py" afterPath="$PROJECT_DIR$/comic_dl/sites/kisscomicus.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/kissmanga.py" afterPath="$PROJECT_DIR$/comic_dl/sites/kissmanga.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/mangafox.py" afterPath="$PROJECT_DIR$/comic_dl/sites/mangafox.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/readcomic.py" afterPath="$PROJECT_DIR$/comic_dl/sites/readcomic.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/sites/yomanga.py" afterPath="$PROJECT_DIR$/comic_dl/sites/yomanga.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/comic_dl/version.py" afterPath="$PROJECT_DIR$/comic_dl/version.py" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/setup.cfg" afterPath="$PROJECT_DIR$/setup.cfg" />
+<change type="MODIFICATION" beforePath="$PROJECT_DIR$/setup.py" afterPath="$PROJECT_DIR$/setup.py" />
 </list>
 <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
 <option name="TRACKING_ENABLED" value="true" />
@@ -13,7 +32,7 @@
 </component>
 <component name="CoverageDataManager">
 <SUITE FILE_PATH="coverage/comic-dl$readcomic.coverage" NAME="readcomic Coverage Results" MODIFIED="1483330843656" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/comic_dl/sites" />
-<SUITE FILE_PATH="coverage/comic-dl$comic_dl.coverage" NAME="comic_dl Coverage Results" MODIFIED="1484207738752" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/comic_dl" />
+<SUITE FILE_PATH="coverage/comic-dl$comic_dl.coverage" NAME="comic_dl Coverage Results" MODIFIED="1487213669271" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/comic_dl" />
 <SUITE FILE_PATH="coverage/comic-dl$comic_naver.coverage" NAME="comic_naver Coverage Results" MODIFIED="1483326807533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/comic_dl/sites" />
 </component>
 <component name="CreatePatchCommitExecutor">
@@ -25,8 +44,8 @@
 <file leaf-file-name="comic_dl.py" pinned="false" current-in-tab="false">
 <entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
 <provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="221">
-<caret line="13" column="40" lean-forward="false" selection-start-line="13" selection-start-column="40" selection-end-line="13" selection-end-column="40" />
+<state relative-caret-position="323">
+<caret line="27" column="74" lean-forward="false" selection-start-line="27" selection-start-column="74" selection-end-line="27" selection-end-column="74" />
 <folding>
 <element signature="e#47#85#0" expanded="true" />
 </folding>
@@ -34,6 +53,30 @@
 </provider>
 </entry>
 </file>
+<file leaf-file-name="cookies_required.py" pinned="false" current-in-tab="false">
+<entry file="file://$PROJECT_DIR$/comic_dl/downloader/cookies_required.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="1224">
+<caret line="72" column="19" lean-forward="false" selection-start-line="72" selection-start-column="19" selection-end-line="72" selection-end-column="19" />
+<folding>
+<element signature="e#496#534#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+</file>
+<file leaf-file-name="universal.py" pinned="false" current-in-tab="true">
+<entry file="file://$PROJECT_DIR$/comic_dl/downloader/universal.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="641">
+<caret line="40" column="12" lean-forward="false" selection-start-line="40" selection-start-column="12" selection-end-line="40" selection-end-column="12" />
+<folding>
+<element signature="e#432#470#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+</file>
 <file leaf-file-name="honcho.py" pinned="false" current-in-tab="false">
 <entry file="file://$PROJECT_DIR$/comic_dl/honcho.py">
 <provider selected="true" editor-type-id="text-editor">
@@ -44,38 +87,11 @@
 </provider>
 </entry>
 </file>
-<file leaf-file-name="readcomic.py" pinned="false" current-in-tab="false">
-<entry file="file://$PROJECT_DIR$/comic_dl/sites/readcomic.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="408">
-<caret line="88" column="19" lean-forward="false" selection-start-line="88" selection-start-column="19" selection-end-line="88" selection-end-column="19" />
-<folding>
-<element signature="e#47#86#0" expanded="true" />
-<element signature="e#384#1537#0" expanded="false" />
-<element signature="e#1586#2954#0" expanded="false" />
-</folding>
-</state>
-</provider>
-</entry>
-</file>
-<file leaf-file-name="kisscomicus.py" pinned="false" current-in-tab="true">
-<entry file="file://$PROJECT_DIR$/comic_dl/sites/kisscomicus.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="317">
-<caret line="68" column="26" lean-forward="true" selection-start-line="68" selection-start-column="26" selection-end-line="68" selection-end-column="26" />
-<folding>
-<element signature="e#47#62#0" expanded="true" />
-<element signature="e#291#2333#0" expanded="false" />
-</folding>
-</state>
-</provider>
-</entry>
-</file>
 <file leaf-file-name="version.py" pinned="false" current-in-tab="false">
 <entry file="file://$PROJECT_DIR$/comic_dl/version.py">
 <provider selected="true" editor-type-id="text-editor">
 <state relative-caret-position="102">
-<caret line="6" column="24" lean-forward="false" selection-start-line="6" selection-start-column="24" selection-end-line="6" selection-end-column="24" />
+<caret line="6" column="25" lean-forward="false" selection-start-line="6" selection-start-column="25" selection-end-line="6" selection-end-column="25" />
 <folding />
 </state>
 </provider>
@@ -90,16 +106,41 @@
 </list>
 </option>
 </component>
+<component name="FindInProjectRecents">
+<findStrings>
+<find>Image L</find>
+<find>Complete</find>
+<find>sys</find>
+<find>logging</find>
+<find>re.</find>
+<find>reques</find>
+<find>I took</find>
+<find>cfscrape.</find>
+<find>sys.</find>
+<find>requests.</find>
+<find>shutil.</find>
+<find>logging.</find>
+<find>os.</find>
+</findStrings>
+</component>
 <component name="Git.Settings">
 <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
 </component>
 <component name="IdeDocumentHistory">
 <option name="CHANGED_PATHS">
 <list>
-<option value="$PROJECT_DIR$/comic_dl/sites/readcomic.py" />
 <option value="$PROJECT_DIR$/comic_dl/honcho.py" />
-<option value="$PROJECT_DIR$/comic_dl/sites/kisscomicus.py" />
 <option value="$PROJECT_DIR$/comic_dl/version.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/comic_naver.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/kissmanga.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/gomanga.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/yomanga.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/mangafox.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/readcomic.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/kisscomicus.py" />
+<option value="$PROJECT_DIR$/comic_dl/sites/batoto.py" />
+<option value="$PROJECT_DIR$/comic_dl/downloader/cookies_required.py" />
+<option value="$PROJECT_DIR$/comic_dl/downloader/universal.py" />
 </list>
 </option>
 </component>
@@ -130,8 +171,6 @@
 <foldersAlwaysOnTop value="true" />
 </navigator>
 <panes>
-<pane id="Scope" />
-<pane id="Scratches" />
 <pane id="ProjectPane">
 <subPane>
 <PATH>
@@ -196,11 +235,15 @@
 </PATH>
 </subPane>
 </pane>
+<pane id="Scratches" />
+<pane id="Scope" />
 </panes>
 </component>
 <component name="PropertiesComponent">
 <property name="WebServerToolWindowFactoryState" value="false" />
 <property name="js.eslint.eslintPackage" value="" />
+<property name="nodejs_interpreter_path" value="C:/Program Files/nodejs/node" />
+<property name="js-jscs-nodeInterpreter" value="C:\Program Files\nodejs\node.exe" />
 </component>
 <component name="RunManager" selected="Python.comic_dl">
 <configuration default="false" name="comic_naver" type="PythonConfigurationType" factoryName="Python" temporary="true">
@@ -227,15 +270,15 @@
 <envs>
 <env name="PYTHONUNBUFFERED" value="1" />
 </envs>
-<option name="SDK_HOME" value="" />
+<option name="SDK_HOME" value="C:\Pythons\Python3.5\python.exe" />
 <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/comic_dl" />
-<option name="IS_MODULE_SDK" value="true" />
+<option name="IS_MODULE_SDK" value="false" />
 <option name="ADD_CONTENT_ROOTS" value="true" />
 <option name="ADD_SOURCE_ROOTS" value="true" />
 <module name="comic-dl" />
 <EXTENSION ID="PythonCoverageRunConfigurationExtension" enabled="false" sample_coverage="true" runner="coverage.py" />
 <option name="SCRIPT_NAME" value="$PROJECT_DIR$/comic_dl/comic_dl.py" />
-<option name="PARAMETERS" value="-i &quot;http://kisscomic.us/comics/sons-of-anarchy-redwood-original.html&quot;" />
+<option name="PARAMETERS" value="-i &quot;http://bato.to/comic/_/comics/12-beast-r8357&quot; --verbose -u AnimeRG -p 8cbb6ceb8d9079d" />
 <option name="SHOW_COMMAND_LINE" value="false" />
 <method />
 </configuration>
@@ -498,16 +541,17 @@
 <servers />
 </component>
 <component name="ToolWindowManager">
-<frame x="-8" y="-8" width="1936" height="1056" extended-state="6" />
+<frame x="-8" y="-8" width="1936" height="1056" extended-state="7" />
 <editor active="true" />
 <layout>
-<window_info id="Project" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.25799572" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
+<window_info id="Project" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.26332623" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
 <window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="6" side_tool="false" content_ui="tabs" />
 <window_info id="Event Log" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="true" content_ui="tabs" />
 <window_info id="Database" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="3" side_tool="false" content_ui="tabs" />
-<window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
+<window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
+<window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.32900432" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
 <window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
-<window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.32900432" sideWeight="0.5" order="2" side_tool="false" content_ui="tabs" />
+<window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.32900432" sideWeight="0.5" order="2" side_tool="false" content_ui="tabs" />
 <window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
 <window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="7" side_tool="false" content_ui="tabs" />
 <window_info id="Favorites" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="2" side_tool="true" content_ui="tabs" />
@@ -517,7 +561,6 @@
 <window_info id="Commander" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
 <window_info id="Inspection" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="5" side_tool="false" content_ui="tabs" />
 <window_info id="Hierarchy" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="2" side_tool="false" content_ui="combo" />
-<window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
 <window_info id="Ant Build" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
 </layout>
 </component>
@@ -534,6 +577,94 @@
 <watches-manager />
 </component>
 <component name="editorHistoryManager">
+<entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="85">
+<caret line="13" column="14" lean-forward="false" selection-start-line="13" selection-start-column="14" selection-end-line="13" selection-end-column="14" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/honcho.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="306">
+<caret line="22" column="22" lean-forward="false" selection-start-line="22" selection-start-column="11" selection-end-line="22" selection-end-column="22" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/readcomic.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="1326">
+<caret line="88" column="0" lean-forward="false" selection-start-line="88" selection-start-column="0" selection-end-line="88" selection-end-column="0" />
+<folding>
+<element signature="e#47#86#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/kisscomicus.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="0">
+<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/version.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="102">
+<caret line="6" column="24" lean-forward="false" selection-start-line="6" selection-start-column="24" selection-end-line="6" selection-end-column="24" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="85">
+<caret line="13" column="14" lean-forward="false" selection-start-line="13" selection-start-column="14" selection-end-line="13" selection-end-column="14" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/honcho.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="374">
+<caret line="22" column="22" lean-forward="false" selection-start-line="22" selection-start-column="11" selection-end-line="22" selection-end-column="22" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/readcomic.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="1496">
+<caret line="88" column="0" lean-forward="true" selection-start-line="88" selection-start-column="0" selection-end-line="88" selection-end-column="0" />
+<folding>
+<element signature="e#47#86#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/kisscomicus.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="0">
+<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/version.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="102">
+<caret line="6" column="24" lean-forward="false" selection-start-line="6" selection-start-column="24" selection-end-line="6" selection-end-column="24" />
+<folding />
+</state>
+</provider>
+</entry>
 <entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
 <provider selected="true" editor-type-id="text-editor">
 <state relative-caret-position="221">
@@ -558,8 +689,6 @@
 <caret line="37" column="30" lean-forward="false" selection-start-line="37" selection-start-column="30" selection-end-line="37" selection-end-column="30" />
 <folding>
 <element signature="e#47#86#0" expanded="true" />
-<element signature="e#384#1537#0" expanded="false" />
-<element signature="e#1586#2954#0" expanded="false" />
 </folding>
 </state>
 </provider>
@@ -568,10 +697,7 @@
 <provider selected="true" editor-type-id="text-editor">
 <state relative-caret-position="0">
 <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
-<folding>
-<element signature="e#47#62#0" expanded="true" />
-<element signature="e#291#2333#0" expanded="false" />
-</folding>
+<folding />
 </state>
 </provider>
 </entry>
@@ -593,27 +719,11 @@
 </state>
 </provider>
 </entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/sites/comic_naver.py">
+<entry file="file://$PROJECT_DIR$/comic_dl/version.py">
 <provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="395">
-<caret line="52" column="39" lean-forward="false" selection-start-line="51" selection-start-column="8" selection-end-line="52" selection-end-column="39" />
-</state>
-</provider>
-</entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/downloader/universal.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="303">
-<caret line="34" column="52" lean-forward="false" selection-start-line="34" selection-start-column="37" selection-end-line="34" selection-end-column="52" />
-</state>
-</provider>
-</entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="221">
-<caret line="13" column="40" lean-forward="false" selection-start-line="13" selection-start-column="40" selection-end-line="13" selection-end-column="40" />
-<folding>
-<element signature="e#47#85#0" expanded="true" />
-</folding>
+<state relative-caret-position="102">
+<caret line="6" column="25" lean-forward="false" selection-start-line="6" selection-start-column="25" selection-end-line="6" selection-end-column="25" />
+<folding />
 </state>
 </provider>
 </entry>
@@ -625,33 +735,104 @@
 </state>
 </provider>
 </entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/sites/readcomic.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="408">
-<caret line="88" column="19" lean-forward="false" selection-start-line="88" selection-start-column="19" selection-end-line="88" selection-end-column="19" />
-<folding>
-<element signature="e#47#86#0" expanded="true" />
-<element signature="e#384#1537#0" expanded="false" />
-<element signature="e#1586#2954#0" expanded="false" />
-</folding>
-</state>
-</provider>
-</entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/version.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="102">
-<caret line="6" column="24" lean-forward="false" selection-start-line="6" selection-start-column="24" selection-end-line="6" selection-end-column="24" />
-<folding />
-</state>
-</provider>
-</entry>
-<entry file="file://$PROJECT_DIR$/comic_dl/sites/kisscomicus.py">
-<provider selected="true" editor-type-id="text-editor">
-<state relative-caret-position="317">
-<caret line="68" column="26" lean-forward="true" selection-start-line="68" selection-start-column="26" selection-end-line="68" selection-end-column="26" />
-<folding>
-<element signature="e#47#62#0" expanded="true" />
-<element signature="e#291#2333#0" expanded="false" />
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/gomanga.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="170">
+<caret line="10" column="29" lean-forward="false" selection-start-line="10" selection-start-column="29" selection-end-line="10" selection-end-column="29" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/comic_naver.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="170">
+<caret line="10" column="0" lean-forward="false" selection-start-line="10" selection-start-column="0" selection-end-line="10" selection-end-column="36" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/mangafox.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="235">
+<caret line="69" column="21" lean-forward="false" selection-start-line="69" selection-start-column="15" selection-end-line="69" selection-end-column="21" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/yomanga.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="306">
+<caret line="18" column="15" lean-forward="false" selection-start-line="18" selection-start-column="15" selection-end-line="18" selection-end-column="15" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/kisscomicus.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="119">
+<caret line="7" column="36" lean-forward="false" selection-start-line="7" selection-start-column="36" selection-end-line="7" selection-end-column="36" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/readcomic.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="255">
+<caret line="15" column="0" lean-forward="false" selection-start-line="15" selection-start-column="0" selection-end-line="15" selection-end-column="0" />
+<folding>
+<element signature="e#47#86#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/kissmanga.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="85">
+<caret line="5" column="37" lean-forward="false" selection-start-line="5" selection-start-column="37" selection-end-line="5" selection-end-column="37" />
+<folding />
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/sites/batoto.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="323">
+<caret line="19" column="28" lean-forward="true" selection-start-line="19" selection-start-column="28" selection-end-line="19" selection-end-column="28" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/comic_dl.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="323">
+<caret line="27" column="74" lean-forward="false" selection-start-line="27" selection-start-column="74" selection-end-line="27" selection-end-column="74" />
+<folding>
+<element signature="e#47#85#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/downloader/cookies_required.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="1224">
+<caret line="72" column="19" lean-forward="false" selection-start-line="72" selection-start-column="19" selection-end-line="72" selection-end-column="19" />
+<folding>
+<element signature="e#496#534#0" expanded="true" />
+</folding>
+</state>
+</provider>
+</entry>
+<entry file="file://$PROJECT_DIR$/comic_dl/downloader/universal.py">
+<provider selected="true" editor-type-id="text-editor">
+<state relative-caret-position="641">
+<caret line="40" column="12" lean-forward="false" selection-start-line="40" selection-start-column="12" selection-end-line="40" selection-end-column="12" />
+<folding>
+<element signature="e#432#470#0" expanded="true" />
 </folding>
 </state>
 </provider>

Changelog.md

@@ -15,4 +15,6 @@
 - Added a YouTube Tutorial for the script [2016.12.30]
 - Site support for readcomiconlin.to [2017.01.02]
 - Added `Verbose Logging` [2017.01.22]
 - Fixed chapter count error in Kissmanga [2017.01.22]
+- Fixed #4 [2017.02.16]
+- Optimized Imports [2017.02.16]
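The `Optimized Imports` entry refers to the pattern applied throughout the Python diffs below: whole-module imports (`import os`, `import logging`, and so on) are replaced with `from`-imports of only the names each module actually uses, so call sites drop the module prefix. A minimal sketch of the idea, with illustrative names rather than code copied from the repository:

```python
# Before: whole modules are imported and every call carries the module prefix.
# import os
# import logging
# logging.debug("File Check Path : %s" % os.path.normpath(file_name))

# After: only the needed names are imported, and call sites get shorter.
from os.path import normpath
from logging import debug

def log_check_path(file_name):
    # Same behaviour as before, minus the module prefixes.
    debug("File Check Path : %s" % normpath(file_name))
```

The trade-off is that bare names like `get`, `move`, or `exit` carry less context at the call site than `requests.get` or `shutil.move` did.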

MANIFEST

@@ -2,6 +2,18 @@
 setup.cfg
 setup.py
 comic_dl\__init__.py
-comic_dl\comic-dl.py
+comic_dl\comic_dl.py
 comic_dl\honcho.py
 comic_dl\version.py
+comic_dl\downloader\__init__.py
+comic_dl\downloader\cookies_required.py
+comic_dl\downloader\universal.py
+comic_dl\sites\__init__.py
+comic_dl\sites\batoto.py
+comic_dl\sites\comic_naver.py
+comic_dl\sites\gomanga.py
+comic_dl\sites\kisscomicus.py
+comic_dl\sites\kissmanga.py
+comic_dl\sites\mangafox.py
+comic_dl\sites\readcomic.py
+comic_dl\sites\yomanga.py

ReadMe.md

@@ -188,7 +188,7 @@ You can check the changelog [**`HERE`**](https://github.com/Xonshiz/comic-dl/blo
 If you're planning to open an issue for the script or ask for a new feature or anything that requires opening an Issue, then please do keep these things in mind.
 ### Reporting Issues
-If you're going to report an issue, then please run the script again with the "-v or --verbose" argument. It should generate a file in the same directory, with the name "Error Log.log". Copy that log file's data and post it on a [Gist](https://gist.github.com/) and share that gist's link while reporting the issue here.
+If you're going to report an issue, then please run the script again with the "-v or --verbose" argument. It should generate a file in the same directory, with the name "Error Log.log". Copy that log file's data and post it on a [Gist](https://gist.github.com/) and share that gist's link while reporting the issue here. Make sure you **EDIT OUT YOUR USERNAME AND PASSWORD**, if supplied within the command.
 If you don't include the verbose log, there are chances it'll take time to fix the issue(s) you're having.
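For illustration, assuming the `comic_dl.py` entry point and the `-i`/`--verbose` flags visible in the run configuration above, a verbose run could look like the following; the URL is a placeholder, and any `-u`/`-p` credentials should be removed before the log is shared:

```shell
# Hypothetical invocation; substitute the page you were trying to download.
python comic_dl.py -i "http://bato.to/reader#<chapter-id>" --verbose
```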

comic_dl/__init__.py

@@ -0,0 +1,2 @@
+import sites
+import downloader

comic_dl/downloader/cookies_required.py

@@ -13,36 +13,36 @@ This module uses `requests` library to achieve the handling of cookies.
 from __future__ import absolute_import
 from __future__ import print_function
-import os
-import requests
-import shutil
+from os import path
+from requests import get
+from shutil import move,copyfileobj
 from downloader.universal import main as FileDownloader
-import logging
+from logging import debug, basicConfig, DEBUG

 def main(File_Name_Final,Directory_path,tasty_cookies,ddl_image, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
     File_Check_Path = str(Directory_path)+'/'+str(File_Name_Final)
-    logging.debug("File Check Path : %s" % File_Check_Path)
+    debug("File Check Path : %s" % File_Check_Path)
-    if os.path.isfile(File_Check_Path):
+    if path.isfile(File_Check_Path):
         print('[Comic-dl] File Exist! Skipping ',File_Name_Final,'\n')
         pass
-    if not os.path.isfile(File_Check_Path):
+    if not path.isfile(File_Check_Path):
         print('[Comic-dl] Downloading : ',File_Name_Final)
-        response = requests.get(ddl_image, stream=True,cookies=tasty_cookies)
+        response = get(ddl_image, stream=True,cookies=tasty_cookies)
         try:
             with open(File_Name_Final, 'wb') as out_file:
-                shutil.copyfileobj(response.raw, out_file)
-            File_Path = os.path.normpath(File_Name_Final)
+                copyfileobj(response.raw, out_file)
+            File_Path = path.normpath(File_Name_Final)
         except Exception as e:
-            logging.debug("File download error : %s" % e)
+            debug("File download error : %s" % e)
             print("Couldn't download file from : ",ddl_image)
             pass
         try:
-            shutil.move(File_Path,Directory_path)
+            move(File_Path,Directory_path)
         except Exception as e:
             print(e,'\n')
             pass
@@ -50,27 +50,27 @@ def main(File_Name_Final,Directory_path,tasty_cookies,ddl_image, logger):
 def with_referer(File_Name_Final,Directory_path,tasty_cookies,ddl_image,referer, logger):
     File_Check_Path = str(Directory_path)+'/'+str(File_Name_Final)
-    logging.debug("File Check Path : %s" % File_Check_Path)
-    logging.debug("Referrer Received : %s" % referer)
+    debug("File Check Path : %s" % File_Check_Path)
+    debug("Referrer Received : %s" % referer)
-    if os.path.isfile(File_Check_Path):
+    if path.isfile(File_Check_Path):
         print('[Comic-dl] File Exist! Skipping ',File_Name_Final,'\n')
         pass
-    if not os.path.isfile(File_Check_Path):
+    if not path.isfile(File_Check_Path):
         print('[Comic-dl] Downloading : ',File_Name_Final)
         headers = {'Referer': referer}
-        response = requests.get(ddl_image, stream=True,cookies=tasty_cookies,headers=headers)
+        response = get(ddl_image, stream=True,cookies=tasty_cookies,headers=headers)
         try:
             with open(File_Name_Final, 'wb') as out_file:
-                shutil.copyfileobj(response.raw, out_file)
-            File_Path = os.path.normpath(File_Name_Final)
+                copyfileobj(response.raw, out_file)
+            File_Path = path.normpath(File_Name_Final)
         except Exception as e:
-            logging.debug("File download error : %s" % e)
+            debug("File download error : %s" % e)
             print("Couldn't download file from : ",ddl_image)
             pass
         try:
-            shutil.move(File_Path,Directory_path)
+            move(File_Path,Directory_path)
         except Exception as e:
             print(e,'\n')
             pass
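Both functions above share one download shape: a streamed `requests` GET that carries the scraper's cookie jar (plus, in `with_referer`, a `Referer` header), piped to disk with `copyfileobj`. A self-contained sketch of that technique, with placeholder names and a cookie jar assumed to come from elsewhere in the scraper:

```python
from shutil import copyfileobj
from requests import get

def fetch_image(url, cookies, file_name, referer=None):
    # stream=True defers reading the body; copyfileobj then drains
    # response.raw to disk without holding the whole image in memory.
    headers = {'Referer': referer} if referer else {}
    response = get(url, stream=True, cookies=cookies, headers=headers)
    with open(file_name, 'wb') as out_file:
        copyfileobj(response.raw, out_file)
```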

comic_dl/downloader/universal.py

@@ -11,40 +11,37 @@ ddl_image is the direct link to the image itself.
 from __future__ import absolute_import
 from __future__ import print_function
-import os
-# import urllib
-import shutil
+from os import path, remove
+from shutil import move
 import urllib
-#from urllib import URLError
-# import sys
-import logging
+from logging import debug, basicConfig, DEBUG

 def main(File_Name_Final,Directory_path,ddl_image, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
     File_Check_Path = str(Directory_path)+'/'+str(File_Name_Final)
-    logging.debug("File Check Path : %s" % File_Check_Path)
+    debug("File Check Path : %s" % File_Check_Path)
-    if os.path.isfile(File_Check_Path):
+    if path.isfile(File_Check_Path):
         print('[Comic-dl] File Exist! Skipping ',File_Name_Final,'\n')
         pass
-    if not os.path.isfile(File_Check_Path):
+    if not path.isfile(File_Check_Path):
         print('[Comic-dl] Downloading : ',File_Name_Final)
         urllib.request.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
         try:
             urllib.request.urlretrieve(ddl_image, File_Name_Final)
         except Exception as e:
-            logging.debug("Error in retrieving image : %s" % e)
+            debug("Error in retrieving image : %s" % e)
         #filename, headers = urllib.urlretrieve(ddl_image,File_Name_Final)
         #print "File Name : ",filename
         #print "Headers : ",headers
-        File_Path = os.path.normpath(File_Name_Final)
+        File_Path = path.normpath(File_Name_Final)
         try:
-            shutil.move(File_Path,Directory_path)
+            move(File_Path,Directory_path)
         except Exception as e:
             print(e,'\n')
-            os.remove(File_Path)
+            remove(File_Path)
             pass
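The universal downloader spoofs a browser user agent by assigning `urllib.request.URLopener.version` before calling `urlretrieve`, a Python 2-era idiom. A hedged, self-contained equivalent (not the repository's code) sets the header explicitly on a `Request` object:

```python
from urllib.request import Request, urlopen

def fetch(url, file_name):
    # Explicit User-Agent header; some image hosts reject urllib's default.
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urlopen(req) as resp, open(file_name, 'wb') as out_file:
        out_file.write(resp.read())
```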

comic_dl/sites/batoto.py

@@ -3,10 +3,9 @@
 from __future__ import absolute_import
 from __future__ import print_function
-import re
-import os
-import sys
-from more_itertools import unique_everseen
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
 from bs4 import BeautifulSoup
 from selenium import webdriver
 from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@@ -15,7 +14,7 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.by import By
 from downloader.universal import main as FileDownloader
 from six.moves import range
-import logging
+from logging import debug, basicConfig, DEBUG

 """Bato serves the chapters in 2 ways :
@@ -68,7 +67,7 @@ def single_chapter(driver, url, current_directory, User_Name, User_Password, log
     if str(User_Name) not in ["N"] or str(User_Password) not in ["N"]:
         if str(User_Name) in ["N"] or str(User_Password) in ["N"]:
             print("Username or Password cannot be empty.")
-            sys.exit()
+            exit()
         print("Authenticating Your Username and Password ...")
         batoto_login(driver, User_Name, User_Password, logger)
@@ -92,10 +91,10 @@
         )
     except Exception as e:
-        logging.debug("Error in loading page : %s\nTrying to move on." % e)
+        debug("Error in loading page : %s\nTrying to move on." % e)
         pass
     page_title = str(driver.title)
-    logging.debug("Page Title : %s" % page_title)
+    debug("Page Title : %s" % page_title)

     """Batoto doesn't provide shit in the source code of the web page. Hence, we'll be using the outer HTML
     to scrap all the info we need.
@@ -115,44 +114,44 @@ def single_chapter(driver, url, current_directory, User_Name, User_Password, log
             "ERROR [10030]: The thing you're looking for is unavailable. It may be due to:"]:
             print("You cannot access this page. You'll need to log in to download this page.")
             driver.quit()
-            sys.exit()
+            exit()
         else:
             pass
     except Exception as e:
-        logging.debug("Error in access check : %s" % e)
+        debug("Error in access check : %s" % e)
         pass

     try:
         # Getting the Series Name from the <title></title> tags of the web
         # page.
         Series_Name = str(
-            re.search(
+            search(
                 '^(.*)\ \-',
                 page_title).group(1)).strip().replace(
             '_',
             ' ').title()
     except Exception as e:
-        logging.debug("Error in Series Name : %s" % e)
+        debug("Error in Series Name : %s" % e)
         Series_Name = "Unkown Series"

     try:
         # Getting the Series Name from the <title></title> tags of the web
         # page.
         volume_number = int(
-            str(re.search('vol (\d+)', page_title).group(1)).strip())
+            str(search('vol (\d+)', page_title).group(1)).strip())
     except Exception as e:
-        logging.debug("Error in Volume Number : %s" % e)
+        debug("Error in Volume Number : %s" % e)
         volume_number = '0'

     try:
         # Getting the Series Name from the <title></title> tags of the web
         # page.
         chapter_number = int(
-            str(re.search('ch (\d+)', page_title).group(1)).strip())
+            str(search('ch (\d+)', page_title).group(1)).strip())
     except Exception as e:
-        logging.debug("Error in Chapter Number : %s" % e)
+        debug("Error in Chapter Number : %s" % e)
         chapter_number = '0'

     try:
@@ -161,7 +160,7 @@
             '//*[@id="reader"]/div[1]/ul/li[3]/select').text).replace("/", " ").strip()
     except Exception as e:
-        logging.debug("Error in Group Name : %s\nMoving forward..." % e)
+        debug("Error in Group Name : %s\nMoving forward..." % e)
         # Some entries on batoto don't have a name. So, if we get to any such
         # occassion, let's be prepared.
         Group_Name_Finder = str('No Group')
@@ -172,7 +171,7 @@
         page_list = driver.find_element_by_id('page_select')
     except Exception as e:
-        logging.debug("Error in Page Select : %s" % e)
+        debug("Error in Page Select : %s" % e)
         # If we cannot find the 'page_select' element, it means that this
         # chapter is showing all the images in one page.
@@ -188,12 +187,12 @@
         '/' + "Chapter " + str(chapter_number) + " [" + str(Group_Name_Finder) + " ]"

     # Fix for "Special Characters" in The series name
-    File_Directory = re.sub(
+    File_Directory = sub(
         '[^A-Za-z0-9\-\.\'\#\/ \[\]]+',
         '',
         Raw_File_Directory)

-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     print('\n')
     print('{:^80}'.format('%s - %s') % (Series_Name, chapter_number))
@@ -208,24 +207,24 @@
     Look at the last number for the image. Manipulate that and we have what we need.
     """
     items_list = page_list.find_elements_by_tag_name("option")
-    logging.debug("Items List : %s" % items_list)
+    debug("Items List : %s" % items_list)
     for item in items_list:
         list_of_pages = item.text
-        logging.debug("List of Pages : %s" % list_of_pages)
+        debug("List of Pages : %s" % list_of_pages)
     lst_pag = str(list_of_pages)
     Last_Page_number = int(
-        str(re.search('(\d+)', lst_pag).group(1)).strip())
-    logging.debug("Last Page Number : %s" % Last_Page_number)
+        str(search('(\d+)', lst_pag).group(1)).strip())
+    debug("Last Page Number : %s" % Last_Page_number)
     img_link = driver.find_element_by_id('comic_page').get_attribute('src')
-    logging.debug("Image Link : %s" % img_link)
+    debug("Image Link : %s" % img_link)

     for i in range(1, Last_Page_number + 1):
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         if len(str(i)) == 1:
             ddl_image = str(img_link).replace(
@@ -237,7 +236,7 @@
                 'img000001', 'img0000%s') % (i)

         File_Name_Final = str(
-            i).strip() + "." + str(re.search('\d\.(.*?)$', ddl_image).group(1)).strip()
+            i).strip() + "." + str(search('\d\.(.*?)$', ddl_image).group(1)).strip()
         FileDownloader(File_Name_Final, Directory_path, ddl_image, logger)
     print('\n')
@@ -254,18 +253,18 @@
     Image_Links = soup.findAll('div', {'style': 'text-align:center;'})
     for link in Image_Links:
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         x = link.findAll('img')
         for a in x:
             ddl_image = a['src']
-            logging.debug("Image Download Link : %s" % ddl_image)
+            debug("Image Download Link : %s" % ddl_image)
             File_Name_Final = str(
-                re.search(
+                search(
                     'img0000(\d+)\.([jpg]|[png])',
                     ddl_image).group(1)).strip() + "." + str(
-                re.search(
+                search(
                     '\d\.(.*?)$',
                     ddl_image).group(1)).strip()
             FileDownloader(File_Name_Final, Directory_path, ddl_image, logger)
@@ -286,7 +285,7 @@ def whole_series(driver, url, current_directory, User_Name, User_Password, logge
     if str(User_Name) not in ["N"] or str(User_Password) not in ["N"]:
         if str(User_Name) in ["N"] or str(User_Password) in ["N"]:
             print("Username or Password cannot be empty.")
-            sys.exit()
+            exit()
         print("Authenticating Your Username and Password ...")
         batoto_login(driver, User_Name, User_Password, logger)
@@ -305,7 +304,7 @@
         )
     except Exception as e:
-        logging.debug("Error in loading the page : %s\nMoving ahead..." % e)
+        debug("Error in loading the page : %s\nMoving ahead..." % e)
         pass
     elem = driver.find_element_by_xpath("//*")
     Page_Source = elem.get_attribute("outerHTML").encode('utf-8')
@@ -319,13 +318,13 @@
     soup = BeautifulSoup(Page_Source, "html.parser")
     all_links = soup.findAll(
         'tr', {'class': 'row lang_English chapter_row'})
-    logging.debug("Image Links : %s" % all_links)
+    debug("Image Links : %s" % all_links)

     for link in all_links:
         x = link.findAll('a')
         for a in x:
             ddl_image = a['href']
-            logging.debug("Second Image Link : %s" % ddl_image)
+            debug("Second Image Link : %s" % ddl_image)
             if "reader" in ddl_image:
                 link_list.append(ddl_image)
@@ -352,7 +351,7 @@
         )
     except Exception as e:
-        logging.debug("Error in loading the page : %s\nMoving ahead." % e)
+        debug("Error in loading the page : %s\nMoving ahead." % e)
         pass
     elem = driver.find_element_by_xpath("//*")
     Page_Source = elem.get_attribute("outerHTML").encode('utf-8')
@@ -369,7 +368,7 @@
             ddl_image = a['href']
             if "reader" in ddl_image:
                 link_list.append(ddl_image)
-                logging.debug("%s added in the bag!" % ddl_image)
+                debug("%s added in the bag!" % ddl_image)
     print("Total Chapters To Download : ", len(link_list))
     #print(link_list)
@ -396,17 +395,17 @@ def batoto_login(driver, User_Name, User_Password, logger):
) )
except Exception as e: except Exception as e:
logging.debug("Error in loading page : %s\nSaving screenshot and moving..." % e) debug("Error in loading page : %s\nSaving screenshot and moving..." % e)
# driver.save_screenshot('Single_exception.png') # driver.save_screenshot('Single_exception.png')
pass pass
LoggedOut_Title = driver.title LoggedOut_Title = driver.title
logging.debug("Logged out Title : %s" % LoggedOut_Title) debug("Logged out Title : %s" % LoggedOut_Title)
driver.find_element_by_id('ips_username').send_keys(User_Name) driver.find_element_by_id('ips_username').send_keys(User_Name)
driver.find_element_by_id('ips_password').send_keys(User_Password) driver.find_element_by_id('ips_password').send_keys(User_Password)
driver.find_element_by_xpath('//*[@id="login"]/fieldset[2]/input').click() driver.find_element_by_xpath('//*[@id="login"]/fieldset[2]/input').click()
LoggedIn_Title = driver.title LoggedIn_Title = driver.title
logging.debug("Logged In Title : %s" % LoggedIn_Title) debug("Logged In Title : %s" % LoggedIn_Title)
"""A little check to see whether we've logged in or not. Comparing the titles of the before and after logging """A little check to see whether we've logged in or not. Comparing the titles of the before and after logging
pages. pages.
@ -415,21 +414,21 @@ def batoto_login(driver, User_Name, User_Password, logger):
if str(LoggedIn_Title).strip() == str(LoggedOut_Title).strip(): if str(LoggedIn_Title).strip() == str(LoggedOut_Title).strip():
print("Couldn't log you in. Please check your credentials.") print("Couldn't log you in. Please check your credentials.")
driver.quit() driver.quit()
sys.exit() exit()
def batoto_Url_Check(input_url, current_directory, User_Name, User_Password, logger): def batoto_Url_Check(input_url, current_directory, User_Name, User_Password, logger):
if logger == "True": if logger == "True":
logging.basicConfig(format='[Comic-dl]%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG) basicConfig(format='[Comic-dl]%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
batoto_single_regex = re.compile( batoto_single_regex = compile(
'https?://(?P<host>bato.to)/reader\#(?P<extra_characters>[\d\w-]+)?(\/|.)') 'https?://(?P<host>bato.to)/reader\#(?P<extra_characters>[\d\w-]+)?(\/|.)')
batoto_whole_regex = re.compile( batoto_whole_regex = compile(
'^https?://(?P<host>bato.to)/comic/\_/comics/(?P<comic>[\d\w-]+)?(\/|.)$') '^https?://(?P<host>bato.to)/comic/\_/comics/(?P<comic>[\d\w-]+)?(\/|.)$')
#print "Inside" #print "Inside"
lines = input_url.split('\n') lines = input_url.split('\n')
for line in lines: for line in lines:
found = re.search(batoto_single_regex, line) found = search(batoto_single_regex, line)
if found: if found:
match = found.groupdict() match = found.groupdict()
if match['extra_characters']: if match['extra_characters']:
@ -445,7 +444,7 @@ def batoto_Url_Check(input_url, current_directory, User_Name, User_Password, log
else: else:
pass pass
found = re.search(batoto_whole_regex, line) found = search(batoto_whole_regex, line)
if found: if found:
match = found.groupdict() match = found.groupdict()
if match['comic']: if match['comic']:
View File: comic_dl/sites/comic_naver.py
@@ -4,48 +4,49 @@
 from __future__ import unicode_literals
 from __future__ import absolute_import
 from __future__ import print_function
-import re
-import sys
-import os
-import requests
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
+from logging import debug, basicConfig, DEBUG
+from requests import Session,cookies
 from downloader.cookies_required import with_referer as FileDownloader
 from six.moves import range
 from six.moves import input
-import logging
 def single_chapter(url,current_directory, logger):
-    s = requests.Session()
+    s = Session()
     headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}
     req = s.get(url,headers=headers)
     cookies = req.cookies
     page_source_1 = str(req.text.encode('utf-8'))
     try:
-        #Korean_Name = re.search(r'<h2>(.*?)<span class="wrt_nm">',str(page_source)).group(1)
-        Series_Name = re.search(r'titleId=(\d+)',url).group(1)
+        #Korean_Name = search(r'<h2>(.*?)<span class="wrt_nm">',str(page_source)).group(1)
+        Series_Name = search(r'titleId=(\d+)',url).group(1)
     except Exception as e:
-        logging.debug("Error in Series Name : %s" % e)
+        debug("Error in Series Name : %s" % e)
         Series_Name = "Unknown"
     try:
-        #chapter_number = int(re.search(r'\<span\ class\=\"total\"\>(.\d+)\<\/span\>',page_source_1).group(1))
-        chapter_number = re.search(r'&no=(\d+)',url).group(1)
+        #chapter_number = int(search(r'\<span\ class\=\"total\"\>(.\d+)\<\/span\>',page_source_1).group(1))
+        chapter_number = search(r'&no=(\d+)',url).group(1)
     except Exception as e:
         # print(e)
-        logging.debug("Error in Chapter Number : %s" % e)
+        debug("Error in Chapter Number : %s" % e)
         chapter_number = 0
     img_regex = r'http://imgcomic.naver.net/webtoon/\d+/\d+/.+?\.(?:jpg|png|gif|bmp|JPG|PNG|GIF|BMP)'
-    img_links = list(re.findall(img_regex,page_source_1))
-    logging.debug("Image Links : %s" % img_links)
+    img_links = list(findall(img_regex,page_source_1))
+    debug("Image Links : %s" % img_links)
     Raw_File_Directory = str(Series_Name) +'/'+"Chapter "+str(chapter_number)
-    File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
+    File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     print('\n')
     print('{:^80}'.format('=====================================================================\n'))
@@ -53,8 +54,8 @@ def single_chapter(url,current_directory, logger):
     print('{:^80}'.format('=====================================================================\n'))
     for x,items in enumerate(img_links):
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         FileDownloader(str(x+1)+str(items[-4:]),Directory_path,cookies,items,url, logger)
     print('\n')
@@ -67,41 +68,41 @@ def whole_series(url, current_directory, logger):
-    s = requests.Session()
+    s = Session()
     headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}
     req = s.get(url,headers=headers)
     cookies = req.cookies
     page_source_1 = req.text.encode('utf-8')
-    titleId = re.search(r'titleId=(\d+)',url).group(1)
+    titleId = search(r'titleId=(\d+)',url).group(1)
     try:
-        first_link = int(re.search(r'\/webtoon\/detail\.nhn\?titleId\=%s\&no\=(\d+)\&weekday\=tue' %(titleId),page_source_1).group(1))
+        first_link = int(search(r'\/webtoon\/detail\.nhn\?titleId\=%s\&no\=(\d+)\&weekday\=tue' %(titleId),page_source_1).group(1))
     except Exception as e:
         first_link = eval(input("Please Enter the Last chapter of the series : "))
         if not first_link:
             print("You failed to enter the last chapter count. Script will exit now.")
-            sys.exit()
+            exit()
     for x in range(1,int(first_link)):
         Chapter_Url = "http://comic.naver.com/webtoon/detail.nhn?titleId=%s&no=%s" %(titleId,x)
-        logging.debug("Chapter URL : %s" % Chapter_Url)
+        debug("Chapter URL : %s" % Chapter_Url)
         single_chapter(Chapter_Url,current_directory, logger)
 def comic_naver_Url_Check(input_url, current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    comic_naver_single_regex = re.compile(
+    comic_naver_single_regex = compile(
         'https?://(?P<host>comic.naver.com)/webtoon/(?P<detail>detail.nhn)\?titleId\=(?P<extra_characters>[\d]+)?(\/|.)')
-    comic_naver_whole_regex = re.compile(
+    comic_naver_whole_regex = compile(
         'https?://(?P<host>comic.naver.com)/webtoon/(?P<list>list.nhn)\?titleId\=(?P<extra_characters>[\d]+)?(\/|.)')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(comic_naver_single_regex, line)
+        found = search(comic_naver_single_regex, line)
         if found:
             match = found.groupdict()
             if match['detail']:
@@ -111,7 +112,7 @@ def comic_naver_Url_Check(input_url, current_directory, logger):
         else:
             pass
-        found = re.search(comic_naver_whole_regex, line)
+        found = search(comic_naver_whole_regex, line)
         if found:
             match = found.groupdict()
             if match['list']:
View File: comic_dl/sites/gomanga.py
@@ -3,55 +3,55 @@
 from __future__ import absolute_import
 from __future__ import print_function
-import requests
-import re
-import os
-import sys
+from requests import Session,cookies
 from more_itertools import unique_everseen
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
 from bs4 import BeautifulSoup
 from downloader.cookies_required import main as FileDownloader
-import logging
+from logging import debug, basicConfig, DEBUG
 def single_chapter(url,current_directory, logger):
     if not url:
         print("Couldn't get the URL. Please report it on Github Repository.")
-        sys.exit(0)
+        exit(0)
     headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'
     }
-    s = requests.Session()
+    s = Session()
     response = s.get(url, headers=headers)
     tasty_cookies = response.cookies
     Page_source = str(response.text.encode('utf-8'))
-    Series_Name = str(re.search('\/read\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
+    Series_Name = str(search('\/read\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
     try:
-        chapter_number = int(str(re.search('0\/(.*?)/', url).group(1)).strip().replace('0','').replace('/','')) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer.
+        chapter_number = int(str(search('0\/(.*?)/', url).group(1)).strip().replace('0','').replace('/','')) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer.
     except Exception as e:
-        logging.debug("Error in Chapter Number : %s" % e)
+        debug("Error in Chapter Number : %s" % e)
         chapter_number = 0 # Name the chapter 0 if nothing INTEGER type comes up
     Raw_File_Directory = str(Series_Name)+'/'+"Chapter "+str(chapter_number)
-    File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
+    File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
-    ddl_image_list = re.findall('comics(.*?)\"', Page_source)
+    ddl_image_list = findall('comics(.*?)\"', Page_source)
     ddl_list = list(unique_everseen(ddl_image_list))
-    logging.debug("Image Links : %s" % ddl_list)
+    debug("Image Links : %s" % ddl_list)
     print('\n')
@@ -60,12 +60,12 @@ def single_chapter(url,current_directory, logger):
     for i in ddl_list:
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         ddl_image = "http://gomanga.co/reader/content/comics"+str(i).replace('"','').replace('\\','')
-        logging.debug("Image Link : %s" % ddl_image)
+        debug("Image Link : %s" % ddl_image)
-        File_Name_Final = str(re.findall('\/(\d+)\.[jpg]|[png]', i)).replace("[","").replace("]","").replace("'","").replace(",","").strip()+"."+str(re.findall('\d\.(.*?)$', str(i))).replace(",","").replace("[","").replace("]","").replace("'","").strip()
+        File_Name_Final = str(findall('\/(\d+)\.[jpg]|[png]', i)).replace("[","").replace("]","").replace("'","").replace(",","").strip()+"."+str(findall('\d\.(.*?)$', str(i))).replace(",","").replace("[","").replace("]","").replace("'","").strip()
         FileDownloader(File_Name_Final,Directory_path,tasty_cookies,ddl_image, logger)
     print('\n')
@@ -80,13 +80,13 @@ def whole_series(url,current_directory, logger):
     }
-    s = requests.Session()
+    s = Session()
     response = s.get(url, headers=headers)
     tasty_cookies = response.cookies
     Page_source = str(response.text.encode('utf-8'))
-    Series_Name = str(re.search('\/series\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
+    Series_Name = str(search('\/series\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
     soup = BeautifulSoup(Page_source, 'html.parser')
@@ -96,19 +96,19 @@ def whole_series(url,current_directory, logger):
         x = link.findAll('a')
         for a in x:
             url = a['href']
-            logging.debug("Final URL : %s" % url)
+            debug("Final URL : %s" % url)
             single_chapter(url,current_directory, logger)
 def gomanga_Url_Check(input_url,current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    gomanga_single_regex = re.compile('https?://(?P<host>gomanga.co)/reader/read/(?P<comic_single>[\d\w-]+)/en/(?P<volume>\d+)?/(?P<Chapter>\d+)?()|(/page/(?P<PageNumber>\d+)?)')
+    gomanga_single_regex = compile('https?://(?P<host>gomanga.co)/reader/read/(?P<comic_single>[\d\w-]+)/en/(?P<volume>\d+)?/(?P<Chapter>\d+)?()|(/page/(?P<PageNumber>\d+)?)')
-    gomanga_whole_regex = re.compile('^https?://(?P<host>gomanga.co)/reader/(?P<series>series)?/(?P<comic>[\d\w-]+)?(\/|.)$')
+    gomanga_whole_regex = compile('^https?://(?P<host>gomanga.co)/reader/(?P<series>series)?/(?P<comic>[\d\w-]+)?(\/|.)$')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(gomanga_single_regex, line)
+        found = search(gomanga_single_regex, line)
         if found:
             match = found.groupdict()
             if match['Chapter']:
@@ -119,7 +119,7 @@ def gomanga_Url_Check(input_url,current_directory, logger):
-        found = re.search(gomanga_whole_regex, line)
+        found = search(gomanga_whole_regex, line)
         if found:
             match = found.groupdict()
             if match['comic']:
View File: comic_dl/sites/kisscomicus.py
@@ -1,19 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-import cfscrape
-import re
+from re import search,sub,compile, findall
+from os import path,makedirs
 from bs4 import BeautifulSoup
-import os
+from cfscrape import create_scraper
+from logging import debug, basicConfig, DEBUG
 # from downloader.universal import main as FileDownloader
 from downloader.cookies_required import main as FileDownloader
-import requests
-import logging
+from requests import session
 def single_chapter(url, directory, logger):
-    sess = requests.session()
-    sess = cfscrape.create_scraper(sess)
+    sess = session()
+    sess = create_scraper(sess)
     s = sess.get(url)
     cookies = sess.cookies
     connection = s.text.encode('utf-8')
@@ -31,10 +31,10 @@ def single_chapter(url, directory, logger):
     Raw_File_Directory = str(Series_Name) + '/' + "Chapter " + str(chapter_number)
-    File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '',
+    File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '',
                             Raw_File_Directory) # Fix for "Special Characters" in The series name
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     print('\n')
     print('{:^80}'.format('=====================================================================\n'))
@@ -43,16 +43,16 @@ def single_chapter(url, directory, logger):
     # soup = BeautifulSoup(connection, "html.parser")
     linkFinder = soup.findAll('ul', {'class': 'list-image'})
-    logging.debug("Image Links : %s" % linkFinder)
+    debug("Image Links : %s" % linkFinder)
     # print("Link Finder :s %s" % linkFinder)
     for link in linkFinder:
         x = link.findAll('img')
         for a in x:
-            if not os.path.exists(File_Directory):
-                os.makedirs(File_Directory)
+            if not path.exists(File_Directory):
+                makedirs(File_Directory)
             ddlLink = a['src']
-            logging.debug("Final URL : %s" % ddlLink)
+            debug("Final URL : %s" % ddlLink)
             fileName = str(ddlLink).split("/")[-1].strip()
             # print("Link : %s\nFile Name : %s" % (ddlLink, fileName))
             FileDownloader(File_Name_Final=fileName, Directory_path=File_Directory, tasty_cookies=cookies, ddl_image=ddlLink, logger=logger)
@@ -62,7 +62,7 @@ def single_chapter(url, directory, logger):
 def whole_series(url, directory, logger):
-    scraper = cfscrape.create_scraper()
+    scraper = create_scraper()
     connection = scraper.get(url).content
     soup = BeautifulSoup(connection, "html.parser")
@@ -73,7 +73,7 @@ def whole_series(url, directory, logger):
         for a in x:
             # print(a['href'])
             url = "http://kisscomic.us" + a['href']
-            logging.debug("Chapter URL : %s" % url)
+            debug("Chapter URL : %s" % url)
             single_chapter(url, directory, logger)
     print("Finished Downloading")
@@ -81,13 +81,13 @@ def whole_series(url, directory, logger):
 def kissmcomicus_Url_Check(input_url, current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    kissmcomicus_single_regex = re.compile('https?://(?P<host>[^/]+)/chapters/(?P<comic>[\d\w-]+)(?:/Issue-)?')
+    kissmcomicus_single_regex = compile('https?://(?P<host>[^/]+)/chapters/(?P<comic>[\d\w-]+)(?:/Issue-)?')
-    kissmcomicus_whole_regex = re.compile('https?://(?P<host>[^/]+)/comics/(?P<comic_name>[\d\w-]+)?')
+    kissmcomicus_whole_regex = compile('https?://(?P<host>[^/]+)/comics/(?P<comic_name>[\d\w-]+)?')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(kissmcomicus_single_regex, line)
+        found = search(kissmcomicus_single_regex, line)
         if found:
             match = found.groupdict()
             if match['comic']:
@@ -96,7 +96,7 @@ def kissmcomicus_Url_Check(input_url, current_directory, logger):
         else:
             pass
-        found = re.search(kissmcomicus_whole_regex, line)
+        found = search(kissmcomicus_whole_regex, line)
         if found:
             match = found.groupdict()
             if match['comic_name']:
View File: comic_dl/sites/kissmanga.py
@@ -1,18 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-import re
-import os
-import sys
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import print_function
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
 from bs4 import BeautifulSoup
 from downloader.universal import main as FileDownloader
-import cfscrape
-import logging
+from cfscrape import create_scraper
+from logging import debug, basicConfig, DEBUG
 def single_chapter(url, current_directory, logger):
-    scraper = cfscrape.create_scraper()
+    scraper = create_scraper()
     Page_Source = scraper.get(str(url)).content
@@ -21,37 +24,38 @@ def single_chapter(url, current_directory, logger):
     meta = formatted.findAll('title')
     meta_data = list(str(meta).split('\n'))
+    # print(meta_data)
     try:
         Series_Name = str(meta_data[2])
     except Exception as e:
         # print (e)
-        logging.debug("Error in Series Name : %s" % e)
+        debug("Error in Series Name : %s" % e)
         Series_Name = "Unkown Series"
     try:
         # Getting the Volume Number from the page source.
         volume_number = int(
-            str(re.search('Vol\.(.*)\ Ch', Page_Source).group(1)).strip())
+            str(search('Vol\.(.*)\ Ch', Page_Source).group(1)).strip())
     except Exception as e:
-        logging.debug("Error in Volume Number : %s" % e)
+        debug("Error in Volume Number : %s" % e)
         volume_number = '0'
     try:
         chapter_number = str(meta_data[3])
     except Exception as e:
-        logging.debug("Error in Chapter Number : %s\nTrying Something else." % e)
+        debug("Error in Chapter Number : %s\nTrying Something else." % e)
         try:
             # Getting the Volume Number from the page source.
-            chapter_number = str(re.search('Ch\.(.*)\:', str(Page_Source)).group(1)).strip()
+            chapter_number = str(search('Ch\.(.*)\:', str(Page_Source)).group(1)).strip()
         except Exception as e:
-            logging.debug("Error in Chapter Number : %s" % e)
+            debug("Error in Chapter Number : %s" % e)
             chapter_number = str('0')
-    all_links = re.findall('lstImages.push\(\"(.*)\"\)\;', str(formatted))
+    all_links = findall('lstImages.push\(\"(.*)\"\)\;', str(formatted))
-    logging.debug("Image Links : %s" % all_links)
+    debug("Image Links : %s" % all_links)
     if volume_number == '0':
         # Some series don't seem to have volumes mentioned. Let's assume
@@ -59,34 +63,41 @@ def single_chapter(url, current_directory, logger):
         Raw_File_Directory = str(Series_Name) + '/' + \
             "Chapter " + str(chapter_number)
     else:
-        logging.debug("Found the Volume. Making a directory.")
+        debug("Found the Volume. Making a directory.")
         Raw_File_Directory = str(Series_Name) + '/' + "Volume " + \
             str(volume_number) + '/' + "Chapter " + str(chapter_number)
     # Fix for "Special Characters" in The series name
-    File_Directory = re.sub(
+    File_Directory = sub(
         '[^A-Za-z0-9\-\.\'\#\/ \[\]]+',
         '',
         Raw_File_Directory)
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     print ('\n')
     print('{:^80}'.format('%s - %s') % (Series_Name, chapter_number))
     print('{:^80}'.format('=====================================================================\n'))
     for elements in all_links:
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
-        ddl_image = str(elements).strip()
+        sane_url = str(elements).replace("%3a",":").replace("%2f","/").replace("&imgmax=30000","").replace("https://images1-focus-opensocial.googleusercontent.com/gadgets/proxy?container=focus&gadget=a&no_expand=1&resize_h=0&rewriteMime=image%2F*&url=","")
+        # print(sane_url)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
+        ddl_image = str(sane_url).strip()
         try:
-            File_Name_Final = str(re.search(
+            File_Name_Final = str(search(
                 's0/(.*)\.([png]|[jpg])', ddl_image).group(1)).strip() + "." + str(ddl_image[-3:])
         except Exception as e:
-            logging.debug("Error in File Name : %s" % e)
-            File_Name_Final = str(re.search(
-                'title\=(.*)\_(\d+)\.([png]|[jpg])', ddl_image).group(1)).strip() + "." + str(ddl_image[-3:])
+            debug("Error in File Name : %s" % e)
+            try:
+                File_Name_Final = str(search(
+                    'title\=(.*)\_(\d+)\.([png]|[jpg])', ddl_image).group(1)).strip() + "." + str(ddl_image[-3:])
+            except Exception as e:
+                debug("Error inside Error : %s" % e)
+                File_Name_Final = str(ddl_image[-6:])
+        # print(File_Name_Final)
         FileDownloader(File_Name_Final, Directory_path, ddl_image, logger)
     print('\n')
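The `sane_url` line introduced above is the heart of the "Fixed #4" entry in the changelog below: KissManga's image links started arriving wrapped in Google's opensocial image proxy, so the percent-escapes are undone and the proxy prefix stripped before a file name is parsed from the URL. A standalone sketch of that unwrapping, using a made-up CDN host:

    # Illustrative proxied URL of the shape the fix targets (the host is made up).
    proxied = ("https://images1-focus-opensocial.googleusercontent.com/gadgets/proxy"
               "?container=focus&gadget=a&no_expand=1&resize_h=0&rewriteMime=image%2F*"
               "&url=http%3a//example-cdn.com/s0/page_0001.jpg&imgmax=30000")

    # Same replace() chain as the diff: restore ':' and '/', drop the size cap,
    # then strip the proxy prefix, leaving the direct image URL.
    sane_url = (proxied.replace("%3a", ":")
                       .replace("%2f", "/")
                       .replace("&imgmax=30000", "")
                       .replace("https://images1-focus-opensocial.googleusercontent.com/"
                                "gadgets/proxy?container=focus&gadget=a&no_expand=1"
                                "&resize_h=0&rewriteMime=image%2F*&url=", ""))

    print(sane_url)  # -> http://example-cdn.com/s0/page_0001.jpg

The nested try/except that follows it then adds a last-resort branch (`ddl_image[-6:]`) so a file name always exists even when neither regex matches.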
@@ -95,7 +106,7 @@ def single_chapter(url, current_directory, logger):
 def whole_series(url, current_directory, logger):
-    scraper = cfscrape.create_scraper()
+    scraper = create_scraper()
     Page_Source = scraper.get(str(url)).content
@@ -103,7 +114,7 @@ def whole_series(url, current_directory, logger):
     soup = BeautifulSoup(Page_Source, "html.parser")
     all_links = soup.findAll('table', {'class': 'listing'})
-    logging.debug("Chapter Links : %s" % all_links)
+    debug("Chapter Links : %s" % all_links)
     for link in all_links:
         x = link.findAll('a')
@@ -113,41 +124,46 @@ def whole_series(url, current_directory, logger):
             if "Manga" in ddl_image:
                 final_url = "http://kissmanga.com" + ddl_image
                 link_list.append(final_url)
-                logging.debug("%s added in the bag!" % final_url)
+                debug("%s added in the bag!" % final_url)
     if int(len(link_list)) == '0':
         print("Sorry, I couldn't bypass KissManga's Hooman check. Please try again in a few minutes.")
-        sys.exit()
+        exit()
     print("Total Chapters To Download : ", len(link_list))
     for item in link_list:
         url = str(item)
-        logging.debug("Chapter Links : %s" % url)
+        debug("Chapter Links : %s" % url)
         single_chapter(url, current_directory, logger)
 def kissmanga_Url_Check(input_url, current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    kissmanga_single_regex = re.compile(
-        'https?://(?P<host>kissmanga.com)/Manga/(?P<Series_Name>[\d\w-]+)?/((?P<Volume>[Vol\-\d]+)|(.*)(?P<Chapter>[Ch\-\d]+))\-(?P<Chap_Name>[\d\w-]+)\?(?P<id>[\=\d\w-]+)')
+    kissmanga_single_regex = compile(
+        'https?://(?P<host>kissmanga.com)/Manga/(?P<Series_Name>[\d\w-]+)?/((?P<Volume>[Vol\-\d]+)|(.*)(?P<Chapter>[Ch\d\w-]+))\-(?P<Chap_Name>[\d\w-]+)\?(?P<id>[\=\d\w-]+)')
-    kissmanga_whole_regex = re.compile(
+    kissmanga_whole_regex = compile(
         '^https?://(?P<host>kissmanga.com)/Manga/(?P<comic>[\d\w\-]+)?(\/|.)$')
     lines = input_url.split('\n')
+    # print(lines)
     for line in lines:
-        found = re.search(kissmanga_single_regex, line)
+        found = search(kissmanga_single_regex, line)
+        # print(found)
         if found:
             match = found.groupdict()
             if match['Chap_Name']:
                 url = str(input_url)
+                # print("Here inside!")
                 single_chapter(url, current_directory, logger)
+                # print("Passed it")
             else:
                 pass
-        found = re.search(kissmanga_whole_regex, line)
+        found = search(kissmanga_whole_regex, line)
+        # print(found)
         if found:
             match = found.groupdict()
             if match['comic']:
View File: comic_dl/sites/mangafox.py
@@ -3,12 +3,10 @@
 from __future__ import absolute_import
 from __future__ import print_function
-import requests
-import os
-import re
-import sys
-import shutil
-from bs4 import BeautifulSoup
+from requests import get
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
 from selenium import webdriver
 from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
 from selenium.webdriver.support.ui import WebDriverWait
@@ -16,7 +14,7 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.by import By
 from downloader.universal import main as FileDownloader
 from six.moves import range
-import logging
+from logging import debug, basicConfig, DEBUG
 def create_driver():
@@ -31,21 +29,21 @@ def create_driver():
 def single_chapter(driver,url,current_directory, logger):
     try:
-        Series_Name = str(re.search('manga\/(.*?)/v', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
+        Series_Name = str(search('manga\/(.*?)/v', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
     except Exception as e:
-        logging.debug("Error in Series Name : %s\nTrying something else." % e)
+        debug("Error in Series Name : %s\nTrying something else." % e)
-        Series_Name = str(re.search('manga\/(.*?)/c', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
+        Series_Name = str(search('manga\/(.*?)/c', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
     try:
-        volume_number = "Volume " + str(re.search('v(.*?)/c', url).group(1)).strip() # Getting the volume count from the URL itself for naming the folder/dicrectories.
+        volume_number = "Volume " + str(search('v(.*?)/c', url).group(1)).strip() # Getting the volume count from the URL itself for naming the folder/dicrectories.
     except Exception as e:
-        logging.debug("Error in Volume Number : %s" % e)
+        debug("Error in Volume Number : %s" % e)
         volume_number = "Volume 01"
     try:
-        chapter_number = int(str(re.search('\/c(.*?)/\d', url).group(1)).strip()) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer.
+        chapter_number = int(str(search('\/c(.*?)/\d', url).group(1)).strip()) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer.
     except Exception as e:
-        logging.debug("Error in Chapter Number : %s\nTrying something else." % e)
+        debug("Error in Chapter Number : %s\nTrying something else." % e)
         chapter_number = 0 # Getting the chapter count from the URL itself for naming the folder/dicrectories in float.
     if volume_number == '0':
@@ -53,9 +51,9 @@ def single_chapter(driver,url,current_directory, logger):
     else:
         Raw_File_Directory = str(Series_Name)+'/'+str(volume_number)+'/'+"Chapter "+str(chapter_number)
-    File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ \[\]]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
+    File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ \[\]]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     driver.get(url)
@@ -65,7 +63,7 @@ def single_chapter(driver,url,current_directory, logger):
         )
     except Exception as e:
-        logging.debug("Error in loading the webpage : %s\nScreenshot saved." % e)
+        debug("Error in loading the webpage : %s\nScreenshot saved." % e)
         driver.save_screenshot("error.png")
         print("Couldn't load the element. I'll try to move ahead in any case.")
         print('\n')
@@ -75,27 +73,27 @@ def single_chapter(driver,url,current_directory, logger):
     elem = driver.find_element_by_xpath("//*")
     Page_Source = str(elem.get_attribute("outerHTML").encode('utf-8'))
-    First_chapter_link = str(re.search('http://(.*?)/(.*?)/manga/(.*?)/(.*?)/compressed/(.*?)\.jpg', Page_Source).group(0)).strip() # Fix if they change the CDN all of a sudden.
+    First_chapter_link = str(search('http://(.*?)/(.*?)/manga/(.*?)/(.*?)/compressed/(.*?)\.jpg', Page_Source).group(0)).strip() # Fix if they change the CDN all of a sudden.
-    current_chapter_count = int(str(re.search('current_page\=(.*?)\;', Page_Source).group(1)).strip()) # Getting the last chapter number from the URL itself for naming the folder/dicrectories.
+    current_chapter_count = int(str(search('current_page\=(.*?)\;', Page_Source).group(1)).strip()) # Getting the last chapter number from the URL itself for naming the folder/dicrectories.
-    last_chapter_count = int(str(re.search('total_pages\=(.*?)\;', Page_Source).group(1)).strip()) # Getting the last chapter number from the URL itself for naming the folder/dicrectories.
+    last_chapter_count = int(str(search('total_pages\=(.*?)\;', Page_Source).group(1)).strip()) # Getting the last chapter number from the URL itself for naming the folder/dicrectories.
     print('\n')
     print('{:^80}'.format('%s - %s')%(Series_Name,chapter_number))
     print('{:^80}'.format('=====================================================================\n'))
-    if not os.path.exists(File_Directory):
-        os.makedirs(File_Directory)
+    if not path.exists(File_Directory):
+        makedirs(File_Directory)
     for x in range(current_chapter_count,last_chapter_count+1):
         driver.refresh()
         File_Name_Final = str(x)+'.jpg'
         link_container = driver.find_element_by_xpath('//*[@id="image"]')
-        logging.debug("Link Container : %s" % link_container)
+        debug("Link Container : %s" % link_container)
         ddl_image = str(link_container.get_attribute('src'))
-        logging.debug("Image Link : %s" % ddl_image)
+        debug("Image Link : %s" % ddl_image)
         FileDownloader(File_Name_Final,Directory_path,ddl_image, logger)
         driver.find_element_by_xpath('//*[@id="top_bar"]/div/a[2]').click()
@@ -109,7 +107,7 @@ def whole_series(url,current_directory, logger):
         print("Couldn't get the URL. Please report it on Github Repository.")
     try:
-        Series_Name = str(re.search('manga\/(.*?)/', url).group(1)).strip() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
+        Series_Name = str(search('manga\/(.*?)/', url).group(1)).strip() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
     except Exception as e:
         print('Check if the URL is correct or not. Report on Github.')
@@ -119,17 +117,17 @@ def whole_series(url,current_directory, logger):
     }
-    response = requests.get(url, headers=headers)
+    response = get(url, headers=headers)
     Page_source = str(response.text.encode('utf-8'))
     try:
         chapter_link_format = "http://mangafox.me/manga/"+str(Series_Name)+"/v"
-        links = re.findall('{0}(.*?)html'.format(chapter_link_format),Page_source)
+        links = findall('{0}(.*?)html'.format(chapter_link_format),Page_source)
         if len(links) == 0:
             chapter_link_format = "http://mangafox.me/manga/"+str(Series_Name)+"/c"
             #print chapter_link_format
-            links = re.findall('{0}(.*?)html'.format(chapter_link_format),Page_source)
+            links = findall('{0}(.*?)html'.format(chapter_link_format),Page_source)
     except Exception as e:
@@ -149,14 +147,14 @@ def whole_series(url,current_directory, logger):
 def mangafox_Url_Check(input_url,current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    mangafox_single_regex = re.compile('https?://(?P<host>mangafox.me)/manga/(?P<comic>[\d\w-]+)(?P<Volume>(/v\d+)|(.))/(?P<chapter>c\d+(\.\d)?)?/(?P<issue>\d+)?\.html')
+    mangafox_single_regex = compile('https?://(?P<host>mangafox.me)/manga/(?P<comic>[\d\w-]+)(?P<Volume>(/v\d+)|(.))/(?P<chapter>c\d+(\.\d)?)?/(?P<issue>\d+)?\.html')
-    mangafox_whole_regex = re.compile('^https?://(?P<host>mangafox.me)/manga/(?P<comic_series>[\d\w-]+)?|(\/)$')
+    mangafox_whole_regex = compile('^https?://(?P<host>mangafox.me)/manga/(?P<comic_series>[\d\w-]+)?|(\/)$')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(mangafox_single_regex, line)
+        found = search(mangafox_single_regex, line)
         if found:
             match = found.groupdict()
             if match['issue']:
@@ -168,13 +166,13 @@ def mangafox_Url_Check(input_url,current_directory, logger):
                 print(e)
                 driver.quit()
                 driver.quit()
-                sys.exit()
+                exit()
             else:
                 pass
-        found = re.search(mangafox_whole_regex, line)
+        found = search(mangafox_whole_regex, line)
         if found:
             match = found.groupdict()
             if match['comic_series']:
@@ -184,6 +182,6 @@ def mangafox_Url_Check(input_url,current_directory, logger):
                 whole_series(url,current_directory, logger)
             except Exception as e:
                 print(e)
-                sys.exit()
+                exit()
             else:
                 pass
View File: comic_dl/sites/readcomic.py
@@ -6,23 +6,23 @@ from __future__ import absolute_import
 from __future__ import print_function
 from builtins import str
 from downloader.universal import main as FileDownloader
-import re
-import sys
-import cfscrape
-import os
+from re import search,sub,compile, findall
+from os import path,makedirs
+from sys import exit
 from bs4 import BeautifulSoup
-import logging
+from cfscrape import create_scraper
+from logging import debug, basicConfig, DEBUG
 def readcomic_Url_Check(input_url, current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
-    Issue_Regex = re.compile('https?://(?P<host>[^/]+)/Comic/(?P<comic>[\d\w-]+)(?:/Issue-)?(?P<issue>\d+)?')
+    Issue_Regex = compile('https?://(?P<host>[^/]+)/Comic/(?P<comic>[\d\w-]+)(?:/Issue-)?(?P<issue>\d+)?')
-    Annual_Regex = re.compile('https?://(?P<host>[^/]+)/Comic/(?P<comic>[\d\w-]+)(?:/Annual-)?(?P<issue>\d+)?')
+    Annual_Regex = compile('https?://(?P<host>[^/]+)/Comic/(?P<comic>[\d\w-]+)(?:/Annual-)?(?P<issue>\d+)?')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(Issue_Regex, line)
+        found = search(Issue_Regex, line)
         if found:
             match = found.groupdict()
             if match['issue']:
@@ -34,7 +34,7 @@ def readcomic_Url_Check(input_url, current_directory, logger):
                 url = str(input_url)
                 Whole_Series(url, current_directory, logger)
-        found = re.search(Annual_Regex, line)
+        found = search(Annual_Regex, line)
         if found:
             match = found.groupdict()
@@ -49,11 +49,11 @@ def readcomic_Url_Check(input_url, current_directory, logger):
     if not found:
         print()
         'Please Check Your URL one again!'
-        sys.exit()
+        exit()
 def Single_Issue(url, current_directory, logger):
-    scraper = cfscrape.create_scraper()
+    scraper = create_scraper()
     connection = scraper.get(url).content
     Series_Name_Splitter = url.split('/')
@@ -61,48 +61,48 @@ def Single_Issue(url, current_directory, logger):
     Issue_Number_Splitter = str(Series_Name_Splitter[5])
     Issue_Or_Annual_Split = str(Issue_Number_Splitter).split("?")
     Issue_Or_Annual = str(Issue_Or_Annual_Split[0]).replace("-", " ").strip()
-    reg = re.findall(r'[(\d)]+', Issue_Number_Splitter)
+    reg = findall(r'[(\d)]+', Issue_Number_Splitter)
     Issue_Number = str(reg[0])
     Raw_File_Directory = str(Series_Name) + '/' + "Chapter " + str(Issue_Or_Annual)
-    File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '',
+    File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '',
                             Raw_File_Directory) # Fix for "Special Characters" in The series name
-    Directory_path = os.path.normpath(File_Directory)
+    Directory_path = path.normpath(File_Directory)
     print('\n')
     print('{:^80}'.format('=====================================================================\n'))
     print('{:^80}'.format('%s - %s') % (Series_Name, Issue_Or_Annual))
     print('{:^80}'.format('=====================================================================\n'))
-    linksList = re.findall('lstImages.push\(\"(.*?)\"\)\;', str(connection))
+    linksList = findall('lstImages.push\(\"(.*?)\"\)\;', str(connection))
-    logging.debug("Image Links : %s" % linksList)
+    debug("Image Links : %s" % linksList)
     for link in linksList:
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         fileName = str(linksList.index(link)) + ".jpg"
-        # logging.debug("Name of File : %s" % fileName)
+        # debug("Name of File : %s" % fileName)
         FileDownloader(fileName, Directory_path, link, logger)
 def Whole_Series(url, current_directory, logger):
-    scraper = cfscrape.create_scraper()
+    scraper = create_scraper()
     connection = scraper.get(url).content
     soup = BeautifulSoup(connection, "html.parser")
-    # logging.debug("Soup : %s" % soup)
+    # debug("Soup : %s" % soup)
     all_links = soup.findAll('table', {'class': 'listing'})
-    # logging.debug("Issue Links : %s" % all_links)
+    # debug("Issue Links : %s" % all_links)
     for link in all_links:
-        # logging.debug("link : %s" % link)
+        # debug("link : %s" % link)
         x = link.findAll('a')
-        logging.debug("Actual Link : %s" % x)
+        debug("Actual Link : %s" % x)
         for a in x:
             url = "http://readcomiconline.to" + a['href']
-            logging.debug("Final URL : %s" % url)
+            debug("Final URL : %s" % url)
             Single_Issue(url, current_directory=current_directory, logger=logger)
     print("Finished Downloading")
View File: comic_dl/sites/yomanga.py
@ -4,61 +4,62 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import print_function from __future__ import print_function
import requests import requests
import re
import os
import sys
from more_itertools import unique_everseen from more_itertools import unique_everseen
from re import search,sub,compile, findall
from os import path,makedirs
from sys import exit
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from downloader.cookies_required import main as FileDownloader from downloader.cookies_required import main as FileDownloader
import logging from logging import debug, basicConfig, DEBUG
from requests import Session,cookies
def single_chapter(url, current_directory, logger): def single_chapter(url, current_directory, logger):
if not url: if not url:
print("Couldn't get the URL. Please report it on Github Repository.") print("Couldn't get the URL. Please report it on Github Repository.")
sys.exit(0) exit(0)
headers = { headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36' 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'
} }
s = requests.Session() s = Session()
response = s.get(url, headers=headers) response = s.get(url, headers=headers)
tasty_cookies = response.cookies tasty_cookies = response.cookies
Page_source = str(response.text.encode('utf-8')) Page_source = str(response.text.encode('utf-8'))
Series_Name = str(re.search('\/read\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories. Series_Name = str(search('\/read\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/dicrectories.
try: try:
chapter_number = int(str(re.search('0\/(.*?)/', url).group(1)).strip().replace('0','').replace('/','')) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer. chapter_number = int(str(search('0\/(.*?)/', url).group(1)).strip().replace('0','').replace('/','')) # Getting the chapter count from the URL itself for naming the folder/dicrectories in integer.
except Exception as e: except Exception as e:
logging.debug("Error in Chapter Number : %s" % e) debug("Error in Chapter Number : %s" % e)
chapter_number = 0 # Name the chapter 0 if nothing INTEGER type comes up chapter_number = 0 # Name the chapter 0 if nothing INTEGER type comes up
Raw_File_Directory = str(Series_Name)+'/'+"Chapter "+str(chapter_number) Raw_File_Directory = str(Series_Name)+'/'+"Chapter "+str(chapter_number)
File_Directory = re.sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name File_Directory = sub('[^A-Za-z0-9\-\.\'\#\/ ]+', '', Raw_File_Directory) # Fix for "Special Characters" in The series name
Directory_path = os.path.normpath(File_Directory) Directory_path = path.normpath(File_Directory)
ddl_image_list = re.findall('comics(.*?)\"', Page_source) ddl_image_list = findall('comics(.*?)\"', Page_source)
ddl_list = list(unique_everseen(ddl_image_list)) ddl_list = list(unique_everseen(ddl_image_list))
logging.debug("Image Links : %s" % ddl_list) debug("Image Links : %s" % ddl_list)
print('\n') print('\n')
print('{:^80}'.format('%s - %s')%(Series_Name,chapter_number)) print('{:^80}'.format('%s - %s')%(Series_Name,chapter_number))
     print('{:^80}'.format('=====================================================================\n'))
     for i in ddl_list:
-        if not os.path.exists(File_Directory):
-            os.makedirs(File_Directory)
+        if not path.exists(File_Directory):
+            makedirs(File_Directory)
         ddl_image = "http://yomanga.co/reader/content/comics"+str(i).replace('"','').replace('\\','')
-        logging.debug("Image Download Link : %s" % ddl_image)
-        File_Name_Final = str(re.findall('\/(\d+)\.[jpg]|[png]', i)).replace("[","").replace("]","").replace("'","").replace(",","").strip()+"."+str(re.findall('\d\.(.*?)$', str(i))).replace(",","").replace("[","").replace("]","").replace("'","").strip()
+        debug("Image Download Link : %s" % ddl_image)
+        File_Name_Final = str(findall('\/(\d+)\.[jpg]|[png]', i)).replace("[","").replace("]","").replace("'","").replace(",","").strip()+"."+str(findall('\d\.(.*?)$', str(i))).replace(",","").replace("[","").replace("]","").replace("'","").strip()
         FileDownloader(File_Name_Final,Directory_path,tasty_cookies,ddl_image, logger)
         print('\n')
@@ -73,36 +74,36 @@ def whole_series(url, current_directory, logger):
     }
-    s = requests.Session()
+    s = Session()
     response = s.get(url, headers=headers)
     tasty_cookies = response.cookies
     Page_source = str(response.text.encode('utf-8'))
-    Series_Name = str(re.search('\/series\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/directories.
+    Series_Name = str(search('\/series\/(.*?)/', url).group(1)).strip().replace('_',' ').title() # Getting the Series Name from the URL itself for naming the folder/directories.
     soup = BeautifulSoup(Page_source, 'html.parser')
     chapter_text = soup.findAll('div',{'class':'title'})
-    logging.debug("Chapter Text : %s" % chapter_text)
+    debug("Chapter Text : %s" % chapter_text)
     for link in chapter_text:
         x = link.findAll('a')
         for a in x:
             url = a['href']
-            logging.debug("Chapter URL : %s" % url)
+            debug("Chapter URL : %s" % url)
             single_chapter(url, current_directory, logger)
 def yomanga_Url_Check(input_url, current_directory, logger):
     if logger == "True":
-        logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
-    yomanga_single_regex = re.compile('https?://(?P<host>yomanga.co)/reader/read/(?P<comic_single>[\d\w-]+)/en/(?P<volume>\d+)?/(?P<Chapter>\d+)?()|(/page/(?P<PageNumber>\d+)?)')
-    yomanga_whole_regex = re.compile('^https?://(?P<host>yomanga.co)/reader/(?P<series>series)?/(?P<comic>[\d\w-]+)?(\/|.)$')
+        basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
+    yomanga_single_regex = compile('https?://(?P<host>yomanga.co)/reader/read/(?P<comic_single>[\d\w-]+)/en/(?P<volume>\d+)?/(?P<Chapter>\d+)?()|(/page/(?P<PageNumber>\d+)?)')
+    yomanga_whole_regex = compile('^https?://(?P<host>yomanga.co)/reader/(?P<series>series)?/(?P<comic>[\d\w-]+)?(\/|.)$')
     lines = input_url.split('\n')
     for line in lines:
-        found = re.search(yomanga_single_regex, line)
+        found = search(yomanga_single_regex, line)
         if found:
             match = found.groupdict()
             if match['Chapter']:
@@ -113,7 +114,7 @@ def yomanga_Url_Check(input_url, current_directory, logger):
-        found = re.search(yomanga_whole_regex, line)
+        found = search(yomanga_whole_regex, line)
         if found:
             match = found.groupdict()
             if match['comic']:
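The rewritten lines above call `path.exists`, `makedirs`, `debug`, `findall`, `search`, `compile`, `Session`, `basicConfig` and `DEBUG` as bare names, so the module header of yomanga.py must import those names directly. The import block itself is outside the hunks shown here; the following is only a sketch of what the "Optimized Imports" change implies, with the names inferred from the calls above:

```python
# Sketch of the direct-name imports the rewritten yomanga.py relies on;
# the actual import block is not part of the hunks shown above.
from logging import DEBUG, basicConfig, debug
from os import makedirs, path
from re import compile, findall, search  # note: this shadows the built-in compile()

from bs4 import BeautifulSoup
from requests import Session
```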

diff --git a/comic_dl/version.py b/comic_dl/version.py

@@ -4,4 +4,4 @@ Date Format : YY/MM/DD
 '''
-__version__ = '2017.01.22'
+__version__ = '2017.02.16'

diff --git a/docs/Changelog.md b/docs/Changelog.md

@@ -15,4 +15,6 @@
 - Added a YouTube Tutorial for the script [2016.12.30]
 - Site support for readcomiconlin.to [2017.01.02]
 - Added `Verbose Logging` [2017.01.22]
 - Fixed chapter count error in Kissmanga [2017.01.22]
+- Fixed #4 [2017.02.16]
+- Optimized Imports [2017.02.16]

diff --git a/docs/index.md b/docs/index.md

@@ -187,10 +187,9 @@ You can check the changelog [**`HERE`**](http://comic-dl.readthedocs.io/en/lates
 If you're planning to open an issue for the script, ask for a new feature, or do anything else that requires opening an issue, then please keep these things in mind.
 ### Reporting Issues
-If you're about to report some issue with the script, then please do include these things:
-* The command you entered. Yes, with the URL.
-* The output of that command. You can simply copy the text from the terminal/command prompt and paste it. Make sure you put it inside `` (backticks).
-* Your Operating System and python version.
+If you're going to report an issue, then please run the script again with the "-v or --verbose" argument. It should generate a file in the same directory, with the name "Error Log.log". Copy that log file's contents, post them on a [Gist](https://gist.github.com/), and share the gist's link when reporting the issue here. Make sure you **EDIT OUT YOUR USERNAME AND PASSWORD**, if they were supplied within the command.
+If you don't include the verbose log, it may take longer to fix the issue(s) you're having.
 ### Suggesting A Feature
 If you're here to make suggestions, please follow the basic syntax to post a request:
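For context on what the verbose switch in the reporting instructions above actually does: in the site modules (see the yomanga.py hunks earlier), a string flag routes DEBUG-level records into "Error Log.log" in the working directory. A minimal sketch, with the flag value mirroring how the scripts pass it around:

```python
# Minimal sketch of the verbose-logging behaviour described above:
# when the flag is on, DEBUG records are written to "Error Log.log"
# in the current working directory instead of the console.
from logging import DEBUG, basicConfig, debug

logger = "True"  # the scripts pass this flag around as a string
if logger == "True":
    basicConfig(format='%(levelname)s: %(message)s',
                filename="Error Log.log", level=DEBUG)
debug("This record ends up in Error Log.log.")
```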

diff --git a/setup.cfg b/setup.cfg

@@ -1,2 +1,2 @@
 [metadata]
-description-file = README.md
+description-file = ReadMe.md

diff --git a/setup.py b/setup.py

@@ -1,55 +1,26 @@
-#!/usr/bin/env python
-# coding: utf-8
-import os
-import sys
-import comic_dl
-try:
-    from setuptools import setup
-except ImportError:
-    from distutils.core import setup
-if sys.argv[-1] == 'publish':
-    os.system('python setup.py sdist upload')
-    sys.exit()
-readme = open('README.txt').read()
+from distutils.core import setup
+
+readme = open('ReadMe.md').read()
 history = open('Changelog.md').read()
-exec(compile(open('comic_dl/version.py').read(),
-             'comic_dl/version.py', 'exec'))
 setup(
-    name='comic-dl',
-    version=__version__,
-    description='Comic-dl is a command line tool to download Comics and Manga from various Manga and Comic sites easily.',
-    long_description=readme + '\n\n' + history,
-    author='Xonshiz',
-    author_email='xonshiz@psychoticelites.com',
-    url='https://github.com/Xonshiz/comic-dl',
-    download_url = 'https://codeload.github.com/Xonshiz/comic-dl/legacy.tar.gz/v2016.11.26(1)',
-    packages=[
-        'comic_dl',
-        'comic_dl.sites',
-        'comic_dl.downloader',
-    ],
-    package_dir={'comic_dl': 'comic_dl'},
-    include_package_data=True,
+    name = 'comic_dl',
+    packages = ['comic_dl','comic_dl.sites','comic_dl.downloader'], # this must be the same as the name above
     install_requires=["selenium",
                       "requests",
                       "more_itertools",
-                      "cfscrape",
                       "bs4"
                       ],
-    entry_points={
-        'console_scripts': [
-            'comic_dl = comic_dl:main'
-        ],
-    },
-    license="MIT Licence",
-    zip_safe=False,
-    keywords = ['comic-dl', 'cli', 'comic downloader','manga downloader','mangafox','batoto','kissmanga'],
+    version = '2017.01.22',
+    description = 'Comic-dl is a command line tool to download Comics and Manga from various Manga and Comic sites easily.',
+    long_description=readme + '\n\n' + history,
+    author = 'Xonshiz',
+    author_email = 'xonshiz@psychoticelites.com',
+    url='https://github.com/Xonshiz/comic-dl',
+    download_url = 'https://codeload.github.com/Xonshiz/comic-dl/legacy.tar.gz/v2016.11.26(1)',
+    keywords = ['comic-dl', 'cli', 'comic downloader','manga downloader','mangafox','batoto','kissmanga','comic naver'],
     classifiers=[
         'Development Status :: 5 - Production/Stable',
         'Environment :: Console',
         'Intended Audience :: End Users/Desktop',
@@ -65,5 +36,4 @@ setup(
         'Operating System :: OS Independent',
         'Topic :: Multimedia :: Graphics'
     ],
-    #test_suite='tests',
 )
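Worth noting: since the rewritten setup.py drops the `exec(...)`-based version import, it hard-codes `version = '2017.01.22'` while comic_dl/version.py above moves to 2017.02.16. To sanity-check the metadata this script produces without building anything, distutils can load it in-process; a sketch, assuming it is run from the repository root so ReadMe.md and Changelog.md resolve:

```python
# Load setup.py and stop right after setup() populates the Distribution,
# so the metadata can be inspected without running any build commands.
from distutils.core import run_setup

dist = run_setup('setup.py', stop_after='init')
print(dist.metadata.name)     # comic_dl
print(dist.metadata.version)  # 2017.01.22 (not the 2017.02.16 in version.py)
print(dist.packages)          # ['comic_dl', 'comic_dl.sites', 'comic_dl.downloader']
```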