Commits on Source (2)
@@ -23,6 +23,7 @@ import logging
 import cfscrape
 import requests
 from puffotter.os import makedirs
+from puffotter.print import pprint
 from typing import Callable, List
 from typing import Optional
 from subprocess import Popen, DEVNULL
@@ -187,6 +188,7 @@ class Chapter:
         index_fill = len(str(len(self.pages)))
         downloaded = []
         for i, image_url in enumerate(self.pages):
+            cloudflare = False
@@ -198,8 +200,12 @@ class Chapter:
             filename = "{}.{}".format(str(i).zfill(index_fill), ext)
             image_file = os.path.join(tempdir, filename)

-            self.logger.info("Downloading image file {} to {}"
-                             .format(image_url, image_file))
+            pprint("{} Chapter {} ({}/{})".format(
+                self.series_name,
+                self.chapter_number,
+                i + 1,
+                len(self.pages)
+            ), fg="black", bg="lyellow", end="\r")

             if cloudflare:
                 scraper = cfscrape.create_scraper()
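The end="\r" keyword is what turns the pprint call into an in-place progress line: the carriage return moves the cursor back to the start of the line, so each iteration overwrites the previous message instead of scrolling. A minimal sketch of the same pattern with plain print (pprint's colour arguments left out, page names hypothetical):

import time

pages = ["001.png", "002.png", "003.png"]  # hypothetical page list
for i, page in enumerate(pages):
    # "\r" returns the cursor to the start of the line, so each
    # iteration rewrites the previous progress message in place.
    print("Example Chapter 1 ({}/{})".format(i + 1, len(pages)), end="\r")
    time.sleep(0.1)  # stand-in for the actual image download
print()  # move past the progress line once the loop is done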
@@ -219,6 +225,8 @@ class Chapter:
                 downloaded.append(image_file)

+        print()
+
         if len(downloaded) == 0:
             self.logger.warning("Couldn't download chapter {}".format(self))
         else:
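The cloudflare flag chooses between a plain requests download and a cfscrape session; cfscrape.create_scraper() returns a requests-compatible session that solves Cloudflare's browser challenge before issuing the GET. A minimal sketch of that branch, where fetch_image is a hypothetical helper and not part of manga-dl:

import cfscrape
import requests

def fetch_image(image_url: str, cloudflare: bool) -> bytes:
    # cfscrape.create_scraper() wraps a requests.Session and passes
    # Cloudflare's anti-bot check before fetching the resource.
    if cloudflare:
        scraper = cfscrape.create_scraper()
        return scraper.get(image_url).content
    return requests.get(image_url).content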
@@ -14,7 +14,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
-along with manga-dl. If not, see <http://www.gnu.org/licenses/>.
+along with manga-dl. If not, see <http://www.gnu.cc/licenses/>.
 LICENSE"""
 import re
@@ -27,7 +27,7 @@ from manga_dl.scrapers.Scraper import Scraper
 class MangaDexScraper(Scraper):
     """
-    Scraper for mangadex.org
+    Scraper for mangadex.cc
     """

     @classmethod
@@ -44,7 +44,7 @@ class MangaDexScraper(Scraper):
         :param url: The URL to check
         :return: Whether the URL is valid
         """
-        return bool(re.match(r"^https://mangadex.org/title/[0-9]+", url))
+        return bool(re.match(r"^https://mangadex.cc/title/[0-9]+", url))

     def generate_url(self, _id: str) -> str:
         """
@@ -52,18 +52,18 @@ class MangaDexScraper(Scraper):
         :param _id: The ID to use
         :return: The generated URL
         """
-        return "https://mangadex.org/title/" + _id
+        return "https://mangadex.cc/title/" + _id

     def _load_chapters(self, url: str) -> List[Chapter]:
         """
-        Loads the chapters from mangadex.org
+        Loads the chapters from mangadex.cc
         :param url: The URL to scrape
         :return: The chapters found for the series
         """
         scraper = cfscrape.create_scraper()
-        mangadex_id = url.split("https://mangadex.org/title/")[1].split("/")[0]
-        manga_url = "https://mangadex.org/api/manga/" + str(mangadex_id)
+        mangadex_id = url.split("https://mangadex.cc/title/")[1].split("/")[0]
+        manga_url = "https://mangadex.cc/api/manga/" + str(mangadex_id)
         resp = scraper.get(manga_url)
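The series ID is the first path segment after /title/, and the JSON API is then queried at /api/manga/<id> through the Cloudflare-aware session. A hedged sketch of that flow; the URL is hypothetical and the response layout ({"chapter": {id: {...}}}) is inferred from how chapter_list is iterated below, not confirmed by the diff:

import cfscrape

url = "https://mangadex.cc/title/12345/some-series"  # hypothetical input
scraper = cfscrape.create_scraper()
mangadex_id = url.split("https://mangadex.cc/title/")[1].split("/")[0]
resp = scraper.get("https://mangadex.cc/api/manga/" + mangadex_id)
if resp.status_code == 200:
    # Assumed response shape: {"manga": {...}, "chapter": {id: {...}}}
    chapter_list = resp.json().get("chapter", {})
    print("{} chapters found".format(len(chapter_list)))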
@@ -85,7 +85,7 @@ class MangaDexScraper(Scraper):
         chapters = []
         for chapter_id, chapter in chapter_list.items():
-            chapter_url = "https://mangadex.org/api/chapter/" + str(chapter_id)
+            chapter_url = "https://mangadex.cc/api/chapter/" + str(chapter_id)
             chapters.append(Chapter(
                 chapter_url,
                 chapter["lang_code"],
@@ -123,7 +123,7 @@ class MangaDexScraper(Scraper):
         server = chapter_info["server"]
         if server == "/data/":
-            server = "CF!https://mangadex.org/data/"  # Cloudflare protected
+            server = "CF!https://mangadex.cc/data/"  # Cloudflare protected

         chapter_hash = chapter_info["hash"]
         base_url = server + chapter_hash + "/"
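The "CF!" prefix tags image URLs that sit behind Cloudflare, which is presumably what sets the cloudflare flag in Chapter's download loop from the first commit. A hypothetical sketch of consuming the marker on the download side (values invented for illustration):

image_url = "CF!https://mangadex.cc/data/abc123def456/x1.png"  # hypothetical
cloudflare = image_url.startswith("CF!")
if cloudflare:
    # Strip the marker before handing the real URL to cfscrape.
    image_url = image_url[len("CF!"):]
print(cloudflare, image_url)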