Example No. 1
0
def save_volume(sender, instance, **kwargs):
    """Post-save hook for a Volume: invalidate the page cache and rebuild
    the derived cover images (WebP, JPEG-2000, blurred JPEG preview) next
    to the uploaded cover, removing stale files from any earlier cover.
    """
    if instance.series:
        clear_pages_cache()
    if instance.volume_cover:
        save_dir = os.path.dirname(str(instance.volume_cover))
        vol_cover = os.path.basename(str(instance.volume_cover))
        cover_dir = os.path.join(settings.MEDIA_ROOT, save_dir)
        # Remove leftovers from a previously uploaded cover.
        for old_data in os.listdir(cover_dir):
            if old_data != vol_cover:
                os.remove(os.path.join(cover_dir, old_data))
        filename, ext = vol_cover.rsplit(".", 1)
        image = Image.open(os.path.join(cover_dir, vol_cover))
        # BUG FIX: the derived files were written to the literal name
        # "(unknown)" while `filename` was computed and never used;
        # reuse the cover's base name for every derived file.
        image.save(
            os.path.join(cover_dir, f"{filename}.webp"),
            lossless=False,
            quality=60,
            method=6,
        )
        image.save(os.path.join(cover_dir, f"{filename}.jp2"))
        blur = Image.open(os.path.join(cover_dir, vol_cover))
        blur = blur.convert("RGB")
        blur.thumbnail((blur.width / 8, blur.height / 8), Image.ANTIALIAS)
        blur = blur.filter(ImageFilter.GaussianBlur(radius=4))
        blur.save(
            os.path.join(cover_dir, f"{filename}_blur.{ext}"),
            "JPEG",
            quality=100,
            optimize=True,
            progressive=True,
        )
Example No. 2
0
def delete_chapter_folder(sender, instance, **kwargs):
    """Post-delete hook for a Chapter: remove its page folders (raw,
    shrunk, shrunk+blur), its zip archive, the chapter folder itself once
    empty, and the associated HitCount row.
    """
    if instance.folder and instance.series:
        clear_pages_cache()
        # BUG FIX: the original read `settings.settings.MEDIA_ROOT`, which
        # raises AttributeError at runtime (cf. the other handlers in this
        # module, which use `settings.MEDIA_ROOT`).
        folder_path = os.path.join(
            settings.MEDIA_ROOT,
            "manga",
            instance.series.slug,
            "chapters",
            instance.folder,
        )
        group_id = str(instance.group.id)
        # The three per-group page directories, removed if present.
        for suffix in ("", "_shrunk", "_shrunk_blur"):
            target = os.path.join(folder_path, group_id + suffix)
            if os.path.exists(target):
                shutil.rmtree(target)
        zip_path = os.path.join(
            folder_path, f"{instance.clean_chapter_number()}.zip")
        if os.path.exists(zip_path):
            os.remove(zip_path)
        # Prune the chapter folder once nothing is left inside it.
        if os.path.exists(folder_path) and not os.listdir(folder_path):
            shutil.rmtree(folder_path)
        chapter = ContentType.objects.get(app_label="reader", model="chapter")
        hit_count_obj = HitCount.objects.filter(content_type=chapter,
                                                object_id=instance.id).first()
        if hit_count_obj:
            hit_count_obj.delete()
Example No. 3
0
 def create_chapter_obj(self, chapter, group, series, latest_volume, title):
     """Create a Chapter row and its on-disk page directories.

     Reuses the folder of an already-existing chapter with the same number
     so re-uploaded pages land in the same place; otherwise builds a new
     folder name from the chapter number plus a random suffix.

     Returns a tuple ``(chapter_folder_path, group_id_str)``.
     """
     chapter_number = float(chapter)
     existing_chapter = Chapter.objects.filter(
         chapter_number=chapter_number, series=series).first()
     number_text = str(chapter_number)
     # Folder prefix: zero-padded whole part, then "-<decimals>" for
     # fractional chapters (e.g. 10.5 -> "0010-5_", 10.0 -> "0010_").
     folder_prefix = f"{int(chapter_number):04}"
     if number_text.endswith("0"):
         folder_prefix += "_"
     else:
         folder_prefix += f"-{number_text.rsplit('.')[1]}_"
     if existing_chapter:
         uid = existing_chapter.folder
     else:
         uid = folder_prefix + random_chars()
     Chapter.objects.create(
         chapter_number=chapter_number,
         group=group,
         series=series,
         folder=uid,
         title=title,
         volume=latest_volume,
         uploaded_on=datetime.utcnow().replace(tzinfo=timezone.utc),
     )
     group_id = str(group.id)
     chapter_folder = os.path.join(settings.MEDIA_ROOT, "manga",
                                   series.slug, "chapters", uid)
     # Raw pages plus the shrunk and blurred preview directories.
     for suffix in ("", "_shrunk", "_shrunk_blur"):
         os.makedirs(os.path.join(chapter_folder, group_id + suffix))
     clear_pages_cache()
     return chapter_folder, group_id
Example No. 4
0
def delete_volume_folder(sender, instance, **kwargs):
    """Post-delete hook for a Volume: remove its volume-cover folder."""
    if not instance.volume_cover:
        return
    clear_pages_cache()
    cover_dir = os.path.join(settings.MEDIA_ROOT, "manga",
                             instance.series.slug, "volume_covers",
                             str(instance.volume_number))
    if os.path.exists(cover_dir):
        shutil.rmtree(cover_dir)
Example No. 5
0
def delete_chapter_folder(sender, instance, **kwargs):
    """Post-delete hook for a Chapter: remove its page data, prune the
    empty chapter folder, and delete the associated HitCount row.
    """
    if not (instance.folder and instance.series):
        return
    clear_pages_cache()
    chapter_dir = os.path.join(
        settings.MEDIA_ROOT,
        "manga",
        instance.series.slug,
        "chapters",
        instance.folder,
    )
    delete_chapter_pages_if_exists(chapter_dir,
                                   instance.clean_chapter_number(),
                                   instance.group.id)
    # Only remove the chapter folder once nothing is left inside it.
    if os.path.exists(chapter_dir) and not os.listdir(chapter_dir):
        shutil.rmtree(chapter_dir, ignore_errors=True)
    chapter_type = ContentType.objects.get(app_label="reader",
                                           model="chapter")
    hits = HitCount.objects.filter(content_type=chapter_type,
                                   object_id=instance.id).first()
    if hits:
        hits.delete()
Example No. 6
0
 async def mangadex_download(self,
                             chapters,
                             series,
                             group,
                             latest_volume,
                             url=""):
     """Download chapter pages from Mangadex, write them to disk, build
     preview pages, zip each chapter, and index it for search.

     ``chapters`` maps chapter number -> (title, list of page URLs), or a
     falsy value when the chapter could not be resolved upstream.
     """
     for chapter in chapters:
         if not chapters[chapter]:
             print(f"Could not download chapter {chapter}.")
             continue
         chapter_pages = chapters[chapter][1]
         chapter_folder, group_folder = self.create_chapter_obj(
             chapter, group, series, latest_volume, chapters[chapter][0])
         ch = Chapter.objects.get(series=series,
                                  chapter_number=float(chapter),
                                  group=group)
         # Pages are zero-padded to the width of the page count so they
         # sort lexicographically.
         padding = len(str(len(chapter_pages)))
         print(f"Downloading chapter {chapter}...")
         print(f"Found {len(chapter_pages)} pages...")
         async with aiohttp.ClientSession() as session:
             for idx, page in enumerate(chapter_pages):
                 extension = page.rsplit(".", 1)[1]
                 page_file = f"{str(idx+1).zfill(padding)}.{extension}"
                 async with session.get(page) as resp:
                     if resp.status == 200:
                         page_content = await resp.read()
                         with open(
                                 os.path.join(chapter_folder, group_folder,
                                              page_file), 'wb') as f:
                             f.write(page_content)
                         create_preview_pages(chapter_folder, group_folder,
                                              page_file)
                     else:
                         print("failed at mangadex_download", idx, page)
         # PERF FIX: the original re-zipped the entire chapter after every
         # single page; zipping once, after all pages are on disk, yields
         # the same archive without the quadratic rework.
         zip_chapter(series.slug, ch.chapter_number)
         clear_pages_cache()
         print(f"Successfully downloaded chapter and added to db.")
         await self.index_chapter(ch, group_folder)
Example No. 7
0
def post_save_chapter(sender, instance, **kwargs):
    """Post-save hook for a Chapter: invalidate the cached page data."""
    if not instance.series:
        return
    clear_pages_cache()
Example No. 8
0
 async def jaiminis_box_checker(self,
                                downloaded_chapters,
                                series,
                                latest_volume,
                                url,
                                latest_chap=None):
     """Check Jaiminis Box for chapters of *series* and download them.

     Two discovery modes:
       * ``latest_chap`` is None: scrape the series listing at ``url`` and
         collect every chapter that is neither already downloaded nor in
         ``self.blacklist_jb[series.slug]``.
       * ``latest_chap`` given: build the reader/download URLs for that
         single chapter directly (its number's dot becomes a "/" in the
         URL path).

     Each found chapter is fetched as a zip archive, unpacked page by page
     into the chapter folder, preview pages are generated, the chapter is
     zipped and indexed.  Re-downloads of known chapters bump the chapter
     version and replace the existing pages in place.

     NOTE(review): assumes ``self.blacklist_jb`` has a key for
     ``series.slug`` — a missing key raises KeyError; confirm upstream.
     """
     chapters = {}
     group = Group.objects.get(pk=self.jb_group)
     series = Series.objects.get(slug=series)
     # Mode 1: scrape the series listing for chapters we don't have yet.
     if not latest_chap:
         async with aiohttp.ClientSession() as session:
             async with session.get(url) as resp:
                 if resp.status == 200:
                     webpage = await resp.text()
                     soup = BeautifulSoup(webpage, "html.parser")
                     for chapter in soup.select(".list .group .element"):
                         # Titles look like "Chapter <number>: <title>".
                         # NOTE(review): a non-matching title would make
                         # `chapter_regex` None and crash on .group(1).
                         chapter_regex = re.search(
                             r'^Chapter (\d*\.?\d*): (.*)$',
                             chapter.select(".title")[0].text)
                         chap_numb = chapter_regex.group(1)
                         if str(float(
                                 chap_numb)) in downloaded_chapters or str(
                                     float(chap_numb)) in self.blacklist_jb[
                                         series.slug]:
                             continue
                         else:
                             print(
                                 f"Found new chapter ({chap_numb}) on Jaiminisbox for {series}."
                             )
                             chapter_dl_url = chapter.select(
                                 ".icon_wrapper a")[0]["href"]
                             chapters[chap_numb] = {
                                 "title": chapter_regex.group(2),
                                 "url": chapter_dl_url
                             }
                 else:
                     print(
                         f"[{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')}] Failed to reach JB page for {series}. Response status: {resp.status}"
                     )
     # Mode 2: target one known chapter and scrape only its title.
     else:
         latest_chap_slug = latest_chap.replace(".", "/")
         async with aiohttp.ClientSession() as session:
             async with session.get(
                     f"https://jaiminisbox.com/reader/read/{series.slug.lower()}/en/0/{latest_chap_slug}/page/1"
             ) as resp:
                 if resp.status == 200:
                     webpage = await resp.text()
                     soup = BeautifulSoup(webpage, "html.parser")
                     # The reader page title is "<chapter>: <title>".
                     title = soup.select(".tbtitle .text a")[1].text.split(
                         ":", 1)[1].strip()
                     chapters[str(latest_chap)] = {
                         "title":
                         title,
                         "url":
                         f"https://jaiminisbox.com/reader/download/{series.slug.lower()}/en/0/{latest_chap_slug}/"
                     }
                 else:
                     print(resp.status)
     # Download phase: create a fresh Chapter or refresh an existing one.
     for chapter in chapters:
         if str(float(chapter)) not in downloaded_chapters:
             chapter_folder, group_folder = self.create_chapter_obj(
                 chapter, group, series, latest_volume,
                 chapters[chapter]["title"])
             ch = Chapter.objects.get(series=series,
                                      group=self.jb_group,
                                      chapter_number=float(chapter))
             print(f"Downloading chapter {chapter}...")
         else:
             # Re-download: bump the version and wipe the old page files
             # so the new archive replaces them in place.
             ch = Chapter.objects.get(series=series,
                                      group=self.jb_group,
                                      chapter_number=float(chapter))
             ch.version = ch.version + 1 if ch.version else 2
             ch.save()
             chapter_folder = os.path.join(settings.MEDIA_ROOT, "manga",
                                           series.slug, "chapters",
                                           ch.folder)
             group_folder = str(self.jb_group)
             print(f"Reupdating chapter pages for {chapter}...")
             for f in os.listdir(os.path.join(chapter_folder,
                                              group_folder)):
                 os.remove(os.path.join(chapter_folder, group_folder, f))
         async with aiohttp.ClientSession() as session:
             async with session.get(chapters[chapter]["url"]) as resp:
                 if resp.status == 200:
                     data = await resp.read()
                     with zipfile.ZipFile(io.BytesIO(data)) as zip_file:
                         all_pages = sorted(zip_file.namelist())
                         # Zero-pad page names to the page-count width so
                         # they sort lexicographically.
                         padding = len(str(len(all_pages)))
                         for idx, page in enumerate(all_pages):
                             extension = page.rsplit(".", 1)[1]
                             page_file = f"{str(idx+1).zfill(padding)}.{extension}"
                             with open(
                                     os.path.join(chapter_folder,
                                                  group_folder, page_file),
                                     "wb") as f:
                                 f.write(zip_file.read(page))
                             create_preview_pages(chapter_folder,
                                                  group_folder, page_file)
                             # NOTE(review): zip_chapter runs once per
                             # page; zipping after the loop would suffice.
                             zip_chapter(series.slug, ch.chapter_number)
                     clear_pages_cache()
                     print(
                         f"Successfully downloaded chapter and added to db."
                     )
                     await self.index_chapter(ch, group_folder)