Example #1
    def test_find_chapter_images(self):
        item_markup = Path(path.join(path.dirname(__file__), '../resources/manga_item.html')).read_text(encoding='utf-8')
        item_content_markup = Path(path.join(path.dirname(__file__), '../resources/manga_item_content.html')).read_text(encoding='utf-8')

        manga = MangaScraper(item_markup, Sources.MANGAKAKALOT)
        updated_chapter = manga.find_images(item_content_markup, manga.chapters[0])

        assert len(updated_chapter.images) == 8
        assert updated_chapter.images[0] == 'https://s8.mkklcdnv8.com/mangakakalot/r2/rx919523/chapter_71/1.jpg'
Example #2
    def test_replace_chapter(self):
        markup = Path(path.join(path.dirname(__file__), '../resources/manga_item.html')).read_text(encoding='utf-8')

        manga = MangaScraper(markup, Sources.MANGAKAKALOT)
        manga.update(manga.chapters[0], Chapter('New', 'New', 'New'))
        chapter = manga.chapters[0]

        assert chapter.uid == 'New'
        assert chapter.url == 'New'
        assert chapter.title == 'New'
Example #3
def test_search_mangakakalot():
    mangas = MangaScraper.search('Sorcerer', Sources.MANGAKAKALOT)
    print(len(mangas))
    print(mangas[0])

    assert mangas[0]['title'] == 'I Am The Sorcerer King'
    assert mangas[0]['url'] == 'https://mangakakalot.com/manga/rx919523'
Example #4
def test_search_leviatanscans():
    mangas = MangaScraper.search('survival', Sources.LEVIATANSCANS)
    print(len(mangas))
    print(mangas[0])

    assert mangas[0]['title'] == 'Survival Story of a Sword King in a Fantasy World'
Example #5
    def s(self):
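        # Search Mangakakalot for the keyword typed into the text input and
        # fill the results list with matching titles and URLs.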
        mangas = MangaScraper.search(self.keyword_text_input.text, Sources.MANGAKAKALOT)

        manga_list = []
        for manga in mangas:
            if Sources.MANGAKAKALOT in manga.url:
                manga_list.append({'text': manga.title, 'url': manga.url})

        self.searched_list.data = manga_list
        print("Searched Done.")
Example #6
    def test_find_chapters_leviathanscans(self):
        markup = Path(path.join(path.dirname(__file__), "../resources/leviatan_item.html")).read_text(encoding="utf-8")

        manga = MangaScraper(markup, Sources.LEVIATANSCANS)
        chapters = manga.chapters
        chapter = chapters[0]

        assert len(chapters) == 5
        assert chapter.uid == '4'
        assert chapter.url == 'https://leviatanscans.com/comics/11268-survival-story-of-a-sword-king-in-a-fantasy-world/1/4'
        assert chapter.title == 'Chapter 4'
Example #7
    def test_find_chapters_mangakakalot(self):
        markup = Path(path.join(path.dirname(__file__), '../resources/manga_item.html')).read_text(encoding='utf-8')

        manga = MangaScraper(markup, Sources.MANGAKAKALOT)
        chapters = manga.chapters
        chapter = chapters[0]

        assert len(chapters) == 71
        assert chapter.uid == '71'
        assert chapter.url == 'https://mangakakalot.com/chapter/rx919523/chapter_71'
        assert chapter.title == 'Chapter 71'
Example #8
    def load_info(self, url):
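        # Fetch the manga page, scrape its metadata, and populate the title
        # label and chapter list in the UI.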

        resp = requests.get(url)
        scraper = MangaScraper(resp.text, Sources.MANGAKAKALOT)
        manga = scraper.manga

        self.title_label.text = manga.title

        chapters = []
        for chapter in manga.chapters:
            chapters.append({"text": chapter.title, "chapter": chapter})
        self.chapter_list.data = chapters
Example #9
    def retrieve_images(self, chapter):
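        # Fetch the chapter page and, if the request succeeds, collect its
        # image URLs and download them concurrently on a thread pool.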
        resp = requests.get(chapter.url)
        if resp.ok:
            markup = resp.text

            list_dict_img_bytes = []
            image_urls = MangaScraper.find_images(markup, Sources.MANGAKAKALOT)

            with concurrent.futures.ThreadPoolExecutor() as executor:
                results = [executor.submit(self.retrieve_image, url) for url in image_urls]

                for future in concurrent.futures.as_completed(results):
                    print("Complete")
                    # Unpack the worker's result once rather than calling future.result() twice,
                    # then hand the image back to the UI thread via Kivy's Clock.
                    result = future.result()
                    Clock.schedule_once(partial(self.add_retrieved_image, result[0], result[1]), 0)
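The retrieve_image worker submitted to the executor above is not shown in this example. A minimal sketch, assuming it simply downloads one page with requests and returns a two-element tuple (here the page URL and the raw bytes; the real return shape is an assumption) matching the two values unpacked from future.result() above:

    def retrieve_image(self, url):
        # Hypothetical sketch only; the real method is not part of the example.
        # Download a single page image and return a pair for add_retrieved_image.
        resp = requests.get(url)
        resp.raise_for_status()
        return url, resp.content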