Ejemplo n.º 1
0
    def __getAllChapter__(self, url: str) -> Union[Tuple[List[Tuple[str, str]], BeautifulSoup], None]:
        """Fetch *url* and return its chapter list together with the parsed page.

        Returns a tuple ``(list_chapter, soup)`` on success, or ``None`` when
        the page could not be retrieved.  (The original annotation claimed a
        Union of list/soup, but the code always returned the pair — fixed.)
        """
        r = getAPage(url, True)
        if r is None:
            return None
        soup = BeautifulSoup(r.text, features="html.parser")
        # recupAllChapter extracts the chapter entries from the parsed page.
        list_chapter = self.recupAllChapter(soup)
        return (list_chapter, soup)
Ejemplo n.º 2
0
    def __getOneChapterManga__(self, url: str, path: str) -> Union[List[Tuple[str, int, str, str]], None]:
        """Fetch one manga chapter page and return its image descriptors.

        Prints an error and returns ``None`` when the page fetch fails.
        """
        r = getAPage(url)
        if r is None:
            # BUG FIX: colored() was called with a single tuple argument
            # (("msg", "red")); text and color must be separate arguments.
            print(colored("Problem Page for this URL: " + url, "red"))
            return None
        soup = BeautifulSoup(r.text, features="html.parser")
        # getImageFromOneChapter yields one tuple per image of the chapter.
        return self.getImageFromOneChapter(soup, path)
def downloadImage(path, url) -> Union[bool, None]:
    """Download the image at *url* to *path* unless the file already exists.

    Returns True on success, False when the fetch fails or the payload is
    not a recognised image, and None when *path* already exists (the
    original ``-> bool`` annotation was wrong; None is kept for backward
    compatibility with existing callers).

    NOTE(review): ``imghdr`` is deprecated and removed in Python 3.13 —
    consider an alternative image sniff before upgrading.
    """
    # Guard clause: never overwrite an existing file.
    if os.path.isfile(path):
        return None
    r = getAPage(url, False, True)
    # Reject failed fetches and payloads that are not images.
    if r is None or imghdr.what(None, r.content) is None:
        return False
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'wb') as f:
        f.write(r.content)
    return True
Ejemplo n.º 4
0
 def __getInfoManga__(self, url, soup=None) -> Union[Dict[str, str], None]:
     """Collect the manga's metadata from its info page.

     If *soup* is not supplied, the page at *url* is fetched and parsed
     first; returns ``None`` (and logs to stderr) when that fetch fails.
     The returned dict is augmented with the front image and source URLs.
     """
     if soup is None:
         r = getAPage(url)
         if r is None:
             cprint("Problem Info Page", 'red', file=sys.stderr)
             return None
         soup = BeautifulSoup(r.text, features="html.parser")
     info = self.recupInfoManga(soup)
     info["frontImage"] = self.getMangaImage(soup)
     info["urlSite"] = self.url
     info["urlInfo"] = url
     return info
Ejemplo n.º 5
0
def test_getAPage():
    """Smoke-test getAPage: invalid URL yields None, reachable page does not."""
    assert getAPage("dab", True) is None
    # NOTE(review): network-dependent assertion — flaky offline or if the
    # site changes; consider mocking the HTTP layer.
    assert getAPage("https://wuxiaworld.site/novel/the-legendary-mechanic/",
                    True) is not None
Ejemplo n.º 6
0
 def __getSoupFromNovel__(self, urlOneChapter: str) -> BeautifulSoup:
     """Fetch one novel chapter page and return it parsed as a soup.

     Returns ``None`` when the page could not be retrieved.
     """
     r = getAPage(urlOneChapter)
     if r is None:
         return None
     return BeautifulSoup(r.text, features="html.parser")