def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*, oldest first.

    The site lists chapters newest-first inside a ``<ul class="chapters">``,
    so the scraped order is flipped before returning.

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    listing = soup.find('ul', class_='chapters')
    found = [Chapter('', get_text(link), link['href'])
             for link in listing.find_all('a')]
    # Reverse so the list runs first chapter -> last chapter.
    return found[::-1]
def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*, oldest first.

    Chapter titles come from each anchor's ``title`` attribute inside the
    ``<section id="examples">`` element; the newest-first page order is
    reversed before returning.

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    section = soup.find('section', id='examples')
    return [Chapter('', link['title'], link['href'])
            for link in reversed(section.find_all('a'))]
def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*.

    Per the original author's note, this site already lists chapters
    oldest-first in its ``<table id="listing">``, so no reversal is
    applied. Relative hrefs are made absolute with ``self.site_url``.

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    table = soup.find('table', id='listing')
    base = self.site_url
    return [Chapter('', get_text(link), base + link['href'])
            for link in table.find_all('a')]
def _download_chapter(self, chapter: Chapter) -> None:
    """Downloads the given chapter only if it is not downloaded already,
    if the chapter was previously interrupted the download is resumed.

    A fresh chapter (``chapter.current == -1``) first fetches its image
    list; progress is persisted via ``chapter.save_data()`` after every
    image so an interrupted run resumes where it stopped.

    :param chapter: the chapter to download.
    """
    if chapter.current == len(chapter.images):
        # Nothing left to fetch for this chapter.
        self.logger.info(
            _("Skipping chapter '{}': Already downloaded.").format(
                chapter))
        return
    if chapter.current == -1:
        # First time we see this chapter: resolve its image URLs.
        self.logger.info(
            _("Getting images list for chapter '{}' ...").format(chapter))
        chapter.images = self.get_images(chapter.url)
        chapter.current = 0
        chapter.save_data()
    total = len(chapter.images)
    width = len(str(total))  # zero-pad file names to a uniform width
    # Resume from the first image not yet downloaded; the slice is taken
    # once, and enumerate reproduces the original 1-based counter.
    for number, url in enumerate(chapter.images[chapter.current:],
                                 start=chapter.current + 1):
        print('\r' + _("[{}] Downloading '{}' (image: {}/{})").format(
            self.name, chapter, number, total), end='')
        dest = os.path.join(chapter.path, str(number).zfill(width))
        self.download_img(self.get_image(url), dest)
        chapter.current = number
        chapter.save_data()  # persist progress after each image
    if total:
        print()  # finish the in-place progress line
def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*, oldest first.

    ``'http:'`` is prepended to every href unconditionally — presumably
    the site emits scheme-relative ``//...`` links; verify against a
    live page. The newest-first page order is reversed before returning.

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    listing = soup.find('div', class_='detail_list').ul
    links = listing.find_all('a')
    return [Chapter('', get_text(link), 'http:' + link['href'])
            for link in reversed(links)]
def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*, oldest first.

    Root-relative hrefs (starting with ``/``) are turned into absolute
    ``https:`` URLs; the newest-first page order is reversed.

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    listing = soup.find('div', class_='chapter-list')
    result = []
    for link in reversed(listing.find_all('a')):
        href = link['href']
        if href.startswith('/'):
            href = 'https:' + href
        result.append(Chapter('', get_text(link), href))
    return result
def get_chapters(self, manga_url: str) -> 'List[Chapter]':
    """Return the chapters listed on *manga_url*, oldest first.

    If the page is a ``warning`` interstitial, its link is followed to
    reach the real chapter listing before scraping. The listing lives in
    a ``<div class="silde">`` (class name as spelled by the site).

    :param manga_url: URL of the manga's detail page.
    """
    soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
    warning = soup.find('div', class_='warning')
    if warning:
        # Fetch the page the interstitial points at and re-parse.
        soup = BeautifulSoup(self.get_str(warning.a['href']), 'html.parser')
    container = soup.find('div', class_='silde')
    found = [Chapter('', link['title'], link['href'])
             for link in container.find_all('a', class_='chapter_list_a')]
    return found[::-1]