Example #1
 def get_images(self, chapter_url: str) -> 'List[str]':
     soup = BeautifulSoup(self.get_str(chapter_url), 'html.parser')
     # each page image URL appears as an <option> value in the
     # page-selector dropdown
     opts = soup.find('select', class_='wid60').find_all('option')
     # the option values are protocol-relative ('//...'), so prepend a
     # scheme, and skip the 'Featured' placeholder entry
     return [
         'http:' + opt['value'] for opt in opts
         if get_text(opt) != 'Featured'
     ]
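
All the snippets on this page are methods cut out of per-site scraper classes, and they lean on shared names (get_str, get_json, get_text, Chapter, Manga, self.site_url, self.name) that the excerpts never define. Below is a minimal sketch of that scaffolding, inferred only from how the methods use it; the field names, the Site class, and the placeholder values are assumptions, not the project's real definitions.

 from typing import List, NamedTuple  # the snippets' 'List[...]' annotations need this

 import requests
 from bs4 import Tag

 class Chapter(NamedTuple):
     # fields inferred from calls like Chapter('', title, url); real names may differ
     id: str
     title: str
     url: str

 class Manga(NamedTuple):
     # fields inferred from calls like Manga('', title, url, self.name)
     id: str
     title: str
     url: str
     site: str

 def get_text(tag: Tag) -> str:
     # collapse an element's inner text and trim surrounding whitespace
     return tag.get_text().strip()

 class Site:
     name = 'example'                  # hypothetical values
     site_url = 'https://example.com'

     def get_str(self, url: str, params: dict = None) -> str:
         # fetch a page and return the response body as text
         return requests.get(url, params=params).text

     def get_json(self, url: str, data: dict = None, method: str = 'GET'):
         # fetch and decode a JSON endpoint (used by Example #8)
         return requests.request(method, url, data=data).json()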
Example #2
 def get_chapters(self, manga_url: str) -> 'List[Chapter]':
     soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
     chapters = [
         Chapter('', get_text(a), a['href'])
         for a in soup.find('ul', class_='chapters').find_all('a')
     ]
     # the site lists chapters newest-first; reverse into reading order
     chapters.reverse()
     return chapters
Example #3
 def get_chapters(self, manga_url: str) -> 'List[Chapter]':
     soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
     chapters = [
         Chapter('', get_text(a), self.site_url + a['href'])
         for a in soup.find('table', id='listing').find_all('a')
     ]
     # no `chapters.reverse()` needed here: this site already lists
     # chapters oldest-first
     return chapters
Example #4
 def get_chapters(self, manga_url: str) -> 'List[Chapter]':
     soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
     ulist = soup.find('div', class_='detail_list').ul
     chapters = [
         # hrefs here are protocol-relative, so prepend a scheme
         Chapter('', get_text(a), 'http:' + a['href'])
         for a in ulist.find_all('a')
     ]
     chapters.reverse()
     return chapters
Example #5
 def get_chapters(self, manga_url: str) -> 'List[Chapter]':
     soup = BeautifulSoup(self.get_str(manga_url), 'html.parser')
     div = soup.find('div', class_='chapter-list')
     chapters = []
     for anchor in div.find_all('a'):
         # hrefs on this site arrive protocol-relative ('//...');
         # prepend a scheme to make them absolute
         if anchor['href'].startswith('/'):
             anchor['href'] = 'https:' + anchor['href']
         chapters.append(Chapter('', get_text(anchor), anchor['href']))
     chapters.reverse()
     return chapters
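
Examples #1, #4 and #5 each turn protocol-relative hrefs ('//host/path') into absolute URLs by prepending a hard-coded scheme. If a uniform approach is preferred, the standard library's urllib.parse.urljoin resolves protocol-relative, root-relative and already-absolute URLs in one call; a small sketch, with a placeholder base URL:

 from urllib.parse import urljoin

 base = 'https://example.com'  # stand-in for self.site_url

 # '//host/path' inherits the base's scheme,
 # '/path' is resolved against the base's host,
 # and full URLs pass through unchanged
 assert urljoin(base, '//cdn.example.com/img/1.png') == 'https://cdn.example.com/img/1.png'
 assert urljoin(base, '/manga/ch-1') == 'https://example.com/manga/ch-1'
 assert urljoin(base, 'https://other.com/x') == 'https://other.com/x'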
Example #6
 def search(self, manga: str) -> 'List[Manga]':
     # TODO: find a better way to do this:
     url = '{}/buscar/{}.html'.format(self.site_url, quote_plus(manga))
     # page restriction: len(manga) must be >= 4
     soup = BeautifulSoup(self.get_str(url), 'html.parser')
     divs = soup.find_all('div', class_='cont_manga')
     return [
         Manga('', get_text(div.a.header), div.a['href'], self.name)
         for div in divs
     ]
Example #7
 def search(self, manga: str) -> 'List[Manga]':
     url = self.site_url + "/search/"
     soup = BeautifulSoup(self.get_str(url, {'wd': manga}), 'html.parser')
     direlist = soup.find('ul', class_='direlist')
     results = [
         Manga('', get_text(a), a['href'], self.name)
         for a in direlist.find_all('a', class_='bookname')
     ]  # type: List[Manga]
     pagelist = soup.find('ul', class_='pagelist')
     if pagelist:
         # follow the pagination links; the [1:-1] slice skips the first
         # and last anchors, so this only fetches the pages linked from
         # the first results page:
         for page in pagelist.find_all('a')[1:-1]:
             soup = BeautifulSoup(self.get_str(page['href']), 'html.parser')
             direlist = soup.find('ul', class_='direlist')
             anchors = direlist.find_all('a', class_='bookname')
             results.extend(
                 Manga('', get_text(a), a['href'], self.name)
                 for a in anchors)
     return results
Example #8
 def search(self, manga: str) -> 'List[Manga]':
     # replace every non-alphanumeric character with a space...
     query_str = ''
     for char in manga:
         if char.isalnum():
             query_str += char
         else:
             query_str += ' '
     # ...then join the surviving tokens with underscores
     query_str = '_'.join(query_str.split())
     req_data = {'search_style': 'tentruyen', 'searchword': query_str}
     url = self.site_url + '/home_json_search/'
     res_data = self.get_json(url, req_data,
                              method='POST')  # type: List[dict]
     # result names may contain HTML markup, so parse them and strip it
     return [
         Manga('', get_text(BeautifulSoup(result['name'], 'html.parser')),
               self.site_url + '/manga/' + result['nameunsigned'],
               self.name) for result in res_data
     ]
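
The query normalization at the top of Example #8 (every non-alphanumeric character becomes a space, then the surviving tokens are joined with underscores) is easier to follow as a standalone function. This sketch reproduces the same logic:

 def normalize_query(manga: str) -> str:
     # keep alphanumerics; everything else becomes a token separator
     cleaned = ''.join(c if c.isalnum() else ' ' for c in manga)
     # str.split() collapses runs of separators, so repeated
     # punctuation never produces empty tokens
     return '_'.join(cleaned.split())

 assert normalize_query('One-Punch Man!') == 'One_Punch_Man'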