def _process_search(self, response):
    """Parse a Pirate Bay-style results table into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        # First <tr> is the table header; skip it.
        rows = soup.find('table', id='searchResult').findAll('tr')[1:]
        for row in rows:
            anchor = row.find(class_='detName').find('a')
            name = anchor.text
            # Keep only the site-relative part of the details URL.
            link = '/%s' % '/'.join(anchor.attrs['href'].split('/')[3:])
            cells = row.findAll('td')
            magnet_url = cells[1].findAll('a')[1].attrs['href']
            seeders = int(cells[2].text)
            leeches = int(cells[3].text)
            # Size lives in the description blurb after the first comma;
            # drop the label and the binary-prefix 'i' (e.g. 'GiB' -> 'GB').
            size = row.find(class_='detDesc').text.split(',')[1]
            size = size.replace('Size ', '').replace('i', '')
            results.append(
                SearchResult(
                    type(self), name, link, seeders, leeches, size,
                    magnet_url
                )
            )
    except Exception:
        # Best-effort scraping: a layout change yields partial/no results.
        pass
    return results
def _process_search(self, response):
    """Parse a table-list style results page into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        table = soup.find(
            class_='table-list table table-responsive table-striped'
        )
        for row in table.find('tbody').findAll('tr'):
            cells = row.findAll('td')
            anchor = cells[0].findAll('a')[1]
            name = anchor.string
            link = anchor.attrs['href']
            seeders = int(cells[1].string)
            leechers = int(cells[2].string)
            # The size cell has trailing text after the unit; keep
            # everything up to (and re-append) the 'B'.
            size = '%sB' % cells[4].text.split('B')[0]
            results.append(
                SearchResult(
                    type(self), name, link, seeders, leechers, size
                )
            )
    except Exception:
        # Best-effort scraping: swallow parse errors from layout changes.
        pass
    return results
def _process_search(self, response):
    """Parse a 'col s12' listing into SearchResult objects.

    Bug fix: the original used ``link.lstrip('/magnet/')``, but
    ``str.lstrip`` treats its argument as a *character set*
    ({'/', 'm', 'a', 'g', 'n', 'e', 't'}), not a literal prefix.  Any
    info-hash beginning with one of those characters was silently
    truncated.  We now strip the exact ``/magnet/`` prefix instead.
    """
    bs = BeautifulSoup(response, features='html.parser')
    result = []
    try:
        # They're not making it easy.
        rows = bs.findAll('div', class_='col s12')[1].findAll('div')[1:]
        for r in rows:
            a = r.find('h5').find('a')
            name = a.attrs['title']
            link = a.attrs['href']
            # Remove the literal '/magnet/' prefix (NOT lstrip, which
            # strips a character set and can eat into the hash itself).
            prefix = '/magnet/'
            if link.startswith(prefix):
                hash_part = link[len(prefix):]
            else:
                hash_part = link
            magnet_url = self._encode_magnet(hash_part, name)
            # Drop the label spans so the remaining <span>s line up as
            # size / seeders / leechers.
            for s in r.findAll('span', class_='lightColor'):
                s.decompose()
            spans = r.findAll('span')[3:]
            size = spans[0].find('b').text
            seeders = spans[1].find('b').text
            leechers = spans[2].find('b').text
            result.append(
                SearchResult(self, name, link, seeders, leechers, size,
                             magnet_url))
    except Exception:
        # Best-effort scraping: layout changes yield no results.
        pass
    return result
def _process_search(self, response):
    """Parse a ttable-style results page into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        # First row is the table header; skip it.
        for row in soup.find(class_='ttable_headinner').findAll('tr')[1:]:
            col2 = row.findAll(class_='ttable_col2')
            # Rows with at most one col2 cell are not torrent entries.
            if len(col2) <= 1:
                continue
            col1 = row.findAll(class_='ttable_col1')
            anchor = col2[0].findAll('a')[1]
            name = anchor.attrs['title']
            link = anchor.attrs['href']
            magnet_url = col2[1].find('a').attrs['href']
            size = col1[2].text
            # Counts may contain thousands separators.
            seeders = int(col2[2].find('b').text.replace(',', ''))
            leechers = int(col1[3].find('b').text.replace(',', ''))
            results.append(
                SearchResult(
                    type(self), name, link, seeders, leechers, size,
                    magnet_url
                )
            )
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a BTDB media-list page into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        for entry in soup.findAll('div', class_='media'):
            body = entry.find('div', class_='media-body')
            anchor = entry.find(class_='item-title').find('a')
            name = anchor.attrs['title']
            # Links embed the host; make them site-relative.
            link = anchor.attrs['href'].replace('//btdb.eu', '')
            meta = body.find(
                'div', class_='item-meta-info').findAll('small')
            size = meta[0].find('strong').text
            seeders = meta[2].find('strong').text
            leechers = meta[3].find('strong').text
            right = entry.find('div', class_='media-right')
            magnet_url = right.find(
                'a', class_='btn-success').attrs['href']
            results.append(
                SearchResult(self, name, link, seeders, leechers, size,
                             magnet_url))
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a tbody-based listing into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        for row in soup.find('tbody').findAll('tr'):
            cells = row.findAll('td')[1:]
            # Remove link to comments that some listings have.
            # Most concise way I have found so far.
            for comment_link in cells[0].findAll(class_='comments'):
                comment_link.decompose()
            anchor = cells[0].find('a')
            name = anchor.attrs['title']
            link = anchor.attrs['href']
            magnet_url = cells[1].findAll('a')[1].attrs['href']
            # Site uses binary prefixes.
            # Should calculate proper sizes at some point.
            size = cells[2].text.replace('i', '')
            seeders = cells[4].text
            leechers = cells[5].text
            results.append(
                SearchResult(self, name, link, seeders, leechers, size,
                             magnet_url))
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a condensed torrents table into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        table = soup.find(
            class_='table table-condensed table-torrents vmiddle')
        # First row is the header.
        for row in table.findAll('tr')[1:]:
            anchor = row.find(class_='text-trunc text-nowrap').find('a')
            name = anchor.text
            link = anchor.attrs['href']
            magnet_url = (row.find(align='left').find('ul')
                          .findAll('li')[1].find('a').attrs['href'])
            try:
                size = row.find(
                    class_='progress-bar prog-blue prog-l').text
            except Exception:
                # Some rows carry no size bar at all.
                size = '0B'
            # Seed/leech cell classes vary per page; resolved via helper.
            seeders = self._get_from_cls(row, self._s_cls)
            leechers = self._get_from_cls(row, self._l_cls)
            results.append(
                SearchResult(self, name, link, seeders, leechers, size,
                             magnet_url))
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a JSON API search response into SearchResult objects."""
    results = []
    try:
        for entry in json.loads(response)['results']:
            swarm = entry['swarm']
            results.append(
                SearchResult(self, entry['title'],
                             '/search?q=%s' % self._current_search,
                             swarm['seeders'], swarm['leechers'],
                             self._hr_size(entry['size']),
                             entry['magnet']))
    except Exception:
        # Best-effort: malformed or empty responses yield no results.
        pass
    return results
def _process_search(self, response):
    """Parse odd/even-row results into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        # Results alternate between 'odd' and 'even' row classes.
        rows = soup.findAll('tr', class_='odd')
        rows.extend(soup.findAll('tr', class_='even'))
        for row in rows:
            main = row.find(class_='cellMainLink')
            link = main.attrs['href']
            size = row.find(class_='nobr center').text.replace('\n', '')
            seeders = row.find(class_='green center').text
            leechers = row.find(class_='red lasttd center').text
            name = main.text.replace('\n', '').strip()
            results.append(
                SearchResult(self, name, link, seeders, leechers, size))
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a table2-style listing into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        table = soup.find(class_='table2')
        # First row is the header.
        for row in table.findAll('tr')[1:]:
            anchor = row.find(class_='tt-name').findAll('a')[1]
            name = anchor.string
            link = anchor.attrs['href']
            size = row.findAll(class_='tdnormal')[1].string
            # Counts may contain thousands separators.
            seeders = row.find(class_='tdseed').string.replace(',', '')
            leechers = row.find(class_='tdleech').string.replace(',', '')
            results.append(
                SearchResult(self, name, link, seeders, leechers, size))
    except Exception:
        # Best-effort scraping.
        pass
    return results
def _process_search(self, response):
    """Parse a tgxtablerow layout into SearchResult objects."""
    soup = BeautifulSoup(response, features='html.parser')
    results = []
    try:
        for row in soup.findAll('div', class_='tgxtablerow'):
            # First three cells are category/thumbnail chrome; skip them.
            cells = row.findAll('div', class_='tgxtablecell')[3:]
            anchor = cells[0].find('a')
            name = anchor.attrs['title']
            link = anchor.attrs['href']
            magnet_url = cells[1].findAll('a')[1].attrs['href']
            size = cells[4].find('span').text
            # Seed/leech counts are the two <b> tags in the same cell.
            # (Renamed from 'bs' to avoid shadowing the soup object.)
            counts = cells[7].findAll('b')
            seeders = counts[0].text
            leechers = counts[1].text
            results.append(
                SearchResult(self, name, link, seeders, leechers, size,
                             magnet_url))
    except Exception:
        # Best-effort scraping.
        pass
    return results