Example #1
 def search(self, query):
     search_url = self.base_url
     payload = {
         'page': 'search',
         'term': query,
         'sort': '2',
         'cats': '1_0',
         'filter': '0'
     }
     torrents = []
     response = requests.get(search_url,
                             params=payload,
                             headers=self.headers).text
     soup = bs(response, "lxml")
     table = soup.find('table', class_='tlist')
     for tr in table.find_all('tr')[1:]:
         t = Torrent()
         cols = tr.find_all('td')
         t.title = cols[1].find('a').text
         size = cols[3].text
         t.size = string_to_byte(size)
         t.seeds = cols[4].text
         t.torrent_url = cols[2].find('a').get('href') + "&magnet=1"
         torrents.append(t)
     return torrents
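All of these snippets fill a Torrent container and lean on helpers such as string_to_byte, whose definitions are not shown (a few later examples use name/seeders/leechers instead of title/seeds). The sketch below is only an assumption about their shape, added to make the examples easier to follow; the actual project may define them differently.

 class Torrent:
     """Hypothetical container matching the attributes the examples assign."""
     def __init__(self):
         self.title = None        # display name
         self.size = None         # raw size string or byte count
         self.seeds = None        # seeder count
         self.torrent_url = None  # .torrent or magnet link


 def string_to_byte(size_text):
     """Hypothetical helper: parse a string such as '701.4 MiB' into bytes."""
     units = {'B': 1, 'KB': 1024, 'KIB': 1024, 'MB': 1024**2, 'MIB': 1024**2,
              'GB': 1024**3, 'GIB': 1024**3, 'TB': 1024**4, 'TIB': 1024**4}
     number, _, unit = size_text.strip().partition(' ')
     return int(float(number) * units.get(unit.upper(), 1))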
Example #2
 def _parse_page(self, page_text):
     torrents = []
     for result in page_text['torrents']:
         t = Torrent()
         t.title = result['torrent_title']
         t.seeds = result['seeds']
         t.size = result['size']
         t.torrent_url = result['magnet_uri']
         torrents.append(t)
     return torrents[:50]
Example #3
 def _parse_page(self, page_text):
     torrents = []
     for result in page_text['torrents']:
         t = Torrent()
         t.title = result['torrent_title']
         t.seeds = result['seeds']
         t.size = result['size']
         t.torrent_url = result['magnet_uri']
         torrents.append(t)
     return torrents[:50]
Example #4
 def _parse_page(self, page_text):
     torrents = []
     for result in page_text["torrents"]:
         t = Torrent()
         t.title = result["torrent_title"]
         t.seeds = result["seeds"]
         t.size = torrentutils.hsize(result["size"])
         t.torrent_url = result["magnet_uri"]
         torrents.append(t)
     return torrents[:50]
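Example #4 runs the raw byte count through torrentutils.hsize before storing it; that helper is not shown either. A plausible sketch, assuming it just renders a byte count as a human-readable string:

 def hsize(num_bytes, suffix='B'):
     """Hypothetical stand-in for torrentutils.hsize: 735000000 -> '701.0 MiB'."""
     num = float(num_bytes)
     for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti'):
         if abs(num) < 1024.0:
             return '%3.1f %s%s' % (num, unit, suffix)
         num /= 1024.0
     return '%3.1f Pi%s' % (num, suffix)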
Example #5
 def _row_to_torrent(self, row):
     torrent = Torrent()
     torrent.name = row.find(class_='torrentname').find_all('a')[1].text
     torrent.magnet_link = row.find(name='a', class_='imagnet').get('href')
     torrent.torrent_link = row.find(name='a', class_='idownload').get('href')
     tds = row.find_all('td')
     torrent.seeders = int(tds[4].text)
     torrent.leechers = int(tds[5].text)
     torrent.size = 'UNKNOWN'  # TODO
     return torrent
Example #6
 def _row_to_torrent(self, row):
     torrent = Torrent()
     torrent.name = row.find(class_='torrentname').find_all('a')[1].text
     torrent.magnet_link = row.find(name='a', class_='imagnet').get('href')
     torrent.torrent_link = row.find(name='a',
                                     class_='idownload').get('href')
     tds = row.find_all('td')
     torrent.seeders = int(tds[4].text)
     torrent.leechers = int(tds[5].text)
     torrent.size = 'UNKNOWN'  # TODO
     return torrent
Example #7
 def _row_to_torrent(self, row):
     torrent = Torrent()
     torrent_name_tag = row.find(class_='detName')
     description = row.find(class_='detDesc').text
     torrent.name = torrent_name_tag.find(class_='detLink').text
     torrent.magnet_link = torrent_name_tag.find_next_sibling('a').get('href')
     tds = row.find_all('td')
     torrent.seeders = int(tds[2].text)
     torrent.leechers = int(tds[3].text)
     torrent.size = ' '.join(self.size_regex.search(description).groups())
     return torrent
Example #8
 def _row_to_torrent(self, row):
     torrent = Torrent()
     torrent_name_tag = row.find(class_='detName')
     description = row.find(class_='detDesc').text
     torrent.name = torrent_name_tag.find(class_='detLink').text
     torrent.magnet_link = torrent_name_tag.find_next_sibling('a').get(
         'href')
     tds = row.find_all('td')
     torrent.seeders = int(tds[2].text)
     torrent.leechers = int(tds[3].text)
     torrent.size = ' '.join(self.size_regex.search(description).groups())
     return torrent
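Examples #7 and #8 pull the size out of The Pirate Bay's description line ("Uploaded 05-16 2013, Size 701.4 MiB, ULed by ...") with self.size_regex, which is compiled elsewhere in the class. A guess at its definition, assuming two capture groups (number and unit) that the later ' '.join() stitches back together:

 import re

 # hypothetical definition of the size_regex attribute used above
 size_regex = re.compile(r'Size (\d+(?:\.\d+)?)\s+([KMGT]?i?B)')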
Example #9
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     torrents = []
     lines = soup.find_all(class_='ligne0') + soup.find_all(class_='ligne1')
     for line in lines:
         t = Torrent()
         t.title = line.find('a').text
         t.size = line.find(class_='poid').text
         t.seeds = int(line.find(class_='seed_ok').text)
         t.torrent_url = self._torrent_link(line.find('a').get('href'))
         torrents.append(t)
     return torrents
Example #10
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     torrents = []
     lines = soup.find_all(class_='ligne0') + soup.find_all(class_='ligne1')
     for line in lines:
         t = Torrent()
         t.title = line.find('a').text
         t.size = line.find(class_='poid').text
         t.seeds = int(line.find(class_='seed_ok').text)
         t.torrent_url = self._torrent_link(line.find('a').get('href'))
         torrents.append(t)
     return torrents
Example #11
 def search(self, query):
     payload = {"q": query, "field": "seeder", "order": "desc", "page": "1"}
     search_url = self.base_url + "/json.php"
     data = requests.get(search_url, params=payload, headers=self.headers).json()
     torrents = []
     for movie in data["list"]:
         t = Torrent()
         t.title = movie["title"]
         t.seeds = int(movie["seeds"])
         t.size = int(movie["size"])
         t.torrent_url = movie["torrentLink"]
         torrents.append(t)
     return torrents
Example #12
 def search(self, query):
     payload = {'q': query, 'field': 'seeder', 'order': 'desc', 'page': '1'}
     search_url = self.base_url + '/json.php'
     data = requests.get(
         search_url, params=payload, headers=self.headers).json()
     torrents = []
     for movie in data['list']:
         t = Torrent()
         t.title = movie['title']
         t.seeds = int(movie['seeds'])
         t.size = int(movie['size'])
         t.torrent_url = movie['torrentLink']
         torrents.append(t)
     return torrents
Example #13
 def search(self, query):
     payload = {'q': query, 'field': 'seeder', 'order': 'desc', 'page': '1'}
     search_url = self.base_url + '/json.php'
     data = requests.get(
         search_url, params=payload, headers=self.headers).json()
     torrents = []
     for movie in data['list']:
         t = Torrent()
         t.title = movie['title']
         t.seeds = int(movie['seeds'])
         t.size = torrentutils.hsize(movie['size'])
         t.torrent_url = movie['torrentLink']
         torrents.append(t)
     return torrents
Example #14
 def get_top(self):
     search_url = self.base_url + '/movies'
     data = requests.get(search_url, headers=self.headers).text
     soup = BS(data, 'lxml')
     torrents = []
     table = soup.find(class_='data')
     for row in table.find_all('tr')[1:]:
         cells = row.find_all('td')
         t = Torrent()
         t.title = cells[0].find(class_='cellMainLink').text
         t.torrent_url = cells[0].find_all('a')[3].get('href')
         t.size = string_to_byte(cells[1].text)
         t.seeds = int(cells[4].text)
         torrents.append(t)
     return torrents
Example #15
 def get_top(self):
     search_url = self.base_url + '/movies'
     data = requests.get(search_url, headers=self.headers).text
     soup = BS(data, "lxml")
     torrents = []
     table = soup.find(class_="data")
     for row in table.find_all('tr')[1:]:
         cells = row.find_all('td')
         t = Torrent()
         t.title = cells[0].find(class_="cellMainLink").text
         t.torrent_url = cells[0].find_all("a")[3].get('href')
         t.size = cells[1].text
         t.seeds = int(cells[4].text)
         torrents.append(t)
     return torrents
Example #16
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     torrents = []
     table = soup.find(id="searchResult")
     for row in table.find_all('tr')[1:30]:
         t = Torrent()
         cells = row.find_all('td')
         a = cells[1].find_all('a')
         t.title = a[0].text
         t.torrent_url = a[1]['href']
         t.seeds = int(cells[2].text)
         pattern = re.compile("Uploaded (.*), Size (.*), ULed by (.*)")
         match = pattern.match(cells[1].font.text)
         # the size text uses non-breaking spaces; normalise them
         t.size = match.groups()[1].replace('\xa0', ' ')
         torrents.append(t)
     return torrents
Example #17
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     torrents = []
     table = soup.find(id="searchResult")
     for row in table.find_all('tr')[1:30]:
         t = Torrent()
         cells = row.find_all('td')
         a = cells[1].find_all('a')
         t.title = a[0].text
         t.torrent_url = a[1]['href']
         t.seeds = int(cells[2].text)
         pattern = re.compile("Uploaded (.*), Size (.*), ULed by (.*)")
         match = pattern.match(cells[1].font.text)
         # the size text uses non-breaking spaces; normalise them
         t.size = match.groups()[1].replace('\xa0', ' ')
         torrents.append(t)
     return torrents
Example #18
 def get_top(self):
     payload = {"sort": "date_added", "order": "desc", "set": "1", "limit": 20}
     search_url = self.base_url + "/api/v2/list_movies.json"
     try:
         response = requests.get(search_url, params=payload, headers=self.headers).json()
     except Exception:
         return
     torrents = []
     for movie in response["data"]["movies"]:
         for torrent in movie["torrents"]:
             t = Torrent()
             t.title = movie["title_long"] + " " + torrent["quality"]
             t.seeds = torrent["seeds"]
             t.size = torrent["size"]
             t.torrent_url = torrent["url"]
             torrents.append(t)
     return torrents
Example #19
 def search(self, query):
     search_url = self.base_url
     payload = {'page': 'search', 'term': query,
                'sort': '2', 'cats': '1_0', 'filter': '0'}
     torrents = []
     response = requests.get(
         search_url, params=payload, headers=self.headers).text
     soup = bs(response, "lxml")
     table = soup.find('table', class_='tlist')
     for tr in table.find_all('tr')[1:]:
         t = Torrent()
         cols = tr.find_all('td')
         t.title = cols[1].find('a').text
         t.size = cols[3].text
         t.seeds = cols[4].text
         t.torrent_url = cols[2].find('a').get('href') + "&magnet=1"
         torrents.append(t)
     return torrents
Example #20
 def get_top(self):
     payload = {
         'sort': 'date_added', 'order': 'desc', 'set': '1', 'limit': 20}
     search_url = self.base_url + '/api/v2/list_movies.json'
     try:
         response = requests.get(
             search_url, params=payload, headers=self.headers).json()
     except Exception:
         return
     torrents = []
     for movie in response['data']['movies']:
         for torrent in movie['torrents']:
             t = Torrent()
             t.title = movie['title_long'] + " " + torrent['quality']
             t.seeds = torrent['seeds']
             t.size = torrent['size']
             t.torrent_url = torrent['url']
             torrents.append(t)
     return torrents
Example #21
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     tabl = soup.find('table', class_='lista2t')
     torrents = []
     for tr in tabl.find_all('tr')[1:]:
         rows = tr.find_all('td')
         try:
             t = Torrent()
             t.title = rows[1].find('a').text
             # take the id that follows '/torrent/'; str.strip() removes
             # characters, not a prefix, and can eat into the id itself
             rarbg_id = rows[1].find('a')['href'].split('/torrent/')[-1]
             title = requests.utils.quote(t.title) + "-[rarbg.com].torrent"
             download_url = self.base_url + "/download.php?id=%s&f=%s" % (
                 rarbg_id, title)
             t.torrent_url = self._to_magnet(download_url)
             t.size = naturalsize(rows[3].text)
             t.seeds = int(rows[4].text)
             torrents.append(t)
         except bencode.BTL.BTFailure:
             pass
     return torrents
Example #22
 def _parse_page(self, page_text):
     soup = BS(page_text, "lxml")
     tabl = soup.find('table', class_='lista2t')
     torrents = []
     for tr in tabl.find_all('tr')[1:]:
         rows = tr.find_all('td')
         try:
             t = Torrent()
             t.title = rows[1].find('a').text
             # take the id that follows '/torrent/'; str.strip() removes
             # characters, not a prefix, and can eat into the id itself
             rarbg_id = rows[1].find('a')['href'].split('/torrent/')[-1]
             title = requests.utils.quote(t.title) + "-[rarbg.com].torrent"
             download_url = self.base_url + "/download.php?id=%s&f=%s" % (
                 rarbg_id, title)
             t.torrent_url = self._to_magnet(download_url)
             t.size = rows[3].text
             t.seeds = int(rows[4].text)
             torrents.append(t)
         except bencode.BTL.BTFailure:
             pass
     return torrents
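Examples #21 and #22 hand the download URL to self._to_magnet and guard against bencode.BTL.BTFailure, which suggests the helper downloads the .torrent file, bdecodes it, and builds a magnet URI from the info-hash (naturalsize in #21 is presumably humanize.naturalsize). A sketch under those assumptions:

 import hashlib

 import bencode
 import requests


 def _to_magnet(self, torrent_url):
     """Hypothetical helper: turn a .torrent URL into a magnet link."""
     raw = requests.get(torrent_url, headers=self.headers).content
     metadata = bencode.bdecode(raw)  # raises bencode.BTL.BTFailure on bad data
     # depending on the bencode package version, dict keys may be bytes
     info = metadata.get('info') or metadata.get(b'info')
     info_hash = hashlib.sha1(bencode.bencode(info)).hexdigest()
     return 'magnet:?xt=urn:btih:' + info_hash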
Example #23
 def search(self, query):
     self._get_token()
     search_payload = {
         'sort': 'seeders',
         'category': 'movies',
         'mode': 'search',
         'app_id': 'xxx',
         'format': 'json_extended',
         'search_string': query,
         'token': self.token,
     }
     results = requests.get(self.base_url, params=search_payload).json()
     torrents = []
     for result in results['torrent_results']:
         t = Torrent()
         t.title = result['title']
         t.seeds = result['seeders']
         t.size = naturalsize(result['size'])
         t.torrent_url = result['download']
         torrents.append(t)
     return torrents
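Examples #23 and #24 call self._get_token() before querying what looks like a torrentapi-style JSON endpoint, but the token request itself is not shown. A minimal sketch, assuming the conventional get_token call against the same base_url:

 def _get_token(self):
     """Hypothetical helper: fetch the short-lived API token used by search()."""
     payload = {'get_token': 'get_token', 'app_id': 'xxx'}
     self.token = requests.get(self.base_url, params=payload).json()['token']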
Example #24
 def search(self, query):
     self._get_token()
     search_payload = {
         'sort': 'seeders',
         'category': 'movies',
         'mode': 'search',
         'app_id': 'xxx',
         'format': 'json_extended',
         'search_string': query,
         'token': self.token,
     }
     results = requests.get(self.base_url, params=search_payload).json()
     torrents = []
     for result in results['torrent_results']:
         t = Torrent()
         t.title = result['title']
         t.seeds = result['seeders']
         t.size = hsize(result['size'])
         t.torrent_url = result['download']
         torrents.append(t)
     return torrents
Example #25
 def get_top(self):
     payload = {
         'sort': 'date_added',
         'order': 'desc',
         'set': '1',
         'limit': 20
     }
     search_url = self.base_url + '/api/v2/list_movies.json'
     try:
         response = requests.get(search_url,
                                 params=payload,
                                 headers=self.headers).json()
     except Exception:
         return
     torrents = []
     for movie in response['data']['movies']:
         for torrent in movie['torrents']:
             t = Torrent()
             t.title = movie['title_long'] + " " + torrent['quality']
             t.seeds = torrent['seeds']
             t.size = torrent['size']
             t.torrent_url = torrent['url']
             torrents.append(t)
     return torrents
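The search() and get_top() methods above all return a list of Torrent objects (get_top() returns None when the request fails). A hypothetical caller that aggregates and ranks results by seed count, assuming providers is a list of objects exposing search() and the title/seeds attribute names:

 def best_results(providers, query, limit=10):
     """Hypothetical aggregator: merge provider results and rank by seeders."""
     torrents = []
     for provider in providers:
         torrents.extend(provider.search(query) or [])
     return sorted(torrents, key=lambda t: int(t.seeds), reverse=True)[:limit]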