def login(self):
    cookie_dict = dict_from_cookiejar(self.session.cookies)
    if cookie_dict.get('uid') and cookie_dict.get('pass'):
        return True

    if self.cookies:
        success, status = self.add_cookies_from_ui()
        if not success:
            logger.info(status)
            return False

    login_params = {
        'username': self.username,
        'password': self.password,
        'login': '******',
    }

    if self.custom_url:
        if not validators.url(self.custom_url):
            logger.warning("Invalid custom url: {0}".format(self.custom_url))
            return False

    # Get the index; it redirects to the login page
    data = self.get_url(self.custom_url or self.url, returns='text')
    if not data:
        logger.warning("Unable to connect to provider")
        return False

    with BS4Parser(data, 'html5lib') as html:
        # Guard against a missing form: html.find() returns None when nothing
        # matches, so only read the action if the form exists.
        form = html.find('form', {'action': re.compile(r'.*login.*')})
        action = form.get('action') if form else None
        if not action:
            logger.warning('Could not find the login form. Try adding cookies instead')
            return False

    response = self.get_url(urljoin(self.custom_url or self.url, action),
                            post_data=login_params, returns='text')
    if not response:
        logger.warning("Unable to connect to provider")
        return False

    # Invalid username and password combination
    if re.search('Invalid username and password combination', response):
        logger.warning("Invalid username or password. Check your settings")
        return False

    # You tried too often, please try again after 2 hours!
    if re.search('You tried too often', response):
        logger.warning("You tried too often, please try again after 2 hours! "
                       "Disable IPTorrents for at least 2 hours")
        return False

    # Captcha! (escape the dot so it is matched literally)
    if re.search(r'Captcha verification failed\.', response):
        logger.warning("Stupid captcha")
        return False

    return True
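# --- Illustration (not part of the providers) --------------------------------
# A minimal sketch of the cookie short-circuit at the top of login(): once the
# session already carries 'uid' and 'pass' cookies, login() returns True
# without POSTing credentials again. The cookie values below are made up.
import requests
from requests.utils import dict_from_cookiejar

session = requests.Session()
session.cookies.set('uid', '12345')
session.cookies.set('pass', '0123456789abcdef')

cookie_dict = dict_from_cookiejar(session.cookies)
assert cookie_dict.get('uid') and cookie_dict.get('pass')  # login() short-circuits here
# ------------------------------------------------------------------------------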
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        "cat[]": [1, 4],  # 1 for SERIE, 4 for ANIME
        # Both ASC and DESC are available for sort direction
        "SortOrder": "desc",
        "SortOn": "Created",
    }

    # Units
    units = ["O", "Ko", "Mo", "Go", "To", "Po"]

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in {*search_strings[mode]}:
            if mode != "RSS":
                logger.debug("Search string: {0}".format(search_string))

            search_params["search"] = re.sub(r"[()]", "", search_string)
            data = self.get_url(self.urls["search"], params=search_params, returns="text")
            if not data:
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find(class_="table-rows")
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                # Catégorie, Release, M, DL, Taille, S, L
                labels = [label.get_text(strip=True) for label in torrent_rows[0]("th")]

                # Skip column headers
                for result in torrent_rows[1:]:
                    cells = result("td")
                    if len(cells) < len(labels):
                        continue

                    try:
                        title = cells[labels.index("Release")].get_text(strip=True)
                        download_url = urljoin(self.url, cells[labels.index("DL")].find("a")["href"])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index("S")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("L")].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        size_index = labels.index("Size") if "Size" in labels else labels.index("Taille")
                        torrent_size = cells[size_index].get_text()
                        size = convert_size(torrent_size, units=units) or -1

                        item = {"title": title, "link": download_url, "size": size,
                                "seeders": seeders, "leechers": leechers, "hash": ""}
                        if mode != "RSS":
                            logger.debug(_("Found result: {title} with {seeders} seeders and"
                                           " {leechers} leechers".format(title=title, seeders=seeders,
                                                                         leechers=leechers)))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        "searchstr": "",
        "filter_cat[1]": 1,
        "filter_cat[2]": 1,
        "filter_cat[3]": 1,
        "filter_cat[4]": 1,
        "filter_cat[5]": 1,
    }

    # Units
    units = ["B", "KB", "MB", "GB", "TB", "PB"]

    def process_column_header(td):
        result = ""
        if td.a and td.a.img:
            result = td.a.img.get("title", td.a.get_text(strip=True))
        if not result:
            result = td.get_text(strip=True)
        return result

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in {*search_strings[mode]}:
            if mode != "RSS":
                logger.debug("Search string: {0}".format(search_string))

            search_params["searchstr"] = search_string
            search_url = self.urls["search"]
            data = self.get_url(search_url, params=search_params, returns="text")
            if not data:
                logger.debug("No data returned from provider")
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find("table", id="torrent_table")
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                # "", "", "Name /Year", "Files", "Time", "Size", "Snatches", "Seeders", "Leechers"
                labels = [process_column_header(label) for label in torrent_rows[0]("td")]

                # Skip column headers
                for result in torrent_rows[1:]:
                    cells = result("td")
                    if len(cells) < len(labels):
                        continue

                    try:
                        title = cells[labels.index("Name /Year")].find("a", dir="ltr").get_text(strip=True)
                        download_url = urljoin(self.url,
                                               cells[labels.index("Name /Year")].find("a", title="Download")["href"])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        torrent_size = cells[labels.index("Size")].get_text(strip=True)
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != "RSS":
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
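# --- Illustration (not part of the providers) --------------------------------
# The searches above locate columns by header text (labels.index(...)) instead
# of hard-coded positions, so a reordered table still parses. The sample row
# below is invented.
labels = ['', '', 'Name /Year', 'Files', 'Time', 'Size', 'Snatches', 'Seeders', 'Leechers']
cells = ['', '', 'Show.S01E01.720p', '3', 'today', '1.2 GB', '10', '41', '7']
print(cells[labels.index('Size')], cells[labels.index('Seeders')])  # 1.2 GB 41
# ------------------------------------------------------------------------------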
def search(self, search_params, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    freeleech = '&free=on' if self.freeleech else ''

    for mode in search_params:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in search_params[mode]:
            if mode != 'RSS':
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
            search_url = self.urls['search'] % (self.categories, freeleech, search_string)
            search_url += ';o=seeders' if mode != 'RSS' else ''

            if self.custom_url:
                if not validators.url(self.custom_url):
                    logger.warning("Invalid custom url: {0}".format(self.custom_url))
                    return results
                search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

            data = self.get_url(search_url, returns='text')
            if not data:
                continue

            try:
                data = re.sub(r'(?im)<button.+?</button>', '', data, 0)
                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.debug("No data returned from provider")
                        continue

                    if html.find(text='No Torrents Found!'):
                        logger.debug("Data returned from provider does not contain any torrents")
                        continue

                    torrent_table = html.find('table', id='torrents')
                    torrents = torrent_table('tr') if torrent_table else []

                    # Continue only if one release is found
                    if not torrents or len(torrents) < 2:
                        logger.debug("Data returned from provider does not contain any torrents")
                        continue

                    for result in torrents[1:]:
                        try:
                            title = result('td')[1].find('a').text
                            download_url = urljoin(search_url, result('td')[3].find('a')['href'])
                            seeders = int(result.find('td', class_='ac t_seeders').text)
                            leechers = int(result.find('td', class_='ac t_leechers').text)
                            torrent_size = result('td')[5].text
                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError, KeyError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
            except Exception as e:
                # logger.exception() already appends the current traceback,
                # so a second format_exc() call would only duplicate it.
                logger.exception("Failed parsing provider. Error: {0!r}".format(str(e)))

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    for mode in search_strings:
        items = []
        for search_string in {*search_strings[mode]}:
            if mode != 'RSS':
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            search_params = {
                'searchtext': search_string,
                'filter_freeleech': (0, 1)[self.freeleech is True],
                'order_by': ('seeders', 'time')[mode == 'RSS'],
                "order_way": "desc",
            }

            if not search_string:
                del search_params['searchtext']

            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                logger.debug("No data returned from provider")
                continue

            try:
                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', {'id': 'torrent_table'})
                    if not torrent_table:
                        logger.debug("Data returned from {0} does not contain any torrents".format(self.name))
                        continue

                    labels = [x.get_text(strip=True) or x.a.img.get('alt')
                              for x in torrent_table.find('tr', class_='colhead').find_all('td')]
                    torrent_rows = torrent_table('tr', class_='torrent')

                    # Continue only if one release is found
                    if not torrent_rows:
                        logger.debug("Data returned from {0} does not contain any torrents".format(self.name))
                        continue

                    for torrent_row in torrent_rows:
                        freeleech = torrent_row.find('img', alt="Freeleech") is not None
                        if self.freeleech and not freeleech:
                            continue

                        # Normal download link
                        download_item = torrent_row.find('a', {'title': 'Download Torrent'})
                        if not download_item:
                            # If the user has downloaded it
                            download_item = torrent_row.find('a', {'title': 'Previously Grabbed Torrent File'})
                        if not download_item:
                            # If the user is seeding
                            download_item = torrent_row.find('a', {'title': 'Currently Seeding Torrent'})
                        if not download_item:
                            # If the user is leeching
                            download_item = torrent_row.find('a', {'title': 'Currently Leeching Torrent'})
                        if not download_item:
                            # If there are none
                            continue

                        download_url = urljoin(self.url, download_item['href'])

                        temp_anchor = torrent_row.find('a', {"data-src": True})
                        title = temp_anchor['data-src'].rsplit('.', 1)[0]
                        if not all([title, download_url]):
                            continue

                        cells = torrent_row('td')
                        seeders = try_int(cells[labels.index('∧')].text.strip())
                        leechers = try_int(cells[labels.index('∨')].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        # data-filesize is a string; normalize it to an int as
                        # the other providers do.
                        size = try_int(temp_anchor['data-filesize'], -1)

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
            except Exception:
                logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))

        # For each search mode sort all the items by seeders
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []

    for mode in search_strings:
        items = []
        logger.debug(_(f"Search Mode: {mode}"))
        for search_string in search_strings[mode]:
            if mode != "RSS":
                logger.debug(_(f"Search String: {search_string}"))
                search = slugify(search_string)
                search_url = urljoin(self.url, '{}/{}/'.format(search[0], search))
            else:
                search_url = self.urls['rss']

            if self.custom_url:
                if not validators.url(self.custom_url):
                    logger.warning("Invalid custom url: {0}".format(self.custom_url))
                    return results
                search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

            data = self.get_url(search_url, returns="text")
            if not data:
                logger.debug("URL did not return results/data; if the results are on the site,"
                             " try a custom url or a different one")
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find("table", class_="download")
                torrent_body = torrent_table.find('tbody') if torrent_table else []
                # noinspection PyCallingNonCallable
                torrent_rows = torrent_body("tr") if torrent_body else []

                # Continue only if at least one release is found
                if not torrent_rows:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                labels = [x.get_text(strip=True) for x in torrent_table.find('thead').find('tr')('th')]

                # Skip column headers; rows come in pairs, so step by two
                for result in torrent_rows[0:-1:2]:
                    try:
                        if len(result("td")) < len(labels):
                            continue

                        title = result.find("td", class_="n").find("a")['title']
                        magnet = result.find("td", class_="m").find("a")['href']
                        seeders = try_int(result.find("td", class_="s").get_text(strip=True))
                        leechers = try_int(result.find("td", class_="l").get_text(strip=True))
                        size = convert_size(result("td")[labels.index('Size')].get_text(strip=True) or '') or -1

                        if not all([title, magnet]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {'title': title, 'link': magnet + self._custom_trackers, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != "RSS":
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: d.get('seeders', 0), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))
        for search_string in {*search_strings[mode]}:
            if mode != "RSS":
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            search_url = (self.urls["search"], self.urls["rss"])[mode == "RSS"]
            if self.custom_url:
                if not validators.url(self.custom_url):
                    logger.warning("Invalid custom url: {0}".format(self.custom_url))
                    return results
                search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

            if mode != "RSS":
                # Non-RSS searches are always sorted by seeders
                search_params = {"search": search_string, "sort": "seeders"}
            else:
                search_params = {"category": "show", "type": "video", "sort": "created"}

            data = self.get_url(search_url, params=search_params, returns="text")
            if not data:
                logger.debug("Data returned from provider does not contain any torrents")
                continue

            with BS4Parser(data, "html5lib") as html:
                labels = [label.get_text(strip=True) for label in html("th")]

                # Iterate rows as `row` so the result dict below doesn't shadow
                # the loop variable.
                for row in html("tr", attrs={"data-size": True}):
                    try:
                        size = try_int(row["data-size"])
                        cells = row.find_all("td")

                        title_block_links = cells[labels.index("Name")].find_all("a")
                        title = title_block_links[0].get_text(strip=True)
                        info_hash = title_block_links[0]["href"].split("/")[1]
                        download_url = title_block_links[2]["href"]

                        seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {"title": title, "link": download_url, "size": size,
                                "seeders": seeders, "leechers": leechers, "hash": info_hash}
                        if mode != "RSS":
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    self.login()
    results = []

    for mode in search_strings:
        items = []
        logger.debug(_(f'Search Mode: {mode}'))
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.debug(_(f'Search String: {search_string}'))

            # The search string needs to be normalized: single quotes are apparently
            # not allowed on the site, and ç should be replaced because people tend
            # to use c instead.
            replace_chars = {
                "'": '',
                "ç": 'c',
            }

            for k, v in replace_chars.items():
                search_string = search_string.replace(k, v)

            logger.debug('Sanitized string: {0}'.format(search_string))

            try:
                search_params = {
                    'category': '2145',
                    'sub_category': 'all',
                    'name': re.sub(r'[()]', '', search_string),
                    'do': 'search',
                }

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                if 'logout' not in data:
                    logger.debug('Refreshing cookies')
                    self.login()

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find(class_='table')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one release is found
                    if len(torrent_rows) < 2:
                        logger.debug('Data returned from provider does not contain any torrents')
                        continue

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        cells = result('td')
                        if len(cells) < 9:
                            continue

                        title = cells[1].find('a').get_text(strip=True)
                        torrent_id = cells[2].find('a')['target']
                        download_url = urljoin(self.url, 'engine/download_torrent?id=' + torrent_id)

                        if not (title and download_url):
                            continue

                        seeders = try_int(cells[7].get_text(strip=True))
                        leechers = try_int(cells[8].get_text(strip=True))

                        torrent_size = cells[5].get_text()
                        size = convert_size(torrent_size) or -1

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug("Discarding torrent because it doesn't meet the minimum"
                                             " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug(_(f'Found result: {title} with {seeders} seeders and {leechers} leechers'))

                        items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError):
                logger.exception('Failed parsing provider {}.'.format(self.name))

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
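# --- Illustration (not part of the providers) --------------------------------
# The sanitization step above in action; the sample string is invented.
replace_chars = {"'": '', "ç": 'c'}
s = "garçon d'honneur"
for k, v in replace_chars.items():
    s = s.replace(k, v)
print(s)  # garcon dhonneur
# ------------------------------------------------------------------------------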
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        'do': 'search',
        'include_dead_torrents': 'no',
        'search_type': 't_name',
        'category': 0,
        'keywords': '',
    }

    # Units
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

    def process_column_header(td):
        td_title = ''
        if td.img:
            td_title = td.img.get('title', td.get_text(strip=True))
        if not td_title:
            td_title = td.get_text(strip=True)
        return td_title

    for mode in search_strings:
        items = []
        logger.debug(_(f"Search Mode: {mode}"))

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.debug(_(f"Search String: {search_string}"))

            search_params['keywords'] = search_string
            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                logger.debug("No data returned from provider")
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('table', id='sortabletable')
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                # Skip column headers
                for result in torrent_rows[1:]:
                    try:
                        title = result.find('div', class_='tooltip-target').get_text(strip=True)
                        # Skip if the torrent has been nuked due to poor quality
                        if title.startswith('Nuked.'):
                            continue

                        download_url = result.find('img', title='Click to Download this Torrent in SSL!').parent['href']
                        if not all([title, download_url]):
                            continue

                        cells = result('td')
                        seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                        leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug("Discarding torrent because it doesn't meet the minimum"
                                             " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        torrent_size = cells[labels.index('Size')].get_text(strip=True)
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        'do': 'search',
        'search_type': 't_name',
        'category': 0,
        'include_dead_torrents': 'no',
        'submit': 'search',
    }

    # Units
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in search_strings[mode]:
            if mode == 'Season':
                search_string = re.sub(r'(.*)S0?', r'\1Series ', search_string)

            if mode != 'RSS':
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            search_params['keywords'] = search_string
            data = self.get_url(self.urls['search'], post_data=search_params, returns='text')
            if not data:
                logger.debug(_('No data returned from provider'))
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find(id='sortabletable')
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                labels = [label.img['title'] if label.img else label.get_text(strip=True)
                          for label in torrent_rows[0]('td')]

                for torrent in torrent_rows[1:]:
                    try:
                        if self.freeleech and not torrent.find('img', alt=re.compile('Free Torrent')):
                            continue

                        title = torrent.find(class_='tooltip-content').div.get_text(strip=True)
                        download_url = torrent.find(title='Click to Download this Torrent!').parent['href']
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(torrent.find(title='Seeders').get_text(strip=True))
                        leechers = try_int(torrent.find(title='Leechers').get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug("Discarding torrent because it doesn't meet the minimum"
                                             " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        # Chop off the tracker/channel prefix or we can't parse the result!
                        if mode != 'RSS' and search_params['keywords']:
                            show_name_first_word = re.search(r'^[^ .]+', search_params['keywords']).group()
                            if not title.startswith(show_name_first_word):
                                title = re.sub(r'.*(' + show_name_first_word + '.*)', r'\1', title)

                        # Change title from Series to Season, or we can't parse it.
                        # (A mid-pattern inline (?i) flag is an error on Python 3.11+,
                        # so pass flags=re.I instead.)
                        if mode == 'Season':
                            title = re.sub(r'(.*)Series', r'\1Season', title, flags=re.I)

                        # Strip the year from the end or we can't parse it!
                        title = re.sub(r'(.*)[. ]?\(\d{4}\)', r'\1', title)
                        title = re.sub(r'\s+', r' ', title)

                        torrent_size = torrent('td')[labels.index('Size')].get_text(strip=True)
                        size = convert_size(torrent_size, units=units) or -1

                        if mode != 'RSS':
                            logger.debug(_('Found result: {title} with {seeders} seeders and {leechers} leechers'.format(
                                title=title, seeders=seeders, leechers=leechers)))

                        item = {'title': title + '.hdtv.x264', 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers}
                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
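# --- Illustration (not part of the providers) --------------------------------
# Worked example of the title rewrites above; the sample title is invented.
import re

title = 'Some Show Series 2 (2016)'
title = re.sub(r'(.*)Series', r'\1Season', title, flags=re.I)  # Series -> Season
title = re.sub(r'(.*)[. ]?\(\d{4}\)', r'\1', title)            # strip trailing year
title = re.sub(r'\s+', ' ', title).strip()
print(title)  # Some Show Season 2
# ------------------------------------------------------------------------------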
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    freeleech = "2" if self.freeleech else "0"

    # Search Params
    # c59=1&c73=1&c5=1&c41=1&c60=1&c66=1&c65=1&c67=1&c62=1&c64=1&c61=1&search=Good+Behavior+S01E01&cat=0&incldead=0&freeleech=0&lang=0
    search_params = {
        "c5": "1",   # Category: Series - DVDRip
        "c41": "1",  # Category: Series - HD
        "c60": "1",  # Category: Series - Pack TV
        "c62": "1",  # Category: Series - BDRip
        "c64": "1",  # Category: Series - VOSTFR
        "c65": "1",  # Category: Series - TV 720p
        "c66": "1",  # Category: Series - TV 1080p
        "c67": "1",  # Category: Series - Pack TV HD
        "c73": "1",  # Category: Anime
        "incldead": "0",  # Include dead torrent - 0: off 1: yes 2: only dead
        "freeleech": freeleech,  # Only freeleech torrent - 0: off 1: no freeleech 2: only freeleech
        "lang": "0",  # Language - 0: off 1: English 2: French ...
    }

    # Units
    units = ["B", "KB", "MB", "GB", "TB", "PB"]

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in {*search_strings[mode]}:
            if mode != "RSS":
                logger.debug("Search string: {0}".format(search_string))

            search_params["search"] = re.sub(r"[()]", "", search_string)
            data = self.get_url(self.urls["search"], params=search_params, returns="text")
            if not data:
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find(class_="ttable_headinner")
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                # Catégorie, Release, Date, DL, Size, C, S, L
                labels = [label.get_text(strip=True) for label in torrent_rows[0]("th")]

                # Skip column headers
                for result in torrent_rows[1:]:
                    cells = result("td")
                    if len(cells) < len(labels):
                        continue

                    try:
                        torrent_id = re.search("id=([0-9]+)",
                                               cells[labels.index("Nom")].find("a")["href"]).group(1)
                        title = cells[labels.index("Nom")].get_text(strip=True)
                        download_url = urljoin(self.urls["download"],
                                               "?id={0}&name={1}".format(torrent_id, title))
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index("S")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("L")].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        size_index = labels.index("Size") if "Size" in labels else labels.index("Taille")
                        torrent_size = cells[size_index].get_text()
                        size = convert_size(torrent_size, units=units) or -1

                        item = {"title": title, "link": download_url, "size": size,
                                "seeders": seeders, "leechers": leechers, "hash": ""}
                        if mode != "RSS":
                            logger.debug(_("Found result: {title} with {seeders} seeders and"
                                           " {leechers} leechers".format(title=title, seeders=seeders,
                                                                         leechers=leechers)))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))
        for search_string in {*search_strings[mode]}:
            if mode == 'Season':
                search_string = re.sub(r'(.*)S0?', r'\1Saison ', search_string)

            if mode != 'RSS':
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))
                search_url = self.url
                post_data = {'torrentSearch': search_string}
            else:
                search_url = self.url + '/torrents_series.html'
                post_data = None

            data = self.get_url(search_url, post_data=post_data, returns='text')
            if not data:
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('div', {'class': 'table-responsive'})
                torrent_rows = torrent_table.find_all('tr') if torrent_table else None
                if not torrent_rows:
                    continue

                for result in torrent_rows:
                    try:
                        title = result.find('a').get_text(strip=False).replace("HDTV", "HDTV x264-Torrent9")
                        title = re.sub(r' Saison', ' Season', title, flags=re.I)
                        tmp = result.find("a")['href']
                        download_url = urljoin(self.url,
                                               self._retrieve_dllink_from_url(urljoin(self.url, tmp)))
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(result.find(class_="seed_ok").get_text(strip=True))
                        leechers = try_int(result.find_all('td')[3].get_text(strip=True))
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        torrent_size = result.find_all('td')[1].get_text(strip=True)
                        units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    """
    Search the indexer using the params in search_strings, either for the latest releases or for a string/id search.

    Returns: list of results in dict form
    """
    results = []
    if not self._check_auth():
        return results

    # gingadaddy has no caps.
    if 'gingadaddy' not in self.url:
        if not self.caps:
            self.get_newznab_categories(just_caps=True)
        if not self.caps:
            return results

    for mode in search_strings:
        search_params = {
            't': ('search', 'tvsearch')[bool(self.use_tv_search)],
            'limit': 100,
            'offset': 0,
            'cat': self.catIDs.strip(', ') or '5030,5040',
            'maxage': settings.USENET_RETENTION,
        }

        if self.needs_auth and self.key:
            search_params['apikey'] = self.key

        if mode != 'RSS':
            if self.use_tv_search:
                if 'tvdbid' in str(self.cap_tv_search):
                    search_params['tvdbid'] = ep_obj.show.indexerid

                if ep_obj.show.air_by_date or ep_obj.show.sports:
                    # date_str = str(ep_obj.airdate)
                    # search_params['season'] = date_str.partition('-')[0]
                    # search_params['ep'] = date_str.partition('-')[2].replace('-', '/')
                    search_params['q'] = str(ep_obj.airdate)
                elif ep_obj.show.is_anime:
                    search_params['ep'] = ep_obj.absolute_number
                else:
                    search_params['season'] = ep_obj.scene_season
                    search_params['ep'] = ep_obj.scene_episode

            if mode == 'Season':
                search_params.pop('ep', '')

        if self.torznab:
            search_params.pop('ep', '')
            search_params.pop('season', '')

        items = []
        logger.debug('Search Mode: {0}'.format(mode))
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

                if 'tvdbid' not in search_params:
                    search_params['q'] = search_string

            time.sleep(cpu_presets[settings.CPU_PRESET])
            data = self.get_url(urljoin(self.url, 'api'), params=search_params, returns='text')
            if not data:
                break

            with BS4Parser(data, 'html5lib') as html:
                if not self._check_auth_from_data(html):
                    break

                # try:
                #     self.torznab = 'xmlns:torznab' in html.rss.attrs
                # except AttributeError:
                #     self.torznab = False

                for item in html('item'):
                    try:
                        title = item.title.get_text(strip=True)
                        download_url = None
                        if item.link:
                            if validators.url(item.link.get_text(strip=True)):
                                download_url = item.link.get_text(strip=True)
                            elif validators.url(item.link.next.strip()):
                                download_url = item.link.next.strip()

                        # Fall back to the enclosure url. The original wrote
                        # `if (not download_url, ...)`, a tuple that is always
                        # truthy; the intended conjunction is spelled out here.
                        if not download_url and item.enclosure and \
                                validators.url(item.enclosure.get('url', '').strip()):
                            download_url = item.enclosure.get('url', '').strip()

                        if not (title and download_url):
                            continue

                        seeders = leechers = None
                        if 'gingadaddy' in self.url:
                            size_regex = re.search(r'\d*.?\d* [KMGT]B', str(item.description))
                            item_size = size_regex.group() if size_regex else -1
                        else:
                            item_size = item.size.get_text(strip=True) if item.size else -1
                            for attr in item.find_all(['newznab:attr', 'torznab:attr']):
                                item_size = attr['value'] if attr['name'] == 'size' else item_size
                                seeders = try_int(attr['value']) if attr['name'] == 'seeders' else seeders
                                leechers = try_int(attr['value']) if attr['name'] == 'peers' else leechers

                        if not item_size or (self.torznab and (seeders is None or leechers is None)):
                            continue

                        size = convert_size(item_size) or -1

                        result = {'title': title, 'link': download_url, 'size': size,
                                  'seeders': seeders, 'leechers': leechers}
                        items.append(result)
                    except Exception:
                        continue

            # Since we aren't using the search string,
            # break out of the search string loop
            if 'tvdbid' in search_params:
                break

        if self.torznab:
            results.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
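# --- Illustration (not part of the providers) --------------------------------
# Roughly the newznab query the params above produce for an episode search.
# Parameter names follow the newznab API convention; the host, apikey, and ids
# are placeholders.
from urllib.parse import urlencode, urljoin

base = 'https://indexer.example/'
params = {'t': 'tvsearch', 'cat': '5030,5040', 'limit': 100, 'offset': 0,
          'tvdbid': 12345, 'season': 5, 'ep': 4, 'apikey': 'XXXX'}
print(urljoin(base, 'api') + '?' + urlencode(params))
# https://indexer.example/api?t=tvsearch&cat=5030%2C5040&limit=100&offset=0&tvdbid=12345&season=5&ep=4&apikey=XXXX
# ------------------------------------------------------------------------------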
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    # https://www.thegft.org/browse.php?view=0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search=arrow
    # Search Params
    search_params = {
        'view': 0,  # BROWSE
        'c4': 1,    # TV/XVID
        'c17': 1,   # TV/X264
        'c19': 1,   # TV/DVDRIP
        'c26': 1,   # TV/BLURAY
        'c37': 1,   # TV/DVDR
        'c47': 1,   # TV/SD
        'search': '',
    }

    # Units
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

    def process_column_header(td):
        result = ''
        if td.a and td.a.img:
            result = td.a.img.get('title', td.a.get_text(strip=True))
        if not result:
            result = td.get_text(strip=True)
        return result

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.debug("Search string: {0}".format(search_string))

            search_params['search'] = search_string
            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                logger.debug("No data returned from provider")
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('div', id='torrentBrowse')
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one release is found
                if len(torrent_rows) < 2:
                    logger.debug("Data returned from provider does not contain any torrents")
                    continue

                labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                # Skip column headers
                for result in torrent_rows[1:]:
                    try:
                        cells = result('td')

                        title = (cells[labels.index('Name')].find('a').find_next('a')['title']
                                 or cells[labels.index('Name')].find('a')['title'])
                        download_url = self.url + cells[labels.index('DL')].find('a')['href']
                        if not all([title, download_url]):
                            continue

                        peers = cells[labels.index('S/L')].get_text(strip=True).split('/', 1)
                        seeders = try_int(peers[0])
                        leechers = try_int(peers[1])

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug("Discarding torrent because it doesn't meet the minimum"
                                             " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        torrent_size = cells[labels.index('Size/Snatched')].get_text(strip=True).split('/', 1)[0]
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
                    except Exception:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))
        for search_string in {*search_strings[mode]}:
            if mode != 'RSS':
                logger.debug("Search string: {0}".format(search_string))

            self.search_params['search'] = search_string

            data = self.get_url(self.urls['search'], params=self.search_params, returns='text')
            if not data:
                continue

            # Check whether the cookie has timed out, which makes the search redirect
            # to the login page. If the login-page text matches, log in again to get a
            # fresh cookie and reload the search data. (The '?' is escaped so it is
            # matched literally instead of being treated as a regex quantifier.)
            if re.search(r'Still need help logging in\?', data):
                logger.debug("Login has timed out. Need to generate new cookie for GimmePeers and search again.")
                self.session.cookies.clear()
                self.login()

                data = self.get_url(self.urls['search'], params=self.search_params, returns='text')
                if not data:
                    continue

            try:
                with BS4Parser(data, "html.parser") as html:
                    torrent_table = html.find('table', class_='browsetable')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if one release is found
                    if len(torrent_rows) < 2:
                        logger.debug("Data returned from provider does not contain any torrents")
                        continue

                    for result in torrent_rows[1:]:
                        cells = result('td')

                        link = cells[1].find('a')
                        download_url = self.urls['download'] % cells[2].find('a')['href']

                        try:
                            title = link.getText()
                            seeders = int(cells[10].getText().replace(',', ''))
                            leechers = int(cells[11].getText().replace(',', ''))
                            torrent_size = cells[8].getText()
                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        if seeders >= 32768 or leechers >= 32768:
                            continue

                        item = {'title': title, 'link': download_url, 'size': size,
                                'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
            except Exception:
                logger.warning("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
def search(self, search_params, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    for mode in search_params:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))
        for search_string in search_params[mode]:
            if mode != "RSS":
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            search_url = self.urls["search"] % (quote(search_string), self.categories)

            data = self.get_url(search_url, returns="text")
            if not data:
                continue

            try:
                with BS4Parser(data, "html5lib") as html:
                    # Continue only if one release is found
                    empty = html.find("h2", text="No .torrents fit this filter criteria")
                    if empty:
                        logger.debug("Data returned from provider does not contain any torrents")
                        continue

                    torrent_table = html.find("table", style="border: none; width: 100%;")
                    if not torrent_table:
                        logger.exception("Could not find table of torrents")
                        continue

                    torrent_rows = torrent_table("tr", class_="browse")

                    for result in torrent_rows:
                        cells = result("td")
                        link = cells[1].find("a", style="font-size: 1.25em; font-weight: bold;")
                        torrent_id = link["href"].replace("details.php?id=", "")

                        try:
                            if link.get("title", ""):
                                title = link["title"]
                            else:
                                title = link.contents[0]

                            download_url = self.urls["download"] % (torrent_id, link.contents[0])
                            seeders = int(cells[9].contents[0])
                            leechers = int(cells[10].contents[0])

                            # Need the size for failed-download handling
                            torrent_size = cells[7].text
                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {"title": title, "link": download_url, "size": size,
                                "seeders": seeders, "leechers": leechers, "hash": ""}
                        if mode != "RSS":
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
            except Exception:
                logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
        results += items

    return results
def search(self, search_strings, age=0, ep_obj=None):
    results = []
    if not self.login():
        return results

    for mode in search_strings:
        items = []
        logger.debug(_("Search Mode: {mode}".format(mode=mode)))
        for search_string in {*search_strings[mode]}:
            if mode != "RSS":
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))

            self.search_params["searchstr"] = search_string
            data = self.get_url(self.urls["search"], params=self.search_params, returns="text")
            if not data:
                logger.debug("URL did not return data")
                continue

            # Trim everything before the results table to speed up parsing
            table_start = data.find('<table class="torrent_table')
            trimmed_data = data[table_start:]
            if not trimmed_data:
                continue

            try:
                with BS4Parser(trimmed_data, "html5lib") as html:
                    result_table = html.find("table", {"id": "torrent_table"})
                    if not result_table:
                        logger.debug("Data returned from provider does not contain any torrents")
                        continue

                    result_tbody = result_table.find("tbody")
                    entries = result_tbody.contents
                    del entries[1::2]

                    for result in entries[1:]:
                        torrent = result("td")
                        if len(torrent) <= 1:
                            break

                        all_as = (torrent[1])("a")

                        try:
                            notinternal = result.find("img", src="/static//common/user_upload.png")
                            if self.ranked and notinternal:
                                logger.debug("Found a user uploaded release, ignoring it..")
                                continue

                            freeleech = result.find("img", src="/static//common/browse/freeleech.png")
                            if self.freeleech and not freeleech:
                                continue

                            title = all_as[2].string
                            download_url = self.urls["base_url"] + all_as[0].attrs["href"]

                            # Default the size so it is always bound even when the
                            # size cell is empty.
                            size = -1
                            torrent_size = result.find("td", class_="nobr").find_next_sibling("td").string
                            if torrent_size:
                                size = convert_size(torrent_size) or -1

                            seeders = try_int((result("td")[6]).text.replace(",", ""))
                            leechers = try_int((result("td")[7]).text.replace(",", ""))
                        except (AttributeError, TypeError):
                            continue

                        if not title or not download_url:
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.debug(
                                    "Discarding torrent because it doesn't meet the minimum"
                                    " seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers))
                            continue

                        item = {"title": title, "link": download_url, "size": size,
                                "seeders": seeders, "leechers": leechers, "hash": ""}
                        if mode != "RSS":
                            logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(
                                title, seeders, leechers))

                        items.append(item)
            except Exception:
                logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
        results += items

    return results
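# --- Illustration (not part of the providers) --------------------------------
# Every search() above repeats the same seed/leech filter followed by a sort on
# seeders. A hedged sketch of that shared logic factored into one helper (the
# helper name is invented; the originals also log each discard):
def filter_and_sort(items, minseed, minleech):
    """Drop under-seeded results, then sort best-seeded first."""
    kept = [i for i in items
            if i.get('seeders', 0) >= minseed and i.get('leechers', 0) >= minleech]
    kept.sort(key=lambda d: d.get('seeders', 0), reverse=True)
    return kept
# ------------------------------------------------------------------------------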