def search(q, cat):
    """Search the RARBG torrentapi for *q* within category *cat*.

    Args:
        q: free-text query; spaces are converted to '+' for the API.
        cat: torrentapi category id/string passed straight through.

    Returns:
        A list of dicts with keys 'name', 'size', 'seeds', 'peers',
        'magnet' and 'hash' (40-char infohash sliced from the magnet),
        filtered through filtor(). Empty list when nothing matched.
    """
    url = 'https://torrentapi.org/pubapi_v2.php'
    token = gettoken()
    # torrentapi throttles requests that arrive too soon after the
    # token call; wait before issuing the search.
    sleep(2)
    payload = {
        'mode': 'search',
        'search_string': q.replace(' ', '+'),
        'category': cat,
        'limit': '100',
        'sort': 'seeders',
        'min_seeders': '1',
        'format': 'json_extended',
        'token': token
    }
    data = requests.get(url, params=payload)
    j = data.json()
    torrents = []
    # BUGFIX: on "no results" the API answers with an 'error' key and no
    # 'torrent_results' at all — .get() avoids the KeyError the old
    # j['torrent_results'] lookup raised.
    for tor in j.get('torrent_results', []):
        name = tor['title']
        if filtor(name, None, q):
            torrents.append({
                'name': name,
                'size': bytesize(tor['size']),
                'seeds': tor['seeders'],
                'peers': tor['leechers'],
                'magnet': tor['download'],
                'hash': tor['download'][20:60]
            })
    return torrents
def search(q, cat):
    """Scrape the first three Zooqle result pages for *q* in *cat*.

    Returns a list of dicts ('name', 'size', 'seeds', 'peers', 'magnet',
    'hash') for every hit that passes filtor().
    """
    # not using &fmt=rss because it ignores categories and sorting params
    selectors = {
        'names': '//a[@class=" small"]',
        'sizes': '//td[@class="smaller"]//text()',
        'seeds': '//div[@title]/div[1]/text()',
        'peers': '//div[@title]/div[2]/text()',
        'mags': '//a[@title="Magnet link"]/@href',
    }
    trees = [
        html.fromstring(requests.get(
            f'https://zooqle.com/search?pg={page_no}&q={q}+category%3A{cat}'
            '&v=t&s=ns&sd=d'
        ).content)
        for page_no in range(1, 4)
    ]

    def gather(path):
        # Flatten one xpath across every fetched page, preserving order.
        return [hit for tree in trees for hit in tree.xpath(path)]

    names = [anchor.text_content() for anchor in gather(selectors['names'])]
    sizes = gather(selectors['sizes'])
    seeds = gather(selectors['seeds'])
    peers = gather(selectors['peers'])
    mags = gather(selectors['mags'])

    torrents = []
    for name, size, seed, peer, mag in zip(names, sizes, seeds, peers, mags):
        if filtor(name, seed, q, True, True, True):
            torrents.append({
                'name': name,
                'size': size,
                'seeds': seed,
                'peers': peer,
                'magnet': mag,
                'hash': mag[20:60]
            })
    return torrents
def search(q, cat):
    """Scrape five LimeTorrents result pages for *q* in *cat*.

    Returns a list of dicts ('name', 'size', 'seeds', 'peers',
    'download', 'hash') for hits accepted by filtor(). Note this site
    yields a .torrent download link, not a magnet.
    """
    paths = {
        'names': '//table[2]//a[2]/@href',
        'sizes': '//td[@class="tdnormal"][2]/text()',
        'seeds': '//table[2]//td[@class="tdseed"]/text()',
        'peers': '//table[2]//td[@class="tdleech"]/text()',
        'links': '//a[@class="csprite_dl14"]/@href'
    }
    names, sizes, seeds, peers, links = [], [], [], [], []
    for pg in range(1, 6):
        page = html.fromstring(requests.get(
            f'https://www.limetorrents.cc/search/{cat}/{q}/seeds/{pg}/'
        ).content)
        # Result hrefs embed the title: strip the leading '/' and the
        # 21-char trailing suffix, then de-slugify the dashes.
        names += [href[1:-21].replace('-', ' ')
                  for href in page.xpath(paths['names'])]
        sizes += page.xpath(paths['sizes'])
        seeds += [count.replace(',', '') for count in page.xpath(paths['seeds'])]
        peers += page.xpath(paths['peers'])
        links += page.xpath(paths['links'])

    torrents = []
    for name, size, seed, peer, link in zip(names, sizes, seeds, peers, links):
        if filtor(name, seed, q, True, True, True):
            torrents.append({
                'name': name,
                'size': size,
                'seeds': seed,
                'peers': peer,
                'download': link,
                'hash': link[29:69]
            })
    return torrents
def search(q, cat):
    """Scrape five Pirate Bay result pages for *q*, keeping only rows
    whose category column contains *cat* (case-insensitive).

    Returns a list of dicts ('name', 'size', 'seeds', 'peers', 'magnet',
    'hash') for hits accepted by filtor().
    """
    sel = {
        'names': '//a[@class="detLink"]/text()',
        'sizes': '//font[@class="detDesc"]/text()',
        'seeds': '//td[@align="right"][1]/text()',
        'peers': '//td[@align="right"][2]/text()',
        'mags': '//a[contains(@href, "magnet")]/@href',
        'cats': '//td[@class="vertTh"]//a[2]/text()',
    }
    pages = [
        html.fromstring(requests.get(
            f'https://thepiratebay.org/search/{q}/{pg}/7/200/'
        ).content)
        for pg in range(5)
    ]

    def pick(path):
        # Collect one xpath across all pages, in page order.
        return [item for page in pages for item in page.xpath(path)]

    names = pick(sel['names'])
    # The detDesc line reads "Uploaded ..., Size X, ..." — keep only X.
    sizes = [desc.split(',')[1][6:] for desc in pick(sel['sizes'])]
    seeds = pick(sel['seeds'])
    peers = pick(sel['peers'])
    mags = pick(sel['mags'])
    cats = pick(sel['cats'])

    torrents = []
    rows = zip(names, sizes, seeds, peers, mags, cats)
    for name, size, seed, peer, mag, row_cat in rows:
        if cat in row_cat.lower() and filtor(name, seed, q):
            torrents.append({
                'name': name,
                'size': size,
                'seeds': seed,
                'peers': peer,
                'magnet': mag,
                'hash': mag[20:60]
            })
    return torrents
def search(q):
    """Scrape three SkyTorrents result pages for *q*.

    Hits must pass both skyfiltor() (site-specific) and filtor().
    Returns a list of dicts ('name', 'size', 'seeds', 'peers', 'magnet',
    'hash').
    """
    rows = {
        'names': '//tbody//a[@title]/text()',
        'sizes': '//tbody//td[2]/text()',
        'seeds': '//tbody//td[last()-1]/text()',
        'peers': '//tbody//td[last()]/text()',
        'mags': '//tbody//a[1]/@href'
    }
    docs = [
        html.fromstring(requests.get(
            f'https://www.skytorrents.in/search/all/ed/{pg}/{q}'
        ).content)
        for pg in range(1, 4)
    ]

    def scan(path):
        # One xpath over every page, flattened in order.
        return [cell for doc in docs for cell in doc.xpath(path)]

    combined = zip(scan(rows['names']), scan(rows['sizes']),
                   scan(rows['seeds']), scan(rows['peers']),
                   scan(rows['mags']))
    torrents = []
    for name, size, seed, peer, mag in combined:
        if skyfiltor(name, q) and filtor(name, seed, None, True, True, True):
            torrents.append({
                'name': name,
                'size': size,
                'seeds': seed,
                'peers': peer,
                'magnet': mag,
                'hash': mag[20:60]
            })
    return torrents
def search(q):
    """Scrape the EZTV search page for *q*.

    Skips rows without a magnet link and rows filed under the 'Other'
    show bucket. Returns a list of dicts ('name', 'size', 'seeds',
    'peers', 'magnet', 'hash'); EZTV exposes no leecher count, so
    'peers' is always '-'.
    """
    page = html.fromstring(
        requests.get(f'https://eztv.ag/search/{q}').content
    )
    hits = zip(
        page.xpath('//tr[@name="hover"]//a[@class="epinfo"]/@title'),
        page.xpath('//tr[@name="hover"]/td[4]/text()'),
        page.xpath('//tr[@name="hover"]/td[6]//text()'),
        page.xpath('//tr[@name="hover"]/td[3]/a[1]/@href'),
        page.xpath('//tr[@name="hover"]/td[1]/a/@title')
    )
    torrents = []
    for title, size, seed, mag, show in hits:
        # Guard: some rows link a .torrent instead of a magnet, and the
        # 'Other' show category is noise — skip both.
        if not mag.startswith('magnet') or 'Other' in show:
            continue
        title = title.replace(f' ({size})', '')
        seed = seed.replace(',', '')
        if filtor(title, seed, q):
            torrents.append({
                'name': title,
                'size': size,
                'seeds': seed,
                'peers': '-',
                'magnet': mag,
                'hash': mag[20:60]
            })
    return torrents