def extract_torrents(data):
    """Parse a results page (size/name table + /torrent/ links) into dicts.

    Returns a list of {"name": ..., "uri": ...}; [] on any parse error.
    Fix: the except path previously fell through and returned None,
    unlike the success path and the sibling parsers in this file.
    """
    try:
        filters.information()  # log the active filter settings
        data = common.clean_html(data)
        table = common.table(data, 2)
        size = [table[x][3] for x in range(len(table))]  # size column
        lname = [common.parse_tag(table[x][1])[1][1] for x in range(len(table))]  # name column
        cont = 0
        results = []
        for cm, page in enumerate(re.findall('/torrent/(.*?)"', data)):
            torrent = "http://itorrents.org/torrent//torrent/" + page
            # table rows are offset by one (header row) relative to the links
            name = size[cm + 1] + ' - ' + lname[cm + 1] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm + 1]):
                results.append({"name": name, "uri": torrent})  # accepted: queue for Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def extract_torrents(data):
    """Parse the provider's result page into Pulsar-ready dicts.

    Pairs each size cell with the anchor at the same index, resolves the
    detail page via common.getlinks, and stops once settings.max_magnets
    entries have been accepted. Returns [] on any parse error.
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        data = data[data.find('<div id="sz">'):]  # keep only the listing section
        links = re.findall('a href="(.*?)"', data)
        accepted = 0
        results = []
        if len(links) > 0:
            sizes = re.findall('valign="middle" nowrap="nowrap">(.*?)<', data)
            for idx, size in enumerate(sizes):
                torrent = common.getlinks(settings.url + links[idx])
                label = links[idx].replace("-", " ").replace("/", "").replace(".html", "")
                label = size + " - " + label + " - " + settings.name_provider
                if filters.verify(label, size):
                    results.append({"name": label, "uri": torrent})
                    accepted += 1
                else:
                    provider.log.warning(filters.reason)
                if accepted == settings.max_magnets:  # magnet cap reached
                    break
        provider.log.info(">>>>>>" + str(accepted) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Generator: yield one dict per accepted '/torrent/<id>/<name>/' link.

    Seeds/peers are not on this page, so fixed placeholder values are
    reported. Any parse error is logged and the generator just ends.
    """
    try:
        cont = 0
        for cm, (ntorrent, name) in enumerate(re.findall('/torrent/(.*?)/(.*?)/', data)):
            torrent = '%s/download.php?id=%s' % (settings.url, ntorrent)
            name = name.replace('-', ' ').title() + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name, None):  # no size available on this page
                # NOTE(review): seeds/peers are hard-coded placeholders, not scraped
                yield {"name": name, "uri": torrent, "seeds": 10000, "peers": 5000}  # hand the torrent to Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Scrape /torrent/ anchors plus a size table; return a list of dicts.

    Returns [] on any parse error.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 1)  # table 1 holds the size in column index 5
        cont = 0
        results = []
        for cm, (page, name) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data, re.S)):
            size = table[cm][5]
            page = settings.url + "/torrent/" + page
            torrent = common.getlinks(page)  # resolve the detail page to a download link
            name = size + ' - ' + name.replace('\n', '') + ' - ' + settings.name_provider
            if filters.verify(name, size):
                results.append({"name": name, "uri": torrent})  # accepted: queue for Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Collect magnet links and pair each with the size cell at the same index.

    Returns a list of {"name", "uri", "info_hash"} dicts; [] on parse error.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('class="nobr center">(.*?)B', data)  # list the size
        # seeds = re.findall('green center">(.*?)<', data) # list the seeds
        # peers = re.findall('red lasttd center">(.*?)<', data) # list the peers
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)  # parses display name / info hash from the URI
            size[cm] = size[cm].replace('<span>', '')  # strip stray markup from the size cell
            name = size[cm] + 'B' + ' - ' + info_magnet.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash})  # accepted
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Generator: yield a dict per accepted row of the 'lista2' table."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        for row in re.findall('<tr class="lista2">(.*?)</tr>', data, re.S):  # get each row from table
            if '/torrent/' in row:  # skip rows without a torrent link
                ntorrent = re.search('/torrent/(.*?)"', row).group(1)
                size = re.search(
                    '<td align="center" width="100px" class="lista">(.*?)B<', row).group(1)
                name = size + 'B - ' + re.search(
                    'title="(.*?)"', row).group(1) + ' - ' + settings.name_provider
                # build the direct .torrent download URL; the name must be URL-quoted
                torrent = '%s/download.php?id=%s&f=%s-[rarbg.com].torrent' % (
                    settings.url, ntorrent, provider.quote(name))
                if filters.verify(name, size):
                    yield {"name": name, "uri": torrent}  # hand the torrent to Pulsar
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Generator: parse '-tf<id>.html' links between the FILENAME and twitter markers."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('FILENAME'):data.find('twitter')]  # keep only the results section
        size = re.findall('class="tsize">(.*?)<', data)
        cont = 0
        for cm, ntorrent in enumerate(re.findall('-tf(.*?).html">(.*?)<', data)):
            # ntorrent is a (torrent_id, display_name) tuple
            name = size[cm] + ' - ' + ntorrent[
                1] + ' - ' + settings.name_provider
            torrent = '%s/torrentdownload.php?id=%s' % (settings.url, ntorrent[0])
            if filters.verify(name, size[cm]):
                yield {"name": name, "uri": torrent}  # hand the torrent to Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def search_episode(info):
    """Search a TV episode by IMDB show id against a JSON API provider.

    Matches the requested season/episode in the API response and returns
    one result per available resolution that passes the quality filters.

    Fixes: the notification label used '%s' arguments in episode/season
    order with an 'S..E..' format string (swapped); it now renders
    S<season>E<episode>, matching the per-result names built below.
    Also removed two unused locals (a `values3` lookup and a Magnet parse)
    that had no effect on the result.
    """
    title = ' S%02dE%02d' % (info['season'], info['episode'])  # FIX: was (episode, season)
    if settings.time_noti > 0:
        provider.notify(message='Searching: ' + info['title'].encode("utf-8").title() + title + '...',
                        header=None, time=settings.time_noti, image=settings.icon)
    url_search = "%s/show/%s" % (settings.url, info['imdb_id'])
    provider.log.info(url_search)
    response = provider.GET(url_search)
    results = []
    if str(response.data) != '':
        filters.use_TV()
        filters.information()
        items = provider.parse_json(response.data)
        for episode in items['episodes']:
            if episode['episode'] == info['episode'] and episode['season'] == info['season']:
                for resolution in episode['torrents']:
                    resASCII = resolution.encode('utf-8')
                    name = resASCII + ' - ' + items['title'] + ' - ' + episode['title'].encode('utf-8') \
                        + ' - ' + 'S%02dE%02d' % (info['season'], info['episode'])
                    if filters.included(resASCII, filters.quality_allow) and not filters.included(resASCII, filters.quality_deny):
                        magnet = episode['torrents'][resolution]['url']
                        results.append({'name': name + ' - ' + settings.name_provider, 'uri': magnet})
                    else:
                        provider.log.warning(name + ' ***Blocked File by Keyword, Name or Size***')
    return results
def extract_torrents(data):
    """Build .torrent download URLs from '/torrent_download...' links.

    Returns a list of {"name", "uri"} dicts; [] on parse error or when the
    site reports zero results for the query.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        # True when the site reports zero hits; used below to suppress all results
        value_search = 'total <b style="color: #ff0000;">0</b> torrents found on your search query' in data
        size = re.findall('</span></td><td>(.*?)B</td>', data)  # list the size
        cont = 0
        results = []
        for cm, torrent in enumerate(re.findall(r'/torrent_download(.*?).torrent', data)):
            # strip the leading '/<id>/' segment, leaving the (URL-encoded) file name
            name = torrent[len(re.search("/*[0-9]*/",torrent).group()):]
            name = size[cm].replace(' ',' ') + 'B' + ' - ' + unquote_plus(name) + ' - ' + settings.name_provider  # find name in the torrent
            torrent = settings.url + '/download' + torrent + '.torrent'  # torrent to send to Pulsar
            if filters.verify(name, size[cm].replace(' ',' ')) and not value_search:
                results.append({"name": name, "uri": torrent})  # accepted: queue for Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Pair magnet links with sizes scraped from 'tac' cells.

    Returns a list of {"name", "uri", "info_hash"} dicts.
    Fix: the except path now returns [] (it previously fell through and
    returned None, unlike the success path and sibling parsers).
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        size = re.findall('class="tac">[0-9.]*[0-9]..B', data)  # size cells, tag prefix included
        size = [s.replace('class="tac">', '') for s in size]
        # seeds = re.findall('class="tac green" style="width: 5%;">(.*?)<', data)
        # peers = re.findall('class="tac red" style="width: 5%;">(.*?)<', data)
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = re.search('dn=(.*?)&tr=', magnet).group(1)  # display name from the magnet URI
            name = size[cm] + ' - ' + unquote_plus(name).replace('.', ' ').title() + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def search(query):
    """Build the provider's search URL and parse the result page.

    Fix: replaced the deprecated Python 2 '<>' operator with '!='
    (same semantics, and '<>' is removed in Python 3).
    """
    global filters
    query = query.lower()
    filters.title = query  # used later for filtering by name
    if settings.extra != '':  # FIX: was the deprecated '<>'
        query += ' ' + settings.extra
    if settings.time_noti > 0:
        provider.notify(message="Searching: " + query.encode('utf-8', 'ignore').title() + '...',
                        header=None, time=settings.time_noti, image=settings.icon)
    query = provider.quote_plus(query.lstrip())  # percent-encode spaces etc.
    url_search = "%s/index.php?page=buscar&q=%s&ordenar=Nombre&inon=Ascendente&idioma=1" % (
        settings.url, query)
    provider.log.info(url_search)
    if browser.open(url_search):
        results = extract_torrents(browser.content, query)
    else:
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        results = []
    return results
def extract_torrents(data):
    """Generator over '/descargar/...' links; sizes are given in MB.

    NOTE(review): an identical copy of this parser appears elsewhere in
    this file — keep the two in sync.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall(r'o</strong> .(.*?).  ', data)  # find all sizes
        cont = 0
        for cm, torrent in enumerate(re.findall(r'/descargar/(.*?)"', data)):
            sname = re.search("_(.*?).html", torrent)
            if sname is None:
                name = torrent  # no '_<name>.html' part: fall back to the raw link
            else:
                name = sname.group(1)
            name = name.replace('-', ' ').title()
            torrent = settings.url + '/torrent/' + torrent  # create torrent to send Pulsar
            if filters.verify(name, size[cm] + ' MB'):
                yield {
                    "name": size[cm] + ' MB - ' + name.title() + ' - ' + settings.name_provider,
                    "uri": torrent
                }  # hand the torrent to Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Generator: '/torrent/<id>/<name>.html' links paired with 'class=s' sizes."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall(r'class=s>(.*?)B</td>', data)  # find all sizes
        cont = 0
        for cm, torrent in enumerate(
                re.findall(r'/torrent/(.*?)/(.*?).html', data)):
            # torrent is a (torrent_id, display_name) tuple
            name = torrent[1].replace('-', ' ').title()
            torrent = settings.url + '/down/' + torrent[
                0] + '.torrent'  # create torrent to send Pulsar
            if filters.verify(name, size[cm]):
                yield {
                    "name": size[cm] + 'B - ' + name + ' - ' + settings.name_provider,
                    "uri": torrent
                }  # hand the torrent to Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Pair magnet links with rows of the 'fa fa-download' table.

    Returns a list of {"name", "uri", "info_hash"} dicts.
    Fix: the except path now returns [] (it previously fell through and
    returned None while the success path returns a list).
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        rows = re.findall(
            "fa fa-download(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</tr>",
            data, re.S)
        size = [s[2].replace("\n <td>", "") for s in rows]  # third cell holds the size
        # seeds = [s[5].replace('\n <td>', '') for s in rows]
        # peers = [s[6].replace('\n <td>', '') for s in rows]
        lname = re.findall('torrent-filename">(.*?)>(.*?)<', data, re.S)  # list the name
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = (size[cm] + "B - " + unquote_plus(lname[cm][1]).replace(".", " ").title()
                    + " - " + settings.name_provider)
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def extract_torrents(data):
    """Scrape the 'Latest Top Release' section and collect .torrent links.

    Returns a list of {"name", "uri"} dicts; [] on any parse error.
    """
    try:
        filters.information()  # log active filter settings
        html = common.clean_html(data)
        html = html[html.find('Latest Top Release'):]  # drop everything before the listing
        sizes = re.findall('class=ts>(.*?)B<', html)
        sent = 0
        results = []
        for idx, (tor_id, raw_name) in enumerate(re.findall('/torrent/(.*?)/(.*?).html', html)):
            uri = settings.url + '/tor/' + tor_id + '.torrent'
            label = sizes[idx] + 'B' + ' - ' + raw_name.replace('-', ' ').title() + ' - ' + settings.name_provider
            if filters.verify(label, sizes[idx]):
                results.append({"name": label, "uri": uri})
                sent += 1
            else:
                provider.log.warning(filters.reason)
            if sent == settings.max_magnets:  # magnet cap reached
                break
        provider.log.info('>>>>>>' + str(sent) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Generator: '/torrent/<id>/<name>' links paired with 'class="size"' cells."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('Added'):].replace(' ', ' ')  # short the result
        size = re.findall('class="size">(.*?)<', data)  # list the size
        cont = 0
        for cm, (torrent, name) in enumerate(re.findall('/torrent/(.*?)/(.*?)"', data)):
            torrent = settings.url + '/get/' + torrent
            name = size[
                cm] + ' - ' + name + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name, size[cm]):
                yield {"name": name, "uri": torrent}  # hand the torrent to Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Scrape names, sizes and 'torrent-rating' ids into result dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        lname = re.findall('<span class="highlight">(.*?)</a>', data)  # list names
        size = re.findall('class="grey" nowrap><span>(.*?)</span>', data)  # list the size
        cont = 0
        results = []
        for cm, torrent in enumerate(re.findall('torrent-rating" value="(.*?)"', data)):
            torrent = settings.url + '/torrent/' + torrent
            name = size[cm] + ' - ' + lname[cm].replace('<span class="highlight">', '').replace('<span>', '').replace(
                '</span>', '')
            name += ' - ' + settings.name_provider  # find name in the torrent
            # presumably repairs resolutions split by highlight spans — verify against live pages
            name = name.replace('72 0p', '720p').replace('10 80p', '1080p')
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": torrent})  # accepted: queue for Pulsar
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def search_episode(info):
    """Search for a TV episode by scraping the series page for a .torrent.

    Fix: `results` is now initialized before the branch, so the trailing
    `return results` no longer raises NameError when the initial
    browser.open() fails (the else branch never assigned it).
    """
    info['title'] = common.exception(info['title'])
    if info['absolute_number'] == 0:
        query = info['title'] + ' %sx%02d' % (info['season'], info['episode'])  # regular episode query
    else:
        query = info['title'] + ' %02d' % info['absolute_number']  # anime (absolute numbering)
    query = query.encode('utf-8')
    filters.title = query
    if settings.time_noti > 0:
        provider.notify(message="Searching: " + query.title() + '...', header=None,
                        time=settings.time_noti, image=settings.icon)
    query = provider.quote_plus(query)
    url_search = "%s/buscar.php?busqueda=%s" % (settings.url, query)
    provider.log.info(url_search)
    results = []  # FIX: previously only assigned on the success path
    if browser.open(url_search):
        data = browser.content
        search_serie = re.search('/series/(.*?)/" title', data)
        if search_serie is not None:
            # jump to the series page and look for the torrent there
            url_search = '%s/series/%s/' % (settings.url, search_serie.group(1))
            browser.open(url_search)
            data = browser.content
        cont = 0
        lname = re.search(filters.title.replace(' ', '.') + '(.*?).torrent', data, re.IGNORECASE)
        if lname is not None:
            torrent = '%s/torrents_tor/%s' % (settings.url, lname.group())
            name = lname.group().replace('.torrent', '') + ' S%02dE%02d' % (info['season'], info['episode']) \
                + ' - ' + settings.name_provider  # find name in the torrent
            results.append({"name": name, "uri": torrent, "seeds": 10000, "peers": 5000})
            cont = 1
        provider.log.info('>>>>>> ' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    else:
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
    return results
def extract_torrents(data):
    """Generator over '/descargar/...' links; sizes are given in MB.

    NOTE(review): an identical copy of this parser appears elsewhere in
    this file — keep the two in sync.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall(r'o</strong> .(.*?).  ',data)  # find all sizes
        cont = 0
        for cm, torrent in enumerate(re.findall(r'/descargar/(.*?)"', data)):
            sname = re.search("_(.*?).html",torrent)
            if sname is None:
                name = torrent  # no '_<name>.html' part: fall back to the raw link
            else:
                name = sname.group(1)
            name = name.replace('-',' ').title()
            torrent = settings.url + '/torrent/' + torrent  # create torrent to send Pulsar
            if filters.verify(name, size[cm] + ' MB'):
                yield {"name": size[cm] + ' MB - ' + name.title() + ' - ' + settings.name_provider,
                       "uri": torrent}  # hand the torrent to Pulsar
                cont+= 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    """Parse a data-href table into rich results (size/seeds/peers).

    Fix: the except path now returns [] (it previously fell through and
    returned None while the success path returns a list).
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data).replace('<td data-href="magnet:?', '')
        lname = re.findall('<td data-href="/torrent/(.*?)/(.*?)"', data)  # (id, name) pairs
        size = re.findall('<td class="size_td">(.*?)</td>', data)  # list the size
        seeds = re.findall('<td class="seed_td">(.*?)</td>', data)  # list the seeds
        peers = re.findall('<td class="leech_td">(.*?)</td>', data)  # list the peers
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = size[cm] + ' - ' + lname[cm][1].replace('-', ' ') + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name, size[cm]):
                results.append({"name": name,
                                "uri": magnet,
                                "info_hash": info_magnet.hash,
                                "size": common.size_int(size[cm]),
                                "seeds": int(seeds[cm]),
                                "peers": int(peers[cm]),
                                "language": settings.language})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def extract_torrents(data):
    """Pair magnets with 'Size …B' labels and right-aligned seed/peer cells.

    Fixes: the except path now returns [] instead of None, and the
    seeds/peers slices drop the redundant `[0:]` list copy.
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        size = re.findall('Size (.*?)B', data)  # list the size
        seedsPeers = re.findall('<td align="right">(.*?)</td>', data)  # seeds/peers interleaved
        seeds = seedsPeers[::2]   # even cells (was seedsPeers[0:][::2])
        peers = seedsPeers[1::2]  # odd cells  (was seedsPeers[1:][::2])
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info = common.Magnet(magnet)
            name = size[cm].replace(' ',' ') + 'B' + ' - ' + info.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm].replace(' ', ' ')):
                results.append({"name": name, "uri": magnet, "info_hash": info.hash,
                                "size": common.size_int(size[cm].replace(' ', ' ')),
                                "seeds": int(seeds[cm]), "peers": int(peers[cm]),
                                "language": settings.language})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def extract_torrents(data):
    """Build torcache .torrent URLs from hidden-input info hashes."""
    try:
        filters.information()  # print filters settings
        # the n/a replacements normalize missing seed/peer cells to '0'
        data = common.clean_html(data).replace('n/a</td></TR>', "color='#0066CC'>0< ").replace('n/a', " color='#00CC00'>0")
        lname = re.findall('action="http://(.*?)/(.*?)/(.*?)/(.*?)"', data)  # list the name
        name = [item[3].replace('_', ' ') for item in lname]  # 4th path segment is the file name
        size = re.findall("color='#FF6600'>(.*?)<", data)  # list the size
        #seeds = re.findall("color='#00CC00'>(.*?)<", data) # list the seeds
        #peers = re.findall("color='#0066CC'>(.*?)<", data) # list the peers
        cont = 0
        results = []
        for cm, infohash in enumerate(re.findall('value="(.*?)"', data)):
            torrent = 'http://torcache.net/torrent/%s.torrent' % infohash
            name[cm] = size[cm] + ' - ' + name[cm] + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name[cm],size[cm]):
                results.append({"name": name[cm], "uri": torrent, "info_hash": infohash})  # accepted
                cont+= 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Parse 'torrent' divs into magnet results with seeds/peers.

    Fix: the except path now returns [] (it previously fell through and
    returned None while the success path returns a list).
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        cont = 0
        results = []
        for row in re.findall('<div class="torrent">(.*?)</div>', data, re.S):
            size = re.search('size">(.*?)<', row).group(1)
            # NOTE(review): the name starts with ' - ' in the original — preserved as-is
            name = ' - ' + size + ' - ' + re.search("title='(.*?)'", row).group(1) + ' - ' + settings.name_provider
            infohash = re.search('http://(.*?)/(.*?)/', row).group(2)  # hash is the 2nd path segment
            torrent = 'magnet:?xt=urn:btih:%s' % infohash
            seeds_peers = re.findall('<b>(.*?)<', row)
            seeds = seeds_peers[0]
            peers = seeds_peers[1]
            if filters.verify(name, size):
                results.append({"name": name, "uri": torrent, "info_hash": infohash,
                                "size": common.size_int(size), "seeds": int(seeds),
                                "peers": int(peers), "language": settings.language})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: previously returned None on error
def extract_torrents(data):
    """Walk /torrent/ anchors, resolve each detail page, build result dicts.

    Returns a list of {"name", "uri"} dicts; [] on any parse error.
    """
    try:
        filters.information()  # log active filter settings
        html = common.clean_html(data)
        sizes = re.findall('<div class="coll-4"><span>(.*?)<', html)  # size column
        # seeds = re.findall('green center">(.*?)<', html)
        # peers = re.findall('red lasttd center">(.*?)<', html)
        sent = 0
        results = []
        for idx, (path, raw) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', html)):
            detail = settings.url + "/torrent/" + path
            uri = common.getlinks(detail)  # resolve the detail page to a download link
            clean = raw.replace("<b>", "").replace("</b>", "").replace("<strong>", "").replace("</strong>", "")
            label = sizes[idx] + " - " + clean + " - " + settings.name_provider
            if filters.verify(label, sizes[idx]):
                results.append({"name": label, "uri": uri})
                sent += 1
            else:
                provider.log.warning(filters.reason)
            if sent == settings.max_magnets:  # magnet cap reached
                break
        provider.log.info(">>>>>>" + str(sent) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Parse the 'trow' table into results with size/seeds/peers.

    Fix: the except path now *returns* [] — it previously assigned a
    local `results = []` and fell off the end, returning None.
    """
    try:
        filters.information()  # log active filter settings
        data = common.clean_html(data)
        data = data[data.find('Search in :'):]
        data = data.replace('<strong>', '').replace('</strong>', '').replace('\n', '').replace('\t', '').replace(
            '<font color="#008000">', '').replace('<font color="#000080">', '').replace('</font>', '')
        rows = re.findall('<td class="trow" align="center">(.*?)</td>', data, re.S)
        # cells repeat in groups of 6; positions 3/4/5 are size/seeds/peers
        size = rows[3::6]
        seeds = rows[4::6]
        peers = rows[5::6]
        cont = 0
        results = []
        for cm, line in enumerate(re.findall('/download/(.*?)\.torrent', data)):
            torrent = '%s/torrent_download/%s.torrent' % (settings.url, line.replace(' ', '+'))
            name = size[cm] + ' - ' + line.split('/')[-1].split('_')[0] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": torrent, "size": common.size_int(size[cm]),
                                "seeds": int(seeds[cm]), "peers": int(peers[cm]),
                                "language": settings.language})
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # FIX: was `results = []` with no return (returned None)
def extract_torrents(data): try: filters.information() # print filters settings data = common.clean_html(data) data = data.replace("text-orange", "") size = re.findall('<td><span class="">(.*?)<', data) lname = re.findall('title="Download:(.*?)"', data) cont = 0 results = [] for cm, url_torrent in enumerate(re.findall('/download/torrent/(.*?)"', data)): name = size[cm] + ' - ' + lname[cm].strip() + ' - ' + settings.name_provider if filters.verify(name,size[cm]): # download the .torrent file torrent = settings.url + '/download/torrent/' + url_torrent header = "" for item in browser.cookies: header += item.name + '=' + item.value + '; ' uri = provider.append_headers(torrent, {'cookie' : header}) print uri # Send information to Pulsar results.append({"name": name, "uri": uri}) # return le torrent cont+= 1 else: provider.log.warning(filters.reason) if cont == settings.max_magnets: # limit magnets break provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<') return results except: provider.log.error('>>>>>>>ERROR parsing data<<<<<<<') provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon) results = []
def search_movie(info):
    """Look up a movie by its IMDB id against the provider's JSON API."""
    filters.use_movie()
    if settings.time_noti > 0:
        provider.notify(message='Searching: ' + info['title'].title().encode("utf-8") + '...',
                        header=None, time=settings.time_noti, image=settings.icon)
    target = "%s/v2/list_movies.json?query_term=%s" % (settings.url, info['imdb_id'])
    provider.log.info(target)
    resp = provider.GET(target)
    return extract_magnets_json(resp.data)
def search(query):
    """Run a plain text search against the site and parse the result page."""
    query += ' ' + extra
    if time_noti > 0:
        provider.notify(message='Searching: ' + query + '...', header=None,
                        time=1500, image=icon)
    target = "%s/search/0/0/000/4/%s" % (url, query.replace(' ', '%20'))  # search string
    provider.log.info(target)
    resp = provider.GET(target)
    if resp == (None, None):
        provider.log.error('404 Page not found')
        return []
    return extract_torrents(resp.data)
def search(query):
    """Search ordered by seeders; returns parsed torrents or [] on failure."""
    query += ' ' + settings.extra  # provider-specific extra terms
    query = filters.type_filtering(query, '+')  # also sets filters.title
    target = '%s/?hl=en&safe=off&num=100&start=0&orderby=seeders&s=%s+&filter=2000' % (settings.url, query)
    provider.log.info(target)
    if not browser.open(target):
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def search(query):
    """Search sorted by seeds (descending); returns parsed torrents or []."""
    query += " " + settings.extra  # provider-specific extra terms
    query = filters.type_filtering(query, "+")  # also sets filters.title
    target = "%s/search/?search=%s&srt=seeds&order=desc" % (settings.url, query)
    provider.log.info(target)
    if not browser.open(target):
        provider.log.error(">>>>>>>%s<<<<<<<" % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def search_episode(info):
    """Search for a TV episode by scraping the series page for a .torrent.

    Fix: `results` is now initialized before the branch, so the trailing
    `return results` no longer raises NameError when the initial
    browser.open() fails (the else branch never assigned it).
    """
    info['title'] = common.exception(info['title'])
    if info['absolute_number'] == 0:
        query = info['title'] + ' %sx%02d' % (info['season'], info['episode'])  # regular episode query
    else:
        query = info['title'] + ' %02d' % info['absolute_number']  # anime (absolute numbering)
    query = query.encode('utf-8')
    filters.title = query
    if settings.time_noti > 0:
        provider.notify(message="Searching: " + query.title() + '...', header=None,
                        time=settings.time_noti, image=settings.icon)
    query = provider.quote_plus(query)
    url_search = "%s/buscar.php?busqueda=%s" % (settings.url, query)
    provider.log.info(url_search)
    results = []  # FIX: previously only assigned on the success path
    if browser.open(url_search):
        data = browser.content
        search_serie = re.search('/series/(.*?)/" title', data)
        if search_serie is not None:
            # jump to the series page and look for the torrent there
            url_search = '%s/series/%s/' % (settings.url, search_serie.group(1))
            browser.open(url_search)
            data = browser.content
        cont = 0
        lname = re.search(filters.title.replace(' ', '.') + '(.*?).torrent', data, re.IGNORECASE)
        if lname is not None:
            torrent = '%s/torrents_tor/%s' % (settings.url, lname.group())
            name = lname.group().replace('.torrent', '') + ' S%02dE%02d' % (info['season'], info['episode']) \
                + ' - ' + settings.name_provider  # find name in the torrent
            results.append({"name": name, "uri": torrent, "seeds": 10000, "peers": 5000})
            cont = 1
        provider.log.info('>>>>>> ' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    else:
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
    return results
def search(query):
    """Search via the index.php torrents endpoint; returns torrents or []."""
    query += ' ' + settings.extra  # provider-specific extra terms
    query = filters.type_filtering(query, '+')  # also sets filters.title
    target = "%s/index.php?page=torrents&search=%s&category=0&options=0&active=1&order=5&by=2" % (settings.url, query)
    provider.log.info(target)
    if not browser.open(target):
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def search(info):
    """Query the provider with the raw search string supplied by Pulsar.

    Notifies the user, quotes the query, and parses the response with
    extract_torrents(). Returns [] when the GET failed.
    """
    query = info['query'] + ' ' + extra  # add the extra information
    provider.notify(message="Searching: " + query.title() + '...',
                    header=None, time=1500, image=icon)
    url_search = "%s/search/?search=%s&srt=seeds&order=desc" % (url, provider.quote_plus(query))
    provider.log.info(url_search)
    response = provider.GET(url_search)
    if response == (None, None):  # provider.GET's failure sentinel
        provider.log.error('404 Page not found')
        return []
    return extract_torrents(response.data)
def search(query):
    """Run a general query against the provider's /search/all endpoint.

    Appends the configured extra terms, applies type filtering, and hands
    the results page to extract_torrents(). Returns [] on a failed open.
    """
    # add the extra information, then check the type filter / set up filters.title
    full_query = filters.type_filtering(query + ' ' + settings.extra, '-')
    url_search = "%s/search/all/%s" % (settings.url, full_query)
    provider.log.info(url_search)
    if not browser.open(url_search):
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def search(query):
    """Query the provider's /files endpoint, encoding spaces by hand.

    Optionally notifies the user, then parses the response with
    extract_torrents(). Returns [] when the GET failed.
    """
    full_query = query + ' ' + extra
    if time_noti > 0:
        provider.notify(message="Searching: " + full_query + '...',
                        header=None, time=time_noti, image=icon)
    url_search = ("%s/files/?category=0&subcategory=All&quality=All&seeded=2&external=2&query=%s&to=1&uid=0&sort=S"
                  % (url, full_query.replace(' ', '%20')))
    provider.log.info(url_search)
    response = provider.GET(url_search)
    if response == (None, None):  # provider.GET's failure sentinel
        provider.log.error('404 Page not found')
        return []
    return extract_torrents(response.data)
def search(query):
    """Run a general query against the provider's /torrents endpoint.

    Appends the configured extra terms, applies type filtering, strips the
    season/episode token, and scrapes the results page. Returns [] on a
    failed open.
    """
    # add the extra information, then check the type filter / set up filters.title
    full_query = filters.type_filtering(query + ' ' + settings.extra, '+')
    # NOTE(review): pattern matches 's' + 2 chars + 'e' + 3 chars — presumably
    # drops an "s01e01 " token including its trailing space; confirm intent.
    full_query = re.sub('s..e...', '', full_query)
    url_search = "%s/torrents?in=1&search=%s&tags=&type=0&language=0&subtitle=0&discount=0&rip_type=0&video_quality=0&tv_type=0&uploader=" % (settings.url, full_query)
    provider.log.info(url_search)
    if not browser.open(url_search):
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def search_movie(info):
    """Look up a movie by title/year and return the magnets from its page.

    info: dict with at least 'title' and 'year'.
    Returns a list of result dicts; [] when the movie is unknown or the
    request fails.
    """
    filters.use_movie()  # switch the shared filters into movie mode
    if settings.time_noti > 0:
        provider.notify(message='Searching: ' + info['title'].title().encode("utf-8") + '...',
                        header=None, time=settings.time_noti, image=settings.icon)
    movie_id = findID(info["title"], info["year"])  # renamed from 'id' (shadowed builtin)
    if movie_id == "0":  # findID's not-found sentinel
        return []
    url_search = "%s/movies/%s" % (settings.url, movie_id)
    provider.log.info(url_search)
    response = provider.GET(url_search)
    # fix: guard against a failed GET (same (None, None) idiom as the other
    # providers in this file) instead of crashing on response.data
    if response == (None, None):
        provider.log.error('404 Page not found')
        return []
    return extract_magnets_json(response.data)
def search(query):
    """Token-based search: fetch an API token first, then run the query.

    Appends the configured extra terms, applies type filtering, and parses
    the results with extract_torrents(). Returns [] on a failed open.
    """
    # add the extra information, then check the type filter / set up filters.title
    full_query = filters.type_filtering(query + ' ' + settings.extra, '+')
    # First request hands back a JSON blob containing the session token.
    browser.open("%s?get_token=get_token&app_id=script.pulsar.rarbg-mc" % settings.url)
    items = provider.parse_json(browser.content)
    url_search = ("%s?mode=search&search_string=%s&app_id=script.pulsar.rarbg-mc&token=%s"
                  % (settings.url, full_query, items["token"]))  # change in each provider
    provider.log.info(url_search)
    if not browser.open(url_search):
        provider.log.error('>>>>>>>%s<<<<<<<' % browser.status)
        provider.notify(message=browser.status, header=None, time=5000, image=settings.icon)
        return []
    return extract_torrents(browser.content)
def extract_torrents(data):
    """Parse the provider's HTML results table into Pulsar result dicts.

    Walks every <tr>, pulling the size cell and the name/magnet anchors,
    applies the user filters, and emits at most settings.max_magnets
    entries of {"name", "uri", "info_hash"}.
    Returns [] (after notifying the user) when parsing fails.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        title = []
        lmagnet = []
        size = []
        # NOTE(review): the former seeds/peers lists were collected but never
        # used in the results, so they have been dropped.
        for row in re.findall('<tr(.*?)>(.*?)</tr>', data, re.S):  # each table row
            columns = re.findall('<td(.*?)>(.*?)</td>', row[1], re.S)  # its cells
            if len(columns) > 0:
                # assumed column layout: 1 = name/magnet anchors, 3 = size — TODO confirm
                size.append(columns[3][1])
                aref = re.findall('<a(.*?)href="(.*?)"(.*?)>(.*?)<', columns[1][1])
                title.append(aref[0][3])    # first anchor's text = torrent name
                lmagnet.append(aref[2][1])  # third anchor's href = magnet link
        cont = 0
        results = []
        for cm, magnet in enumerate(lmagnet):
            info_magnet = common.Magnet(magnet)
            name = size[cm] + ' - ' + title[cm] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name,
                                "uri": magnet,
                                "info_hash": info_magnet.hash})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # fix: callers expect a list; previously fell through returning None