def extract_torrents(data):
    """Parse a search-results page into a list of magnet torrent dicts.

    Each result has "name", "uri" (the magnet link) and "info_hash".
    Returns an empty list when parsing fails so callers always get a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        rows = re.findall(
            "fa fa-download(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</tr>", data, re.S
        )
        # Capture group 2 of each row holds the size cell; strip its markup.
        size = [s[2].replace("\n                            <td>", "") for s in rows]
        lname = re.findall('torrent-filename">(.*?)>(.*?)<', data, re.S)  # list the name
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = (
                size[cm]
                + "B - "
                + unquote_plus(lname[cm][1]).replace(".", " ").title()
                + " - "
                + settings.name_provider
            )
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except Exception:  # narrowed from bare except so KeyboardInterrupt/SystemExit pass through
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None instead of a list
def extract_torrents(data):
    """Generator: yield one torrent dict per '/descargar/' link found in *data*."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall(r'o</strong> .(.*?). &nbsp', data)  # one size string per row
        sent = 0
        for idx, path in enumerate(re.findall(r'/descargar/(.*?)"', data)):
            match = re.search("_(.*?).html", path)
            # Fall back to the raw path when the link has no '_<name>.html' part.
            name = path if match is None else match.group(1)
            name = name.replace('-', ' ').title()
            uri = settings.url + '/torrent/' + path  # create torrent to send Pulsar
            if not filters.verify(name, sizes[idx] + ' MB'):
                provider.log.warning(filters.reason)
            else:
                yield {
                    "name": sizes[idx] + ' MB - ' + name.title() + ' - ' + settings.name_provider,
                    "uri": uri
                }  # return le torrent
                sent += 1
            if sent == settings.max_magnets:  # stop once the configured cap is reached
                break
        provider.log.info('>>>>>>' + str(sent) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
# Example #3
# 0
def extract_torrents(data):
    """Scrape the 'Latest Top Release' section and return a list of torrent dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('Latest Top Release'):]  # drop everything before the listing
        sizes = re.findall('class=ts>(.*?)B<', data)  # one size per listed torrent
        accepted = 0
        results = []
        for idx, (tid, raw_name) in enumerate(re.findall('/torrent/(.*?)/(.*?).html', data)):
            uri = '%s/tor/%s.torrent' % (settings.url, tid)
            label = '%sB - %s - %s' % (sizes[idx], raw_name.replace('-', ' ').title(), settings.name_provider)
            if filters.verify(label, sizes[idx]):
                results.append({"name": label, "uri": uri})  # return le torrent
                accepted += 1
            else:
                provider.log.warning(filters.reason)
            if accepted == settings.max_magnets:  # honour the configured cap
                break
        provider.log.info('>>>>>>' + str(accepted) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
# Example #4
# 0
def extract_torrents(data):
    """Parse a table-based listing and return a list of {"name", "uri"} dicts.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 2)
        size = [table[x][3] for x in range(len(table))]  # column 3 holds the size
        lname = [common.parse_tag(table[x][1])[1][1] for x in range(len(table))]  # column 1 holds the title markup
        cont = 0
        results = []
        for cm, page in enumerate(re.findall('/torrent/(.*?)"', data)):
            # NOTE(review): the doubled '/torrent//torrent/' path looks odd — confirm against the site
            torrent = "http://itorrents.org/torrent//torrent/" + page
            # cm + 1: skips the first table row (presumably a header) — TODO confirm
            name = size[cm + 1] + ' - ' + lname[cm + 1] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm + 1]):
                results.append({"name": name, "uri": torrent})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except so Ctrl-C still works
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
# Example #5
# 0
def extract_torrents(data):
    """Parse a results page into a list of {"name", "uri"} torrent dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        raw_names = re.findall('<span class="highlight">(.*?)</a>', data)  # highlighted titles
        sizes = re.findall('class="grey" nowrap><span>(.*?)</span>', data)  # one size per row
        sent = 0
        results = []
        for idx, tid in enumerate(re.findall('torrent-rating" value="(.*?)"', data)):
            uri = settings.url + '/torrent/' + tid
            title = raw_names[idx]
            # Strip the leftover span markup from the title.
            for tag in ('<span class="highlight">', '<span>', '</span>'):
                title = title.replace(tag, '')
            label = sizes[idx] + ' - ' + title + ' - ' + settings.name_provider
            # Repair resolution strings that were split by a stray space.
            label = label.replace('72 0p', '720p').replace('10 80p', '1080p')
            if filters.verify(label, sizes[idx]):
                results.append({"name": label, "uri": uri})  # return le torrent
                sent += 1
            else:
                provider.log.warning(filters.reason)
            if sent == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(sent) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
# Example #6
# 0
def extract_torrents(data):
    """Generator: yield one {"name", "uri"} dict per '/torrent_download...' link.

    Every row is vetoed when the page carries the site's "0 torrents found"
    banner (see `value_search` below).
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        # True when the site reports an empty result set; ANDed into the filter below
        value_search = 'total <b style="color: #ff0000;">0</b> torrents found on your search query' in data
        size = re.findall('</span></td><td>(.*?)B</td>', data)  # list the size
        cont = 0
        for cm, torrent in enumerate(
                re.findall(r'/torrent_download(.*?).torrent', data)):
            # strip the leading '/<digits>/' id prefix, keeping only the file name part
            name = torrent[len(re.search("/*[0-9]*/", torrent).group()):]
            name = size[cm].replace(
                '&nbsp;', ' ') + 'B' + ' - ' + unquote_plus(
                    name
                ) + ' - ' + settings.name_provider  #find name in the torrent
            torrent = settings.url + '/download' + torrent + '.torrent'  # torrent to send to Pulsar
            # NOTE(review): when value_search is True this logs filters.reason even
            # though the filter itself may have passed — confirm that is intended
            if filters.verify(name, size[cm].replace(
                    '&nbsp;', ' ')) and not value_search:
                yield {"name": name, "uri": torrent}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
def extract_torrents(data):
    """Parse magnets plus interleaved size/seeds/peers columns into result dicts.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('Size (.*?)B', data)  # list the size
        seedsPeers = re.findall('<td align="right">(.*?)</td>', data)  # seeds and peers alternate per row
        seeds = seedsPeers[0:][::2]  # even cells
        peers = seedsPeers[1:][::2]  # odd cells
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info = common.Magnet(magnet)
            name = size[cm].replace('&nbsp;', ' ') + 'B' + ' - ' + info.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm].replace('&nbsp;', ' ')):
                results.append({"name": name, "uri": magnet, "info_hash": info.hash,
                                "size": common.size_int(size[cm].replace('&nbsp;', ' ')),
                                "seeds": int(seeds[cm]), "peers": int(peers[cm]),
                                "language": settings.language})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
# Example #8
# 0
def extract_torrents(data):
    """Generator: yield torrents from a tlist-style table (names, sizes, ids).

    Rows are only emitted when the page contains the text 'Searching torrents'.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        name = re.findall(r'/.page=view&#..;tid=(.*?)>(.*?)</a></td>',
                          data)  # find all names
        size = re.findall(r'<td class="tlistsize">(.*?)</td>',
                          data)  # find all sizes
        cont = 0
        for cm, torrent in enumerate(
                re.findall(r'/.page=download&#..;tid=(.*?)"', data)):
            #find name in the torrent
            # NOTE(review): this test is loop-invariant (data never changes inside
            # the loop) — when the marker is absent the loop spins without yielding
            if re.search(r'Searching torrents', data) is not None:
                if filters.verify(name[cm][1], size[cm]):
                    yield {
                        "name": size[cm] + ' - ' + name[cm][1] + ' - ' +
                        settings.name_provider,
                        "uri": settings.url + '/?page=download&tid=' + torrent
                    }
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
# Example #9
# 0
def extract_torrents(data):
    """Return a list of {"name", "uri"} dicts, resolving each row's download
    link via common.getlinks on the row's detail page URL."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 1)
        cont = 0
        results = []
        for cm, (page, name) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data, re.S)):
            size = table[cm][5]  # column 5 of the parsed table holds the size
            page = settings.url + "/torrent/" + page
            # NOTE(review): getlinks is called once per row — presumably it fetches
            # the detail page, making this loop network-bound; confirm
            torrent = common.getlinks(page)
            name = size + ' - ' + name.replace('\n', '') + ' - ' + settings.name_provider
            if filters.verify(name, size):
                results.append({"name": name, "uri": torrent})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Parse <div class="torrent"> rows into magnet results with seeds/peers.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        results = []
        for row in re.findall('<div class="torrent">(.*?)</div>', data, re.S):
            size = re.search('size">(.*?)<', row).group(1)
            name = ' - ' + size + ' - ' + re.search("title='(.*?)'", row).group(1) + ' - ' + settings.name_provider  # find name in the torrent
            # presumably the info-hash: second path segment of the row's URL — verify against the site
            infohash = re.search('http://(.*?)/(.*?)/', row).group(2)
            torrent = 'magnet:?xt=urn:btih:%s' % infohash
            seeds_peers = re.findall('<b>(.*?)<', row)
            seeds = seeds_peers[0]
            peers = seeds_peers[1]
            if filters.verify(name, size):
                results.append({"name": name, "uri": torrent, "info_hash": infohash,
                                "size": common.size_int(size), "seeds": int(seeds),
                                "peers": int(peers), "language": settings.language
                                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
def extract_torrents(data):
    """Generator: yield torrent dicts built from '/descargar/' download links."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size_list = re.findall(r'o</strong> .(.*?). &nbsp', data)  # find all sizes
        count = 0
        for pos, link in enumerate(re.findall(r'/descargar/(.*?)"', data)):
            found = re.search("_(.*?).html", link)
            if found:
                title = found.group(1)
            else:
                # no '_<name>.html' segment: use the whole link as the title
                title = link
            title = title.replace('-', ' ').title()
            download = settings.url + '/torrent/' + link  # create torrent to send Pulsar
            if filters.verify(title, size_list[pos] + ' MB'):
                yield {"name": size_list[pos] + ' MB - ' + title.title() + ' - ' + settings.name_provider, "uri": download}  # return le torrent
                count += 1
            else:
                provider.log.warning(filters.reason)
            if count == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(count) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
# Example #12
# 0
def extract_torrents(data):
    """Collect magnet links plus their size column; return a list of result dicts.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('class="nobr center">(.*?)B', data)  # list the size
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            size[cm] = size[cm].replace('<span>', '')  # drop stray markup from the size cell
            name = size[cm] + 'B' + ' - ' + info_magnet.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({
                    "name": name,
                    "uri": magnet,
                    "info_hash": info_magnet.hash
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
# Example #13
# 0
def extract_torrents(data):
    """Collect magnet links from the page and return them as a list of dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall('class="nobr center">(.*?)B', data)  # list the size
        picked = 0
        results = []
        for pos, link in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            magnet_info = common.Magnet(link)
            sizes[pos] = sizes[pos].replace('<span>', '')  # drop stray markup from the size cell
            label = sizes[pos] + 'B' + ' - ' + magnet_info.name + ' - ' + settings.name_provider
            if not filters.verify(label, sizes[pos]):
                provider.log.warning(filters.reason)
            else:
                results.append({"name": label, "uri": link, "info_hash": magnet_info.hash})  # return le torrent
                picked += 1
            if picked == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(picked) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
# Example #14
# 0
def extract_torrents(data):
    """Return a list of .torrent download entries parsed from the page."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        # The site renders this banner when the query matched nothing.
        empty_search = 'total <b style="color: #ff0000;">0</b> torrents found on your search query' in data
        sizes = re.findall('</span></td><td>(.*?)B</td>', data)  # list the size
        kept = 0
        results = []
        for pos, path in enumerate(re.findall(r'/torrent_download(.*?).torrent', data)):
            # Strip the leading '/<digits>/' id prefix to leave the file name.
            raw = path[len(re.search("/*[0-9]*/", path).group()):]
            pretty_size = sizes[pos].replace('&nbsp;', ' ')
            label = pretty_size + 'B' + ' - ' + unquote_plus(raw) + ' - ' + settings.name_provider
            uri = settings.url + '/download' + path + '.torrent'  # torrent to send to Pulsar
            if filters.verify(label, pretty_size) and not empty_search:
                results.append({"name": label, "uri": uri})  # return le torrent
                kept += 1
            else:
                provider.log.warning(filters.reason)
            if kept == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(kept) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def Browse_ItemRecenzje(html, metamethod='', content='tvshows', view='515'):
    """Build one playable directory entry per YouTube video item in *html*.

    *html* is a JSON string whose 'items' list follows the YouTube API snippet
    layout. Does nothing when *html* is empty.
    """
    if not html:
        return
    data = byteify(json.loads(html))['items']
    ItemCount = len(data)
    for item in data:  # iterate items directly instead of indexing by range(len(...))
        name = item['snippet']['title']
        plot = clean_html(item['snippet']['description'])
        img = item['snippet']['thumbnails']['high']['url']
        url = item['snippet']['resourceId']['videoId']
        strona = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % url
        fanart = fanartAol
        # Plain assignment: the old try/except around it could never raise.
        labs = {'plot': plot}
        contextLabs = {'title': name, 'year': '0000', 'url': strona, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {'mode': 'PlayFromHost', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    eod()
# Example #16
# 0
def extract_torrents(data):
    """Parse a six-cells-per-row table of .torrent downloads into result dicts.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('Search in :'):]  # drop everything before the result list
        data = data.replace('<strong>', '').replace('</strong>', '').replace('\n', '').replace('\t', '').replace(
            '<font color="#008000">', '').replace('<font color="#000080">', '').replace('</font>', '')
        rows = re.findall('<td class="trow" align="center">(.*?)</td>', data, re.S)
        # Cells repeat in groups of six: index 3 = size, 4 = seeds, 5 = peers.
        size = rows[3::6]
        seeds = rows[4::6]
        peers = rows[5::6]
        cont = 0
        results = []
        for cm, line in enumerate(re.findall(r'/download/(.*?)\.torrent', data)):
            torrent = '%s/torrent_download/%s.torrent' % (settings.url, line.replace(' ', '+'))
            name = size[cm] + ' - ' + line.split('/')[-1].split('_')[0] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": torrent,
                                "size": common.size_int(size[cm]),
                                "seeds": int(seeds[cm]), "peers": int(peers[cm]),
                                "language": settings.language})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # old code assigned `results = []` here but fell through returning None
# Example #17
# 0
def extract_torrents(data):
    """Pair magnet links with name/size/seeds/peers table cells; return result dicts.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        # Stripping the '<td data-href="magnet:?' wrapper keeps those attribute
        # copies from matching the magnet regex below — presumably duplicates; confirm
        data = common.clean_html(data).replace('<td data-href="magnet:?', '')
        lname = re.findall('<td data-href="/torrent/(.*?)/(.*?)"', data)  # (id, slug) per row
        size = re.findall('<td class="size_td">(.*?)</td>', data)  # list the size
        seeds = re.findall('<td class="seed_td">(.*?)</td>', data)  # list the seeds
        peers = re.findall('<td class="leech_td">(.*?)</td>', data)  # list the peers
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = size[cm] + ' - ' + lname[cm][1].replace('-', ' ') + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash,
                                "size": common.size_int(size[cm]), "seeds": int(seeds[cm]), "peers": int(peers[cm]),
                                "language": settings.language
                                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
# Example #18
# 0
def extract_torrents(data):
    """Extract magnets, deriving each display name from the magnet's dn= field.

    Returns [] on any parsing error so callers always receive a list.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('class="tac">[0-9.]*[0-9]..B', data)  # list the size
        size = [s.replace('class="tac">', '') for s in size]
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = re.search('dn=(.*?)&tr=', magnet).group(1)  # find name in the magnet
            name = size[cm] + ' - ' + unquote_plus(name).replace('.', ' ').title() + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": magnet, "info_hash": info_magnet.hash})  #
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # was missing: the error path used to return None
# Example #19
# 0
def extract_torrents(data):
    """Generator: yield one torrent dict per '/torrent/<id>/<name>.html' link."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size_cells = re.findall(r'class=s>(.*?)B</td>', data)  # find all sizes
        emitted = 0
        for row, pair in enumerate(re.findall(r'/torrent/(.*?)/(.*?).html', data)):
            tid, slug = pair
            title = slug.replace('-', ' ').title()
            download = settings.url + '/down/' + tid + '.torrent'  # create torrent to send Pulsar
            if filters.verify(title, size_cells[row]):
                yield {
                    "name": size_cells[row] + 'B - ' + title + ' - ' + settings.name_provider,
                    "uri": download
                }  # torrent
                emitted += 1
            else:
                provider.log.warning(filters.reason)
            if emitted == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(emitted) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
# Example #20
# 0
def extract_torrents(data):
    """Generator: yield torrents from '/torrent/<id>/<name>' links (no size column)."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        produced = 0
        for tid, slug in re.findall(r'/torrent/(.*?)/(.*?)"', data):
            download = settings.url + '/get-torrent/' + tid  # create torrent to send Pulsar
            title = slug.replace('-', ' ')
            if filters.verify(title, None):  # this site exposes no size, hence None
                yield {
                    "name": title.title() + ' - ' + settings.name_provider,
                    "uri": download
                }  # return le torrent
                produced += 1
            else:
                provider.log.warning(filters.reason)
            if produced == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(produced) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
# Example #21
# 0
def extract_torrents(data):
    """Generator variant of the highlight/grey-span parser: yields result dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        lname = re.findall('<span class="highlight">(.*?)</a>',
                           data)  # list names
        size = re.findall('class="grey" nowrap><span>(.*?)</span>',
                          data)  # list the size
        cont = 0
        for cm, torrent in enumerate(
                re.findall('torrent-rating" value="(.*?)"', data)):
            torrent = settings.url + '/torrent/' + torrent
            # strip the leftover span markup from the captured title
            name = size[cm] + ' - ' + lname[cm].replace(
                '<span class="highlight">', '').replace('<span>', '').replace(
                    '</span>', '')
            name += ' - ' + settings.name_provider  #find name in the torrent
            # repair resolution strings that were split by a stray space
            name = name.replace('72 0p', '720p').replace('10 80p', '1080p')
            if filters.verify(name, size[cm]):
                yield {"name": name, "uri": torrent}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
def extract_torrents(data):
    """Generator: parse 'lista2' table rows and yield name/.torrent-URL dicts."""
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        for row in re.findall('<tr class="lista2">(.*?)</tr>', data,
                              re.S):  # get each row from table
            # skip decorative rows that carry no torrent link
            if '/torrent/' in row:
                ntorrent = re.search('/torrent/(.*?)"', row).group(1)
                size = re.search(
                    '<td align="center"  width="100px" class="lista">(.*?)B<',
                    row).group(1)
                name = size + 'B - ' + re.search(
                    'title="(.*?)"',
                    row).group(1) + ' - ' + settings.name_provider
                # the display name is URL-quoted into the download link's f= parameter
                torrent = '%s/download.php?id=%s&f=%s-[rarbg.com].torrent' % (
                    settings.url, ntorrent, provider.quote(name))
                if filters.verify(name, size):
                    yield {"name": name, "uri": torrent}  # return le torrent
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
# Example #23
# 0
def extract_torrents(data):
    """Collect .torrent download links from a results page.

    The download URL only works with the logged-in session, so the
    browser cookies are attached as a `cookie` header on each URI.
    Returns a list of {"name", "uri"} dicts, or [] on parse errors.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data.replace("text-orange", "")
        size = re.findall('<td><span class="">(.*?)<', data)   # one size per row
        lname = re.findall('title="Download:(.*?)"', data)     # one title per row
        cont = 0
        results = []
        for cm, url_torrent in enumerate(re.findall('/download/torrent/(.*?)"', data)):
            name = size[cm] + ' - ' + lname[cm].strip() + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                # Build the .torrent URL and attach session cookies so the
                # site serves the file.
                torrent = settings.url + '/download/torrent/' + url_torrent
                header = ''
                for item in browser.cookies:
                    header += item.name + '=' + item.value + '; '
                uri = provider.append_headers(torrent, {'cookie': header})
                provider.log.info(uri)  # was a bare py2 `print`; log instead
                # Send information to Pulsar
                results.append({"name": name, "uri": uri})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        # bug fix: previously assigned `results = []` but fell through,
        # returning None on error; return the empty list instead.
        return []
Example #24
0
def extract_torrents(data):
    # Generator: the result table sits between the FILENAME header and
    # the twitter footer; links are /torrentdownload.php?id=<tid>.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('FILENAME'):data.find('twitter')]
        sizes = re.findall('class="tsize">(.*?)<', data)
        cont = 0
        for cm, (tid, title) in enumerate(re.findall('-tf(.*?).html">(.*?)<', data)):
            name = sizes[cm] + ' - ' + title + ' - ' + settings.name_provider
            torrent = '%s/torrentdownload.php?id=%s' % (settings.url, tid)
            if filters.verify(name, sizes[cm]):
                yield {"name": name, "uri": torrent}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
Example #25
0
def extract_torrents(data):
    # Generator: rows appear after the 'Added' header; each /torrent/ link
    # maps to a /get/ download URL.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('Added'):].replace('&nbsp;', ' ')
        sizes = re.findall('class="size">(.*?)<', data)  # list the size
        cont = 0
        for cm, (tid, title) in enumerate(re.findall('/torrent/(.*?)/(.*?)"', data)):
            uri = settings.url + '/get/' + tid
            label = sizes[cm] + ' - ' + title + ' - ' + settings.name_provider
            if filters.verify(label, sizes[cm]):
                yield {"name": label, "uri": uri}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
def extract_torrents(data):
    """Extract magnet links plus per-row sizes.

    Returns a list of {"name", "uri", "info_hash"} dicts for Pulsar,
    or [] when the page cannot be parsed.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        # One (label, size) pair per result row.
        size = re.findall('Size:</span>(.*?)<span class="opt-desc">(.*?)<',
                          data, re.S)
        cont = 0
        results = []
        for cm, magnet in enumerate(
                re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info = common.Magnet(magnet)
            name = size[cm][
                1] + ' - ' + info.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm][1]):
                results.append({
                    "name": name,
                    "uri": magnet,
                    "info_hash": info.hash
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # bug fix: was implicitly returning None on error
Example #27
0
def extract_torrents(data):
    # Scrapes /torrent/ detail pages; each one is fetched via
    # common.getlinks() to resolve the actual download link.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall('<div class="coll-4"><span>(.*?)<', data)  # list the size
        cont = 0
        results = []
        for cm, (page, title) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data)):
            detail_url = settings.url + "/torrent/" + page
            torrent = common.getlinks(detail_url)
            # Strip the highlight markup from the title.
            for tag in ("<b>", "</b>", "<strong>", "</strong>"):
                title = title.replace(tag, "")
            label = sizes[cm] + " - " + title + " - " + settings.name_provider
            if filters.verify(label, sizes[cm]):
                results.append({"name": label, "uri": torrent})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
Example #28
0
def extract_torrents(data):
    # Result hrefs follow the '<div id="sz">' marker; names are rebuilt
    # from the link slug, and each link is resolved via common.getlinks().
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('<div id="sz">'):]
        pages = re.findall('a href="(.*?)"', data)
        cont = 0
        results = []
        if pages:
            for cm, sz in enumerate(re.findall('valign="middle" nowrap="nowrap">(.*?)<', data)):
                torrent = common.getlinks(settings.url + pages[cm])
                title = pages[cm].replace("-", " ").replace("/", "").replace(".html", "")
                label = sz + " - " + title + " - " + settings.name_provider
                if filters.verify(label, sz):
                    results.append({"name": label, "uri": torrent})  # return le torrent
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
            provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
Example #29
0
def extract_torrents(data):
    # Builds torcache.net URLs from the info-hashes found in form values;
    # 'n/a' cells are first rewritten so the colour-keyed regexes align.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data).replace('n/a</td></TR>', "color='#0066CC'>0< ").replace('n/a', " color='#00CC00'>0")
        lname = re.findall('action="http://(.*?)/(.*?)/(.*?)/(.*?)"', data)  # list the name
        name = [entry[3].replace('_', ' ') for entry in lname]
        size = re.findall("color='#FF6600'>(.*?)<", data)  # list the size
        cont = 0
        results = []
        for cm, infohash in enumerate(re.findall('value="(.*?)"', data)):
            torrent = 'http://torcache.net/torrent/%s.torrent' % infohash
            name[cm] = size[cm] + ' - ' + name[cm] + ' - ' + settings.name_provider
            if filters.verify(name[cm], size[cm]):
                results.append({"name": name[cm], "uri": torrent, "info_hash": infohash})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    # Generator: magnet scraper whose display name is decoded from the
    # magnet's dn= parameter.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall('</td><td>(.*?)</td><td>(.*?)</td><td>(.*?)B</td>',
                           data)
        cont = 0
        for cm, magnet in enumerate(
                re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            raw = re.search('dn=(.*?)&amp;tr=',
                            magnet).group(1)  # find name in the magnet
            label = (sizes[cm][2] + 'B' + ' - ' +
                     unquote_plus(raw).replace('.', ' ') + ' - ' +
                     settings.name_provider)
            if filters.verify(label, sizes[cm][2]):
                yield {"name": label, "uri": magnet}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
Example #31
0
def Browse_Aktualnosci(html, metamethod='', content='tvshows', view='515'):
    """List news entries from diff-anime.pl as playable directory items.

    For each headline the article page is fetched, the plot, image and
    optional embedded YouTube trailer are scraped, and one non-folder
    item is added to the directory listing.
    """
    if len(html) == 0:
        return
    r = re.compile("<div class='head'><h2><a href='/news/(.+?)'>(.+?)</a>").findall(html)
    ItemCount = len(r)  # total items for the progress indicator
    if len(r) > 0:
        for _url, _name in r:
            strona = 'http://diff-anime.pl' + '/news/' + _url
            html2 = nURL(strona)
            # bug fix: clean_html()'s result was discarded by the next line
            # (`_name2 = _name.replace(...)` overwrote it); chain instead.
            _name2 = clean_html(_name).replace(";", "")
            idx = html2.find("class='news-category' />")
            if idx == -1:
                return
            idx2 = html2.find("</div>", idx)
            if idx2 == -1:
                return
            plot = clean_html(html2[idx:idx2]).replace("class='news-category' />", "")
            # bug fix: `img` was only assigned inside the loop below, so a
            # page with no matching image raised NameError; default first.
            img = ''
            image = re.compile("<div class='content'><img src='(.+?)' alt='(.+?)' class='news-category' />(.+?).<br />").findall(html2)
            if len(image) > 0:
                for foto, plot1, plot2 in image:
                    img = "http://diff-anime.pl" + foto
            fanart = fanartSite
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
            # YouTube trailer link, if the article embeds one.
            _link = re.compile('src="//www.youtube.com/embed/(.+?)"').findall(html2)
            if len(_link) > 0:
                for link in _link:
                    _url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % link
            contextLabs = {'title': _name2, 'year': '0000', 'url': strona, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {'mode': 'PlayFromHost', 'site': site, 'section': section, 'title': _name2, 'url': _url, 'img': img, 'fanart': fanart}
            labs['title'] = _name2
            # bug fix: ItemCount used to be clobbered by len(image)/len(_link)
            # of the *current article*; keep it at the headline count.
            _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('tvshows-view')))
    eod()
def extract_torrents(data):
    """Extract magnets with size/seeds/peers metadata.

    Seeds and peers alternate in the right-aligned cells, so the even
    entries are seeds and the odd ones peers.  Returns a list of result
    dicts for Pulsar, or [] when the page cannot be parsed.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('Size (.*?)B', data)  # list the size
        seedsPeers = re.findall('<td align="right">(.*?)</td>', data)
        seeds = seedsPeers[0:][::2]   # even cells: seeds
        peers = seedsPeers[1:][::2]   # odd cells: peers
        cont = 0
        results = []
        for cm, magnet in enumerate(
                re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info = common.Magnet(magnet)
            # Hoist the entity cleanup that was repeated three times.
            clean_size = size[cm].replace('&nbsp;', ' ')
            name = clean_size + 'B' + ' - ' + info.name + ' - ' + settings.name_provider
            if filters.verify(name, clean_size):
                results.append({
                    "name": name,
                    "uri": magnet,
                    "info_hash": info.hash,
                    "size": common.size_int(clean_size),
                    "seeds": int(seeds[cm]),
                    "peers": int(peers[cm]),
                    "language": settings.language
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # bug fix: was implicitly returning None on error
def extract_torrents(data):
    """Parse a results table row-by-row.

    Per the column indexing below, each row's cells 3/4/5 hold size,
    seeds and peers, and the name cell (index 1) contains three anchors:
    the first carries the title, the third the magnet link.
    Returns a list of result dicts, or [] on parse errors.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        title = []
        lmagnet = []
        size = []
        seeds = []
        peers = []
        for row in re.findall('<tr(.*?)>(.*?)</tr>', data,
                              re.S):  # get each row in the table
            columns = re.findall('<td(.*?)>(.*?)</td>', row[1],
                                 re.S)  # get each column for the row
            if len(columns) > 0:
                size.append(columns[3][1])
                seeds.append(columns[4][1])
                peers.append(columns[5][1])
                aref = re.findall('<a(.*?)href="(.*?)"(.*?)>(.*?)<',
                                  columns[1][1])  # get the aref
                title.append(aref[0][3])
                lmagnet.append(aref[2][1])
        cont = 0
        results = []
        for cm, magnet in enumerate(lmagnet):
            info_magnet = common.Magnet(magnet)
            name = size[cm] + ' - ' + title[cm] + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({
                    "name": name,
                    "uri": magnet,
                    "info_hash": info_magnet.hash
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # bug fix: was implicitly returning None on error
def extract_torrents(data):
    # Generator: HorribleSubs releases — titles precede the magnets in page order.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        titles = re.findall('.HorribleSubs.(.*?)<', data)  # list the names
        cont = 0
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            label = titles[cm] + ' - ' + settings.name_provider
            if filters.verify(label, None):
                yield {"name": label, "uri": magnet}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: name and magnet are captured together from each dl-label entry.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        pairs = re.findall('class="dl-label"><i>(.*?)</i>.*?a href="(magnet:.*?)"', data)
        for label, magnet in pairs:
            if not filters.verify(label, None):
                provider.log.warning(filters.reason)
            else:
                yield {"name": label, "uri": magnet}  # return le torrent
                cont += 1
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
Example #36
0
def extract_torrents(data):
    # Builds magnets from info-hashes in <dl> anchors; stats come from the
    # class-tagged spans, and 'Pending' rows are rewritten to 0 MB first.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = (data[data.find("peers"):]
                .replace("<b>", "")
                .replace("</b>", "")
                .replace('class="pe">Pending', 'class="s">0 MB'))
        sizes = re.findall('class="s">(.*?)</span>', data)
        seeds = re.findall('class="u">(.*?)</span>', data)
        peers = re.findall('class="d">(.*?)</span>', data)
        cont = 0
        results = []
        for cm, (infohash, title) in enumerate(re.findall('<dl><dt><a href="/(.*?)">(.*?)<', data)):
            uri = "magnet:?xt=urn:btih:%s" % infohash
            label = sizes[cm] + " - " + title.replace("-", " ").title() + " - " + settings.name_provider
            if filters.verify(label, sizes[cm]):
                results.append({
                    "name": label,
                    "uri": uri,
                    "info_hash": infohash,
                    "size": common.size_int(sizes[cm]),
                    "seeds": int(seeds[cm].replace(",", "")),
                    "peers": int(peers[cm].replace(",", "")),
                    "language": settings.language,
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Extract magnets from a download-icon table.

    Each matched row yields 8 cells; cell index 2 holds the size (with
    its literal cell padding stripped).  Names come from the
    torrent-filename spans, URL-decoded and title-cased.
    Returns a list of result dicts, or [] on parse errors.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        rows = re.findall(
            'fa fa-download(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</td>(.*?)</tr>',
            data, re.S)
        size = [
            s[2].replace('\n                            <td>', '')
            for s in rows
        ]
        lname = re.findall('torrent-filename">(.*?)>(.*?)<', data,
                           re.S)  # list the name
        cont = 0
        results = []
        for cm, magnet in enumerate(
                re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info_magnet = common.Magnet(magnet)
            name = size[cm] + 'B - ' + unquote_plus(lname[cm][1]).replace(
                '.', ' ').title() + ' - ' + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({
                    "name": name,
                    "uri": magnet,
                    "info_hash": info_magnet.hash
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # bug fix: was implicitly returning None on error
def extract_torrents(data):
    # Generator: '>NEW<' entries carry no size, so a nominal 0.01 GB is
    # substituted before the size regex runs.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data).replace('>NEW<', '>0.01 GB<')
        names = re.findall(r'title="View details:.(.*?)">', data)  # find all names
        sizes = re.findall(r'class="lista">(.*?)B</td>', data)  # find all sizes
        cont = 0
        for cm, path in enumerate(re.findall(r'/get/(.*?)"', data)):
            uri = settings.url + '/get/' + path  # create torrent to send Pulsar
            if filters.verify(names[cm], sizes[cm]):
                yield {"name": sizes[cm] + 'B - ' + names[cm] + ' - ' + settings.name_provider, "uri": uri}
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: magnet scraper; the display name is decoded from dn=.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall('</td><td>(.*?)</td><td>(.*?)</td><td>(.*?)B</td>', data)  # list the size
        cont = 0
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            dn = re.search('dn=(.*?)&amp;tr=', magnet).group(1)  # find name in the magnet
            label = sizes[cm][2] + 'B' + ' - ' + unquote_plus(dn).replace('.', ' ') + ' - ' + settings.name_provider
            if filters.verify(label, sizes[cm][2]):
                yield {"name": label, "uri": magnet}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: maps /torrent/<id>/<slug>.html links to /down/<id>.torrent.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        sizes = re.findall(r'class=s>(.*?)B</td>', data)  # find all sizes
        cont = 0
        for cm, (tid, slug) in enumerate(re.findall(r'/torrent/(.*?)/(.*?).html', data)):
            label = slug.replace('-', ' ').title()
            uri = settings.url + '/down/' + tid + '.torrent'  # create torrent to send Pulsar
            if filters.verify(label, sizes[cm]):
                yield {"name": sizes[cm] + 'B - ' + label + ' - ' + settings.name_provider, "uri": uri}  # torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
Example #41
0
def extract_torrents(data):
    """Yield download links from a tlist-style results page.

    Items are only emitted when the page is a search-results page
    (contains 'Searching torrents').  Behaviour is unchanged; the page
    check is merely hoisted out of the loop since it depends only on
    `data`, not on the current row.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        name = re.findall(r'/.page=view&#..;tid=(.*?)>(.*?)</a></td>', data)  # find all names
        size = re.findall(r'<td class="tlistsize">(.*?)</td>', data)  # find all sizes
        cont = 0
        # Loop-invariant: evaluate the page-type check once, not per item.
        is_search_page = re.search(r'Searching torrents', data) is not None
        for cm, torrent in enumerate(re.findall(r'/.page=download&#..;tid=(.*?)"', data)):
            if is_search_page:
                if filters.verify(name[cm][1], size[cm]):
                    yield {"name": size[cm] + ' - ' + name[cm][1] + ' - ' + settings.name_provider,
                           "uri": settings.url + '/?page=download&tid=' + torrent}
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: direct .torrent links; names may contain <wbr/> soft breaks.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        names = re.findall(r'<div class="link"><a href=".*?">(.*?)</a>', data)  # find all names
        sizes = re.findall(r'<div class="size" title=".*?">(.*?)</div>', data)  # find all sizes
        cont = 0
        for cm, uri in enumerate(re.findall(r'<a href="(.*?)" class="dllink">Torrent</a>', data)):
            clean_name = names[cm].replace('<wbr/>', '')
            if filters.verify(clean_name, sizes[cm]):
                yield {"name": clean_name + ' - ' + sizes[cm] + ' - ' + settings.name_provider, "uri": uri}
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: rows follow the 'Added' header; &nbsp; entities unfolded.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('Added'):].replace('&nbsp;', ' ')  # short the result
        sizes = re.findall('class="size">(.*?)<', data)  # list the size
        cont = 0
        for cm, (tid, title) in enumerate(re.findall('/torrent/(.*?)/(.*?)"', data)):
            uri = settings.url + '/get/' + tid
            label = sizes[cm] + ' - ' + title + ' - ' + settings.name_provider
            if filters.verify(label, sizes[cm]):
                yield {"name": label, "uri": uri}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
def extract_torrents(data):
    # Generator: the result table sits between FILENAME and the twitter footer.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('FILENAME'):data.find('twitter')]
        sizes = re.findall('class="tsize">(.*?)<', data)
        cont = 0
        for cm, (tid, title) in enumerate(re.findall('-tf(.*?).html">(.*?)<', data)):
            label = sizes[cm] + ' - ' + title + ' - ' + settings.name_provider
            uri = '%s/torrentdownload.php?id=%s' % (settings.url, tid)
            if filters.verify(label, sizes[cm]):
                yield {"name": label, "uri": uri}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
Example #45
0
def extract_torrents(data):
    # No sizes available on this site; names are rebuilt from the URL slug.
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        results = []
        for cm, (tid, slug) in enumerate(re.findall(r'/torrent/(.*?)/(.*?)"', data)):
            uri = settings.url + '/get-torrent/' + tid  # create torrent to send Pulsar
            label = slug.replace('-', ' ') + ' - ' + settings.name_provider
            if filters.verify(label, None):
                results.append({"name": label.title(), "uri": uri})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
def extract_torrents(data):
    """Parse magnet links (and their sizes) from a results page.

    Pairs the Nth magnet link with the Nth "Size:" span found in the
    page, so both regexes must match in the same document order —
    presumably guaranteed by the site layout; TODO confirm.

    Returns a list of dicts with "name", "uri" (magnet) and "info_hash";
    returns [] when parsing fails (previously returned None on error,
    inconsistent with the sibling providers).
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        # list of (label, size) tuples; index [1] is the readable size
        size = re.findall('Size:</span>(.*?)<span class="opt-desc">(.*?)<', data, re.S)
        cont = 0
        results = []
        for cm, magnet in enumerate(re.findall(r'magnet:\?[^\'"\s<>\[\]]+', data)):
            info = common.Magnet(magnet)  # parses name + info hash out of the magnet
            name = size[cm][1] + ' - ' + info.name + ' - ' + settings.name_provider
            if filters.verify(name, size[cm][1]):
                results.append({"name": name, "uri": magnet, "info_hash": info.hash})  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []  # keep the return type consistent on failure
def extract_torrents(data):
    """Parse <div class="torrent"> rows into fully populated result dicts.

    Unlike the other providers in this file, each result also includes
    "size", "seeds", "peers" and "language".

    Returns [] when parsing fails (previously fell through returning None).
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        results = []
        for row in re.findall('<div class="torrent">(.*?)</div>', data, re.S):
            size = re.search('size">(.*?)<', row).group(1)
            name = ' - ' + size + ' - ' + re.search("title='(.*?)'", row).group(
                1) + ' - ' + settings.name_provider  # find name in the torrent
            # second path segment of the first http link is the info hash
            infohash = re.search('http://(.*?)/(.*?)/', row).group(2)
            torrent = 'magnet:?xt=urn:btih:%s' % infohash
            # assumes the first two <b> cells are seeds then peers — TODO confirm
            seeds_peers = re.findall('<b>(.*?)<', row)
            seeds = seeds_peers[0]
            peers = seeds_peers[1]
            if filters.verify(name, size):
                results.append({
                    "name": name,
                    "uri": torrent,
                    "info_hash": infohash,
                    "size": common.size_int(size),
                    "seeds": int(seeds),
                    "peers": int(peers),
                    "language": settings.language
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except; still log + notify
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # keep the return type consistent on failure
def extract_torrents(data):
    """Yield torrent dicts parsed from the rarbg-style results table.

    Walks every <tr class="lista2"> row, skips rows without a /torrent/
    link, and yields {"name", "uri"} for each row accepted by the
    configured filters, up to ``settings.max_magnets`` results.
    """
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        cont = 0
        rows = re.findall('<tr class="lista2">(.*?)</tr>', data, re.S)
        for row in rows:
            # Guard clause: only rows that actually link a torrent count.
            if '/torrent/' not in row:
                continue
            ntorrent = re.search('/torrent/(.*?)"', row).group(1)
            size = re.search('<td align="center"  width="100px" class="lista">(.*?)B<', row).group(1)
            title = re.search('title="(.*?)"', row).group(1)
            name = size + 'B - ' + title + ' - ' + settings.name_provider
            torrent = '%s/download.php?id=%s&f=%s-[rarbg.com].torrent' % (settings.url, ntorrent, provider.quote(name))
            if filters.verify(name, size):
                yield {"name": name, "uri": torrent}  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
Example #49
0
def extract_torrents(data):
    """Parse a results page into torcache.net torrent links.

    Names come from the 4th segment of the form action URL, sizes from
    the orange-colored cells, and info hashes from hidden form values —
    the three lists are assumed to be in the same row order (TODO confirm
    against the site markup).

    Returns [] when parsing fails (previously fell through returning None).
    """
    try:
        filters.information()  # print filters settings
        # normalise "n/a" seed/peer placeholders to zero before matching
        data = common.clean_html(data).replace('n/a</td></TR>',
                                               "color='#0066CC'>0< ").replace(
                                                   'n/a', " color='#00CC00'>0")
        lname = re.findall('action="http://(.*?)/(.*?)/(.*?)/(.*?)"',
                           data)  # list the name
        name = [item[3].replace('_', ' ') for item in lname]
        size = re.findall("color='#FF6600'>(.*?)<", data)  # list the size
        cont = 0
        results = []
        for cm, infohash in enumerate(re.findall('value="(.*?)"', data)):
            torrent = 'http://torcache.net/torrent/%s.torrent' % infohash
            name[cm] = size[cm] + ' - ' + name[
                cm] + ' - ' + settings.name_provider  # find name in the torrent
            if filters.verify(name[cm], size[cm]):
                results.append({
                    "name": name[cm],
                    "uri": torrent,
                    "info_hash": infohash
                })  # return le torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) +
                          ' torrents sent to Pulsar<<<<<<<')
        return results
    except Exception:  # narrowed from bare except; still log + notify
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data',
                        header=None,
                        time=5000,
                        image=settings.icon)
        return []  # keep the return type consistent on failure
Example #50
0
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
    """Build a Kodi directory listing of anime-joy.tv series.

    Finds every anchor in *html* whose text starts with *name*, optionally
    scrapes (and caches) a thumbnail and plot for each entry, and adds a
    directory item per series via ``_addon.add_directory``.

    NOTE(review): ``html``, ``data`` and ``item`` are deliberately reused
    (shadowed) inside the scraper branch — keep statement order intact.
    ``metamethod`` and ``view`` are currently unused here.
    """
    if (len(html) == 0):
        return
    # (relative-url, title-suffix) pairs for every matching series anchor
    data = re.compile('<div class="anim"><a href="(.+?)">  ' + name +
                      '(.+?)</a>').findall(html)
    ItemCount = len(data)
    for item in data:
        strona = 'http://anime-joy.tv/' + item[0]  # absolute series-page URL
        name2 = name + item[1]
        ### scraper: fetch thumb/plot only when the "ajoy-thumbs" addon
        ### setting is enabled; results are cached via the scraper module.
        if (tfalse(addst("ajoy-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name2)
            try:
                if (name2 not in scrap):
                    # Not cached yet: download the series page, extract the
                    # image + plot, store them, then re-read the cache entry.
                    html = nURL(strona)
                    htmlimg = GetDataBeetwenMarkers(
                        html, '<div class="animedetay">',
                        '<div class="addcolumn centerbutton">', False)[1]
                    data = re.findall('<img src="(.+?)"/>', htmlimg)
                    ItemCount = len(data)
                    if len(data) > 0:
                        # NOTE(review): keeps only the LAST <img> found.
                        for item in data:
                            img = item
                    else:
                        img = ''
                    htmlplot = GetDataBeetwenMarkers(
                        html, '<strong>Summary:</strong> </br>', '</div>',
                        False)[1]
                    ItemCount = len(htmlplot)
                    # NOTE(review): gates the plot on len(data) (the image
                    # list), not on htmlplot — looks like a bug; confirm.
                    if len(data) > 0:
                        plot = clean_html(htmlplot)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name2, img, plot, '')
                    scrap = scraper.scraper_check(host, name2)
            except:
                scrap = ''
            # Fall back to empty strings when the cache entry is malformed.
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


## Build the plugin URL parameters and the context-menu labels.
        pars = {
            'mode': 'Episodesjoy',
            'site': site,
            'section': section,
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'animejoy':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('links-view'))
    eod()
Example #51
0
def extract_torrents(data, query):
    """Scrape newpct1 result pages, following detail pages and pagination.

    For every result link matching *query* (with '+' remapped to '-'),
    opens the detail page in the shared ``browser``, extracts the final
    torrent download link, and yields {"name", "uri"} for each torrent
    accepted by the filters.  Follows at most one extra results page.

    Fixes: replaced the Python-2-only ``<>`` operator with ``!=`` and the
    ``print`` statements with single-argument ``print(...)`` calls so the
    file parses under Python 3 (output is unchanged under Python 2).
    """
    try:
        query_re = query.replace("+", "-")
        provider.log.info('Query de busqueda : ' + query + " y query retocada : " + query_re)
        filters.information()  # print the filter settings in the log
        data = common.clean_html(data)  # strips HTML comments ('<!--(.*?)-->')
        cont = 0
        last_item = ''
        # Captures (detail-url, date, size) per result row.
        pattern = r'<a\shref=[\'"]?([^\'" >]+%s.*?").*?<span>(.*?)</span>.*?<span>(.*?[GB MB])</span>' % query_re
        provider.log.info('Patron : ' + pattern)
        datos_lista = data
        pagina = 0
        while True:
            pagina = pagina + 1
            if datos_lista == '':
                provider.log.info('Error. No vienen datos ')
            for cm, (item, fecha, tam) in enumerate(re.findall(pattern, datos_lista)):
                # e.g. http://www.newpct1.com/descarga-torrent/peliculas/<slug>
                nombre_largo = item.split("/")[4]
                provider.log.info("Nombre largo: " + nombre_largo)
                if last_item != item and nombre_largo == query.replace('+', '-'):
                    provider.log.info('Item url : ' + item)
                    next_url = item.replace(".com/", ".com/descarga-torrent/")
                    next_url = next_url.replace('"', '')
                    print(next_url)
                    print(next_url.split("/"))
                    nombre = next_url.split("/")[4]
                    browser.open(next_url)
                    provider.log.info('Next Url : ' + next_url)
                    provider.log.info('Status of browser request : ' + str(browser.status))
                    data_next = browser.content
                    # Matches the "Descarga tu Archivo torrent!" button link.
                    pattern_next = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">'
                    link = re.findall(pattern_next, data_next)
                    link_url = link[0]
                    provider.log.info('Link : ' + link_url)
                    provider.log.info('Longitud : ' + str(len(link_url)))
                    provider.log.info('Ultimo caracter : ' + link_url[len(link_url) - 1])
                    if link_url[-1] != "/":
                        link_url = link_url + "/"
                    # Title lives in the next-to-last path segment, after '_'.
                    partes = link_url.split("/")
                    cadena = partes[len(partes) - 2]
                    torrent = cadena.split("_")
                    provider.log.info(torrent)
                    provider.log.info('Torrent : ' + torrent[0])
                    cadena = torrent[1].split(".")
                    titulo = cadena[0]
                    provider.log.info('Titulo : ' + titulo)
                    titulo = titulo + ' - ' + fecha + " - " + tam + " - " + settings.name_provider
                    if filters.verify(titulo, None):
                        yield {"name": titulo, "uri": link_url}  # hand the torrent to Pulsar
                        cont += 1
                    else:
                        provider.log.warning(filters.reason)
                    if cont == settings.max_magnets:  # limit magnets (breaks inner loop only)
                        break
                if nombre_largo != query.replace('+', '-'):
                    provider.log.info('No se contabliza : nombre_largo: ' + nombre_largo + ' y query : ' + query)
                last_item = item
            if pagina == 2:  # follow at most one extra page
                break
            if "Next" in datos_lista:
                provider.log.info('Hay mas paginas. Paginamos')
                url_next_page = re.findall('Next.*?<a href=(.*?)>', datos_lista)[0]
                url_next_page = url_next_page.replace('"', "")
                provider.log.info("Siguiente enlace de paginacion : " + url_next_page)
                browser.content = ''
                browser.open(url_next_page)
                provider.log.info('Status of browser request : ' + str(browser.status))
                if "Next" in browser.content:
                    provider.log.info('<<<<<<<<<<<<ERORRRRR>>>>>>>>>>>>>>>>>>>>>>')
                datos_lista = common.clean_html(browser.content)
            else:
                provider.log.info('<<<<<<<<<<<<<<<<<No hay mas paginas. >>>>>>>>>>>><')
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
    except Exception:
        print("Unexpected error: " + str(sys.exc_info()))
        provider.log.error('>>>>>>> ERROR parsing data from newpct1<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)