Example #1
File: main.py  Project: Inter95/tutvguia
def extract_torrents(data):
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        data = data[data.find('<div id="sz">') :]
        page = re.findall('a href="(.*?)"', data)  # list the torrent page links
        cont = 0
        results = []
        if len(page) > 0:
            for cm, size in enumerate(re.findall('valign="middle" nowrap="nowrap">(.*?)<', data)):
                torrent = common.getlinks(settings.url + page[cm])
                name = page[cm].replace("-", " ").replace("/", "").replace(".html", "")
                name = size + " - " + name + " - " + settings.name_provider
                if filters.verify(name, size):
                    results.append({"name": name, "uri": torrent})  # return the torrent
                    cont += 1
                else:
                    provider.log.warning(filters.reason)
                if cont == settings.max_magnets:  # limit magnets
                    break
            provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
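The page links and the sizes in Example #1 come from two separate re.findall passes that are paired up by index. A minimal, self-contained sketch of that pairing follows; the HTML fragment is hypothetical and only shaped like the markup this provider expects:

import re

html = (
    '<div id="sz">'
    '<a href="/some-show-s01e01.html">link</a>'
    '<td valign="middle" nowrap="nowrap">700 MB</td>'
    '<a href="/another-show-s02e03.html">link</a>'
    '<td valign="middle" nowrap="nowrap">1.4 GB</td>'
    '</div>'
)

pages = re.findall('a href="(.*?)"', html)                            # torrent page links
sizes = re.findall('valign="middle" nowrap="nowrap">(.*?)<', html)    # sizes

for cm, size in enumerate(sizes):
    name = pages[cm].replace("-", " ").replace("/", "").replace(".html", "")
    print(size + " - " + name)
# 700 MB - some show s01e01
# 1.4 GB - another show s02e03

If the two lists come back with different lengths (for instance, a row with no size cell), pages[cm] raises IndexError, which is what the broad try/except in the example ends up swallowing.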
Example #2
File: main.py  Project: Inter95/tutvguia
def extract_torrents(data):
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        table = common.table(data, 1)
        cont = 0
        results = []
        for cm, (page, name) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data, re.S)):
            size = table[cm][5]
            page = settings.url + "/torrent/" + page
            torrent = common.getlinks(page)
            name = size + ' - ' + name.replace('\n', '') + ' - ' + settings.name_provider
            if filters.verify(name, size):
                results.append({"name": name, "uri": torrent})  # return the torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Pulsar<<<<<<<')
        return results
    except:
        provider.log.error('>>>>>>>ERROR parsing data<<<<<<<')
        provider.notify(message='ERROR parsing data', header=None, time=5000, image=settings.icon)
        return []
Example #3
def extract_torrents(data):
    filters.information()  # print filters settings
    sint = common.ignore_exception(ValueError)(int)
    results = []
    cont = 0
    if data is not None:
        soup = BeautifulSoup(data, 'html5lib')
        links = soup.select('ul.clearfix li')
        for link in links:
            a = link.div.select('a')[1]
            name = a.text  # name
            page = settings.value["url_address"] + a["href"]  # page
            size = link.find('div', class_="coll-4").text  # size
            seeds = link.find('div', class_="coll-2").text  # seeds
            peers = link.find('div', class_="coll-3").text  # peers
            # info_magnet = common.Magnet(magnet)
            if filters.verify(name, size):
                magnet = common.getlinks(page)  # magnet
                cont += 1
                results.append({"name": name.strip(),
                                "uri": magnet,
                                # "info_hash": info_magnet.hash,
                                "size": size.strip(),
                                "seeds": sint(seeds),
                                "peers": sint(peers),
                                "language": settings.value.get("language", "en"),
                                "provider": settings.name,
                                "icon": settings.icon,
                                })  # return the torrent
                if cont >= int(settings.value.get("max_magnets", 10)):  # limit magnets
                    break
            else:
                provider.log.warning(filters.reason)
    provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Quasar<<<<<<<')
    return results
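Example #3 walks the listing with BeautifulSoup instead of regular expressions. A small self-contained sketch of the same selection logic against hypothetical markup; 'html.parser' stands in for 'html5lib' here so the sketch needs no extra dependency:

from bs4 import BeautifulSoup

html = '''
<ul class="clearfix">
  <li>
    <div><a href="/comments/1">12</a><a href="/torrent/example-s01e01">Example S01E01</a></div>
    <div class="coll-2">120</div>
    <div class="coll-3">30</div>
    <div class="coll-4">700 MB</div>
  </li>
</ul>
'''

soup = BeautifulSoup(html, "html.parser")
for link in soup.select("ul.clearfix li"):
    a = link.div.select("a")[1]                    # second anchor holds the title
    name = a.text
    page = a["href"]                               # relative page link
    size = link.find("div", class_="coll-4").text
    seeds = link.find("div", class_="coll-2").text
    peers = link.find("div", class_="coll-3").text
    print(name, page, size, seeds, peers)
# Example S01E01 /torrent/example-s01e01 700 MB 120 30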
Example #4
File: main.py  Project: Inter95/tutvguia
def extract_torrents(data):
    try:
        filters.information()  # print filters settings
        data = common.clean_html(data)
        size = re.findall('<div class="coll-4"><span>(.*?)<', data)  # list the size
        # seeds = re.findall('green center">(.*?)<', data)  # list the seeds
        # peers = re.findall('red lasttd center">(.*?)<', data)  # list the peers
        cont = 0
        results = []
        for cm, (page, name) in enumerate(re.findall('/torrent/(.*?)">(.*?)</a>', data)):
            page = settings.url + "/torrent/" + page
            torrent = common.getlinks(page)
            name = name.replace("<b>", "").replace("</b>", "").replace("<strong>", "").replace("</strong>", "")
            name = size[cm] + " - " + name + " - " + settings.name_provider
            if filters.verify(name, size[cm]):
                results.append({"name": name, "uri": torrent})  # return the torrent
                cont += 1
            else:
                provider.log.warning(filters.reason)
            if cont == settings.max_magnets:  # limit magnets
                break
        provider.log.info(">>>>>>" + str(cont) + " torrents sent to Pulsar<<<<<<<")
        return results
    except:
        provider.log.error(">>>>>>>ERROR parsing data<<<<<<<")
        provider.notify(message="ERROR parsing data", header=None, time=5000, image=settings.icon)
        return []
Example #5
def extract_torrents(data):
    filters.information()  # print filters settings
    sint = common.ignore_exception(ValueError)(int)
    results = []
    cont = 0
    if data is not None:
        soup = BeautifulSoup(data, 'html5lib')
        links = soup.find('table', class_="tl").findAll('tr')
        for link in links:
            columns = link.findAll('td')
            if len(columns) == 7:
                name = columns[2].text.strip()  # name
                magnet = settings.value["url_address"] + columns[2].a["href"]  # torrent page (resolved to a magnet below)
                size = columns[3].text.strip()  # size
                seeds = columns[4].text  # seeds
                peers = columns[5].text  # peers
                size = common.Filtering.normalize(size)
                # info_magnet = common.Magnet(magnet)
                if filters.verify(name, size):
                    cont += 1
                    magnet = common.getlinks(magnet)
                    results.append({"name": name,
                                    "uri": magnet,
                                    # "info_hash": info_magnet.hash,
                                    "size": size,
                                    "seeds": sint(seeds),
                                    "peers": sint(peers),
                                    "language": settings.value.get("language", "en"),
                                    "provider": settings.name,
                                    "icon": settings.icon,
                                    })  # return the torrent
                    if cont >= int(settings.value.get("max_magnets", 10)):  # limit magnets
                        break
                else:
                    provider.log.warning(filters.reason)
    provider.log.info('>>>>>>' + str(cont) + ' torrents sent to Quasar<<<<<<<')
    return results
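All of these snippets lean on helpers from the add-on's own common, filters, settings, and provider modules that the excerpts do not show. As an illustration only, here is a plausible sketch of what a helper like common.ignore_exception(ValueError)(int) could look like; it is an assumption about the helper's behavior, not the project's actual code:

def ignore_exception(exception_type, default=None):
    # Hypothetical reimplementation: wrap a callable so that the named
    # exception yields a default value instead of propagating.
    def decorator(func):
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exception_type:
                return default
        return wrapper
    return decorator

sint = ignore_exception(ValueError)(int)
print(sint("42"))     # 42
print(sint("1,234"))  # None -- malformed seed/peer counts degrade gracefully

Wrapping int this way keeps a single malformed seeds or peers cell from aborting the whole scrape.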