Example #1
import re
from datetime import datetime

from lxml import html

# extract_text, int_or_zero, get_filesize_mul and base_url are assumed to be
# defined elsewhere in the enclosing engine module.


def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    for result in dom.xpath('//div[@class="results"]/dl'):
        name_cell = result.xpath('./dt')[0]
        title = extract_text(name_cell)

        # skip rows that do not contain a link to a torrent
        links = name_cell.xpath('./a')
        if len(links) != 1:
            continue

        # extract url and remove a slash in the beginning
        link = links[0].attrib.get('href').lstrip('/')

        seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
        leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(
            ',', '')

        params = {
            'url': base_url + link,
            'title': title,
            'seed': int_or_zero(seed),
            'leech': int_or_zero(leech),
            'template': 'torrent.html'
        }

        # let's try to calculate the torrent size
        try:
            size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
            size, suffix = size_str.split()
            params['filesize'] = int(size) * get_filesize_mul(suffix)
        except Exception:
            pass

        # does our link contain a valid SHA1 sum?
        if re.compile('[0-9a-fA-F]{40}').match(link):
            # add a magnet link to the result
            params['magnetlink'] = 'magnet:?xt=urn:btih:' + link

        # extract and convert creation date
        try:
            date_str = result.xpath(
                './dd/span[@class="a"]/span')[0].attrib.get('title')
            # Fri, 25 Mar 2016 16:29:01
            date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
            params['publishedDate'] = date
        except Exception:
            pass

        results.append(params)

    return results
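
The snippet above leans on two small numeric helpers whose implementation lives elsewhere in the enclosing module. A minimal sketch of what they might look like, inferred only from how they are called here (hypothetical, not the module's actual code):

def int_or_zero(num_str):
    # Parse an integer, falling back to 0 on empty or malformed input.
    try:
        return int(num_str)
    except (TypeError, ValueError):
        return 0


def get_filesize_mul(suffix):
    # Map a size suffix ('B', 'KB', 'MB', 'GB', 'TB') to a byte multiplier;
    # unknown suffixes fall back to 1. Hypothetical 1024-based mapping.
    return {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }.get(suffix.upper(), 1)
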
Example #2
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    for result in dom.xpath('//div[@class="results"]/dl'):
        name_cell = result.xpath('./dt')[0]
        title = extract_text(name_cell)

        # skip rows that do not contain a link to a torrent
        links = name_cell.xpath('./a')
        if len(links) != 1:
            continue

        # extract url and remove a slash in the beginning
        link = links[0].attrib.get('href').lstrip('/')

        seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
        leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(',', '')

        params = {
            'url': base_url + link,
            'title': title,
            'seed': int_or_zero(seed),
            'leech': int_or_zero(leech),
            'template': 'torrent.html'
        }

        # let's try to calculate the torrent size
        try:
            size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
            size, suffix = size_str.split()
            params['filesize'] = int(size) * get_filesize_mul(suffix)
        except Exception:
            pass

        # does our link contain a valid SHA1 sum?
        if re.compile('[0-9a-fA-F]{40}').match(link):
            # add a magnet link to the result
            params['magnetlink'] = 'magnet:?xt=urn:btih:' + link

        # extract and convert creation date
        try:
            date_str = result.xpath('./dd/span[@class="a"]/span')[0].attrib.get('title')
            # Fri, 25 Mar 2016 16:29:01
            date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
            params['publishedDate'] = date
        except Exception:
            pass

        results.append(params)

    return results
Example #3
def response(resp):
    results = []

    dom = html.fromstring(resp.text)
    rows = dom.xpath(
        '//table[@class="listing"]//tr[contains(@class, "category_0")]')

    # check if there are no results or page layout was changed so we cannot parse it
    # currently there are two rows for each result, so total count must be even
    if len(rows) == 0 or len(rows) % 2 != 0:
        return []

    # regular expression for parsing torrent size strings
    size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)

    # processing the results, two rows at a time
    for i in range(0, len(rows), 2):
        # parse the first row
        name_row = rows[i]

        links = name_row.xpath('./td[@class="desc-top"]/a')
        params = {
            'template': 'torrent.html',
            'url': links[-1].attrib.get('href'),
            'title': extract_text(links[-1])
        }
        # I have not yet seen any torrents without magnet links, but
        # it's better to be prepared to stumble upon one some day
        if len(links) == 2:
            magnet = links[0].attrib.get('href')
            if magnet.startswith('magnet'):
                # okay, we have a valid magnet link, let's add it to the result
                params['magnetlink'] = magnet

        # no more info in the first row, start parsing the second one
        info_row = rows[i + 1]
        desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
        for item in desc.split('|'):
            item = item.strip()
            if item.startswith('Size:'):
                try:
                    # ('1.228', 'GB')
                    groups = size_re.match(item).groups()
                    multiplier = get_filesize_mul(groups[1])
                    params['filesize'] = int(multiplier * float(groups[0]))
                except Exception:
                    pass
            elif item.startswith('Date:'):
                try:
                    # Date: 2016-02-21 21:44 UTC
                    date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')
                    params['publishedDate'] = date
                except Exception:
                    pass
            elif item.startswith('Comment:'):
                params['content'] = item
        stats = info_row.xpath('./td[@class="stats"]/span')
        # has the layout not changed yet?
        if len(stats) == 3:
            params['seed'] = int_or_zero(extract_text(stats[0]))
            params['leech'] = int_or_zero(extract_text(stats[1]))

        results.append(params)

    return results
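
To make the size handling above concrete, here is the same regex applied to a value shaped like the one in the inline comment (the exact string format on the page is an assumption):

import re

size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
match = size_re.match('Size: 1.228GB')  # hypothetical sample string
print(match.groups())                   # ('1.228', 'GB')
# the engine then multiplies float('1.228') by the byte multiplier for 'GB'
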
Example #4
def response(resp):
    results = []

    dom = html.fromstring(resp.text)
    rows = dom.xpath('//table[@class="listing"]//tr[contains(@class, "category_0")]')

    # check if there are no results or page layout was changed so we cannot parse it
    # currently there are two rows for each result, so total count must be even
    if len(rows) == 0 or len(rows) % 2 != 0:
        return []

    # regular expression for parsing torrent size strings
    size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)

    # processing the results, two rows at a time
    for i in range(0, len(rows), 2):
        # parse the first row
        name_row = rows[i]

        links = name_row.xpath('./td[@class="desc-top"]/a')
        params = {
            'template': 'torrent.html',
            'url': links[-1].attrib.get('href'),
            'title': extract_text(links[-1])
        }
        # I have not yet seen any torrents without magnet links, but
        # it's better to be prepared to stumble upon one some day
        if len(links) == 2:
            magnet = links[0].attrib.get('href')
            if magnet.startswith('magnet'):
                # okay, we have a valid magnet link, let's add it to the result
                params['magnetlink'] = magnet

        # no more info in the first row, start parsing the second one
        info_row = rows[i + 1]
        desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
        for item in desc.split('|'):
            item = item.strip()
            if item.startswith('Size:'):
                try:
                    # ('1.228', 'GB')
                    groups = size_re.match(item).groups()
                    multiplier = get_filesize_mul(groups[1])
                    params['filesize'] = int(multiplier * float(groups[0]))
                except Exception:
                    pass
            elif item.startswith('Date:'):
                try:
                    # Date: 2016-02-21 21:44 UTC
                    date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')
                    params['publishedDate'] = date
                except Exception:
                    pass
            elif item.startswith('Comment:'):
                params['content'] = item
        stats = info_row.xpath('./td[@class="stats"]/span')
        # has the layout not changed yet?
        if len(stats) == 3:
            params['seed'] = int_or_zero(extract_text(stats[0]))
            params['leech'] = int_or_zero(extract_text(stats[1]))

        results.append(params)

    return results
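
Since each response() above only reads resp.text, the parsers can be exercised locally with any object exposing that attribute; a hypothetical stand-in for the HTTP response object the engine normally receives:

class FakeResponse:
    # Minimal stub: the parsers only access resp.text.
    def __init__(self, text):
        self.text = text


with open('listing.html', encoding='utf-8') as f:  # a saved results page (hypothetical file)
    page = f.read()

for item in response(FakeResponse(page)):
    print(item['title'], item.get('seed'), item.get('leech'))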