Example #1
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//*[@role="navigation"]//tr/td[@class="YyVfkd"]/text()'))
    results = [{
        'url':
        extract_first(result.xpath("./a/@href")),
        'title':
        join_all(result.xpath("./a//div[@role='heading']/text()")),
        'preview_text':
        join_all(result.xpath('.//div[@class="Y3v8qd"]/text()')),
        'publisher':
        extract_first(result.xpath('.//div[@class="XTjFC WF4CUc"]/text()')),
        'publish_date':
        publish_time(
            extract_first(
                result.xpath('.//span[@class="WG9SHc"]/span/text()'))),
        'page_number':
        page_number,
    } for result in root.xpath('//div[@class="dbsr"]')]
    next_page_url = extract_first(root.xpath('//a[@id="pnnext"]/@href'))
    if next_page_url:
        next_page_url = 'https://www.google.com' + next_page_url
    return results, next_page_url
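All sixteen variants share the same small surface: lxml's fromstring for parsing, extract_first/join_all for pulling strings out of XPath results, and date helpers (publish_time, publish_date_from_time) whose behavior the snippets don't show. The block below is a minimal sketch of what the examples assume is in scope; the two helper bodies are inferred from the call sites, not taken from the original project, and the real versions may differ (e.g. returning None instead of '').

import re
from typing import Dict, List, Optional, Tuple
from urllib.parse import parse_qs, quote, urlsplit

from lxml.html import fromstring


def extract_first(values: List[str], default: str = '') -> str:
    # First XPath match, stripped; falls back to '' when nothing matched
    # (the call sites only rely on the result being falsy in that case).
    return values[0].strip() if values else default


def join_all(values: List[str]) -> str:
    # Concatenate every XPath text match into a single string.
    return ''.join(values)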
Example #2
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//*[@role="navigation"]//tr/td[@class="YyVfkd"]/text()'))
    results = [{
        'url':
        extract_first(result.xpath("./a/@href")),
        'title':
        join_all(result.xpath("./a/h3/text()")),
        'preview_text':
        join_all(
            result.xpath(
                "./following-sibling::*[@class='s']//*[@class='st']//test()")),
        'search_url':
        search_url,
        'page_number':
        page_number,
    } for result in root.xpath('//*[@class="r"]')]
    print(f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(root.xpath('//a[@id="pnnext"]/@href'))
    if next_page_url:
        next_page_url = 'https://www.google.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #3
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//div[@class="compPagination"]/strong/text()'))
    results = []
    for result in root.xpath('//ol[contains(@class,"searchCenterMiddle")]/li'):
        url = extract_first(
            result.xpath(".//h3[contains(@class,'title')]//a/@href"))
        if url and 'news.search.yahoo' not in url and 'video.search.yahoo' not in url:
            results.append({
                'url':
                url,
                'title':
                join_all(
                    result.xpath(
                        ".//h3[contains(@class,'title')]//a//text()")),
                'preview_text':
                join_all(
                    result.xpath(".//div[@class='compText aAbs']//text()")),
                'search_url':
                search_url,
                'page_number':
                page_number,
            })
    print(f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(root.xpath("//*[@class='next']/@href"))
    if next_page_url:
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #4
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//a[@class="sb_pagS sb_pagS_bp b_widePag sb_bp"]/text()'))
    results = [{
        'url':
        extract_first(result.xpath("./h2/a/@href")),
        'title':
        join_all(result.xpath("./h2/a//text()")),
        'preview_text':
        join_all(result.xpath("./*[@class='b_caption']/p//text()")),
        'search_url':
        search_url,
        'page_number':
        page_number,
    } for result in root.xpath("//*[@class='b_algo']")]
    print(f"Extracted {len(results)} results from page {page_number}.")
    # extract url of next page.
    next_page_url = extract_first(root.xpath("//a[@title='Next page']/@href"))
    if next_page_url:
        next_page_url = 'https://www.bing.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #5
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//div[@class="compPagination"]//strong/text()'))
    results = []
    for result in root.xpath('//ol[contains(@class,"searchCenterMiddle")]/li'):
        url = extract_first(
            result.xpath(".//h3[contains(@class,'title')]//a/@href"))
        if url and 'news.search.yahoo' not in url and 'video.search.yahoo' not in url:
            results.append({
                'url':
                url,
                'title':
                join_all(
                    result.xpath(
                        ".//h3[contains(@class,'title')]//a//text()")),
                'preview_text':
                join_all(
                    result.xpath(".//div[@class='compText aAbs']//text()"))
                or join_all(
                    result.xpath(".//span[@class=' fc-falcon']//text()")),
                'page_number':
                page_number,
            })
    next_page_url = extract_first(root.xpath("//*[@class='next']/@href"))
    return results, next_page_url
Example #6
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    results = []
    params = parse_qs(urlsplit(page_url).query)
    current_page = str((int(params["first"][0]) - 1) // 10 + 1)

    for result in root.xpath(
            '//div[@class="news-card newsitem cardcommon b_cards2"]'):
        pub_time = extract_first(
            result.xpath('.//div[@class="source"]//span/@aria-label'))
        publish_date = re.search(r'\d{1,2}\/\d{1,2}\/\d{4}', pub_time)
        results.append({
            'url':
            extract_first(result.xpath('.//a[@class="title"]/@href')),
            'title':
            extract_first(result.xpath('.//a[@class="title"]/text()')),
            'preview_text':
            extract_first(result.xpath('.//div[@class="snippet"]/@title')),
            'publisher':
            extract_first(
                result.xpath(
                    './/div[@class="source"]//a[@aria-label]/text()')),
            'publish_date':
            publish_date.group() if publish_date else publish_time(pub_time),
            'page_number':
            current_page,
        })

    next_page_url = (f'https://www.bing.com/news/infinitescrollajax'
                     f'?q={quote(params["q"][0])}&InfiniteScroll=1'
                     f'&first={int(params["first"][0]) + 10}')

    return results, next_page_url
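Example #6 targets Bing News' infinite-scroll endpoint, which exposes no pagination markup, so the page number is derived from the first query parameter instead (the endpoint advances first by 10 per batch). A quick standalone check of that arithmetic, using an illustrative query:

from urllib.parse import parse_qs, urlsplit

url = ('https://www.bing.com/news/infinitescrollajax'
       '?q=python&InfiniteScroll=1&first=21')  # q=python is illustrative
params = parse_qs(urlsplit(url).query)
# first=1 -> page 1, first=11 -> page 2, first=21 -> page 3, ...
print((int(params['first'][0]) - 1) // 10 + 1)  # 3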
Example #7
def extract_search_results(html: str, page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(root.xpath(
        '//li[@class="PartialWebPagination-condensed PartialWebPagination-pgsel PartialWebPagination-button"]/text()'))
    results = [
        {
            'url': extract_first(result.xpath('.//a[@class="PartialSearchResults-item-title-link result-link"]/@href')),
            'title': extract_first(result.xpath('.//a[@class="PartialSearchResults-item-title-link result-link"]/text()')),
            'preview_text': extract_first(result.xpath('.//p[@class="PartialSearchResults-item-abstract"]/text()')),
            'page_number': page_number or "1",
        } for result in root.xpath('//div[@class="PartialSearchResults-item"]')]
    next_page_url = extract_first(
        root.xpath('//li[@class="PartialWebPagination-next"]/a/@href'))
    if next_page_url:
        next_page_url = 'https://www.ask.com' + next_page_url
    return results, next_page_url
Example #8
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//div[@class="compPagination"]//strong/text()'))
    results = [{
        'url':
        extract_first(result.xpath('.//a[@title]/@href')),
        'title':
        extract_first(result.xpath('.//a[@title]/@title')),
        'preview_text':
        join_all(result.xpath(".//p[@class='s-desc']//text()")),
        'publisher':
        extract_first(
            result.xpath('.//span[contains(@class,"mr-5 cite-co")]/text()')),
        'publish_date':
        publish_time(
            extract_first(
                result.xpath('.//span[contains(@class,"s-time")]/text()'))),
        'page_number':
        page_number,
    } for result in root.xpath(
        '//ol[contains(@class,"searchCenterMiddle")]/li')]
    next_page_url = extract_first(root.xpath('//a[@class="next"]/@href'))

    return results, next_page_url
Example #9
async def extract_search_results(html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(root.xpath(
        '//span[@class="pagination__num pagination__num--active"]/text()'))
    results = [
        {
            'url': extract_first(result.xpath('.//a[@class="web-bing__title"]/@href')),
            'title': extract_first(result.xpath('.//a[@class="web-bing__title"]/text()')),
            'preview_text': extract_first(result.xpath('.//span[@class="web-bing__description"]/text()')),
            'search_url': search_url,
            'page_number': page_number,
        } for result in root.xpath('//div[@class="web-bing__result"]')]
    print(
        f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(root.xpath(
        '//a[@class="pagination__num pagination__num--next-prev pagination__num--next"]/@href'))
    if next_page_url:
        next_page_url = 'https://www.dogpile.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #10
async def extract_search_results(html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(root.xpath(
        '//*[@role="navigation"]//tr/td[@class="YyVfkd"]/text()'))
    results = [
        {
            'url': extract_first(result.xpath("./a/@href")),
            'title': join_all(result.xpath("./a//div[@role='heading']/text()")),
            'preview_text': join_all(result.xpath(".//div[@class='dbsr']/a//div[@class='eYN3rb']/text()")),
            'publisher': extract_first(result.xpath('./a//div[@class="pDavDe RGRr8e"]/text()')),
            'publish_date': publish_date_from_time(extract_first(result.xpath('.//span[@class="eNg7of"]/span/text()'))),
            'search_url': search_url,
            'page_number': page_number,
        } for result in root.xpath('//div[@class="dbsr"]')]
    print(
        f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(root.xpath('//a[@id="pnnext"]/@href'))
    if next_page_url:
        next_page_url = 'https://www.google.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #11
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath(
            '//li[@class="PartialWebPagination-condensed PartialWebPagination-pgsel PartialWebPagination-button"]/text()'
        ))
    results = [{
        'url':
        extract_first(
            result.xpath(
                './/a[@class="PartialSearchResults-item-title-link result-link"]/@href'
            )),
        'title':
        extract_first(
            result.xpath(
                './/a[@class="PartialSearchResults-item-title-link result-link"]/text()'
            )),
        'preview_text':
        extract_first(
            result.xpath(
                './/p[@class="PartialSearchResults-item-abstract"]/text()')),
        'search_url':
        search_url,
        'page_number':
        page_number if page_number else "1",
    } for result in root.xpath('//div[@class="PartialSearchResults-item"]')]
    print(f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(
        root.xpath('//li[@class="PartialWebPagination-next"]/parent::a/@href'))
    if next_page_url:
        next_page_url = 'https://www.ask.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #12
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//*[@role="navigation"]//tr/td[@class="YyVfkd"]/text()'))
    results = []
    for result in root.xpath('//*[@class="g"]'):
        url_ele = result.xpath(".//a[@data-ved][@ping]")
        if url_ele:
            url_ele = url_ele[0]
            results.append({
                'url':
                extract_first(url_ele.xpath('./@href')),
                'title':
                extract_first(url_ele.xpath('./h3/text()')),
                'preview_text':
                join_all(result.xpath('.//span[@class="aCOpRe"]//text()')),
                'page_number':
                page_number,
            })
    next_page_url = extract_first(root.xpath('//a[@id="pnnext"]/@href'))
    if next_page_url:
        next_page_url = 'https://www.google.com' + next_page_url
    return results, next_page_url
Example #13
def extract_search_results(html: str,
                           page_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath(
            '//span[@class="pagination__num pagination__num--active"]/text()'))
    results = [{
        'url':
        extract_first(result.xpath('.//a[@class="web-bing__title"]/@href')),
        'title':
        join_all(result.xpath('.//a[@class="web-bing__title"]//text()')),
        'preview_text':
        join_all(
            result.xpath('.//span[@class="web-bing__description"]//text()')),
        'page_number':
        page_number,
    } for result in root.xpath('//div[@class="web-bing__result"]')]
    next_page_url = extract_first(
        root.xpath(
            '//a[@class="pagination__num pagination__num--next-prev pagination__num--next"]/@href'
        ))
    if next_page_url:
        next_page_url = 'https://www.dogpile.com' + next_page_url
    return results, next_page_url
Example #14
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//a[@class="sb_pagS sb_pagS_bp b_widePag sb_bp"]/text()'))
    results = []
    for r in root.xpath(
            '//div[@class="news-card newsitem cardcommon b_cards2"]'):
        pub_time = extract_first(
            r.xpath('.//div[@class="source"]//span/@aria-label'))
        publish_date = re.search(r'\d{1,2}\/\d{1,2}\/\d{4}', pub_time)
        results.append({
            'url':
            extract_first(r.xpath('.//a[@class="title"]/@href')),
            'title':
            extract_first(r.xpath('.//a[@class="title"]/text()')),
            'preview_text':
            extract_first(r.xpath('.//div[@class="snippet"]/@title')),
            'publisher':
            extract_first(
                r.xpath('.//div[@class="source"]//a[@aria-label]/text()')),
            'publish_date':
            publish_date.group()
            if publish_date else publish_date_from_time(pub_time),
            'search_url':
            search_url,
            'page_number':
            page_number,
        })
    next_page_url = extract_first(root.xpath("//a[@title='Next page']/@href"))
    if next_page_url:
        next_page_url = 'https://www.bing.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
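Examples #6 and #14 share a date heuristic: the aria-label on a Bing News card carries either an absolute date ("12/31/2021") or a relative time ("2 hours ago"), so the regex pulls out the absolute form and everything else falls through to the relative-time parser. A standalone demonstration with illustrative labels:

import re

for label in ('12/31/2021', '2 hours ago'):
    match = re.search(r'\d{1,2}/\d{1,2}/\d{4}', label)
    # Absolute dates match the regex; relative times take the fallback path.
    print(match.group() if match else f'fallback for {label!r}')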
Example #15
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath(
            '//span[@class="pagination__num pagination__num--active"]/text()'))
    results = []
    for result in root.xpath('//p[@class="article"]'):
        publish_info = extract_first(
            result.xpath('.//*[@class="source"]/text()')).split(",")
        publisher = publish_info.pop(0).strip() if publish_info else ""
        publish_date = publish_info.pop(0).strip() if publish_info else ""
        results.append({
            'url':
            extract_first(result.xpath('./a/@href')),
            'title':
            extract_first(result.xpath('./a/*[@class="title"]/text()')),
            'preview_text':
            extract_first(result.xpath('./span[@class="description"]/text()')),
            'publisher':
            publisher,
            'publish_date':
            publish_date,
            'search_url':
            search_url,
            'page_number':
            page_number,
        })
    print(f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(
        root.xpath(
            '//a[@class="pagination__num pagination__num--next-prev pagination__num--next"]/@href'
        ))
    if next_page_url:
        next_page_url = 'https://www.dogpile.com' + next_page_url
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
Example #16
async def extract_search_results(
        html: str, search_url: str) -> Tuple[List[Dict[str, str]], str]:
    root = fromstring(html)
    page_number = extract_first(
        root.xpath('//div[@class="compPagination"]//strong/text()'))
    results = [{
        'url':
        extract_first(result.xpath('.//a[@title]/@href')),
        'title':
        extract_first(result.xpath('.//a[@title]/@title')),
        'preview_text':
        join_all(result.xpath("//div[@class='compText aAbs']//text()")),
        'publisher':
        extract_first(
            result.xpath(
                './/a[@title]/following-sibling::span[@class="mr-5 cite-co"]/text()'
            )),
        'publish_date':
        publish_date_from_time(
            extract_first(
                result.xpath(
                    './/a[@title]/following-sibling::span[@class="fc-2nd mr-8"]/text()'
                ))),
        'search_url':
        search_url,
        'page_number':
        page_number,
    } for result in root.xpath(
        '//ol[contains(@class,"searchCenterMiddle")]/li')]
    print(f"Extracted {len(results)} results from page {page_number}.")
    next_page_url = extract_first(root.xpath('//a[@class="next"]/@href'))
    if next_page_url:
        print(f"Extracted next page url: {next_page_url}")
    else:
        print(f"No next page url found: {search_url}")
    return results, next_page_url
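Whichever engine a variant targets, the contract is identical: pass in a result page's HTML plus the URL it was fetched from, and get back the parsed rows together with the next page's URL (falsy once pagination is exhausted). A minimal driver loop for the synchronous variants; requests, the User-Agent header, and the starting URL are assumptions, not part of the original code:

import requests


def crawl(start_url: str, max_pages: int = 3) -> List[Dict[str, str]]:
    # Fetch a page, extract its results, then follow next_page_url
    # until pagination runs out or max_pages is reached.
    collected, url = [], start_url
    for _ in range(max_pages):
        html = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}).text
        results, next_page_url = extract_search_results(html, url)
        collected.extend(results)
        if not next_page_url:
            break
        url = next_page_url
    return collected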