Example No. 1
def test_robot_file_url():
    scraper = SimpleWebScraper()
    url = "https://en.wikipedia.org/wiki/Tomato"
    assert scraper.get_robot_url(url) == "https://en.wikipedia.org/robots.txt"

    url = "https://scikit-learn.org/stable/documentation.html"
    assert scraper.get_robot_url(url) == "https://scikit-learn.org/robots.txt"
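
A minimal sketch of a get_robot_url that would satisfy these assertions, assuming nothing beyond the standard library (the real SimpleWebScraper implementation is not shown here):

from urllib.parse import urlsplit

class SimpleWebScraper:

    def get_robot_url(self, url):
        # robots.txt conventionally lives at the root of the host, so
        # keep only the scheme and network location of the page URL and
        # append the fixed /robots.txt path.
        parts = urlsplit(url)
        return f"{parts.scheme}://{parts.netloc}/robots.txt"
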
Example No. 2
def test_web_scraper_fetch():
    scraper = SimpleWebScraper()
    headers, body = scraper.fetch("https://fr.wikipedia.org/wiki/Tomate")
    assert isinstance(headers, dict)
    assert isinstance(body, bytes)
    assert headers['Content-Type'] == "text/html; charset=UTF-8"

    article = WikipediaArticle(body, encoding="utf-8")
    expected_link = "https://en.wikipedia.org/wiki/Tomato"
    assert expected_link in article.get_language_links()

    main_text = article.get_main_text()
    assert main_text.startswith("Solanum lycopersicum\n\nLe plant de tomates")
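
One plausible fetch implementation, sketched here with urllib from the standard library (the tested class could equally use a third-party HTTP client); WikipediaArticle's HTML parsing is assumed and not shown:

from urllib.request import urlopen

def fetch(self, url):
    # Return (headers, body): a plain dict of the response headers and
    # the raw page bytes, matching the types asserted in the test above.
    with urlopen(url) as response:
        headers = dict(response.headers.items())
        body = response.read()
    return headers, body
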
Example No. 3
import json

def test_web_scraper_fetch_and_save(tmpdir):
    scraper = SimpleWebScraper(output_folder=tmpdir)
    result_folder = scraper.fetch_and_save(
        "https://fr.wikipedia.org/wiki/Pomme_de_terre")

    assert result_folder == tmpdir / "fr.wikipedia.org" / "wiki" / "Pomme_de_terre"
    with open(result_folder / "headers.json") as f:
        headers = json.load(f)
    assert headers['Content-Type'] == "text/html; charset=UTF-8"

    body = (result_folder / "body").read_bytes()
    article = WikipediaArticle(body)
    expected_link = "https://en.wikipedia.org/wiki/Potato"
    assert expected_link in article.get_language_links()
    assert article.get_main_text().startswith(
        "Solanum tuberosum\n\nLa pomme de terre, ou patate[1]")
Example No. 4
def test_web_scraper_robots_file_handling():
    scraper = SimpleWebScraper()
    assert scraper.can_fetch("https://en.wikipedia.org/wiki/Tomato")
    assert not scraper.can_fetch("https://en.wikipedia.org/api/")
    assert not scraper.can_fetch("https://en.wikipedia.org/wiki/Special:")