# Example 1
def test_scrape_no_reviews():
    """
    Verify that scraping a drug page that has no reviews leaves the
    scraper's 'reviews' list empty.
    """
    empty_page_scraper = EverydayHealthScraper()
    empty_page_scraper.scrape('https://www.everydayhealth.com/drugs/triprolidine/reviews')
    assert not empty_page_scraper.reviews
# Example 2
def test_scrape_empty_reviews():
    """
    Verify that calling scrape a second time discards previously collected
    data instead of appending to the existing 'reviews' list.
    """
    scraper = EverydayHealthScraper()
    target_url = 'https://www.everydayhealth.com/drugs/phenadoz/reviews'
    scraper.scrape(target_url)
    first_pass_count = len(scraper.reviews)
    scraper.scrape(target_url)
    assert len(scraper.reviews) == first_pass_count
# Example 3
def test_scrape_with_parameters():
    """
    Verify that a scraper constructed with non-default parameters
    (collect_urls=True) stores the extra 'url' field in each review and
    collects more than one page of reviews.
    """
    url_collecting_scraper = EverydayHealthScraper(collect_urls=True)
    url_collecting_scraper.scrape('https://www.everydayhealth.com/drugs/gabapentin/reviews')
    # More than 20 reviews implies multiple pages were scraped.
    assert len(url_collecting_scraper.reviews) > 20
    first_review_keys = list(url_collecting_scraper.reviews[0].keys())
    assert len(first_review_keys) == 5
    assert 'url' in first_review_keys
# Example 4
def test_scrape_correct_review_data():
    """
    Verify that the last entry of the scraped reviews list (the oldest
    review of the drug) carries the expected comment, rating, and date.
    """
    scraper = EverydayHealthScraper(collect_urls=True)
    scraper.scrape(
        'https://www.everydayhealth.com/drugs/ciclopirox-topical/reviews')
    oldest_review = scraper.reviews[-1]
    assert oldest_review['comment'][:10] == 'After OVER'
    assert oldest_review['comment'][-10:] == 'inally hav'
    assert oldest_review['rating'] == 5
    assert oldest_review['date'] == '5/22/2015 4:18:19 AM'
# Example 5
def test_scrape_default_parameter():
    """
    Verify that a default-constructed scraper collects the four standard
    fields per review and more than 20 reviews (which proves it scrapes
    multiple pages).
    """
    default_scraper = EverydayHealthScraper()
    default_scraper.scrape('https://www.everydayhealth.com/drugs/gabapentin/reviews')
    assert len(default_scraper.reviews) > 20
    collected_fields = list(default_scraper.reviews[0].keys())
    assert len(collected_fields) == 4
    # Each review dict must expose exactly these standard fields.
    for expected_field in ('comment', 'rating', 'date', 'drug'):
        assert expected_field in collected_fields
# Example 6
def test_scrape_invalid_url_no_title():
    """
    Verify that scraping a url that lacks a title (an invalid url)
    raises an AttributeError internally and makes scrape return 0.
    """
    scraper = EverydayHealthScraper()
    result = scraper.scrape('https://www.everydayhealth.com/drugs/')
    assert result == 0
# Example 7
def test_everydayhealth_scrape():
    """Smoke-test an EverydayHealth scrape: enough reviews are returned
    and each review exposes 'comment' and 'rating' fields."""
    target = 'https://www.everydayhealth.com/drugs/citalopram/reviews'
    scraper = EverydayHealthScraper()
    collected = scraper.scrape(target)
    assert len(collected) > 5
    last_review_fields = list(collected[-1].keys())
    assert 'comment' in last_review_fields
    assert 'rating' in last_review_fields
# Example 8
def test_scrape_assert_title_error():
    """
    Verify that scraping an invalid url whose title lacks the phrase
    'Drug Reviews' raises an AssertionError internally and makes
    scrape return 0.
    """
    scraper = EverydayHealthScraper()
    result = scraper.scrape('https://www.everydayhealth.com/drugs/')
    assert result == 0
def main():
    """Scrape reviews for every drug name in the module-level ``input_list``
    from WebMD and EverydayHealth, aggregate per-drug rating statistics,
    and write the results to ``webmdresult.json`` and
    ``everydayhealth.json``.

    Drug names that fail to scrape are appended to the module-level
    ``webmd_names_errors`` / ``everydayhealth_names_errors`` lists and
    reported at the end. Failed entries still appear in the output with
    only their "name" field.
    """
    # --- WebMD: each review's "rating" is a dict of three sub-scores. ---
    scraper = WebMDScraper(
    )  # DrugsScraper(), DrugRatingzScraper() and EverydayHealthScraper() do not work here
    json_aggregrationReviews = {"website": "webmd.com"}
    json_aggregrationReviews["ratingSystem"] = "stars"
    json_aggregrationReviews["itemsNamesAggregration"] = input_list
    reviewsAggregrate = []
    for drug_name in input_list:
        json_reviews = {"name": drug_name}
        try:
            url = scraper.get_url(drug_name)
            scraper.scrape(url)
            dataframe_reviews = pd.DataFrame.from_dict(scraper.reviews)
            # Expand the per-review rating dicts once instead of five times.
            ratings = pd.DataFrame.from_records(dataframe_reviews["rating"])
            json_reviews["averageEffectiveness"] = round(
                ratings["effectiveness"].mean(), 1)
            json_reviews["averageEaseOfUse"] = round(
                ratings["ease of use"].mean(), 1)
            json_reviews["averageSatisfaction"] = round(
                ratings["satisfaction"].mean(), 1)
            json_reviews["minRating"] = round(ratings["satisfaction"].min(), 1)
            json_reviews["maxRating"] = round(ratings["satisfaction"].max(), 1)
            json_reviews["reviews"] = scraper.reviews
        except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt fatal
            print("Could not get " + drug_name + " from webmd website")
            webmd_names_errors.append(drug_name)
        reviewsAggregrate.append(json_reviews)
    json_aggregrationReviews["aggregrateReviews"] = reviewsAggregrate

    with open("webmdresult.json", "w") as f:
        f.write(json.dumps(json_aggregrationReviews, indent=4))

    # --- EverydayHealth: each review's "rating" is a single numeric score. ---
    scraper2 = EverydayHealthScraper()
    json_aggregrationReviews = {"website": "everydayhealth.com"}
    json_aggregrationReviews["ratingSystem"] = "stars"
    json_aggregrationReviews["itemsNamesAggregration"] = input_list
    reviewsAggregrate = []
    for drug_name in input_list:
        json_reviews = {"name": drug_name}
        try:
            # BUG FIX: the url was previously hard-coded to get_url("Adderall"),
            # so every entry scraped Adderall's reviews regardless of its name.
            url = scraper2.get_url(drug_name)
            print(url)
            scraper2.scrape(url)
            dataframe_reviews = pd.DataFrame.from_dict(scraper2.reviews)
            ratings = dataframe_reviews["rating"]
            json_reviews["averageRating"] = round(ratings.mean(), 1)
            json_reviews["minRating"] = round(ratings.min(), 1)
            json_reviews["maxRating"] = round(ratings.max(), 1)
            json_reviews["reviews"] = scraper2.reviews
        except Exception:
            print("Could not get " + drug_name +
                  " from everydayhealthscraper website ")
            everydayhealth_names_errors.append(drug_name)
        reviewsAggregrate.append(json_reviews)

    json_aggregrationReviews["aggregrateReviews"] = reviewsAggregrate

    with open("everydayhealth.json", "w") as f:
        f.write(json.dumps(json_aggregrationReviews, indent=4))

    if webmd_names_errors:
        print("I could not get from webmd " + str(webmd_names_errors))

    if everydayhealth_names_errors:
        print("I could not get from everydayhealth " +
              str(everydayhealth_names_errors))