import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

# scraper, writer and driver_location are assumed to be defined elsewhere in
# the project (the parsing/writing helpers and the path to chromedriver).


def get_restaurant_reviews(file, url):
    # connect to the website
    browser = webdriver.Chrome(driver_location)
    browser.get(url)
    all_reviews = []

    # check if the restaurant exists (the page actually has results)
    if scraper.has_results(browser.page_source):
        # skip the welcome form if it is shown
        if browser.find_elements_by_id("zone_form"):
            welcome_form = browser.find_element_by_id("zone_form")
            welcome_form.submit()

        # keep scraping while there is a next page; the loop exits when
        # find_element_by_id raises NoSuchElementException on the last page
        try:
            while browser.find_element_by_id("nav_next_page"):
                page_reviews = scraper.get_page_data(browser.page_source, browser.current_url)
                all_reviews += page_reviews
                next_page = browser.find_element_by_id("nav_next_page")
                next_page.click()
        except NoSuchElementException:
            # no next page: collect the reviews of the last page
            page_reviews = scraper.get_page_data(browser.page_source, browser.current_url)
            all_reviews += page_reviews

        print("Collected reviews for restaurant: " + url + ", now writing file: " + file)
        writer.write_reviews(file, all_reviews)
    else:
        print("Restaurant: " + url + " does not exist!")

    browser.quit()
    time.sleep(1)
    return len(all_reviews)
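# A small usage sketch for get_restaurant_reviews() above; the output file
# names and restaurant URLs below are illustrative placeholders, not taken
# from the original project.
restaurants = [
    ("reviews_restaurant_1.csv", "https://example.com/restaurant/1"),
    ("reviews_restaurant_2.csv", "https://example.com/restaurant/2"),
]

for output_file, restaurant_url in restaurants:
    count = get_restaurant_reviews(output_file, restaurant_url)
    print("Wrote " + str(count) + " reviews to " + output_file)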
def get(self):
    return {"data": get_page_data()}
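# The get() method above looks like a Flask-RESTful resource handler. Below is
# a minimal sketch of how such a method is typically exposed as an HTTP
# endpoint, assuming Flask-RESTful is used; the class name ReviewData, the
# /reviews route and the stub get_page_data() helper are assumptions for
# illustration, not part of the original code.
from flask import Flask
from flask_restful import Api, Resource


def get_page_data():
    # Placeholder for the real data-fetching helper used by the snippet above.
    return []


class ReviewData(Resource):
    def get(self):
        # Flask-RESTful serialises the returned dict to JSON automatically.
        return {"data": get_page_data()}


app = Flask(__name__)
api = Api(app)
api.add_resource(ReviewData, "/reviews")

if __name__ == "__main__":
    app.run(debug=True)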