Example #1
    def fetch_within_date_range(self, news_num, url, date_range, task, ticker):
        # if the ticker has no news at all for X consecutive days, stop iterating dates
        # and move the ticker into the second-lowest priority list
        missing_days = 0
        has_content = False
        no_news_days = []
        for timestamp in date_range:
            # overwrite the same console line with the date currently being tried
            print('trying ' + timestamp, end='\r', flush=True)
            # convert 20151231 to 12312015 (MMDDYYYY) to match the Reuters URL format
            new_time = timestamp[4:] + timestamp[:4]
            soup = util.get_soup_with_repeat(url + "?date=" + new_time)
            if soup and self.parse_and_save_news(soup, task, ticker, timestamp):
                missing_days = 0  # found news for this day, so reset the consecutive-miss counter
                has_content = True
            else:
                missing_days += 1

            # the larger news_num is, the longer we can wait before giving up
            # e.g., news_num == 2 allows up to 30 missing days; news_num == 10 allows up to 70
            if missing_days > news_num * 5 + 20:
                # no news in X consecutive days, stop crawling
                print("%s has no news for %d days, stop this candidate ..." %
                      (ticker, missing_days))
                break
            if missing_days > 0 and missing_days % 20 == 0:
                # record a checkpoint every 20 consecutive news-less days so the
                # caller can see where the long gaps are
                no_news_days.append(timestamp)

        return has_content, no_news_days
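
    # The date_range argument above is assumed to be an iterable of 'YYYYMMDD'
    # strings. A hypothetical helper such as the one below could build it; it is
    # not part of the original crawler and is shown only as a sketch.
    def build_date_range(self, start_date, end_date):
        # walk day by day from end_date back to start_date, most recent first,
        # formatting each datetime.date as a 'YYYYMMDD' string
        from datetime import timedelta
        current, dates = end_date, []
        while current >= start_date:
            dates.append(current.strftime("%Y%m%d"))
            current -= timedelta(days=1)
        return dates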

    def get_news_num_whenever(self, url):
        # check the website to see whether the ticker has any news at all
        # and return the number of news items found on the page
        soup = util.get_soup_with_repeat(url, repeat_times=4)
        if soup:
            return len(soup.find_all("div", {'class': ['topStory', 'feature']}))
        return 0
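
Both methods rely on util.get_soup_with_repeat, whose implementation is not part of this example. A minimal sketch, assuming a requests + BeautifulSoup retry wrapper, might look like the following; the default retry count, time-out, and back-off values are illustrative guesses.

import time

import requests
from bs4 import BeautifulSoup


def get_soup_with_repeat(url, repeat_times=3):
    # fetch the page up to repeat_times times and parse it with BeautifulSoup;
    # return None when every attempt fails so callers can treat it as "no news"
    for attempt in range(repeat_times):
        try:
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                return BeautifulSoup(response.text, "html.parser")
        except requests.RequestException:
            pass
        time.sleep(2 ** attempt)  # simple exponential back-off between attempts
    return None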