def parse(self, response):
    # Collect absolute article URLs from the listing container.
    news_url_list = extract_full_href_list(response.css('div.m-miM09'), response.url)
    LOGGER.info(f'scraped {len(news_url_list)} news urls from {response.url}')
    for news_url in news_url_list:
        yield response.follow(news_url, callback=self.parse_news)
    # Keep paging until the configured crawl limit is reached.
    self.news_count += len(news_url_list)
    if self.news_count < self.limit:
        yield response.follow(self.build_next_url(), self.parse)
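All four spiders delegate link extraction to a shared extract_full_href_list helper whose definition is outside this excerpt. A minimal sketch, assuming it gathers every href under the given selectors and resolves relative links against the page URL:

from urllib.parse import urljoin

def extract_full_href_list(selector_list, base_url):
    # Assumed behaviour: pull hrefs from anchor tags under the given
    # selectors and make them absolute so the spider follows full URLs.
    return [urljoin(base_url, href)
            for href in selector_list.css('a::attr(href)').getall()]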
def parse(self, response):
    # Pull article links out of each list item on the article-list page.
    news_url_list = extract_full_href_list(
        response.xpath('//section[@id="article-list"]//li'), response.url)
    LOGGER.info(
        f'scraped {len(news_url_list)} news urls from {response.url}')
    for news_url in news_url_list:
        if 'premier' not in news_url:  # TODO: handle premier articles
            yield response.follow(news_url, callback=self.parse_news)
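The module-level LOGGER these methods write to is also defined elsewhere; a standard setup would be:

import logging

LOGGER = logging.getLogger(__name__)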
Example #3
def parse(self, response):
    # Same pattern against a different layout: one <article> node per story.
    news_url_list = extract_full_href_list(
        response.xpath('//section[@id="moreSectionNews"]//article'), response.url)
    LOGGER.info(f'scraped {len(news_url_list)} news urls from {response.url}')
    for news_url in news_url_list:
        yield response.follow(news_url, callback=self.parse_news)
    self.news_count += len(news_url_list)
    if self.news_count < self.limit:
        yield response.follow(self.build_next_url(), self.parse)
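Several of these methods end by following self.build_next_url() back into parse, recursing page by page until news_count reaches the limit. The helper itself is not shown; one plausible shape, assuming the site uses query-string paging and that self.page and self.list_url are spider attributes (both assumptions), is:

def build_next_url(self):
    # Hypothetical pagination helper: the real URL scheme is site-specific.
    self.page += 1
    return f'{self.list_url}?page={self.page}'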
def parse(self, response):
    # Selectors can be chained, mixing XPath and CSS, to narrow the scope:
    # the newslist section, then its list-typeA <ul>, then each <li>.
    news_url_list = extract_full_href_list(
        response.xpath('//section[@class="newslist"]').css(
            'ul.list-typeA').xpath('./li'), response.url)
    LOGGER.info(
        f'scraped {len(news_url_list)} news urls from {response.url}')
    for news_url in news_url_list:
        if 'premier' not in news_url:  # TODO: handle premier articles
            yield response.follow(news_url, callback=self.parse_news)
    self.news_count += len(news_url_list)
    if self.news_count < self.limit:
        yield response.follow(self.build_next_url(), self.parse)
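Every article URL is handed to self.parse_news, which is also outside the excerpt. A bare-bones placeholder, with the item fields and selectors left as assumptions since they depend entirely on the target site:

def parse_news(self, response):
    # Hypothetical article parser: real selectors are site-specific.
    yield {
        'url': response.url,
        'title': response.css('h1::text').get(),
    }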