Example #1
    def parse_news(self, response):
        # Use extract_first() so missing elements yield None instead of
        # raising IndexError, letting the guard below actually run
        title = response.css('h1[itemprop="headline"]::text').extract_first()
        author_name = response.css('a[rel="author"] > span::text').extract_first()
        raw_content = response.css('.content').extract_first()

        if not (title and author_name and raw_content):
            return

        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)
        loader.add_value('title', title)
        loader.add_value('author_name', author_name)
        loader.add_value('raw_content', raw_content)

        # Parse date information
        try:
            # Example: Selasa,  6 Oktober 2015 - 05:23 WIB
            date_time_str = response.css('article > div.time::text').extract()[0]
            date_time_str = date_time_str.split(',')[1].strip()[:-4]
            date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
            self.logger.info('parse_date: parse_news: date_str: %s', date_time_str)
            published_at = wib_to_utc(datetime.strptime(date_time_str, '%d %B %Y - %H:%M'))
            loader.add_value('published_at', published_at)
        except Exception as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        # Move scraped news to pipeline
        return loader.load_item()
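
All of these examples rely on two helpers imported from the surrounding project: `_()` translates Indonesian date words into English so that `datetime.strptime` can parse them (e.g. Oktober => October), and `wib_to_utc()` converts Western Indonesia Time (WIB, UTC+7) to UTC. A minimal sketch under those assumptions; the real helpers may cover more words (day names, abbreviations such as Okt => Oct) and use timezone-aware datetimes:

    from datetime import timedelta

    # Assumed translation table: Indonesian month names to English
    MONTH_MAP = {
        'Januari': 'January', 'Februari': 'February', 'Maret': 'March',
        'April': 'April', 'Mei': 'May', 'Juni': 'June', 'Juli': 'July',
        'Agustus': 'August', 'September': 'September', 'Oktober': 'October',
        'November': 'November', 'Desember': 'December',
    }

    def _(word):
        # Return the English equivalent if known, else the word unchanged
        return MONTH_MAP.get(word, word)

    def wib_to_utc(dt):
        # WIB is UTC+7, so shift back seven hours to get UTC
        return dt - timedelta(hours=7)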
Example #2
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)
        parsed_news = json.loads(response.body_as_unicode())[0]

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', parsed_news['url'])

        if not parsed_news['title']:
            # Will be dropped on the item pipeline
            return loader.load_item()
        loader.add_value('title', parsed_news['title'])

        # Convert HTML text to a scrapy response
        html_response = HtmlResponse(url=parsed_news['url'],
                body=parsed_news['content'].encode('utf-8', 'ignore'))
        xpath_query = '''
            //body/node()
                [not(descendant-or-self::comment()|
                    descendant-or-self::style|
                    descendant-or-self::script|
                    descendant-or-self::div|
                    descendant-or-self::span|
                    descendant-or-self::image|
                    descendant-or-self::img|
                    descendant-or-self::iframe
                )]
        '''
        raw_content_selectors = html_response.xpath(xpath_query)
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        raw_content = ' '.join([w.strip() for w in raw_content])
        loader.add_value('raw_content', raw_content)

        if not parsed_news['published']:
            # Will be dropped on the item pipeline
            return loader.load_item()

        # Parse date information
        # Example: 12 Oct 2016 - 05:25
        date_time_str = ' '.join([_(w) for w in parsed_news['published'].split(',')[1].strip()[:-4].split(' ')])
        try:
            published_at_wib = datetime.strptime(date_time_str,
                    '%d %b %Y - %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        if not parsed_news['author']:
            loader.add_value('author_name', '')
        else:
            loader.add_value('author_name', parsed_news['author'])

        # Move scraped news to pipeline
        return loader.load_item()
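
The XPath query above keeps only those direct children of <body> whose subtrees contain no comment, style, script, div, span, image, img, or iframe nodes. A toy illustration of the same filtering idea, with a shortened predicate and made-up HTML:

    from scrapy.http import HtmlResponse

    html = (b'<html><body><p>Keep me</p><div>Drop me</div>'
            b'<p>Keep <em>me too</em></p><script>x()</script></body></html>')
    response = HtmlResponse(url='http://example.com', body=html)
    kept = response.xpath(
        '//body/node()[not(descendant-or-self::script|'
        'descendant-or-self::div)]').extract()
    # kept == ['<p>Keep me</p>', '<p>Keep <em>me too</em></p>']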
Example #3
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.detail > article > div.detail_area > h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        # parse date
        date_selectors = response.css(
            "div.detail > article > div.detail_area > div.date::text")
        if not date_selectors:
            return loader.load_item()
        # Selasa 10 Oktober 2017, 13:40 WIB
        date_str = date_selectors.extract()[0]

        date_str = filter(None, re.split('[\s,]', date_str))[1:5]
        info_time = ' '.join([_(s) for s in date_str if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #TODO check the published_at, if it is smaller than the last time
        #we crawl, just drop the data.

        #parse author name
        author_name_selectors = response.css(
            "div.detail > article > div.detail_area > div.author > strong::text"
        )
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract_first()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            "div.detail > article > div.text_detail.detail_area")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #4
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div#mdk-news-title::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css("div.mdk-date-reporter > span::text")
        if not date_selectors:
            return loader.load_item()
        # The selector can contain two or three elements, so take the
        # second-to-last one
        pos = len(date_selectors) - 2
        date_str = date_selectors.extract()[pos]

        # eg: 8 September 2017 21:02
        date_str = date_str.split("|")[1].strip()
        time_arr = filter(None, re.split('[\s,|]', date_str))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            "div.mdk-date-reporter > span::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[1]
            author_name = author_name.split(":")[1].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.mdk-body-paragraph")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #5
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div.artikel > h1.artikel::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css('div.artikel > div.tanggal::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()

        # eg: Tuesday, 12 September 2017 | 20:21 WIB
        time_arr = filter(None, re.split('[\s,|]', date_str))[1:-1]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        if self.media['last_crawl_at'] >= published_at:
            # No update since the last crawl; let the pipeline drop the item
            return loader.load_item()

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            'div.artikel > div > p > strong::text')
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[-1].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css('div.artikel > div > p')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #6
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.main-container > div > section.main-content > h1.page-header::text'
        )
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css(
            'div.post-meta > div > div > div.submitted > span::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()
        info_time = re.sub(r'[,-]', '', date_str)
        info_time = re.sub(r'\s+', ' ', info_time)
        time_arr = filter(None, re.split('[\s,|-]', info_time))[:4]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            return loader.load_item()

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            'div.post-meta > div > div > div.items-penulis > span > a::text'
        ).extract_first()
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            'div.region.region-content > section > article > div.field.field-name-body.field-type-text-with-summary > div.field-items > div.field-item.even'
        )
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #7
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        # Required: title, raw_content, published_at
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('h1.detailtitle::text')
        if not title_selectors:
            # If error, drop from the item pipeline
            return loader.load_item()
        title = title_selectors.extract_first().strip()
        loader.add_value('title', title)

        # Parse date information
        date_time = response.css(
            'body > div > div.container > div.page-header > div::text'
        ).extract_first().strip()
        date_time = date_time.split(',')[-1].strip()
        date_time = ' '.join([_(w) for w in date_time.split(' ')])  # Oktober => October
        try:
            published_at_wib = datetime.strptime(date_time, '%d %B %Y %H:%M')
        except ValueError:
            # If error, drop from the item pipeline
            return loader.load_item()

        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        # If multipage
        multipage_selectors = response.css('.newsPagingWrap > a')
        if multipage_selectors:
            return self.parse_indices(multipage_selectors, loader)

        # Else if not multipage

        author_name_selectors = response.css('.newsContent > p > strong::text')
        if not author_name_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_name_selectors.extract()[-1].strip()
            loader.add_value('author_name', author_name)

        # Extract the news content
        raw_content_selectors = response.css('.newsContent > p')
        if not raw_content_selectors:
            # Drop from the item pipeline
            return loader.load_item()

        raw_content = ' '.join(raw_content_selectors.extract())
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        # Move scraped news to pipeline
        return loader.load_item()
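
`parse_indices` is called above but not shown in this listing. A hypothetical sketch of how it could work: walk the pagination links, accumulate each page's paragraphs, and load the item only after the last page. Everything below except `Request` and the `.newsContent > p` selector is an assumed name:

    def parse_indices(self, multipage_selectors, loader):
        # Hypothetical: collect the page URLs, then chain requests that
        # carry the half-filled loader along via lambda default arguments
        page_urls = multipage_selectors.css('::attr(href)').extract()
        return Request(page_urls[0],
                       callback=lambda response, loader=loader,
                       urls=page_urls[1:]:
                       self.parse_index_page(response, loader, urls))

    def parse_index_page(self, response, loader, remaining_urls):
        # Hypothetical continuation: append this page's content, then either
        # follow the next page or finally load the item
        content = ' '.join(response.css('.newsContent > p').extract())
        loader.add_value('raw_content', content)
        if remaining_urls:
            return Request(remaining_urls[0],
                           callback=lambda response, loader=loader,
                           urls=remaining_urls[1:]:
                           self.parse_index_page(response, loader, urls))
        return loader.load_item()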
Example #8
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        # parse date
        date_selectors = response.css("div.date > span::text")
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[0]

        # eg: Selasa, 12 Sep 2017 20:08
        date_str = date_str.split(",")[1].strip()
        time_arr = filter(None, re.split('[\s,|]', date_str))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %b %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #TODO check the published_at, if it is smaller than the last time
        #we crawl, just drop the data.

        #parse author name
        author_name_selectors = response.css("div.date > span")[1].css(
            "span > span::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract_first()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.contentdetail")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #9
    def parse_news(self, response):

        # self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div.wrap-head > h2 > a::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css('div.wrap-head > span::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[0]

        # eg: Tuesday, 12 September 2017 | 20:21 WIB
        time_arr = filter(None, re.split('[\s,|]', date_str))[1:-1]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css('div.red::text')
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[0].strip()
            author_name = author_name.replace('Rep: ',
                                              '').replace('Red: ', '').strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css('div.content-detail')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #10
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.view-content > div > article")

        if articles:
            for article in articles:
                url_selector = article.css(
                    "div.entry-main > h2.entry-title > a ::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                info_selectors = article.css(
                    "div.entry-main > div.entry-meta > span.entry-date > span::text"
                )
                if not info_selectors:
                    continue

                # 25 July, 2018 - 11:04
                info = info_selectors.extract_first()

                info_time = re.sub(r'[,-]', '', info)
                info_time = re.sub(r'\s+', ' ', info_time)
                time_arr = filter(None, re.split('[\s,|-]', info_time))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                #parse date information
                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                self.logger.info('%s %s' % (url, published_at_wib))

                full_url = '%s%s' % (self.base_url, url)

                yield Request(url=full_url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            next_selectors = response.css("ul.pagination > li.next")
            if next_selectors:
                next_url = next_selectors.css("a::attr(href)").extract_first()
                next_url = '%s%s' % (self.base_url, next_url)
                yield Request(next_url, callback=self.parse)
Example #11
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        # Collect list of news from current page
        article_selectors = response.css('ul.indexlist > li')
        if not article_selectors:
            raise CloseSpider('article_selectors not found')
        for article in article_selectors:
            url_selectors = article.css('a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example: 7 Oktober 2016 19:37
            info_selectors = article.css('div.upperdeck::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            info = info_selectors.extract()[1]
            info = info.split(',')[1].replace('\t', '').strip()
            # Example: 7 October 2016 19:37
            info_time = info.split(' ')
            info_time = ' '.join([_(s) for s in info_time])

            # Parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M')
            except ValueError as err:
                raise CloseSpider('cannot_parse_date: {}'.format(err))
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break
            # For each url we create new scrapy Request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # Collect news on next page
        tag_selectors = response.css('div.pagination > a')
        if not tag_selectors:
            raise CloseSpider('tag_selectors not found')
        for tag in tag_selectors:
            more_selectors = tag.css('a::text')
            if not more_selectors:
                raise CloseSpider('more_selectors not found')
            more = more_selectors.extract()[0]
            if more == 'NEXT':
                next_page = tag.css('a::attr(href)').extract()[0]
                next_page_url = response.urljoin(next_page)
                yield Request(next_page_url, callback=self.parse)
Example #12
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.list.media_rows.list-berita > article")
        if articles:
            for article in articles:
                # TODO handle for photo

                url_selector = article.css("a::attr(href)")
                if not url_selector:
                    continue

                url = url_selector.extract_first()
                self.logger.info(url)

                # parse date
                date_selectors = article.css("a > span.box_text > span.date::text")
                if not date_selectors:
                    continue
                date_str = date_selectors.extract_first()

                # date_str = Senin, 09 Okt 2017 16:12 WIB

                info_time = date_str.split(',')[1].strip()
                time_arr = filter(None, re.split('[\s,|]', info_time))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                try:
                    published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s in %s' % (e, url))

                # convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                # TODO
                # eg: 6 Hours ago
                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    self.logger.info('Done sending requests. No more')
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return
        navs = response.css("div.paging.text_center > a.last")
        for nav in navs:
            direction = nav.css("img::attr(alt)").extract_first()
            if direction and direction.lower() == 'kanan':
                next_page = nav.css("a::attr(href)").extract_first()
                yield Request(next_page, callback=self.parse)
Example #13
    def parse_news_metro(self, response):
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        date_selector = response.css('.artikel > div.block-tanggal::text')
        if not date_selector:
            return self.parse_news_pilkada(loader, response)
        try:
            date_time_str = date_selector.extract()[0].split(',')[1].strip()[:-4]
            date_time_str = ' '.join([_(x) for x in date_time_str.split(' ')])
            published_at_wib = datetime.strptime(date_time_str, '%d %B %Y | %H:%M')
        except Exception:
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        if self.media['last_scraped_at'] >= published_at:
            self.logger.info('Media have no update')
            raise CloseSpider('finished')
        loader.add_value('published_at', published_at)

        title_selector = response.css('.artikel > h1::text')
        if not title_selector:
            return loader.load_item()
        loader.add_value('title', title_selector.extract()[0])

        # Select all p which don't have iframe inside it
        raw_content_selector = response.xpath('//div[@class="artikel"]//p[not(iframe)]')
        if not raw_content_selector:
            return loader.load_item()
        raw_content = ''
        for raw_content_selector_one in raw_content_selector:
            raw_content = raw_content + raw_content_selector_one.extract()

        # Go to next page while there is next page button
        next_page_selector = response.css('.pagination-nb').xpath('//a[text()="next"]/@href')
        if next_page_selector:
            return Request(next_page_selector.extract()[0], callback=lambda x, loader=loader, raw_content=raw_content: self.parse_next_page_metro(x, loader, raw_content))

        loader.add_value('raw_content', raw_content)

        # The author name is usually put inside a <strong> tag; however, some
        # articles do not use that tag.
        # NOTE: this block of code may need revision in the future
        author_name = ''
        for author_name_selector in reversed(raw_content_selector):
            author_name_selector = author_name_selector.css('strong::text')
            for tmp in reversed(author_name_selector.extract()):
                tmp = tmp.strip()
                if tmp and all((x.isalpha() and x.isupper()) or x.isspace() or x == '.' or x == '|' for x in tmp):
                    author_name = tmp
                    break
            if author_name:
                break
        author_name = ','.join(author_name.split(' | '))
        loader.add_value('author_name', author_name)
        return loader.load_item()
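
The `parse_next_page_metro` callback chained above is not included in this listing either. A plausible sketch that mirrors the pattern, reusing the selectors from `parse_news_metro` (the body below is an assumption):

    def parse_next_page_metro(self, response, loader, raw_content):
        # Hypothetical continuation callback: keep appending paragraphs
        for selector in response.xpath('//div[@class="artikel"]//p[not(iframe)]'):
            raw_content = raw_content + selector.extract()

        # Follow the next page while the button exists, carrying state along
        next_page_selector = response.css('.pagination-nb').xpath(
            '//a[text()="next"]/@href')
        if next_page_selector:
            return Request(next_page_selector.extract()[0],
                           callback=lambda x, loader=loader,
                           raw_content=raw_content:
                           self.parse_next_page_metro(x, loader, raw_content))

        loader.add_value('raw_content', raw_content)
        return loader.load_item()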
Example #14
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.pa15.bgwhite > h1.f32.fno.crimson::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css(
            'div.pa15.bgwhite > div.mt10.mb10 > time.grey.f13.dip::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()
        # eg: Kompas.com - 10/10/2017, 13:37 WIB
        time_arr = filter(None, re.split('[\s,-]', date_str))[1:3]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d/%m/%Y %H:%M')
        except ValueError as e:
            return loader.load_item()

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        # author_name_selectors = response.css('div.read__author > a::text').extract_first()
        # if not author_name_selectors:
        loader.add_value('author_name', 'N/A')
        # else:
        #     author_name = author_name_selectors
        #      loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            'div.ptb15 > div.txt-article.mb20')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #15
    def parse(self, response):
        is_no_update = False

        articles = response.css(
            "div.left_ib_rightx > div.set_subkanal > div.txt_subkanal")
        if articles:
            for article in articles:
                # TODO handle for photo

                url_selector = article.css("h2 > a::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                # parse date
                date_selectors = article.css("h6::text")
                if not date_selectors:
                    continue
                date_str = date_selectors.extract_first()

                # Tuesday, 14 Aug 2018 21:37 WIB

                info_time = date_str.split(',')[1].strip()
                time_arr = filter(None, re.split('[\s,|]', info_time))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s in %s' % (e, url))

                # convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                # TODO
                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            links = response.css("div.pagination > section > nav > a")
            for link in links:
                label = link.css("a::text").extract_first()
                if label and label.lower() == 'next':
                    next_page = link.css("a::attr(href)").extract_first()
                    yield Request(next_page, callback=self.parse)
Example #16
    def parse_news(self, response):
        self.logger.info('parse_news: {}'.format(response))

        # Init item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('div.content-detail > h4::text')
        if not title_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        title = title_selectors.extract()[0]
        loader.add_value('title', title)

        # Extract raw html, not the text
        raw_content_selectors = response.css('div.content-body')
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        raw_content = ' '.join([w.strip() for w in raw_content])
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        # Example: Selasa, 11 Oktober 2016 | 10:48
        date_selectors = response.css('div.date::text')
        if not date_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        date_str = date_selectors.extract()[0]
        # Example: 11 October 2016 10:48
        date_str = re.split('[\s,|-]', date_str)
        date_str = ' '.join([_(s) for s in date_str[1:] if s])

        # Parse date information
        try:
            published_at_wib = datetime.strptime(date_str, '%d %B %Y %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        author_selectors = response.css('div.content-detail > p::text')
        if not author_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_selectors.extract()[0]
            author_name = author_name.split('/')[0]
            loader.add_value('author_name', author_name)

        # Move scraped news to pipeline
        return loader.load_item()
Example #17
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('section.main-content > h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css("div.submitted > span::text")
        if not date_selectors:
            return loader.load_item()
        # eg: 5 September, 2017 - 18:54
        date_str = date_selectors.extract_first()

        time_arr = filter(None, re.split('[\s,-]', date_str))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            "div.items-penulis > span > a::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[0].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.field-item.even")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #18
    def parse(self, response):
        base_url = "https://m.merdeka.com"
        self.logger.info('parse: %s' % response)
        self.logger.info(self.media['last_crawl_at'])
        is_no_update = False

        articles = response.css("div#mdk-tag-news-list_mobile > ul > li")
        if not articles:
            raise CloseSpider('articles not found')

        for article in articles:
            url_selector = article.css("div > a::attr(href)")
            if not url_selector:
                raise CloseSpider('url_selectors not found')
            url = base_url + url_selector.extract()[0]
            self.logger.info(url)

            info_selectors = article.css("div > b.mdk-time::text")
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            #info = Jumat, 8 September 2017 16:45:19
            info = info_selectors.extract_first().replace(u'\xa0', u'')

            #info_time = 8 September 2017 16:45:19
            info_time = info.split(',')[1].strip()
            time_arr = filter(None, re.split('[\s,|]', info_time))
            info_time = ' '.join([_(s) for s in time_arr if s])
            self.logger.info(info_time)

            #parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M:%S')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            #convert to utc+0
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_crawl_at'] >= published_at:
                is_no_update = True
                break

            yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        if response.css('div.paging-box'):
            next_page = response.css('a.link_next::attr(href)')[0].extract()
            yield Request(next_page, callback=self.parse)
Example #19
    def parse_news(self, response):
        self.logger.info('parse_news: {}'.format(response))

        # Init item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('h1.title-big-detail::text')
        if not title_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        title = title_selectors.extract()[0].strip()
        loader.add_value('title', title)

        # Extract raw html, not the text
        raw_content_selectors = response.css('div.detail-content')
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()[0]
        loader.add_value('raw_content', raw_content)

        date_selectors = response.css(
            'span.meta-author > span:nth-child(3)::text')
        if not date_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        # Example: Sabtu, 1 Oktober 2016, 15:47 WIB
        date_str = date_selectors.extract()[0].strip()
        # Example: 1 October 2016 15:47
        date_str = date_str.replace(',', '').split(' ')[1:-1]
        date_str = ' '.join([_(s) for s in date_str])
        # Parse date information
        try:
            published_at_wib = datetime.strptime(date_str, '%d %B %Y %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        author_selectors = response.css('span.meta-author > span > b::text')
        if not author_selectors:
            author_name = ''
            loader.add_value('author_name', author_name)
        else:
            author_name = author_selectors.extract()[0]
            loader.add_value('author_name', author_name)

        # Move scraped news to pipeline
        return loader.load_item()
Example #20
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('h1[itemprop="headline"]::text')
        if not title_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        title = title_selectors.extract()[0]
        loader.add_value('title', title)

        author_name_selectors = response.css('a[rel="author"] > span::text')
        if not author_name_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_name_selectors.extract()[0]
            loader.add_value('author_name', author_name)

        raw_content_selectors = response.css('.content')
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        raw_content = ' '.join([w.strip() for w in raw_content])
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        date_time_str_selectors = response.css('article > div.time::text')
        if not date_time_str_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()

        # Parse date information
        # Example: Selasa,  6 Oktober 2015 - 05:23 WIB
        date_time_str = date_time_str_selectors.extract()[0]
        date_time_str = date_time_str.split(',')[1].strip()[:-4]
        date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
        try:
            published_at_wib = datetime.strptime(date_time_str,
                                                 '%d %B %Y - %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        # Move scraped news to pipeline
        return loader.load_item()
Example #21
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.lsi > ul#latestul > li > div")

        if articles:
            for article in articles:
                url_selector = article.css(
                    "div.m-btsquare > h3 > a::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                info_selectors = article.css(
                    " div.m-btsquare > div > time::text")
                if not info_selectors:
                    continue

                #info = Jumat, 17 Agustus 2018 04:43 WIB
                info = info_selectors.extract_first()

                info_time = info.split(',')[1].strip()
                time_arr = filter(None, re.split('[\s,|]', info_time))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                #parse date information
                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            next_selectors = response.css("div.ma10.paging#paginga > a")
            for selector in next_selectors:
                label = selector.css("a::text").extract_first()
                if label and label.lower() == 'next':
                    next_url = selector.css("a::attr(href)").extract_first()
                    yield Request(next_url, callback=self.parse)
                    break
Example #22
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('h1.read__title::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css('div.read__time::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()
        # eg: Kompas.com - 10/10/2017, 13:37 WIB
        time_arr = filter(None, re.split('[\s,-]', date_str))[1:3]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d/%m/%Y %H:%M')
        except ValueError as e:
            return loader.load_item()

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            'div.read__author > a::text').extract_first()
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css('div.read__content')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #23
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        # beritasatu response is HTML snippet wrapped in a JSON response
        json_response = json.loads(response.body_as_unicode())
        if 'content' not in json_response:
            raise CloseSpider('json_response invalid')
        data = json_response['content']
        response = HtmlResponse(url=response.url,
                                body=data.encode('utf-8', 'ignore'))

        # Note: there is no next page button on beritasatu; everything is
        # loaded here. Adjust how many links to extract via the NEWS_LIMIT
        # constant.
        article_selectors = response.css('div.headfi')
        if not article_selectors:
            raise CloseSpider('article_selectors not found')
        for article in article_selectors:
            url_selectors = article.css('h4 > a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url_raw = url_selectors.extract()[0]
            url = 'http://www.beritasatu.com{}'.format(url_raw)

            # Example: Kamis, 06 Oktober 2016 | 10:11 -
            info_selectors = article.css('div.ptime > span.datep::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            info = info_selectors.extract()[0]
            info_time = re.split('[\s,|-]', info)
            info_time = ' '.join([_(s) for s in info_time[1:] if s])

            # Parse date information
            try:
                # Example: 06 October 2016 10:11
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M')
            except ValueError as err:
                raise CloseSpider('cannot_parse_date: {}'.format(err))

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break
            # For each url we create new scrapy Request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return
Example #24
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        #parse title
        title_selectors = response.css('h1.read__title::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css('div.read__date::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[0]

        # eg: Tuesday, 12 September 2017 | 20:21 WIB
        time_arr = filter(None, re.split('[\s,|]', date_str))[1:-1]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            'div.contentArticle.box-shadow-new > h6::text').extract_first()
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            'div.contentArticle.box-shadow-new').extract()
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #25
    def parse(self, response):
        # self.logger.info('parse this url: %s' % response.url)
        is_no_update = False

        articles = response.css('div.wp-terhangat')
        if not articles:
            raise CloseSpider('articles not found')

        for article in articles:
            url_selector = article.css('div.item3 > a::attr(href)')
            if not url_selector:
                continue
                # raise CloseSpider('url_selectors not found ' + response.url)
            url = url_selector.extract()[0]
            self.logger.info(url)

            info_selectors = article.css("div.item3 > span::text")
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            #info = Tuesday, 12 September 2017
            info = info_selectors.extract_first()

            #info_time = 12 September 2017
            info_time = info.split(',')[1].strip()
            time_arr = filter(None, re.split('[\s,|]', info_time))
            info_time = ' '.join([_(s) for s in time_arr if s])

            #parse date information
            try:
                published_at_wib = datetime.strptime(info_time, '%d %B %Y')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            #convert to utc+0
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_crawl_at'] >= published_at:
                is_no_update = True
                break

            yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        if response.css('div.pagination > section > nav > ul > li.button'):
            next_page = response.css(
                'div.pagination > section > nav > ul > li.button > a::attr(href)'
            )[0].extract()
            yield Request(next_page, callback=self.parse)
Example #26
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        for article in response.css('li > div.breaking-title'):
            # http://metro.sindonews.com/read/1146316/171/penyidik-bareskrim-mulai-dalami-video-dugaan-penistaan-agama-1476179831
            url_selectors = article.css('a::attr(href)')

            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example 'Kamis, 13 Oktober 2016 - 11:18 WIB'
            date_time_str_selectors = article.css('p::text')

            if not date_time_str_selectors:
                raise CloseSpider('date_time_str_selectors not found')

            date_time_str = date_time_str_selectors.extract()[0]

            # Parse date information
            # Example '13 Oktober 2016 - 11:18'
            date_time_str = date_time_str.split(',')[1].strip()[:-4]
            date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
            try:
                published_at_wib = datetime.strptime(date_time_str,
                                                     '%d %B %Y - %H:%M')
            except Exception as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            # For each url we create new scrapy request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        for next_button in response.css('.mpaging > ul > li'):
            if len(next_button.css('a:not(.active) > .fa-angle-right')) > 0:
                next_page = next_button.css('a::attr(href)').extract()[0]
                next_page_url = response.urljoin(next_page)
                yield Request(next_page_url, callback=self.parse)
                break
Example #27
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.l_content > ul > li > article")
        if articles:
            for article in articles:
                url_selector = article.css("a::attr(href)")
                if not url_selector:
                    raise CloseSpider('url_selectors not found')
                url = url_selector.extract()[0]
                # print url

                # parse date
                date_selectors = article.css("div.box_text > div.date::text")
                if not date_selectors:
                    raise CloseSpider('date_selectors not found')
                date_str = date_selectors.extract()[0]

                # date_str = Senin, 09 Okt 2017 16:12 WIB

                info_time = date_str.split(',')[1].strip()
                time_arr = filter(None, re.split('[\s,|]', info_time))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %b %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                # eg: 6 Hours ago
                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        if response.css("div.paging_ > a.pn::attr(href)"):
            next_page = response.css(
                "div.paging_ > a.pn::attr(href)")[1].extract()
            yield Request(next_page, callback=self.parse)
Example #28
    def parse(self, response):
        base_url = "http://www.pikiran-rakyat.com"
        self.logger.info('parse: %s' % response)

        articles = response.css("div.view-content > div")
        if not articles:
            raise CloseSpider('articles not found')

        for article in articles:
            url_selector = article.css("h2.entry-title > a::attr(href)")
            if not url_selector:
                continue
            url = base_url + url_selector.extract()[0]

            info_selectors = article.css(
                "div.entry-meta > span.entry-date > span::text")
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            #info = 12 September, 2017 - 15:15
            info = info_selectors.extract_first()

            time_arr = filter(None, re.split('[\s,-]', info))
            info_time = ' '.join([_(s) for s in time_arr if s])

            #parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            #convert to utc+0
            published_at = wib_to_utc(published_at_wib)

            # TODO check the last time for scraping

            yield Request(url=url, callback=self.parse_news)

        pagination = response.css('div.text-center > ul.pagination > li.next')
        if pagination:
            next_page = base_url + pagination.css(
                'a::attr(href)').extract_first()
            yield Request(next_page, callback=self.parse)
Example #29
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)
        parsed_news = json.loads(response.body_as_unicode())[0]

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        if not parsed_news['title']:
            # Will be dropped on the item pipeline
            return loader.load_item()
        loader.add_value('title', parsed_news['title'])

        if not parsed_news['content']:
            # Will be dropped on the item pipeline
            return loader.load_item()
        body_match = re.search(r'<body>(.*)</body>', parsed_news['content'], re.S | re.I)
        if not body_match:
            # Will be dropped on the item pipeline
            return loader.load_item()
        parsed_news['content'] = body_match.group(1)
        parsed_news['content'] = re.sub(r'<img[^>]+>', '', parsed_news['content'])
        loader.add_value('raw_content', parsed_news['content'])

        if not parsed_news['published']:
            # Will be dropped on the item pipeline
            return loader.load_item()

        # Parse date information
        # Example: 12 Oct 2016 - 05:25
        date_time_str = ' '.join([_(w) for w in parsed_news['published'].split(',')[1].strip()[:-4].split(' ')])
        try:
            published_at_wib = datetime.strptime(date_time_str,
                    '%d %b %Y - %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        if not parsed_news['author']:
            loader.add_value('author_name', '')
        else:
            loader.add_value('author_name', parsed_news['author'])

        # Move scraped news to pipeline
        return loader.load_item()
Example #30
    def parse(self, response):
        self.logger.info('parse: %s' % response)
        is_no_update = False

        # Get list of news from the current page
        articles = response.css('div.article-snippet__info')
        if not articles:
            raise CloseSpider('article not found')
        for article in articles:
            # Close the spider if we don't find the list of urls
            url_selectors = article.css('a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            info_selectors = article.css('div.article-snippet__date')
            info_selectors = info_selectors.css('.timeago::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            # Example '13 Okt 2016 16:10'
            info_time = info_selectors.extract()[0]
            # Example '13 Oct 2016 16:10'
            info_time = ' '.join([_(w) for w in info_time.split(' ')])

            # Parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %b %Y %H:%M')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: {}'.format(e))

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            # For each url we create new scrapy Request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return
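
For reference, every loader in these examples targets the same `News` item and relies on an item pipeline to drop incomplete results. A minimal definition consistent with the `add_value` calls above; the real class may define extra fields or input/output processors:

    import scrapy

    class News(scrapy.Item):
        # Fields inferred from the add_value() calls in the examples above
        url = scrapy.Field()
        title = scrapy.Field()
        author_name = scrapy.Field()
        raw_content = scrapy.Field()
        published_at = scrapy.Field()
        media_id = scrapy.Field()
        election_id = scrapy.Field()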