Example #1
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)
        parsed_news = json.loads(response.text)[0]

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', parsed_news['url'])

        if not parsed_news['title']:
            # Will be dropped on the item pipeline
            return loader.load_item()
        loader.add_value('title', parsed_news['title'])

        # Convert HTML text to a scrapy response
        html_response = HtmlResponse(url=parsed_news['url'],
                body=parsed_news['content'].encode('utf-8', 'ignore'))
        xpath_query = '''
            //body/node()
                [not(descendant-or-self::comment()|
                    descendant-or-self::style|
                    descendant-or-self::script|
                    descendant-or-self::div|
                    descendant-or-self::span|
                    descendant-or-self::image|
                    descendant-or-self::img|
                    descendant-or-self::iframe
                )]
        '''
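        # Keep only <body> child nodes that neither are nor contain comments,
        # styles, scripts, divs, spans, images, or iframes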
        raw_content_selectors = html_response.xpath(xpath_query)
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        raw_content = ' '.join([w.strip() for w in raw_content])
        loader.add_value('raw_content', raw_content)

        if not parsed_news['published']:
            # Will be dropped on the item pipeline
            return loader.load_item()

        # Parse date information
        # Example: 12 Oct 2016 - 05:25
        date_time_str = parsed_news['published'].split(',')[1].strip()[:-4]
        date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
        try:
            published_at_wib = datetime.strptime(date_time_str,
                    '%d %b %Y - %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        if not parsed_news['author']:
            loader.add_value('author_name', '')
        else:
            loader.add_value('author_name', parsed_news['author'])

        # Move scraped news to pipeline
        return loader.load_item()
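A note on shared pieces: every example relies on project helpers that are not shown here. wib_to_utc converts a naive WIB (UTC+7) datetime to UTC, _ translates Indonesian date words to English so datetime.strptime can parse them with %B/%b, and News is the scrapy.Item being populated (the usual imports such as json, re, datetime, Request, ItemLoader, HtmlResponse, and CloseSpider are assumed as well). A minimal sketch of what the helpers might look like; the field list and month table are inferred from the add_value calls, not taken from the project:

    from datetime import timedelta

    import scrapy

    class News(scrapy.Item):
        url = scrapy.Field()
        title = scrapy.Field()
        author_name = scrapy.Field()
        raw_content = scrapy.Field()
        published_at = scrapy.Field()
        media_id = scrapy.Field()
        election_id = scrapy.Field()

    def wib_to_utc(dt):
        # WIB (Waktu Indonesia Barat) is UTC+7, so subtract seven hours
        return dt - timedelta(hours=7)

    MONTHS_ID_EN = {
        'Januari': 'January', 'Februari': 'February', 'Maret': 'March',
        'Mei': 'May', 'Juni': 'June', 'Juli': 'July', 'Agustus': 'August',
        'Oktober': 'October', 'Desember': 'December',
    }

    def _(word):
        # Translate an Indonesian month name to English; anything else
        # (day numbers, times, already-English tokens) passes through
        return MONTHS_ID_EN.get(word, word)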
Example #2
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)
        loader = ItemLoader(item=News(), response=response)
        json_response = json.loads(response.body)

        try:
            url = json_response['NewsML']['NewsItem']['NewsComponent'][
                'NewsComponent']['NewsComponent']['NewsLines']['MoreLink']
        except KeyError:
            return loader.load_item()
        loader.add_value('url', url)

        try:
            title = json_response['NewsML']['NewsItem']['NewsComponent'][
                'NewsComponent']['NewsComponent']['NewsLines']['HeadLine']
        except KeyError:
            return loader.load_item()
        if not title:
            return loader.load_item()
        loader.add_value('title', title)

        try:
            raw_content = json_response['NewsML']['NewsItem']['NewsComponent'][
                'NewsComponent']['NewsComponent']['ContentItem'][
                    'DataContent']['nitf']['body']['body.content']['p']
        except KeyError:
            return loader.load_item()
        if not raw_content:
            return loader.load_item()
        loader.add_value('raw_content', raw_content)

        try:
            author_name = json_response['NewsML']['NewsItem']['NewsComponent'][
                'NewsComponent']['Author']
        except KeyError:
            return loader.load_item()
        if not author_name:
            loader.add_value('author_name', '')
        else:
            loader.add_value('author_name', author_name)

        try:
            date_time_str = json_response['NewsML']['NewsItem'][
                'NewsManagement']['FirstCreated']
        except KeyError:
            return loader.load_item()
        if not date_time_str:
            return loader.load_item()

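        # FirstCreated looks like e.g. '20161012T92500'; left-pad the time
        # part to six digits so the '%H%M%S' parse below gets '092500'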
        date_time_str = date_time_str.split('T')
        date_time_str[1] = date_time_str[1].zfill(6)
        try:
            published_at_wib = datetime.strptime(' '.join(date_time_str),
                                                 '%Y%m%d %H%M%S')
        except Exception:
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        return loader.load_item()
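The chains of try/except KeyError above could be collapsed with a small helper along these lines (a sketch; deep_get is a made-up name, not part of the original spider):

    def deep_get(data, *keys):
        # Walk nested dicts, returning None as soon as a key is missing
        for key in keys:
            try:
                data = data[key]
            except (KeyError, TypeError):
                return None
        return data

    # title = deep_get(json_response, 'NewsML', 'NewsItem', 'NewsComponent',
    #                  'NewsComponent', 'NewsComponent', 'NewsLines', 'HeadLine')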
Example #3
    def parse_news(self, article, sub_article):
        if not (sub_article['news_url'] and article['news_title'] and
                article['news_reporter'] and sub_article['news_description']
                and article['news_date_publish']):
            return

        self.logger.info('parse_news: %s' % article)

        # Example: https://m.merdeka.com/tag/p/pilgub-dki/politik/nachrowi-pastikan-agus-sylvi-tak-cuma-incar-suara-santri-ulama.html
        url = 'https://www.merdeka.com' + sub_article['news_url']

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News())
        loader.add_value('url', url)
        loader.add_value('title', article['news_title'])
        loader.add_value('author_name', article['news_reporter'])
        loader.add_value('raw_content', sub_article['news_description'])

        # Parse date information
        try:
            # Example: 2016-10-12 15:16:04
            date_time_str = article['news_date_publish']
            self.logger.info('parse_date: parse_news: date_str: %s',
                             date_time_str)
            published_at = wib_to_utc(
                datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S'))
            loader.add_value('published_at', published_at)
        except Exception as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        # Move scraped news to pipeline
        return loader.load_item()
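Note that unlike most of the other examples, this parse_news receives pre-parsed JSON dicts rather than a scrapy Response; Example #5 shows the matching call site (yield self.parse_news(article, sub_article)).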
Example #4
    def parse_news(self, response):
        title = response.css('h1[itemprop="headline"]::text').extract_first()
        author_name = response.css('a[rel="author"] > span::text').extract_first()
        raw_content = response.css('.content').extract_first()

        if not (title and author_name and raw_content):
            return

        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)
        loader.add_value('title', title)
        loader.add_value('author_name', author_name)
        loader.add_value('raw_content', raw_content)

        # Parse date information
        try:
            # Example: Selasa,  6 Oktober 2015 - 05:23 WIB
            date_time_str = response.css('article > div.time::text').extract()[0]
            date_time_str = date_time_str.split(',')[1].strip()[:-4]
            date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
            self.logger.info('parse_date: parse_news: date_str: %s', date_time_str)
            published_at = wib_to_utc(datetime.strptime(date_time_str, '%d %B %Y - %H:%M'))
            loader.add_value('published_at', published_at)
        except Exception as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        # Move scraped news to pipeline
        return loader.load_item()
Example #5
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        # Collect list of news from current page
        articles = json.loads(response.body)['response']
        for article in articles:
            # Example: 2016-10-12 15:16:04
            date_time_str = article['news_date_publish']

            # Parse date information
            try:
                published_at_wib = datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
            except Exception as e:
                raise CloseSpider('cannot_parse_date: {}'.format(e))
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            for sub_article in article['news_content']:
                yield self.parse_news(article, sub_article)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # Collect news on next page
        if len(articles) > 0:
            # Example: 'http://api.merdeka.com/mobile/gettag/pilgub-dki/0/20/L9pTAoWB269T&-E/'
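            # The offset is the fourth segment from the end (the trailing
            # slash leaves an empty final element); bump it by the page size of 20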
            next_page_url = response.url.split('/')
            next_page_url[-4] = str(int(next_page_url[-4]) + 20)
            next_page_url = '/'.join(next_page_url)
            yield Request(next_page_url, callback=self.parse)
Example #6
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        # Collect list of news from current page
        articles_grid = response.css('li:not(.last) > div.grid')
        # Pair each selector with its layout type
        articles = [(grid, NEWS_GRID) for grid in articles_grid]
        articles += [(topic, NEWS_HEADLINE)
                     for topic in response.css('div.topic')]

        if not articles:
            raise CloseSpider('article not found')

        for article in articles:
            # Close the spider if we don't find the list of urls
            url_selectors = None
            if article[1] == NEWS_GRID:
                url_selectors = article[0].css('h2 > a::attr(href)')
            elif article[1] == NEWS_HEADLINE:
                url_selectors = article[0].css('h1 > a::attr(href)')

            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            self.logger.info('Url: {}'.format(url))

            # Example: Minggu, 09 Oct 2016 15:14
            info_selectors = article[0].css('div.reg::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            info = info_selectors.extract()[1]
            # Example: 09 Oct 2016 15:14
            info_time = info.split(',')[1].strip()

            # Parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %b %Y %H:%M')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: {}'.format(e))

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break
            # For each url we create new scrapy request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # Collect news on next page
        if response.css('div.bu.fr > a'):
            next_page = response.css(
                'div.bu.fr > a[rel="next"]::attr(href)').extract()[0]
            next_page_url = response.urljoin(next_page)
            yield Request(next_page_url, callback=self.parse)
Example #7
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.detail > article > div.detail_area > h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        # parse date
        date_selectors = response.css(
            "div.detail > article > div.detail_area > div.date::text")
        if not date_selectors:
            return loader.load_item()
        # Selasa 10 Oktober 2017, 13:40 WIB
        date_str = date_selectors.extract()[0]

        date_str = list(filter(None, re.split(r'[\s,]', date_str)))[1:5]
        info_time = ' '.join([_(s) for s in date_str if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #TODO check the published_at, if it is smaller than the last time
        #we crawl, just drop the data.

        #parse author name
        author_name_selectors = response.css(
            "div.detail > article > div.detail_area > div.author > strong::text"
        )
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract_first()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            "div.detail > article > div.text_detail.detail_area")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #8
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        # Required: title, raw_content, published_at
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)
        title_selectors = response.css('div.detail_area > h1.jdl::text')
        if not title_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        title = title_selectors.extract()[0]
        loader.add_value('title', title)

        # Extract the content using XPath instead of CSS selector
        # We get the XPath from chrome developer tools (copy XPath)
        # or equivalent tools from other browser
        xpath_query = """
            //div[@class="text_detail detail_area"]/node()
                [not(self::comment()|self::script|self::div)]
        """
        raw_content_selectors = response.xpath(xpath_query)
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = ' '.join(raw_content_selectors.extract())
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        # Parse date information
        # Example: Kamis 15 Sep 2016, 18:33 WIB
        date_selectors = response.css('div.detail_area > div.date::text')
        if not date_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()

        date_str = date_selectors.extract()[0]
        # Example: '15 Sep 2016, 18:33'
        date_str = ' '.join(date_str.split(' ')[1:5])
        try:
            published_at_wib = datetime.strptime(date_str, '%d %b %Y, %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()

        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        author_name_selectors = response.css('div.author > strong::text')
        if not author_name_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_name_selectors.extract()[0]
            loader.add_value('author_name', author_name)

        # Move scraped news to pipeline
        return loader.load_item()
Example #9
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div.artikel > h1.artikel::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css('div.artikel > div.tanggal::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()

        # eg: Tuesday, 12 September 2017 | 20:21 WIB
        time_arr = list(filter(None, re.split(r'[\s,|]', date_str)))[1:-1]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        if self.media['last_crawl_at'] >= published_at:
            # Already crawled; will be dropped on the item pipeline
            return loader.load_item()

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            'div.artikel > div > p > strong::text')
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[-1].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css('div.artikel > div > p')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #10
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        # Initialize item loader
        # extract news title, published_at, author, content, url
        # Required: title, raw_content, published_at
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('h1.detailtitle::text')
        if not title_selectors:
            # If error, drop from the item pipeline
            return loader.load_item()
        title = title_selectors.extract_first().strip()
        loader.add_value('title', title)

        # Parse date information
        date_time = response.css(
            'body > div > div.container > div.page-header > div::text'
        ).extract_first().strip()
        date_time = date_time.split(',')[-1].strip()
        date_time = ' '.join([_(w) for w in date_time.split(' ')])  # Oktober => October
        try:
            published_at_wib = datetime.strptime(date_time, '%d %B %Y %H:%M')
        except ValueError:
            # If error, drop from the item pipeline
            return loader.load_item()

        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        # If multipage
        multipage_selectors = response.css('.newsPagingWrap > a')
        if multipage_selectors:
            return self.parse_indices(multipage_selectors, loader)

        # Else if not multipage

        author_name_selectors = response.css('.newsContent > p > strong::text')
        if not author_name_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_name_selectors.extract()[-1].strip()
            loader.add_value('author_name', author_name)

        # Extract the news content
        raw_content_selectors = response.css('.newsContent > p')
        if not raw_content_selectors:
            # Drop from the item pipeline
            return loader.load_item()

        raw_content = ' '.join(raw_content_selectors.extract())
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        # Move scraped news to pipeline
        return loader.load_item()
Example #11
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.main-container > div > section.main-content > h1.page-header::text'
        )
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css(
            'div.post-meta > div > div > div.submitted > span::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()
        info_time = re.sub(r'[,-]', '', date_str)
        info_time = re.sub(r'\s+', ' ', info_time)
        time_arr = list(filter(None, re.split(r'[\s,|-]', info_time)))[:4]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError:
            return loader.load_item()

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name = response.css(
            'div.post-meta > div > div > div.items-penulis > span > a::text'
        ).extract_first()
        if not author_name:
            loader.add_value('author_name', 'N/A')
        else:
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            'div.region.region-content > section > article > div.field.field-name-body.field-type-text-with-summary > div.field-items > div.field-item.even'
        )
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #12
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div#mdk-news-title::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css("div.mdk-date-reporter > span::text")
        # We need to do this because sometimes selector can contains 3 or 2 elements.
        pos = len(date_selectors) - 2
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[pos]

        # eg: 8 September 2017 21:02
        date_str = date_str.split("|")[1].strip()
        time_arr = list(filter(None, re.split(r'[\s,|]', date_str)))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            "div.mdk-date-reporter > span::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[1]
            author_name = author_name.split(":")[1].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.mdk-body-paragraph")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #13
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        # parse date
        date_selectors = response.css("div.date > span::text")
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[0]

        # eg: Selasa, 12 Sep 2017 20:08
        date_str = date_str.split(",")[1].strip()
        time_arr = list(filter(None, re.split(r'[\s,|]', date_str)))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %b %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #TODO check the published_at, if it is smaller than the last time
        #we crawl, just drop the data.

        #parse author name
        author_name_selectors = response.css("div.date > span")[1].css(
            "span > span::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract_first()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.contentdetail")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #14
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.view-content > div > article")

        if articles:
            for article in articles:
                url_selector = article.css(
                    "div.entry-main > h2.entry-title > a ::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                info_selectors = article.css(
                    "div.entry-main > div.entry-meta > span.entry-date > span::text"
                )
                if not info_selectors:
                    continue

                # 25 July, 2018 - 11:04
                info = info_selectors.extract_first()

                info_time = re.sub(r'[,-]', '', info)
                info_time = re.sub(r'\s+', ' ', info_time)
                time_arr = list(filter(None, re.split(r'[\s,|-]', info_time)))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                #parse date information
                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                self.logger.debug('%s published at %s WIB', url, published_at_wib)

                full_url = '%s%s' % (self.base_url, url)

                yield Request(url=full_url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            next_selectors = response.css("ul.pagination > li.next")
            if next_selectors:
                next_url = next_selectors.css("a::attr(href)").extract_first()
                next_url = '%s%s' % (self.base_url, next_url)
                yield Request(next_url, callback=self.parse)
Example #15
    def parse_news(self, response):

        # self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('div.wrap-head > h2 > a::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        # parse date
        date_selectors = response.css('div.wrap-head > span::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract()[0]

        # eg: Tuesday, 12 September 2017 | 20:21 WIB
        time_arr = list(filter(None, re.split(r'[\s,|]', date_str)))[1:-1]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)

        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css('div.red::text')
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[0].strip()
            author_name = author_name.replace('Rep: ',
                                              '').replace('Red: ', '').strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css('div.content-detail')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #16
    def convert_date(self, idn_date):
        # Example: Rabu, 12 Oktober 2016 | 10:23 WIB
        info_time = idn_date.split(',')[1].strip().split('|')
        info_date = info_time[0].strip().split(' ')
        info_hours = info_time[1].strip().split(' ')[0].strip()
        day = info_date[0]
        month = self.translator.translate('idn')[info_date[1]]
        year = info_date[2]
        formatted_date = '%s %s %s, %s' % (day, month, year, info_hours)
        return wib_to_utc(datetime.strptime(formatted_date, '%d %B %Y, %H:%M'))
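A quick sanity check of the conversion, assuming self.translator maps 'Oktober' to 'October' and wib_to_utc subtracts the seven-hour WIB offset:

    >>> spider.convert_date('Rabu, 12 Oktober 2016 | 10:23 WIB')
    datetime.datetime(2016, 10, 12, 3, 23)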
Example #17
    def parse(self, response):
        self.logger.info('parse: %s' % response)
        is_no_update = False

        # Get list of news from the current page
        articles = response.css('article > div > div.post-content')

        if not articles:
            raise CloseSpider('article not found')
        for article in articles:
            # Close the spider if we don't find the list of urls
            url_selectors = article.css('a.timestamp-link::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example 'Sabtu, November 19, 2016'
            date_selectors = article.css('a.timestamp-link > abbr::text')
            if not date_selectors:
                raise CloseSpider('date_selectors not found')

            # Parse date information
            try:
                date = date_selectors.extract()[0].split(' ')
                # Sanitize month - Indo month to Eng month
                # Example: Nov 19 2016
                date[1] = sanitize(date[1])
                published_at_wib = datetime.strptime(' '.join(date[1:]),
                                                     '%b %d, %Y')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            # For each url we create new scrapy request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # try getting next page
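        # The archive appears to use Blogger-style paging: ?updated-max=<ISO
        # timestamp> returns posts older than that time, with the +07:00 WIB
        # offset URL-encoded as %2B07:00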
        if len(articles) > 0:
            yield Request(
                'http://www.nusanews.co/search/label/Pilkada?updated-max='
                + str(published_at_wib).replace(' ', 'T') +
                '%2B07:00&max-results=20',
                callback=self.parse)
Example #18
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.list.media_rows.list-berita > article")
        if articles:
            for article in articles:
                # TODO handle for photo

                url_selector = article.css("a::attr(href)")
                if not url_selector:
                    continue

                url = url_selector.extract_first()
                self.logger.info(url)

                # parse date
                date_selectors = article.css("a > span.box_text > span.date::text")
                if not date_selectors:
                    continue
                date_str = date_selectors.extract_first()

                # date_str = Senin, 09 Okt 2017 16:12 WIB

                info_time = date_str.split(',')[1].strip()
                time_arr = list(filter(None, re.split(r'[\s,|]', info_time)))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                try:
                    published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s in %s' % (e, url))

                # convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                # TODO
                # eg: 6 Hours ago
                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    self.logger.info('Done sending requests. No more')
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        navs = response.css("div.paging.text_center > a.last")
        for nav in navs:
            direction = nav.css("img::attr(alt)").extract_first()
            # 'kanan' is Indonesian for 'right', i.e. the next-page arrow
            if direction and direction.lower() == 'kanan':
                next_page = nav.css("a::attr(href)").extract_first()
                yield Request(next_page, callback=self.parse)
Example #19
    def parse_news_metro(self, response):
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        date_selector = response.css('.artikel > div.block-tanggal::text')
        if not date_selector:
            return self.parse_news_pilkada(loader, response)
        try:
            date_time_str = date_selector.extract()[0].split(',')[1].strip()[:-4]
            date_time_str = ' '.join([_(x) for x in date_time_str.split(' ')])
            published_at_wib = datetime.strptime(date_time_str, '%d %B %Y | %H:%M')
        except Exception:
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        if self.media['last_scraped_at'] >= published_at:
            self.logger.info('Media have no update')
            raise CloseSpider('finished')
        loader.add_value('published_at', published_at)

        title_selector = response.css('.artikel > h1::text')
        if not title_selector:
            return loader.load_item()
        loader.add_value('title', title_selector.extract()[0])

        # Select all p which don't have iframe inside it
        raw_content_selector = response.xpath('//div[@class="artikel"]//p[not(iframe)]')
        if not raw_content_selector:
            return loader.load_item()
        raw_content = ''
        for raw_content_selector_one in raw_content_selector:
            raw_content = raw_content + raw_content_selector_one.extract()

        # Go to next page while there is next page button
        next_page_selector = response.css('.pagination-nb').xpath(
            './/a[text()="next"]/@href')
        if next_page_selector:
            return Request(
                next_page_selector.extract()[0],
                callback=lambda response, loader=loader, raw_content=raw_content:
                self.parse_next_page_metro(response, loader, raw_content))

        loader.add_value('raw_content', raw_content)

        # The author name is usually inside a <strong> tag; however, some
        # articles do not use one.
        # NOTE: this block of code may need revision in the future
        author_name = ''
        for author_name_selector in reversed(raw_content_selector):
            author_name_selector = author_name_selector.css('strong::text')
            for tmp in reversed(author_name_selector.extract()):
                tmp = tmp.strip()
                if tmp and all((x.isalpha() and x.isupper()) or x.isspace() or x == '.' or x == '|' for x in tmp):
                    author_name = tmp
                    break
            if author_name:
                break
        author_name = ','.join(author_name.split(' | '))
        loader.add_value('author_name', author_name)
        return loader.load_item()
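Threading the half-filled loader through a lambda closure works, but the more common Scrapy pattern is to carry state in Request.meta; a sketch of the same hand-off (not the project's code):

    request = Request(next_url, callback=self.parse_next_page_metro)
    request.meta['loader'] = loader
    request.meta['raw_content'] = raw_content
    return request

parse_next_page_metro would then read both values back from response.meta.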
Example #20
    def parse(self, response):
        self.logger.info('parse: {}'.format(response))
        is_no_update = False

        # Collect list of news from current page
        article_selectors = response.css('ul.indexlist > li')
        if not article_selectors:
            raise CloseSpider('article_selectors not found')
        for article in article_selectors:
            url_selectors = article.css('a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example: 7 Oktober 2016 19:37
            info_selectors = article.css('div.upperdeck::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            info = info_selectors.extract()[1]
            info = info.split(',')[1].replace('\t', '').strip()
            # Example: 7 October 2016 19:37
            info_time = info.split(' ')
            info_time = ' '.join([_(s) for s in info_time])

            # Parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M')
            except ValueError as err:
                raise CloseSpider('cannot_parse_date: {}'.format(err))
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break
            # For each url we create new scrapy Request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # Collect news on next page
        tag_selectors = response.css('div.pagination > a')
        if not tag_selectors:
            raise CloseSpider('tag_selectors not found')
        for tag in tag_selectors:
            more_selectors = tag.css('a::text')
            if not more_selectors:
                raise CloseSpider('more_selectors not found')
            more = more_selectors.extract()[0]
            if more == 'NEXT':
                next_page = tag.css('a::attr(href)').extract()[0]
                next_page_url = response.urljoin(next_page)
                yield Request(next_page_url, callback=self.parse)
Example #21
    def parse(self, response):
        is_no_update = False

        articles = response.css(
            "div.left_ib_rightx > div.set_subkanal > div.txt_subkanal")
        if articles:
            for article in articles:
                # TODO handle for photo

                url_selector = article.css("h2 > a::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                # parse date
                date_selectors = article.css("h6::text")
                if not date_selectors:
                    continue
                date_str = date_selectors.extract_first()

                # Tuesday, 14 Aug 2018 21:37 WIB

                info_time = date_str.split(',')[1].strip()
                time_arr = list(filter(None, re.split(r'[\s,|]', info_time)))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s in %s' % (e, url))

                # convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                # TODO
                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            if response.css("div.pagination > section > nav > a"):
                links = response.css("div.pagination > section > nav > a")
                for link in links:
                    l = link.css("a::text").extract_first()
                    if l.lower() == 'next':
                        next_page = link.css("a::attr(href)").extract_first()
                        yield Request(next_page, callback=self.parse)
                    else:
                        continue
Example #22
    def parse_news(self, response):
        self.logger.info('parse_news: {}'.format(response))

        # Init item loader
        # extract news title, published_at, author, content, url
        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        title_selectors = response.css('div.content-detail > h4::text')
        if not title_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        title = title_selectors.extract()[0]
        loader.add_value('title', title)

        # Extract raw html, not the text
        raw_content_selectors = response.css('div.content-body')
        if not raw_content_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        raw_content = ' '.join([w.strip() for w in raw_content])
        raw_content = raw_content.strip()
        loader.add_value('raw_content', raw_content)

        # Example: Selasa, 11 Oktober 2016 | 10:48
        date_selectors = response.css('div.date::text')
        if not date_selectors:
            # Will be dropped on the item pipeline
            return loader.load_item()
        date_str = date_selectors.extract()[0]
        # Example: 11 October 2016 10:48
        date_str = re.split(r'[\s,|-]', date_str)
        date_str = ' '.join([_(s) for s in date_str[1:] if s])

        # Parse date information
        try:
            published_at_wib = datetime.strptime(date_str, '%d %B %Y %H:%M')
        except ValueError:
            # Will be dropped on the item pipeline
            return loader.load_item()
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        author_selectors = response.css('div.content-detail > p::text')
        if not author_selectors:
            loader.add_value('author_name', '')
        else:
            author_name = author_selectors.extract()[0]
            author_name = author_name.split('/')[0]
            loader.add_value('author_name', author_name)

        # Move scraped news to pipeline
        return loader.load_item()
Example #23
    def extract_date_from_url(self, url):
        # date equals to '2017 01 31'
        date = ' '.join(url.split('/')[-5:-2])
        try:
            published_at_wib = datetime.strptime(date, '%Y %m %d')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        return published_at
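For a URL shaped like http://example.com/news/2017/01/31/politik/some-title.html (a hypothetical sample with two path segments after the date), the slice picks out the date parts:

    url = 'http://example.com/news/2017/01/31/politik/some-title.html'
    ' '.join(url.split('/')[-5:-2])  # -> '2017 01 31'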
Example #24
    def parse(self, response):
        self.logger.info('parse: %s' % response)
        is_no_update = False

        # Get list of news from the current page
        articles = response.css('article')
        if not articles:
            raise CloseSpider('article not found')
        for article in articles:
            # Close the spider if we don't find the list of urls
            url_selectors = article.css('a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example 'detikNews | Sabtu 08 Oct 2016, 14:54 WIB'
            info_selectors = article.css('a > .text > span.info::text')
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            info = info_selectors.extract()[0]
            # Example 'Sabtu 08 Oct 2016, 14:54 WIB'
            info_time = info.split('|')[1].strip()
            # Example '08 Oct 2016, 14:54'
            info_time = ' '.join(info_time.split(' ')[1:5])

            # Parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %b %Y, %H:%M')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            # For each url we create new scrapy request
            yield Request(url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        if response.css('a.btn_more'):
            next_page = response.css('a.btn_more::attr(href)')[0].extract()
            next_page_url = response.urljoin(next_page)
            yield Request(next_page_url, callback=self.parse)
        elif response.css('div.pag-nextprev > a'):
            next_page = response.css(
                'div.pag-nextprev > a::attr(href)')[1].extract()
            next_page_url = response.urljoin(next_page)
            yield Request(next_page_url, callback=self.parse)
Example #25
    def parse(self, response):
        self.logger.info('parse: %s' % response)
        is_no_update = False

        # Get list of news from the current page
        articles = response.css('li.media')
        if not articles:
            raise CloseSpider('article not found')
        for article in articles:
            # Close the spider if we don't find the list of urls
            url_selectors = article.css('a::attr(href)')
            if not url_selectors:
                raise CloseSpider('url_selectors not found')
            url = url_selectors.extract()[0]

            # Example '02 November 2016'
            date_selectors = article.css('time::text')
            if not date_selectors:
                raise CloseSpider('date_selectors not found')

            # Parse date information
            try:
                date = date_selectors.extract()[0].split(' ')
                # Sanitize month - Indo month to Eng month
                # Example: 02 Nov 2016
                date[1] = sanitize(date[1])
                published_at_wib = datetime.strptime(' '.join(date),
                                                     '%d %b %Y')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            published_at = wib_to_utc(published_at_wib)

            if self.media['last_scraped_at'] >= published_at:
                is_no_update = True
                break

            # For each url we create new scrapy request
            yield Request('http:' + url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        # try getting next page
        try:
            next_page_url = response.xpath(
                '//section[@class="pagination-numeric"]/span/a/@href'
            )[-1].extract()

            if next_page_url and next_page_url != response.url:
                yield Request(next_page_url, callback=self.parse)
        except IndexError:
            pass
Example #26
    def parse_news(self, response):

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css(
            'div.pa15.bgwhite > h1.f32.fno.crimson::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css(
            'div.pa15.bgwhite > div.mt10.mb10 > time.grey.f13.dip::text')
        if not date_selectors:
            return loader.load_item()
        date_str = date_selectors.extract_first()
        # eg: Kompas.com - 10/10/2017, 13:37 WIB
        info_time = date_str.split(',')[1].strip()
        time_arr = list(filter(None, re.split(r'[\s,|]', info_time)))[:4]
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError:
            return loader.load_item()

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        # author_name_selectors = response.css('div.read__author > a::text').extract_first()
        # if not author_name_selectors:
        loader.add_value('author_name', 'N/A')
        # else:
        #     author_name = author_name_selectors
        #      loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css(
            'div.ptb15 > div.txt-article.mb20')
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract_first()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #27
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.article__list.clearfix")

        if articles:
            for article in articles:
                url_selector = article.css(
                    "div.article__list__title > h3 > a::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                info_selectors = article.css(
                    "div.article__list__info > div.article__date::text")
                if not info_selectors:
                    continue

                #info = 10/10/2017, 13:37 WIB
                info = info_selectors.extract_first()

                time_arr = list(filter(None, re.split(r'[\s,]', info)))[:2]
                info_time = ' '.join([s for s in time_arr if s])

                # parse date information
                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d/%m/%Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            next_selectors = response.css(
                "div.paging__wrap > div.paging__item > a.paging__link.paging__link--next"
            )
            for selector in next_selectors:
                if selector.css("a::attr(rel)").extract_first() == 'next':
                    next_url = selector.css("a::attr(href)").extract_first()
                    yield Request(next_url, callback=self.parse)
                    break
Example #28
    def parse_news(self, response):
        self.logger.info('parse_news: %s' % response)

        loader = ItemLoader(item=News(), response=response)
        loader.add_value('url', response.url)

        loader.add_value('media_id', self.media_id)
        loader.add_value('election_id', self.election_id)

        #parse title
        title_selectors = response.css('section.main-content > h1::text')
        if not title_selectors:
            return loader.load_item()
        title = title_selectors.extract_first()
        loader.add_value('title', title)

        #parse date
        date_selectors = response.css("div.submitted > span::text")
        if not date_selectors:
            return loader.load_item()
        # eg: 5 September, 2017 - 18:54
        date_str = date_selectors.extract_first()

        time_arr = list(filter(None, re.split(r'[\s,-]', date_str)))
        info_time = ' '.join([_(s) for s in time_arr if s])

        #parse date information
        try:
            published_at_wib = datetime.strptime(info_time, '%d %B %Y %H:%M')
        except ValueError as e:
            raise CloseSpider('cannot_parse_date: %s' % e)

        #convert to utc+0
        published_at = wib_to_utc(published_at_wib)
        loader.add_value('published_at', published_at)

        #parse author name
        author_name_selectors = response.css(
            "div.items-penulis > span > a::text")
        if not author_name_selectors:
            loader.add_value('author_name', 'N/A')
        else:
            author_name = author_name_selectors.extract()[0].strip()
            loader.add_value('author_name', author_name)

        #parse raw content
        raw_content_selectors = response.css("div.field-item.even")
        if not raw_content_selectors:
            return loader.load_item()
        raw_content = raw_content_selectors.extract()
        loader.add_value('raw_content', raw_content)

        return loader.load_item()
Example #29
    def parse(self, response):
        is_no_update = False

        articles = response.css("div.lsi > ul#latestul > li > div")

        if articles:
            for article in articles:
                url_selector = article.css(
                    "div.m-btsquare > h3 > a::attr(href)")
                if not url_selector:
                    continue
                url = url_selector.extract_first()

                info_selectors = article.css(
                    " div.m-btsquare > div > time::text")
                if not info_selectors:
                    continue

                #info = Jumat, 17 Agustus 2018 04:43 WIB
                info = info_selectors.extract_first()

                info_time = info.split(',')[1].strip()
                time_arr = list(filter(None, re.split(r'[\s,|]', info_time)))[:4]
                info_time = ' '.join([_(s) for s in time_arr if s])

                #parse date information
                try:
                    published_at_wib = datetime.strptime(
                        info_time, '%d %B %Y %H:%M')
                except ValueError as e:
                    raise CloseSpider('cannot_parse_date: %s' % e)

                #convert to utc+0
                published_at = wib_to_utc(published_at_wib)

                if self.media['last_crawl_at'] >= published_at:
                    is_no_update = True
                    break

                yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
        else:
            next_selectors = response.css("div.ma10.paging#paginga > a")
            for selector in next_selectors:
                label = selector.css("a::text").extract_first()
                if label and label.lower() == 'next':
                    next_url = selector.css("a::attr(href)").extract_first()
                    yield Request(next_url, callback=self.parse)
                    break
Example #30
    def parse(self, response):
        base_url = "https://m.merdeka.com"
        self.logger.info('parse: %s' % response)
        self.logger.info('last_crawl_at: %s', self.media['last_crawl_at'])
        is_no_update = False

        articles = response.css("div#mdk-tag-news-list_mobile > ul > li")
        if not articles:
            raise CloseSpider('articles not found')

        for article in articles:
            url_selector = article.css("div > a::attr(href)")
            if not url_selector:
                raise CloseSpider('url_selectors not found')
            url = base_url + url_selector.extract()[0]
            self.logger.info(url)

            info_selectors = article.css("div > b.mdk-time::text")
            if not info_selectors:
                raise CloseSpider('info_selectors not found')
            #info = Jumat, 8 September 2017 16:45:19
            info = info_selectors.extract_first().replace(u'\xa0', u'')

            #info_time = 8 September 2017 16:45:19
            info_time = info.split(',')[1].strip()
            time_arr = list(filter(None, re.split(r'[\s,|]', info_time)))
            info_time = ' '.join([_(s) for s in time_arr if s])
            self.logger.info(info_time)

            #parse date information
            try:
                published_at_wib = datetime.strptime(info_time,
                                                     '%d %B %Y %H:%M:%S')
            except ValueError as e:
                raise CloseSpider('cannot_parse_date: %s' % e)

            #convert to utc+0
            published_at = wib_to_utc(published_at_wib)

            if self.media['last_crawl_at'] >= published_at:
                is_no_update = True
                break

            yield Request(url=url, callback=self.parse_news)

        if is_no_update:
            self.logger.info('Media have no update')
            return

        if response.css('div.paging-box'):
            next_page = response.css('a.link_next::attr(href)')[0].extract()
            yield Request(next_page, callback=self.parse)