Code example #1
    def scrape_minutes_activities_urls(self, response):
        date_time, meeting_name = None, None
        for row in response.xpath('//div[@id="library"]/table//tr'):
            tds = row.xpath('./td')
            term = tds[1].xpath('.//text()').get()  # label cell, e.g. '開会日'
            desc = tds[3].xpath('.//text()').get().split()[0]  # value cell
            if term == '開会日':
                date_time = datetime.strptime(desc, '%Y年%m月%d日')
            elif term == '会議名':
                meeting_name = self.get_full_meeting_name(desc)
        if not (date_time and meeting_name):
            msg = f'failed to extract minutes detail: date_time={date_time}, meeting_name={meeting_name}'
            raise ValueError(msg)

        minutes = build_minutes(self.house_name + meeting_name, date_time)
        tables = response.xpath('//div[@id="library2"]/table')
        topics = self.scrape_table(tables[0])
        if topics:
            LOGGER.debug(f'scraped topics={topics}')
            minutes.topics = topics
        speakers = self.scrape_table(tables[2])
        if speakers:
            LOGGER.debug(f'scraped speakers={speakers}')
            minutes.speakers = speakers  # this field won't be written to GraphQL directly

        activity_list, url_list = self.build_activities_and_urls(
            tables.xpath('.//a'), minutes, response.url)
        url = build_url(response.url, UrlTitle.SHINGI_TYUKEI, self.domain)
        url.to_id = minutes.id
        url_list.append(url)

        return minutes, activity_list, url_list
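
For reference, `datetime.strptime` matches the kanji in the format string as literal characters, so the site's Japanese date strings parse directly without any locale handling. A minimal standalone check:

from datetime import datetime

# '%Y年%m月%d日' treats 年/月/日 as literals; %m and %d also accept
# non-zero-padded numbers, so '2021年1月1日' parses as-is.
date_time = datetime.strptime('2021年1月1日', '%Y年%m月%d日')
assert date_time == datetime(2021, 1, 1)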
Code example #2
    def parse_minutes(self, response):
        # merge url if exists
        maybe_href = extract_full_href_or_none(response.xpath('//h4'),
                                               response.url)
        if not maybe_href:
            LOGGER.warning(f'failed to find url in {response.url}')
            return
        url = build_url(maybe_href,
                        title=UrlTitle.GAIYOU_PDF,
                        domain=self.domain)
        self.gql_client.merge(url)
        LOGGER.debug(f'merged {url.id}')

        # link to minutes
        title = extract_text(response.xpath('//title'))
        committee_name = response.meta['committee_name']
        date_time = self.extract_datetime_from_title(title)
        minutes = build_minutes(committee_name, date_time)
        try:
            self.gql_client.get(minutes.id,
                                ['id'])  # minutes should already exist
            self.gql_client.link(url.id, minutes.id)
        except GraphQLException:
            LOGGER.warning(
                f'failed to find minutes ({committee_name}, {date_time})')
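
`extract_datetime_from_title` is not shown here. A minimal sketch of what it might do, assuming the page title embeds a western-style date such as '2021年1月1日' — the regex and the error behavior are illustrative assumptions, not the project's actual implementation:

import re
from datetime import datetime

def extract_datetime_from_title(title):
    # Hypothetical helper: pull the first 'YYYY年M月D日' date out of
    # a page title. The real parsing rules may differ.
    match = re.search(r'(\d{4})年(\d{1,2})月(\d{1,2})日', title)
    if not match:
        raise ValueError(f'no date found in title: {title}')
    year, month, day = map(int, match.groups())
    return datetime(year, month, day)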
Code example #3
    def scrape_minutes_activities_urls(self, response):
        date_time, meeting_name = None, None
        for row in response.xpath('//div[@id="library"]/table//tr'):
            tds = row.xpath('./td')
            term = tds[1].xpath('.//text()').get()
            desc = tds[3].xpath('.//text()').get().split()[0]
            if term == '開会日':
                date_time = extract_datetime(desc)
            elif term == '会議名':
                meeting_name = self.get_full_meeting_name(desc)
        if not (date_time and meeting_name):
            msg = f'failed to extract minutes detail: date_time={date_time}, meeting_name={meeting_name}'
            raise ValueError(msg)

        minutes = build_minutes(self.house_name + meeting_name, date_time)
        tables = response.xpath('//div[@id="library2"]/table')
        topics = self.scrape_table(tables[0])
        if topics:
            LOGGER.debug(f'scraped topics={topics}')
            minutes.topics = topics
            minutes.topic_ids = self.get_topic_ids(topics)
        speakers = self.scrape_table(tables[2], first_section_only=True)
        speakers = deduplicate(speakers)
        if speakers:
            LOGGER.debug(f'scraped speakers={speakers}')
            minutes.speakers = speakers
            minutes.speaker_ids = self.get_speakers_ids(speakers)

        activity_list, url_list = self.build_activities_and_urls(
            tables.xpath('.//a'), minutes, response.url)
        url = build_url(response.url, UrlTitle.SHINGI_TYUKEI, self.domain)
        url.to_id = minutes.id
        url_list.append(url)

        return minutes, activity_list, url_list
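
`deduplicate` (also used in example #7) presumably removes repeated speaker names while keeping their original order, which a plain set() would lose. A minimal sketch, assuming that behavior:

def deduplicate(items):
    # Hypothetical sketch: keep the first occurrence of each item in
    # first-seen order. seen.add(x) returns None, so the or-expression
    # is falsy exactly when x is new.
    seen = set()
    return [x for x in items if not (x in seen or seen.add(x))]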
Code example #4
    def test_scrape_bill_actions(self):
        speeches = [
            'これより会議を始めます',
            '法律案Aと法律案Bを一括して議題とします',
            '趣旨説明は既に聴取しておりますので、質疑に入ります',  # 2
            '法律案Aと法律案Bの質疑を終わります',
            '法律案Cを議題とします',
            '採決に入ります',
            '法律案Dを議題とします',
            '趣旨説明お願いします',  # 7
            '採決に入ります',  # 8
            'お疲れ様でした',
        ]
        speech_recs = [
            self.build_rec(speech, i) for i, speech in enumerate(speeches)
        ]
        minutes = build_minutes('猫ちゃん会議', datetime(2021, 1, 1))
        minutes.topics = ['法律案A', '法律案B', '法律案C', '法律案D']
        minutes.topic_ids = ['Bill:A', 'Bill:B', 'Bill:D']
        bill_id2name = {'Bill:A': '法律案A', 'Bill:B': '法律案B', 'Bill:D': '法律案D'}

        bill_actions = MinutesSpider.scrape_bill_actions(
            speech_recs, minutes, bill_id2name)
        assert len(bill_actions) == 4
        self.assert_bill_action('Bill:A', minutes.id, 2,
                                BillActionType.QUESTION, bill_actions[0])
        self.assert_bill_action('Bill:B', minutes.id, 2,
                                BillActionType.QUESTION, bill_actions[1])
        self.assert_bill_action('Bill:D', minutes.id, 7,
                                BillActionType.BILL_EXPLANATION,
                                bill_actions[2])
        self.assert_bill_action('Bill:D', minutes.id, 8, BillActionType.VOTE,
                                bill_actions[3])
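
The test implies that `scrape_bill_actions` classifies each speech by keyword and only emits actions for topics that resolve to a bill ID (note that '法律案C' has no entry in `topic_ids`, so its vote produces nothing). A much-simplified sketch of the keyword rules, inferred from the test data only — the enum stand-in and the function are illustrative, and the real method also tracks which bills are currently on the table:

from enum import Enum

class BillActionType(Enum):  # stand-in for the project's enum
    QUESTION = 1
    BILL_EXPLANATION = 2
    VOTE = 3

def classify_speech(speech):
    # Keyword rules inferred from the test expectations; illustrative only.
    # Checking '質疑' before '趣旨説明' makes speech #2 a QUESTION, as asserted.
    if '質疑' in speech:
        return BillActionType.QUESTION
    if '趣旨説明' in speech:
        return BillActionType.BILL_EXPLANATION
    if '採決' in speech:
        return BillActionType.VOTE
    return None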
Code example #5
    def parse(self, response):
        page_title = response.xpath('//title/text()').get()
        house_name, meeting_name, date_time = self.parse_page_title(page_title)
        minutes = build_minutes(house_name + meeting_name, date_time)
        url = build_url(response.url, UrlTitle.VRSDD, self.domain)
        LOGGER.info(f'found url for minutes: {minutes}, {url}')
        try:
            # do not merge minutes because this is an unofficial data source
            self.delete_old_urls(minutes.id, url.title)
            self.gql_client.merge(url)
            self.gql_client.link(url.id, minutes.id)
        except GraphQLException as e:  # expected when the official minutes do not exist yet
            LOGGER.warning(e)
        if self.next_id < self.last_id:
            yield response.follow(self.build_next_url(), callback=self.parse)
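
The `next_id`/`last_id` pair suggests simple ID-based pagination, with `build_next_url` formatting the next ID into the source site's URL. A sketch under that assumption — the URL template is a placeholder, not the real site:

BASE_URL = 'https://example.com/minutes/{}'  # hypothetical template

def build_next_url(self):
    # Hypothetical: advance the numeric page ID and plug it into the
    # URL template before following it in parse().
    self.next_id += 1
    return BASE_URL.format(self.next_id)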
Code example #6
    def scrape_minutes_activities_speeches_urls(self, response_body):
        minutes_lst, activity_lst, speech_lst, url_lst = [], [], [], []

        for meeting_rec in response_body['meetingRecord']:
            try:
                minutes = build_minutes(
                    meeting_rec['nameOfHouse'] + meeting_rec['nameOfMeeting'],
                    datetime.strptime(meeting_rec['date'], '%Y-%m-%d'))
                minutes.ndl_min_id = meeting_rec['issueID']
                topics = extract_topics(
                    meeting_rec['speechRecord'][0]['speech'])
                if topics:
                    minutes.topics = topics
            except ValueError as e:
                LOGGER.warning(f'failed to parse minutes: {e}')
                continue
            minutes_lst.append(minutes)

            url = build_url(meeting_rec['meetingURL'], UrlTitle.HONBUN,
                            self.domain)
            url.to_id = minutes.id
            url_lst.append(url)

            speakers = set()
            for speech_rec in meeting_rec['speechRecord']:
                speaker = speech_rec['speaker']
                speech = build_speech(minutes.id,
                                      int(speech_rec['speechOrder']))
                speech.speaker_name = speaker
                if self.collect_speech:
                    speech_lst.append(speech)
                if speaker not in speakers:
                    speakers.add(speaker)
                    try:
                        member = self.member_finder.find_one(speaker)
                    except Exception:
                        # speaker could not be matched to a known member;
                        # skip building an activity for this speech
                        pass
                    else:
                        activity = build_minutes_activity(
                            member.id, minutes.id, minutes.start_date_time)
                        url = build_url(speech_rec['speechURL'],
                                        UrlTitle.HONBUN, self.domain)
                        url.to_id = activity.id
                        activity_lst.append(activity)
                        url_lst.append(url)

        return minutes_lst, activity_lst, speech_lst, url_lst
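
In the NDL API response consumed above, the first `speechRecord` carries the agenda text, and `extract_topics` presumably pulls topic lines out of it. A rough sketch, assuming topics are listed one per line after the standard agenda marker '本日の会議に付した案件' — the marker usage and the cleanup regex are assumptions:

import re

def extract_topics(agenda_speech):
    # Hypothetical parser: collect non-empty lines after the agenda
    # marker and strip leading bullets/numbering. Real rules may differ.
    topics = []
    in_agenda = False
    for line in agenda_speech.splitlines():
        line = line.strip()
        if '本日の会議に付した案件' in line:
            in_agenda = True
            continue
        if in_agenda and line:
            topics.append(re.sub(r'^[○第\d、．.]+\s*', '', line))
    return topics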
Code example #7
    def scrape_minutes_activities_urls(self, response):
        content = response.xpath('//div[@id="detail-contents-inner"]')
        if not content:
            content = response.xpath('//div[@id="detail-contents-inner2"]')
        date_time, meeting_name = None, None
        for dl in content.xpath('.//dl'):  # relative path; '//dl' would scan the whole page
            term = dl.xpath('./dt/text()').get()
            desc = dl.xpath('./dd/text()').get()
            if term == '開会日':
                date_time = extract_datetime(desc)
            elif term == '会議名':
                meeting_name = desc.replace('、', '')
        if not (date_time and meeting_name):
            msg = f'failed to extract minutes detail: date_time={date_time}, meeting_name={meeting_name}'
            raise ValueError(msg)

        minutes = build_minutes(self.house_name + meeting_name, date_time)
        summary = ''.join(
            x.strip() for x in content.xpath('./span/text()').getall())
        if summary:
            minutes.summary = summary
        topics = content.xpath('./ul/li/text()').getall()
        if topics:
            LOGGER.debug(f'scraped topics={topics}')
            minutes.topics = topics
            minutes.topic_ids = self.get_topic_ids(topics)
        speakers = content.xpath('./ul/li/a/text()').getall()
        speakers = deduplicate(speakers)
        if speakers:
            LOGGER.debug(f'scraped speakers={speakers}')
            minutes.speakers = speakers
            minutes.speaker_ids = self.get_speakers_ids(speakers)

        activity_list, url_list = self.build_activities_and_urls(
            content.xpath('./ul/li/a'), minutes, response.url)
        url = build_url(response.url, UrlTitle.SHINGI_TYUKEI, self.domain)
        url.to_id = minutes.id
        url_list.append(url)

        return minutes, activity_list, url_list
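
Across all seven examples, `build_minutes(name, date_time)` yields an object whose `id` is used for linking before anything is written, which implies the ID is derived deterministically from the meeting name and date, so re-scraping the same meeting maps to the same GraphQL node. A sketch of that idea with a hypothetical scheme — the prefix, hash, and truncation are illustrative:

import hashlib
from datetime import datetime

def build_minutes_id(name, date_time):
    # Hypothetical: a stable ID from (name, date) so repeated crawls
    # of the same meeting always address the same node.
    key = f'{name}{date_time:%Y-%m-%d}'
    return 'Minutes:' + hashlib.md5(key.encode()).hexdigest()[:10]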