def parse(self, response):
        """Parse the meeting-minutes listing page and yield one item per row.

        Decodes the Big5 response, walks every data row of the listing
        table (the first row is a header), and downloads each linked
        attachment before yielding the populated item.
        """
        response = parse.get_decoded_response(response, 'Big5')
        selector = Selector(response)

        minutes_table = selector.xpath('//table[@bordercolordark="#4ab69c"]')
        for row_index, row in enumerate(minutes_table.xpath('tr')):
            # Skip the header row.
            if row_index == 0:
                continue

            cells = []
            for cell in row.xpath('td'):
                cells.append(cell.xpath('.//text()').extract()[0].strip())

            item = MeetingMinutes()
            item['sitting'] = cells[0]
            item['date'] = cells[1]
            item['councilor'] = cells[2]
            # item['meeting'] = meeting

            links = row.xpath('.//a/@href').extract()
            item['download_url'] = links

            for link in links:
                basename = os.path.basename(link)
                if not basename:
                    continue

                destination = '../../meeting_minutes/hccc/' + basename
                outcome = misc.download(link, destination)
                # Throttle only when a real download happened.
                if not outcome['skipped']:
                    time.sleep(1)

            yield item
    def parse(self, response):
        """Yield a MeetingMinutes item for every data row of the listing.

        The page is Big5-encoded; after decoding, each table row past the
        header becomes an item, and every attachment link in the row is
        downloaded into the local meeting_minutes directory.
        """
        decoded = parse.get_decoded_response(response, 'Big5')
        root = Selector(decoded)

        table = root.xpath('//table[@bordercolordark="#4ab69c"]')
        # Everything after the header row is data.
        for data_row in table.xpath('tr')[1:]:
            record = MeetingMinutes()
            texts = [
                column.xpath('.//text()').extract()[0].strip()
                for column in data_row.xpath('td')
            ]
            record['sitting'], record['date'], record['councilor'] = (
                texts[0], texts[1], texts[2])
            # record['meeting'] = meeting

            attachment_urls = data_row.xpath('.//a/@href').extract()
            record['download_url'] = attachment_urls

            for attachment_url in attachment_urls:
                name = os.path.basename(attachment_url)
                if not name:
                    continue

                target = '../../meeting_minutes/hccc/' + name
                download_result = misc.download(attachment_url, target)
                # Be polite to the server only on actual downloads.
                if not download_result['skipped']:
                    time.sleep(1)

            yield record
    def parse_profile(self, response):
        """Parse a Yilan County (宜蘭縣) councilor profile page.

        Decodes the Big5 page, fills in fixed metadata for the 2009 term,
        then reads party/name/contact/education pairs from the first
        profile table, experience from the second, and the political
        platform from the third.

        Returns the populated Councilor item.
        """
        response = parse.get_decoded_response(response, 'Big5')
        meta = response.request.meta
        sel = Selector(response)
        curr_url = response.url
        county = u'宜蘭縣'

        tables = sel.xpath('//table[@bgcolor="#333333"]')

        item = Councilor()
        item['contact_details'] = []
        item['election_year'] = '2009'
        item['term_end'] = {'date': '2014-12-25'}
        item['term_start'] = '%s-12-25' % item['election_year']
        item['in_office'] = True
        item['county'] = county
        item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]
        img_url = sel.xpath('.//div[@id="Layer2"]/img/@src').extract()[0]
        item['image'] = urljoin(curr_url, img_url)

        # The (constituency, district) pair is forwarded by the listing
        # page through the request meta.
        if meta:
            area = meta['area']
            item['constituency'] = county + area[0]
            item['district'] = area[1]

        key_map = {
            u'黨籍': 'party',
            u'姓名': 'name'
        }
        tds = tables[0].xpath('.//td')
        # Floor division keeps the label/value pairing an int count under
        # both Python 2 and Python 3 (plain `/` becomes float in 3).
        pairs = [(tds[2 * i], tds[2 * i + 1]) for i in range(len(tds) // 2)]
        for k, v in pairs:
            key = parse.get_inner_text(k, remove_white=True)
            value = parse.get_inner_text(v).strip()

            k_eng = key_map.get(key)
            if k_eng:
                item[k_eng] = value
            elif key == 'E-mail':
                if value:
                    misc.append_contact(item, 'email', key, value)
            elif u'電話' in key:
                misc.append_contact_list(item, 'voice', key, value.split(u'、'))
            elif key == u'服務處所':
                misc.append_contact(item, 'address', key, value)
            elif key == u'學歷':
                item['education'] = value.split()

        exp_node = tables[1].xpath('.//td[@bgcolor="#FFFFFF"]')
        experience = []
        for ex in exp_node:
            experience += parse.get_inner_text(ex).split()

        item['experience'] = experience
        # Guard against an empty experience table before probing the first
        # entry; previously this raised IndexError on such pages.
        m = re.search(u'(副?議長)。?$', experience[0]) if experience else None
        item['title'] = m.group(1) if m else u'議員'
        item['platform'] = parse.get_inner_text(tables[2].xpath('.//td[@bgcolor="#FFFFFF"]')).split()
        return item
Esempio n. 4
0
    def parse_profile(self, response):
        """Parse a Yilan County (宜蘭縣) councilor profile page.

        Fixed 2009-term metadata is filled first; the first profile table
        supplies party/name/contact/education pairs, the second supplies
        experience, and the third supplies the platform.

        Returns the populated Councilor item.
        """
        response = parse.get_decoded_response(response, 'Big5')
        meta = response.request.meta
        sel = Selector(response)
        curr_url = response.url
        county = u'宜蘭縣'

        tables = sel.xpath('//table[@bgcolor="#333333"]')

        item = Councilor()
        item['contact_details'] = []
        item['election_year'] = '2009'
        item['term_end'] = {'date': '2014-12-25'}
        item['term_start'] = '%s-12-25' % item['election_year']
        item['in_office'] = True
        item['county'] = county
        item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]
        img_url = sel.xpath('.//div[@id="Layer2"]/img/@src').extract()[0]
        item['image'] = urljoin(curr_url, img_url)

        # Constituency/district are forwarded via the request meta.
        if meta:
            area = meta['area']
            item['constituency'] = county + area[0]
            item['district'] = area[1]

        key_map = {u'黨籍': 'party', u'姓名': 'name'}
        tds = tables[0].xpath('.//td')
        # `//` keeps the pair count an integer on Python 3 as well; `/`
        # would produce a float and break range().
        pairs = [(tds[2 * i], tds[2 * i + 1]) for i in range(len(tds) // 2)]
        for k, v in pairs:
            key = parse.get_inner_text(k, remove_white=True)
            value = parse.get_inner_text(v).strip()

            k_eng = key_map.get(key)
            if k_eng:
                item[k_eng] = value
            elif key == 'E-mail':
                if value:
                    misc.append_contact(item, 'email', key, value)
            elif u'電話' in key:
                misc.append_contact_list(item, 'voice', key, value.split(u'、'))
            elif key == u'服務處所':
                misc.append_contact(item, 'address', key, value)
            elif key == u'學歷':
                item['education'] = value.split()

        exp_node = tables[1].xpath('.//td[@bgcolor="#FFFFFF"]')
        experience = []
        for ex in exp_node:
            experience += parse.get_inner_text(ex).split()

        item['experience'] = experience
        # Avoid IndexError when the experience table is empty: only probe
        # the first entry for a speaker/deputy-speaker title if present.
        m = re.search(u'(副?議長)。?$', experience[0]) if experience else None
        item['title'] = m.group(1) if m else u'議員'
        item['platform'] = parse.get_inner_text(
            tables[2].xpath('.//td[@bgcolor="#FFFFFF"]')).split()
        return item
Esempio n. 5
0
    def parse_profile(self, response):
        """Parse a Chiayi City (嘉義市) councilor profile page.

        Decodes the Big5 page, merges any matching record from
        ``self.reference_data`` by name, fills fixed 2009-term metadata,
        then scans the detail rows (party, district, address, phone, fax,
        education, experience) into the Councilor item.

        NOTE(review): the bare ``print`` statements are Python 2 syntax
        and look like leftover debugging output.
        """
        response = parse.get_decoded_response(response, 'Big5')
        #print response
        sel = Selector(response)
        item = Councilor()
        item['name'] = sel.xpath(".//span[@class='title1']/text()").extract()[0]
        print item['name']

        # Add information from reference data
        for data in self.reference_data:
            if data["name"] == item["name"]:
                item.update(data)

        item['election_year'] = '2009'
        item['county'] = '嘉義市'
        item['term_start'] = '%s-12-25' % item['election_year']
        item['term_end'] = {'date': '2014-12-25'}
        item['in_office'] = True
        item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]
        item['constituency'] = response.request.meta['constituency']

        # URL-quote the UTF-8-encoded image path before joining it onto
        # the page URL.
        img_url = sel.xpath(".//td[@bgcolor='B8DD6C']/img/@src").extract()[0]
        print img_url
        item['image'] = urljoin(response.url, urllib.quote(img_url.encode('utf8')))
        print item['image']

        # Each detail row pairs a <th> label with a <td><span> value.
        details = sel.xpath(".//th[@class='T84 color06']/..")
        print len(details)
        item['contact_details']=[]
        for detail in details:
            info = detail.xpath(".//th/text()")
            # Labels on the page may have whitespace between characters,
            # hence the [\s]* patterns below; values have all whitespace
            # stripped out.
            content = re.sub('\s', '', detail.xpath(".//td/span/text()").extract()[0])
            if info.re(u'[\s]*黨[\s]*籍[\s]*'):  # party
                item['party'] = content
                print item['party']
            if info.re(u'[\s]*選[\s]*區[\s]*'):  # electoral district
                item['district'] = content
                print item['district']
            if info.re(u'[\s]*地[\s]*址[\s]*'):  # address
                print content
                item['contact_details'].append({'type': 'address', 'label': u'通訊處', 'value': content})
            if info.re(u'[\s]*電[\s]*話[\s]*'):  # phone
                print content
                item['contact_details'].append({'type': 'voice', 'label': u'電話', 'value': content})
            if info.re(u'[\s]*傳[\s]*真[\s]*'):  # fax
                print content
                item['contact_details'].append({'type': 'fax', 'label': u'傳真', 'value': content})
            if info.re(u'[\s]*學[\s]*歷[\s]*'):  # education: keep every text line
                item['education'] = [x.strip() for x in detail.xpath(".//td/span/text()").extract()]
                print item['education']
            if info.re(u'[\s]*經[\s]*歷[\s]*'):  # experience: keep every text line
                item['experience'] = [x.strip() for x in detail.xpath(".//td/span/text()").extract()]
                print item['experience']

        return item
    def parse(self, response):
        """Collect councilor profile links grouped by electoral area.

        Each "testdiv" block holds anchors; only hrefs of the form
        ``view.asp?id=...`` are profile pages.  The constituency label is
        read from the nearest preceding <strong> element and forwarded to
        parse_profile through the request meta.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        areas = sel.xpath(u'//div[@class="testdiv"]')
        base_url = self.base_url

        for area in areas:
            # The previous duplicate href extraction here was dead code
            # (immediately overwritten); the anchor nodes are what we need.
            urls = area.xpath('.//a')
            for url in urls:
                href = url.xpath('@href').extract()[0]
                # Raw string + escaped dot: match the literal file name only.
                if not re.search(r'^view\.asp\?id=', href):
                    continue
                item = Councilor()
                item['constituency'] = url.xpath('preceding::strong[1]/text()').re(u'第\d+選區')[0]
                logging.warning('url: %s', href)
                the_url = urljoin(base_url, href)
                yield Request(the_url, callback=self.parse_profile, meta={'item': item})
    def parse(self, response):
        """Collect councilor profile links grouped by electoral area.

        Only anchors whose href matches ``view.asp?id=...`` are profile
        pages.  The constituency number is captured from the nearest
        preceding <strong> element and carried in the request meta.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        areas = sel.xpath(u'//div[@class="testdiv"]')
        base_url = self.base_url

        for area in areas:
            # Removed a dead href-extraction assignment that was
            # immediately overwritten by the anchor-node query below.
            urls = area.xpath('.//a')
            for url in urls:
                href = url.xpath('@href').extract()[0]
                # Raw string and escaped dot: only the literal view.asp name.
                if not re.search(r'^view\.asp\?id=', href):
                    continue
                item = Councilor()
                item['constituency'] = url.xpath(
                    'preceding::strong[1]/text()').re(u'第(\d+)選區')[0]
                logging.warning('url: %s', href)
                the_url = urljoin(base_url, href)
                yield Request(the_url,
                              callback=self.parse_profile,
                              meta={'item': item})
    def parse(self, response):
        """Walk every councilor-list table and emit one profile per link.

        The table's ``summary`` attribute encodes "constituency(district)";
        both parts are forwarded through the request meta.  Profile pages
        are fetched synchronously because their content depends on the
        server-side session.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        areas = sel.xpath(u'//table[contains(@summary,"議員列表")]')
        curr_url = response.url

        for area in areas:
            name = area.xpath('@summary').extract()[0]
            # NOTE(review): this raises AttributeError if the summary does
            # not match "name(district)" — presumably every table does.
            m = re.match('(.*)\((.*)\).*', name)
            area_info = m.groups()

            urls = area.xpath('.//a/@href').extract()

            # The enumerate index was unused; iterate the urls directly.
            for url in urls:
                url = url.encode('Big5')
                url = urljoin(curr_url, url)

                meta = {'area': area_info}

                # manually do the request, because the actual content of
                # H0052.aspx is determined by the session
                res = misc.get_response(url, meta=meta)
                yield self.parse_profile_frameset(res)
Esempio n. 9
0
    def parse(self, response):
        """Emit one councilor profile per link in each area list table.

        Each matching table's ``summary`` attribute carries the
        "constituency(district)" pair, which is split by regex and passed
        along in the request meta for the profile parser.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        areas = sel.xpath(u'//table[contains(@summary,"議員列表")]')
        curr_url = response.url

        for area in areas:
            name = area.xpath('@summary').extract()[0]
            # NOTE(review): a summary that does not fit "name(district)"
            # would make m None and crash below — confirm against the site.
            m = re.match('(.*)\((.*)\).*', name)
            area_info = m.groups()

            urls = area.xpath('.//a/@href').extract()

            # Dropped the unused enumerate index.
            for url in urls:
                url = url.encode('Big5')
                url = urljoin(curr_url, url)

                meta = {'area': area_info}

                # Manual request: the actual content of H0052.aspx is
                # determined by the session, not just the URL.
                res = misc.get_response(url, meta=meta)
                yield self.parse_profile_frameset(res)
Esempio n. 10
0
    def parse(self, response):
        """Parse a single Nantou County (南投縣) bill page into a Bills item.

        The page is one table whose second column holds, in fixed order:
        proposer, petitioners, abstract, description, methods, bill number,
        review opinion, assembly resolution, last action and remark.

        Returns the populated Bills item.
        """
        response = parse.get_decoded_response(response, 'big5')
        sel = Selector(response)
        # Removed the unused local `items = []`.
        item = Bills()
        nodes = sel.xpath('//table/tr')
        item['election_year'] = '2009'
        item['county'] = u'南投縣'

        extract_result = nodes.xpath('td[2]/text()').extract()

        # Names are separated by the fullwidth comma 「、」.
        item['proposed_by'] = extract_result[0].strip().split(u'、')
        # Petitioners may include the speaker's title; strip it out first.
        item['petitioned_by'] = extract_result[1].replace(
            u'議長', '').strip().split(u'、')
        item['abstract'] = extract_result[2].strip()
        item['description'] = extract_result[3].strip()
        item['methods'] = extract_result[4].strip()
        value = extract_result[5].strip()
        # The first two characters of the bill number encode the category.
        item['category'] = value[0:2]
        item['bill_no'] = value
        item['motions'] = []
        item['motions'].append({
            "motion": u'審查意見',
            "resolution": extract_result[6]
        })
        item['motions'].append({
            "motion": u'大會決議',
            "resolution": extract_result[7]
        })
        item['last_action'] = extract_result[8].strip()
        item['remark'] = extract_result[9].strip()
        item['links'] = response.url
        # The bill id sits at a fixed character offset of the URL.
        item['id'] = response.url[51:66]
        print(item['links'])

        return item
Esempio n. 11
0
    def parse_bill(self, response):
        """Parse a Yilan County (宜蘭縣) bill detail page into a Bills item.

        The page is a sequence of table rows that ``misc.rows_to_pairs``
        turns into (label, value) cell pairs.  Single-cell rows carrying
        the "小圖示" icon open a new motion section; subsequent date and
        resolution cells are attached to the open motion.

        Returns the populated Bills item.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)

        # convert to list of pairs
        rows = sel.xpath('//tr')
        pairs = misc.rows_to_pairs(rows)

        item = Bills()
        item['election_year'] = self.election_year[int(sel.xpath('//span[@id="lbFmotion_expireb"]/text()').re('\d+')[0])]
        item['county'] = u'宜蘭縣'
        item['links'] = response.url
        print(response.url)
        get_param = parse_qs(urlparse(response.url).query)
        item['id'] = get_param['Fmotion_instanceOS'][0].decode('Big5')
        # Names are separated by the fullwidth comma; normalize then split.
        item['proposed_by'] = re.sub(u'、', ' ', sel.xpath('//*[@id="lbFmotion_People"]/text()').extract()[0]).split()
        petitioned_by = sel.xpath('//*[@id="lbFmotion_AddTo"]/text()').extract()
        item['petitioned_by'] = re.sub(u'、', ' ', petitioned_by[0]).split() if petitioned_by else []
        item['motions'] = []
        # Default to None so a page title that does not match the expected
        # pattern no longer leaves main_sitting unbound (NameError below).
        main_sitting = None
        main_title = parse.get_inner_text(sel.xpath('//font[@color="#800000"]'), remove_white=True)
        m = re.match(u'宜蘭縣議會(.*)議案資料', main_title)
        if m:
            main_sitting = m.group(1)

        # Page-label → item-field mapping; commented entries are ignored
        # on purpose.
        k_map = {
            u'來源別': 'type',
            # u'建檔日期':'',
            # u'議案程序':'',
            # u'系統編號':'',
            u'案號': 'bill_no',
            u'類別': 'category',
            # u'小組':'',
            u'案由': 'abstract',
            # u'法規名稱':'',
            u'辦法': 'methods',
            u'理由': 'description',
            # u'附件':'',
            # u'審議日期':'',
            # u'大會決議':'',
        }

        curr_motion = None
        # (dropped the unused enumerate index)
        for pair in pairs:
            n = len(pair)
            if n < 2:
                if n == 1:
                    td = pair[0]
                    text = parse.get_inner_text(td, remove_white=True)
                    if td.xpath(u'.//img[@alt="小圖示"]'):
                        # An icon cell starts a new motion section, except
                        # for the static attachments header.
                        if text != u'案由、辦法、理由及附件':
                            if curr_motion:
                                item['motions'].append(curr_motion)
                            curr_motion = {'motion': text}
                    elif curr_motion is not None and not curr_motion.get('sitting'):
                        curr_motion['sitting'] = ' '.join(td.xpath('.//span/text()').extract())

                continue

            k_raw, v_raw = pair
            k = parse.get_inner_text(k_raw, remove_white=True)
            v = parse.get_inner_text(v_raw)
            k_eng = k_map.get(k)

            if k_eng:
                item[k_eng] = v
            elif k == u'建檔日期':
                misc.append_motion(item, u'建檔', None, v, main_sitting)

            if curr_motion is not None:
                # Within a motion section the date cell comes first; the
                # following value cell is the resolution text.
                if u'日期' in k:
                    curr_motion['date'] = v
                elif 'date' in curr_motion:
                    curr_motion['resolution'] = v

        # Flush the trailing motion section.
        if curr_motion:
            item['motions'].append(curr_motion)

        return item
Esempio n. 12
0
    def parse_bill(self, response):
        """Parse a Yilan County (宜蘭縣) bill detail page into a Bills item.

        Rows are flattened into (label, value) pairs by
        ``misc.rows_to_pairs``.  Icon-marked single cells open motion
        sections; date and resolution cells are folded into the open
        motion.  Proposer/petitioner values are split into name lists.

        Returns the populated Bills item.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)

        # convert to list of pairs
        rows = sel.xpath('//tr')
        pairs = misc.rows_to_pairs(rows)

        item = Bills()
        item['links'] = response.url
        item['motions'] = []
        # Initialize so a non-matching main title can no longer cause a
        # NameError when the 建檔日期 row is processed below.
        main_sitting = None
        main_title = parse.get_inner_text(
            sel.xpath('//font[@color="#800000"]'), remove_white=True)
        m = re.match(u'宜蘭縣議會(.*)議案資料', main_title)
        if m:
            main_sitting = m.group(1)

        # Page-label → item-field mapping; commented entries are ignored
        # on purpose.
        k_map = {
            # u'來源別':'',
            # u'建檔日期':'',
            # u'議案程序':'',
            # u'系統編號':'',
            u'動議人': 'proposed_by',
            u'提案單位': 'proposed_by',
            u'案號': 'bill_no',
            u'附議人': 'petitioned_by',
            u'類別': 'category',
            # u'小組':'',
            u'案由': 'abstract',
            # u'法規名稱':'',
            u'辦法': 'methods',
            u'理由': 'description',
            # u'附件':'',
            # u'審議日期':'',
            # u'大會決議':'',
        }

        curr_motion = None
        # (dropped the unused enumerate index)
        for pair in pairs:
            n = len(pair)
            if n < 2:
                if n == 1:
                    td = pair[0]
                    text = parse.get_inner_text(td, remove_white=True)
                    if td.xpath(u'.//img[@alt="小圖示"]'):
                        # An icon cell starts a new motion section, except
                        # for the static attachments header.
                        if text != u'案由、辦法、理由及附件':
                            if curr_motion:
                                item['motions'].append(curr_motion)
                            curr_motion = {'motion': text}
                    elif curr_motion is not None and not curr_motion.get(
                            'sitting'):
                        curr_motion['sitting'] = ' '.join(
                            td.xpath('.//span/text()').extract())

                continue

            k_raw, v_raw = pair
            k = parse.get_inner_text(k_raw, remove_white=True)
            v = parse.get_inner_text(v_raw)
            k_eng = k_map.get(k)

            if k_eng:
                new_v = v
                # Proposers/petitioners are whitespace-separated name lists.
                if k_eng in ['petitioned_by', 'proposed_by']:
                    new_v = v.split()
                item[k_eng] = new_v
            elif k == u'建檔日期':
                misc.append_motion(item, u'建檔', None, v, main_sitting)

            if curr_motion is not None:
                # Date cell first; the next value cell is the resolution.
                if u'日期' in k:
                    curr_motion['date'] = v
                elif 'date' in curr_motion:
                    curr_motion['resolution'] = v

        # Flush the trailing motion section.
        if curr_motion:
            item['motions'].append(curr_motion)

        return item
Esempio n. 13
0
    def parse_profile(self, response):
        """Parse a Hsinchu County (新竹縣) councilor profile page.

        Extends the partially-filled Councilor item received through the
        request meta with name/title, education, portrait image, and the
        rows of the main detail table (labels mapped via the module-level
        ``_key_map``).

        NOTE(review): the ``ur'...'`` literals are Python 2-only syntax.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        name_node = sel.xpath('//td[@class="w06"]')
        logging.warning('name_node: %s', name_node)
        name_str = parse.get_inner_text(name_node)

        logging.warning('name_str: %s', name_str)

        item = response.request.meta['item']
        item['county'] = u'新竹縣'
        item['election_year'] = '2009'
        item['term_start'] = '%s-12-25' % item['election_year']
        item['term_end'] = {'date': '2014-12-25'}
        item['in_office'] = True
        # The header reads "<title>-<name>"; keep the part after the dash.
        item['name'] = name_str.split('-')[-1]
        item['title'] = re.search(u'(副?議長|議員)', name_str).group()

        # Locate the education row by its label; the portrait image is
        # addressed relative to that row's enclosing table.
        w02_nodes = sel.xpath('//th[@class="w02"]')
        for each_node in w02_nodes:
            key = parse.get_inner_text(each_node).strip()
            logging.warning('w02_node: key: %s', key)
            if key != u'學歷':
                continue
            education_node = each_node.xpath('../td')
            education_str = parse.get_inner_text(education_node)
            logging.warning('key: %s education_str: %s', key, education_str)
            item['education'] = education_str.split('\n')

            image_node = each_node.xpath('../../../../td[2]/img/@src')
            image_str = parse.get_extracted(image_node)

            logging.warning('key: %s education_str: %s image_str: %s', key, education_str, image_str)
            item['image'] = urljoin(response.url, urllib.quote(image_str.encode('utf8')))

        main_nodes = sel.xpath('//tr[@class="line_02"]')

        contact_details = []
        links = [{'url': response.url, 'note': u'議會個人官網'}]
        for each_node in main_nodes:
            key = parse.get_inner_text(each_node.xpath('./th'))
            item_key = _key_map.get(key, '')

            if item_key == 'experience':
                # Prefer the <ol><li> form; otherwise split the cell text
                # on newlines.  Space characters and 。 are stripped from
                # each entry.
                val_nodes = each_node.xpath('./td/ol/li')
                if val_nodes:
                    val = [re.sub(ur' ', '', re.sub(ur'。', '', parse.get_inner_text(each_each_node))) for each_each_node in val_nodes]
                else:
                    val = parse.get_inner_text(each_node.xpath('./td')).split("\n")
                    val = [re.sub(ur' ', '', each_val) for each_val in val]
            elif item_key == 'platform':
                val_nodes = each_node.xpath('./td/ol/li')
                val = [re.sub(ur' ', '', parse.get_inner_text(each_each_node)) for each_each_node in val_nodes]
            else:
                val = parse.get_inner_text(each_node.xpath('./td'))

            # Unknown labels are logged and skipped (note that val is
            # still computed above even for unknown labels).
            if key not in _key_map:
                logging.error('key not in _key_map!: key: %s', key)
                continue

            if item_key in ['email', 'address', 'voice']:
                contact_details.append({"type": item_key, "value": val, "label": key})
            elif item_key in ['link']:
                # Relative links start with ".."; rewrite to the site root.
                val = re.sub(ur'^\.\.', 'http://www.hcc.gov.tw', val)
                links.append({"url": val, "note": key})
            else:
                item[item_key] = val

            logging.warning('key: %s val: %s item_key: %s', key, val, item_key)

            # item[item_key] = val
        item['contact_details'] = contact_details
        item['links'] = links

        return item
Esempio n. 14
0
    def parse_profile(self, response):
        """Parse a Hsinchu County (新竹縣) councilor profile page.

        Fills the Councilor item carried in the request meta with fixed
        2009-term metadata, name/title parsed from the page header,
        education and portrait image, plus the detail-table rows mapped
        through the module-level ``_key_map``.

        NOTE(review): the ``ur'...'`` literals are Python 2-only syntax.
        """
        response = parse.get_decoded_response(response, 'Big5')
        sel = Selector(response)
        name_node = sel.xpath('//td[@class="w06"]')
        logging.warning('name_node: %s', name_node)
        name_str = parse.get_inner_text(name_node)

        logging.warning('name_str: %s', name_str)

        item = response.request.meta['item']
        item['county'] = u'新竹縣'
        item['election_year'] = '2009'
        item['term_start'] = '%s-12-25' % item['election_year']
        item['term_end'] = {'date': '2014-12-25'}
        item['in_office'] = True
        # The header reads "<title>-<name>"; keep the part after the dash.
        item['name'] = name_str.split('-')[-1]
        item['title'] = re.search(u'(副?議長|議員)', name_str).group()

        # Find the education row by label; the portrait image lives in a
        # cell addressed relative to that row's enclosing table.
        w02_nodes = sel.xpath('//th[@class="w02"]')
        for each_node in w02_nodes:
            key = parse.get_inner_text(each_node).strip()
            logging.warning('w02_node: key: %s', key)
            if key != u'學歷':
                continue
            education_node = each_node.xpath('../td')
            education_str = parse.get_inner_text(education_node)
            logging.warning('key: %s education_str: %s', key, education_str)
            item['education'] = education_str.split('\n')

            image_node = each_node.xpath('../../../../td[2]/img/@src')
            image_str = parse.get_extracted(image_node)

            logging.warning('key: %s education_str: %s image_str: %s', key,
                            education_str, image_str)
            item['image'] = urljoin(response.url,
                                    urllib.quote(image_str.encode('utf8')))

        main_nodes = sel.xpath('//tr[@class="line_02"]')

        contact_details = []
        links = [{'url': response.url, 'note': u'議會個人官網'}]
        for each_node in main_nodes:
            key = parse.get_inner_text(each_node.xpath('./th'))
            item_key = _key_map.get(key, '')

            if item_key == 'experience':
                # Prefer the <ol><li> form; otherwise split the cell text
                # on newlines.  Space characters and 。 are stripped from
                # every entry.
                val_nodes = each_node.xpath('./td/ol/li')
                if val_nodes:
                    val = [
                        re.sub(
                            ur' ', '',
                            re.sub(ur'。', '',
                                   parse.get_inner_text(each_each_node)))
                        for each_each_node in val_nodes
                    ]
                else:
                    val = parse.get_inner_text(
                        each_node.xpath('./td')).split("\n")
                    val = [re.sub(ur' ', '', each_val) for each_val in val]
            elif item_key == 'platform':
                val_nodes = each_node.xpath('./td/ol/li')
                val = [
                    re.sub(ur' ', '', parse.get_inner_text(each_each_node))
                    for each_each_node in val_nodes
                ]
            else:
                val = parse.get_inner_text(each_node.xpath('./td'))

            # Unknown labels are logged and skipped (val is still computed
            # above even when the label is unknown).
            if key not in _key_map:
                logging.error('key not in _key_map!: key: %s', key)
                continue

            if item_key in ['email', 'address', 'voice']:
                contact_details.append({
                    "type": item_key,
                    "value": val,
                    "label": key
                })
            elif item_key in ['link']:
                # Relative links start with ".."; rewrite to the site root.
                val = re.sub(ur'^\.\.', 'http://www.hcc.gov.tw', val)
                links.append({"url": val, "note": key})
            else:
                item[item_key] = val

            logging.warning('key: %s val: %s item_key: %s', key, val, item_key)

            # item[item_key] = val
        item['contact_details'] = contact_details
        item['links'] = links

        return item