def get_people(self):
    """Yield the mayor, then each district councillor, of Trois-Rivières."""
    # Mayor first; no email could be found on the mayor's page.
    mayor_doc = lxmlize(MAYOR_URL)
    mayor_photo = mayor_doc.xpath('string(//img/@src[contains(., "Maire")])')
    mayor_name = mayor_doc.xpath('string(//td[@class="contenu"]/text()[last()])')
    mayor = Legislator(name=mayor_name, post_id=u"Trois-Rivières", role="Maire",
                       image=mayor_photo)
    mayor.add_source(MAYOR_URL)
    yield mayor

    # The council page is rendered client-side by JS, so regex the
    # createItemNiv3(...) calls out of the raw HTML instead of parsing the DOM.
    resp = requests.get(COUNCIL_PAGE)
    page_re = re.compile(r'createItemNiv3.+"District (.+?)".+(index.+)\\"')
    for district, url_rel in page_re.findall(resp.text):
        # Strip the French partitive article, except for districts whose
        # official name keeps it.
        keeps_article = district in ('des Estacades', 'des Plateaux',
                                     'des Terrasses', 'du Sanctuaire')
        if not keeps_article:
            district = re.sub('\A(?:de(?: la)?|des|du) ', '', district)
        url = urljoin(COUNCIL_PAGE, url_rel)
        doc = lxmlize(url)
        member = Legislator(
            name=doc.xpath('string(//h2)'),
            post_id=district,
            role='Conseiller',
            image=doc.xpath('string(//img/@src[contains(., "Conseiller")])'))
        member.add_source(url)
        email = doc.xpath('string(//a/@href[contains(., "mailto:")])')[len('mailto:'):]
        member.add_contact('email', email, None)
        yield member
def get_people(self):
    """Yield the mayor and councillors of Saint-Jérôme.

    Each two-cell table row holds either the mayor (3 text fragments)
    or a councillor (district, name, phone, email fragments).
    """
    page = lxmlize(COUNCIL_PAGE)
    # First row of each table; keep only two-cell rows and drop the
    # trailing non-member row.
    councillor_trs = [tr for tr in page.xpath('//table//tr[1]') if len(tr) == 2][:-1]
    for councillor_tr in councillor_trs:
        desc = [text.strip()
                for text in councillor_tr.xpath('.//text()[normalize-space()]')
                if text.strip()]
        if len(desc) == 3:
            role = 'Maire'
            district = u'Saint-Jérôme'
        else:
            role = 'Conseiller'
            district = desc[0].replace(u'numéro ', '')
        name = desc[-3]
        phone = desc[-2]
        email = desc[-1]
        # BUG FIX: xpath('string(...)') returns a string, so the old
        # trailing [0] kept only the first character of the image URL.
        image = councillor_tr.xpath('string(.//img/@src)')
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.image = image
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield one MLA per district row of the members table."""
    page = lxmlize(COUNCIL_PAGE)
    body_rows = page.cssselect('table')[0].cssselect('tr')[1:]
    assert len(body_rows) == 27  # There should be 27 districts
    for row in body_rows:
        _num_td, district_td, member_td, _extra_td = row.cssselect('td')
        district = district_td.cssselect('a')[0].text_content().strip().replace(' - ', '-')
        member_link = member_td.cssselect('a')[0]
        # Drop the honorific and party suffixes from the displayed name.
        name = (member_link.text_content()
                .replace('Hon. ', '')
                .replace(' (LIB)', '')
                .replace(' (PC)', '')
                .strip())
        url = member_link.get('href')
        email, phone, photo_url = scrape_extended_info(url)
        p = Legislator(name=name, post_id=district, role='MLA', image=photo_url)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        p.add_contact('voice', phone, 'legislature')
        yield p
def get_people(self):
    """Yield Brossard's mayor and councillors from the carousel slides."""
    page = lxmlize(COUNCIL_PAGE)
    slides = page.xpath('//a[contains(@class, "slide item-")]')
    email_links = page.xpath('//a[contains(@href, "mailto:")]')
    for slide in slides:
        strong = slide.xpath('.//strong')[0]
        # Strip an optional "Mr. " prefix from the displayed name.
        name = re.search('(Mr\. )?(.+)', strong.text).group(2)
        position = strong.xpath('string(following-sibling::text())')
        if 'Mayor' in position:
            role = 'Maire'
            district = 'Brossard'
        else:
            role = 'Conseiller'
            # Keep only the leading "District N" portion of the title.
            district = re.sub(r'(?<=[0-9]).+', '', position).strip()
        photo = re.search(r'url\((.+)\)', slide.attrib['style']).group(1)
        p = Legislator(name=name, post_id=district, role=role, image=photo)
        p.add_source(COUNCIL_PAGE)
        try:
            matching = [link for link in email_links
                        if name in link.text_content().replace(u'\u2019', "'")]
            email_elem = matching[0]
            email = re.match('mailto:([email protected])', email_elem.attrib['href']).group(1)
            p.add_contact('email', email, None)
            phone = email_elem.xpath('./following-sibling::text()[contains(., "450")]')[0]
            p.add_contact('voice', phone, 'legislature')
        except IndexError:
            # oh Francyne/Francine Raymond, who are you, really?
            pass
        yield p
def get_people(self):
    """Yield the mayor and councillors of Saint-Jérôme.

    Each two-cell table row holds either the mayor (3 text fragments)
    or a councillor (district, name, phone, email fragments).
    """
    page = lxmlize(COUNCIL_PAGE)
    # First row of each table; keep only two-cell rows and drop the
    # trailing non-member row.
    councillor_trs = [
        tr for tr in page.xpath('//table//tr[1]') if len(tr) == 2
    ][:-1]
    for councillor_tr in councillor_trs:
        desc = [
            text.strip()
            for text in councillor_tr.xpath('.//text()[normalize-space()]')
            if text.strip()
        ]
        if len(desc) == 3:
            role = 'Maire'
            district = u'Saint-Jérôme'
        else:
            role = 'Conseiller'
            district = desc[0].replace(u'numéro ', '')
        name = desc[-3]
        phone = desc[-2]
        email = desc[-1]
        # BUG FIX: xpath('string(...)') returns a string, so the old
        # trailing [0] kept only the first character of the image URL.
        image = councillor_tr.xpath('string(.//img/@src)')
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.image = image
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Laval's mayor and district councillors from the table rows."""
    page = lxmlize(COUNCIL_PAGE)
    for row in page.xpath('//tr'):
        post = row.xpath('string(./td[2]/p/text())')
        if post == 'Maire de Laval':
            role = 'Maire'
            district = 'Laval'
        else:
            role = 'Conseiller'
            # Drop the "Circonscription no N -" prefix (the page sometimes
            # misspells it), then normalize the district name.
            district = re.sub('^C.?irconscription (?:no )?\d+\D- ', '', post)
            district = district.replace("L'", '').replace(' ', '').replace('bois', 'Bois')
        full_name = row.xpath('string(./td[2]/p/text()[2])').strip()
        # The first word is a courtesy title; keep the rest as the name.
        name = ' '.join(full_name.split()[1:])
        phone = row.xpath('string(.//span[@class="icon-phone"]/following::text())')
        mailto = row.xpath('string(.//a[contains(@href, "mailto:")]/@href)')
        email = mailto[len('mailto:'):]
        photo_url = row[0][0].attrib['src']
        p = Legislator(name=name, post_id=district, role=role, image=photo_url)
        p.add_source(COUNCIL_PAGE)
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Woolwich's mayor and ward councillors.

    Contact lines look like "Phone: (519) 555-1234 extension 123";
    the label before the first colon becomes the contact note.
    """
    page = lxmlize(COUNCIL_PAGE)
    councillors = page.xpath('//div[@id="printArea"]//strong')
    for councillor in councillors:
        info = councillor.xpath('./parent::p/text()')
        if not info:
            info = councillor.xpath('./parent::div/text()')
        info = [x for x in info if x.strip()]
        district = re.sub(r'(?<=Ward \d).+', '', info.pop(0))
        if 'Mayor' in district:
            district = 'Woolwich'
            role = 'Mayor'
        else:
            district = district.replace('Councillor', '').strip()
            role = 'Councillor'
        p = Legislator(name=councillor.text_content(), post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.image = councillor.xpath('./img/@src')[0]
        for contact in info:
            # BUG FIX: split only on the first colon so contact values
            # containing additional colons no longer raise ValueError
            # during tuple unpacking.
            note, num = contact.split(':', 1)
            num = num.strip().replace('(', '').replace(') ', '-').replace('extension ', 'x')
            p.add_contact(note, num, note)
        yield p
def get_people(self):
    """Yield the mayor and ward councillors of Kawartha Lakes."""
    listing = lxmlize(COUNCIL_PAGE)
    for link in listing.xpath('//p[@class="WSIndent"]/a'):
        label = link.text_content()
        ward_match = re.findall(r'(Ward [0-9]{1,2})', label)
        if ward_match:
            district = ward_match[0]
            name = label.replace(district, '').strip()
            role = 'Councillor'
        else:
            district = 'Kawartha Lakes'
            name = label.replace('Mayor', '').strip()
            role = 'Mayor'
        url = link.attrib['href']
        profile = lxmlize(url)
        email = profile.xpath(
            '//a[contains(@href, "mailto:")]/@href')[0].rsplit(':', 1)[1].strip()
        image = profile.xpath('//img[@class="image-right"]/@src')[0]
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        p.image = image
        yield p
def get_people(self):
    """Yield one MLA per member cell, following each detail page."""
    page = lxmlize(COUNCIL_PAGE)
    cells = page.xpath(
        '//div[@class="views-field views-field-field-picture"]/parent::td')
    for cell in cells:
        name = cell[1].text_content().replace(' .', '. ')  # typo on page
        riding = cell[2].text_content()
        if 'Mackenzie Delta' in riding:
            riding = 'Mackenzie-Delta'
        detail_url = cell[0].xpath('string(.//a/@href)')
        detail = lxmlize(detail_url)
        photo_url = detail.xpath('string(//div[@class="field-item even"]/img/@src)')
        email = detail.xpath('string(//a[contains(@href, "mailto:")])')
        contact_text = detail.xpath('string(//div[@property="content:encoded"]/p[1])')
        # The label is either "P:" or "Phone:".
        phone = re.search(r'P(hone)?: ([-0-9]+)', contact_text).group(2)
        mla = Legislator(name=name, post_id=riding, role='MLA', image=photo_url)
        mla.add_source(COUNCIL_PAGE)
        mla.add_source(detail_url)
        mla.add_contact('email', email, None)
        mla.add_contact('voice', phone, 'legislature')
        yield mla
def get_people(self):
    """Yield Halifax councillors and then the mayor.

    The council page lists each member as four rows (district, name,
    contact, spacer); chunks() groups them accordingly.
    """
    page = lxmlize(COUNCIL_PAGE, 'iso-8859-1')
    nodes = page.xpath('//table[@width="484"]//tr')
    try:
        for district_row, councillor_row, contact_row, _ in chunks(nodes, 4):
            post_id = district_row.xpath('string(.//strong)')
            name = councillor_row.xpath('string(.)')[len('Councillor '):]
            # TODO: phone numbers on site don't include area code. Add manually?
            #phone = contact_row.xpath('string(td[2]/text())')
            email = contact_row.xpath('string(td[4]/a)').replace('[at]', '@')
            p = Legislator(name=name, post_id=post_id, role='Councillor')
            p.add_source(COUNCIL_PAGE)
            #p.add_contact('voice', phone, 'legislature')
            p.add_contact('email', email, None)
            yield p
    except ValueError:
        # on the last run through, there will be less than 4 rows to unpack
        pass
    mayor_page = lxmlize(MAYOR_PAGE, 'iso-8859-1')
    name = mayor_page.xpath('string(//h1[contains(., "Bio")])')[:-len(' Bio')]
    contact_page = lxmlize(MAYOR_CONTACT_URL, 'iso-8859-1')
    email = contact_page.xpath('string(//a[contains(., "@")][1])')
    # BUG FIX: the mayor was previously emitted with role='Councillor'.
    p = Legislator(name=name, post_id='Halifax', role='Mayor')
    p.add_source(MAYOR_PAGE)
    p.add_source(MAYOR_CONTACT_URL)
    p.add_contact('email', email, None)
    yield p
def get_people(self):
    """Yield the mayor, then up to two councillors under each ward heading."""
    page = lxmlize(COUNCIL_PAGE)
    mayor_info = page.xpath('//h2[contains(text(), "MAYOR")]//following-sibling::p')[0]
    yield self.scrape_mayor(mayor_info)
    wards = page.xpath('//h3')
    for ward in wards:
        district = re.sub('\AWARD \d+ - ', '', ward.text_content())
        # All <p> siblings after the heading; only the first two are used
        # (see the break at the bottom of the loop).
        councillors = ward.xpath('following-sibling::p')
        for councillor in councillors:
            name = councillor.xpath('./strong')[0].text_content()
            p = Legislator(name=name, post_id=district, role='Councillor')
            p.add_source(COUNCIL_PAGE)
            info = councillor.xpath('./text()')
            # First bare text fragment is the mailing address.
            address = info.pop(0)
            p.add_contact('address', address, 'legislature')
            # get phone numbers
            for line in info:
                # Split on the "»" / non-breaking-space separators, then
                # drop empty and non-breaking-space fragments before
                # handing the pieces to the phone-number parser.
                stuff = re.split(ur'(\xbb)|(\xa0)', line)
                tmp = [y for y in stuff if y and not re.match(ur'\xa0', y)]
                self.get_tel_numbers(tmp, p)
            email = councillor.xpath('string(./a)')
            p.add_contact('email', email, None)
            yield p
            # Stop after the second paragraph: later <p> siblings belong
            # to other sections of the page, not to this ward.
            if councillor == councillors[1]:
                break
def get_people(self):
    """Yield one MLA per row of the members table, skipping vacant seats."""
    member_page = lxmlize(COUNCIL_PAGE)
    table = member_page.xpath('//table')[0]
    rows = table.cssselect('tr')[1:]
    for row in rows:
        (namecell, constitcell, partycell) = row.cssselect('td')
        full_name = namecell.text_content().strip()
        if full_name.lower() == 'vacant':
            continue
        # Names are listed "Last, First"; drop the honorific and reorder.
        (last, first) = full_name.split(',')
        name = first.replace('Hon.', '').strip() + ' ' + last.title().strip()
        district = ' '.join(constitcell.text_content().split())
        party = get_party(partycell.text)
        # (Removed an unused `data` dict that was assigned but never read.)
        url = namecell.cssselect('a')[0].get('href')
        photo, email = get_details(url)
        p = Legislator(name=name, post_id=district, role='MLA', party=party,
                       image=photo)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield the mayor and district councillors from the linked pages."""
    page = lxmlize(COUNCIL_PAGE)
    mayor_url = page.xpath('//a[contains(text(), "Mayor")]/@href')[0]
    yield self.scrape_mayor(mayor_url)
    councillors_url = page.xpath('//a[contains(text(), "Councillors")]/@href')[0]
    cpage = lxmlize(councillors_url)
    councillor_rows = cpage.xpath('//tr[td//img]')[:-1]
    for councillor_row in councillor_rows:
        img_cell, info_cell = tuple(councillor_row)
        name = info_cell.xpath(
            'string(.//span[contains(text(), "Councillor")])')[len('Councillor '):]
        district = info_cell.xpath('string(.//p[contains(text(), "District")])')
        email = info_cell.xpath('string(.//a[contains(@href, "mailto:")])')
        if not email:
            email = info_cell.xpath(
                'string(.//strong[contains(text(), "E-mail")]/following-sibling::text())')
        phone = info_cell.xpath(
            'string(.//p[contains(.//text(), "Telephone:")])').split(':')[1]
        # BUG FIX: the image URL lives in @src (not @href), and the XPath
        # must be relative to this row's cell ('.//img'), not the whole
        # document ('//img'), which always returned the first image on
        # the page.
        img_url_rel = img_cell.xpath('string(.//img/@src)')
        img_url = urljoin(councillors_url, img_url_rel)
        p = Legislator(name=name, post_id=district, role='Conseiller')
        p.add_source(COUNCIL_PAGE)
        p.add_source(councillors_url)
        p.add_contact('email', email, None)
        p.add_contact('voice', phone, 'legislature')
        p.image = img_url
        yield p
def get_people(self):
    """Yield Sherbrooke's mayor and district councillors."""
    page = lxmlize(COUNCIL_PAGE)
    for link in page.xpath('//div[@id="c2087"]//a'):
        name = link.text_content()
        url = link.attrib['href']
        profile = lxmlize(url)
        if 'Maire' in profile.xpath('//h2/text()')[0]:
            role = 'Maire'
            district = 'Sherbrooke'
        else:
            role = 'Conseiller'
            district = profile.xpath(
                '//div[@class="csc-default"]//a[@target="_blank"]/text()'
            )[0].replace('district', '').replace('Domaine Howard', 'Domaine-Howard').strip()
            # Borough districts keep only the bare borough name.
            if district in ('de Brompton', 'de Lennoxville'):
                district = district.replace('de ', '')
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.image = profile.xpath(
            '//div[@class="csc-textpic-image csc-textpic-last"]//img/@src')[0]
        parts = profile.xpath('//li[contains(text(), "phone")]/text()')[0].split(':')
        note = parts[0]
        phone = parts[1]
        p.add_contact(note, phone, note)
        emails = profile.xpath('//a[contains(@href, "mailto:")]/@href')
        if emails:
            p.add_contact('email', emails[0].split(':')[1], None)
        if district == 'Brompton':
            p.add_extra('boundary_url', '/boundaries/sherbrooke-boroughs/brompton/')
        elif district == 'Lennoxville':
            p.add_extra('boundary_url', '/boundaries/sherbrooke-boroughs/lennoxville/')
        yield p
def get_people(self):
    """Yield one MHA per table row, joining party data from the party page."""
    member_parties = dict(process_parties(lxmlize(PARTY_PAGE)))
    page = lxmlize(COUNCIL_PAGE)
    for row in page.xpath('//table[not(@id="footer")]/tr')[1:]:
        cells = [cell.xpath('string(.)').replace(u'\xa0', u' ') for cell in row]
        name, district, _, email = cells
        phone = row[2].xpath('string(text()[1])')
        photo_links = row[0].xpath('./a/@href')
        if not photo_links:
            continue  # there is a vacant district
        photo_page_url = photo_links[0]
        photo_page = lxmlize(photo_page_url)
        photo_url = photo_page.xpath('string(//table//img/@src)')
        district = district.replace(' - ', u'—')  # m-dash
        party = get_party(member_parties[name.strip()])
        p = Legislator(name=name, post_id=district, role='MHA', party=party,
                       image=photo_url)
        p.add_source(COUNCIL_PAGE)
        p.add_source(photo_page_url)
        p.add_contact('email', email, None)
        # TODO: either fix phone regex or tweak phone value
        p.add_contact('voice', phone, 'legislature')
        yield p
def get_people(self):
    """Yield Kirkland's mayor (first populated cell) and district councillors."""
    page = lxmlize(COUNCIL_PAGE, 'iso-8859-1')
    cells = page.xpath('//div[@id="PageContent"]/table/tbody/tr/td')
    for cell in cells:
        if not cell.text_content().strip():
            continue  # skip empty layout cells
        if cell == cells[0]:
            role = 'Maire'
            district = 'Kirkland'
        else:
            role = 'Conseiller'
            heading = cell.xpath('.//h2')[0].text_content()
            district = re.search('- (.+)', heading).group(1).strip()
            district = district.replace(' Ouest', ' ouest').replace(' Est', ' est')
        name = cell.xpath('.//strong/text()')[0]
        phone = cell.xpath('.//div[contains(text(), "#")]/text()')[0].replace(
            'T ', '').replace(' ', '-').replace(',-#-', ' x')
        email = cell.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('email', email, None)
        p.image = cell.xpath('.//img/@src')[0]
        yield p
def get_people(self):
    """Yield Abbotsford councillors from their profile pages, then the mayor."""
    index = lxmlize(COUNCIL_PAGE)
    for link in index.xpath('//li[@id="pageid2117"]/ul/li/a')[2:10]:
        if not link.text.startswith('Councillor'):
            continue
        url = link.attrib['href']
        profile = lxmlize(url)
        mail_link = profile.xpath('//a[@title]')[0]
        councillor = Legislator(
            name=mail_link.attrib['title'],
            post_id='Abbotsford',
            role='Councillor',
            image=profile.xpath(
                'string(//div[@class="pageContent"]//img[@align="right"]/@src)'))
        councillor.add_source(url)
        councillor.add_contact('email', mail_link.attrib['href'][len('mailto:'):], None)
        yield councillor
    mayor_doc = lxmlize(MAYOR_URL)
    # email is hidden behind a form
    mayor = Legislator(
        name=mayor_doc.xpath('string(//h1)').split(' ', 1)[1],
        post_id='Abbotsford',
        role='Mayor',
        image=mayor_doc.xpath('string(//img[@hspace=10]/@src)'))
    mayor.add_source(MAYOR_URL)
    yield mayor
def get_people(self):
    """Yield Gatineau's mayor and district councillors.

    The member list is built client-side, so the data is regexed out of
    the inline JavaScript arrays.
    """
    page = lxmlize(COUNCIL_PAGE)
    js = page.xpath('string(//div[@class="inner_container"]/div/script[2])')
    districts = re.findall(r'arrayDistricts\[a.+"(.+)"', js)
    members = re.findall(r'arrayMembres\[a.+"(.+)"', js)
    urls = re.findall(r'arrayLiens\[a.+"(.+)"', js)
    # first item in list is mayor
    p = Legislator(name=members[0], post_id='Gatineau', role='Maire')
    p.add_source(COUNCIL_PAGE)
    mayor_page = lxmlize(MAYOR_CONTACT_PAGE)
    p.add_source(MAYOR_CONTACT_PAGE)
    email = '*****@*****.**'  # hardcoded
    p.add_contact('email', email, None)
    yield p
    # BUG FIX: zip() returns a non-subscriptable iterator on Python 3;
    # materialize it before slicing off the mayor entry (identical
    # behavior on Python 2).
    for district, member, url in list(zip(districts, members, urls))[1:]:
        profile_url = COUNCIL_PAGE + '/' + url.split('/')[-1]
        profile_page = lxmlize(profile_url)
        photo_url = profile_page.xpath('string(//img/@src)')
        post_id = 'District ' + re.search(r'\d+', district).group(0)
        email = profile_page.xpath(
            'string(//a[contains(@href, "mailto:")]/@href)')[len('mailto:'):]
        p = Legislator(name=member, post_id=post_id, role='Conseiller')
        p.add_source(COUNCIL_PAGE)
        p.add_source(profile_url)
        p.image = photo_url
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Abbotsford's councillors, then the mayor."""
    listing = lxmlize(COUNCIL_PAGE)
    links = listing.xpath('//li[@id="pageid2117"]/ul/li/a')[2:10]
    for link in links:
        if not link.text.startswith('Councillor'):
            continue
        url = link.attrib['href']
        profile = lxmlize(url)
        mail_link = profile.xpath('//a[@title]')[0]
        name = mail_link.attrib['title']
        email = mail_link.attrib['href'][len('mailto:'):]
        photo = profile.xpath(
            'string(//div[@class="pageContent"]//img[@align="right"]/@src)')
        p = Legislator(name=name, post_id='Abbotsford', role='Councillor',
                       image=photo)
        p.add_source(url)
        p.add_contact('email', email, None)
        yield p
    mayor_doc = lxmlize(MAYOR_URL)
    mayor_name = mayor_doc.xpath('string(//h1)').split(' ', 1)[1]
    mayor_photo = mayor_doc.xpath('string(//img[@hspace=10]/@src)')
    # email is hidden behind a form
    mayor = Legislator(name=mayor_name, post_id='Abbotsford', role='Mayor',
                       image=mayor_photo)
    mayor.add_source(MAYOR_URL)
    yield mayor
def get_people(self):
    """Yield the mayor and then the district councillors of Trois-Rivières."""
    # mayor first, can't find email
    doc = lxmlize(MAYOR_URL)
    p = Legislator(
        name=doc.xpath('string(//td[@class="contenu"]/text()[last()])'),
        post_id=u"Trois-Rivières",
        role="Maire",
        image=doc.xpath('string(//img/@src[contains(., "Maire")])'))
    p.add_source(MAYOR_URL)
    yield p

    # The page is rendered through JS on the client, so match the
    # createItemNiv3(...) calls in the raw response text.
    resp = requests.get(COUNCIL_PAGE)
    page_re = re.compile(r'createItemNiv3.+"District (.+?)".+(index.+)\\"')
    for district, url_rel in page_re.findall(resp.text):
        # Districts whose official names keep the article are left alone;
        # otherwise strip the leading "de"/"de la"/"des"/"du".
        if district not in ("des Estacades", "des Plateaux",
                            "des Terrasses", "du Sanctuaire"):
            district = re.sub("\A(?:de(?: la)?|des|du) ", "", district)
        url = urljoin(COUNCIL_PAGE, url_rel)
        doc = lxmlize(url)
        email = doc.xpath('string(//a/@href[contains(., "mailto:")])')[len("mailto:"):]
        p = Legislator(
            name=doc.xpath("string(//h2)"),
            post_id=district,
            role="Conseiller",
            image=doc.xpath('string(//img/@src[contains(., "Conseiller")])'))
        p.add_source(url)
        p.add_contact("email", email, None)
        yield p
def get_people(self):
    """Yield one MLA per row of the members table, skipping vacant seats."""
    member_page = lxmlize(COUNCIL_PAGE)
    table = member_page.xpath('//table')[0]
    rows = table.cssselect('tr')[1:]
    for row in rows:
        (namecell, constitcell, partycell) = row.cssselect('td')
        full_name = namecell.text_content().strip()
        if full_name.lower() == 'vacant':
            continue
        # Names are listed "Last, First"; drop the honorific and reorder.
        (last, first) = full_name.split(',')
        name = first.replace('Hon.', '').strip() + ' ' + last.title().strip()
        district = ' '.join(constitcell.text_content().split())
        party = get_party(partycell.text)
        # (Removed an unused `data` dict that was assigned but never read.)
        url = namecell.cssselect('a')[0].get('href')
        photo, email = get_details(url)
        p = Legislator(name=name, post_id=district, role='MLA', party=party,
                       image=photo)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield the mayor, then each councillor paragraph on the council page.

    Phone numbers and emails are sometimes in the councillor's own
    paragraph and sometimes in a later one, so the scraper walks forward
    through sibling <p> elements until it finds them.
    """
    page = lxmlize(COUNCIL_PAGE, user_agent='Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)')
    yield self.scrape_mayor(page)
    # Councillor names are bolded with either <strong> or <b>.
    councillors = page.xpath('//strong[contains(text(), "Councillor")]/parent::p|//b[contains(text(), "Councillor")]/parent::p')
    for councillor in councillors:
        name = councillor.xpath('./strong/text()|./b/text()')[0].replace('Councillor', '').strip()
        district = re.findall('(?<=Ward \d, ).*', councillor.text_content())[0].strip()
        p = Legislator(name=name, post_id=district, role='Councillor')
        p.add_source(COUNCIL_PAGE)
        p.image = councillor.xpath('.//img/@src')[0]
        phone = re.findall(r'Phone(.*)', councillor.text_content())
        node = councillor
        # Walk forward until a paragraph mentions "Phone".
        # NOTE(review): the [1] index skips a sibling on each step —
        # presumably it matches this page's markup; confirm before changing.
        while not phone:
            node = node.xpath('./following-sibling::p')[1]
            phone = re.findall(r'Phone(.*)', node.text_content())
        phone = phone[0].strip()
        email = councillor.xpath('.//a[contains(@href, "mailto:")]')
        if not email:
            email = councillor.xpath('./following-sibling::p//a[contains(@href, "mailto")]')
        email = email[0].text_content()
        # Seven-digit numbers lack an area code; prefix 902.
        if len(re.sub(r'\D', '', phone)) == 7:
            phone = '902-%s' % phone
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Gatineau's mayor and district councillors.

    The member list is rendered client-side, so districts, member names
    and profile URLs are regexed out of the inline JavaScript arrays.
    """
    page = lxmlize(COUNCIL_PAGE)
    js = page.xpath(
        'string(//div[@class="inner_container"]/div/script[2])')
    districts = re.findall(r'arrayDistricts\[a.+"(.+)"', js)
    members = re.findall(r'arrayMembres\[a.+"(.+)"', js)
    urls = re.findall(r'arrayLiens\[a.+"(.+)"', js)
    # first item in list is mayor
    p = Legislator(name=members[0], post_id='Gatineau', role='Maire')
    p.add_source(COUNCIL_PAGE)
    mayor_page = lxmlize(MAYOR_CONTACT_PAGE)
    p.add_source(MAYOR_CONTACT_PAGE)
    email = '*****@*****.**'  # hardcoded
    p.add_contact('email', email, None)
    yield p
    # BUG FIX: zip() is a non-subscriptable iterator on Python 3;
    # materialize it before slicing off the mayor entry (identical
    # behavior on Python 2).
    for district, member, url in list(zip(districts, members, urls))[1:]:
        profile_url = COUNCIL_PAGE + '/' + url.split('/')[-1]
        profile_page = lxmlize(profile_url)
        photo_url = profile_page.xpath('string(//img/@src)')
        post_id = 'District ' + re.search(r'\d+', district).group(0)
        email = profile_page.xpath(
            'string(//a[contains(@href, "mailto:")]/@href)')[len('mailto:'):]
        p = Legislator(name=member, post_id=post_id, role='Conseiller')
        p.add_source(COUNCIL_PAGE)
        p.add_source(profile_url)
        p.image = photo_url
        p.add_contact('email', email, None)
        yield p
def scrape_mayor(self, url):
    """Build the Toronto mayor's Legislator from the bio and contact pages."""
    bio = lxmlize(url)
    mayor = Legislator(bio.xpath("//h1/text()")[0].replace("Toronto Mayor", "").strip(),
                       post_id="Toronto", role='Mayor')
    mayor.add_source(COUNCIL_PAGE)
    mayor.add_source(url)
    mayor.image = bio.xpath('string(//article/img/@src)')
    contact_url = bio.xpath(
        '//a[contains(text(), "Contact the Mayor")]')[0].attrib['href']
    # @todo fix lxmlize to use the redirected URL to make links absolute
    contact_url = contact_url.replace('www.', 'www1.')
    mayor.add_source(contact_url)
    contact = lxmlize(contact_url)
    mail_heading, phone_heading = contact.xpath('//h3')[:2]
    address = ''.join(mail_heading.xpath('./following-sibling::p//text()'))
    phone = phone_heading.xpath('string(./following-sibling::p[1])')
    mayor.add_contact('address', address, 'legislature')
    mayor.add_contact('voice', phone, 'legislature')
    return mayor
def get_people(self):
    """Yield Pointe-Claire's mayor, then councillors from paired table rows."""
    page = lxmlize(COUNCIL_PAGE)
    mayor = page.xpath('.//div[@class="item-page clearfix"]//table[1]//p')[1]
    name = mayor.xpath('.//strong/text()')[0]
    p = Legislator(name=name, post_id='Pointe-Claire', role='Maire')
    p.add_source(COUNCIL_PAGE)
    # Normalize "514 630-1200"-style numbers to dashed form.
    phone = re.findall(r'[0-9]{3}[ -][0-9]{3}-[0-9]{4}', mayor.text_content())[0].replace(' ', '-')
    p.add_contact('voice', phone, 'legislature')
    yield p
    # Councillors come in row pairs: an odd-indexed row of names/photos
    # followed by a row carrying each councillor's district and contacts.
    rows = page.xpath('//tr')
    for i, row in enumerate(rows):
        if i % 2 == 0:
            continue
        councillors = row.xpath('./td')
        for j, councillor in enumerate(councillors):
            name = councillor.text_content()
            # rows[i + 1].xpath('.//td//a[contains(@href, "maps")]/text()')[j]  # district number
            district = rows[i + 1].xpath('.//td/p[1]/text()')[j].replace(' / ', '/')
            p = Legislator(name=name, post_id=district, role='Conseiller')
            p.add_source(COUNCIL_PAGE)
            p.image = councillor.xpath('.//img/@src')[0]
            # Phone is read from the matching cell of the *following* row.
            phone = re.findall(r'[0-9]{3}[ -][0-9]{3}-[0-9]{4}', rows[i + 1].xpath('.//td')[j].text_content())[0].replace(' ', '-')
            p.add_contact('voice', phone, 'legislature')
            yield p
def get_people(self):
    """Yield the mayor (first cell) and councillors with their contact info."""
    page = lxmlize(COUNCIL_PAGE)
    cells = page.xpath(
        '//div[@class="article-content"]//td[@class="ms-rteTableOddCol-0"]')
    yield scrape_mayor(cells[0])
    for cell in cells[1:]:
        links = cell.xpath('.//a')
        if not links:
            continue
        name = links[0].text_content().strip()
        district = links[1].text_content()
        url = cell.xpath('.//a/@href')[0]
        profile = lxmlize(url)
        p = Legislator(name=name, post_id=district, role='Conseiller')
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.image = cell.xpath('./preceding-sibling::td//img/@src')[-1]
        # Any text fragment containing a digit is treated as a phone number.
        for fragment in profile.xpath('.//td[@class="ms-rteTableOddCol-0"]//text()'):
            if re.findall(r'[0-9]', fragment):
                p.add_contact('voice', fragment.strip().replace(' ', '-'), 'legislature')
        get_links(p, profile.xpath('.//td[@class="ms-rteTableOddCol-0"]')[0])
        email = profile.xpath(
            'string(//a[contains(@href, "mailto:")]/@href)')[len('mailto:'):]
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Montréal-Est's mayor (first entry) and numbered-district councillors."""
    page = lxmlize(
        COUNCIL_PAGE,
        user_agent='Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)')
    entries = page.xpath('//table[last()]//tr/td[1]//strong')
    for index, entry in enumerate(entries):
        name = entry.text_content().strip()
        if not name:
            continue
        if 'maire' in name:
            # Strip the "maire" title that precedes the mayor's name.
            name = name.split('maire')[1].strip()
            district = u'Montréal-Est'
        else:
            district = entry.xpath(
                './ancestor::td/following-sibling::td//strong')[-1].text_content()
            district = 'District %s' % re.sub('\D+', '', district)
        email = entry.xpath(
            './ancestor::tr/following-sibling::tr//a[contains(@href, "mailto:")]'
        )[0].text_content().strip()
        role = 'Maire' if index == 0 else 'Conseiller'
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_contact('email', email, None)
        yield p
def get_people(self):
    """Yield Halifax councillors and then the mayor.

    The council page lists each member as four rows (district, name,
    contact, spacer); chunks() groups them accordingly.
    """
    page = lxmlize(COUNCIL_PAGE, 'iso-8859-1')
    nodes = page.xpath('//table[@width="484"]//tr')
    try:
        for district_row, councillor_row, contact_row, _ in chunks(nodes, 4):
            post_id = district_row.xpath('string(.//strong)')
            name = councillor_row.xpath('string(.)')[len('Councillor '):]
            # TODO: phone numbers on site don't include area code. Add manually?
            #phone = contact_row.xpath('string(td[2]/text())')
            email = contact_row.xpath('string(td[4]/a)').replace('[at]', '@')
            p = Legislator(name=name, post_id=post_id, role='Councillor')
            p.add_source(COUNCIL_PAGE)
            #p.add_contact('voice', phone, 'legislature')
            p.add_contact('email', email, None)
            yield p
    except ValueError:
        # on the last run through, there will be less than 4 rows to unpack
        pass
    mayor_page = lxmlize(MAYOR_PAGE, 'iso-8859-1')
    name = mayor_page.xpath('string(//h1[contains(., "Bio")])')[:-len(' Bio')]
    contact_page = lxmlize(MAYOR_CONTACT_URL, 'iso-8859-1')
    email = contact_page.xpath('string(//a[contains(., "@")][1])')
    # BUG FIX: the mayor was previously emitted with role='Councillor'.
    p = Legislator(name=name, post_id='Halifax', role='Mayor')
    p.add_source(MAYOR_PAGE)
    p.add_source(MAYOR_CONTACT_URL)
    p.add_contact('email', email, None)
    yield p
def get_people(self):
    """Yield Ajax's mayor (first nav link) and councillors with contact rows."""
    page = lxmlize(COUNCIL_PAGE)
    nav_links = page.xpath('//ul[@class="subNav top"]/li/ul//li/a')
    for link in nav_links:
        name = link.text_content()
        url = link.attrib['href']
        profile = lxmlize(url)
        if link == nav_links[0]:
            district = 'Ajax'
            role = 'Mayor'
        else:
            heading = profile.xpath('//div[@id="printAreaContent"]//h1')[0].text_content()
            district = re.findall(r'Ward.*', heading)[0].strip()
            # Distinguish "Regional Councillor" from plain "Councillor".
            role = re.findall('((Regional)? ?(Councillor))', heading)[0][0]
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.image = profile.xpath('//div[@class="intQuicklinksPhoto"]/img/@src')[0]
        for line in profile.xpath('//table[@class="datatable"][1]//tr')[1:]:
            contact_type = line.xpath('./td')[0].text_content().strip()
            contact = line.xpath('./td')[1].text_content().strip()
            if re.match(r'(Phone)|(Fax)|(Email)', contact_type):
                contact_type = CONTACT_DETAIL_TYPE_MAP[contact_type]
                p.add_contact(contact_type, contact,
                              None if contact_type == 'email' else 'legislature')
            else:
                # Non-contact rows hold personal links (website etc.).
                p.add_link(contact, None)
        yield p
def scrape_mayor(self, div):
    """Build Guelph's mayor from the listing div plus the linked profile page."""
    link = div.xpath('.//a')[0]
    mayor = Legislator(name=link.text_content().replace('Mayor', ''),
                       post_id='Guelph', role='Mayor')
    mayor.add_source(COUNCIL_PAGE)
    url = link.attrib['href']
    mayor.add_source(url)
    # Third non-empty text fragment in the div is the phone number.
    phone = div.xpath('.//text()[normalize-space()]')[2]
    email = div.xpath('.//a[contains(@href,"mailto:")]')[0].text_content()
    profile = lxmlize(url)
    mayor.add_contact('voice', phone, 'legislature')
    mayor.add_contact('email', email, None)
    facebook = profile.xpath(
        '//div[@class="entry-content"]//a[contains(@href, "facebook")]')[0]
    mayor.add_link(facebook.attrib['href'], None)
    twitter = profile.xpath(
        '//div[@class="entry-content"]//a[contains(@href, "twitter")]')[0]
    mayor.add_link(twitter.attrib['href'], None)
    mayor.image = profile.xpath('//header/img/@src')[0]
    return mayor
def get_people(self):
    """Yield Kawartha Lakes' mayor and ward councillors."""
    page = lxmlize(COUNCIL_PAGE)
    for anchor in page.xpath('//p[@class="WSIndent"]/a'):
        text = anchor.text_content()
        wards = re.findall(r'(Ward [0-9]{1,2})', text)
        if wards:
            district = wards[0]
            name = text.replace(district, '').strip()
            role = 'Councillor'
        else:
            # No ward label means this is the mayor's entry.
            district = 'Kawartha Lakes'
            name = text.replace('Mayor', '').strip()
            role = 'Mayor'
        url = anchor.attrib['href']
        detail = lxmlize(url)
        email = detail.xpath('//a[contains(@href, "mailto:")]/@href')[0]
        email = email.rsplit(':', 1)[1].strip()
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        p.image = detail.xpath('//img[@class="image-right"]/@src')[0]
        yield p
def get_people(self):
    """Yield the mayor and district councillors of Sainte-Anne-de-Bellevue.

    Rows mentioning "Maire" are the mayor; other rows carry a district
    number in their first cell.
    """
    page = lxmlize(COUNCIL_PAGE)
    # (Removed an unused enumerate() index from the original loop.)
    for councillor in page.xpath('//div[@id="content"]//tr'):
        # The name always sits in the second cell; only district and role
        # differ between the mayor and councillor branches.
        name = councillor.xpath('./td')[1].text_content()
        if 'Maire' in councillor.text_content():
            district = 'Sainte-Anne-de-Bellevue'
            role = 'Maire'
        else:
            district = 'District ' + re.findall(
                r'\d', councillor.xpath('./td')[0].text_content())[0]
            role = 'Conseiller'
        p = Legislator(name=name, post_id=district, role=role)
        p.add_source(COUNCIL_PAGE)
        email = councillor.xpath('.//a')
        if email:
            p.add_contact('email', email[0].attrib['href'].replace('mailto:', ''), None)
        yield p
def get_people(self):
    """Yield candidate Legislators from the CSV at COUNCIL_PAGE.

    Columns are dispatched by name: "Office N: ..." columns accumulate
    per-office contact details, known keys map onto Legislator kwargs,
    link/extra columns are collected, and unknown columns raise so new
    columns are noticed immediately.
    """
    reader = csv_reader(COUNCIL_PAGE, header=True)
    for row in reader:
        kwargs = {'role': 'candidate'}
        email = None
        links = []
        extra = {}
        offices = []
        for k, v in row.items():
            v = v.strip()
            if not v:
                continue
            k = k.strip()
            match = re.search(r'\AOffice (\d): ', k)
            if match:
                index = int(match.group(1))
                # Grow the office list on demand so columns may arrive
                # out of order.
                while index > len(offices):
                    offices.append({})
                if k[10:] == 'Type':
                    offices[index - 1]['note'] = v
                elif k[10:] in CONTACT_TYPE_KEYS:
                    offices[index - 1][CONTACT_TYPE_KEYS[k[10:]]] = v
                else:
                    raise Exception(k)
            elif k == 'Party Name':
                kwargs['party'] = PARTY_MAP[v]
            elif k in KEYS:
                kwargs[KEYS[k]] = v
            elif k == 'Email':
                email = v
            elif k in LINKS_KEYS:
                links.append({'url': v, 'note': k})
            elif k in IGNORE_KEYS:
                continue
            elif k in EXTRA_KEYS:
                extra[re.sub(r'[^a-z0-9_]', '', k.lower().replace(' ', '_'))] = v
            else:
                raise Exception(k)
        contacts = []
        for office in offices:
            for _, type in CONTACT_TYPE_KEYS.items():
                if office.get(type):
                    # BUG FIX: Python lists have append(), not push()
                    # (the old call raised AttributeError), and the dict
                    # key must be the literal 'type' so add_contact()
                    # receives a `type=` keyword.
                    contacts.append({'note': office['note'], 'type': type,
                                     'value': office[type]})
        if 'name' in kwargs:
            p = Legislator(**kwargs)
            p.add_source(COUNCIL_PAGE)
            if email:
                p.add_contact('email', email, None)
            for link in links:
                # BUG FIX: pass the single link dict, not the whole list.
                p.add_link(**link)
            for contact in contacts:
                p.add_contact(**contact)
            for k, v in extra.items():
                p.add_extra(k, v)
            yield p
def councillor_data(url, name, ward):
    """Yield one Councillor built from the given profile page."""
    page = lxmlize(url)
    # sadly, email is a form on a separate page
    phone = page.xpath('string(//strong[contains(., "Phone")])').split(':')[1]
    photo = urljoin(url, page.xpath('string(//div[@id="contentcontainer"]//img/@src)'))
    member = Legislator(name=name, post_id=ward, role='Councillor')
    member.add_source(COUNCIL_PAGE)
    member.add_source(url)
    member.add_contact('voice', phone, 'legislature')
    member.image = photo
    yield member
def councillor_data(html):
    """Build a Legislator from one councillor's HTML fragment."""
    # The ward div carries exactly two text nodes: district, then phone.
    district, phone = html.xpath('./div[@class="wardInfo"]/text()')
    name = html.xpath('string(./div[@class="councillorInfo"]/a/text()[2])')
    email = html.xpath('string(./div[@class="emailInfo"])')
    photo = html.xpath('string((.//@src)[1])')
    member = Legislator(name=name, post_id=district, role='Councillor')
    member.add_source(COUNCIL_PAGE)
    member.add_contact('voice', phone, 'legislature')
    member.add_contact('email', email, None)
    member.image = photo
    return member
def councillor_data(url, name, ward):
    """Yield one councillor's Legislator record.

    The councillor's email is only reachable through a web form on a
    separate page, so it is omitted.
    """
    page = lxmlize(url)
    phone_label = page.xpath('string(//strong[contains(., "Phone")])')
    photo_src = page.xpath(
        'string(//div[@id="contentcontainer"]//img/@src)')
    legislator = Legislator(name=name, post_id=ward, role='Councillor')
    legislator.add_source(COUNCIL_PAGE)
    legislator.add_source(url)
    # Drop the "Phone" label, keeping the text after the colon.
    legislator.add_contact('voice', phone_label.split(':')[1], 'legislature')
    legislator.image = urljoin(url, photo_src)
    yield legislator
def mayor_data(node):
    """Build the Hamilton mayor's Legislator from his info node."""
    # Drop the 6-character label prefix from the <strong> text to get
    # the bare name.
    name = node.xpath('string(.//strong)')[6:]
    phone = node.xpath('string(.//p[2]/text()[1])')
    email = node.xpath('string((.//a)[1])')
    portrait = node.xpath('string(.//img/@src)')
    mayor = Legislator(name=name, post_id='Hamilton', role='Mayor')
    mayor.add_source(COUNCIL_PAGE)
    mayor.add_contact('email', email, None)
    mayor.add_contact('voice', phone, 'legislature')
    mayor.image = portrait
    return mayor
def mayor_data(url):
    """Scrape Mississauga's mayor into a Legislator.

    TODO: the photo lives on a separate page and is not collected.
    """
    page = lxmlize(url)
    name_text = page.xpath('//p[contains(text(), "Worship Mayor")]/text()')[0]
    # Drop the "His Worship Mayor" honorific (the first three words).
    name = ' '.join(name_text.split()[3:])  # NOTE: brittle, as flagged upstream
    email = page.xpath('//a[contains(@href, "mailto")]/text()')[0]
    mayor = Legislator(name=name, post_id='Mississauga', role='Mayor')
    mayor.add_source(url)
    mayor.add_contact('email', email, None)
    return mayor
def mayor_data(url):
    """Return a Legislator for Mississauga's mayor.

    The portrait is on a different page and is deliberately skipped
    for now (see upstream TODO).
    """
    page = lxmlize(url)
    heading = page.xpath('//p[contains(text(), "Worship Mayor")]/text()')[0]
    words = heading.split()
    # Skip the honorific words; admittedly fragile if the wording changes.
    name = ' '.join(words[3:])
    email = page.xpath('//a[contains(@href, "mailto")]/text()')[0]
    p = Legislator(name=name, post_id='Mississauga', role='Mayor')
    p.add_source(url)
    p.add_contact('email', email, None)
    return p
def scrape_mayor(url):
    """Scrape Caledon's mayor page into a Legislator."""
    page = lxmlize(url)
    heading = page.xpath(
        '//div[@id="printAreaContent"]/h1/strong/text()')[0]
    name = heading.replace('Mayor', '').strip()
    # The address is the text node after the "mail" label; the phone is
    # the second word after the "phone" label.
    address = page.xpath(
        '//strong[contains(text(), "mail")]/parent::p/text()'
    )[1].replace(':', '').strip()
    phone = page.xpath(
        '//strong[contains(text(), "phone")]/parent::p/text()'
    )[1].split()[1]
    mayor = Legislator(name=name, post_id='Caledon', role='Mayor')
    mayor.add_source(COUNCIL_PAGE)
    mayor.add_source(url)
    mayor.image = page.xpath(
        '//h2[contains(text(), "About me")]/img/@src')[0]
    mayor.add_contact('address', address, 'legislature')
    mayor.add_contact('voice', phone, 'legislature')
    return mayor
def scrape_mayor(self, name, url):
    """Scrape Burlington's mayor, pulling the address from his own site."""
    page = lxmlize(url)
    contact = page.xpath(
        '//div[@id="secondary align_RightSideBar"]/blockquote/p/text()')
    phone, fax = contact[0], contact[1]
    email = page.xpath(
        '//div[@id="secondary align_RightSideBar"]/blockquote/p/a[contains(@href, "mailto:")]/text()'
    )[0]
    # The mailing address only appears on the mayor's personal site:
    # follow its "Contact" menu link to the page that mentions City Hall.
    mayor_site = 'http://www.burlingtonmayor.com'
    contact_url = lxmlize(mayor_site).xpath(
        '//div[@class="menu"]//a[contains(text(),"Contact")]'
    )[0].attrib['href']
    address = lxmlize(contact_url).xpath(
        '//div[@class="entry-content"]//p[contains(text(),"City Hall")]'
    )[0].text_content()
    p = Legislator(name=name, post_id="Burlington", role='Mayor')
    p.add_source(COUNCIL_PAGE)
    p.add_source(url)
    p.add_source(mayor_site)
    p.image = page.xpath(
        '//div[@id="secondary align_RightSideBar"]/p/img/@src')[0]
    p.add_contact('voice', phone, 'legislature')
    p.add_contact('fax', fax, 'legislature')
    p.add_contact('email', email, None)
    p.add_contact('address', address, 'legislature')
    return p
def scrape_mayor():
    """Scrape Edmonton's mayor page into a Legislator."""
    page = lxmlize(MAYOR_PAGE)
    heading = page.xpath('//strong[contains(text(), "Mayor")]/text()')[0]
    # Remove the "Mayor" honorific and surrounding whitespace.
    mayor = Legislator(name=heading.replace('Mayor', '').strip(),
                       post_id='Edmonton', role='Mayor')
    mayor.add_source(MAYOR_PAGE)
    mayor.image = page.xpath('//div[@id="contentArea"]//img/@src')[0]
    # Join the address lines into a single space-separated string.
    full_address = ' '.join(page.xpath('//address/p/text()'))
    mayor.add_contact('address', full_address, 'legislature')
    return mayor
def scrape_mayor(self, page):
    """Scrape Stratford's mayor from the already-fetched council page.

    Fix: strip the whitespace left behind after removing the "Mayor"
    honorific from the name (other scrapers in this file do the same).
    """
    info = page.xpath('//div[@class="entry-content"]/p')[:4]
    name = info[0].text_content().replace('Mayor', '').strip()
    email = info[2].xpath('./a')[0].text_content()
    phone = info[3].text_content().replace('Phone ', '')
    p = Legislator(name=name, post_id='Stratford', role='Mayor')
    p.add_source(COUNCIL_PAGE)
    p.image = page.xpath('//div[@class="entry-content"]/p/a/img/@src')[0]
    p.add_contact('email', email, None)
    # Local 7-digit numbers get the 902 area code prepended.
    if len(re.sub(r'\D', '', phone)) == 7:
        phone = '902-%s' % phone
    p.add_contact('voice', phone, 'legislature')
    return p
def scrape_mayor(url):
    """Scrape Moncton's mayor page into a Legislator."""
    page = lxmlize(url)
    # The name is words 2-3 of the second paragraph (drops the honorific).
    name = ' '.join(
        page.xpath('//div[@id="content"]/p[2]/text()')[0].split()[1:3])
    mayor = Legislator(name=name, post_id='Moncton', role='Mayor')
    mayor.add_source(url)
    mayor.image = page.xpath('//div[@id="content"]/p[1]/img/@src')[0]
    info = page.xpath('//table[@class="whiteroundedbox"]//tr[2]/td[1]')[1]
    # Address spans text nodes 2-4 of the first paragraph; collapse runs
    # of whitespace.
    address = ', '.join(info.xpath('./p[1]/text()')[1:4])
    address = re.sub(r'\s{2,}', ' ', address).strip()
    numbers = info.xpath('.//p[2]/text()')
    phone = numbers[0].split(':')[1].strip()
    fax = numbers[1].split(':')[1].strip()
    email = info.xpath('.//a/@href')[0].split(':')[1].strip()
    mayor.add_contact('address', address, 'legislature')
    # Prepend the 506 area code to bare 7-digit numbers.
    if len(re.sub(r'\D', '', phone)) == 7:
        phone = '506-%s' % phone
    mayor.add_contact('voice', phone, 'legislature')
    mayor.add_contact('fax', fax, 'legislature')
    mayor.add_contact('email', email, None)
    return mayor
def get_people(self):
    """Yield Grande Prairie council members from the list table.

    Fix: pass ``post_id`` at construction instead of creating the
    Legislator with ``post_id=None`` and overwriting the attribute
    afterwards (consistent with the other scrapers in this file).
    """
    page = lxmlize(COUNCIL_PAGE)
    for row in page.xpath('//table[@class="listtable"]//tr')[1:]:
        celltext = row.xpath('./td//text()')
        # Names are listed "Last, First".
        last, first = celltext[0].split(', ')
        name = ' '.join((first, last))
        p = Legislator(name=name, post_id='Grande Prairie',
                       role=celltext[1])
        p.add_source(COUNCIL_PAGE)
        p.add_contact('voice', celltext[3], 'legislature')
        # The last cell's link is a mailto: href; keep the address part.
        email_href = row.xpath('string(./td[last()]//a/@href)')
        p.add_contact('email', email_href.split(':')[1], None)
        yield p
def scrape_mayor(self, div):
    """Scrape Greater Sudbury's mayor via his bio and contact pages."""
    url = div.attrib['href']
    page = lxmlize(url)
    name = div.text_content().replace('Mayor ', '')
    contact_url = page.xpath(
        '//ul[@class="navSecondary"]//a[contains(text(),"Contact")]'
    )[0].attrib['href']
    page = lxmlize(contact_url)
    contact_div = page.xpath('//div[@class="col"][2]')[0]
    raw_address = contact_div.xpath('.//p[1]')[0].text_content()
    address = re.findall(r'(City of Greater .*)', raw_address,
                         flags=re.DOTALL)[0]
    phone = contact_div.xpath(
        './/p[2]')[0].text_content().replace('Phone: ', '')
    fax = contact_div.xpath('.//p[3]')[0].text_content().split(' ')[-1]
    # NOTE(review): '//a...' is an absolute XPath, so it searches the whole
    # document rather than contact_div — presumably intentional; confirm.
    email = contact_div.xpath(
        '//a[contains(@href, "mailto:")]')[0].text_content()
    p = Legislator(name=name, post_id='Greater Sudbury', role='Mayor')
    p.add_source(COUNCIL_PAGE)
    p.add_source(contact_url)
    p.add_contact('address', address, 'legislature')
    p.add_contact('voice', phone, 'legislature')
    p.add_contact('fax', fax, 'legislature')
    p.add_contact('email', email, None)
    return p
def get_people(self):
    """Yield the mayor followed by each councillor."""
    page = lxmlize(COUNCIL_PAGE)
    councillors = page.xpath('//div[@id="navMultilevel"]//a')
    for councillor in councillors:
        # The first link is the mayor; delegate and move on.
        if councillor == councillors[0]:
            yield self.scrape_mayor(councillor)
            continue
        # Councillor links read "Ward - Name"; anything else ends the list.
        if '-' not in councillor.text_content():
            break
        district, name = councillor.text_content().split(' - ')
        if name == 'Vacant':
            continue
        detail = lxmlize(councillor.attrib['href'])
        address = detail.xpath(
            '//div[@class="column last"]//p')[0].text_content()
        phone = detail.xpath(
            '//article[@id="primary"]//*[contains(text(),"Tel")]'
        )[0].text_content()
        # Keep from the first digit on; normalize "(xxx) yyy" to "(xxx-yyy".
        phone = re.findall(r'([0-9].*)', phone)[0].replace(') ', '-')
        fax = detail.xpath(
            '//article[@id="primary"]//*[contains(text(),"Fax")]'
        )[0].text_content()
        fax = re.findall(r'([0-9].*)', fax)[0].replace(') ', '-')
        email = detail.xpath(
            '//a[contains(@href, "mailto:")]')[0].text_content()
        p = Legislator(name=name, post_id=district, role='Councillor')
        p.add_source(COUNCIL_PAGE)
        p.add_source(councillor.attrib['href'])
        p.add_contact('address', address, 'legislature')
        p.add_contact('voice', phone, 'legislature')
        p.add_contact('fax', fax, 'legislature')
        p.add_contact('email', email, None)
        p.image = detail.xpath('//article[@id="primary"]//img/@src')[1]
        yield p
def scrape_mayor(self, div):
    """Build the Greater Sudbury mayor's record from his linked pages."""
    bio_url = div.attrib['href']
    bio_page = lxmlize(bio_url)
    name = div.text_content().replace('Mayor ', '')
    contact_url = bio_page.xpath(
        '//ul[@class="navSecondary"]//a[contains(text(),"Contact")]'
    )[0].attrib['href']
    contact_page = lxmlize(contact_url)
    col = contact_page.xpath('//div[@class="col"][2]')[0]
    # The address starts at "City of Greater ..." within the first
    # paragraph (DOTALL: it may span several lines).
    address = re.findall(
        r'(City of Greater .*)',
        col.xpath('.//p[1]')[0].text_content(),
        flags=re.DOTALL)[0]
    phone = col.xpath('.//p[2]')[0].text_content().replace('Phone: ', '')
    fax = col.xpath('.//p[3]')[0].text_content().split(' ')[-1]
    # NOTE(review): '//a...' is document-wide, not scoped to col; kept as-is.
    email = col.xpath('//a[contains(@href, "mailto:")]')[0].text_content()
    mayor = Legislator(name=name, post_id='Greater Sudbury', role='Mayor')
    mayor.add_source(COUNCIL_PAGE)
    mayor.add_source(contact_url)
    mayor.add_contact('address', address, 'legislature')
    mayor.add_contact('voice', phone, 'legislature')
    mayor.add_contact('fax', fax, 'legislature')
    mayor.add_contact('email', email, None)
    return mayor
def scrape_mayor(self, page):
    """Scrape Stratford's mayor from the already-fetched council page.

    Fix: strip the whitespace left behind after removing the "Mayor"
    honorific from the name (other scrapers in this file do the same).
    """
    info = page.xpath('//div[@class="entry-content"]/p')[:4]
    name = info[0].text_content().replace('Mayor', '').strip()
    email = info[2].xpath('./a')[0].text_content()
    phone = info[3].text_content().replace('Phone ', '')
    p = Legislator(name=name, post_id='Stratford', role='Mayor')
    p.add_source(COUNCIL_PAGE)
    p.image = page.xpath('//div[@class="entry-content"]/p/a/img/@src')[0]
    p.add_contact('email', email, None)
    # Local 7-digit numbers get the 902 area code prepended.
    if len(re.sub(r'\D', '', phone)) == 7:
        phone = '902-%s' % phone
    p.add_contact('voice', phone, 'legislature')
    return p
def get_people(self):
    """Yield Coquitlam council members from the list of class-L4 links."""
    listing = lxmlize(COUNCIL_PAGE)
    for person_link in listing.xpath('//a[@class="L4"]'):
        # Link text is "<Role> <Name>"; split off the first word only.
        role, name = person_link.text_content().split(' ', 1)
        url = person_link.attrib['href']
        detail = lxmlize(url)
        photo_url = detail.xpath('string(//img[@class="img-right"]/@src)')
        email = detail.xpath('string(//a[starts-with(@href, "mailto:")])')
        p = Legislator(name=name, post_id='Coquitlam', role=role,
                       image=photo_url)
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.add_contact('email', email, None)
        yield p