def scrape_interim_committee(self, link, name):
    """Scrape an interim committee page and return its Organization."""
    url = re.sub(r"\s+", "", link.attrib["href"])
    page = lxml.html.fromstring(self.get(url).text)
    page.make_links_absolute(url)

    if "Subcommittee" in name:
        # Check whether the parent committee is manually defined first
        # before attempting to automatically resolve it.
        parent = WVCommitteeScraper.subcommittee_parent_map.get(name, None)
        if parent is None:
            parent = name.partition("Subcommittee")[0].strip()
        comm = Organization(
            name=name,
            classification="committee",
            parent_id=self._joint_committees[parent],
        )
    else:
        comm = Organization(name=name, classification="committee", chamber="legislature")
        self._joint_committees[name] = comm

    comm.add_source(url)

    for member_link in page.xpath('//a[contains(@href, "?member=")]'):
        # Strip the chamber honorific from the displayed name.
        member = member_link.text_content().strip()
        member = re.sub(r"^Delegate\s+", "", member)
        member = re.sub(r"^Senator\s+", "", member)
        role = member_link.getnext().text or "member"
        comm.add_member(member, role.strip())

    return comm
def get_organizations(self):
    """Yield the legislature and, unless unicameral, both chambers."""
    legislature = Organization(
        name=self.metadata.legislature_name, classification="legislature"
    )
    yield legislature
    if self.metadata.unicameral:
        return
    for chamber_meta, classification in (
        (self.metadata.upper, "upper"),
        (self.metadata.lower, "lower"),
    ):
        yield Organization(
            chamber_meta.name,
            classification=classification,
            parent_id=legislature._id,
        )
def scrape_chamber(self, chamber):
    """Scrape one chamber's committees and members from the AZ API.

    Yields one Organization per committee, sourced from both the list
    and detail endpoints.
    """
    session = self.latest_session()
    # since we are scraping only latest_session
    session_id = session_metadata.session_id_meta_data[session]
    # The API wants a single-letter chamber code; compute it once instead
    # of duplicating the conditional for every request.
    body = "S" if chamber == "upper" else "H"
    client = AZClient()
    committees = client.list_committees(
        sessionId=session_id,
        includeOnlyCommitteesWithAgendas="false",
        legislativeBody=body,
    )
    for committee in committees.json():
        c = Organization(
            name=committee["CommitteeName"],
            chamber=chamber,
            classification="committee",
        )
        details = client.get_standing_committee(
            sessionId=session_id,
            legislativeBody=body,
            committeeId=committee["CommitteeId"],
            includeMembers="true",
        )
        for member in details.json()[0]["Members"]:
            c.add_member(
                "{} {}".format(member["FirstName"], member["LastName"]),
                role=parse_role(member),
            )
        c.add_source(details.url)
        c.add_source(committees.url)
        yield c
def scrape(self, chamber=None):
    """Scrape committee lists for one or both chambers from NELIS.

    Yields an Organization per committee; member scraping is delegated
    to scrape_comm_members.
    """
    chambers = [chamber] if chamber else ["upper", "lower"]
    # None of these depend on the chamber, so compute (and fetch) once
    # instead of repeating per loop iteration.
    insert = self.jurisdiction.session_slugs[self.latest_session()]
    chamber_names = {"lower": "Assembly", "upper": "Senate"}
    list_url = "%s/%s/HomeCommittee/LoadCommitteeListTab" % (nelis_root, insert)
    html = self.get(list_url).text
    doc = lxml.html.fromstring(html)
    for chamber in chambers:
        sel = "panel%sCommittees" % chamber_names[chamber]
        ul = doc.xpath('//ul[@id="%s"]' % sel)[0]
        coms = ul.xpath('li/div/div/div[@class="col-md-4"]/a')
        for com in coms:
            name = com.text.strip()
            com_id = re.match(
                r".*/Committee/(?P<id>[0-9]+)/Overview", com.attrib["href"]
            ).group("id")
            com_url = (
                "%s/%s/Committee/FillSelectedCommitteeTab?committeeOrSubCommitteeKey=%s"
                "&selectedTab=Overview" % (nelis_root, insert, com_id)
            )
            org = Organization(name=name, chamber=chamber, classification="committee")
            org.add_source(com_url)
            self.scrape_comm_members(chamber, org, com_url)
            yield org
def test_committee_add_member_person():
    """Adding a Person object records a membership linking both ids."""
    committee = Organization("Defense", classification="committee")
    person = Person("John Adams")
    committee.add_member(person, role="chairman")
    membership = committee._related[0]
    assert membership.person_id == person._id
    assert membership.organization_id == committee._id
    assert membership.role == "chairman"
def handle_page(self):
    """Build and yield an Organization from an upper-chamber committee page."""
    name = self.doc.xpath('//h2[@class="committeeName"]')[0].text
    if name.startswith("Appropriations Subcommittee"):
        return
        # TODO: restore scraping of Appropriations Subcommittees
        # name = name.replace('Appropriations ', '')
        # parent = {'name': 'Appropriations', 'classification': 'upper'}
        # chamber = None
    if name.startswith("Committee on"):
        name = name.replace("Committee on ", "")
    parent = None
    chamber = "upper"
    comm = Organization(
        name=name, classification="committee", chamber=chamber, parent_id=parent
    )
    # Officers are listed as dt (role) / dd (name) pairs.
    for dt in self.doc.xpath('//div[@id="members"]/dl/dt'):
        role = dt.text.replace(": ", "").strip().lower()
        raw = dt.xpath("./following-sibling::dd")[0].text_content()
        comm.add_member(self.clean_name(raw), role=role)
    # Plain members are a flat list.
    for li in self.doc.xpath('//div[@id="members"]/ul/li'):
        comm.add_member(self.clean_name(li.text_content()))
    comm.add_source(self.url)
    yield comm
def __missing__(self, key):
    """Create, cache, and return a committee for an unseen (chamber, name) key."""
    chamber, committee_name = key
    committee = Organization(
        name=committee_name, chamber=chamber, classification="committee"
    )
    self[key] = committee
    return committee
def scrape_lower_committee(self, name, url):
    """Scrape a lower-chamber committee roster and return its Organization."""
    page = self.lxmlize(url)
    committee = Organization(chamber="lower", name=name, classification="committee")
    committee.add_source(url)
    seen = set()
    member_links = self.get_nodes(
        page, '//div[@class="mod-inner"]//a[contains(@href, "mem")]'
    )
    for member_link in member_links:
        member_name = member_link.text
        if member_name is None:
            continue
        # Figure out if this person is the chair: the chair is listed first.
        if member_link == member_links[0]:
            member_role = "chair"
        else:
            member_role = "member"
        # BUG FIX: this previously tested the committee name ("name")
        # against "seen", which was never true, so duplicate member links
        # were added more than once.  Dedup on the member's name instead.
        if member_name not in seen:
            committee.add_member(member_name, member_role)
            seen.add(member_name)
    return committee
def _scrape_upper_committee(self, name, url2):
    """Scrape an upper-chamber committee's Assignments page and yield it."""
    url3 = url2.replace("default.asp", "Assignments.asp")
    committee = Organization(name, chamber="upper", classification="committee")
    committee.add_source(url2)
    page = self.lxmlize(url3)
    members = page.xpath('//table[@id="table38"]//font/a/b')
    for position, link in enumerate(members):
        # The first two entries are the officers.  BUG FIX: the old code
        # compared against members[1] unconditionally, raising IndexError
        # whenever a committee listed exactly one member.
        if position == 0:
            role = "Chairman"
        elif position == 1:
            role = "Vice-Chairman"
        else:
            role = "member"
        member = link.xpath("string()")
        member = member.replace("Senator ", "")
        member = re.sub(r"[\s]{2,}", " ", member).strip()
        committee.add_member(member, role)
    yield committee
def scrape_reps_comm(self):
    """Scrape Maine House committee rosters and yield Organizations."""
    # As of 1/27/15, the committee page has the wrong
    # session number (126th) at the top, but
    # has newly elected people, so we're rolling with it.
    url = "http://legislature.maine.gov/house/hsecoms.htm"
    root = lxml.html.fromstring(self.get(url).text)
    count = 0
    for n in range(1, 12, 2):
        comm_name = root.xpath("string(//body/center[%s]/h1/a)" % (n))
        committee = Organization(
            chamber="lower", name=comm_name, classification="committee"
        )
        count = count + 1
        for el in root.xpath("/html/body/ul[%s]/li/a" % (count)):
            rep = el.text
            if rep.find("(") != -1:
                mark = rep.find("(")
                # NOTE(review): assumes a fixed 15-character prefix before
                # the name — TODO confirm against the live page.
                rep = rep[15:mark].strip()
            if "chair" in rep.lower():
                role = "chair"
                rep = re.sub(r"(?i)[\s,]*chair\s*$", "", rep).strip()
            else:
                role = "member"
            committee.add_member(rep, role)
        committee.add_source(url)
        yield committee
def scrape_comm(self, chamber):
    """Scrape MS committee membership from the chamber's XML feed."""
    url = "http://billstatus.ls.state.ms.us/htms/%s_cmtememb.xml" % chamber
    comm_page = self.get(url)
    root = lxml.etree.fromstring(comm_page.content)
    chamber = "lower" if chamber == "h" else "upper"
    for mr in root.xpath("//COMMITTEE"):
        comm = Organization(
            mr.xpath("string(NAME)"), chamber=chamber, classification="committee"
        )
        chair = mr.xpath("string(CHAIR)").replace(", Chairman", "")
        if chair:
            comm.add_member(chair, role="Chairman")
        vice_chair = mr.xpath("string(VICE_CHAIR)").replace(", Vice-Chairman", "")
        if vice_chair:
            comm.add_member(vice_chair, role="Vice-Chairman")
        members = mr.xpath("string(MEMBERS)").split(";")
        # Drop (at most one) empty segment produced by a trailing semicolon.
        if "" in members:
            members.remove("")
        for leg in members:
            comm.add_member(leg.strip())
        comm.add_source(url)
        yield comm
def add_committees(self, legislator_page, legislator, chamber, url):
    """Record one legislator's committee memberships from their bio page."""
    # as of today, both chambers do committees the same way! Yay!
    rows = self.get_nodes(
        legislator_page,
        '//div[@id="ContentPlaceHolder1_TabSenator_TabCommittees"]//table/'
        "tr",
    )
    if len(rows) == 0:
        return
    # First row is the header; td[2] holds the name, td[3] the role.
    for row in rows[1:]:
        committee_name = self.get_node(row, "./td[2]").text_content().strip()
        if not committee_name:
            continue
        role = self.get_node(row, "./td[3]").text_content().strip()
        if committee_name not in self.committees:
            org = Organization(
                name=committee_name, chamber=chamber, classification="committee"
            )
            org.add_source(url)
            self.committees[committee_name] = org
        self.committees[committee_name].add_member(legislator.name, role=role)
def scrape_chamber(self, chamber, session):
    """Scrape WA committees for one chamber from the legislative web service."""
    url = "%s/GetActiveCommittees?biennium=%s" % (self._base_url, session)
    page = lxml.etree.fromstring(self.get(url).content)
    for node in xpath(page, "//wa:Committee"):
        agency = xpath(node, "string(wa:Agency)")
        if {"House": "lower", "Senate": "upper"}[agency] != chamber:
            continue
        name = xpath(node, "string(wa:Name)")
        # comm_id = xpath(node, "string(wa:Id)")
        # acronym = xpath(node, "string(wa:Acronym)")
        phone = xpath(node, "string(wa:Phone)")
        comm = Organization(name, chamber=chamber, classification="committee")
        comm.extras["phone"] = phone
        self.scrape_members(comm, agency)
        comm.add_source(url)
        if not comm._related:
            self.warning("empty committee: %s", name)
        else:
            yield comm
def get_organization(self, name, chamber):
    """Return the cached committee for (name, chamber), creating it on first use."""
    key = (name, chamber)
    try:
        return self._committees[key]
    except KeyError:
        org = Organization(name=name, chamber=chamber, classification="committee")
        self._committees[key] = org
        return org
def scrape_approp_subcommittees(self):
    """Scrape MI Senate Appropriations subcommittees and their members."""
    URL = "http://www.senate.michigan.gov/committee/appropssubcommittee.html"
    doc = lxml.html.fromstring(self.get(URL).text)
    for strong in doc.xpath("//strong"):
        com = Organization(
            name=strong.text.strip(),
            parent_id=self._senate_appropriations,
            classification="committee",
        )
        com.add_source(URL)
        legislators = strong.getnext().tail.replace("Senators", "").strip()
        for leg in re.split(", | and ", legislators):
            # Trailing markers encode the role; strip marker plus the space
            # before it.
            if leg.endswith("(C)"):
                role, leg = "chairman", leg[:-4]
            elif leg.endswith("(VC)"):
                role, leg = "vice chairman", leg[:-5]
            elif leg.endswith("(MVC)"):
                role, leg = "minority vice chairman", leg[:-6]
            else:
                role = "member"
            com.add_member(leg, role=role)
        yield com
def scrape_senate_committee(self, url):
    """Scrape one Senate committee roster page and yield its Organization."""
    doc = lxml.html.fromstring(self.get(url).text)
    headers = doc.xpath('(//div[@class="row"])[2]//h1')
    assert len(headers) == 1
    name = " ".join(headers[0].xpath("./text()"))
    name = re.sub(r"\s+Committee.*$", "", name)
    com = Organization(chamber="upper", name=name, classification="committee")
    for member in doc.xpath('(//div[@class="row"])[3]/div[1]/ul[1]/li'):
        text = member.text_content()
        member_name = member.xpath("./a/text()")[0].replace("Representative ", "")
        # Order matters: "Minority Vice" must be tested before plain "Vice".
        if "Committee Chair" in text:
            role = "chair"
        elif "Minority Vice" in text:
            role = "minority vice chair"
        elif "Vice" in text:
            role = "majority vice chair"
        else:
            role = "member"
        com.add_member(member_name, role=role)
    com.add_source(url)
    # Remember Appropriations so subcommittee scraping can parent to it.
    if com.name == "Appropriations":
        self._senate_appropriations = com
    yield com
def scrape_comm(self, url, chamber):
    """Scrape DE committees from the JSON listing endpoint."""
    data = self.post(url).json()["Data"]
    for item in data:
        comm_name = item["CommitteeName"]
        committee = Organization(
            name=comm_name, chamber=chamber, classification="committee"
        )
        chair_man = str(item["ChairName"])
        vice_chair = str(item["ViceChairName"])
        comm_id = item["CommitteeId"]
        comm_url = self.get_comm_url(chamber, comm_id, comm_name)
        members = self.scrape_member_info(comm_url)
        # The API uses the literal string "None" for vacant officer slots.
        if vice_chair != "None":
            committee.add_member(vice_chair, role="Vice-Chair")
        if chair_man != "None":
            committee.add_member(chair_man, role="Chairman")
        for member in members:
            # vice_chair and chair_man already added.
            if chair_man in member or vice_chair in member:
                continue
            member = " ".join(member.split())
            if member:
                committee.add_member(member)
        committee.add_source(comm_url)
        committee.add_source(url)
        yield committee
def scrape_page(self, link, chamber=None):
    """Scrape a committee roster page reached from the committee list."""
    page = self.lxmlize(link.attrib["href"])
    comName = link.text
    roles = {
        "Chair": "chair",
        "Vice Chair": "vice-chair",
        "Vice-Chair": "vice-chair",
    }
    committee = Organization(comName, chamber=chamber, classification="committee")
    committee.add_source(link.attrib["href"])
    rows = page.xpath('//div[@class="members"]/' + 'div[@class="roster-item"]')
    for member in rows:
        details = member.xpath('.//div[@class="member-details"]')[0]
        # This page does random weird things with whitespace to names
        person = " ".join(details.xpath("./h4")[0].text_content().strip().split())
        if not person:
            continue
        role_node = details.xpath('./span[@class="member-role"]')
        role = roles[role_node[0].text] if role_node else "member"
        committee.add_member(person, role=role)
    yield committee
def scrape_senate_comm(self):
    """Scrape Maine Senate standing committees and their members."""
    url = (
        "http://legislature.maine.gov/committee-information/"
        "standing-committees-of-the-senate"
    )
    doc = lxml.html.fromstring(self.get(url).text)
    for heading in doc.xpath("//p/strong"):
        committee = Organization(
            chamber="upper",
            name=heading.text.strip(":"),
            classification="committee",
        )
        committee.add_source(url)
        # Members follow the heading in successive paragraphs; stop at the
        # first paragraph containing no link.
        par = heading.getparent().getnext()
        while True:
            link = par.xpath("a")
            if len(link) == 0:
                break
            name, chair = self.senate_committee_pattern.search(link[0].text).groups()
            committee.add_member(name, "chair" if chair is not None else "member")
            par = par.getnext()
        yield committee
def _scrape_lower_special_committees(self):
    """Scrape the LA House special-committees accordion page."""
    url = "http://house.louisiana.gov/H_Cmtes/SpecialCommittees.aspx"
    page = self.lxmlize(url)
    committee_list = page.xpath('//div[@class="accordion"]')[0]
    for header in committee_list.xpath("./h3"):
        committee_name = self._normalize_committee_name(
            header.xpath("string()").strip()
        )
        # Joint committees belong to the whole legislature, not the House.
        if committee_name.startswith("Joint"):
            chamber = "legislature"
        else:
            chamber = "lower"
        committee = Organization(
            committee_name, chamber=chamber, classification="committee"
        )
        committee.add_source(url)
        member_rows = header.xpath(
            './following-sibling::div[@class="pane"]'
            '//tr[@class="linkStyle2"]'
        )
        for row in member_rows:
            member_name = self._normalize_member_name(
                row.xpath("normalize-space(string(./th[1]))")
            )
            member_role = self._normalize_member_role(
                row.xpath("normalize-space(string(./th[2]))")
            )
            committee.add_member(member_name, member_role)
        yield committee
def scrape_committee(self, name, url, chamber):
    """Scrape one committee roster page and return its Organization."""
    org = Organization(name=name, chamber=chamber, classification="committee")
    org.add_source(url)
    doc = lxml.html.fromstring(self.get(url).text)
    for leg in doc.xpath('//div[@id="members"]/div[@id="members"]/p/a/text()'):
        leg = leg.replace("Representative ", "").replace("Senator ", "").strip()
        role = "member"
        # A trailing parenthetical carries the officer role, e.g. "(Chair)".
        if " (" in leg:
            leg, raw_role = leg.split(" (")
            if "Vice-Chair" in raw_role:
                role = "vice-chair"
            elif "Co-Chair" in raw_role:
                role = "co-chair"
            elif "Chair" in raw_role:
                role = "chair"
            else:
                raise Exception("unknown role: %s" % raw_role)
        org.add_member(leg, role)
    return org
def _scrape_committee(self, committee_name, link, chamber):
    """Scrape individual committee page and add members"""
    page = self.get(link).text
    page = lxml.html.fromstring(page)
    page.make_links_absolute(link)
    # A "Committee" breadcrumb link is only present on subcommittee pages.
    is_subcommittee = bool(page.xpath('//li/a[text()="Committee"]'))
    if is_subcommittee:
        # All TN subcommittees are just the name of the parent committee with " Subcommittee"
        # at the end
        parent_committee_name = re.sub(r"\s*(Study )?Subcommittee\s*", "", committee_name)
        # Parent committees are scraped first, so their ids are already in
        # self.parents (filled in the else-branch below).
        com = Organization(
            committee_name,
            classification="committee",
            parent_id=self.parents[parent_committee_name],
        )
    else:
        com = Organization(committee_name, chamber=chamber, classification="committee")
        # Remember this committee's id so later subcommittees can parent to it.
        self.parents[committee_name] = com._id
    OFFICER_SEARCH = ('//h2[contains(text(), "Committee Officers")]/'
                      "following-sibling::div/ul/li/a")
    MEMBER_SEARCH = ('//h2[contains(text(), "Committee Members")]/'
                     "following-sibling::div/ul/li/a")
    for a in page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH):
        # Names may be split between the anchor's own text and a nested span.
        member_name = " ".join([
            x.strip()
            for x in a.xpath("text()") + a.xpath("span/text()")
            if x.strip()
        ])
        # An optional <small> element after the name carries the role.
        role = a.xpath("small")
        if role:
            role = role[0].xpath("text()")[0].strip()
        else:
            role = "member"
        # Skip seats listed as vacant.
        if "(Vacant)" in role:
            continue
        com.add_member(member_name, role)
    com.add_link(link)
    com.add_source(link)
    return com
def scrape(self, session=None):
    """Scrape all WY committees for a session from the LSO JSON API.

    Yields one Organization per committee, with WY-specific identifiers
    and extras attached.
    """
    if session is None:
        session = self.latest_session()
        self.info("no session specified, using %s", session)
    # com_types = ['J', 'SE', 'O']
    # base_url = 'https://wyoleg.gov/LsoService/api/committeeList/2018/J'
    url = "https://wyoleg.gov/LsoService/api/committees/{}".format(session)
    response = self.get(url)
    coms_json = json.loads(response.content.decode("utf-8"))
    for row in coms_json:
        # Fetch the per-committee detail payload.
        com_url = "https://wyoleg.gov/LsoService/api/committeeDetail/{}/{}".format(
            session, row["ownerID"])
        com_response = self.get(com_url)
        com = json.loads(com_response.content.decode("utf-8"))
        # WY doesn't seem to have any house/senate only committees that I can find
        committee = Organization(
            name=com["commName"], chamber="legislature", classification="committee")
        for member in com["commMembers"]:
            role = "chairman" if member["chairman"] == "Chairman" else "member"
            committee.add_member(member["name"], role)
        # some WY committees have non-legislators appointed to the member by the Governor
        # but the formatting is super inconsistent
        if com["otherMembers"]:
            committee.extras["other_members"] = com["otherMembers"]
        committee.extras["wy_id"] = com["commID"]
        committee.extras["wy_code"] = com["ownerID"]
        committee.extras["wy_type_code"] = com["type"]
        committee.extras["budget"] = com["budget"]
        if com["statAuthority"]:
            committee.extras["statutory_authority"] = com["statAuthority"]
        if com["number"]:
            committee.extras["seat_distribution"] = com["number"]
        committee.add_identifier(scheme="WY Committee ID",
                                 identifier=str(com["commID"]))
        committee.add_identifier(scheme="WY Committee Code",
                                 identifier=str(com["ownerID"]))
        if com["description"]:
            committee.add_identifier(scheme="Common Name",
                                     identifier=com["description"])
        # Source the human-readable page rather than the API endpoint.
        source_url = "http://wyoleg.gov/Committees/{}/{}".format(
            session, com["ownerID"])
        committee.add_source(source_url)
        yield committee
def scrape_senate_committee(self, url):
    """Scrape an upper-chamber committee bio page and yield its Organization."""
    doc = lxml.html.fromstring(self.get(url).text)
    com_name = doc.xpath('//a[contains(@href, "committee_bio")]/text()')[0]
    # An h4-level committee_bio link indicates a parent committee.
    parent = doc.xpath('//h4//a[contains(@href, "committee_bio")]/text()')
    if parent:
        self.log("%s is subcommittee of %s", com_name, parent[0])
        com = Organization(
            com_name,
            chamber="upper",
            classification="committee",
            parent_id={"name": parent[0], "classification": "upper"},
        )
    else:
        com = Organization(com_name, chamber="upper", classification="committee")
    label_to_position = {
        "Chair:": "chair",
        "Vice Chair:": "vice chair",
        "Ranking Minority Member:": "ranking minority member",
    }
    for link in doc.xpath('//div[@id="members"]//a[contains(@href, "member_bio")]'):
        name = link.text_content().strip()
        if not name:
            continue
        label = link.xpath(".//preceding-sibling::b/text()")
        if not label:
            position = "member"
        elif label[0] in label_to_position:
            position = label_to_position[label[0]]
        else:
            raise ValueError("unknown position: %s" % label[0])
        com.add_member(name.split(" (")[0].strip(), position)
    com.add_source(url)
    yield com
def test_org_add_post():
    """Test that we can hack posts in on the fly."""
    orga = Organization("name", classification="committee")
    orga.add_source(url="http://example.com")
    orga.validate()
    orga.add_post("Human Readable Name", "Chef")
    # add_post appends to _related; verify role and label round-trip.
    assert orga._related[0].role == "Chef"
    assert orga._related[0].label == "Human Readable Name"
def scrape_lower_committee(self, name, parent, url):
    """Scrape a lower-chamber (or joint) committee roster page and yield it."""
    page = self.curl_lxmlize(url)

    if "Joint" in name or (parent and "Joint" in parent):
        chamber = "joint"
    else:
        chamber = "lower"

    subcomm = None
    if parent:
        comm = Organization(name=parent, chamber=chamber, classification="committee")
        # BUG FIX: parent_id expects the parent's id; previously the
        # Organization object itself was passed.
        subcomm = Organization(
            name=name, parent_id=comm._id, classification="committee"
        )
    else:
        comm = Organization(name=name, chamber=chamber, classification="committee")
    comm.add_source(url)

    xpath = "//a[contains(@href, 'District')]"
    for link in page.xpath(xpath):
        member = link.xpath("string()").strip()
        member = re.sub(r"\s+", " ", member)
        if not member or member == "House District Maps":
            continue
        match = re.match(r"((Co-)?(Vice )?Chair)?Rep\. ([^\(]+)", member)
        member = match.group(4).strip()
        role = match.group(1) or "member"
        member = member.replace("Representative ", "")
        comm.add_member(member, role.lower())

    if not comm._related:
        # BUG FIX: "subcomm" was referenced unconditionally here, raising
        # NameError (instead of the intended Exception) whenever a
        # top-level committee had no members.
        if subcomm is not None and subcomm.name == "test":
            # Whoopsie, prod data.
            return
        raise Exception(
            "no members for %s (%s)"
            % (comm.name, subcomm.name if subcomm is not None else name)
        )

    yield comm
def _scrape_standing_committees(self):
    """Scrapes the Standing Committees page of the Nebraska state legislature."""
    main_url = (
        "http://www.nebraskalegislature.gov/committees/standing-committees.php"
    )
    page = self.lxmlize(main_url)

    committee_nodes = self.get_nodes(
        page,
        '//a[@class="accordion-switch"][contains(text(), "Standing Committees")]'
        '/ancestor::div[@class="panel panel-leg"]//div[@class="list-group"]'
        '/a[@class="list-group-item"]',
    )

    for committee_node in committee_nodes:
        committee_page_url = committee_node.attrib["href"]
        committee_page = self.lxmlize(committee_page_url)

        name_text = self.get_node(
            committee_page,
            '//div[@class="container view-front"]/div[@class="row"]/'
            'div[@class="col-sm-6 col-md-7"]/h1/text()[normalize-space()]',
        )
        # Drop the trailing word of the heading; join the rest back together
        # (replaces a manual concatenation loop).
        committee_name = " ".join(name_text.split()[0:-1])

        org = Organization(
            name=committee_name, chamber="legislature", classification="committee"
        )

        members = self.get_nodes(
            committee_page,
            '//div[@class="col-sm-4 col-md-3 ltc-col-right"][1]/'
            'div[@class="block-box"][1]/ul[@class="list-unstyled '
            'feature-content"]/li/a/text()[normalize-space()]',
        )

        for member in members:
            # BUG FIX: the old pattern r"\Sen\.\s+" used "\S" (any
            # non-whitespace char), so names like "Ben. " would also have
            # been mangled.  Match the literal "Sen." title instead.
            member_name = re.sub(r"Sen\.\s+", "", member)
            member_name = re.sub(r", Chairperson", "", member_name).strip()
            if "Chairperson" in member:
                member_role = "Chairperson"
            else:
                member_role = "member"
            org.add_member(member_name, member_role)

        org.add_source(main_url)
        org.add_source(committee_page_url)

        yield org
def scrape_comms(self, chamber, ctype):
    """Yield one Organization per committee of the given type in a chamber."""
    for anchor in self.scrape_comm_list(ctype):
        href = anchor.attrib["href"]
        title = clean(anchor.text_content())
        self.info("url " + href)
        committee = Organization(
            chamber=chamber, name=title, classification="committee"
        )
        self.add_members(committee, href)
        committee.add_source(href)
        yield committee
def test_vote_event_org_obj():
    """A VoteEvent built from an Organization object stores that org's id."""
    org = Organization("something", classification="committee")
    vote = VoteEvent(
        legislative_session="2009",
        motion_text="passage of the bill",
        start_date="2009-01-07",
        result="pass",
        classification="passage",
        organization=org,
    )
    assert vote.organization == org._id
def scrape_current(self, chamber):
    """Scrape KS committees for one chamber from the ksapi JSON feeds.

    For the upper chamber this also pulls "special" committees, which are
    emitted with chamber="legislature".
    """
    if chamber == "upper":
        chambers = ["special_committees", "senate_committees"]
    else:
        chambers = ["house_committees"]
    committee_request = self.get(ksapi.url + "ctte/").text
    committee_json = json.loads(committee_request)
    for com_type in chambers:
        committees = committee_json["content"][com_type]
        for committee_data in committees:
            # set to joint if we are using the special_committees
            com_chamber = (
                "legislature" if com_type == "special_committees" else chamber)
            committee = Organization(
                committee_data["TITLE"],
                chamber=com_chamber,
                classification="committee",
            )
            com_url = ksapi.url + "ctte/%s/" % committee_data["KPID"]
            try:
                detail_json = self.get(com_url).text
            except scrapelib.HTTPError:
                # Best-effort: skip committees whose detail page is broken.
                self.warning("error fetching committee %s" % com_url)
                continue
            details = json.loads(detail_json)["content"]
            for chair in details["CHAIR"]:
                if chair.get("FULLNAME", None):
                    chair_name = chair["FULLNAME"]
                else:
                    # Fall back to deriving a display name from the KPID slug.
                    chair_name = self.parse_kpid(chair["KPID"])
                    self.warning("no FULLNAME for %s", chair["KPID"])
                committee.add_member(chair_name, "chairman")
            for vicechair in details["VICECHAIR"]:
                committee.add_member(vicechair["FULLNAME"], "vice-chairman")
            for rankedmember in details["RMMEM"]:
                committee.add_member(rankedmember["FULLNAME"], "ranking member")
            for member in details["MEMBERS"]:
                committee.add_member(member["FULLNAME"])
            # Only emit committees that actually gained members.
            if not committee._related:
                self.warning("skipping blank committee %s" %
                             committee_data["TITLE"])
            else:
                committee.add_source(com_url)
                yield committee