def scrape(self, window=28, matter_ids=None):
    '''By default, scrape board reports updated in the last 28 days.
    Optionally specify a larger or smaller window of time from which to
    scrape updates, or specific matters to scrape.

    Note that passing a value for :matter_ids supersedes the value of
    :window, such that the given matters will be scraped regardless of
    when they were updated.

    Optional parameters

    :window (numeric) - Amount of time for which to scrape updates, e.g.
    a window of 7 will scrape legislation updated in the last week. Pass
    a window of 0 to scrape all legislation.

    :matter_ids (str) - Comma-separated list of matter IDs to scrape
    '''
    if matter_ids:
        matters = [self.matter(matter_id) for matter_id in matter_ids.split(',')]
        matters = filter(None, matters)  # Skip matters that are not yet in Legistar
    elif float(window):  # Support for partial days, i.e., window=0.15
        n_days_ago = datetime.datetime.utcnow() - datetime.timedelta(float(window))
        matters = self.matters(n_days_ago)
    else:
        # Scrape all matters, including those without a last-modified date
        matters = self.matters()

    n_days_ago = datetime.datetime.utcnow() - datetime.timedelta(float(window))

    for matter in matters:
        # Skip this bill, until Metro cleans up duplicate in Legistar API
        if matter['MatterFile'] == '2017-0447':
            continue

        matter_id = matter['MatterId']
        date = matter['MatterIntroDate']
        title = matter['MatterTitle']
        identifier = matter['MatterFile']

        if not all((date, title, identifier)):
            continue

        # Do not scrape private bills introduced before this timestamp.
        if self._is_restricted(matter) and (date < self.START_DATE_PRIVATE_SCRAPE):
            continue

        bill_session = self.session(self.toTime(date))
        bill_type = BILL_TYPES[matter['MatterTypeName']]

        if identifier.startswith('S'):
            alternate_identifiers = [identifier]
            identifier = identifier[1:]
        else:
            alternate_identifiers = []

        bill = Bill(identifier=identifier,
                    legislative_session=bill_session,
                    title=title,
                    classification=bill_type,
                    from_organization={"name": "Board of Directors"})

        # The Metro scraper scrapes private bills.
        # However, we do not want to capture significant data about private bills,
        # other than the value of the helper function `_is_restricted` and a
        # last modified timestamp.
        # We yield private bills early, wipe data from previously imported
        # once-public bills, and include only data *required* by the pupa schema.
        # https://github.com/opencivicdata/pupa/blob/master/pupa/scrape/schemas/bill.py
        bill.extras = {'restrict_view': self._is_restricted(matter)}

        # Add API source early.
        # Private bills should have this url for debugging.
        legistar_api = self.BASE_URL + '/matters/{0}'.format(matter_id)
        bill.add_source(legistar_api, note='api')

        if self._is_restricted(matter):
            # required fields
            bill.title = 'Restricted View'

            # wipe old data
            bill.extras['plain_text'] = ''
            bill.extras['rtf_text'] = ''
            bill.sponsorships = []
            bill.related_bills = []
            bill.versions = []
            bill.documents = []
            bill.actions = []

            yield bill
            continue

        legistar_web = matter['legistar_url']
        bill.add_source(legistar_web, note='web')

        for identifier in alternate_identifiers:
            bill.add_identifier(identifier)

        for action, vote in self.actions(matter_id):
            act = bill.add_action(**action)

            if action['description'] == 'Referred':
                body_name = matter['MatterBodyName']
                act.add_related_entity(
                    body_name,
                    'organization',
                    entity_id=_make_pseudo_id(name=body_name))

            result, votes = vote

            if result:
                vote_event = VoteEvent(
                    legislative_session=bill.legislative_session,
                    motion_text=action['description'],
                    organization=action['organization'],
                    classification=None,
                    start_date=action['date'],
                    result=result,
                    bill=bill)

                vote_event.add_source(legistar_web)
                vote_event.add_source(legistar_api + '/histories')

                for vote in votes:
                    try:
                        raw_option = vote['VoteValueName'].lower()
                    except AttributeError:
                        raw_option = None

                    clean_option = self.VOTE_OPTIONS.get(raw_option, raw_option)
                    vote_event.vote(clean_option, vote['VotePersonName'].strip())

                yield vote_event

        for sponsorship in self.sponsorships(matter_id):
            bill.add_sponsorship(**sponsorship)

        for topic in self.topics(matter_id):
            bill.add_subject(topic['MatterIndexName'].strip())

        for relation in self.relations(matter_id):
            try:
                # Get data (i.e., json) for the related bill.
                # Then, we can find the 'MatterFile' (i.e., identifier) and the
                # 'MatterIntroDate' (i.e., to determine its legislative session).
                # Sometimes, the related bill does not yet exist: in this case,
                # throw an error, and continue.
                related_bill = self.endpoint(
                    '/matters/{0}', relation['MatterRelationMatterId'])
            except scrapelib.HTTPError:
                continue
            else:
                date = related_bill['MatterIntroDate']
                related_bill_session = self.session(self.toTime(date))
                identifier = related_bill['MatterFile']
                bill.add_related_bill(
                    identifier=identifier,
                    legislative_session=related_bill_session,
                    relation_type='companion')
                # Currently, the relation type for bills can be one of a few
                # possibilities:
                # https://github.com/opencivicdata/python-opencivicdata/blob/master/opencivicdata/common.py#L104
                # Metro simply understands these as related files, suggesting
                # that they receive a relation of 'companion'.

        bill.add_version_link(
            'Board Report',
            'https://metro.legistar.com/ViewReport.ashx?M=R&N=TextL5&GID=557&ID={}&GUID=LATEST&Title=Board+Report'.format(matter_id),
            media_type="application/pdf")

        for attachment in self.attachments(matter_id):
            if attachment['MatterAttachmentName']:
                bill.add_document_link(
                    attachment['MatterAttachmentName'],
                    attachment['MatterAttachmentHyperlink'].strip(),
                    media_type="application/pdf")

        bill.extras['local_classification'] = matter['MatterTypeName']

        matter_version_value = matter['MatterVersion']
        text = self.text(matter_id, matter_version_value)

        if text:
            if text['MatterTextPlain']:
                bill.extras['plain_text'] = text['MatterTextPlain']

            if text['MatterTextRtf']:
                bill.extras['rtf_text'] = text['MatterTextRtf'].replace(u'\u0000', '')

        yield bill
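# A minimal standalone sketch (not part of the scraper class) of how the
# :window argument above maps to a cutoff timestamp: fractional values work
# because datetime.timedelta accepts fractional days, and a window of 0 is
# treated as "no cutoff". The function name here is illustrative only.
import datetime


def window_cutoff(window):
    """Return the UTC datetime marking the start of the scrape window,
    or None when window is 0 (i.e., scrape all legislation)."""
    if not float(window):
        return None
    return datetime.datetime.utcnow() - datetime.timedelta(float(window))


# window_cutoff(7)     -> roughly one week ago
# window_cutoff(0.15)  -> roughly 3.6 hours ago
# window_cutoff(0)     -> None (no last-modified filter)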
def scrape_bills(self):
    """
    Does the following

    1) Scrapes bill data from unitedstates project and saves the data to
       path specified in UnitedStates module
    2) Iterates over bill data and converts each one to an OCD-compliant bill model.
    3) Yields the OCD-compliant bill model instance

    @return: yield Bill instance
    """
    # run scraper first to pull in all the bill data
    self.run_unitedstates_bill_scraper()

    # iterate over all the files and build and yield Bill objects
    for filename in find_files(settings.SCRAPED_DATA_DIR,
                               '.*[a-z]*\/[a-z]*[0-9]*\/data\.json'):
        try:
            with open(filename) as json_file:
                json_data = json.load(json_file)

                # Initialize Object
                bill = Bill(
                    self.TYPE_MAP[json_data['bill_type']]['canonical'] + ' ' + json_data['number'],
                    json_data['congress'],
                    json_data['official_title'],
                    chamber=self.TYPE_MAP[json_data['bill_type']]['chamber'])

                # Basics
                bill.type = [json_data['bill_type']]
                bill.subject = json_data['subjects']
                bill.add_summary(json_data['summary']['as'],
                                 json_data['summary']['text'],
                                 json_data['summary']['date'])

                # Common Fields
                bill.sources = [{'url': json_data['url'], 'note': 'all'}]

                # Other/Related Bills
                bill.other_titles = [{'note': t['type'], 'title': t['title']}
                                     for t in json_data['titles']]

                # change value of relationship_type to 'type' field from
                # json_data when permitted by schema
                bill.related_bills = [{'session': b['session'],
                                       'name': b['name'],
                                       'relationship_type': 'companion'}
                                      for b in json_data['related_bills']]

                # add primary sponsor
                bill.add_sponsorship_by_identifier(
                    json_data['sponsor']['name'], 'person', 'person', True,
                    scheme='thomas_id',
                    identifier=json_data['sponsor']['thomas_id'],
                    chamber=self.TYPE_MAP[json_data['bill_type']]['chamber'])

                # add cosponsors
                for cs in json_data['cosponsors']:
                    bill.add_sponsorship_by_identifier(
                        cs['name'], 'person', 'person', False,
                        scheme='thomas_id',
                        identifier=cs['thomas_id'],
                        chamber=self.TYPE_MAP[json_data['bill_type']]['chamber'])

                # add introduced_at and actions
                bill.actions.append({'date': json_data['introduced_at'],
                                     'type': 'introduced',
                                     'description': 'date of introduction',
                                     'actor': self.TYPE_MAP[json_data['bill_type']]['chamber'],
                                     'related_entities': []})

                for action in json_data['actions']:
                    bill.actions.append({'date': action['acted_at'],
                                         'type': [action['type']],
                                         'description': action['text'],
                                         'actor': self.TYPE_MAP[json_data['bill_type']]['chamber'],
                                         'related_entities': []})

                # add bill versions
                for version_path in find_files(
                        os.path.join(settings.SCRAPED_DATA_DIR, 'data',
                                     bill.session, 'bills',
                                     json_data['bill_type'],
                                     json_data['bill_type'] + json_data['number'],
                                     'text-versions'),
                        '*\.json'):
                    try:
                        with open(version_path) as version_file:
                            version_json_data = json.load(version_file)
                            for k, v in version_json_data['urls'].iteritems():
                                bill.versions.append({
                                    'date': version_json_data['issued_on'],
                                    'type': version_json_data['version_code'],
                                    'name': self.VERSION_MAP[version_json_data['version_code']],
                                    'links': [{'mimetype': k, 'url': v}]})
                    except IOError:
                        print("Unable to open or parse file with path " + version_path)
                        continue

                yield bill
        except IOError:
            print("Unable to open or parse file with path " + filename)
            continue
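# A minimal sketch of the name-building step above, using a hypothetical
# subset of TYPE_MAP. The unitedstates project stores bill types as lowercase
# codes (e.g. 'hr', 's'); the scraper joins the canonical display form with
# the bill number to produce the name passed to Bill(). The mapping values
# below are illustrative, not the module's actual TYPE_MAP.
EXAMPLE_TYPE_MAP = {
    'hr': {'canonical': 'HR', 'chamber': 'lower'},
    's': {'canonical': 'S', 'chamber': 'upper'},
}


def example_bill_name(bill_type, number, type_map=EXAMPLE_TYPE_MAP):
    """Build the display name used when initializing the Bill object."""
    return type_map[bill_type]['canonical'] + ' ' + number


# example_bill_name('hr', '1234') -> 'HR 1234'
# example_bill_name('s', '20')    -> 'S 20'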
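# A standalone sketch of how a text-versions JSON file (as produced by the
# unitedstates project) is flattened into version entries in scrape_bills
# above: each entry in the 'urls' mapping becomes one version record with a
# single mimetype/url link. The sample data, URLs, and VERSION_MAP entry are
# illustrative, not taken from a real bill.
EXAMPLE_VERSION_MAP = {'ih': 'Introduced in House'}

example_version_json = {
    'issued_on': '2013-01-03',
    'version_code': 'ih',
    'urls': {
        'text/xml': 'https://example.com/BILLS-113hr1ih.xml',
        'application/pdf': 'https://example.com/BILLS-113hr1ih.pdf',
    },
}

example_versions = [
    {'date': example_version_json['issued_on'],
     'type': example_version_json['version_code'],
     'name': EXAMPLE_VERSION_MAP[example_version_json['version_code']],
     'links': [{'mimetype': mimetype, 'url': url}]}
    for mimetype, url in example_version_json['urls'].items()
]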