def make_objects(item, court, sha1_hash, content):
    blocked = item["blocked_statuses"]
    if blocked:
        date_blocked = date.today()
    else:
        date_blocked = None

    case_name_short = item.get("case_name_shorts") or cnt.make_case_name_short(
        item["case_names"]
    )
    docket = Docket(
        docket_number=item.get("docket_numbers", ""),
        case_name=item["case_names"],
        case_name_short=case_name_short,
        court=court,
        blocked=blocked,
        date_blocked=date_blocked,
        date_argued=item["case_dates"],
        source=item.get("source") or Docket.SCRAPER,
    )
    audio_file = Audio(
        judges=item.get("judges", ""),
        source=item.get("cluster_source") or "C",
        case_name=item["case_names"],
        case_name_short=case_name_short,
        sha1=sha1_hash,
        download_url=item["download_urls"],
        blocked=blocked,
        date_blocked=date_blocked,
    )
    error = False
    try:
        cf = ContentFile(content)
        extension = get_extension(content)
        if extension not in [".mp3", ".wma"]:
            extension = "." + item["download_urls"].lower().rsplit(".", 1)[1]
        # See bitbucket issue #215 for why this must be lower-cased.
        file_name = trunc(item["case_names"].lower(), 75) + extension
        audio_file.file_with_date = docket.date_argued
        audio_file.local_path_original_file.save(file_name, cf, save=False)
    except:
        msg = (
            "Unable to save binary to disk. Deleted audio file: %s.\n "
            "%s" % (item["case_names"], traceback.format_exc())
        )
        logger.critical(msg.encode("utf-8"))
        ErrorLog(log_level="CRITICAL", court=court, message=msg).save()
        error = True
    return docket, audio_file, error
def setUp(self) -> None:
    self.court = Court.objects.get(pk="test")
    self.docket = Docket(case_name="Docket", court=self.court)
    self.opinioncluster = OpinionCluster(
        case_name="Hotline Bling",
        docket=self.docket,
        date_filed=datetime.date(2015, 12, 14),
    )
    self.opinion = Opinion(
        cluster=self.opinioncluster, type="Lead Opinion"
    )
def setUp(self):
    self.court = Court.objects.get(pk='test')
    self.docket = Docket(case_name=u'Docket', court=self.court)
    self.opinioncluster = OpinionCluster(
        case_name=u'Hotline Bling',
        docket=self.docket,
        date_filed=datetime.date(2015, 12, 14),
    )
    self.opinion = Opinion(
        cluster=self.opinioncluster,
        type='Lead Opinion',
    )
def make_objects(self, item, court, sha1_hash, content):
    blocked = item['blocked_statuses']
    if blocked:
        date_blocked = date.today()
    else:
        date_blocked = None

    case_name_short = (item.get('case_name_shorts') or
                       self.cnt.make_case_name_short(item['case_names']))
    docket = Docket(
        docket_number=item.get('docket_numbers', ''),
        case_name=item['case_names'],
        case_name_short=case_name_short,
        court=court,
        blocked=blocked,
        date_blocked=date_blocked,
        date_argued=item['case_dates'],
        source=Docket.SCRAPER,
    )
    audio_file = Audio(
        judges=item.get('judges', ''),
        source='C',
        case_name=item['case_names'],
        case_name_short=case_name_short,
        sha1=sha1_hash,
        download_url=item['download_urls'],
        blocked=blocked,
        date_blocked=date_blocked,
    )
    error = False
    try:
        cf = ContentFile(content)
        extension = get_extension(content)
        if extension not in ['.mp3', '.wma']:
            extension = '.' + item['download_urls'].lower().rsplit('.', 1)[1]
        # See bitbucket issue #215 for why this must be lower-cased.
        file_name = trunc(item['case_names'].lower(), 75) + extension
        audio_file.file_with_date = docket.date_argued
        audio_file.local_path_original_file.save(file_name, cf, save=False)
    except:
        msg = ('Unable to save binary to disk. Deleted audio file: %s.\n '
               '%s' % (item['case_names'], traceback.format_exc()))
        logger.critical(msg.encode('utf-8'))
        ErrorLog(log_level='CRITICAL', court=court, message=msg).save()
        error = True
    return docket, audio_file, error
def find_docket_object(court_id, pacer_case_id, docket_number):
    """Attempt to find the docket based on the parsed docket data. If cannot
    be found, create a new docket. If multiple are found, return all of them.

    :param court_id: The CourtListener court_id to lookup
    :param pacer_case_id: The PACER case ID for the docket
    :param docket_number: The docket number to lookup.
    :returns a tuple. The first item is either a QuerySet of all the items
    found if more than one is identified or just the docket found if only one
    is identified. The second item in the tuple is the count of items found
    (this number is zero if we had to create a new docket item).
    """
    # Attempt several lookups of decreasing specificity. Note that
    # pacer_case_id is required for Docket and Docket History uploads.
    d = None
    docket_number_core = make_docket_number_core(docket_number)
    for kwargs in [
        {
            "pacer_case_id": pacer_case_id,
            "docket_number_core": docket_number_core,
        },
        {"pacer_case_id": pacer_case_id},
        {"pacer_case_id": None, "docket_number_core": docket_number_core},
    ]:
        ds = Docket.objects.filter(court_id=court_id, **kwargs)
        count = ds.count()
        if count == 0:
            continue  # Try a looser lookup.
        if count == 1:
            d = ds[0]
            break  # Nailed it!
        elif count > 1:
            return ds, count  # Problems. Let caller decide what to do.

    if d is None:
        # Couldn't find a docket. Make a new one.
        d = Docket(
            source=Docket.RECAP,
            pacer_case_id=pacer_case_id,
            court_id=court_id,
        )
        return d, 0

    return d, 1
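# A minimal usage sketch for the lookup above (not from the original source;
# the court and case identifiers are invented for illustration). Because the
# function returns a (docket-or-queryset, count) tuple, callers must branch on
# the count before treating the first item as a single Docket:
d, count = find_docket_object("cand", "12345", "3:18-cv-01234")
if count > 1:
    # More than one match: `d` is a QuerySet, and the caller decides how to
    # disambiguate (e.g. take the oldest by date_created).
    d = d.earliest("date_created")
elif count == 0:
    # A brand-new, unsaved Docket was returned; it still needs to be saved.
    d.save()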
def import_law_box_case(case_path):
    """Open the file, get its contents, convert to XML and extract the meta data.

    Return a document object for saving in the database
    """
    raw_text = open(case_path).read()
    clean_html_tree, complete_html_tree, clean_html_str, body_text = (
        get_html_from_raw_text(raw_text)
    )

    sha1 = hashlib.sha1(clean_html_str).hexdigest()
    citations = get_citations_from_tree(complete_html_tree, case_path)
    judges = get_judge(clean_html_tree, case_path)
    court = get_court_object(clean_html_tree, citations, case_path, judges)

    doc = Document(
        source='L',
        sha1=sha1,
        # we clear this field later, putting the value into html_lawbox.
        html=clean_html_str,
        date_filed=get_date_filed(clean_html_tree, citations=citations,
                                  case_path=case_path, court=court),
        precedential_status=get_precedential_status(),
        judges=judges,
        download_url=case_path,
    )

    docket = Docket(
        docket_number=get_docket_number(clean_html_tree,
                                        case_path=case_path, court=court),
        case_name=get_case_name(complete_html_tree, case_path),
        court=court,
    )

    # Necessary for dup_finder.
    path = '//p/text()'
    doc.body_text = ' '.join(clean_html_tree.xpath(path))

    # Add the dict of citations to the object as its attributes.
    citations_as_dict = map_citations_to_models(citations)
    for k, v in citations_as_dict.items():
        setattr(doc, k, v)

    doc.docket = docket

    return doc
def test_save_old_opinion(self): """Can we save opinions older than 1900?""" docket = Docket(case_name=u"Blah", court_id='test', source=Docket.DEFAULT) docket.save() self.oc.date_filed = date(1899, 1, 1) self.oc.save() try: cf = ContentFile(StringIO.StringIO('blah').read()) self.o.file_with_date = date(1899, 1, 1) self.o.local_path.save('file_name.pdf', cf, save=False) self.o.save(index=False) except ValueError as e: raise ValueError("Unable to save a case older than 1900. Did you " "try to use `strftime`...again?")
def make_objects(
    item: Dict[str, Any],
    court: Court,
    sha1_hash: str,
    content: str,
) -> Tuple[Docket, Audio]:
    blocked = item["blocked_statuses"]
    if blocked:
        date_blocked = date.today()
    else:
        date_blocked = None

    case_name_short = item.get("case_name_shorts") or cnt.make_case_name_short(
        item["case_names"]
    )

    docket = Docket(
        docket_number=item.get("docket_numbers", ""),
        case_name=item["case_names"],
        case_name_short=case_name_short,
        court=court,
        blocked=blocked,
        date_blocked=date_blocked,
        date_argued=item["case_dates"],
        source=item.get("source") or Docket.SCRAPER,
    )

    audio_file = Audio(
        judges=item.get("judges", ""),
        source=item.get("cluster_source") or "C",
        case_name=item["case_names"],
        case_name_short=case_name_short,
        sha1=sha1_hash,
        download_url=item["download_urls"],
        blocked=blocked,
        date_blocked=date_blocked,
    )

    cf = ContentFile(content)
    extension = get_extension(content)
    if extension not in [".mp3", ".wma"]:
        extension = "." + item["download_urls"].lower().rsplit(".", 1)[1]
    file_name = trunc(item["case_names"].lower(), 75) + extension
    audio_file.file_with_date = docket.date_argued
    audio_file.local_path_original_file.save(file_name, cf, save=False)
    return docket, audio_file
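# A hypothetical call to the typed make_objects() above, showing the item keys
# it reads. The concrete values are invented for illustration only, and
# `court`, `sha1_hash`, and `content` are assumed to come from the caller's
# scrape step:
item = {
    "blocked_statuses": False,
    "case_names": "Lorem v. Ipsum",
    "case_name_shorts": "Lorem",
    "docket_numbers": "12-3456",
    "case_dates": date(2020, 1, 15),
    "judges": "Smith",
    "download_urls": "https://example.com/argument.mp3",
}
docket, audio_file = make_objects(item, court, sha1_hash, content)
# Neither object has been saved yet; the caller is expected to persist them.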
def test_auto_blocking_small_bankr_docket(self):
    """Do we properly set small bankruptcy dockets to private?"""
    d = Docket()
    d.court = Court.objects.get(pk='akb')
    blocked, date_blocked = get_blocked_status(d)
    self.assertTrue(blocked, msg="Bankruptcy dockets with few entries "
                                 "should be blocked.")
    blocked, date_blocked = get_blocked_status(d, count_override=501)
    self.assertFalse(blocked, msg="Bankruptcy dockets with many entries "
                                  "should not be blocked")
    # This should stay blocked even though it's a big bankruptcy docket.
    d.blocked = True
    blocked, date_blocked = get_blocked_status(d, count_override=501)
    self.assertTrue(blocked, msg="Bankruptcy dockets that start blocked "
                                 "should stay blocked.")
def setUp(self):
    docket = Docket(case_name=u'foo', court=Court.objects.get(pk='test'),
                    source=Docket.DEFAULT)
    docket.save()
    # Must be more than a year old for all tests to be runnable.
    last_month = now().date() - timedelta(days=400)
    self.doc_cluster = OpinionCluster(case_name=u"foo", docket=docket,
                                      date_filed=last_month)
    self.doc_cluster.save(index=False)
    opinion = Opinion.objects.create(cluster=self.doc_cluster,
                                     type='Lead Opinion')
    opinion2 = Opinion.objects.create(cluster=self.doc_cluster,
                                      type='Concurrence')
    OpinionsCited.objects.create(citing_opinion=opinion2,
                                 cited_opinion=opinion)

    # Scrape the audio "site" and add its contents
    site = test_oral_arg_scraper.Site().parse()
    OralArgumentCommand().scrape_court(site, full_crawl=True)
def test_save_old_opinion(self): """Can we save opinions older than 1900?""" court = Court.objects.get(pk='test') docket = Docket(case_name=u"Blah", court=court, source=Docket.DEFAULT) docket.save() oc = OpinionCluster( case_name=u"Blah", docket=docket, date_filed=datetime.date(1899, 1, 1), ) oc.save() o = Opinion(cluster=oc, type='Lead Opinion') try: cf = ContentFile(StringIO.StringIO('blah').read()) o.file_with_date = datetime.date(1899, 1, 1) o.local_path.save('file_name.pdf', cf, save=False) o.save(index=True) except ValueError as e: raise ValueError("Unable to save a case older than 1900. Did you " "try to use `strftime`...again?")
def save(self, debug):
    """Save the item to the database, updating any existing items.

    Returns None if an error occurs.
    """
    required_fields = ['case_name', 'date_filed']
    for field in required_fields:
        if not getattr(self, field):
            print " Missing required field: %s" % field
            return None

    try:
        d = Docket.objects.get(
            Q(pacer_case_id=self.pacer_case_id) |
            Q(docket_number=self.docket_number),
            court=self.court,
        )
        # Add RECAP as a source if it's not already.
        if d.source in [Docket.DEFAULT, Docket.SCRAPER]:
            d.source = Docket.RECAP_AND_SCRAPER
        elif d.source == Docket.COLUMBIA:
            d.source = Docket.COLUMBIA_AND_RECAP
        elif d.source == Docket.COLUMBIA_AND_SCRAPER:
            d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER
    except Docket.DoesNotExist:
        d = Docket(source=Docket.RECAP)
    except Docket.MultipleObjectsReturned:
        print " Got multiple results while attempting save."
        return None

    for attr, v in self.__dict__.items():
        setattr(d, attr, v)

    if not debug:
        d.save()
        print " Saved as Docket %s: https://www.courtlistener.com%s" % (
            d.pk, d.get_absolute_url())
    return d
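# The source-merging rule above recurs in several of the snippets in this
# collection. A hypothetical helper (not present in the original code) that
# captures the same bookkeeping in one place might look like this:
def add_recap_source(d):
    """Mark an existing docket as also having RECAP as a source."""
    if d.source in [Docket.DEFAULT, Docket.SCRAPER]:
        d.source = Docket.RECAP_AND_SCRAPER
    elif d.source == Docket.COLUMBIA:
        d.source = Docket.COLUMBIA_AND_RECAP
    elif d.source == Docket.COLUMBIA_AND_SCRAPER:
        d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER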
def setUp(self): self.court = Court.objects.get(pk="test") self.docket = Docket(case_name=u"Docket", court=self.court, source=Docket.DEFAULT) self.docket.save() self.audio = Audio( local_path_original_file=self.good_mp3_path, local_path_mp3=self.good_mp3_path, docket=self.docket, blocked=False, case_name_full="Ander v. Leo", date_created=datetime.date(2014, 6, 9), ) self.audio.save(index=False) self.opinioncluster = OpinionCluster( case_name=u"Hotline Bling", docket=self.docket, date_filed=datetime.date(2015, 12, 14), ) self.opinioncluster.save(index=False) self.txtopinion = Opinion( cluster=self.opinioncluster, type="Lead Opinion", local_path=self.good_txt_path, ) self.txtopinion.save(index=False) self.pdfopinion = Opinion( cluster=self.opinioncluster, type="Lead Opinion", local_path=self.good_pdf_path, ) self.pdfopinion.save(index=False)
def make_objects(
    item: Dict[str, Union[str, Any]],
    court: Court,
    sha1_hash: str,
    content: bytes,
) -> Tuple[Docket, Opinion, OpinionCluster, List[Citation]]:
    """Takes the meta data from the scraper and associates it with objects.

    Returns the created objects.
    """
    blocked = item["blocked_statuses"]
    if blocked:
        date_blocked = date.today()
    else:
        date_blocked = None

    case_name_short = item.get("case_name_shorts") or cnt.make_case_name_short(
        item["case_names"]
    )

    docket = Docket(
        docket_number=item.get("docket_numbers", ""),
        case_name=item["case_names"],
        case_name_short=case_name_short,
        court=court,
        blocked=blocked,
        date_blocked=date_blocked,
        source=item.get("source") or Docket.SCRAPER,
    )

    west_cite_str = item.get("west_citations", "")
    state_cite_str = item.get("west_state_citations", "")
    neutral_cite_str = item.get("neutral_citations", "")
    cluster = OpinionCluster(
        judges=item.get("judges", ""),
        date_filed=item["case_dates"],
        date_filed_is_approximate=item["date_filed_is_approximate"],
        case_name=item["case_names"],
        case_name_short=case_name_short,
        source=item.get("cluster_source") or "C",
        precedential_status=item["precedential_statuses"],
        nature_of_suit=item.get("nature_of_suit", ""),
        blocked=blocked,
        date_blocked=date_blocked,
        syllabus=item.get("summaries", ""),
    )

    citations = []
    cite_types = [
        (west_cite_str, Citation.WEST),
        (state_cite_str, Citation.STATE),
        (neutral_cite_str, Citation.NEUTRAL),
    ]
    for cite_str, cite_type in cite_types:
        if cite_str:
            citations.append(make_citation(cite_str, cluster, cite_type))

    opinion = Opinion(
        type=Opinion.COMBINED,
        sha1=sha1_hash,
        download_url=item["download_urls"],
    )

    cf = ContentFile(content)
    extension = get_extension(content)
    file_name = trunc(item["case_names"].lower(), 75) + extension
    opinion.file_with_date = cluster.date_filed
    opinion.local_path.save(file_name, cf, save=False)
    return docket, opinion, cluster, citations
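# A hedged sketch of how a caller might persist the objects returned above.
# It mirrors the save order used elsewhere in these snippets (docket first,
# then cluster, opinion, and citations); the save() keyword arguments are
# taken from those snippets, not asserted as the definitive API:
docket, opinion, cluster, citations = make_objects(item, court, sha1_hash, content)
docket.save()
cluster.docket = docket
cluster.save(index=False)
opinion.cluster = cluster
opinion.save(index=False)
for citation in citations:
    citation.cluster = cluster
    citation.save()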
def get_docket_by_pacer_case_id(self, pacer_case_id, court_id, session, tag=None, **kwargs): """Get a docket by PACER case id, CL court ID, and a collection of kwargs that can be passed to the DocketReport query. For details of acceptable parameters, see DocketReport.query() :param pacer_case_id: The internal case ID of the item in PACER. :param court_id: A courtlistener court ID. :param session: A valid PacerSession object. :param tag: The tag name that should be stored with the item in the DB. :param kwargs: A variety of keyword args to pass to DocketReport.query(). """ report = DocketReport(map_cl_to_pacer_id(court_id), session) logger.info("Querying docket report %s.%s" % (court_id, pacer_case_id)) try: d = Docket.objects.get( pacer_case_id=pacer_case_id, court_id=court_id, ) except Docket.DoesNotExist: d = None except Docket.MultipleObjectsReturned: d = None if d is not None: first_missing_id = get_first_missing_de_number(d) if d is not None and first_missing_id > 1: # We don't have to get the whole thing! kwargs.setdefault('doc_num_start', first_missing_id) report.query(pacer_case_id, **kwargs) docket_data = report.data logger.info("Querying and parsing complete for %s.%s" % (court_id, pacer_case_id)) # Merge the contents into CL. try: if d is None: d = Docket.objects.get( Q(pacer_case_id=pacer_case_id) | Q(docket_number=docket_data['docket_number']), court_id=court_id, ) # Add RECAP as a source if it's not already. if d.source in [Docket.DEFAULT, Docket.SCRAPER]: d.source = Docket.RECAP_AND_SCRAPER elif d.source == Docket.COLUMBIA: d.source = Docket.COLUMBIA_AND_RECAP elif d.source == Docket.COLUMBIA_AND_SCRAPER: d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER except Docket.DoesNotExist: d = Docket( source=Docket.RECAP, pacer_case_id=pacer_case_id, court_id=court_id ) except Docket.MultipleObjectsReturned: logger.error("Too many dockets returned when trying to look up '%s.%s'" % (court_id, pacer_case_id)) return None update_docket_metadata(d, docket_data) d.save() if tag is not None: tag, _ = Tag.objects.get_or_create(name=tag) d.tags.add(tag) # Add the HTML to the docket in case we need it someday. pacer_file = PacerHtmlFiles(content_object=d) pacer_file.filepath.save( 'docket.html', # We only care about the ext w/UUIDFileSystemStorage ContentFile(report.response.text), ) for docket_entry in docket_data['docket_entries']: try: de, created = DocketEntry.objects.update_or_create( docket=d, entry_number=docket_entry['document_number'], defaults={ 'description': docket_entry['description'], 'date_filed': docket_entry['date_filed'], } ) except DocketEntry.MultipleObjectsReturned: logger.error( "Multiple docket entries found for document entry number '%s' " "while processing '%s.%s'" % (docket_entry['document_number'], court_id, pacer_case_id) ) continue else: if tag is not None: de.tags.add(tag) try: rd = RECAPDocument.objects.get( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], ) except RECAPDocument.DoesNotExist: try: rd = RECAPDocument.objects.create( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], pacer_doc_id=docket_entry['pacer_doc_id'], is_available=False, ) except IntegrityError: # Race condition. The item was created after our get failed. rd = RECAPDocument.objects.get( docket_entry=de, # No attachments when uploading dockets. 
document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], ) except RECAPDocument.MultipleObjectsReturned: logger.error( "Multiple recap documents found for document entry " "number: '%s', docket: %s" % (docket_entry['document_number'], d) ) continue rd.pacer_doc_id = rd.pacer_doc_id or docket_entry['pacer_doc_id'] if tag is not None: rd.tags.add(tag) add_parties_and_attorneys(d, docket_data['parties']) logger.info("Created/updated docket: %s" % d) return d
def setUp(self):
    self.d = Docket()
    self.v_case_name = 'x v. y'
    self.new_case_name = 'x v. z'
    self.uct = 'Unknown Case Title'
def make_and_save(item, skipdupes=False, min_dates=None, start_dates=None, testing=True): """Associates case data from `parse_opinions` with objects. Saves these objects. min_date: if not none, will skip cases after min_date """ date_filed = date_argued = date_reargued = date_reargument_denied = date_cert_granted = date_cert_denied = None unknown_date = None for date_cluster in item['dates']: for date_info in date_cluster: # check for any dates that clearly aren't dates if date_info[1].year < 1600 or date_info[1].year > 2020: continue # check for untagged dates that will be assigned to date_filed if date_info[0] is None: date_filed = date_info[1] continue # try to figure out what type of date it is based on its tag string if date_info[0] in FILED_TAGS: date_filed = date_info[1] elif date_info[0] in DECIDED_TAGS: if not date_filed: date_filed = date_info[1] elif date_info[0] in ARGUED_TAGS: date_argued = date_info[1] elif date_info[0] in REARGUE_TAGS: date_reargued = date_info[1] elif date_info[0] in REARGUE_DENIED_TAGS: date_reargument_denied = date_info[1] elif date_info[0] in CERT_GRANTED_TAGS: date_cert_granted = date_info[1] elif date_info[0] in CERT_DENIED_TAGS: date_cert_denied = date_info[1] else: unknown_date = date_info[1] if date_info[0] not in UNKNOWN_TAGS: print("\nFound unknown date tag '%s' with date '%s'.\n" % date_info) # the main date (used for date_filed in OpinionCluster) and panel dates # (used for finding judges) are ordered in terms of which type of dates # best reflect them main_date = (date_filed or date_argued or date_reargued or date_reargument_denied or unknown_date) panel_date = (date_argued or date_reargued or date_reargument_denied or date_filed or unknown_date) if main_date is None: raise Exception("Failed to get a date for " + item['file']) # special rule for Kentucky if item['court_id'] == 'kycourtapp' and main_date <= date(1975, 12, 31): item['court_id'] = 'kycourtapphigh' if min_dates is not None: if min_dates.get(item['court_id']) is not None: if main_date >= min_dates[item['court_id']]: print(main_date, 'after', min_dates[item['court_id']], ' -- skipping.') return if start_dates is not None: if start_dates.get(item['court_id']) is not None: if main_date <= start_dates[item['court_id']]: print(main_date, 'before court founding:', start_dates[item['court_id']], ' -- skipping.') return docket = Docket(source=Docket.COLUMBIA, date_argued=date_argued, date_reargued=date_reargued, date_cert_granted=date_cert_granted, date_cert_denied=date_cert_denied, date_reargument_denied=date_reargument_denied, court_id=item['court_id'], case_name_short=item['case_name_short'] or '', case_name=item['case_name'] or '', case_name_full=item['case_name_full'] or '', docket_number=item['docket'] or '') # get citations in the form of, e.g. {'federal_cite_one': '1 U.S. 1', ...} found_citations = [] for c in item['citations']: found = get_citations(c) if not found: # if the docket number --is-- citation string, we're likely dealing # with a somewhat common triplet of (docket number, date, # jurisdiction), which isn't a citation at all (so there's no # problem) if item['docket']: docket_no = item['docket'].lower() if 'claim no.' 
in docket_no: docket_no = docket_no.split('claim no.')[0] for junk in DOCKET_JUNK: docket_no = docket_no.replace(junk, '') docket_no = docket_no.strip('.').strip() if docket_no and docket_no in c.lower(): continue # there are a trivial number of letters (except for months and a few # trivial words) in the citation, then it's not a citation at all non_trivial = c.lower() for trivial in TRIVIAL_CITE_WORDS: non_trivial = non_trivial.replace(trivial, '') num_letters = sum( non_trivial.count(letter) for letter in string.lowercase) if num_letters < 3: continue # if there is a string that's known to indicate a bad citation, then # it's not a citation if any(bad in c for bad in BAD_CITES): continue # otherwise, this is a problem raise Exception("Failed to get a citation from the string '%s' in " "court '%s' with docket '%s'." % (c, item['court_id'], item['docket'])) else: found_citations.extend(found) citations_map = map_citations_to_models(found_citations) cluster = OpinionCluster( judges=item.get('judges', '') or "", precedential_status=('Unpublished' if item['unpublished'] else 'Published'), date_filed=main_date, case_name_short=item['case_name_short'] or '', case_name=item['case_name'] or '', case_name_full=item['case_name_full'] or '', source='Z', attorneys=item['attorneys'] or '', posture=item['posture'] or '', **citations_map) panel = [ find_person(n, item['court_id'], case_date=panel_date) for n in item['panel'] ] panel = [x for x in panel if x is not None] opinions = [] for i, opinion_info in enumerate(item['opinions']): if opinion_info['author'] is None: author = None else: author = find_person(opinion_info['author'], item['court_id'], case_date=panel_date) converted_text = convert_columbia_html(opinion_info['opinion']) opinion_type = OPINION_TYPE_MAPPING[opinion_info['type']] if opinion_type == '020lead' and i > 0: opinion_type = '050addendum' opinion = Opinion( author=author, per_curiam=opinion_info['per_curiam'], type=opinion_type, # type=OPINION_TYPE_MAPPING[opinion_info['type']], html_columbia=converted_text, sha1=opinion_info['sha1'], local_path=opinion_info['local_path'], ) joined_by = [ find_person(n, item['court_id'], case_date=panel_date) for n in opinion_info['joining'] ] joined_by = [x for x in joined_by if x is not None] opinions.append((opinion, joined_by)) if min_dates is None: # check to see if this is a duplicate dups = find_dups(docket, cluster) if dups: if skipdupes: print('Duplicate. skipping.') else: raise Exception("Found %s duplicate(s)." % len(dups)) # save all the objects if not testing: try: docket.save() cluster.docket = docket cluster.save(index=False) for member in panel: cluster.panel.add(member) for opinion, joined_by in opinions: opinion.cluster = cluster opinion.save(index=False) for joiner in joined_by: opinion.joined_by.add(joiner) if settings.DEBUG: domain = "http://127.0.0.1:8000" else: domain = "https://www.courtlistener.com" print("Created item at: %s%s" % (domain, cluster.get_absolute_url())) except: # if anything goes wrong, try to delete everything try: docket.delete() except: pass raise
def process_recap_docket(pk): """Process an uploaded docket from the RECAP API endpoint. param pk: The primary key of the processing queue item you want to work on. """ pq = ProcessingQueue.objects.get(pk=pk) pq.status = pq.PROCESSING_IN_PROGRESS pq.save() logger.info("Processing RECAP item (debug is: %s): %s" % (pq.debug, pq)) report = DocketReport(map_cl_to_pacer_id(pq.court_id)) text = pq.filepath_local.read().decode('utf-8') report._parse_text(text) docket_data = report.data logger.info("Parsing completed of item %s" % pq) # Merge the contents of the docket into CL try: d = Docket.objects.get( Q(pacer_case_id=pq.pacer_case_id) | Q(docket_number=docket_data['docket_number']), court_id=pq.court_id, ) # Add RECAP as a source if it's not already. if d.source in [Docket.DEFAULT, Docket.SCRAPER]: d.source = Docket.RECAP_AND_SCRAPER elif d.source == Docket.COLUMBIA: d.source = Docket.COLUMBIA_AND_RECAP elif d.source == Docket.COLUMBIA_AND_SCRAPER: d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER except Docket.DoesNotExist: d = Docket( source=Docket.RECAP, pacer_case_id=pq.pacer_case_id, court_id=pq.court_id ) except Docket.MultipleObjectsReturned: msg = "Too many dockets found when trying to look up '%s'" % pq logger.error(msg) pq.error_message = msg pq.status = pq.PROCESSING_FAILED pq.save() return None update_docket_metadata(d, docket_data) if pq.debug: mark_pq_successful(pq, d_id=d.pk) return d d.save() # Add the HTML to the docket in case we need it someday. pacer_file = PacerHtmlFiles(content_object=d) pacer_file.filepath.save( 'docket.html', # We only care about the ext w/UUIDFileSystemStorage ContentFile(text), ) # Docket entries for docket_entry in docket_data['docket_entries']: try: de, created = DocketEntry.objects.update_or_create( docket=d, entry_number=docket_entry['document_number'], defaults={ 'description': docket_entry['description'], 'date_filed': docket_entry['date_filed'], } ) except DocketEntry.MultipleObjectsReturned: logger.error( "Multiple docket entries found for document entry number '%s' " "while processing '%s'" % (docket_entry['document_number'], pq) ) continue # Then make the RECAPDocument object. Try to find it. If we do, update # the pacer_doc_id field if it's blank. If we can't find it, create it # or throw an error. try: rd = RECAPDocument.objects.get( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], ) except RECAPDocument.DoesNotExist: RECAPDocument.objects.create( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], pacer_doc_id=docket_entry['pacer_doc_id'], is_available=False, ) except RECAPDocument.MultipleObjectsReturned: logger.error( "Multiple recap documents found for document entry number'%s' " "while processing '%s'" % (docket_entry['document_number'], pq) ) continue else: rd.pacer_doc_id = rd.pacer_doc_id or pq.pacer_doc_id add_parties_and_attorneys(d, docket_data['parties']) mark_pq_successful(pq, d_id=d.pk) return d
def setUp(self):
    self.d = Docket()
    self.v_case_name = "x v. y"
    self.new_case_name = "x v. z"
    self.uct = "Unknown Case Title"
def process_recap_docket(self, pk): """Process an uploaded docket from the RECAP API endpoint. :param pk: The primary key of the processing queue item you want to work on. :returns: A dict of the form: { // The PK of the docket that's created or updated 'docket_pk': 22, // A boolean indicating whether a new docket entry or recap document // was created (implying a Solr needs updating). 'needs_solr_update': True, } This value is a dict so that it can be ingested in a Celery chain. """ pq = ProcessingQueue.objects.get(pk=pk) mark_pq_status(pq, '', pq.PROCESSING_IN_PROGRESS) logger.info("Processing RECAP item (debug is: %s): %s" % (pq.debug, pq)) report = DocketReport(map_cl_to_pacer_id(pq.court_id)) text = pq.filepath_local.read().decode('utf-8') if 'History/Documents' in text: # Prior to 1.1.8, we did not separate docket history reports into their # own upload_type. Alas, we still have some old clients around, so we # need to handle those clients here. pq.upload_type = pq.DOCKET_HISTORY_REPORT pq.save() process_recap_docket_history_report(pk) self.request.callbacks = None return None report._parse_text(text) docket_data = report.data logger.info("Parsing completed of item %s" % pq) if docket_data == {}: # Not really a docket. Some sort of invalid document (see Juriscraper). msg = "Not a valid docket upload." mark_pq_status(pq, msg, pq.INVALID_CONTENT) self.request.callbacks = None return None # Merge the contents of the docket into CL. Attempt several lookups of # decreasing specificity. Note that pacer_case_id is required for Docket # uploads. d = None for kwargs in [{'pacer_case_id': pq.pacer_case_id, 'docket_number': docket_data['docket_number']}, {'pacer_case_id': pq.pacer_case_id}, {'docket_number': docket_data['docket_number'], 'pacer_case_id': None}]: try: d = Docket.objects.get(court_id=pq.court_id, **kwargs) break except Docket.DoesNotExist: continue except Docket.MultipleObjectsReturned: msg = "Too many dockets found when trying to look up '%s'" % pq mark_pq_status(pq, msg, pq.PROCESSING_FAILED) self.request.callbacks = None return None if d is None: # Couldn't find it. Make a new one. d = Docket( source=Docket.RECAP, pacer_case_id=pq.pacer_case_id, court_id=pq.court_id ) # Add RECAP as a source if it's not already. if d.source in [Docket.DEFAULT, Docket.SCRAPER]: d.source = Docket.RECAP_AND_SCRAPER elif d.source == Docket.COLUMBIA: d.source = Docket.COLUMBIA_AND_RECAP elif d.source == Docket.COLUMBIA_AND_SCRAPER: d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER update_docket_metadata(d, docket_data) if pq.debug: mark_pq_successful(pq, d_id=d.pk) self.request.callbacks = None return {'docket_pk': d.pk, 'needs_solr_update': False} d.save() # Add the HTML to the docket in case we need it someday. pacer_file = PacerHtmlFiles(content_object=d) pacer_file.filepath.save( 'docket.html', # We only care about the ext w/UUIDFileSystemStorage ContentFile(text), ) # Docket entries & documents rds_created = [] needs_solr_update = False for docket_entry in docket_data['docket_entries']: try: de, de_created = DocketEntry.objects.update_or_create( docket=d, entry_number=docket_entry['document_number'], defaults={ 'description': docket_entry['description'], 'date_filed': docket_entry['date_filed'], } ) except DocketEntry.MultipleObjectsReturned: logger.error( "Multiple docket entries found for document entry number '%s' " "while processing '%s'" % (docket_entry['document_number'], pq) ) continue if de_created: needs_solr_update = True # Then make the RECAPDocument object. Try to find it. 
If we do, update # the pacer_doc_id field if it's blank. If we can't find it, create it # or throw an error. params = { 'docket_entry': de, # No attachments when uploading dockets. 'document_type': RECAPDocument.PACER_DOCUMENT, 'document_number': docket_entry['document_number'], } try: rd = RECAPDocument.objects.get(**params) except RECAPDocument.DoesNotExist: rd = RECAPDocument.objects.create( pacer_doc_id=docket_entry['pacer_doc_id'], is_available=False, **params ) rds_created.append(rd) except RECAPDocument.MultipleObjectsReturned: logger.error( "Multiple recap documents found for document entry number'%s' " "while processing '%s'" % (docket_entry['document_number'], pq) ) continue else: rd.pacer_doc_id = rd.pacer_doc_id or pq.pacer_doc_id add_parties_and_attorneys(d, docket_data['parties']) process_orphan_documents(rds_created, pq.court_id, d.date_filed) mark_pq_successful(pq, d_id=d.pk) return { 'docket_pk': d.pk, 'needs_solr_update': bool(rds_created or needs_solr_update), }
def make_and_save(item, skipdupes=False, min_dates=None, start_dates=None, testing=True): """Associates case data from `parse_opinions` with objects. Saves these objects. min_date: if not none, will skip cases after min_date """ date_filed = (date_argued) = ( date_reargued ) = date_reargument_denied = date_cert_granted = date_cert_denied = None unknown_date = None for date_cluster in item["dates"]: for date_info in date_cluster: # check for any dates that clearly aren't dates if date_info[1].year < 1600 or date_info[1].year > 2020: continue # check for untagged dates that will be assigned to date_filed if date_info[0] is None: date_filed = date_info[1] continue # try to figure out what type of date it is based on its tag string if date_info[0] in FILED_TAGS: date_filed = date_info[1] elif date_info[0] in DECIDED_TAGS: if not date_filed: date_filed = date_info[1] elif date_info[0] in ARGUED_TAGS: date_argued = date_info[1] elif date_info[0] in REARGUE_TAGS: date_reargued = date_info[1] elif date_info[0] in REARGUE_DENIED_TAGS: date_reargument_denied = date_info[1] elif date_info[0] in CERT_GRANTED_TAGS: date_cert_granted = date_info[1] elif date_info[0] in CERT_DENIED_TAGS: date_cert_denied = date_info[1] else: unknown_date = date_info[1] if date_info[0] not in UNKNOWN_TAGS: print("\nFound unknown date tag '%s' with date '%s'.\n" % date_info) # the main date (used for date_filed in OpinionCluster) and panel dates # (used for finding judges) are ordered in terms of which type of dates # best reflect them main_date = (date_filed or date_argued or date_reargued or date_reargument_denied or unknown_date) panel_date = (date_argued or date_reargued or date_reargument_denied or date_filed or unknown_date) if main_date is None: raise Exception("Failed to get a date for " + item["file"]) # special rule for Kentucky if item["court_id"] == "kycourtapp" and main_date <= date(1975, 12, 31): item["court_id"] = "kycourtapphigh" if min_dates is not None: if min_dates.get(item["court_id"]) is not None: if main_date >= min_dates[item["court_id"]]: print( main_date, "after", min_dates[item["court_id"]], " -- skipping.", ) return if start_dates is not None: if start_dates.get(item["court_id"]) is not None: if main_date <= start_dates[item["court_id"]]: print( main_date, "before court founding:", start_dates[item["court_id"]], " -- skipping.", ) return docket = Docket( source=Docket.COLUMBIA, date_argued=date_argued, date_reargued=date_reargued, date_cert_granted=date_cert_granted, date_cert_denied=date_cert_denied, date_reargument_denied=date_reargument_denied, court_id=item["court_id"], case_name_short=item["case_name_short"] or "", case_name=item["case_name"] or "", case_name_full=item["case_name_full"] or "", docket_number=item["docket"] or "", ) # get citation objects in a list for addition to the cluster found_citations = [] for c in item["citations"]: found = get_citations(clean_text(c, ["html", "inline_whitespace"])) if not found: # if the docket number --is-- citation string, we're likely dealing # with a somewhat common triplet of (docket number, date, # jurisdiction), which isn't a citation at all (so there's no # problem) if item["docket"]: docket_no = item["docket"].lower() if "claim no." 
in docket_no: docket_no = docket_no.split("claim no.")[0] for junk in DOCKET_JUNK: docket_no = docket_no.replace(junk, "") docket_no = docket_no.strip(".").strip() if docket_no and docket_no in c.lower(): continue # there are a trivial number of letters (except for # months and a few trivial words) in the citation, # then it's not a citation at all non_trivial = c.lower() for trivial in TRIVIAL_CITE_WORDS: non_trivial = non_trivial.replace(trivial, "") num_letters = sum( non_trivial.count(letter) for letter in string.lowercase) if num_letters < 3: continue # if there is a string that's known to indicate # a bad citation, then it's not a citation if any(bad in c for bad in BAD_CITES): continue # otherwise, this is a problem raise Exception("Failed to get a citation from the string '%s' in " "court '%s' with docket '%s'." % (c, item["court_id"], item["docket"])) else: found_citations.extend(found.to_model()) cluster = OpinionCluster( judges=item.get("judges", "") or "", precedential_status=("Unpublished" if item["unpublished"] else "Published"), date_filed=main_date, case_name_short=item["case_name_short"] or "", case_name=item["case_name"] or "", case_name_full=item["case_name_full"] or "", source="Z", attorneys=item["attorneys"] or "", posture=item["posture"] or "", ) panel = lookup_judges_by_last_name_list(item["panel"], item["court_id"], panel_date) opinions = [] for i, opinion_info in enumerate(item["opinions"]): if opinion_info["author"] is None: author = None else: author = lookup_judge_by_last_name(opinion_info["author"], item["court_id"], panel_date) converted_text = convert_columbia_html(opinion_info["opinion"]) opinion_type = OPINION_TYPE_MAPPING[opinion_info["type"]] if opinion_type == Opinion.LEAD and i > 0: opinion_type = Opinion.ADDENDUM opinion = Opinion( author=author, per_curiam=opinion_info["per_curiam"], type=opinion_type, # type=OPINION_TYPE_MAPPING[opinion_info['type']], html_columbia=converted_text, sha1=opinion_info["sha1"], # This is surely not updated for the new S3 world. If you're # reading this, you'll need to update this code. local_path=opinion_info["local_path"], ) joined_by = lookup_judges_by_last_name_list(item["joining"], item["court_id"], panel_date) opinions.append((opinion, joined_by)) if min_dates is None: # check to see if this is a duplicate dups = find_dups(docket, cluster) if dups: if skipdupes: print("Duplicate. skipping.") else: raise Exception("Found %s duplicate(s)." % len(dups)) # save all the objects if not testing: try: docket.save() cluster.docket = docket cluster.save(index=False) for citation in found_citations: citation.cluster = cluster citation.save() for member in panel: cluster.panel.add(member) for opinion, joined_by in opinions: opinion.cluster = cluster opinion.save(index=False) for joiner in joined_by: opinion.joined_by.add(joiner) if settings.DEBUG: domain = "http://127.0.0.1:8000" else: domain = "https://www.courtlistener.com" print("Created item at: %s%s" % (domain, cluster.get_absolute_url())) except: # if anything goes wrong, try to delete everything try: docket.delete() except: pass raise
def lookup_and_save(new, debug=False):
    """Merge new docket info into the database.

    Start by attempting to lookup an existing Docket. If that's not found,
    create a new one. Either way, merge all the attributes of `new` into the
    Docket found, and then save the Docket.

    Returns None if an error occurs, else, return the new or updated Docket.
    """
    try:
        d = Docket.objects.get(
            pacer_case_id=new.pacer_case_id, court=new.court
        )
    except (Docket.DoesNotExist, Docket.MultipleObjectsReturned):
        d = None

    if d is None:
        ds = Docket.objects.filter(
            docket_number=new.docket_number, court=new.court
        ).order_by("-date_filed")
        count = ds.count()
        if count < 1:
            # Can't find it by pacer_case_id or docket_number. Make a new
            # item.
            d = Docket(source=Docket.RECAP)
        elif count == 1:
            # Nailed it!
            d = ds[0]
        elif count > 1:
            # Too many dockets returned. Disambiguate.
            logger.error("Got multiple results while attempting save.")

            def is_different(x):
                return x.pacer_case_id and x.pacer_case_id != new.pacer_case_id

            if all([is_different(d) for d in ds]):
                # All the dockets found match on docket number, but have
                # different pacer_case_ids. This means that the docket has
                # multiple pacer_case_ids in PACER, and we should mirror that
                # in CL by creating a new docket for the new item.
                d = Docket(source=Docket.RECAP)
            else:
                # Just use the most recent docket. Looking at the data, this
                # is OK. Nearly all of these are dockets associated with
                # clusters that can be merged (however, that's a project for
                # clusters).
                d = ds[0]

    # Add RECAP as a source if it's not already.
    if d.source in [Docket.DEFAULT, Docket.SCRAPER]:
        d.source = Docket.RECAP_AND_SCRAPER
    elif d.source == Docket.COLUMBIA:
        d.source = Docket.COLUMBIA_AND_RECAP
    elif d.source == Docket.COLUMBIA_AND_SCRAPER:
        d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER

    for attr, v in new.__dict__.items():
        setattr(d, attr, v)

    if not debug:
        d.save()
        logger.info(
            "Saved as Docket %s: https://www.courtlistener.com%s"
            % (d.pk, d.get_absolute_url())
        )
    return d
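# A hypothetical caller for lookup_and_save(). Here `new` is assumed to be an
# unsaved Docket-like object carrying pacer_case_id, docket_number, and court
# attributes; the concrete values below are invented for illustration.
new = Docket(
    source=Docket.RECAP,
    pacer_case_id="98765",
    docket_number="1:17-cv-00001",
    court=Court.objects.get(pk="dcd"),
)
d = lookup_and_save(new, debug=True)  # debug=True merges but skips d.save()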
def parse_harvard_opinions(reporter, volume, make_searchable): """ Parse downloaded CaseLaw Corpus from internet archive and add them to our database. Optionally uses a reporter abbreviation to identify cases to download as used by IA. (Ex. T.C. => tc) Optionally uses a volume integer. If neither is provided, code will cycle through all downloaded files. :param volume: The volume (int) of the reporters (optional) (ex 10) :param reporter: Reporter string as slugify'd (optional) (tc) for T.C. :param make_searchable: Boolean to indicate saving to solr :return: None """ if not reporter and volume: logger.error("You provided a volume but no reporter. Exiting.") return for file_path in filepath_list(reporter, volume): ia_download_url = "/".join( ["https://archive.org/download", file_path.split("/", 9)[-1]] ) if OpinionCluster.objects.filter( filepath_json_harvard=file_path ).exists(): logger.info("Skipping - already in system %s" % ia_download_url) continue try: with open(file_path) as f: data = json.load(f) except ValueError: logger.warning("Empty json: missing case at: %s" % ia_download_url) continue except Exception as e: logger.warning("Unknown error %s for: %s" % (e, ia_download_url)) continue cites = get_citations(data["citations"][0]["cite"]) if not cites: logger.info( "No citation found for %s." % data["citations"][0]["cite"] ) continue case_name = harmonize(data["name_abbreviation"]) case_name_short = cnt.make_case_name_short(case_name) case_name_full = harmonize(data["name"]) citation = cites[0] if skip_processing(citation, case_name, file_path): continue # TODO: Generalize this to handle all court types somehow. court_id = match_court_string( data["court"]["name"], state=True, federal_appeals=True, federal_district=True, ) soup = BeautifulSoup(data["casebody"]["data"], "lxml") # Some documents contain images in the HTML # Flag them for a later crawl by using the placeholder '[[Image]]' judge_list = [ extract_judge_last_name(x.text) for x in soup.find_all("judges") ] author_list = [ extract_judge_last_name(x.text) for x in soup.find_all("author") ] # Flatten and dedupe list of judges judges = ", ".join( sorted( list( set( itertools.chain.from_iterable(judge_list + author_list) ) ) ) ) judges = titlecase(judges) docket_string = ( data["docket_number"] .replace("Docket No.", "") .replace("Docket Nos.", "") .strip() ) short_fields = ["attorneys", "disposition", "otherdate", "seealso"] long_fields = [ "syllabus", "summary", "history", "headnotes", "correction", ] short_data = parse_extra_fields(soup, short_fields, False) long_data = parse_extra_fields(soup, long_fields, True) with transaction.atomic(): logger.info("Adding docket for: %s", citation.base_citation()) docket = Docket( case_name=case_name, case_name_short=case_name_short, case_name_full=case_name_full, docket_number=docket_string, court_id=court_id, source=Docket.HARVARD, ia_needs_upload=False, ) try: with transaction.atomic(): docket.save() except OperationalError as e: if "exceeds maximum" in str(e): docket.docket_number = ( "%s, See Corrections for full Docket Number" % trunc(docket_string, length=5000, ellipsis="...") ) docket.save() long_data["correction"] = "%s <br> %s" % ( data["docket_number"], long_data["correction"], ) # Handle partial dates by adding -01v to YYYY-MM dates date_filed, is_approximate = validate_dt(data["decision_date"]) logger.info("Adding cluster for: %s", citation.base_citation()) cluster = OpinionCluster( case_name=case_name, case_name_short=case_name_short, case_name_full=case_name_full, 
precedential_status="Published", docket_id=docket.id, source="U", date_filed=date_filed, date_filed_is_approximate=is_approximate, attorneys=short_data["attorneys"], disposition=short_data["disposition"], syllabus=long_data["syllabus"], summary=long_data["summary"], history=long_data["history"], other_dates=short_data["otherdate"], cross_reference=short_data["seealso"], headnotes=long_data["headnotes"], correction=long_data["correction"], judges=judges, filepath_json_harvard=file_path, ) cluster.save(index=False) logger.info("Adding citation for: %s", citation.base_citation()) Citation.objects.create( volume=citation.volume, reporter=citation.reporter, page=citation.page, type=map_reporter_db_cite_type( REPORTERS[citation.canonical_reporter][0]["cite_type"] ), cluster_id=cluster.id, ) new_op_pks = [] for op in soup.find_all("opinion"): # This code cleans author tags for processing. # It is particularly useful for identifiying Per Curiam for elem in [op.find("author")]: if elem is not None: [x.extract() for x in elem.find_all("page-number")] auth = op.find("author") if auth is not None: author_tag_str = titlecase(auth.text.strip(":")) author_str = titlecase( "".join(extract_judge_last_name(author_tag_str)) ) else: author_str = "" author_tag_str = "" per_curiam = True if author_tag_str == "Per Curiam" else False # If Per Curiam is True set author string to Per Curiam if per_curiam: author_str = "Per Curiam" op_type = map_opinion_type(op.get("type")) opinion_xml = str(op) logger.info("Adding opinion for: %s", citation.base_citation()) op = Opinion( cluster_id=cluster.id, type=op_type, author_str=author_str, xml_harvard=opinion_xml, per_curiam=per_curiam, extracted_by_ocr=True, ) # Don't index now; do so later if desired op.save(index=False) new_op_pks.append(op.pk) if make_searchable: add_items_to_solr.delay(new_op_pks, "search.Opinion") logger.info("Finished: %s", citation.base_citation())
def process_recap_docket(pk): """Process an uploaded docket from the RECAP API endpoint. :param pk: The primary key of the processing queue item you want to work on. :return: The docket that's created or updated. """ pq = ProcessingQueue.objects.get(pk=pk) mark_pq_status(pq, '', pq.PROCESSING_IN_PROGRESS) logger.info("Processing RECAP item (debug is: %s): %s" % (pq.debug, pq)) report = DocketReport(map_cl_to_pacer_id(pq.court_id)) text = pq.filepath_local.read().decode('utf-8') report._parse_text(text) docket_data = report.data logger.info("Parsing completed of item %s" % pq) if docket_data == {}: # Not really a docket. Some sort of invalid document (see Juriscraper). msg = "Not a valid docket upload." mark_pq_status(pq, msg, pq.INVALID_CONTENT) return None # Merge the contents of the docket into CL. Attempt several lookups of # decreasing specificity. d = None for kwargs in [{ 'pacer_case_id': pq.pacer_case_id, 'docket_number': docket_data['docket_number'] }, { 'pacer_case_id': pq.pacer_case_id }, { 'docket_number': docket_data['docket_number'] }]: try: d = Docket.objects.get(court_id=pq.court_id, **kwargs) break except Docket.DoesNotExist: continue except Docket.MultipleObjectsReturned: msg = "Too many dockets found when trying to look up '%s'" % pq mark_pq_status(pq, msg, pq.PROCESSING_FAILED) return None if d is None: # Couldn't find it. Make a new one. d = Docket(source=Docket.RECAP, pacer_case_id=pq.pacer_case_id, court_id=pq.court_id) # Add RECAP as a source if it's not already. if d.source in [Docket.DEFAULT, Docket.SCRAPER]: d.source = Docket.RECAP_AND_SCRAPER elif d.source == Docket.COLUMBIA: d.source = Docket.COLUMBIA_AND_RECAP elif d.source == Docket.COLUMBIA_AND_SCRAPER: d.source = Docket.COLUMBIA_AND_RECAP_AND_SCRAPER update_docket_metadata(d, docket_data) if pq.debug: mark_pq_successful(pq, d_id=d.pk) return d d.save() # Add the HTML to the docket in case we need it someday. pacer_file = PacerHtmlFiles(content_object=d) pacer_file.filepath.save( 'docket.html', # We only care about the ext w/UUIDFileSystemStorage ContentFile(text), ) # Docket entries for docket_entry in docket_data['docket_entries']: try: de, created = DocketEntry.objects.update_or_create( docket=d, entry_number=docket_entry['document_number'], defaults={ 'description': docket_entry['description'], 'date_filed': docket_entry['date_filed'], }) except DocketEntry.MultipleObjectsReturned: logger.error( "Multiple docket entries found for document entry number '%s' " "while processing '%s'" % (docket_entry['document_number'], pq)) continue # Then make the RECAPDocument object. Try to find it. If we do, update # the pacer_doc_id field if it's blank. If we can't find it, create it # or throw an error. try: rd = RECAPDocument.objects.get( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], ) except RECAPDocument.DoesNotExist: try: RECAPDocument.objects.create( docket_entry=de, # No attachments when uploading dockets. document_type=RECAPDocument.PACER_DOCUMENT, document_number=docket_entry['document_number'], pacer_doc_id=docket_entry['pacer_doc_id'], is_available=False, ) except IntegrityError: logger.warn( "Creating new document with pacer_doc_id of '%s' violates " "unique constraint on pacer_doc_id field." 
% docket_entry['pacer_doc_id']) continue except RECAPDocument.MultipleObjectsReturned: logger.error( "Multiple recap documents found for document entry number'%s' " "while processing '%s'" % (docket_entry['document_number'], pq)) continue else: rd.pacer_doc_id = rd.pacer_doc_id or pq.pacer_doc_id add_parties_and_attorneys(d, docket_data['parties']) mark_pq_successful(pq, d_id=d.pk) return d
def find_docket_object(
    court_id: str,
    pacer_case_id: str,
    docket_number: str,
    using: str = "default",
) -> Docket:
    """Attempt to find the docket based on the parsed docket data.

    If cannot be found, create a new docket. If multiple are found, return
    the oldest.

    :param court_id: The CourtListener court_id to lookup
    :param pacer_case_id: The PACER case ID for the docket
    :param docket_number: The docket number to lookup.
    :param using: The database to use for the lookup queries.
    :return The docket found or created.
    """
    # Attempt several lookups of decreasing specificity. Note that
    # pacer_case_id is required for Docket and Docket History uploads.
    d = None
    docket_number_core = make_docket_number_core(docket_number)
    lookups = [
        {
            "pacer_case_id": pacer_case_id,
            "docket_number_core": docket_number_core,
        },
        {"pacer_case_id": pacer_case_id},
    ]
    if docket_number_core:
        # Sometimes we don't know how to make core docket numbers. If that's
        # the case, we will have a blank value for the field. We must not do
        # lookups by blank values. See: freelawproject/courtlistener#1531
        lookups.append(
            {"pacer_case_id": None, "docket_number_core": docket_number_core},
        )
    else:
        # Finally, as a last resort, we can try the docket number. It might
        # not match b/c of punctuation or whatever, but we can try.
        lookups.append(
            {"pacer_case_id": None, "docket_number": docket_number},
        )

    for kwargs in lookups:
        ds = Docket.objects.filter(court_id=court_id, **kwargs).using(using)
        count = ds.count()
        if count == 0:
            continue  # Try a looser lookup.
        if count == 1:
            d = ds[0]
            break  # Nailed it!
        elif count > 1:
            # Choose the oldest one and live with it.
            d = ds.earliest("date_created")
            break

    if d is None:
        # Couldn't find a docket. Return a new one.
        return Docket(
            source=Docket.RECAP,
            pacer_case_id=pacer_case_id,
            court_id=court_id,
        )

    if using != "default":
        # Get the item from the default DB
        d = Docket.objects.get(pk=d.pk)

    return d
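# Unlike the older tuple-returning variant earlier in this collection, this
# version always hands back a single Docket. A hypothetical caller (the
# identifiers are invented) only needs to check whether the result has been
# persisted yet:
d = find_docket_object("cand", "12345", "3:18-cv-01234")
if d.pk is None:
    d.save()  # A new, unsaved docket was returned.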
def make_objects(self, item, court, sha1_hash, content): """Takes the meta data from the scraper and associates it with objects. Returns the created objects. """ blocked = item['blocked_statuses'] if blocked: date_blocked = date.today() else: date_blocked = None case_name_short = (item.get('case_name_shorts') or self.cnt.make_case_name_short(item['case_names'])) docket = Docket( docket_number=item.get('docket_numbers', ''), case_name=item['case_names'], case_name_short=case_name_short, court=court, blocked=blocked, date_blocked=date_blocked, source=Docket.SCRAPER, ) west_cite_str = item.get('west_citations', '') state_cite_str = item.get('west_state_citations', '') neutral_cite_str = item.get('neutral_citations', '') cluster = OpinionCluster( judges=item.get('judges', ''), date_filed=item['case_dates'], date_filed_is_approximate=item['date_filed_is_approximate'], case_name=item['case_names'], case_name_short=case_name_short, source='C', precedential_status=item['precedential_statuses'], nature_of_suit=item.get('nature_of_suit', ''), blocked=blocked, date_blocked=date_blocked, # These three fields are replaced below. federal_cite_one=west_cite_str, state_cite_one=state_cite_str, neutral_cite=neutral_cite_str, syllabus=item.get('summaries', ''), ) citations = [] cite_types = [ (west_cite_str, Citation.WEST), (state_cite_str, Citation.STATE), (neutral_cite_str, Citation.NEUTRAL), ] for cite_str, cite_type in cite_types: if cite_str: citations.append(make_citation(cite_str, cluster, cite_type)) opinion = Opinion( type='010combined', sha1=sha1_hash, download_url=item['download_urls'], ) error = False try: cf = ContentFile(content) extension = get_extension(content) file_name = trunc(item['case_names'].lower(), 75) + extension opinion.file_with_date = cluster.date_filed opinion.local_path.save(file_name, cf, save=False) except: msg = ('Unable to save binary to disk. Deleted ' 'item: %s.\n %s' % (item['case_names'], traceback.format_exc())) logger.critical(msg.encode('utf-8')) ErrorLog(log_level='CRITICAL', court=court, message=msg).save() error = True return docket, opinion, cluster, citations, error
def test_make_title_no_docket_number(self) -> None:
    """Can we make titles?"""
    # No docket number
    d = Docket(case_name="foo", docket_number=None)
    self.assertEqual(make_docket_title(d), "foo")
def make_and_save(item): """Associates case data from `parse_opinions` with objects. Saves these objects.""" date_filed = date_argued = date_reargued = date_reargument_denied = date_cert_granted = date_cert_denied = None for date_cluster in item['dates']: for date_info in date_cluster: # check for any dates that clearly aren't dates if date_info[1].year < 1600 or date_info[1].year > 2020: continue # check for untagged dates that will be assigned to date_filed if date_info[0] is None: date_filed = date_info[1] continue # try to figure out what type of date it is based on its tag string if date_info[0] in FILED_TAGS: date_filed = date_info[1] elif date_info[0] in DECIDED_TAGS: if not date_filed: date_filed = date_info[1] elif date_info[0] in ARGUED_TAGS: date_argued = date_info[1] elif date_info[0] in REARGUE_TAGS: date_reargued = date_info[1] elif date_info[0] in REARGUE_DENIED_TAGS: date_reargument_denied = date_info[1] elif date_info[0] in CERT_GRANTED_TAGS: date_cert_granted = date_info[1] elif date_info[0] in CERT_DENIED_TAGS: date_cert_denied = date_info[1] else: print("Found unknown date tag '%s' with date '%s'." % date_info) docket = Docket( date_argued=date_argued ,date_reargued=date_reargued ,date_cert_granted=date_cert_granted ,date_cert_denied=date_cert_denied ,date_reargument_denied=date_reargument_denied ,court_id=item['court_id'] ,case_name_short=item['case_name_short'] or '' ,case_name=item['case_name'] or '' ,case_name_full=item['case_name_full'] or '' ,docket_number=item['docket'] or '' ) docket.save() # get citations in the form of, e.g. {'federal_cite_one': '1 U.S. 1', ...} found_citations = [] for c in item['citations']: found = get_citations(c) if not found: raise Exception("Failed to get a citation from the string '%s'." % c) elif len(found) > 1: raise Exception("Got multiple citations from string '%s' when there should have been one." % c) found_citations.append(found[0]) citations_map = map_citations_to_models(found_citations) cluster = OpinionCluster( docket=docket ,precedential_status=('Unpublished' if item['unpublished'] else 'Published') ,date_filed=date_filed ,case_name_short=item['case_name_short'] or '' ,case_name=item['case_name'] or '' ,case_name_full=item['case_name_full'] or '' ,source='Z' ,attorneys=item['attorneys'] or '' ,posture=item['posture'] or '' ,**citations_map ) cluster.save() if date_argued is not None: paneldate = date_argued else: paneldate = date_filed panel = [find_person(n, item['court_id'], paneldate) for n in item['panel']] panel = [x for x in panel if x is not None] for member in panel: cluster.panel.add(member) for opinion_info in item['opinions']: if opinion_info['author'] is None: author = None else: author = find_person(opinion_info['author'], item['court_id'], date_filed or date_argued) opinion = Opinion( cluster=cluster ,author=author ,type=OPINION_TYPE_MAPPING[opinion_info['type']] ,html_columbia=opinion_info['opinion'] ) opinion.save() joined_by = [find_person(n, item['court_id'], paneldate) for n in opinion_info['joining']] joined_by = [x for x in joined_by if x is not None] for joiner in joined_by: opinion.joined_by.add(joiner)
def make_objects(item, court, sha1_hash, content): """Takes the meta data from the scraper and associates it with objects. Returns the created objects. """ blocked = item["blocked_statuses"] if blocked: date_blocked = date.today() else: date_blocked = None case_name_short = item.get("case_name_shorts") or cnt.make_case_name_short( item["case_names"]) docket = Docket( docket_number=item.get("docket_numbers", ""), case_name=item["case_names"], case_name_short=case_name_short, court=court, blocked=blocked, date_blocked=date_blocked, source=item.get("source") or Docket.SCRAPER, ) west_cite_str = item.get("west_citations", "") state_cite_str = item.get("west_state_citations", "") neutral_cite_str = item.get("neutral_citations", "") cluster = OpinionCluster( judges=item.get("judges", ""), date_filed=item["case_dates"], date_filed_is_approximate=item["date_filed_is_approximate"], case_name=item["case_names"], case_name_short=case_name_short, source=item.get("cluster_source") or "C", precedential_status=item["precedential_statuses"], nature_of_suit=item.get("nature_of_suit", ""), blocked=blocked, date_blocked=date_blocked, syllabus=item.get("summaries", ""), ) citations = [] cite_types = [ (west_cite_str, Citation.WEST), (state_cite_str, Citation.STATE), (neutral_cite_str, Citation.NEUTRAL), ] for cite_str, cite_type in cite_types: if cite_str: citations.append(make_citation(cite_str, cluster, cite_type)) opinion = Opinion( type=Opinion.COMBINED, sha1=sha1_hash, download_url=item["download_urls"], ) error = False try: cf = ContentFile(content) extension = get_extension(content) file_name = trunc(item["case_names"].lower(), 75) + extension opinion.file_with_date = cluster.date_filed opinion.local_path.save(file_name, cf, save=False) except: msg = "Unable to save binary to disk. Deleted " "item: %s.\n %s" % ( item["case_names"], traceback.format_exc(), ) logger.critical(msg.encode("utf-8")) ErrorLog(log_level="CRITICAL", court=court, message=msg).save() error = True return docket, opinion, cluster, citations, error
def download_and_save():
    """This function is run in many threads simultaneously. Each thread runs
    so long as there are items in the queue. Once an item is found, it's
    downloaded and saved.

    The number of items that can be concurrently saved is determined by the
    number of threads that are running this function.
    """
    while True:
        item = queue.get()
        logger.info("%s: Attempting to add item at: %s" %
                    (threading.current_thread().name, item['url']))
        try:
            msg, r = get_binary_content(item['url'], {})
        except:
            logger.info("%s: Unable to get item at: %s" %
                        (threading.current_thread().name, item['url']))
            queue.task_done()
            continue
        if msg:
            logger.warn(msg)
            queue.task_done()
            continue

        sha1_hash = hashlib.sha1(r.content).hexdigest()
        if Audio.objects.filter(sha1=sha1_hash).exists():
            # Simpsons did it! Try the next one.
            logger.info("%s: Item already exists, moving to next item." %
                        threading.current_thread().name)
            queue.task_done()
            continue
        else:
            # New item, onwards!
            logger.info('%s: Adding new document found at: %s' %
                        (threading.current_thread().name, item['url']))
            audio_file = Audio(
                source='H',
                sha1=sha1_hash,
                case_name=item['case_name'],
                download_url=item['url'],
                processing_complete=False,
            )
            if item['judges']:
                audio_file.judges = item['judges']

            court = Court.objects.get(pk=item['court_code'])
            docket = Docket(
                case_name=item['case_name'],
                court=court,
                date_argued=item['date_argued'],
            )
            if item['docket_number']:
                docket.docket_number = item['docket_number']

            # Make and associate the file object
            try:
                cf = ContentFile(r.content)
                extension = get_extension(r.content)
                if extension not in ['.mp3', '.wma']:
                    extension = '.' + item['url'].rsplit('.', 1)[1]
                # See bitbucket issue #215 for why this must be lower-cased.
                file_name = trunc(item['case_name'].lower(), 75) + extension
                audio_file.local_path_original_file.save(file_name, cf,
                                                         save=False)
            except:
                msg = 'Unable to save binary. Deleted document: %s.\n%s' % \
                      (item['case_name'], traceback.format_exc())
                logger.critical(msg)
                queue.task_done()
                continue

            docket.save()
            audio_file.docket = docket
            audio_file.save(index=False)

            random_delay = random.randint(0, 3600)
            process_audio_file.apply_async((audio_file.pk,),
                                           countdown=random_delay)
            logger.info("%s: Successfully added audio file %s: %s" %
                        (threading.current_thread().name, audio_file.pk,
                         audio_file.case_name))