Example #1
    def as_search_dict(self) -> Dict[str, Union[int, List[int], str]]:
        """Create a dict that can be ingested by Solr"""
        # IDs
        out = {
            "id": self.pk,
            "docket_id": self.docket_id,
            "court_id": self.docket.court_id,
        }

        # Docket
        docket = {"docketNumber": self.docket.docket_number}
        if self.docket.date_argued is not None:
            docket["dateArgued"] = midnight_pst(self.docket.date_argued)
        if self.docket.date_reargued is not None:
            docket["dateReargued"] = midnight_pst(self.docket.date_reargued)
        if self.docket.date_reargument_denied is not None:
            docket["dateReargumentDenied"] = midnight_pst(
                self.docket.date_reargument_denied)
        out.update(docket)

        # Court
        out.update({
            "court": self.docket.court.full_name,
            "court_citation_string": self.docket.court.citation_string,
            "court_exact": self.docket.court_id,  # For faceting
        })

        # Audio File
        out.update({
            "caseName": best_case_name(self),
            "panel_ids": [judge.pk for judge in self.panel.all()],
            "judge": self.judges,
            "file_size_mp3": deepgetattr(self, "local_path_mp3.size", None),
            "duration": self.duration,
            "source": self.source,
            "download_url": self.download_url,
            "local_path": deepgetattr(self, "local_path_mp3.name", None),
        })
        try:
            out["absolute_url"] = self.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                f"Unable to save to index due to missing absolute_url: {self.pk}"
            )

        text_template = loader.get_template("indexes/audio_text.txt")
        out["text"] = text_template.render({"item": self}).translate(null_map)

        return normalize_search_dicts(out)
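The method returns a flat dict meant to be handed to a Solr client. As a rough illustration of how such a dict could be ingested (a sketch only, not part of the original code; the pysolr client and the core URL are assumptions):

import pysolr  # assumed client library; not used in the snippets above

def index_audio(audio, solr_url="http://localhost:8983/solr/audio"):
    """Send one item's search dict to a Solr core (illustrative sketch)."""
    solr = pysolr.Solr(solr_url, timeout=10)
    solr.add([audio.as_search_dict()])  # pysolr accepts a list of plain dicts
    solr.commit()

A real indexing pipeline would likely batch many such dicts into a single add() call rather than committing one document at a time.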
Example #2
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr"""
        # IDs
        out = {
            'id': self.pk,
            'docket_id': self.docket_id,
            'court_id': self.docket.court_id,
        }

        # Docket
        docket = {'docketNumber': self.docket.docket_number}
        if self.docket.date_argued is not None:
            docket['dateArgued'] = midnight_pst(self.docket.date_argued)
        if self.docket.date_reargued is not None:
            docket['dateReargued'] = midnight_pst(self.docket.date_reargued)
        if self.docket.date_reargument_denied is not None:
            docket['dateReargumentDenied'] = midnight_pst(
                self.docket.date_reargument_denied)
        out.update(docket)

        # Court
        out.update({
            'court': self.docket.court.full_name,
            'court_citation_string': self.docket.court.citation_string,
            'court_exact': self.docket.court_id,  # For faceting
        })

        # Audio File
        out.update({
            'caseName': best_case_name(self),
            'panel_ids': [judge.pk for judge in self.panel.all()],
            'judge': self.judges,
            'file_size_mp3': deepgetattr(self, 'local_path_mp3.size', None),
            'duration': self.duration,
            'source': self.source,
            'download_url': self.download_url,
            'local_path': unicode(getattr(self, 'local_path_mp3', None)),
        })
        try:
            out['absolute_url'] = self.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s" %
                self.pk)

        text_template = loader.get_template('indexes/audio_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return normalize_search_dicts(out)
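The file-size lookup in the first two examples goes through a deepgetattr helper whose definition is not included in these snippets. A plausible minimal implementation, assuming it simply walks a dotted attribute path and falls back to a default when any link in the chain is missing:

from functools import reduce

def deepgetattr(obj, attr_path, default=None):
    """Resolve a dotted attribute path such as 'local_path_mp3.size'.

    Sketch only: the helper used above is not shown, so this assumes it
    returns `default` whenever an attribute in the chain is absent.
    """
    try:
        return reduce(getattr, attr_path.split("."), obj)
    except AttributeError:
        return default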
Example #3
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr.

        Search results are presented as Dockets, but they're indexed as
        RECAPDocuments, which are then grouped back together in search results
        to form Dockets.
        """
        # IDs
        out = {
            'id': self.pk,
            'docket_entry_id': self.docket_entry.pk,
            'docket_id': self.docket_entry.docket.pk,
            'court_id': self.docket_entry.docket.court.pk,
            'assigned_to_id': getattr(
                self.docket_entry.docket.assigned_to, 'pk', None),
            'referred_to_id': getattr(
                self.docket_entry.docket.referred_to, 'pk', None),
        }

        # RECAPDocument
        out.update({
            'short_description': self.description,
            'document_type': self.get_document_type_display(),
            'document_number': self.document_number,
            'attachment_number': self.attachment_number,
            'is_available': self.is_available,
            'page_count': self.page_count,
        })
        if hasattr(self.filepath_local, 'path'):
            out['filepath_local'] = self.filepath_local.path

        # Docket Entry
        out['description'] = self.docket_entry.description
        if self.docket_entry.entry_number is not None:
            out['entry_number'] = self.docket_entry.entry_number
        if self.docket_entry.date_filed is not None:
            out['entry_date_filed'] = datetime.combine(
                self.docket_entry.date_filed, time())

        # Docket
        out.update({
            'docketNumber': self.docket_entry.docket.docket_number,
            'caseName': best_case_name(self.docket_entry.docket),
            'suitNature': self.docket_entry.docket.nature_of_suit,
            'cause': self.docket_entry.docket.cause,
            'juryDemand': self.docket_entry.docket.jury_demand,
            'jurisdictionType': self.docket_entry.docket.jurisdiction_type,
        })
        if self.docket_entry.docket.date_argued is not None:
            out['dateArgued'] = datetime.combine(
                self.docket_entry.docket.date_argued, time())
        if self.docket_entry.docket.date_filed is not None:
            out['dateFiled'] = datetime.combine(
                self.docket_entry.docket.date_filed, time())
        if self.docket_entry.docket.date_terminated is not None:
            out['dateTerminated'] = datetime.combine(
                self.docket_entry.docket.date_terminated, time())
        try:
            out['absolute_url'] = self.get_absolute_url()
            out['docket_absolute_url'] = (
                self.docket_entry.docket.get_absolute_url()
            )
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s" %
                self.pk)

        # Judges
        if self.docket_entry.docket.assigned_to is not None:
            out['assignedTo'] = self.docket_entry.docket.assigned_to.name_full
        elif self.docket_entry.docket.assigned_to_str is not None:
            out['assignedTo'] = self.docket_entry.docket.assigned_to_str
        if self.docket_entry.docket.referred_to is not None:
            out['referredTo'] = self.docket_entry.docket.referred_to.name_full
        elif self.docket_entry.docket.referred_to_str is not None:
            out['referredTo'] = self.docket_entry.docket.referred_to_str

        # Court
        out.update({
            'court': self.docket_entry.docket.court.full_name,
            'court_exact': self.docket_entry.docket.court_id,  # For faceting
            'court_citation_string':
                self.docket_entry.docket.court.citation_string,
        })

        text_template = loader.get_template('indexes/dockets_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)
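Unlike the first two examples, this one finishes with nuke_nones(out) rather than normalize_search_dicts(out). The helper's body is not shown in these snippets; presumably it strips empty values so Solr never receives explicit nulls. A minimal sketch under that assumption:

def nuke_nones(d):
    """Return a copy of `d` without None values (assumed behavior; the
    real helper is not included in these examples)."""
    return {k: v for k, v in d.items() if v is not None}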
Example #4
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr."""
        # IDs
        out = {
            'id': self.pk,
            'docket_id': self.cluster.docket.pk,
            'cluster_id': self.cluster.pk,
            'court_id': self.cluster.docket.court.pk
        }

        # Opinion
        out.update({
            'cites': [opinion.pk for opinion in self.opinions_cited.all()],
            'author_id': getattr(self.author, 'pk', None),
            # 'per_curiam': self.per_curiam,
            'joined_by_ids': [judge.pk for judge in self.joined_by.all()],
            'type': self.type,
            'download_url': self.download_url or None,
            'local_path': unicode(self.local_path),
        })

        # Cluster
        out.update({
            'caseName': best_case_name(self.cluster),
            'caseNameShort': self.cluster.case_name_short,
            'sibling_ids': [sibling.pk for sibling in self.siblings.all()],
            'panel_ids': [judge.pk for judge in self.cluster.panel.all()],
            'non_participating_judge_ids': [
                judge.pk
                for judge in self.cluster.non_participating_judges.all()
            ],
            'judge': self.cluster.judges,
            'lexisCite': self.cluster.lexis_cite,
            'citation': [cite for cite in self.cluster.citation_list
                         if cite],  # Nuke '' and None
            'neutralCite': self.cluster.neutral_cite,
            'scdb_id': self.cluster.scdb_id,
            'source': self.cluster.source,
            'attorney': self.cluster.attorneys,
            'suitNature': self.cluster.nature_of_suit,
            'citeCount': self.cluster.citation_count,
            'status': self.cluster.get_precedential_status_display(),
            'status_exact': self.cluster.get_precedential_status_display(),
        })
        if self.cluster.date_filed is not None:
            out['dateFiled'] = datetime.combine(self.cluster.date_filed,
                                                time())  # Midnight, PST
        try:
            out['absolute_url'] = self.cluster.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url "
                "(court_id: %s, item.pk: %s). Might the court have in_use set "
                "to False?" % (self.cluster.docket.court_id, self.pk))

        # Docket
        docket = {'docketNumber': self.cluster.docket.docket_number}
        if self.cluster.docket.date_argued is not None:
            docket['dateArgued'] = datetime.combine(
                self.cluster.docket.date_argued,
                time(),
            )
        if self.cluster.docket.date_reargued is not None:
            docket['dateReargued'] = datetime.combine(
                self.cluster.docket.date_reargued,
                time(),
            )
        if self.cluster.docket.date_reargument_denied is not None:
            docket['dateReargumentDenied'] = datetime.combine(
                self.cluster.docket.date_reargument_denied,
                time(),
            )
        out.update(docket)

        court = {
            'court': self.cluster.docket.court.full_name,
            'court_citation_string': self.cluster.docket.court.citation_string,
            'court_exact': self.cluster.docket.court_id,  # For faceting
        }
        out.update(court)

        # Load the document text using a template for cleanup and concatenation
        text_template = loader.get_template('indexes/opinion_text.txt')
        out['text'] = text_template.render({
            'item': self,
            'citation_string': self.cluster.citation_string,
        }).translate(null_map)

        return nuke_nones(out)
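Every example ends by rendering a text template and passing the result through .translate(null_map). null_map itself is not defined in these snippets; a plausible definition is a translation table that deletes control characters, which Solr's XML update handler rejects:

# Assumed definition of null_map: a str.translate table mapping control
# characters (illegal in Solr's XML update messages) to None, i.e. delete.
null_map = dict.fromkeys(
    ch for ch in range(0x20)
    if ch not in (0x09, 0x0A, 0x0D)  # keep tab, LF, and CR
)

# Example: "foo\x00bar".translate(null_map) -> "foobar"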