Example #1
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr"""
        docket = self.docket

        # IDs
        out = {
            'id': self.pk,
            'docket_id': self.docket_id,
            'court_id': docket.court_id,
        }

        # Docket. Optional dates are normalized to midnight datetimes, which
        # is the form Solr expects.
        out['docketNumber'] = docket.docket_number
        optional_dates = (
            ('dateArgued', docket.date_argued),
            ('dateReargued', docket.date_reargued),
            ('dateReargumentDenied', docket.date_reargument_denied),
        )
        for solr_key, day in optional_dates:
            if day is not None:
                out[solr_key] = datetime.combine(day, time())

        # Court
        out['court'] = docket.court.full_name
        out['court_citation_string'] = docket.court.citation_string
        out['court_exact'] = docket.court_id  # For faceting

        # Audio File
        out['caseName'] = best_case_name(self)
        out['panel_ids'] = [judge.pk for judge in self.panel.all()]
        out['judge'] = self.judges
        out['file_size_mp3'] = deepgetattr(self, 'local_path_mp3.size', None)
        out['duration'] = self.duration
        out['source'] = self.source
        out['download_url'] = self.download_url
        out['local_path'] = unicode(getattr(self, 'local_path_mp3', None))

        try:
            out['absolute_url'] = self.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s"
                % self.pk
            )

        text_template = loader.get_template('indexes/audio_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)
Example #2
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr"""
        d = self.docket

        def midnight(day):
            # Solr wants datetimes; pad bare dates out to midnight.
            return datetime.combine(day, time())

        # IDs
        out = {
            'id': self.pk,
            'docket_id': self.docket_id,
            'court_id': d.court_id,
        }

        # Docket
        docket_fields = {'docketNumber': d.docket_number}
        if d.date_argued is not None:
            docket_fields['dateArgued'] = midnight(d.date_argued)
        if d.date_reargued is not None:
            docket_fields['dateReargued'] = midnight(d.date_reargued)
        if d.date_reargument_denied is not None:
            docket_fields['dateReargumentDenied'] = midnight(
                d.date_reargument_denied)
        out.update(docket_fields)

        # Court
        out.update({
            'court': d.court.full_name,
            'court_citation_string': d.court.citation_string,
            'court_exact': d.court_id,  # For faceting
        })

        # Audio File
        out.update({
            'caseName': best_case_name(self),
            'panel_ids': [j.pk for j in self.panel.all()],
            'judge': self.judges,
            'file_size_mp3': deepgetattr(self, 'local_path_mp3.size', None),
            'duration': self.duration,
            'source': self.source,
            'download_url': self.download_url,
            'local_path': unicode(getattr(self, 'local_path_mp3', None)),
        })
        try:
            out['absolute_url'] = self.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s" %
                self.pk)

        text_template = loader.get_template('indexes/audio_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)
Example #3
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr.

        Search results are presented as Dockets, but they're indexed as
        RECAPDocument's, which are then grouped back together in search results
        to form Dockets.
        """
        de = self.docket_entry
        docket = de.docket

        # IDs
        out = {
            'id': self.pk,
            'docket_entry_id': de.pk,
            'docket_id': docket.pk,
            'court_id': docket.court.pk,
            'assigned_to_id': getattr(docket.assigned_to, 'pk', None),
            'referred_to_id': getattr(docket.referred_to, 'pk', None),
        }

        # RECAPDocument
        out['document_type'] = self.get_document_type_display()
        out['document_number'] = self.document_number
        out['attachment_number'] = self.attachment_number
        out['is_available'] = self.is_available
        out['page_count'] = self.page_count
        if hasattr(self.filepath_local, 'path'):
            out['filepath_local'] = self.filepath_local.path

        # Docket Entry
        out['description'] = de.description
        if de.entry_number is not None:
            out['entry_number'] = de.entry_number
        if de.date_filed is not None:
            out['entry_date_filed'] = datetime.combine(de.date_filed, time())

        # Docket. Dates are normalized to midnight datetimes for Solr.
        out['docketNumber'] = docket.docket_number
        out['caseName'] = best_case_name(docket)
        out['suitNature'] = docket.nature_of_suit
        out['cause'] = docket.cause
        out['juryDemand'] = docket.jury_demand
        out['jurisdictionType'] = docket.jurisdiction_type
        for solr_key, day in (('dateArgued', docket.date_argued),
                              ('dateFiled', docket.date_filed),
                              ('dateTerminated', docket.date_terminated)):
            if day is not None:
                out[solr_key] = datetime.combine(day, time())
        try:
            out['absolute_url'] = docket.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s"
                % self.pk
            )

        # Judges: prefer the normalized relation, fall back to the raw string.
        if docket.assigned_to is not None:
            out['assignedTo'] = docket.assigned_to.name_full
        elif docket.assigned_to_str is not None:
            out['assignedTo'] = docket.assigned_to_str
        if docket.referred_to is not None:
            out['referredTo'] = docket.referred_to.name_full
        elif docket.referred_to_str is not None:
            out['referredTo'] = docket.referred_to_str

        # Court
        out['court'] = docket.court.full_name
        out['court_exact'] = docket.court_id  # For faceting
        out['court_citation_string'] = docket.court.citation_string

        text_template = loader.get_template('indexes/dockets_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)
Example #4
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr."""
        cluster = self.cluster
        docket = cluster.docket

        # IDs
        out = {
            'id': self.pk,
            'docket_id': docket.pk,
            'cluster_id': cluster.pk,
            'court_id': docket.court.pk,
        }

        # Opinion
        out['cites'] = [opinion.pk for opinion in self.opinions_cited.all()]
        out['author_id'] = getattr(self.author, 'pk', None)
        # 'per_curiam': self.per_curiam,
        out['joined_by_ids'] = [judge.pk for judge in self.joined_by.all()]
        out['type'] = self.type
        out['download_url'] = self.download_url or None
        out['local_path'] = unicode(self.local_path)

        # Cluster
        out['caseName'] = best_case_name(cluster)
        out['caseNameShort'] = cluster.case_name_short
        out['sibling_ids'] = [sibling.pk for sibling in self.siblings.all()]
        out['panel_ids'] = [judge.pk for judge in cluster.panel.all()]
        out['non_participating_judge_ids'] = [
            judge.pk for judge in cluster.non_participating_judges.all()
        ]
        out['judge'] = cluster.judges
        out['lexisCite'] = cluster.lexis_cite
        # Nuke '' and None entries from the citation list.
        out['citation'] = [cite for cite in cluster.citation_list if cite]
        out['neutralCite'] = cluster.neutral_cite
        out['scdb_id'] = cluster.scdb_id
        out['source'] = cluster.source
        out['attorney'] = cluster.attorneys
        out['suitNature'] = cluster.nature_of_suit
        out['citeCount'] = cluster.citation_count
        out['status'] = cluster.get_precedential_status_display()
        out['status_exact'] = cluster.get_precedential_status_display()

        if cluster.date_filed is not None:
            # Midnight, PST
            out['dateFiled'] = datetime.combine(cluster.date_filed, time())
        try:
            out['absolute_url'] = cluster.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url "
                "(court_id: %s, item.pk: %s). Might the court have in_use set "
                "to False?" % (docket.court_id, self.pk)
            )

        # Docket. Optional dates become midnight datetimes for Solr.
        out['docketNumber'] = docket.docket_number
        for solr_key, day in (
            ('dateArgued', docket.date_argued),
            ('dateReargued', docket.date_reargued),
            ('dateReargumentDenied', docket.date_reargument_denied),
        ):
            if day is not None:
                out[solr_key] = datetime.combine(day, time())

        # Court
        out['court'] = docket.court.full_name
        out['court_citation_string'] = docket.court.citation_string
        out['court_exact'] = docket.court_id  # For faceting

        # Load the document text using a template for cleanup and concatenation
        text_template = loader.get_template('indexes/opinion_text.txt')
        out['text'] = text_template.render({
            'item': self,
            'citation_string': cluster.citation_string,
        }).translate(null_map)

        return nuke_nones(out)
Example #5
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr.

        Search results are presented as Dockets, but they're indexed as
        RECAPDocument's, which are then grouped back together in search results
        to form Dockets.
        """
        entry = self.docket_entry
        docket = entry.docket

        def midnight(day):
            # Solr needs datetimes; pad bare dates out to midnight.
            return datetime.combine(day, time())

        # IDs
        out = {
            'id': self.pk,
            'docket_entry_id': entry.pk,
            'docket_id': docket.pk,
            'court_id': docket.court.pk,
            'assigned_to_id': getattr(docket.assigned_to, 'pk', None),
            'referred_to_id': getattr(docket.referred_to, 'pk', None),
        }

        # RECAPDocument
        out.update({
            'short_description': self.description,
            'document_type': self.get_document_type_display(),
            'document_number': self.document_number,
            'attachment_number': self.attachment_number,
            'is_available': self.is_available,
            'page_count': self.page_count,
        })
        if hasattr(self.filepath_local, 'path'):
            out['filepath_local'] = self.filepath_local.path

        # Docket Entry
        out['description'] = entry.description
        if entry.entry_number is not None:
            out['entry_number'] = entry.entry_number
        if entry.date_filed is not None:
            out['entry_date_filed'] = midnight(entry.date_filed)

        # Docket
        out.update({
            'docketNumber': docket.docket_number,
            'caseName': best_case_name(docket),
            'suitNature': docket.nature_of_suit,
            'cause': docket.cause,
            'juryDemand': docket.jury_demand,
            'jurisdictionType': docket.jurisdiction_type,
        })
        if docket.date_argued is not None:
            out['dateArgued'] = midnight(docket.date_argued)
        if docket.date_filed is not None:
            out['dateFiled'] = midnight(docket.date_filed)
        if docket.date_terminated is not None:
            out['dateTerminated'] = midnight(docket.date_terminated)
        try:
            out['absolute_url'] = self.get_absolute_url()
            out['docket_absolute_url'] = docket.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url: %s" %
                self.pk)

        # Judges: prefer the normalized relation, fall back to the raw string.
        if docket.assigned_to is not None:
            out['assignedTo'] = docket.assigned_to.name_full
        elif docket.assigned_to_str is not None:
            out['assignedTo'] = docket.assigned_to_str
        if docket.referred_to is not None:
            out['referredTo'] = docket.referred_to.name_full
        elif docket.referred_to_str is not None:
            out['referredTo'] = docket.referred_to_str

        # Court
        out.update({
            'court': docket.court.full_name,
            'court_exact': docket.court_id,  # For faceting
            'court_citation_string': docket.court.citation_string,
        })

        text_template = loader.get_template('indexes/dockets_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)
Example #6
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr."""
        cluster = self.cluster
        docket = cluster.docket

        # IDs
        out = {
            'id': self.pk,
            'docket_id': docket.pk,
            'cluster_id': cluster.pk,
            'court_id': docket.court.pk,
        }

        # Opinion
        opinion_fields = {
            'cites': [o.pk for o in self.opinions_cited.all()],
            'author_id': getattr(self.author, 'pk', None),
            # 'per_curiam': self.per_curiam,
            'joined_by_ids': [j.pk for j in self.joined_by.all()],
            'type': self.type,
            'download_url': self.download_url or None,
            'local_path': unicode(self.local_path),
        }
        out.update(opinion_fields)

        # Cluster
        cluster_fields = {
            'caseName': best_case_name(cluster),
            'caseNameShort': cluster.case_name_short,
            'sibling_ids': [s.pk for s in self.siblings.all()],
            'panel_ids': [j.pk for j in cluster.panel.all()],
            'non_participating_judge_ids': [
                j.pk for j in cluster.non_participating_judges.all()
            ],
            'judge': cluster.judges,
            'lexisCite': cluster.lexis_cite,
            # Nuke '' and None entries.
            'citation': [c for c in cluster.citation_list if c],
            'neutralCite': cluster.neutral_cite,
            'scdb_id': cluster.scdb_id,
            'source': cluster.source,
            'attorney': cluster.attorneys,
            'suitNature': cluster.nature_of_suit,
            'citeCount': cluster.citation_count,
            'status': cluster.get_precedential_status_display(),
            'status_exact': cluster.get_precedential_status_display(),
        }
        out.update(cluster_fields)
        if cluster.date_filed is not None:
            # Midnight, PST
            out['dateFiled'] = datetime.combine(cluster.date_filed, time())
        try:
            out['absolute_url'] = cluster.get_absolute_url()
        except NoReverseMatch:
            raise InvalidDocumentError(
                "Unable to save to index due to missing absolute_url "
                "(court_id: %s, item.pk: %s). Might the court have in_use set "
                "to False?" % (docket.court_id, self.pk))

        # Docket
        out['docketNumber'] = docket.docket_number
        if docket.date_argued is not None:
            out['dateArgued'] = datetime.combine(docket.date_argued, time())
        if docket.date_reargued is not None:
            out['dateReargued'] = datetime.combine(
                docket.date_reargued, time())
        if docket.date_reargument_denied is not None:
            out['dateReargumentDenied'] = datetime.combine(
                docket.date_reargument_denied, time())

        # Court
        out.update({
            'court': docket.court.full_name,
            'court_citation_string': docket.court.citation_string,
            'court_exact': docket.court_id,  # For faceting
        })

        # Load the document text using a template for cleanup and
        # concatenation.
        text_template = loader.get_template('indexes/opinion_text.txt')
        out['text'] = text_template.render({
            'item': self,
            'citation_string': cluster.citation_string,
        }).translate(null_map)

        return nuke_nones(out)
Example #7
0
    def as_search_dict(self):
        """Create a dict that can be ingested by Solr.

        Builds the person document: identity fields, display values for
        coded fields (race, gender, state), related collections (aliases,
        educations, political affiliations, ABA ratings), optional birth and
        death dates, and — when the person holds any positions — a large set
        of joined position values.

        Falsy values are stripped by nuke_nones() before returning, and the
        'text' field is rendered from the person_text.txt template with
        null characters translated out via null_map.
        """
        out = {
            'id': self.pk,
            'fjc_id': self.fjc_id,
            'cl_id': self.cl_id,
            'alias_ids': [alias.pk for alias in self.aliases.all()],
            'races': [r.get_race_display() for r in self.race.all()],
            'gender': self.get_gender_display(),
            'religion': self.religion,
            'name': self.name_full,
            'name_reverse': self.name_full_reverse,
            'date_granularity_dob': self.date_granularity_dob,
            'date_granularity_dod': self.date_granularity_dod,
            'dob_city': self.dob_city,
            'dob_state': self.get_dob_state_display(),
            'dob_state_id': self.dob_state,
            'absolute_url': self.get_absolute_url(),
            'school': [e.school.name for e in self.educations.all()],
            'political_affiliation': [
                pa.get_political_party_display() for pa in
                self.political_affiliations.all() if pa
            ],
            'political_affiliation_id': [
                pa.political_party for pa in
                self.political_affiliations.all() if pa
            ],
            'aba_rating': [
                r.get_rating_display() for r in
                self.aba_ratings.all() if r
            ],
        }

        # Dates: normalized to midnight datetimes, which Solr expects.
        if self.date_dob is not None:
            out['dob'] = datetime.combine(self.date_dob, time())
        if self.date_dod is not None:
            out['dod'] = datetime.combine(self.date_dod, time())

        # Joined Values. Brace yourself.
        positions = self.positions.all()
        # Truth-testing the queryset evaluates and caches it, so the many
        # comprehensions below reuse the cached results. The previous
        # `positions.count() > 0` check fired a redundant COUNT(*) query
        # even though the full result set was fetched right afterward.
        if positions:
            p_out = {
                'court': [p.court.short_name for p in positions if p.court],
                'court_exact': [p.court.pk for p in positions if p.court],
                'position_type': [p.get_position_type_display() for p in
                                  positions],
                'appointer': [p.appointer.person.name_full_reverse for p in
                              positions if p.appointer],
                'supervisor': [p.supervisor.name_full_reverse for p in
                               positions if p.supervisor],
                'predecessor': [p.predecessor.name_full_reverse for p in
                                positions if p.predecessor],
                'date_nominated': solr_list(positions, 'date_nominated'),
                'date_elected': solr_list(positions, 'date_elected'),
                'date_recess_appointment': solr_list(
                    positions, 'date_recess_appointment',
                ),
                'date_referred_to_judicial_committee': solr_list(
                    positions, 'date_referred_to_judicial_committee',
                ),
                'date_judicial_committee_action': solr_list(
                    positions, 'date_judicial_committee_action',
                ),
                'date_hearing': solr_list(positions, 'date_hearing'),
                'date_confirmation': solr_list(positions, 'date_confirmation'),
                'date_start': solr_list(positions, 'date_start'),
                'date_granularity_start': solr_list(
                    positions, 'date_granularity_start',
                ),
                'date_retirement': solr_list(
                    positions, 'date_retirement',
                ),
                'date_termination': solr_list(
                    positions, 'date_termination',
                ),
                'date_granularity_termination': solr_list(
                    positions, 'date_granularity_termination',
                ),
                'judicial_committee_action': [
                    p.get_judicial_committee_action_display() for p in
                    positions if p.judicial_committee_action
                ],
                'nomination_process': [
                    p.get_nomination_process_display() for p in
                    positions if p.nomination_process
                ],
                'selection_method': [
                    p.get_how_selected_display() for p in
                    positions if p.how_selected
                ],
                'selection_method_id': [
                    p.how_selected for p in
                    positions if p.how_selected
                ],
                'termination_reason': [
                    p.get_termination_reason_display() for p in
                    positions if p.termination_reason
                ],
            }
            out.update(p_out)

        text_template = loader.get_template('indexes/person_text.txt')
        out['text'] = text_template.render({'item': self}).translate(null_map)

        return nuke_nones(out)