def as_search_dict(self):
    """Create a dict that can be ingested by Solr"""
    # Identifiers for this item and its parent records.
    search_doc = {
        'id': self.pk,
        'docket_id': self.docket_id,
        'court_id': self.docket.court_id,
    }

    # Docket fields: the docket number plus any argument dates. Solr
    # wants datetimes, so each bare date is lifted to midnight.
    search_doc['docketNumber'] = self.docket.docket_number
    optional_dates = (
        ('dateArgued', self.docket.date_argued),
        ('dateReargued', self.docket.date_reargued),
        ('dateReargumentDenied', self.docket.date_reargument_denied),
    )
    for solr_key, d in optional_dates:
        if d is not None:
            search_doc[solr_key] = datetime.combine(d, time())

    # Court fields
    search_doc['court'] = self.docket.court.full_name
    search_doc['court_citation_string'] = self.docket.court.citation_string
    search_doc['court_exact'] = self.docket.court_id  # For faceting

    # Fields drawn from the audio file itself.
    search_doc['caseName'] = best_case_name(self)
    search_doc['panel_ids'] = [judge.pk for judge in self.panel.all()]
    search_doc['judge'] = self.judges
    search_doc['file_size_mp3'] = deepgetattr(self, 'local_path_mp3.size',
                                              None)
    search_doc['duration'] = self.duration
    search_doc['source'] = self.source
    search_doc['download_url'] = self.download_url
    search_doc['local_path'] = unicode(getattr(self, 'local_path_mp3', None))

    try:
        search_doc['absolute_url'] = self.get_absolute_url()
    except NoReverseMatch:
        raise InvalidDocumentError(
            "Unable to save to index due to missing absolute_url: %s" %
            self.pk)

    # Render the full-text field and strip characters Solr rejects.
    text_template = loader.get_template('indexes/audio_text.txt')
    search_doc['text'] = text_template.render(
        {'item': self}).translate(null_map)

    return normalize_search_dicts(search_doc)
def as_search_dict(self):
    """Create a dict that can be ingested by Solr"""

    def _midnight(d):
        # Solr stores datetimes; promote a bare date to midnight.
        return datetime.combine(d, time())

    docket = self.docket
    court = docket.court

    out = {
        # IDs
        'id': self.pk,
        'docket_id': self.docket_id,
        'court_id': docket.court_id,
        # Docket
        'docketNumber': docket.docket_number,
        # Court
        'court': court.full_name,
        'court_citation_string': court.citation_string,
        'court_exact': docket.court_id,  # For faceting
        # Audio file
        'caseName': best_case_name(self),
        'panel_ids': [judge.pk for judge in self.panel.all()],
        'judge': self.judges,
        'file_size_mp3': deepgetattr(self, 'local_path_mp3.size', None),
        'duration': self.duration,
        'source': self.source,
        'download_url': self.download_url,
        'local_path': unicode(getattr(self, 'local_path_mp3', None)),
    }

    # Argument dates are optional; only index the ones that exist.
    if docket.date_argued is not None:
        out['dateArgued'] = _midnight(docket.date_argued)
    if docket.date_reargued is not None:
        out['dateReargued'] = _midnight(docket.date_reargued)
    if docket.date_reargument_denied is not None:
        out['dateReargumentDenied'] = _midnight(
            docket.date_reargument_denied)

    try:
        out['absolute_url'] = self.get_absolute_url()
    except NoReverseMatch:
        raise InvalidDocumentError(
            "Unable to save to index due to missing absolute_url: %s"
            % self.pk
        )

    # Render the full-text blob, stripping characters Solr rejects.
    text_template = loader.get_template('indexes/audio_text.txt')
    out['text'] = text_template.render({'item': self}).translate(null_map)
    return normalize_search_dicts(out)
def as_search_dict(self):
    """Create a dict that can be ingested by Solr"""
    docket = self.docket

    # IDs, docket, court, and audio-file fields in one literal.
    out = {
        "id": self.pk,
        "docket_id": self.docket_id,
        "court_id": docket.court_id,
        "docketNumber": docket.docket_number,
        "court": docket.court.full_name,
        "court_citation_string": docket.court.citation_string,
        "court_exact": docket.court_id,  # For faceting
        "caseName": best_case_name(self),
        "panel_ids": [judge.pk for judge in self.panel.all()],
        "judge": self.judges,
        "file_size_mp3": deepgetattr(self, "local_path_mp3.size", None),
        "duration": self.duration,
        "source": self.source,
        "download_url": self.download_url,
        "local_path": str(getattr(self, "local_path_mp3", None)),
    }

    # Argument dates are optional; add each one only when present,
    # converted to a datetime via midnight_pst.
    optional_dates = (
        ("dateArgued", docket.date_argued),
        ("dateReargued", docket.date_reargued),
        ("dateReargumentDenied", docket.date_reargument_denied),
    )
    for solr_key, d in optional_dates:
        if d is not None:
            out[solr_key] = midnight_pst(d)

    try:
        out["absolute_url"] = self.get_absolute_url()
    except NoReverseMatch:
        raise InvalidDocumentError(
            "Unable to save to index due to missing absolute_url: %s"
            % self.pk
        )

    # Render the full-text blob, stripping characters Solr rejects.
    text_template = loader.get_template("indexes/audio_text.txt")
    out["text"] = text_template.render({"item": self}).translate(null_map)
    return normalize_search_dicts(out)
def as_search_dict(self):
    """Create a dict that can be ingested by Solr.

    Flattens the person's own fields, their related records (races,
    educations, political affiliations, ABA ratings), and every one of
    their positions into a single flat dict, then renders the full-text
    field and normalizes the result for indexing.
    """
    out = {
        'id': self.pk,
        'fjc_id': self.fjc_id,
        'cl_id': self.cl_id,
        'alias_ids': [alias.pk for alias in self.aliases.all()],
        'races': [r.get_race_display() for r in self.race.all()],
        'gender': self.get_gender_display(),
        'religion': self.religion,
        'name': self.name_full,
        'name_reverse': self.name_full_reverse,
        'date_granularity_dob': self.date_granularity_dob,
        'date_granularity_dod': self.date_granularity_dod,
        'dob_city': self.dob_city,
        'dob_state': self.get_dob_state_display(),
        'dob_state_id': self.dob_state,
        'absolute_url': self.get_absolute_url(),
        'school': [e.school.name for e in self.educations.all()],
        'political_affiliation': [
            pa.get_political_party_display() for pa in
            self.political_affiliations.all() if pa
        ],
        'political_affiliation_id': [
            pa.political_party for pa in
            self.political_affiliations.all() if pa
        ],
        'aba_rating': [
            r.get_rating_display() for r in
            self.aba_ratings.all() if r
        ],
    }

    # Dates: Solr stores datetimes, so lift bare dates to midnight.
    if self.date_dob is not None:
        out['dob'] = datetime.combine(self.date_dob, time())
    if self.date_dod is not None:
        out['dod'] = datetime.combine(self.date_dod, time())

    # Joined Values. Brace yourself.
    # Materialize the queryset once. The previous `positions.count() > 0`
    # check issued an extra COUNT(*) query before the same queryset was
    # iterated by every comprehension and solr_list() call below.
    positions = list(self.positions.all())
    if positions:
        p_out = {
            'court': [p.court.short_name for p in positions if p.court],
            'court_exact': [p.court.pk for p in positions if p.court],
            'position_type': [p.get_position_type_display()
                              for p in positions],
            'appointer': [p.appointer.person.name_full_reverse
                          for p in positions if p.appointer],
            'supervisor': [p.supervisor.name_full_reverse
                           for p in positions if p.supervisor],
            'predecessor': [p.predecessor.name_full_reverse
                            for p in positions if p.predecessor],
            'date_nominated': solr_list(positions, 'date_nominated'),
            'date_elected': solr_list(positions, 'date_elected'),
            'date_recess_appointment': solr_list(
                positions,
                'date_recess_appointment',
            ),
            'date_referred_to_judicial_committee': solr_list(
                positions,
                'date_referred_to_judicial_committee',
            ),
            'date_judicial_committee_action': solr_list(
                positions,
                'date_judicial_committee_action',
            ),
            'date_hearing': solr_list(positions, 'date_hearing'),
            'date_confirmation': solr_list(positions, 'date_confirmation'),
            'date_start': solr_list(positions, 'date_start'),
            'date_granularity_start': solr_list(
                positions,
                'date_granularity_start',
            ),
            'date_retirement': solr_list(
                positions,
                'date_retirement',
            ),
            'date_termination': solr_list(
                positions,
                'date_termination',
            ),
            'date_granularity_termination': solr_list(
                positions,
                'date_granularity_termination',
            ),
            'judicial_committee_action': [
                p.get_judicial_committee_action_display() for p in
                positions if p.judicial_committee_action
            ],
            'nomination_process': [
                p.get_nomination_process_display() for p in
                positions if p.nomination_process
            ],
            'selection_method': [
                p.get_how_selected_display() for p in
                positions if p.how_selected
            ],
            'selection_method_id': [
                p.how_selected for p in
                positions if p.how_selected
            ],
            'termination_reason': [
                p.get_termination_reason_display() for p in
                positions if p.termination_reason
            ],
        }
        out.update(p_out)

    # Render the full-text blob, stripping characters Solr rejects.
    text_template = loader.get_template('indexes/person_text.txt')
    out['text'] = text_template.render({'item': self}).translate(null_map)
    return normalize_search_dicts(out)
def as_search_dict(self):
    """Create a dict that can be ingested by Solr.

    Flattens the person's own fields, their related records (races,
    educations, political affiliations, ABA ratings), and every one of
    their positions into a single flat dict, then renders the full-text
    field and normalizes the result for indexing.
    """
    out = {
        'id': self.pk,
        'fjc_id': self.fjc_id,
        'cl_id': self.cl_id,
        'alias_ids': [alias.pk for alias in self.aliases.all()],
        'races': [r.get_race_display() for r in self.race.all()],
        'gender': self.get_gender_display(),
        'religion': self.religion,
        'name': self.name_full,
        'name_reverse': self.name_full_reverse,
        'date_granularity_dob': self.date_granularity_dob,
        'date_granularity_dod': self.date_granularity_dod,
        'dob_city': self.dob_city,
        'dob_state': self.get_dob_state_display(),
        'dob_state_id': self.dob_state,
        'absolute_url': self.get_absolute_url(),
        'school': [e.school.name for e in self.educations.all()],
        'political_affiliation': [
            pa.get_political_party_display() for pa in
            self.political_affiliations.all() if pa
        ],
        'political_affiliation_id': [
            pa.political_party for pa in
            self.political_affiliations.all() if pa
        ],
        'aba_rating': [
            r.get_rating_display() for r in
            self.aba_ratings.all() if r
        ],
    }

    # Dates: Solr stores datetimes, so lift bare dates to midnight.
    if self.date_dob is not None:
        out['dob'] = datetime.combine(self.date_dob, time())
    if self.date_dod is not None:
        out['dod'] = datetime.combine(self.date_dod, time())

    # Joined Values. Brace yourself.
    # Materialize the queryset once. The previous `positions.count() > 0`
    # check issued an extra COUNT(*) query before the same queryset was
    # iterated by every comprehension and solr_list() call below.
    positions = list(self.positions.all())
    if positions:
        p_out = {
            'court': [p.court.short_name for p in positions if p.court],
            'court_exact': [p.court.pk for p in positions if p.court],
            'position_type': [p.get_position_type_display()
                              for p in positions],
            'appointer': [p.appointer.person.name_full_reverse
                          for p in positions if p.appointer],
            'supervisor': [p.supervisor.name_full_reverse
                           for p in positions if p.supervisor],
            'predecessor': [p.predecessor.name_full_reverse
                            for p in positions if p.predecessor],
            'date_nominated': solr_list(positions, 'date_nominated'),
            'date_elected': solr_list(positions, 'date_elected'),
            'date_recess_appointment': solr_list(
                positions,
                'date_recess_appointment',
            ),
            'date_referred_to_judicial_committee': solr_list(
                positions,
                'date_referred_to_judicial_committee',
            ),
            'date_judicial_committee_action': solr_list(
                positions,
                'date_judicial_committee_action',
            ),
            'date_hearing': solr_list(positions, 'date_hearing'),
            'date_confirmation': solr_list(positions, 'date_confirmation'),
            'date_start': solr_list(positions, 'date_start'),
            'date_granularity_start': solr_list(
                positions,
                'date_granularity_start',
            ),
            'date_retirement': solr_list(
                positions,
                'date_retirement',
            ),
            'date_termination': solr_list(
                positions,
                'date_termination',
            ),
            'date_granularity_termination': solr_list(
                positions,
                'date_granularity_termination',
            ),
            'judicial_committee_action': [
                p.get_judicial_committee_action_display() for p in
                positions if p.judicial_committee_action
            ],
            'nomination_process': [
                p.get_nomination_process_display() for p in
                positions if p.nomination_process
            ],
            'selection_method': [
                p.get_how_selected_display() for p in
                positions if p.how_selected
            ],
            'selection_method_id': [
                p.how_selected for p in
                positions if p.how_selected
            ],
            'termination_reason': [
                p.get_termination_reason_display() for p in
                positions if p.termination_reason
            ],
        }
        out.update(p_out)

    # Render the full-text blob, stripping characters Solr rejects.
    text_template = loader.get_template('indexes/person_text.txt')
    out['text'] = text_template.render({'item': self}).translate(null_map)
    return normalize_search_dicts(out)