def person_cosponsors(request, pk):
    # Load the cosponsors.
    from bill.models import Cosponsor
    person = get_object_or_404(Person, pk=pk)
    cosponsors = Cosponsor.objects.filter(bill__sponsor=person, withdrawn=None)\
        .prefetch_related("bill", "bill__terms", "person", "person__roles")

    # Pre-fetch all of the top-terms.
    from bill.models import BillTerm
    top_terms = set(BillTerm.get_top_term_ids())

    # Aggregate.
    total = 0
    from collections import defaultdict
    ret = defaultdict(lambda: {
        "total": 0,
        "by_issue": defaultdict(lambda: 0),
    })
    for cosp in cosponsors:
        total += 1
        ret[cosp.person]["total"] += 1
        for t in cosp.bill.terms.all():
            if t.id in top_terms:
                ret[cosp.person]["by_issue"][t] += 1
        if "first_date" not in ret[cosp.person] or cosp.joined < ret[cosp.person]["first_date"]:
            ret[cosp.person]["first_date"] = cosp.joined
        if "last_date" not in ret[cosp.person] or cosp.joined > ret[cosp.person]["last_date"]:
            ret[cosp.person]["last_date"] = cosp.joined

    # Sort.
    for info in ret.values():
        info['by_issue'] = sorted(info['by_issue'].items(), key=lambda kv: kv[1], reverse=True)
    ret = sorted(ret.items(), key=lambda kv: (kv[1]['total'], kv[1]['last_date'], kv[0].sortname), reverse=True)

    # Total bills, date range.
    from bill.models import Bill
    total_bills = Bill.objects.filter(sponsor=person).count()
    date_range = (None, None)
    if len(ret) > 0:
        date_range = (min(r["first_date"] for p, r in ret),
                      max(r["last_date"] for p, r in ret))

    return {
        "person": person,
        "cosponsors": ret,
        "total": total,
        "total_bills": total_bills,
        "date_range": date_range,
    }
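# Note: person_cosponsors returns a plain context dict rather than an HttpResponse, which
# suggests it is wrapped by a template-rendering decorator defined elsewhere in the project.
# The sketch below is a hypothetical, minimal stand-in for such a decorator; the decorator
# name and template path are assumptions for illustration, not taken from this file.
def render_to(template_name):
    # Hypothetical decorator: render the dict returned by a view with the given template.
    from functools import wraps
    from django.shortcuts import render
    def decorator(view):
        @wraps(view)
        def wrapper(request, *args, **kwargs):
            context = view(request, *args, **kwargs)
            return render(request, template_name, context)
        return wrapper
    return decorator

# Hypothetical usage (template path is made up):
#   @render_to("person/cosponsors.html")
#   def person_cosponsors(request, pk): ...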
def build_info():
    if re.match(r"\d", pk):
        person = get_object_or_404(Person, pk=pk)
    else:
        # support bioguide IDs for me
        person = get_object_or_404(Person, bioguideid=pk)

    # current role
    role = person.get_current_role()
    if role:
        active_role = True
    else:
        active_role = False
        try:
            role = person.roles.order_by('-enddate')[0]
        except IndexError:
            role = None

    # photo
    photo_url, photo_credit = person.get_photo()

    # analysis
    analysis_data = analysis.load_data(person)

    try:
        has_session_stats = person.get_session_stats('2016')
    except:
        # Not everyone has current stats, obviously. They may have stats
        # corresponding to their most recent role. Since stats are a
        # session behind, even-year stats might not correspond to
        # a legislator's most recent role, which is why I hard-coded
        # the current session stats above.
        has_session_stats = False
        if role:
            try:
                has_session_stats = role.get_most_recent_session_stats()
            except:
                pass

    links = []
    if role.current:
        if role.website: links.append(("%s's Official Website" % person.lastname, role.website, "fa fa-external-link"))
        if person.twitterid: links.append(("@" + person.twitterid, "http://twitter.com/" + person.twitterid, "fa fa-twitter"))
    if person.osid: links.append(("OpenSecrets", "http://www.opensecrets.org/politicians/summary.php?cid=" + person.osid, "fa fa-money"))
    if person.pvsid: links.append(("VoteSmart", "http://votesmart.org/candidate/" + person.pvsid, "fa fa-th-list"))
    if person.bioguideid: links.append(("Bioguide", "http://bioguide.congress.gov/scripts/biodisplay.pl?index=" + person.bioguideid, "fa fa-user"))
    if person.cspanid: links.append(("C-SPAN", "http://www.c-spanvideo.org/person/" + str(person.cspanid), "fa fa-youtube-play"))

    # Get a break down of the top terms this person's sponsored bills fall into,
    # looking only at the most recent five years of bills.
    from bill.models import BillTerm
    from datetime import datetime, timedelta
    most_recent_bill = person.sponsored_bills.order_by("-introduced_date").first()
    bills_by_subject_counts = list(person.sponsored_bills.filter(
        terms__id__in=BillTerm.get_top_term_ids(),
        introduced_date__gt=(most_recent_bill.introduced_date if most_recent_bill else datetime.now())-timedelta(days=5*365.25))\
        .values("terms")\
        .annotate(count=Count('id')).order_by('-count')\
        .filter(count__gt=1)\
        [0:8])
    terms = BillTerm.objects.in_bulk(item["terms"] for item in bills_by_subject_counts)
    total_count = sum(item["count"] for item in bills_by_subject_counts)
    while len(bills_by_subject_counts) > 2 and bills_by_subject_counts[-1]["count"] < bills_by_subject_counts[0]["count"]/8:
        bills_by_subject_counts.pop(-1)
    for item in bills_by_subject_counts:
        item["term"] = terms[item["terms"]]
        item["pct"] = int(round(float(item["count"]) / total_count * 100))
        del item["terms"]

    # Missed vote explanations from ProPublica
    try:
        vote_explanations = http_rest_json("https://projects.propublica.org/explanations/api/members/%s.json" % person.bioguideid)
    except:
        # squash all errors
        vote_explanations = { }

    return {
        'person': person,
        'role': role,
        'active_role': active_role,
        'active_congressional_role': active_role and role.role_type in (RoleType.senator, RoleType.representative),
        'photo': photo_url,
        'photo_credit': photo_credit,
        'links': links,
        'analysis_data': analysis_data,
        'enacted_bills': [b for b in person.sponsored_bills.order_by('-current_status_date') if b.was_enacted_ex()],
        'recent_bills': person.sponsored_bills.all().order_by('-introduced_date')[0:7],
        'committeeassignments': get_committee_assignments(person),
        'feed': person.get_feed(),
        'has_session_stats': has_session_stats,
        'bill_subject_areas': bills_by_subject_counts,
        'vote_explanations': vote_explanations,
        'key_votes': load_key_votes(person),
    }
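# The subject-area breakdown above turns Django's .values("terms").annotate(count=...) rows
# into {"term", "count", "pct"} dicts. The toy sketch below reproduces the same reshaping on
# hard-coded rows with no database involved; the row data and term names are made up purely
# for illustration and are not part of the original view.
def summarize_subject_counts(rows, term_names):
    # rows: [{"terms": term_id, "count": n}, ...], already sorted by count descending.
    total = sum(r["count"] for r in rows)
    # Drop trailing subjects whose counts are tiny relative to the top subject (under 1/8 of it),
    # but always keep at least two entries, mirroring the view's while-loop.
    while len(rows) > 2 and rows[-1]["count"] < rows[0]["count"] / 8:
        rows.pop(-1)
    return [{"term": term_names[r["terms"]],
             "count": r["count"],
             "pct": int(round(float(r["count"]) / total * 100))}
            for r in rows]

# Example (made-up numbers):
#   summarize_subject_counts(
#       [{"terms": 1, "count": 12}, {"terms": 2, "count": 4}, {"terms": 3, "count": 1}],
#       {1: "Health", 2: "Taxation", 3: "Armed forces"})
#   -> [{'term': 'Health', 'count': 12, 'pct': 71}, {'term': 'Taxation', 'count': 4, 'pct': 24}]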
def build_info():
    if re.match(r"\d", pk):
        person = get_object_or_404(Person, pk=pk)
    else:
        # support bioguide IDs for me
        person = get_object_or_404(Person, bioguideid=pk)

    # current role
    role = person.get_current_role()
    if role:
        active_role = True
    else:
        active_role = False
        try:
            role = person.roles.order_by('-enddate')[0]
        except IndexError:
            role = None

    # photo
    photo_url, photo_credit = person.get_photo()

    # analysis
    analysis_data = analysis.load_data(person)

    try:
        has_session_stats = person.get_session_stats('2015')
    except:
        # Not everyone has 2014 stats, obviously. They may have stats
        # corresponding to their most recent role. Since stats are a
        # session behind, even-year stats might not correspond to
        # a legislator's most recent role, which is why I hard-coded
        # the current session stats above.
        has_session_stats = False
        if role:
            try:
                has_session_stats = role.get_most_recent_session_stats()
            except:
                pass

    links = []
    if role.current:
        if role.website: links.append(("%s's Official Website" % person.lastname, role.website, "fa fa-external-link"))
        if person.twitterid: links.append(("@" + person.twitterid, "http://twitter.com/" + person.twitterid, "fa fa-twitter"))
    if person.osid: links.append(("OpenSecrets", "http://www.opensecrets.org/politicians/summary.php?cid=" + person.osid, "fa fa-money"))
    if person.pvsid: links.append(("VoteSmart", "http://votesmart.org/candidate/" + person.pvsid, "fa fa-th-list"))
    if person.bioguideid: links.append(("Bioguide", "http://bioguide.congress.gov/scripts/biodisplay.pl?index=" + person.bioguideid, "fa fa-user"))
    if person.cspanid: links.append(("C-SPAN", "http://www.c-spanvideo.org/person/" + str(person.cspanid), "fa fa-youtube-play"))

    # Get a break down of the top terms this person's sponsored bills fall into,
    # looking only at the most recent five years of bills.
    from bill.models import BillTerm
    from datetime import datetime, timedelta
    most_recent_bill = person.sponsored_bills.order_by("-introduced_date").first()
    bills_by_subject_counts = list(person.sponsored_bills.filter(
        terms__id__in=BillTerm.get_top_term_ids(),
        introduced_date__gt=(most_recent_bill.introduced_date if most_recent_bill else datetime.now())-timedelta(days=5*365.25))\
        .values("terms")\
        .annotate(count=Count('id')).order_by('-count')\
        .filter(count__gt=1)\
        [0:8])
    terms = BillTerm.objects.in_bulk(item["terms"] for item in bills_by_subject_counts)
    total_count = sum(item["count"] for item in bills_by_subject_counts)
    while len(bills_by_subject_counts) > 2 and bills_by_subject_counts[-1]["count"] < bills_by_subject_counts[0]["count"]/8:
        bills_by_subject_counts.pop(-1)
    for item in bills_by_subject_counts:
        item["term"] = terms[item["terms"]]
        item["pct"] = int(round(float(item["count"]) / total_count * 100))
        del item["terms"]

    # Missed vote explanations from ProPublica
    try:
        vote_explanations = http_rest_json("https://projects.propublica.org/explanations/api/members/%s.json" % person.bioguideid)
    except:
        # squash all errors
        vote_explanations = { }

    return {
        'person': person,
        'role': role,
        'active_role': active_role,
        'active_congressional_role': active_role and role.role_type in (RoleType.senator, RoleType.representative),
        'photo': photo_url,
        'photo_credit': photo_credit,
        'links': links,
        'analysis_data': analysis_data,
        'recent_bills': person.sponsored_bills.all().order_by('-introduced_date')[0:7],
        'committeeassignments': get_committee_assignments(person),
        'feed': person.get_feed(),
        'cities': get_district_cities("%s-%02d" % (role.state.lower(), role.district)) if role and role.district else None,
        'has_session_stats': has_session_stats,
        'bill_subject_areas': bills_by_subject_counts,
        'vote_explanations': vote_explanations,
    }
def build_info():
    global pronunciation_guide

    if re.match(r"\d", pk):
        person = get_object_or_404(Person, pk=pk)
    else:
        # support bioguide IDs for me
        person = get_object_or_404(Person, bioguideid=pk)

    # current role
    role = person.get_current_role()
    if role:
        active_role = True
    else:
        active_role = False
        try:
            role = person.roles.order_by('-enddate')[0]
        except IndexError:
            role = None

    # photo
    photo_url, photo_credit = person.get_photo()

    # analysis
    analysis_data = analysis.load_data(person)

    try:
        # Get session stats for the previous year.
        has_session_stats = person.get_session_stats(str(datetime.now().year - 1))
    except:
        # Not everyone has current stats, obviously. They may have stats
        # corresponding to their most recent role. Since stats are a
        # session behind, even-year stats might not correspond to
        # a legislator's most recent role, which is why I hard-coded
        # the current session stats above.
        has_session_stats = False
        if role:
            try:
                has_session_stats = role.get_most_recent_session_stats()
            except:
                pass

    links = []
    if role.current:
        if role.website: links.append(("%s's Official Website" % person.lastname, role.website, "fa fa-external-link"))
        if person.twitterid: links.append(("@" + person.twitterid, "http://twitter.com/" + person.twitterid, "fa fa-twitter"))
    if person.osid: links.append(("OpenSecrets", "http://www.opensecrets.org/politicians/summary.php?cid=" + person.osid, "fa fa-money"))
    if person.pvsid: links.append(("VoteSmart", "http://votesmart.org/candidate/" + person.pvsid, "fa fa-th-list"))
    if person.bioguideid: links.append(("Bioguide", "http://bioguide.congress.gov/scripts/biodisplay.pl?index=" + person.bioguideid, "fa fa-user"))
    if person.cspanid: links.append(("C-SPAN", "http://www.c-spanvideo.org/person/" + str(person.cspanid), "fa fa-youtube-play"))

    # Get a break down of the top terms this person's sponsored bills fall into,
    # looking only at the most recent five years of bills.
    from bill.models import BillTerm
    most_recent_bill = person.sponsored_bills.order_by("-introduced_date").first()
    bills_by_subject_counts = list(person.sponsored_bills.filter(
        terms__id__in=BillTerm.get_top_term_ids(),
        introduced_date__gt=(most_recent_bill.introduced_date if most_recent_bill else datetime.now())-timedelta(days=5*365.25))\
        .values("terms")\
        .annotate(count=Count('id')).order_by('-count')\
        .filter(count__gt=1)\
        [0:8])
    terms = BillTerm.objects.in_bulk(item["terms"] for item in bills_by_subject_counts)
    total_count = sum(item["count"] for item in bills_by_subject_counts)
    while len(bills_by_subject_counts) > 2 and bills_by_subject_counts[-1]["count"] < bills_by_subject_counts[0]["count"]/8:
        bills_by_subject_counts.pop(-1)
    for item in bills_by_subject_counts:
        item["term"] = terms[item["terms"]]
        item["pct"] = int(round(float(item["count"]) / total_count * 100))
        del item["terms"]

    # Missed vote explanations from ProPublica
    try:
        vote_explanations = http_rest_json("https://projects.propublica.org/explanations/api/members/%s.json" % person.bioguideid)
    except:
        # squash all errors
        vote_explanations = { }

    # Misconduct - load and filter this person's entries, keeping original order.
    # Choose 'Alleged misconduct', 'Misconduct', 'Misconduct/alleged misconduct' as appropriate.
    from website.views import load_misconduct_data
    misconduct = [m for m in load_misconduct_data() if m["person"] == person]
    misconduct_any_alleged = (len([m for m in misconduct if m["alleged"]]) > 0)
    misconduct_any_not_alleged = (len([m for m in misconduct if not m["alleged"]]) > 0)

    # Load pronunciation from guide. Turn into a mapping from GovTrack IDs to data.
    if pronunciation_guide is None:
        import rtyaml
        if not hasattr(settings, 'PRONUNCIATION_DATABASE_PATH'):
            # debugging
            pronunciation_guide = { }
        else:
            pronunciation_guide = { p["id"]["govtrack"]: p
                for p in rtyaml.load(open(settings.PRONUNCIATION_DATABASE_PATH)) }

    # Get this person's entry.
    pronunciation = pronunciation_guide.get(person.id)
    # TODO: Validate that the 'name' in the guide matches the name we're actually displaying.

    if pronunciation and not pronunciation.get("key"):
        # Show a key to the letters used in the pronunciation guide. Break apart the name
        # into words which we'll show in columns.
        pronunciation["key"] = []
        for namepart in pronunciation["respell"].split(" // "):
            for nameword in namepart.split(" "):
                # Parse out the symbols actually used in the guide. Sweep from left to right chopping
                # off valid respelling letter combinations, chopping off the longest one where possible.
                pronunciation["key"].append([])
                i = 0
                while i < len(nameword):
                    for s in sorted(pronunciation_guide_key, key=lambda s: -len(s)):
                        if nameword[i:i+len(s)] in (s, s.upper()):
                            pronunciation["key"][-1].append((nameword[i:i+len(s)], pronunciation_guide_key[s]))
                            i += len(s)
                            break
                    else:
                        # respelling did not match any valid symbol, should be an error but we don't
                        # want to issue an Oops! for this
                        break

    # Get their enacted bills.
    enacted_bills_src_qs = person.sponsored_bills.exclude(original_intent_replaced=True).order_by('-current_status_date')

    return {
        'person': person,
        'role': role,
        'active_role': active_role,
        'active_congressional_role': active_role and role.role_type in (RoleType.senator, RoleType.representative),
        'pronunciation': pronunciation,
        'photo': photo_url,
        'photo_credit': photo_credit,
        'links': links,
        'analysis_data': analysis_data,
        'enacted_bills': [b for b in enacted_bills_src_qs if b.was_enacted_ex(cache_related_bills_qs=enacted_bills_src_qs)],
        'recent_bills': person.sponsored_bills.all().order_by('-introduced_date')[0:7],
        'committeeassignments': get_committee_assignments(person),
        'feed': person.get_feed(),
        'has_session_stats': has_session_stats,
        'bill_subject_areas': bills_by_subject_counts,
        'vote_explanations': vote_explanations,
        'key_votes': load_key_votes(person),
        'misconduct': misconduct,
        'misconduct_any_alleged': misconduct_any_alleged,
        'misconduct_any_not_alleged': misconduct_any_not_alleged,
    }
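# The respelling parser above depends on `pronunciation_guide_key`, a module-level mapping from
# respelling symbols to descriptions defined elsewhere in this file. The standalone sketch below
# reproduces the same greedy longest-match sweep against a toy key, purely to illustrate the
# algorithm; the symbols and descriptions here are made up and are not the real guide key.
def explain_respelling(word, guide_key):
    # Sweep left to right, always chopping off the longest matching respelling symbol.
    parts = []
    i = 0
    while i < len(word):
        for s in sorted(guide_key, key=lambda s: -len(s)):
            if word[i:i+len(s)] in (s, s.upper()):
                parts.append((word[i:i+len(s)], guide_key[s]))
                i += len(s)
                break
        else:
            # No symbol matched; stop rather than raise, mirroring the view's behavior.
            break
    return parts

# Example (toy key, not the real pronunciation_guide_key):
#   explain_respelling("BAYnur", {"ay": "as in day", "b": "b", "n": "n", "ur": "as in fur"})
#   -> [('B', 'b'), ('AY', 'as in day'), ('n', 'n'), ('ur', 'as in fur')]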