def month_detail(request, year, month):
    """Render the detail page for a single month of Congressional Record data."""
    year_month = '%s%s' % (year, month)
    month_name = MONTH_NAMES[int(month) - 1]

    # Collect top phrases for each n-gram length (unigrams through 5-grams),
    # keyed by the gram's display name.
    ngrams = {}
    for size in range(1, 6):
        ngrams[GRAM_NAMES[size - 1]] = capitolwords.top_phrases(
            entity_type='month',
            entity_value=year_month,
            n=size,
            per_page=30)
    ngrams = ngrams.iteritems()

    dates = capitolwords._dates_in_month(year=year, month=month)

    def date_str_to_date(date_str):
        # Parse an ISO 'YYYY-MM-DD' string into a datetime.date.
        y, m, d = map(int, date_str.split('-'))
        return datetime.date(y, m, d)

    # Bucket the month's dates by week number ('%U': Sunday-first week-of-year).
    # groupby assumes `dates` arrives in chronological order.
    dates_by_week = []
    for weeknum, week_dates in itertools.groupby(
            dates, lambda entry: date_str_to_date(entry['date']).strftime('%U')):
        dates_by_week.append((weeknum, list(week_dates)))

    return render_to_response('cwod/month_detail.html',
                              {'month_name': month_name,
                               'year': year,
                               'ngrams': ngrams,
                               'dates_by_week': dates_by_week,
                               },
                              context_instance=RequestContext(request))
def month_detail(request, year, month):
    """Show the month landing page: top n-grams plus a week-grouped date list."""
    year_month = '%s%s' % (year, month)
    month_name = MONTH_NAMES[int(month) - 1]

    # One top-phrase result set per n-gram size, keyed by the gram's name.
    ngrams = dict(
        (GRAM_NAMES[n - 1],
         capitolwords.top_phrases(entity_type='month',
                                  entity_value=year_month,
                                  n=n,
                                  per_page=30))
        for n in range(1, 6))
    ngrams = ngrams.iteritems()

    dates = capitolwords._dates_in_month(year=year, month=month)

    def date_str_to_date(date_str):
        # 'YYYY-MM-DD' -> datetime.date
        pieces = [int(piece) for piece in date_str.split('-')]
        return datetime.date(pieces[0], pieces[1], pieces[2])

    # '%U' is the Sunday-first week-of-year number; groupby relies on the
    # dates already being in chronological order.
    week_of = lambda entry: date_str_to_date(entry['date']).strftime('%U')
    dates_by_week = [(week, list(group))
                     for week, group in itertools.groupby(dates, week_of)]

    return render_to_response('cwod/month_detail.html',
                              {'month_name': month_name,
                               'year': year,
                               'ngrams': ngrams,
                               'dates_by_week': dates_by_week,
                               },
                              context_instance=RequestContext(request))
def date_detail(request, year, month, day):
    """Render the detail page for a single day of the Congressional Record.

    Honors an optional ``bioguide_id`` GET parameter to restrict the entries
    to a single legislator.  Raises Http404 when the date has no entries.

    Fixes: removed a leftover debug ``print entries`` statement and the
    ``by_chamber`` dict that was built but never used.
    """
    kwargs = {}
    if request.GET.get('bioguide_id'):
        kwargs['bioguide_id'] = request.GET['bioguide_id']

    date = datetime.date(year=int(year), month=int(month), day=int(day))
    entries = entries_for_date(date, **kwargs)
    if not entries:
        raise Http404

    # Top phrases for each n-gram length (unigrams through 5-grams).
    ngrams = {}
    for n in range(1, 6):
        ngrams[GRAM_NAMES[n - 1]] = capitolwords.top_phrases(
            entity_type='date',
            entity_value=date,
            n=n,
            per_page=30,
        )
    ngrams = ngrams.iteritems()

    similar_dates = get_similar_entities('date', date)

    return render_to_response('cwod/date_detail.html',
                              {'date': date,
                               'ngrams': ngrams,
                               'entries': entries,
                               'similar_dates': similar_dates,
                               },
                              context_instance=RequestContext(request))
def date_detail(request, year, month, day):
    """Render the detail page for one day of the Congressional Record.

    An optional ``bioguide_id`` GET parameter restricts entries to a single
    legislator; a day with no entries raises Http404.

    Fixes: dropped the stray debug ``print entries`` and the unused
    ``by_chamber`` dict (dead code).
    """
    kwargs = {}
    if request.GET.get('bioguide_id'):
        kwargs['bioguide_id'] = request.GET['bioguide_id']

    date = datetime.date(year=int(year), month=int(month), day=int(day))
    entries = entries_for_date(date, **kwargs)
    if not entries:
        raise Http404

    # Top phrases for each n-gram length (unigrams through 5-grams).
    ngrams = {}
    for n in range(1, 6):
        ngrams[GRAM_NAMES[n - 1]] = capitolwords.top_phrases(
            entity_type='date',
            entity_value=date,
            n=n,
            per_page=30,
        )
    ngrams = ngrams.iteritems()

    similar_dates = get_similar_entities('date', date)

    return render_to_response('cwod/date_detail.html',
                              {'date': date,
                               'ngrams': ngrams,
                               'entries': entries,
                               'similar_dates': similar_dates,
                               },
                              context_instance=RequestContext(request))
def legislator_detail(request, bioguide_id, slug=None):
    """Render a legislator's page; 301-redirect when the URL slug is stale."""
    legislator = legislator_lookup(bioguide_id)
    if not legislator:
        raise Http404

    # Canonicalize the URL: a missing or wrong slug gets a permanent redirect.
    if legislator['slug'] != slug:
        url = reverse('cwod_legislator_detail',
                      kwargs={'bioguide_id': bioguide_id,
                              'slug': legislator['slug']})
        return HttpResponsePermanentRedirect(url)

    # Attach the full legislator record to each of the ten most similar.
    similar_legislators = []
    for similar in get_similar_entities('bioguide', bioguide_id)[:10]:
        similar['legislator'] = legislator_lookup(similar['bioguide'])
        similar_legislators.append(similar)

    # Top phrases for each n-gram length (unigrams through 5-grams).
    ngrams = {}
    for size in range(1, 6):
        ngrams[GRAM_NAMES[size - 1]] = capitolwords.top_phrases(
            entity_type='legislator',
            entity_value=bioguide_id,
            n=size,
            per_page=30)
    ngrams = ngrams.iteritems()

    entries = capitolwords.text(bioguide_id=bioguide_id,
                                sort='date desc',
                                per_page=5)

    return render_to_response('cwod/legislator_detail.html',
                              {'legislator': legislator,
                               'current_congress': get_current_congress(),
                               'similar_legislators': similar_legislators,
                               'entries': entries,
                               'ngrams': ngrams,
                               },
                              context_instance=RequestContext(request))
def state_detail(request, state):
    """Render the detail page for a U.S. state or territory.

    Raises Http404 for an unrecognized abbreviation.

    Fixes: replaced the Python-2-only ``cmp``-style district comparator
    (passed positionally to ``sorted``) with an equivalent ``key=`` function —
    same ordering, less duplication, and valid under Python 3.  Also removed
    commented-out dead code.
    """
    state_name = dict(STATE_CHOICES + TERRITORY_CHOICES).get(state)
    if not state_name:
        raise Http404

    entries = capitolwords.text(state='"%s"' % state,
                                sort='date desc,score desc',
                                per_page=5)

    # Top phrases for each n-gram length (unigrams through 5-grams).
    ngrams = {}
    for n in range(1, 6):
        ngrams[GRAM_NAMES[n - 1]] = capitolwords.top_phrases(
            entity_type='state',
            entity_value=state,
            n=n,
            per_page=30)
    ngrams = ngrams.iteritems()

    similar_states = get_similar_entities('state', state)

    legislators = capitolwords.legislators(state=state,
                                           congress=get_current_congress())

    def district_sort_key(legislator):
        # Numeric districts sort in order; non-numeric district values
        # (presumably senators' seats — confirm against the API) map to 0
        # so they sort first, matching the old comparator's behavior.
        try:
            return int(legislator['district'])
        except ValueError:
            return 0

    legislators = sorted(legislators, key=district_sort_key)

    # Partition into chambers; anything not 'Senate' is treated as House.
    bodies = {'House': [], 'Senate': []}
    for legislator in legislators:
        if legislator['chamber'] == 'Senate':
            bodies['Senate'].append(legislator)
        else:
            bodies['House'].append(legislator)
    # Reverse-alphabetical on chamber name lists Senate before House.
    bodies = sorted(bodies.items(), key=itemgetter(0), reverse=True)

    return render_to_response('cwod/state_detail.html',
                              {'state': state,
                               'state_name': state_name,
                               'entries': entries,
                               'ngrams': ngrams,
                               'similar_states': similar_states,
                               'bodies': bodies,
                               },
                              context_instance=RequestContext(request))
def state_detail(request, state):
    """Render the detail page for a state or territory abbreviation.

    Raises Http404 when the abbreviation is not in STATE_CHOICES or
    TERRITORY_CHOICES.

    Fixes: the district sort used a ``cmp`` comparator passed positionally to
    ``sorted`` — Python-2-only and needlessly duplicated.  An equivalent
    ``key=`` function preserves the ordering and works on Python 3.
    Commented-out dead code removed.
    """
    state_name = dict(STATE_CHOICES + TERRITORY_CHOICES).get(state)
    if not state_name:
        raise Http404

    entries = capitolwords.text(state='"%s"' % state,
                                sort='date desc,score desc',
                                per_page=5)

    # Top phrases for each n-gram length (unigrams through 5-grams).
    ngrams = {}
    for n in range(1, 6):
        ngrams[GRAM_NAMES[n - 1]] = capitolwords.top_phrases(
            entity_type='state',
            entity_value=state,
            n=n,
            per_page=30)
    ngrams = ngrams.iteritems()

    similar_states = get_similar_entities('state', state)

    legislators = capitolwords.legislators(state=state,
                                           congress=get_current_congress())

    def district_sort_key(legislator):
        # Non-numeric district values fall back to 0 (sort first), exactly
        # as the old comparator did for both sides of the comparison.
        try:
            return int(legislator['district'])
        except ValueError:
            return 0

    legislators = sorted(legislators, key=district_sort_key)

    # Split by chamber; any non-Senate record goes to the House bucket.
    bodies = {'House': [], 'Senate': []}
    for legislator in legislators:
        if legislator['chamber'] == 'Senate':
            bodies['Senate'].append(legislator)
        else:
            bodies['House'].append(legislator)
    # Reverse-alphabetical chamber order puts Senate ahead of House.
    bodies = sorted(bodies.items(), key=itemgetter(0), reverse=True)

    return render_to_response('cwod/state_detail.html',
                              {'state': state,
                               'state_name': state_name,
                               'entries': entries,
                               'ngrams': ngrams,
                               'similar_states': similar_states,
                               'bodies': bodies,
                               },
                              context_instance=RequestContext(request))
def state_list(request):
    """List every state and territory with its top unigram phrases."""
    states = []
    for abbrev, statename in (US_STATES + US_TERRITORIES):
        top = capitolwords.top_phrases(entity_type='state',
                                       entity_value=abbrev,
                                       n=1,
                                       per_page=5)
        states.append((abbrev, statename, top))

    # Split into three roughly even columns for the template.
    column_size = (len(states) + 1) / 3
    state_chunks = chunks(states, column_size)

    return render_to_response('cwod/state_list.html',
                              {'state_chunks': state_chunks,
                               },
                              context_instance=RequestContext(request))
def state_list(request):
    """Render the index of states/territories, each with top unigrams."""
    # (abbrev, full name, top unigram phrases) per state and territory.
    states = [(abbrev, statename,
               capitolwords.top_phrases(entity_type='state',
                                        entity_value=abbrev,
                                        n=1,
                                        per_page=5))
              for abbrev, statename in (US_STATES + US_TERRITORIES)]

    # Three display columns, rounded so no state is dropped.
    state_chunks = chunks(states, (len(states) + 1) / 3)

    return render_to_response('cwod/state_list.html',
                              {'state_chunks': state_chunks,
                               },
                              context_instance=RequestContext(request))
def legislator_detail(request, bioguide_id, slug=None):
    """Render the page for one legislator, redirecting stale slugs (301)."""
    legislator = legislator_lookup(bioguide_id)
    if not legislator:
        raise Http404

    # Enforce the canonical slug in the URL with a permanent redirect.
    if legislator['slug'] != slug:
        target = reverse('cwod_legislator_detail',
                         kwargs={'bioguide_id': bioguide_id,
                                 'slug': legislator['slug']})
        return HttpResponsePermanentRedirect(target)

    # Enrich the ten most-similar records with their full legislator data.
    similar_legislators = []
    for record in get_similar_entities('bioguide', bioguide_id)[:10]:
        record['legislator'] = legislator_lookup(record['bioguide'])
        similar_legislators.append(record)

    # One top-phrase result set per n-gram size (1 through 5).
    ngrams = {}
    for n in range(1, 6):
        ngrams[GRAM_NAMES[n - 1]] = capitolwords.top_phrases(
            entity_type='legislator',
            entity_value=bioguide_id,
            n=n,
            per_page=30)
    ngrams = ngrams.iteritems()

    entries = capitolwords.text(bioguide_id=bioguide_id,
                                sort='date desc',
                                per_page=5)

    return render_to_response('cwod/legislator_detail.html',
                              {'legislator': legislator,
                               'current_congress': get_current_congress(),
                               'similar_legislators': similar_legislators,
                               'entries': entries,
                               'ngrams': ngrams,
                               },
                              context_instance=RequestContext(request))