Code example #1
0
    def handle(self, *args, **options):
        """Write per-week summaries for each data series to CSV, and
        optionally a parallel cumulative CSV when ``write_cumulative`` is set.

        Reads module-level config: CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME,
        CSV_FILE_NAME_CUMULATIVE, data_start, data_series, write_cumulative.
        """
        # Fixed cutoff: stop after the 2014 election. The -1 skips the
        # current (still-accruing) week.
        today = date(2014, 11, 1)
        last_week = get_week_number(today) - 1
        first_week = get_week_number(data_start)

        outf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME)
        print("writing to %s" % outf)
        outfile = open(outf, 'w')
        # NOTE(review): the header is written with a raw join while rows go
        # through csv.writer — field names containing commas would break the
        # header. Preserved as-is to keep the output format unchanged.
        field_names = make_header(first_week, last_week)
        outfile.write(",".join(field_names) + "\n")
        dw = csv.writer(outfile)

        outcumfile = None
        cumdw = None
        if write_cumulative:
            outcumf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME_CUMULATIVE)
            print("writing cumulative numbers to %s" % outcumf)
            outcumfile = open(outcumf, 'w')
            # Same header as the weekly file (was computed twice before).
            outcumfile.write(",".join(field_names) + "\n")
            cumdw = csv.writer(outcumfile)

        try:
            for data in data_series:
                this_row = [data['data_id'], data['data_series_name']]
                this_cum_row = [data['data_id'], data['data_series_name']]
                for week in range(first_week, last_week + 1):
                    print("handling %s week %s" % (data['data_series_name'], week))
                    this_row.append(summarize_week_queryset(week, data['q']))
                    if write_cumulative:
                        this_cum_row.append(summarize_week_queryset_cumulative(week, data['q']))
                dw.writerow(this_row)
                if write_cumulative:
                    cumdw.writerow(this_cum_row)
        finally:
            # Close handles even if a summary call fails mid-run (was leaked).
            outfile.close()
            if outcumfile is not None:
                outcumfile.close()
Code example #2
0
    def handle(self, *args, **options):
        """Write one CSV row per senate district containing weekly
        outside-spending totals between ``data_start`` and election week.

        Reads module-level config: CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME,
        data_start, district_list, senate_districts.
        """
        # stop after the election
        today = date(2014, 11, 1)
        last_week = get_week_number(today)
        first_week = get_week_number(data_start)

        outf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME)
        outfile = open(outf, 'w')
        try:
            field_names = make_header(first_week, last_week)
            outfile.write(",".join(field_names) + "\n")
            dw = csv.writer(outfile)

            # One query for the whole window, then index totals by
            # "<district pk>-<week>" for O(1) lookup per cell.
            # NOTE(review): select_related('District') — field name looks like
            # it should be lowercase 'district'; confirm against the model.
            summaries = DistrictWeekly.objects.filter(cycle_week_number__gte=first_week, cycle_week_number__lte=last_week, district__pk__in=district_list).select_related('District')
            summary_hash = {}
            for s in summaries:
                summary_hash["%s-%s" % (s.district.pk, s.cycle_week_number)] = s.outside_spending

            # regroup by week; weeks with no summary count as zero
            for i in senate_districts:
                row = [i['state'], i['id']]
                for week in range(first_week, last_week + 1):
                    key = "%s-%s" % (i['id'], week)
                    row.append(summary_hash.get(key, 0))
                dw.writerow(row)
        finally:
            # Close the handle even on a failed query (was leaked before).
            outfile.close()
Code example #3
0
    def handle(self, *args, **options):
        """Write per-week summaries for each data series to CSV via raw SQL
        (shared cursor), plus an optional cumulative CSV.

        Reads module-level config: CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME,
        CSV_FILE_NAME_CUMULATIVE, data_start, data_series, write_cumulative.
        """
        cursor = connection.cursor()
        today = date.today()
        # Most recent *complete* week; the current week is still accruing.
        last_week = get_week_number(today) - 1
        first_week = get_week_number(data_start)

        outf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME)
        print("writing to %s" % outf)
        outfile = open(outf, 'w')
        field_names = make_header(first_week, last_week)
        outfile.write(",".join(field_names) + "\n")
        dw = csv.writer(outfile)

        outcumfile = None
        cumdw = None
        if write_cumulative:
            outcumf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR,
                                 CSV_FILE_NAME_CUMULATIVE)
            print("writing cumulative numbers to %s" % outcumf)
            outcumfile = open(outcumf, 'w')
            # Cumulative file always starts at week 1, not data_start's week.
            cum_field_names = make_header(1, last_week)
            outcumfile.write(",".join(cum_field_names) + "\n")
            cumdw = csv.writer(outcumfile)

        try:
            for data in data_series:
                this_row = [data['data_id'], data['data_series_name']]
                this_cum_row = [data['data_id'], data['data_series_name']]
                for week in range(first_week, last_week + 1):
                    print("handling %s week %s" % (data['data_series_name'], week))
                    this_row.append(summarize_week(week, data['q'], cursor))
                    if write_cumulative:
                        this_cum_row.append(
                            summarize_week_cumulative(week, data['q'], cursor))
                dw.writerow(this_row)
                if write_cumulative:
                    cumdw.writerow(this_cum_row)
        finally:
            # Release handles and the DB cursor even if a query fails
            # mid-run (all three were leaked before).
            outfile.close()
            if outcumfile is not None:
                outcumfile.close()
            cursor.close()
Code example #4
0
    def handle(self, *args, **options):
        """Write one CSV row per senate district containing weekly
        outside-spending totals between ``data_start`` and election week.

        Reads module-level config: CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME,
        data_start, district_list, senate_districts.
        """
        # stop after the election
        today = date(2014, 11, 1)
        last_week = get_week_number(today)
        first_week = get_week_number(data_start)

        outf = "%s/%s" % (CHART_CSV_DOWNLOAD_DIR, CSV_FILE_NAME)
        outfile = open(outf, 'w')
        try:
            field_names = make_header(first_week, last_week)
            outfile.write(",".join(field_names) + "\n")

            dw = csv.writer(outfile)

            # One query for the whole window, indexed by
            # "<district pk>-<week>" for O(1) lookup per cell.
            # NOTE(review): select_related('District') — field name looks like
            # it should be lowercase 'district'; confirm against the model.
            summaries = DistrictWeekly.objects.filter(
                cycle_week_number__gte=first_week,
                cycle_week_number__lte=last_week,
                district__pk__in=district_list).select_related('District')
            summary_hash = {}
            for s in summaries:
                summary_hash["%s-%s" % (s.district.pk,
                                        s.cycle_week_number)] = s.outside_spending

            # regroup by week; weeks with no summary count as zero
            for i in senate_districts:
                row = [i['state'], i['id']]
                for week in range(first_week, last_week + 1):
                    row.append(summary_hash.get("%s-%s" % (i['id'], week), 0))
                dw.writerow(row)
        finally:
            # Close the handle even on a failed query (was leaked before).
            outfile.close()
Code example #5
0
File: filters.py  Project: dmc2015/read_FEC
def weekFilter(queryset, querydict):
    """Narrow *queryset* to the current ("NOW") or previous ("LAST")
    cycle week, as requested by querydict["week"]; any other value —
    or a missing key — leaves the queryset untouched."""
    try:
        selector = querydict["week"].upper()

        if selector == "NOW":
            queryset = queryset.filter(cycle_week_number=get_week_number(date.today()))
        if selector == "LAST":
            queryset = queryset.filter(cycle_week_number=get_week_number(date.today()) - 1)

    except KeyError:
        pass

    return queryset
Code example #6
0
File: filters.py  Project: hpetru/read_FEC
def weekFilter(queryset, querydict):
    """Restrict *queryset* by the "week" entry of *querydict*: "NOW"
    keeps the current cycle week, "LAST" the one before it. Anything
    else (including no "week" key at all) is a no-op."""
    try:
        mode = querydict['week'].upper()

        if mode == "NOW":
            target = get_week_number(date.today())
            queryset = queryset.filter(cycle_week_number=target)
        if mode == "LAST":
            target = get_week_number(date.today()) - 1
            queryset = queryset.filter(cycle_week_number=target)

    except KeyError:
        pass

    return queryset
Code example #7
0
File: views.py  Project: dmc2015/read_FEC
def top_current_races(request):
    """Render the three districts with the most outside spending (over
    $1000) during the most recent complete cycle week."""
    # Last *complete* week — the current week is still accruing data.
    wk = get_week_number(datetime.date.today()) - 1
    start = get_week_start(int(wk))
    end = get_week_end(int(wk))
    start_fmt = start.strftime('%m/%d')
    end_fmt = end.strftime('%m/%d, %Y')
    prev_wk = int(wk) - 1
    next_wk = int(wk) + 1
    period_start = start - datetime.timedelta(days=14)

    # Top three spenders for that week, biggest first.
    weeklysummaries = DistrictWeekly.objects.filter(cycle_week_number=wk, outside_spending__gt=1000).order_by('-outside_spending')[:3]
    title = "Top races by outside spending, %s-%s" % (start_fmt, end_fmt)

    district_ids = weeklysummaries.values("district__pk")
    id_strings = [str(entry['district__pk']) for entry in district_ids]
    data_url = "http://realtime.influenceexplorer.com/api/districts-weekly/?week_start=%s&week_end=%s&districts=%s&format=json" % (int(wk) - 2, wk, ",".join(id_strings))

    context = {
        'previous_week_number': prev_wk,
        'following_week_number': next_wk,
        'title': title,
        'period_start': period_start,
        'week_start': start,
        'week_end': end,
        'weeklysummaries': weeklysummaries,
        'week_number': wk,
        'data_url': data_url,
    }
    return render_to_response('datapages/top_races.html', context,
                              context_instance=RequestContext(request))
Code example #8
0
def top_current_races(request):
    """Landing page for the top three races by outside spending in the
    most recent complete cycle week."""
    # Last *complete* week — the current week is still accruing data.
    last_complete = get_week_number(datetime.date.today()) - 1
    week_start = get_week_start(int(last_complete))
    week_end = get_week_end(int(last_complete))
    period_start = week_start - datetime.timedelta(days=14)

    # Top three districts for that week, biggest spenders first.
    top_three = DistrictWeekly.objects.filter(
        cycle_week_number=last_complete,
        outside_spending__gt=1000).order_by('-outside_spending')[:3]
    title = "Top races by outside spending, %s-%s" % (
        week_start.strftime('%m/%d'), week_end.strftime('%m/%d, %Y'))

    pk_values = [str(entry['district__pk'])
                 for entry in top_three.values("district__pk")]
    data_url = "http://realtime.influenceexplorer.com/api/districts-weekly/?week_start=%s&week_end=%s&districts=%s&format=json" % (
        int(last_complete) - 2, last_complete, ",".join(pk_values))

    return render_to_response('datapages/top_races.html', {
        'previous_week_number': int(last_complete) - 1,
        'following_week_number': int(last_complete) + 1,
        'title': title,
        'period_start': period_start,
        'week_start': week_start,
        'week_end': week_end,
        'weeklysummaries': top_three,
        'week_number': last_complete,
        'data_url': data_url,
    },
                              context_instance=RequestContext(request))
Code example #9
0
 def handle(self, *args, **options):
     """Summarize the current cycle week, or every week from 1 through
     the current one when the --run_all option is set."""
     current_week_number = get_week_number(date.today())
     week_list = [current_week_number]
     if options['run_all']:
         # Rebuild every week of the cycle from the beginning.
         week_list = range(1, current_week_number + 1)

     for week_number in week_list:
         # Parenthesized print works on both Python 2 and 3.
         print("Summarizing week %s" % (week_number))
         summarize_week(week_number)