Example #1
    def get_context_data(self, **kwargs):
        # Quick notation to access all fire alarms
        fire_alarms = FireAlarm.objects.all()

        total_count = fire_alarms.count()
        region_names = [
            "batemans bay", "bourke", "broken hill", "byron bay", "canberra",
            "cobar", "cooma", "deniliquin", "dubbo", "grafton", "gunnedah",
            "katoomba", "kempsey", "kiama", "moree", "newcastle", "sydney",
            "tamworth", "wagga wagga",
        ]
        regions = {}

        # Iterate over each name in our region_names list
        for reg in region_names:
            # Filter for fire alarms in each region
            qs = fire_alarms.filter(region=reg)
            # create a data dictionary for the region
            regions[reg] = {}
            # get a count of how many alarms total are in the queryset
            regions[reg]['total'] = qs.count()
            regions[reg]['true_alarms'] = qs.filter(true_or_false_alarm=True).count()
            regions[reg]['false_alarms'] = qs.filter(true_or_false_alarm=False).count()

            # use calculate to find percentages
            regions[reg]['per_true_alarms'] = calculate.percentage(regions[reg]['true_alarms'], regions[reg]['total'])
            regions[reg]['per_false_alarms'] = calculate.percentage(regions[reg]['false_alarms'], regions[reg]['total'])

        return locals()
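
The view returns locals(), so the regions dictionary lands in the template context as-is. A hypothetical sketch of its shape for a single region, with invented counts (calculate.percentage(30, 120) is 25.0):

regions = {
    "sydney": {
        "total": 120,
        "true_alarms": 30,
        "false_alarms": 90,
        "per_true_alarms": 25.0,   # calculate.percentage(30, 120)
        "per_false_alarms": 75.0,  # calculate.percentage(90, 120)
    },
}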
Example #2
 def counties(self):
     """
     Returns all the counties that report results for this race as a list
     of ReportingUnit objects.
     """
     ru_list = sorted(
         [o for o in self.reporting_units
          if o.fips and not o.is_state and o.fips != '00000'],
         key=lambda x: x.name
     )
     # If the AP reports sub-County data for this state, as they do for some
     # New England states, we'll need to aggregate it here. If not, we can
     # just pass out the data "as is."
     if self.state.abbrev in COUNTY_CROSSWALK:
         d = {}
         for ru in ru_list:
             try:
                 d[ru.fips].append(ru)
             except KeyError:
                 d[ru.fips] = [ru]
         county_list = []
         for county, units in d.items():
             ru = ReportingUnit(
                 name=COUNTY_CROSSWALK[self.state.abbrev][county],
                 ap_number='',
                 fips=county,
                 abbrev=self.name,
                 precincts_reporting=sum(
                     [int(i.precincts_reporting) for i in units]),
                 precincts_total=sum(
                     [int(i.precincts_total) for i in units]),
                 num_reg_voters=sum([int(i.num_reg_voters) for i in units]),
                 votes_cast=sum([int(i.votes_cast) for i in units]))
             ru.precincts_reporting_percent = calculate.percentage(
                 ru.precincts_reporting, ru.precincts_total)
             # Group all the candidates
             cands = {}
             for unit in units:
                 for result in unit.results:
                     try:
                         cands[result.candidate.ap_polra_number].append(
                             result)
                     except KeyError:
                         cands[result.candidate.ap_polra_number] = [result]
             for ap_polra_number, results in cands.items():
                 combined = Result(candidate=results[0].candidate,
                                   reporting_unit=ru,
                                   vote_total=sum(
                                       [i.vote_total for i in results]),
                                   vote_total_percent=calculate.percentage(
                                       sum([i.vote_total for i in results]),
                                       ru.votes_cast))
                 # Update result connected to the reporting unit
                 ru.update_result(combined)
             # Load the finished county into our list
             county_list.append(ru)
         return county_list
     else:
         return ru_list
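
The try/except KeyError grouping used above can also be written with collections.defaultdict; a minimal, behaviour-equivalent sketch:

from collections import defaultdict

d = defaultdict(list)
for ru in ru_list:
    d[ru.fips].append(ru)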
Example #3
 def test_percentage(self):
     self.assertEqual(calculate.percentage(12, 60), 20)
     self.assertEqual(calculate.percentage(12, 60, multiply=False), 0.2)
     self.assertEqual(calculate.percentage(12, 0), None)
     self.assertRaises(ZeroDivisionError,
                       calculate.percentage,
                       12,
                       0,
                       fail_silently=False)
Example #4
 def test_percentage(self):
     self.assertEqual(calculate.percentage(12, 60), 20)
     self.assertEqual(calculate.percentage(12, 60, multiply=False), 0.2)
     self.assertEqual(calculate.percentage(12, 0), None)
     self.assertRaises(
         ZeroDivisionError,
         calculate.percentage,
         12,
         0,
         fail_silently=False
     )
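
The two tests above pin down the behaviour the rest of these examples rely on: percentage(value, whole) returns a 0-100 value by default, a 0-1 fraction with multiply=False, and None instead of raising when the denominator is zero unless fail_silently=False. A minimal sketch of that contract, not the library's actual implementation:

def percentage(value, total, multiply=True, fail_silently=True):
    # Sketch only; the real calculate.percentage may differ in details.
    try:
        fraction = float(value) / total
    except ZeroDivisionError:
        if fail_silently:
            return None
        raise
    return fraction * 100 if multiply else fraction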
Example #5
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        # Loop through all the models and find any fields without docs
        field_count = 0
        missing_list = []
        for m in get_model_list():
            field_list = m().get_field_list()
            field_count += len(field_list)
            for f in field_list:
                if not self.has_docs(f):
                    self.log("Missing: %s.%s.%s" %
                             (m().klass_group, m().klass_name, f))
                    missing_list.append((m, f))

        # If everything is done, declare victory
        if not missing_list:
            self.success("All %s fields documented!" % field_count)
            return False

        # If not, loop through the missing and create issues
        missing_count = len(missing_list)
        self.failure("%s/%s (%d%%) of fields lack documentation" %
                     (intcomma(missing_count), intcomma(field_count),
                      calculate.percentage(missing_count, field_count)))
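
A worked example of the failure message above, using invented counts (intcomma is Django's humanize helper, and the %d conversion truncates the percentage):

>>> missing_count, field_count = 150, 2000
>>> calculate.percentage(missing_count, field_count)
7.5
>>> "%s/%s (%d%%) of fields lack documentation" % (
...     intcomma(missing_count), intcomma(field_count),
...     calculate.percentage(missing_count, field_count))
'150/2,000 (7%) of fields lack documentation'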
Example #6
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        self.set_options()
        self.header(
            "Creating GitHub issues for model models without a UNIQUE_KEY")

        # Loop through all the models and find any fields without docs
        missing_list = []
        model_count = 0
        for m in get_model_list():
            model_count += 1
            if not m.UNIQUE_KEY:
                self.log("Missing: %s.%s" % (
                    m().klass_group,
                    m().klass_name,
                ))
                missing_list.append(m)

        # If everything is done, declare victory
        missing_count = len(missing_list)
        if not missing_count:
            self.success("All %s models have a UNIQUE_KEY!" % model_count)
            return False

        # If not, loop through the missing and create issues
        self.log("- %s/%s (%d%%) of models lack a UNIQUE_KEY" %
                 (intcomma(missing_count), intcomma(model_count),
                  calculate.percentage(missing_count, model_count)))
        for model in missing_list[1:]:
            self.create_issue(model)
Example #7
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        # Loop through all the models and find any fields without docs
        missing_list = []
        model_count = 0
        for m in get_model_list():
            model_count += 1
            if not m.UNIQUE_KEY:
                self.log("Missing: %s.%s" % (
                    m().klass_group,
                    m().klass_name,
                ))
                missing_list.append(m)

        # If everything is done, declare victory
        missing_count = len(missing_list)
        if not missing_count:
            self.success("All %s models have a UNIQUE_KEY!" % model_count)
            return False

        # If not, loop through the missing and create issues
        self.failure(
            "%s/%s (%d%%) of models lack a UNIQUE_KEY" % (
                intcomma(missing_count),
                model_count,
                calculate.percentage(missing_count, model_count)
            )
        )
Example #8
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        self.set_options()
        self.header(
            "Creating GitHub issues for model fields without documentation")

        # Loop through all the models and find any fields without docs
        field_count = 0
        missing_list = []
        for m in get_model_list():
            field_list = m().get_field_list()
            field_count += len(field_list)
            for f in field_list:
                if not self.has_docs(f):
                    missing_list.append((m, f))

        # If everything is done, declare victory
        if not missing_list:
            self.success("All %s fields documented!" % field_count)
            return False

        # If not, loop through the missing and create issues
        missing_count = len(missing_list)
        self.log("- %s/%s (%d%%) of fields lack documentation" %
                 (intcomma(missing_count), intcomma(field_count),
                  calculate.percentage(missing_count, field_count)))
        for model, field in missing_list[611:]:
            # For now we are excluding the 'other' model module to
            # avoid overkill
            if model().klass_group != 'other':
                self.create_issue(model, field)
Example #9
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        # Loop through all the models and find any fields without docs
        field_count = 0
        missing_list = []
        for m in get_model_list():
            field_list = m().get_field_list()
            field_count += len(field_list)
            for f in field_list:
                if not self.has_docs(f):
                    self.log("Missing: %s.%s.%s" % (
                        m().klass_group,
                        m().klass_name,
                        f
                    ))
                    missing_list.append((m, f))

        # If everything is done, declare victory
        if not missing_list:
            self.success("All %s fields documented!" % field_count)
            return False

        # If not, loop through the missing and create issues
        missing_count = len(missing_list)
        self.failure(
            "%s/%s (%d%%) of fields lack documentation" % (
                intcomma(missing_count),
                intcomma(field_count),
                calculate.percentage(missing_count, field_count)
            )
        )
Example #10
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        # Loop through all the models and find any fields without docs
        missing_list = []
        model_count = 0
        for m in get_model_list():
            model_count += 1
            if m.UNIQUE_KEY is None:
                self.log("Missing: %s.%s" % (
                    m().klass_group,
                    m().klass_name,
                ))
                missing_list.append(m)

        # If everything is done, declare victory
        missing_count = len(missing_list)
        if not missing_count:
            self.success("All %s models have a UNIQUE_KEY!" % model_count)
            return False

        # If not, loop through the missing and create issues
        self.failure("%s/%s (%d%%) of models lack a UNIQUE_KEY" %
                     (intcomma(missing_count), model_count,
                      calculate.percentage(missing_count, model_count)))
Example #11
 def __init__(self, party, delegates_needed, delegates_total,
              delegates_chosen):
     self.party = party
     self.delegates_needed = delegates_needed
     self.delegates_total = delegates_total
     self.delegates_chosen = delegates_chosen
     self.delegates_chosen_percent = calculate.percentage(
         delegates_chosen, delegates_total)
     self._candidates = []
     self._states = []
Example #12
 def __init__(self, party, delegates_needed, delegates_total, delegates_chosen):
     self.party = party
     self.delegates_needed = delegates_needed
     self.delegates_total = delegates_total
     self.delegates_chosen = delegates_chosen
     self.delegates_chosen_percent = calculate.percentage(
         delegates_chosen,
         delegates_total
     )
     self._candidates = []
     self._states = []
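
A hypothetical construction of the object above (the class name Nomination and the delegate counts are invented for illustration):

nomination = Nomination(
    party="GOP",
    delegates_needed=1237,
    delegates_total=2472,
    delegates_chosen=1536,
)
nomination.delegates_chosen_percent  # calculate.percentage(1536, 2472), roughly 62.1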
Example #13
    def handle(self, *args, **kwargs):
        """
        Make it happen.
        """
        self.set_options()
        self.header(
            "Creating GitHub issues for model fields without documentation"
        )

        # Loop through all the models and find any fields without docs
        field_count = 0
        missing_list = []
        for m in get_model_list():
            field_list = m().get_field_list()
            field_count += len(field_list)
            for f in field_list:
                if not self.has_docs(f):
                    missing_list.append((m, f))

        # If everything is done, declare victory
        if not missing_list:
            self.success("All %s fields documented!" % field_count)
            return False

        # If not, loop through the missing and create issues
        missing_count = len(missing_list)
        self.log(
            "- %s/%s (%d%%) of fields lack documentation" % (
                intcomma(missing_count),
                intcomma(field_count),
                calculate.percentage(missing_count, field_count)
            )
        )
        for model, field in missing_list[50:101]:
            # For now we are excluding the 'other' model module to
            # avoid overkill
            if model().klass_group != 'other':
                self.create_issue(model, field)
Example #14
 def counties(self):
     """
     Returns all the counties that report results for this race as a list
     of ReportingUnit objects.
     """
     ru_list = sorted(
         [o for o in self.reporting_units if o.fips and not o.is_state and o.fips != '00000'],
         key=lambda x: x.name
     )
     # If the AP reports sub-County data for this state, as they do for some
     # New England states, we'll need to aggregate it here. If not, we can
     # just pass out the data "as is."
     if self.state.abbrev in COUNTY_CROSSWALK:
         d = {}
         for ru in ru_list:
             try:
                 d[ru.fips].append(ru)
             except KeyError:
                 d[ru.fips] = [ru]
         county_list = []
         for county, units in d.items():
             ru = ReportingUnit(
                 name=COUNTY_CROSSWALK[self.state.abbrev][county],
                 ap_number='',
                 fips=county,
                 abbrev=self.name,
                 precincts_reporting=sum([int(i.precincts_reporting) for i in units]),
                 precincts_total=sum([int(i.precincts_total) for i in units]),
                 num_reg_voters=sum([int(i.num_reg_voters) for i in units]),
                 votes_cast=sum([int(i.votes_cast) for i in units])
             )
             ru.precincts_reporting_percent = calculate.percentage(
                 ru.precincts_reporting,
                 ru.precincts_total
             )
             # Group all the candidates
             cands = {}
             for unit in units:
                 for result in unit.results:
                     try:
                         cands[result.candidate.ap_polra_number].append(result)
                     except KeyError:
                         cands[result.candidate.ap_polra_number] = [result]
             for ap_polra_number, results in cands.items():
                 combined = Result(
                     candidate=results[0].candidate,
                     reporting_unit=ru,
                     vote_total=sum([i.vote_total for i in results]),
                     vote_total_percent=calculate.percentage(
                         sum([i.vote_total for i in results]),
                         ru.votes_cast
                     )
                 )
                 # Update result connected to the reporting unit
                 ru.update_result(combined)
             # Load the finished county into our list
             county_list.append(ru)
         return county_list
     else:
         return ru_list
Example #15
 def _get_flat_results(self, ftp=None):
     """
     Download, parse and structure the state and county votes totals.
     """
     # Download the data
     flat_list = self.client._fetch_flatfile(
         self.results_file_path,
          [  # First the basic fields that will be the same in each row
             'test',
             'election_date',
             'state_postal',
             'county_number',
             'fips',
             'county_name',
             'race_number',
             'office_id',
             'race_type_id',
             'seat_number',
             'office_name',
             'seat_name',
             'race_type_party',
             'race_type',
             'office_description',
             'number_of_winners',
             'number_in_runoff',
             'precincts_reporting',
             'total_precincts',
         ],
          [  # Then the candidate fields that will repeat after the basics
             'candidate_number',
             'order',
             'party',
             'first_name',
             'middle_name',
             'last_name',
             'junior',
             'use_junior',
             'incumbent',
             'vote_count',
             'is_winner',
             'national_politician_id',
         ]
     )
     
     # Figure out if we're dealing with test data or the real thing
     self.is_test = flat_list[0]['test'] == 't'
     
     # Start looping through the lines...
     for row in flat_list:
         
         # Get the race
         race = self.get_race(row['race_number'])
         
         # Figure out if it's a state or a county
          fips = row['fips']
         is_state = row['county_number'] == '1'
         county_number = str(row['county_number'])
         
         # Pull the reporting unit
         reporting_unit = race.get_reporting_unit(
             "%s%s" % (row['county_name'], county_number)
         )
         # Loop through all the candidates
         votes_cast = 0
         for cand in row['candidates']:
             # Skip it if the candidate is empty, as it sometimes is at
             # the end of the row
             if not cand['candidate_number']:
                 continue
             
             # Pull the existing candidate object
             candidate = race.get_candidate(cand["candidate_number"])
             
             # Pull the vote total
             vote_count = int(cand['vote_count'])
             
             # Add it to the overall total
             votes_cast += vote_count
             
             # Update the candidate's global vote total if data are statewide
             if is_state:
                 candidate.vote_total = vote_count
             
             # Set is_winner and is_runoff
             # (This will just get set over and over as we loop
             # but AP seems to put the statewide result in for every 
             # reporting unit so I think we're safe.)
             candidate.is_winner = cand['is_winner'] == 'X'
             candidate.is_runoff = cand['is_winner'] == 'R'
             
             # Set whether the candidate is an incumbent
             candidate.is_incumbent = cand['incumbent'] == '1'
             
             # Create the Result object, which is specific to the
             # reporting unit in this row of the flatfile.
             result = Result(
                  candidate=candidate,
                  vote_total=vote_count,
                  reporting_unit=reporting_unit
             )
             # Update result connected to the reporting unit
             reporting_unit.update_result(result)
             
         # Update the reporting unit's precincts status
         reporting_unit.precincts_total = int(row['total_precincts'])
         reporting_unit.precincts_reporting = int(row['precincts_reporting'])
         reporting_unit.precincts_reporting_percent = calculate.percentage(
             reporting_unit.precincts_reporting,
             reporting_unit.precincts_total
         )
         
         # Update the total votes cast
         reporting_unit.votes_cast = votes_cast
         
         # Loop back through the results and set the percentages now
         # that we know the overall total
         for result in reporting_unit.results:
             result.vote_total_percent = calculate.percentage(
                 result.vote_total, 
                 votes_cast
             )
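
For reference, a hypothetical shape of one parsed row from the flatfile, inferred from how the loop above indexes it (all values invented):

row = {
    'test': 't',
    'fips': '06037',
    'county_number': '1',
    'county_name': 'Los Angeles',
    'race_number': '12345',
    'precincts_reporting': '2493',
    'total_precincts': '4986',
    # ...the remaining basic fields listed above...
    'candidates': [
        {'candidate_number': '101', 'vote_count': '5000', 'is_winner': 'X', 'incumbent': '1'},
        {'candidate_number': '102', 'vote_count': '3000', 'is_winner': '', 'incumbent': '0'},
    ],
}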
Example #16
    def _get_results(self, ftp=None):
        """
        Download, parse and structure the state and county votes totals.
        """
        # Download the data
        flat_list = self._fetch_flatfile(
            self.results_file_path,
            [
                # First the basic fields that will be the same in each row
                'test',
                'election_date',
                'state_postal',
                'county_number',
                'fips',
                'county_name',
                'race_number',
                'office_id',
                'race_type_id',
                'seat_number',
                'office_name',
                'seat_name',
                'race_type_party',
                'race_type',
                'office_description',
                'number_of_winners',
                'number_in_runoff',
                'precincts_reporting',
                'total_precincts',
            ],
            [
                # Then the candidate fields that will repeat after the basics
                'candidate_number',
                'order',
                'party',
                'first_name',
                'middle_name',
                'last_name',
                'junior',
                'use_junior',
                'incumbent',
                'vote_count',
                'is_winner',
                'national_politician_id',
            ]
        )

        # Figure out if we're dealing with test data or the real thing
        is_test = flat_list[0]['test'] == 't'

        # Start looping through the lines...
        for row in flat_list:

            # Get the race, with a special case for the presidential race
            ap_race_number = self.ap_number_template % ({
                'number': row['race_number'],
                'state': row['state_postal']
            })
            race = self.get_race(ap_race_number)

            # Pull the reporting unit
            ru_key = "%s%s" % (row['county_name'], row['county_number'])
            reporting_unit = self.get_reporting_unit(ru_key)

            # Total the votes
            votes_total = sum([int(o['vote_count']) for o in row['candidates']])

            # Loop through all the candidates
            for candrow in row['candidates']:
                # Skip it if the candidate is empty, as it sometimes is at
                # the end of the row
                if not candrow['candidate_number']:
                    continue

                # Pull the existing candidate object
                candidate = self.get_candidate(candrow["candidate_number"])

                cru = CandidateReportingUnit(
                    test=is_test,
                    initialization_data=False,
                    lastupdated=None,
                    # Race
                    electiondate=race.electiondate,
                    raceid=race.raceid,
                    statepostal=race.statepostal,
                    statename=race.statename,
                    racetype=race.racetype,
                    racetypeid=race.racetypeid,
                    officeid=race.officeid,
                    officename=race.officename,
                    seatname=race.seatname,
                    description=race.description,
                    seatnum=race.seatnum,
                    national=race.national,
                    is_ballot_measure=None,
                    uncontested=race.uncontested,
                    # Candidate
                    first=candidate.first,
                    last=candidate.last,
                    party=candidate.party,
                    candidateID=candidate.candidateid,
                    polID=candidate.polid,
                    polNum=candidate.polnum,
                    incumbent=candrow['incumbent'] == '1',
                    ballotOrder=candidate.ballotorder,
                    # Results
                    voteCount=int(candrow['vote_count']),
                    votePct=calculate.percentage(int(candrow['vote_count']), votes_total, multiply=False) or 0.0,
                    winner=candrow['is_winner'],
                    # Reporting unit
                    level=reporting_unit.level,
                    reportingunitname=reporting_unit.reportingunitname,
                    reportingunitid=reporting_unit.reportingunitid,
                    fipscode=reporting_unit.fipscode,
                    precinctsreporting=int(row['precincts_reporting']),
                    precinctstotal=int(row['total_precincts']),
                    precinctsreportingpct=calculate.percentage(int(row['precincts_reporting']), int(row['total_precincts']), multiply=True) or 0.0,
                )

                cru.key = "%s%s%s" % (
                    race.raceid,
                    ru_key,
                    candrow["candidate_number"],
                )
                self._results[cru.key] = cru

            # Update the reporting unit's precincts status
            reporting_unit.precinctstotal = int(row['total_precincts'])
            reporting_unit.precinctsreporting = int(row['precincts_reporting'])
            reporting_unit.precinctsreportingpct = calculate.percentage(
                reporting_unit.precinctsreporting,
                reporting_unit.precinctstotal,
                multiply=True
            ) or 0.0
            reporting_unit.votecount = votes_total
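
The "or 0.0" guards above work because calculate.percentage returns None when the denominator is zero (fail_silently defaults to True), so an empty reporting unit falls back to 0.0 rather than None:

calculate.percentage(5, 0) or 0.0                    # -> 0.0
calculate.percentage(5, 10, multiply=False) or 0.0   # -> 0.5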
Example #17
    def get_context_data(self, **kwargs):
        # Quick notation to access all complaints
        complaints = Complaint.objects.all()

        # Quick means of accessing both open and closed cases
        open_cases = complaints.filter(is_closed=False)
        closed_cases = complaints.filter(is_closed=True)

        # Overall complaints not addressed within a year
        over_one_year = complaints.filter(more_than_one_year=True)
        open_over_one_year = over_one_year.filter(is_closed=False)
        closed_over_one_year = over_one_year.filter(is_closed=True)

        # Total counts of cases, all priority levels
        total_count = complaints.all().count()
        total_by_csr = get_counts_by_csr(complaints)

        # Counts of open cases, all priority levels
        open_cases_count = open_cases.count()
        open_by_csr = get_counts_by_csr(open_cases)

        # Counts of cases that have been open for more than a year, all priority levels
        open_over_one_year_count = open_over_one_year.count()
        open_over_one_year_by_csr = get_counts_by_csr(open_over_one_year)

        # Counts of cases that were closed, but have been open for more than a year, all priority levels.
        closed_over_one_year_count = closed_over_one_year.count()
        closed_over_one_year_by_csr = get_counts_by_csr(closed_over_one_year)

        # A much better means of getting expected wait times is to use a survival analysis function
        # In this case, we use a Kaplan-Meier estimator from the Python package lifelines
        # We repeat this for all complaints, and for each CSR priority level.
        all_complaints = Complaint.objects.exclude(days_since_complaint__lt=0)
        kmf_fit = get_kmf_fit(all_complaints)
        median_wait_time_kmf = get_kmf_median(kmf_fit)

        csr1 = all_complaints.filter(csr_priority="1")
        kmf_fit_csr1 = get_kmf_fit(csr1)
        median_wait_time_csr1_kmf = get_kmf_median(kmf_fit_csr1)

        csr2 = all_complaints.filter(csr_priority="2")
        kmf_fit_csr2 = get_kmf_fit(csr2)
        median_wait_time_csr2_kmf = get_kmf_median(kmf_fit_csr2)

        csr3 = all_complaints.filter(csr_priority="3")
        kmf_fit_csr3 = get_kmf_fit(csr3)
        median_wait_time_csr3_kmf = get_kmf_median(kmf_fit_csr3)

        region_names = [
            'Central', 'East Los Angeles', 'Harbor', 'North Valley',
            'South Los Angeles', 'South Valley', 'West Los Angeles'
        ]
        regions = {}

        # Iterate over each name in our region_names list
        for region in region_names:
            # Filter for complaints in each region
            qs = complaints.filter(area_planning_commission=region,
                                   days_since_complaint__gte=0)
            # create a data dictionary for the region
            regions[region] = {}
            # get a count of how many complaints total are in the queryset
            regions[region]['total'] = qs.count()
            regions[region][
                'avg_complaints_per_year'] = get_avg_complaints_filed_per_year(
                    region)

            # Separate the complaints into querysets of their respective priority levels
            region_csr1 = qs.filter(csr_priority="1")
            region_csr2 = qs.filter(csr_priority="2")
            region_csr3 = qs.filter(csr_priority="3")

            # Find the KMF fit for all complaints in the area and by each priority level
            regional_kmf_fit = get_kmf_fit(qs)
            regional_kmf_fit_csr1 = get_kmf_fit(region_csr1)
            regional_kmf_fit_csr2 = get_kmf_fit(region_csr2)
            regional_kmf_fit_csr3 = get_kmf_fit(region_csr3)

            # Get the median value from the KMF fit.
            regions[region]['median_wait_kmf'] = get_kmf_median(
                regional_kmf_fit)
            regions[region]['median_wait_kmf_csr1'] = get_kmf_median(
                regional_kmf_fit_csr1)
            regions[region]['median_wait_kmf_csr2'] = get_kmf_median(
                regional_kmf_fit_csr2)
            regions[region]['median_wait_kmf_csr3'] = get_kmf_median(
                regional_kmf_fit_csr3)

            regions[region]['gt_year'] = qs.filter(
                more_than_one_year=True).count()

            # Paste response time breakdown here
            # Also grab counts of the number of complaints greater than 30, 90 and 180 days
            regions[region]['gt_30_days'] = qs.filter(gt_30_days=True).count()
            regions[region]['gt_90_days'] = qs.filter(gt_90_days=True).count()
            regions[region]['gt_180_days'] = qs.filter(
                gt_180_days=True).count()
            # use calculate to find percentages
            regions[region]['per_gt_30_days'] = calculate.percentage(
                regions[region]['gt_30_days'], regions[region]['total'])
            regions[region]['per_gt_90_days'] = calculate.percentage(
                regions[region]['gt_90_days'], regions[region]['total'])
            regions[region]['per_gt_180_days'] = calculate.percentage(
                regions[region]['gt_180_days'], regions[region]['total'])
            regions[region]['per_gt_year'] = calculate.percentage(
                regions[region]['gt_year'], regions[region]['total'])

        return locals()
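
get_kmf_fit and get_kmf_median are project helpers that are not shown here; a minimal sketch of what they might look like with lifelines, assuming each complaint exposes the days_since_complaint and is_closed fields used in the querysets above:

from lifelines import KaplanMeierFitter

def get_kmf_fit(qs):
    # Fit a Kaplan-Meier survival curve to the wait times; closed cases
    # count as observed events, still-open cases as censored.
    durations = [c.days_since_complaint for c in qs]
    observed = [c.is_closed for c in qs]
    kmf = KaplanMeierFitter()
    kmf.fit(durations, event_observed=observed)
    return kmf

def get_kmf_median(kmf):
    # Recent lifelines releases expose this as median_survival_time_;
    # older versions used kmf.median_.
    return kmf.median_survival_time_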
Example #18
    def _get_results(self, ftp=None):
        """
        Download, parse and structure the state and county votes totals.
        """
        # Download the data
        flat_list = self._fetch_flatfile(
            self.results_file_path,
            [
                # First the basic fields that will be the same in each row
                'test',
                'election_date',
                'state_postal',
                'county_number',
                'fips',
                'county_name',
                'race_number',
                'office_id',
                'race_type_id',
                'seat_number',
                'office_name',
                'seat_name',
                'race_type_party',
                'race_type',
                'office_description',
                'number_of_winners',
                'number_in_runoff',
                'precincts_reporting',
                'total_precincts',
            ],
            [
                # Then the candidate fields that will repeat after the basics
                'candidate_number',
                'order',
                'party',
                'first_name',
                'middle_name',
                'last_name',
                'junior',
                'use_junior',
                'incumbent',
                'vote_count',
                'is_winner',
                'national_politician_id',
            ])

        # Figure out if we're dealing with test data or the real thing
        is_test = flat_list[0]['test'] == 't'

        # Start looping through the lines...
        for row in flat_list:

            # Get the race, with a special case for the presidential race
            ap_race_number = self.ap_number_template % (
                {
                    'number': row['race_number'],
                    'state': row['state_postal']
                })
            race = self.get_race(ap_race_number)

            # Pull the reporting unit
            ru_key = "%s%s" % (row['county_name'], row['county_number'])
            reporting_unit = self.get_reporting_unit(ru_key)

            # Total the votes
            votes_total = sum(
                [int(o['vote_count']) for o in row['candidates']])

            # Loop through all the candidates
            for candrow in row['candidates']:
                # Skip it if the candidate is empty, as it sometimes is at
                # the end of the row
                if not candrow['candidate_number']:
                    continue

                # Pull the existing candidate object
                candidate = self.get_candidate(candrow["candidate_number"])

                cru = CandidateReportingUnit(
                    test=is_test,
                    initialization_data=False,
                    lastupdated=None,
                    # Race
                    electiondate=race.electiondate,
                    raceid=race.raceid,
                    statepostal=race.statepostal,
                    statename=race.statename,
                    racetype=race.racetype,
                    racetypeid=race.racetypeid,
                    officeid=race.officeid,
                    officename=race.officename,
                    seatname=race.seatname,
                    description=race.description,
                    seatnum=race.seatnum,
                    national=race.national,
                    is_ballot_measure=None,
                    uncontested=race.uncontested,
                    # Candidate
                    first=candidate.first,
                    last=candidate.last,
                    party=candidate.party,
                    candidateID=candidate.candidateid,
                    polID=candidate.polid,
                    polNum=candidate.polnum,
                    incumbent=candrow['incumbent'] == '1',
                    ballotOrder=candidate.ballotorder,
                    # Results
                    voteCount=int(candrow['vote_count']),
                    votePct=calculate.percentage(int(candrow['vote_count']),
                                                 votes_total,
                                                 multiply=False) or 0.0,
                    winner=candrow['is_winner'],
                    # Reporting unit
                    level=reporting_unit.level,
                    reportingunitname=reporting_unit.reportingunitname,
                    reportingunitid=reporting_unit.reportingunitid,
                    fipscode=reporting_unit.fipscode,
                    precinctsreporting=int(row['precincts_reporting']),
                    precinctstotal=int(row['total_precincts']),
                    precinctsreportingpct=calculate.percentage(
                        int(row['precincts_reporting']),
                        int(row['total_precincts']),
                        multiply=True) or 0.0,
                )

                cru.key = "%s%s%s" % (
                    race.raceid,
                    ru_key,
                    candrow["candidate_number"],
                )
                self._results[cru.key] = cru

            # Update the reporting unit's precincts status
            reporting_unit.precinctstotal = int(row['total_precincts'])
            reporting_unit.precinctsreporting = int(row['precincts_reporting'])
            reporting_unit.precinctsreportingpct = calculate.percentage(
                reporting_unit.precinctsreporting,
                reporting_unit.precinctstotal,
                multiply=True) or 0.0
            reporting_unit.votecount = votes_total
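
Note the two conventions in the example above: votePct is stored as a 0-1 fraction (multiply=False) while precinctsreportingpct is a 0-100 value (multiply=True):

calculate.percentage(3, 4, multiply=False)  # 0.75
calculate.percentage(3, 4)                  # 75.0 (multiply defaults to True)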
Example #19
    def _get_flat_results(self, ftp=None):
        """
        Download, parse and structure the state and county votes totals.
        """
        # Download the data
        flat_list = self.client._fetch_flatfile(
            self.results_file_path,
            [  # First the basic fields that will be the same in each row
                'test',
                'election_date',
                'state_postal',
                'county_number',
                'fips',
                'county_name',
                'race_number',
                'office_id',
                'race_type_id',
                'seat_number',
                'office_name',
                'seat_name',
                'race_type_party',
                'race_type',
                'office_description',
                'number_of_winners',
                'number_in_runoff',
                'precincts_reporting',
                'total_precincts',
            ],
            [  # Then the candidate fields that will repeat after the basics
                'candidate_number',
                'order',
                'party',
                'first_name',
                'middle_name',
                'last_name',
                'junior',
                'use_junior',
                'incumbent',
                'vote_count',
                'is_winner',
                'national_politician_id',
            ])

        # Figure out if we're dealing with test data or the real thing
        self.is_test = flat_list[0]['test'] == 't'

        # Start looping through the lines...
        for row in flat_list:

            # Get the race
            race = self.get_race(row['race_number'])

            # Figure out if it's a state or a county
            fips = row['fips']
            is_state = row['county_number'] == '1'
            county_number = str(row['county_number'])

            # Pull the reporting unit
            reporting_unit = race.get_reporting_unit(
                "%s%s" % (row['county_name'], county_number))
            # Loop through all the candidates
            votes_cast = 0
            for cand in row['candidates']:
                # Skip it if the candidate is empty, as it sometimes is at
                # the end of the row
                if not cand['candidate_number']:
                    continue

                # Pull the existing candidate object
                candidate = race.get_candidate(cand["candidate_number"])

                # Pull the vote total
                vote_count = int(cand['vote_count'])

                # Add it to the overall total
                votes_cast += vote_count

                # Update the candidate's global vote total if data are statewide
                if is_state:
                    candidate.vote_total = vote_count

                # Set is_winner and is_runoff
                # (This will just get set over and over as we loop
                # but AP seems to put the statewide result in for every
                # reporting unit so I think we're safe.)
                candidate.is_winner = cand['is_winner'] == 'X'
                candidate.is_runoff = cand['is_winner'] == 'R'

                # Create the Result object, which is specific to the
                # reporting unit in this row of the flatfile.
                result = Result(candidate=candidate,
                                vote_total=vote_count,
                                reporting_unit=reporting_unit)
                # Update result connected to the reporting unit
                reporting_unit.update_result(result)

            # Update the reporting unit's precincts status
            reporting_unit.precincts_total = int(row['total_precincts'])
            reporting_unit.precincts_reporting = int(
                row['precincts_reporting'])
            reporting_unit.precincts_reporting_percent = calculate.percentage(
                reporting_unit.precincts_reporting,
                reporting_unit.precincts_total)

            # Update the total votes cast
            reporting_unit.votes_cast = votes_cast

            # Loop back through the results and set the percentages now
            # that we know the overall total
            for result in reporting_unit.results:
                result.vote_total_percent = calculate.percentage(
                    result.vote_total, votes_cast)
Example #20
    def get_context_data(self, **kwargs):
        # Quick notation to access all complaints
        complaints = Complaint.objects.all()

        # Quick means of accessing both open and closed cases
        open_cases = complaints.filter(is_closed=False)
        closed_cases = complaints.filter(is_closed=True)

        # Overall complaints not addressed within a year
        over_one_year = complaints.filter(more_than_one_year=True)
        open_over_one_year = over_one_year.filter(is_closed=False)
        closed_over_one_year = over_one_year.filter(is_closed=True)

        # Total counts of cases, all priority levels
        total_count = complaints.all().count()
        total_by_csr = get_counts_by_csr(complaints)

        # Counts of open cases, all priority levels
        open_cases_count = open_cases.count()
        open_by_csr = get_counts_by_csr(open_cases)

        # Counts of cases that have been open for more than a year, all priority levels
        open_over_one_year_count = open_over_one_year.count()
        open_over_one_year_by_csr = get_counts_by_csr(open_over_one_year)

        # Counts of cases that were closed, but have been open for more than a year, all priority levels.
        closed_over_one_year_count = closed_over_one_year.count()
        closed_over_one_year_by_csr = get_counts_by_csr(closed_over_one_year)

        # A much better means of getting expected wait times is to use a survival analysis function
        # In this case, we use a Kaplan-Meier estimator from the Python package lifelines
        # We repeat this for all complaints, and for each CSR priority level.
        all_complaints = Complaint.objects.exclude(days_since_complaint__lt=0)
        kmf_fit = get_kmf_fit(all_complaints)
        median_wait_time_kmf = get_kmf_median(kmf_fit)

        csr1 = all_complaints.filter(csr_priority="1")
        kmf_fit_csr1 = get_kmf_fit(csr1)
        median_wait_time_csr1_kmf = get_kmf_median(kmf_fit_csr1)

        csr2 = all_complaints.filter(csr_priority="2")
        kmf_fit_csr2 = get_kmf_fit(csr2)   
        median_wait_time_csr2_kmf = get_kmf_median(kmf_fit_csr2)

        csr3 = all_complaints.filter(csr_priority="3")
        kmf_fit_csr3 = get_kmf_fit(csr3)
        median_wait_time_csr3_kmf = get_kmf_median(kmf_fit_csr3)

        region_names = ['Central','East Los Angeles','Harbor','North Valley','South Los Angeles','South Valley','West Los Angeles']
        regions = {}

        # Iterate over each name in our region_names list
        for region in region_names:
            # Filter for complaints in each region
            qs = complaints.filter(area_planning_commission=region, days_since_complaint__gte=0)
            # create a data dictionary for the region
            regions[region] = {}
            # get a count of how many complaints total are in the queryset
            regions[region]['total'] = qs.count()
            regions[region]['avg_complaints_per_year'] = get_avg_complaints_filed_per_year(region)

            # Separate the complaints into querysets of their respective priority levels 
            region_csr1 = qs.filter(csr_priority="1")
            region_csr2 = qs.filter(csr_priority="2")
            region_csr3 = qs.filter(csr_priority="3")

            # Find the KMF fit for all complaints in the area and by each priority level
            regional_kmf_fit = get_kmf_fit(qs)
            regional_kmf_fit_csr1 = get_kmf_fit(region_csr1)
            regional_kmf_fit_csr2 = get_kmf_fit(region_csr2)
            regional_kmf_fit_csr3 = get_kmf_fit(region_csr3)

            # Get the median value from the KMF fit. 
            regions[region]['median_wait_kmf'] = get_kmf_median(regional_kmf_fit)
            regions[region]['median_wait_kmf_csr1'] = get_kmf_median(regional_kmf_fit_csr1)
            regions[region]['median_wait_kmf_csr2'] = get_kmf_median(regional_kmf_fit_csr2)
            regions[region]['median_wait_kmf_csr3'] = get_kmf_median(regional_kmf_fit_csr3)

            regions[region]['gt_year'] = qs.filter(more_than_one_year=True).count()

            # Paste response time breakdown here
            # Also grab counts of the number of complaints greater than 30, 90 and 180 days
            regions[region]['gt_30_days'] = qs.filter(gt_30_days=True).count()
            regions[region]['gt_90_days'] = qs.filter(gt_90_days=True).count()
            regions[region]['gt_180_days'] = qs.filter(gt_180_days=True).count()
            # use calculate to find percentages
            regions[region]['per_gt_30_days'] = calculate.percentage(regions[region]['gt_30_days'],regions[region]['total'])
            regions[region]['per_gt_90_days'] = calculate.percentage(regions[region]['gt_90_days'],regions[region]['total'])
            regions[region]['per_gt_180_days'] = calculate.percentage(regions[region]['gt_180_days'],regions[region]['total'])
            regions[region]['per_gt_year'] = calculate.percentage(regions[region]['gt_year'],regions[region]['total'])
            
        return locals()