Example #1
0
def run_state(state, states_only=False):
    """
    Run the R_t inference for each county in a state.

    Parameters
    ----------
    state: str
        State to run against.
    states_only: bool
        If True only run the state level.
    """
    # State-level inference first.
    state_obj = us.states.lookup(state)
    state_result = RtInferenceEngine.run_for_fips(state_obj.fips)
    state_output = get_run_artifact_path(state_obj.fips, RunArtifact.RT_INFERENCE_RESULT)
    state_result.to_json(state_output)

    if states_only:
        return

    # County-level inference. NOTE: multiprocessing has caused problems for
    # this engine, so the counties run sequentially via a plain .map().
    county_fips = load_data.get_all_fips_codes_for_a_state(state)
    county_results = county_fips.map(RtInferenceEngine.run_for_fips).tolist()

    for fips, result in zip(county_fips, county_results):
        county_output = get_run_artifact_path(fips, RunArtifact.RT_INFERENCE_RESULT)
        # Counties with insufficient data yield None; skip writing those.
        if result is not None:
            result.to_json(county_output)
Example #2
0
def run_state(state, ensemble_kwargs, states_only=False):
    """
    Run the EnsembleRunner for each county in a state.

    Parameters
    ----------
    state: str
        State to run against.
    ensemble_kwargs: dict
        Kwargs passed to the EnsembleRunner object.
    states_only: bool
        If True only run the state level.
    """
    # Run the state level
    runner = EnsembleRunner(fips=us.states.lookup(state).fips,
                            **ensemble_kwargs)
    runner.run_ensemble()

    if not states_only:
        # Run county level in parallel. Using the pool as a context manager
        # guarantees cleanup even if a worker raises; the previous
        # close()-without-join left worker processes unjoined (and leaked the
        # pool entirely on an exception inside map()).
        all_fips = load_data.get_all_fips_codes_for_a_state(state)
        f = partial(_run_county, ensemble_kwargs=ensemble_kwargs)
        with Pool() as p:
            p.map(f, all_fips)
Example #3
0
    def generate_surge_spreadsheet(self):
        """
        Produce a spreadsheet summarizing peaks for each county in
        ``self.state`` and write it as JSON to ``self.surge_filename``.

        For every county and suppression policy, records the peak value
        statistics (mean / median / CI25 / CI75) and the median peak date for
        each tracked compartment.

        Returns
        -------
        None
        """
        metadata = load_data.load_county_metadata().set_index("fips")
        all_fips = load_data.get_all_fips_codes_for_a_state(self.state)
        all_data = {
            fips: load_data.load_ensemble_results(fips)
            for fips in all_fips
        }

        records = []
        for fips, ensembles in all_data.items():
            county_name = metadata.loc[fips]["county"]
            # Simulation start date; peak times below are day offsets from t0.
            t0 = fit_results.load_t0(fips)

            for suppression_policy, ensemble in ensembles.items():

                county_record = dict(
                    county_name=county_name,
                    county_fips=fips,
                    mitigation_policy=policy_to_mitigation(suppression_policy),
                )

                for compartment in [
                        "HGen",
                        "general_admissions_per_day",
                        "HICU",
                        "icu_admissions_per_day",
                        "total_new_infections",
                        "direct_deaths_per_day",
                        "total_deaths",
                        "D",
                ]:
                    compartment_name = compartment_to_name_map[compartment]

                    county_record[compartment_name + " Peak Value Mean"] = (
                        "%.0f" % ensemble[compartment]["peak_value_mean"])
                    county_record[compartment_name + " Peak Value Median"] = (
                        "%.0f" % ensemble[compartment]["peak_value_ci50"])
                    county_record[compartment_name + " Peak Value CI25"] = (
                        "%.0f" % ensemble[compartment]["peak_value_ci25"])
                    county_record[compartment_name + " Peak Value CI75"] = (
                        "%.0f" % ensemble[compartment]["peak_value_ci75"])
                    county_record[compartment_name + " Peak Time Median"] = ((
                        t0 +
                        timedelta(days=ensemble[compartment]["peak_time_ci50"])
                    ).date().isoformat())

                    # Leaving for now...
                    # if 'surge_start' in ensemble[compartment]:
                    #     if not np.isnan(np.nanmean(ensemble[compartment]['surge_start'])):
                    #         county_record[compartment_name + ' Surge Start Mean'] = (t0 + timedelta(days=np.nanmean(ensemble[compartment]['surge_start']))).date().isoformat()
                    #         county_record[compartment_name + ' Surge End Mean'] = (t0 + timedelta(days=np.nanmean(ensemble[compartment]['surge_end']))).date().isoformat()

                records.append(county_record)

        # BUG FIX: DataFrame has no `write_json` method; the original raised
        # AttributeError here and never wrote the output. Use `to_json`.
        pd.DataFrame(records).to_json(self.surge_filename)