Example 1
0
 def test_get_sub_population_policies(self) -> None:
     """get_sub_population_policies returns only the policies whose
     sub_population matches the given attribute dict.

     The fixture's first policy is expected to be the sole match for
     {'test1': 'value'}.
     """
     expected_list = self.policy_list[:1]
     result_list = SparkPolicy.get_sub_population_policies(
         self.policy_list, sub_population={'test1': 'value'})
     self.assertEqual(expected_list, result_list)
Example 2
0
    def _build_sub_simulations(
        cls,
        outflows_data: pd.DataFrame,
        transitions_data: pd.DataFrame,
        total_population_data: pd.DataFrame,
        simulation_compartments: Dict[str, str],
        disaggregation_axes: List[str],
        user_inputs: Dict[str, Any],
        policy_list: List[SparkPolicy],
        first_relevant_ts: int,
        sub_group_ids_dict: Dict[str, Dict[str, Any]],
        should_initialize_compartment_populations: bool,
        should_scale_populations_after_step: bool,
    ) -> Dict[str, SubSimulation]:
        """Helper function for initialize_simulation. Initialize one sub simulation per sub-population.

        Args:
            outflows_data, transitions_data, total_population_data: input frames;
                each must contain every column named in `disaggregation_axes`.
            simulation_compartments: compartment architecture forwarded to each
                sub simulation.
            disaggregation_axes: columns whose values identify a sub-population.
            user_inputs: simulation settings forwarded unchanged.
            policy_list: all policies; each sub simulation receives only the
                ones matching its sub-population attributes.
            first_relevant_ts: earliest time step forwarded to each sub simulation.
            sub_group_ids_dict: maps sub-group id -> {axis: value} attributes.
            should_initialize_compartment_populations,
            should_scale_populations_after_step: flags forwarded to the factory.

        Returns:
            Dict mapping each sub-group id to its built SubSimulation.

        Emits a Warning for any input rows not claimed by any sub-group.
        """
        # Reset indices to facilitate unused-data tracking: after the reset,
        # dropping a disaggregated slice's index labels from the corresponding
        # "unused" frame marks exactly those rows as consumed.
        transitions_data = transitions_data.reset_index(drop=True)
        outflows_data = outflows_data.reset_index(drop=True)
        total_population_data = total_population_data.reset_index(drop=True)

        def _rows_matching(df: pd.DataFrame, attributes: pd.Series) -> pd.DataFrame:
            # Rows of `df` whose disaggregation-axis columns all equal `attributes`.
            return df[(df[disaggregation_axes] == attributes).all(axis=1)]

        sub_simulations: Dict[str, SubSimulation] = {}
        unused_transitions_data = transitions_data
        unused_outflows_data = outflows_data
        unused_total_population_data = total_population_data
        for sub_group_id, group_attribute_dict in sub_group_ids_dict.items():
            group_attributes = pd.Series(group_attribute_dict)
            disaggregated_transitions_data = _rows_matching(
                transitions_data, group_attributes
            )
            disaggregated_outflows_data = _rows_matching(
                outflows_data, group_attributes
            )
            disaggregated_total_population_data = _rows_matching(
                total_population_data, group_attributes
            )

            # Mark this group's rows as consumed.
            unused_transitions_data = unused_transitions_data.drop(
                disaggregated_transitions_data.index
            )
            unused_outflows_data = unused_outflows_data.drop(
                disaggregated_outflows_data.index
            )
            unused_total_population_data = unused_total_population_data.drop(
                disaggregated_total_population_data.index
            )

            # Select the policies relevant to this simulation group
            group_policies = SparkPolicy.get_sub_population_policies(
                policy_list, group_attribute_dict
            )

            sub_simulations[sub_group_id] = SubSimulationFactory.build_sub_simulation(
                outflows_data=disaggregated_outflows_data,
                transitions_data=disaggregated_transitions_data,
                total_population_data=disaggregated_total_population_data,
                compartments_architecture=simulation_compartments,
                user_inputs=user_inputs,
                policy_list=group_policies,
                first_relevant_ts=first_relevant_ts,
                should_single_cohort_initialize_compartments=should_initialize_compartment_populations,
                should_scale_populations_after_step=should_scale_populations_after_step,
            )

        # Surface rows that no sub-group claimed — usually a sign that the
        # disaggregation axes don't cover the data.
        if len(unused_transitions_data) > 0:
            warn(
                f"Some transitions data left unused: {unused_transitions_data}", Warning
            )
        if len(unused_outflows_data) > 0:
            warn(f"Some outflows data left unused: {unused_outflows_data}", Warning)
        if len(unused_total_population_data) > 0:
            warn(
                f"Some total population data left unused: {unused_total_population_data}",
                Warning,
            )

        return sub_simulations
Example 3
0
    def initialize_simulation(self, outflows_data: pd.DataFrame,
                              transitions_data: pd.DataFrame,
                              total_population_data: pd.DataFrame,
                              simulation_compartments: Dict[str, Optional[CompartmentTransitions]],
                              disaggregation_axes: List[str],
                              user_inputs: Dict,
                              initialization_period: int = 2,
                              microsim: bool = False,
                              microsim_data: Optional[pd.DataFrame] = None) -> None:
        """Initialize the simulation parameters along with all of the sub simulations.

        Populates `self.sub_group_ids_dict` (one entry per distinct combination
        of `disaggregation_axes` values found in `transitions_data`) and builds
        and initializes one SubSimulation per sub-group in
        `self.sub_simulations`.

        In macro-simulation mode (`microsim=False`), each group's policies come
        from `user_inputs['policy_list']`, and after initialization the whole
        simulation is stepped forward from `first_relevant_ts` to
        `user_inputs['start_time_step']`, then populations are rescaled.

        In micro-simulation mode (`microsim=True`), `microsim_data` is required,
        `user_inputs['policy_list']` must be empty, and a synthetic SparkPolicy
        is generated per compartment that substitutes the group's microsim
        transitions data for new admissions.

        Raises:
            ValueError: if `microsim` is True and `microsim_data` is None, or
                if `microsim` is True and `user_inputs['policy_list']` is
                non-empty.
        """

        #  defined in both cases because it's required as an input to sub-simulation, but not actually
        #   used in microsimulations
        first_relevant_ts = None
        if not microsim:
            # TODO(#4512): cap this at 50 years (non-trivial because ts unit unknown)
            # Back-cast window: `initialization_period` times the longest
            # sentence, so cohorts in flight at start_time_step are represented.
            if user_inputs['speed_run']:
                max_sentence = user_inputs['projection_time_steps'] + 1
            else:
                max_sentence = max(transitions_data.compartment_duration)
            first_relevant_ts = int(user_inputs['start_time_step'] - initialization_period * max_sentence)

        self.population_projections = pd.DataFrame()

        # populate self.sub_group_ids_dict so we can recover sub-group properties during validation
        # NOTE: with a single axis the groupby key is a scalar; with several it
        # is a tuple, hence the two branches below.
        for simulation_group_name, _ in transitions_data.groupby(disaggregation_axes):
            sub_group_id = str(simulation_group_name)

            if len(disaggregation_axes) == 1:
                self.sub_group_ids_dict[sub_group_id] = {disaggregation_axes[0]: simulation_group_name}
            else:
                self.sub_group_ids_dict[sub_group_id] = \
                    {disaggregation_axes[i]: simulation_group_name[i] for i in range(len(disaggregation_axes))}

        # Initialize one sub simulation per sub-population
        for sub_group_id in self.sub_group_ids_dict:
            group_attributes = pd.Series(self.sub_group_ids_dict[sub_group_id])
            # Keep only the rows whose disaggregation-axis columns all match
            # this sub-group's attributes.
            disaggregated_transitions_data = \
                transitions_data[(transitions_data[disaggregation_axes] == group_attributes).all(axis=1)]
            disaggregated_outflows_data = \
                outflows_data[(outflows_data[disaggregation_axes] == group_attributes).all(axis=1)]
            disaggregated_total_population_data = \
                total_population_data[(total_population_data[disaggregation_axes] == group_attributes).all(axis=1)]

            # Select the policies relevant to this simulation group

            # if micro-simulation, "policy" should be subbing in proper transitions data for the new admissions
            if microsim:
                if microsim_data is None:
                    raise ValueError("Microsim data is required for microsim mode")
                if len(user_inputs['policy_list']) != 0:
                    raise ValueError("Microsim option does not support policy inputs")
                disaggregated_microsim_data = \
                    microsim_data[(microsim_data[disaggregation_axes] == group_attributes).all(axis=1)]
                # One synthetic, non-retroactive policy per enabled compartment
                # that swaps in this group's microsim transitions data.
                group_policies = list()
                for full_comp in [i for i in simulation_compartments
                                  if simulation_compartments[i] is not None]:
                    group_policies.append(SparkPolicy(
                        policy_fn=partial(
                            CompartmentTransitions.use_alternate_transitions_data,
                            alternate_historical_transitions=
                            disaggregated_microsim_data[disaggregated_microsim_data.compartment == full_comp],
                            retroactive=False
                        ),
                        spark_compartment=full_comp,
                        sub_population=self.sub_group_ids_dict[sub_group_id],
                        apply_retroactive=False
                    ))

            else:
                group_policies = SparkPolicy.get_sub_population_policies(user_inputs['policy_list'],
                                                                         self.sub_group_ids_dict[sub_group_id])
            self.sub_simulations[sub_group_id] = SubSimulation(
                outflows_data=disaggregated_outflows_data,
                transitions_data=disaggregated_transitions_data,
                total_population_data=disaggregated_total_population_data,
                simulation_compartments=simulation_compartments,
                user_inputs=user_inputs,
                policy_list=group_policies,
                first_relevant_ts=first_relevant_ts,
                microsim=microsim
            )
            self.sub_simulations[sub_group_id].initialize()

        if not microsim:
            # run simulation up to the start_year if the back-cast option is enabled
            self.step_forward(user_inputs['start_time_step'] - first_relevant_ts)
            for simulation_obj in self.sub_simulations.values():
                simulation_obj.scale_total_populations()
Example 4
0
 def test_get_sub_population_policies(self) -> None:
     """Only the single policy matching {'test1': 'value'} is returned."""
     matching_policies = SparkPolicy.get_sub_population_policies(
         self.policy_list, sub_population={"test1": "value"})
     self.assertEqual(self.policy_list[:1], matching_policies)