Example #1
0
    def test_results_independent_of_data_order(self):
        """Shuffling the input rows must not change the initialized transitions."""
        policies = [
            SparkPolicy(
                policy_fn=CompartmentTransitions.test_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                apply_retroactive=True,
            ),
            SparkPolicy(
                policy_fn=CompartmentTransitions.test_non_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                apply_retroactive=False,
            ),
        ]

        # Same data, one copy row-shuffled.
        ordered = CompartmentTransitions(self.test_data)
        shuffled = CompartmentTransitions(self.test_data.sample(frac=1))

        ordered.initialize(policies)
        shuffled.initialize(policies)

        self.assertEqual(ordered, shuffled)
Example #2
0
    def test_results_independent_of_data_order(self) -> None:
        """Column-shuffling the previous table must not change the result."""
        policies = [
            SparkPolicy(
                policy_fn=TransitionTable.test_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                policy_ts=5,
                apply_retroactive=True,
            ),
            SparkPolicy(
                policy_fn=TransitionTable.test_non_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                policy_ts=5,
                apply_retroactive=False,
            ),
        ]

        ordered_table = TransitionTable(
            5,
            policies,
            self.prev_table.get_table(TransitionTableType.AFTER),
        )
        # Same seed table with its columns shuffled.
        shuffled_table = TransitionTable(
            5,
            policies,
            self.prev_table.get_table(TransitionTableType.AFTER).sample(
                frac=1, axis=1),
        )

        self.assertEqual(ordered_table, shuffled_table)
Example #3
0
    def test_preserve_normalized_outflow_behavior_preserves_normalized_outflow_behavior(
            self):
        """After a retroactive policy, the preserve policy must keep the
        normalized 'prison' outflow identical to the no-policy baseline."""
        policies = [
            SparkPolicy(
                policy_fn=CompartmentTransitions.test_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                apply_retroactive=True,
            ),
            SparkPolicy(
                policy_fn=partial(
                    CompartmentTransitions.preserve_normalized_outflow_behavior,
                    outflows=["prison"],
                    state="after_retroactive",
                    before_state="before",
                ),
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                apply_retroactive=True,
            ),
        ]

        with_policies = CompartmentTransitions(self.test_data)
        with_policies.initialize(policies)

        baseline = CompartmentTransitions(self.test_data)
        baseline.initialize([])

        assert_series_equal(
            baseline.transition_dfs["after_retroactive"]["prison"],
            with_policies.transition_dfs["after_retroactive"]["prison"],
        )
    def test_results_independent_of_data_order(self):
        """Row order of the input data must not affect the resulting object."""
        policies = [
            SparkPolicy(
                policy_fn=CompartmentTransitionsStub.test_retroactive_policy,
                spark_compartment='jail',
                sub_population={'compartment': 'test_compartment'},
                apply_retroactive=True,
            ),
            SparkPolicy(
                policy_fn=CompartmentTransitionsStub.test_non_retroactive_policy,
                spark_compartment='jail',
                sub_population={'compartment': 'test_compartment'},
                apply_retroactive=False,
            ),
        ]

        ordered = CompartmentTransitionsStub(self.test_data)
        shuffled = CompartmentTransitionsStub(self.test_data.sample(frac=1))

        # Run the identical initialization sequence on both copies.
        for transitions in (ordered, shuffled):
            transitions.initialize_transition_table()
            transitions.initialize(policies)

        self.assertEqual(ordered, shuffled)
Example #5
0
 def setUp(self):
     """Build two dummy policies targeting the prison and jail compartments."""
     prison_policy = SparkPolicy(
         policy_fn=TestSparkPolicy.dummy_policy_method,
         spark_compartment='prison',
         sub_population={'test1': 'value'},
         apply_retroactive=True,
     )
     jail_policy = SparkPolicy(
         policy_fn=TestSparkPolicy.dummy_policy_method,
         spark_compartment='jail',
         sub_population={'test2': 'value'},
         apply_retroactive=True,
     )
     self.policy_list = [prison_policy, jail_policy]
Example #6
0
    def test_apply_reduction_with_trivial_reductions_doesnt_change_transition_table(
        self, ) -> None:
        """Reductions that are zero-sized or affect nobody must be no-ops.

        Each reduction row is trivial in one way: either ``affected_fraction``
        or ``reduction_size`` is 0, so neither the multiplicative nor the
        additive policy should alter the table.
        """
        # The two policies share the same trivial spec; each partial gets its
        # own DataFrame instance so the policies cannot interact through a
        # shared object (previously the identical literal was duplicated).
        trivial_reductions = {
            "outflow": ["prison"] * 2,
            "affected_fraction": [0, 0.5],
            "reduction_size": [0.5, 0],
        }

        policy_mul = partial(
            TransitionTable.apply_reduction,
            reduction_df=pd.DataFrame(trivial_reductions),
            reduction_type="*",
            retroactive=False,
        )

        policy_add = partial(
            TransitionTable.apply_reduction,
            reduction_df=pd.DataFrame(trivial_reductions),
            reduction_type="+",
            retroactive=False,
        )

        # Keyword arguments for consistency with the other SparkPolicy call
        # sites in this file.
        compartment_policies = [
            SparkPolicy(
                policy_fn=policy_mul,
                spark_compartment="test_compartment",
                sub_population={"sub_group": "test_population"},
                policy_ts=5,
                apply_retroactive=False,
            ),
            SparkPolicy(
                policy_fn=policy_add,
                spark_compartment="test_compartment",
                sub_population={"sub_group": "test_population"},
                policy_ts=5,
                apply_retroactive=False,
            ),
        ]

        transition_table = TransitionTable(
            5,
            compartment_policies,
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        assert_frame_equal(
            transition_table.previous_table,
            transition_table.transition_dfs[TransitionTableType.AFTER],
        )
 def setUp(self):
     """Create one policy each for the prison and jail compartments."""
     self.policy_list = [
         SparkPolicy(policy_fn=TestSparkPolicy.dummy_policy_method,
                     spark_compartment=compartment,
                     sub_population={sub_key: "value"},
                     apply_retroactive=True)
         for compartment, sub_key in (("prison", "test1"), ("jail", "test2"))
     ]
Example #8
0
    def test_reallocate_outflow_preserves_total_population(self) -> None:
        """Reallocating outflows shifts destinations but keeps row totals."""
        reallocation_policy = SparkPolicy(
            policy_fn=partial(
                TransitionTable.reallocate_outflow,
                reallocation_df=pd.DataFrame({
                    "outflow": ["jail", "jail"],
                    "affected_fraction": [0.25, 0.25],
                    "new_outflow": ["prison", "treatment"],
                }),
                reallocation_type="+",
                retroactive=True,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            policy_ts=5,
            apply_retroactive=True,
        )

        transition_table = TransitionTable(
            5,
            [reallocation_policy],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        if transition_table.previous_table is None:
            raise ValueError("previous table is not populated")

        # Per-row totals must be unchanged by the reallocation.
        before_totals = transition_table.transition_dfs[
            TransitionTableType.BEFORE].sum(axis=1)
        assert_series_equal(before_totals,
                            transition_table.previous_table.sum(axis=1))
Example #9
0
    def test_reallocate_outflow_preserves_total_population(self):
        """Reallocation must move population between outflows, not remove it."""
        reallocation_policy = SparkPolicy(
            policy_fn=partial(
                CompartmentTransitions.reallocate_outflow,
                reallocation_df=pd.DataFrame({
                    "outflow": ["jail", "jail"],
                    "affected_fraction": [0.25, 0.25],
                    "new_outflow": ["prison", "treatment"],
                }),
                reallocation_type="+",
                retroactive=True,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            apply_retroactive=True,
        )

        transitions = CompartmentTransitions(self.test_data)
        transitions.initialize([reallocation_policy])

        # Row totals before and after the retroactive policy must agree.
        assert_series_equal(
            transitions.transition_dfs["before"].sum(axis=1),
            transitions.transition_dfs["after_retroactive"].sum(axis=1),
        )
Example #10
0
    def test_identity_policies_dont_change_probabilities(self) -> None:
        """Make sure splitting transitions into identical TransitionTables doesn't change behavior"""
        identity_policies = [
            SparkPolicy(
                policy_fn=partial(
                    TransitionTable.use_alternate_transitions_data,
                    alternate_historical_transitions=self.test_data,
                    retroactive=False,
                ),
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                policy_ts=policy_ts,
                apply_retroactive=False,
            ) for policy_ts in range(5)
        ]

        baseline = CompartmentTransitions(self.test_data)
        baseline.initialize_transition_tables([])

        split = CompartmentTransitions(self.test_data)
        split.initialize_transition_tables(identity_policies)

        # Every time step in the window must produce the same table.
        for ts in range(-3, 8):
            assert_frame_equal(
                baseline.get_per_ts_transition_table(ts),
                split.get_per_ts_transition_table(ts),
            )
Example #11
0
    def test_alternate_transitions_data_equal_to_differently_instantiated_transition_table(
            self):
        """Applying alternate data via policy equals instantiating with it."""
        alternate_data = self.test_data.copy()
        alternate_data.compartment_duration *= 2
        alternate_data.total_population = 10 - alternate_data.total_population

        swap_data_policy = SparkPolicy(
            policy_fn=partial(
                CompartmentTransitions.use_alternate_transitions_data,
                alternate_historical_transitions=alternate_data,
                retroactive=False,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            apply_retroactive=False,
        )

        via_policy = CompartmentTransitions(self.test_data)
        via_policy.initialize([swap_data_policy])

        direct = CompartmentTransitions(alternate_data)
        direct.initialize([])

        assert_frame_equal(
            via_policy.transition_dfs["after_non_retroactive"],
            direct.transition_dfs["after_non_retroactive"],
        )
    def initialize_transition_tables(self, policy_list: List[SparkPolicy]) -> None:
        """Build the chain of transition tables from historical data and policies.

        The earliest table (at MIN_POSSIBLE_POLICY_TS) is generated from
        historical outflows; each subsequent policy time step gets a table
        seeded from the previous table's AFTER state with that step's
        policies applied. All tables are normalized at the end.

        Args:
            policy_list: policies to apply, each tagged with a policy_ts.

        Raises:
            ValueError: if any policy's ts is at or below MIN_POSSIBLE_POLICY_TS.
        """
        # Seed the chain with the no-policy table built from historical data.
        self.transition_tables[MIN_POSSIBLE_POLICY_TS] = TransitionTable(
            MIN_POSSIBLE_POLICY_TS, []
        )
        self.transition_tables[MIN_POSSIBLE_POLICY_TS].generate_transition_table(
            TransitionTableType.AFTER, self.historical_outflows
        )

        # De-duplicated policy time steps.
        policy_time_steps = list({policy.policy_ts for policy in policy_list})

        if (
            len(policy_time_steps) > 0
            and min(policy_time_steps) <= MIN_POSSIBLE_POLICY_TS
        ):
            # Bug fix: the old message said the ts "exceeds" the minimum, but
            # this branch fires when a ts is at or below the minimum.
            raise ValueError(
                f"Policy ts {min(policy_time_steps)} must be greater than the "
                f"minimum allowable value ({MIN_POSSIBLE_POLICY_TS})"
            )

        policy_time_steps.append(MIN_POSSIBLE_POLICY_TS)
        policy_time_steps.sort()

        # Each table starts from the previous table's AFTER state so policy
        # effects accumulate chronologically.
        for ts_idx in range(1, len(policy_time_steps)):
            self.transition_tables[policy_time_steps[ts_idx]] = TransitionTable(
                policy_time_steps[ts_idx],
                SparkPolicy.get_ts_policies(policy_list, policy_time_steps[ts_idx]),
                self.transition_tables[policy_time_steps[ts_idx - 1]].get_table(
                    TransitionTableType.AFTER
                ),
            )

        # Normalize every table (including the seed) once the chain is built.
        for transition_table in self.transition_tables.values():
            transition_table.normalize_transitions()
Example #13
0
    def test_microsimulation_can_initialize_with_policy_list(self) -> None:
        """Run a policy scenario with a microsimulation to make sure it doesn't break along the way."""
        policy_start = self.user_inputs["start_time_step"] + 2
        supervision_policy = SparkPolicy(
            TransitionTable.test_non_retroactive_policy,
            "supervision",
            {"crime": "NAR"},
            policy_start,
        )

        policy_sim = PopulationSimulationFactory.build_population_simulation(
            self.test_outflows_data,
            self.test_transitions_data,
            self.test_total_population_data,
            self.simulation_architecture,
            ["crime"],
            self.user_inputs,
            [supervision_policy],
            -5,
            self.test_transitions_data,
            True,
            False,
            None,
        )

        # Smoke test: the simulation should run to completion.
        policy_sim.simulate_policies()
Example #14
0
    def test_preserve_normalized_outflow_behavior_preserves_normalized_outflow_behavior(
            self) -> None:
        """The preserve policy must keep the normalized 'prison' outflow equal
        to the no-policy baseline despite a retroactive policy."""
        policies = [
            SparkPolicy(
                policy_fn=TransitionTable.test_retroactive_policy,
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                policy_ts=5,
                apply_retroactive=True,
            ),
            SparkPolicy(
                policy_fn=partial(
                    TransitionTable.preserve_normalized_outflow_behavior,
                    outflows=["prison"],
                    state=TransitionTableType.BEFORE,
                ),
                spark_compartment="test_compartment",
                sub_population={"compartment": "test_compartment"},
                policy_ts=5,
                apply_retroactive=True,
            ),
        ]

        with_policies = TransitionTable(
            5,
            policies,
            self.prev_table.get_table(TransitionTableType.AFTER),
        )
        baseline = TransitionTable(
            5,
            [],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        with_policies.normalize_transitions()
        baseline.normalize_transitions()

        assert_series_equal(
            baseline.transition_dfs[TransitionTableType.BEFORE]["prison"],
            with_policies.transition_dfs[TransitionTableType.BEFORE]["prison"],
        )
Example #15
0
    def test_use_alternate_data_equal_to_differently_instantiated_shell_compartment(
            self) -> None:
        """A use-alternate-outflows policy must match direct instantiation."""
        alternate_outflow_data = pd.DataFrame({
            "total_population": [40, 21, 25, 30],
            "outflow_to": ["supervision", "prison", "supervision", "prison"],
            "compartment": ["pretrial"] * 4,
            "time_step": [1, 1, 2, 2],
        })
        # shadows logic in SubSimulationFactory._load_data()
        grouped_outflows = alternate_outflow_data.groupby(
            ["compartment", "outflow_to", "time_step"])["total_population"].sum()
        preprocessed_alternate_outflows = (
            grouped_outflows
            .unstack(level=["outflow_to", "time_step"])
            .stack(level="outflow_to", dropna=False)
            .loc["pretrial"]
            .fillna(0)
        )

        use_alternate_outflows = SparkPolicy(
            partial(
                ShellCompartment.use_alternate_outflows_data,
                alternate_outflows_data=alternate_outflow_data,
                tag="pretrial",
            ),
            spark_compartment="pretrial",
            sub_population={"subgroup": "test"},
            policy_ts=self.policy_ts,
            apply_retroactive=False,
        )

        policy_shell_compartment = ShellCompartment(
            self.test_outflow_data,
            starting_ts=self.starting_ts,
            tag="pretrial",
            constant_admissions=True,
            policy_list=[use_alternate_outflows],
        )
        alternate_shell_compartment = ShellCompartment(
            preprocessed_alternate_outflows,
            starting_ts=self.starting_ts,
            tag="pretrial",
            constant_admissions=True,
            policy_list=[],
        )

        def latest_predictor(shell):
            # Predictor for the most recent time step.
            return shell.admissions_predictors[max(shell.admissions_predictors)]

        self.assertEqual(latest_predictor(policy_shell_compartment),
                         latest_predictor(alternate_shell_compartment))
    def test_non_retroactive_policy_cannot_affect_retroactive_table(self):
        """A retroactive-table policy flagged non-retroactive must raise."""
        bad_policy = SparkPolicy(
            policy_fn=CompartmentTransitionsStub.test_retroactive_policy,
            spark_compartment='jail',
            sub_population={'compartment': 'test_compartment'},
            apply_retroactive=False,
        )

        transitions = CompartmentTransitionsStub(self.test_data)
        transitions.initialize_transition_table()
        with self.assertRaises(ValueError):
            transitions.initialize([bad_policy])
Example #17
0
    def test_apply_reduction_with_trivial_reductions_doesnt_change_transition_table(
        self, ):
        """Zero-effect reductions must leave the transition table unchanged.

        Each reduction row is trivial: either ``affected_fraction`` or
        ``reduction_size`` is 0, so neither policy should alter the table.
        """
        # Shared spec; each partial gets its own DataFrame instance so the
        # policies cannot interact through a shared object (previously the
        # identical literal was duplicated).
        trivial_reductions = {
            "outflow": ["prison"] * 2,
            "affected_fraction": [0, 0.5],
            "reduction_size": [0.5, 0],
        }

        policy_mul = partial(
            CompartmentTransitions.apply_reduction,
            reduction_df=pd.DataFrame(trivial_reductions),
            reduction_type="*",
            retroactive=False,
        )

        policy_add = partial(
            CompartmentTransitions.apply_reduction,
            reduction_df=pd.DataFrame(trivial_reductions),
            reduction_type="+",
            retroactive=False,
        )

        # Keyword arguments for consistency with other SparkPolicy call sites.
        compartment_policies = [
            SparkPolicy(
                policy_fn=policy_mul,
                spark_compartment="test_compartment",
                sub_population={"sub_group": "test_population"},
                apply_retroactive=False,
            ),
            SparkPolicy(
                policy_fn=policy_add,
                spark_compartment="test_compartment",
                sub_population={"sub_group": "test_population"},
                apply_retroactive=False,
            ),
        ]

        compartment_transitions = CompartmentTransitions(self.test_data)
        compartment_transitions.initialize(compartment_policies)

        assert_frame_equal(
            compartment_transitions.transition_dfs["before"],
            compartment_transitions.transition_dfs["after_non_retroactive"],
        )
Example #18
0
    def test_non_retroactive_policy_cannot_affect_retroactive_table(self):
        """Initializing with a retroactive policy marked non-retroactive raises."""
        bad_policy = SparkPolicy(
            policy_fn=CompartmentTransitions.test_retroactive_policy,
            spark_compartment="test_compartment",
            sub_population={"compartment": "test_compartment"},
            apply_retroactive=False,
        )

        transitions = CompartmentTransitions(self.test_data)
        with self.assertRaises(ValueError):
            transitions.initialize([bad_policy])
Example #19
0
    def initialize(self):
        """Initialize the transition tables, and then the compartments, for the SubSimulation"""
        # TODO(#4512): allow sparse data

        # Macro-simulations must have their starting time step present in the
        # total population data, or populations cannot be seeded.
        if not self.microsim and not self.total_population_data.empty:
            if self.user_inputs['start_time_step'] not in self.total_population_data.time_step.values:
                raise ValueError(f"Start time must be included in population data input\n"
                                 f"Expected: {self.user_inputs['start_time_step']}, "
                                 f"Actual: {self.total_population_data.time_step.values}")

        # Initialize a default transition class for each compartment to represent the no-policy scenario
        transitions_per_compartment = {}
        for compartment in self.simulation_architecture:
            transition_type = self.simulation_architecture[compartment]
            compartment_duration_data = self.transitions_data[self.transitions_data['compartment'] == compartment]

            if compartment_duration_data.empty:
                if transition_type is not None:
                    # Typo fix in user-facing message: "disaggregtion" -> "disaggregation"
                    raise ValueError(f"Transition data missing for compartment {compartment}. Data is required for all "
                                     "disaggregation axes. Even the 'release' compartment needs transition data even if "
                                     "it's just outflow to 'release'")
            else:
                if transition_type == 'incarcerated':
                    transition_class = IncarceratedTransitions(compartment_duration_data)
                elif transition_type == 'released':
                    transition_class = ReleasedTransitions(compartment_duration_data)
                else:
                    raise ValueError(f'unrecognized transition table type {transition_type}')

                transition_class.initialize_transition_table()
                transitions_per_compartment[compartment] = transition_class

        # Create a transition object for each compartment and year with policies applied and store shell policies
        shell_policies = {}
        for compartment in self.simulation_architecture:
            # Select any policies that are applicable for this compartment
            compartment_policies = SparkPolicy.get_compartment_policies(self.policy_list, compartment)

            # add to the dict compartment -> transition class with policies applied
            if compartment in transitions_per_compartment:
                transitions_per_compartment[compartment].initialize(compartment_policies)

            # add shell policies to dict that gets passed to initialization
            else:
                shell_policies[compartment] = compartment_policies

        # Preprocess the historical data into separate pieces per compartment
        historical_outflows = self._load_data()

        # Initialize the compartment classes
        self._initialize_compartments(historical_outflows, transitions_per_compartment, shell_policies)
Example #20
0
    def test_chop_technicals_chops_correctly(self) -> None:
        """
        Make sure CompartmentTransitions.chop_technical_revocations zeros technicals after the correct duration and
            that table sums to the same amount (i.e. total population shifted but not removed)
        """
        chop_policy = SparkPolicy(
            policy_fn=partial(
                TransitionTable.chop_technical_revocations,
                technical_outflow="prison",
                release_outflow="jail",
                retroactive=False,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            policy_ts=5,
            apply_retroactive=False,
        )

        chopped = TransitionTable(
            5,
            [chop_policy],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )
        baseline = TransitionTable(
            5,
            [],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        chopped.normalize_transitions()
        baseline.normalize_transitions()

        # check total population was preserved
        assert_series_equal(
            chopped.transition_dfs[TransitionTableType.AFTER].iloc[0],
            baseline.transition_dfs[TransitionTableType.AFTER].iloc[0],
        )

        # check technicals chopped
        chopped.unnormalize_table(TransitionTableType.AFTER)
        after_df = chopped.transition_dfs[TransitionTableType.AFTER]
        self.assertTrue((after_df.loc[3:, "prison"] == 0).all())
        self.assertTrue(after_df.loc[1, "prison"] != 0)
 def test_microsim_requires_empty_policy_list(self):
     """Passing a policy list to a microsim run must raise a ValueError."""
     population_simulation = PopulationSimulation()
     with self.assertRaises(ValueError):
         user_inputs = deepcopy(self.user_inputs)
         user_inputs['policy_list'] = [
             SparkPolicy(
                 IncarceratedTransitions.test_non_retroactive_policy,
                 'supervision',
                 {'crime': 'NAR'},
             )
         ]
         population_simulation.simulate_policies(
             outflows_data=self.test_outflows_data,
             transitions_data=self.test_transitions_data,
             total_population_data=self.test_total_population_data,
             simulation_compartments=self.simulation_architecture,
             disaggregation_axes=['crime'],
             user_inputs=user_inputs,
             microsim=True,
             microsim_data=self.test_outflows_data,
         )
Example #22
0
    def test_non_retroactive_policy_cannot_affect_retroactive_table(
            self) -> None:
        """Constructing a table with a mismatched retroactive flag raises."""
        bad_policy = SparkPolicy(
            policy_fn=TransitionTable.test_retroactive_policy,
            spark_compartment="test_compartment",
            sub_population={"compartment": "test_compartment"},
            policy_ts=5,
            apply_retroactive=False,
        )

        with self.assertRaises(ValueError):
            TransitionTable(
                5,
                [bad_policy],
                self.prev_table.get_table(TransitionTableType.AFTER),
            )
Example #23
0
    def test_apply_reduction_matches_example_by_hand(self) -> None:
        """An additive retroactive reduction must match hand-computed values."""
        reduction_policy = SparkPolicy(
            policy_fn=partial(
                TransitionTable.apply_reduction,
                reduction_df=pd.DataFrame({
                    "outflow": ["prison"],
                    "affected_fraction": [0.25],
                    "reduction_size": [0.5],
                }),
                reduction_type="+",
                retroactive=True,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            policy_ts=5,
            apply_retroactive=True,
        )

        transition_table = TransitionTable(
            5,
            [reduction_policy],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        # Worked out by hand for a 25%-affected, 0.5-step reduction.
        expected_result = pd.DataFrame(
            {
                "jail": [4, 2, 0, 0, 0, 0, 0, 0, 0, 0],
                "prison": [2, 0.5, 3.5, 0, 0, 0, 0, 0, 0.375, 2.625],
            },
            index=range(1, 11),
            dtype=float,
        )
        expected_result.index.name = "compartment_duration"
        expected_result.columns.name = "outflow_to"

        observed = transition_table.transition_dfs[TransitionTableType.BEFORE]
        assert_frame_equal(round(observed, SIG_FIGS),
                           round(expected_result, SIG_FIGS))
Example #24
0
    def test_chop_technicals_chops_correctly(self):
        """
        Make sure CompartmentTransitions.chop_technical_revocations zeros technicals after the correct duration and
            that table sums to the same amount (i.e. total population shifted but not removed)
        """
        chop_policy = SparkPolicy(
            policy_fn=partial(
                CompartmentTransitions.chop_technical_revocations,
                technical_outflow="prison",
                release_outflow="jail",
                retroactive=False,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            apply_retroactive=False,
        )

        chopped = CompartmentTransitions(self.test_data)
        chopped.initialize([chop_policy])

        baseline = CompartmentTransitions(self.test_data)
        baseline.initialize([])

        # check total population was preserved
        assert_series_equal(
            chopped.transition_dfs["after_non_retroactive"].iloc[0],
            baseline.transition_dfs["after_non_retroactive"].iloc[0],
        )

        # check technicals chopped
        chopped.unnormalize_table("after_non_retroactive")
        after_df = chopped.transition_dfs["after_non_retroactive"]
        self.assertTrue((after_df.loc[3:, "prison"] == 0).all())
        self.assertTrue(after_df.loc[1, "prison"] != 0)
Example #25
0
    def test_apply_reduction_matches_example_by_hand(self):
        """An additive retroactive reduction must match a hand-computed table."""
        compartment_transitions = CompartmentTransitions(self.test_data)
        reduction_policy = SparkPolicy(
            policy_fn=partial(
                CompartmentTransitions.apply_reduction,
                reduction_df=pd.DataFrame({
                    "outflow": ["prison"],
                    "affected_fraction": [0.25],
                    "reduction_size": [0.5],
                }),
                reduction_type="+",
                retroactive=True,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            apply_retroactive=True,
        )

        # Hand-computed expectation, normalized to a probability table.
        expected_result = pd.DataFrame(
            {
                "jail": [4, 2, 0, 0, 0, 0, 0, 0, 0, 0],
                "prison": [2, 0.5, 3.5, 0, 0, 0, 0, 0, 0.375, 2.625],
            },
            index=range(1, 11),
            dtype=float,
        )
        expected_result.index.name = "compartment_duration"
        expected_result.columns.name = "outflow_to"
        expected_result /= expected_result.sum().sum()

        compartment_transitions.initialize([reduction_policy])
        compartment_transitions.unnormalize_table("after_retroactive")
        observed = compartment_transitions.transition_dfs["after_retroactive"]
        assert_frame_equal(round(observed, 8), round(expected_result, 8))
 def test_microsim_requires_empty_policy_list(self):
     """A microsim build with a non-empty policy list should raise ValueError."""
     with self.assertRaises(ValueError):
         # Policy construction stays inside the context manager to keep the
         # original test's raise-window unchanged.
         non_empty_policies = [
             SparkPolicy(
                 CompartmentTransitions.test_non_retroactive_policy,
                 "supervision",
                 {"crime": "NAR"},
             )
         ]
         PopulationSimulationFactory.build_population_simulation(
             self.test_outflows_data,
             self.test_transitions_data,
             self.test_total_population_data,
             self.simulation_architecture,
             ["crime"],
             self.user_inputs,
             non_empty_policies,
             -5,
             self.test_outflows_data,
             True,
             False,
         )
Beispiel #27
0
    def test_alternate_transitions_data_equal_to_differently_instantiated_transition_table(
        self, ) -> None:
        """Swapping in alternate data via a policy should match a table built
        directly from that alternate data."""
        # Derive a second, clearly different historical-transitions dataset.
        modified_data = self.test_data.copy()
        modified_data.compartment_duration *= 2
        modified_data.total_population = 10 - modified_data.total_population

        # Route 1: apply the alternate data through a non-retroactive policy.
        swap_policy = SparkPolicy(
            policy_fn=partial(
                TransitionTable.use_alternate_transitions_data,
                alternate_historical_transitions=modified_data,
                retroactive=False,
            ),
            spark_compartment="test_compartment",
            sub_population={"sub_group": "test_population"},
            policy_ts=5,
            apply_retroactive=False,
        )
        policied_table = TransitionTable(
            5,
            [swap_policy],
            self.prev_table.get_table(TransitionTableType.AFTER),
        )

        # Route 2: instantiate a table from the alternate data with no policies.
        baseline_table = TransitionTable(-9999, [])
        baseline_table.generate_transition_table(
            TransitionTableType.AFTER, modified_data)
        direct_table = TransitionTable(
            5,
            [],
            baseline_table.get_table(TransitionTableType.AFTER),
        )

        assert_frame_equal(
            policied_table.transition_dfs[TransitionTableType.AFTER],
            direct_table.transition_dfs[TransitionTableType.AFTER],
        )
    def _initialize_admissions_predictors(
        self, constant_admissions: bool, projection_type: Optional[str]
    ) -> None:
        """Generate the dictionary of one admission predictor per policy time step that defines outflow behaviors"""
        policy_time_steps = list({policy.policy_ts for policy in self.policy_list})

        # Reject any policy ts at or below the sentinel baseline ts, since the
        # baseline slot is reserved below.
        if policy_time_steps and min(policy_time_steps) <= MIN_POSSIBLE_POLICY_TS:
            raise ValueError(
                f"Policy ts exceeds minimum allowable value ({MIN_POSSIBLE_POLICY_TS}): {min(policy_time_steps)}"
            )

        # TODO(#6727): inheritance -> composition for SparkCompartment so this reused logic is cleaned up

        # Anchor the timeline with the baseline (pre-policy) time step.
        policy_time_steps = sorted(policy_time_steps + [MIN_POSSIBLE_POLICY_TS])
        self.policy_data[MIN_POSSIBLE_POLICY_TS] = self.outflows_data

        # First pass: thread the outflows data through consecutive policy
        # windows, so each window starts from the previous window's
        # already-transformed data.
        for prev_ts, cur_ts in zip(policy_time_steps, policy_time_steps[1:]):
            transformed = self.policy_data[prev_ts].copy()
            for policy in SparkPolicy.get_ts_policies(self.policy_list, cur_ts):
                transformed = policy.policy_fn(transformed)
            self.policy_data[cur_ts] = transformed

        # Second pass: build one admissions predictor per transformed dataset.
        for ts, ts_data in self.policy_data.items():
            self.admissions_predictors[ts] = PredictedAdmissions(
                ts_data, constant_admissions, projection_type
            )
    def test_cost_multipliers_multiplicative(self):
        """Cost multipliers should scale spending diffs linearly."""
        reduction_policy_fn = partial(
            CompartmentTransitions.apply_reduction,
            reduction_df=pd.DataFrame({
                "outflow": ["RELEASE"],
                "reduction_size": [0.5],
                "affected_fraction": [0.75],
            }),
            reduction_type="*",
            retroactive=True,
        )
        policy_list = [
            SparkPolicy(
                policy_fn=reduction_policy_fn,
                spark_compartment="PRISON",
                sub_population={"crime_type": crime_type},
                apply_retroactive=True,
            ) for crime_type in ["NONVIOLENT", "VIOLENT"]
        ]
        self.macrosim.simulate_policy(policy_list, "PRISON")

        def spending_outputs(multipliers=None):
            # Run the upload and return (cumulative, non-cumulative) diffs.
            if multipliers is None:
                outputs = self.macrosim.upload_simulation_results_to_bq("test")
            else:
                outputs = self.macrosim.upload_simulation_results_to_bq(
                    "test", multipliers)
            return (outputs["spending_diff"],
                    outputs["spending_diff_non_cumulative"])

        base_diff, base_diff_nc = spending_outputs()

        # Doubling every subgroup's multiplier should double both diffs.
        all_doubled = pd.DataFrame({
            "crime_type": ["NONVIOLENT", "VIOLENT"],
            "multiplier": [2, 2]
        })
        scaled_diff, scaled_diff_nc = spending_outputs(all_doubled)
        assert_frame_equal(base_diff * 2, scaled_diff)
        assert_frame_equal(base_diff_nc * 2, scaled_diff_nc)

        # Same linearity, restricted to one subgroup:
        # (3x - 1x) should equal 2 * (2x - 1x).
        nonviolent_doubled = pd.DataFrame({
            "crime_type": ["NONVIOLENT"],
            "multiplier": [2]
        })
        nonviolent_tripled = pd.DataFrame({
            "crime_type": ["NONVIOLENT"],
            "multiplier": [3]
        })
        double_diff, double_diff_nc = spending_outputs(nonviolent_doubled)
        triple_diff, triple_diff_nc = spending_outputs(nonviolent_tripled)

        assert_frame_equal(
            triple_diff - base_diff,
            (double_diff - base_diff) * 2,
        )
        assert_frame_equal(
            triple_diff_nc - base_diff_nc,
            (double_diff_nc - base_diff_nc) * 2,
        )
Beispiel #30
0
 def test_get_compartment_policies(self):
     """Filtering by compartment should return exactly the jail policies."""
     jail_policies = SparkPolicy.get_compartment_policies(
         self.policy_list, spark_compartment='jail')
     # All but the first fixture policy target the jail compartment.
     self.assertEqual(self.policy_list[1:], jail_policies)