    def initialize_transition_tables(self, policy_list: List[SparkPolicy]) -> None:
        """Populate the 'before' transition table and initialize the max_sentence from historical data."""
        self.transition_tables[MIN_POSSIBLE_POLICY_TS] = TransitionTable(
            MIN_POSSIBLE_POLICY_TS, []
        )
        self.transition_tables[MIN_POSSIBLE_POLICY_TS].generate_transition_table(
            TransitionTableType.AFTER, self.historical_outflows
        )

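        # collect the unique time steps at which any policy takes effect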
        policy_time_steps = list({policy.policy_ts for policy in policy_list})

        if (
            len(policy_time_steps) > 0
            and min(policy_time_steps) <= MIN_POSSIBLE_POLICY_TS
        ):
            raise ValueError(
                f"Policy ts exceeds minimum allowable value ({MIN_POSSIBLE_POLICY_TS}): {min(policy_time_steps)}"
            )

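        # include the baseline time step so the loop below can chain the
        # first policy table off the historical table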
        policy_time_steps.append(MIN_POSSIBLE_POLICY_TS)
        policy_time_steps.sort()

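        # build one transition table per policy time step, each seeded with
        # the 'after' table of the previous table in time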
        for ts_idx in range(1, len(policy_time_steps)):
            self.transition_tables[policy_time_steps[ts_idx]] = TransitionTable(
                policy_time_steps[ts_idx],
                SparkPolicy.get_ts_policies(policy_list, policy_time_steps[ts_idx]),
                self.transition_tables[policy_time_steps[ts_idx - 1]].get_table(
                    TransitionTableType.AFTER
                ),
            )

        # normalize all tables
        for transition_table in self.transition_tables.values():
            transition_table.normalize_transitions()

    def _initialize_admissions_predictors(
        self, constant_admissions: bool, projection_type: Optional[str]
    ) -> None:
        """Generate the dictionary of one admission predictor per policy time step that defines outflow behaviors"""
        policy_time_steps = list({policy.policy_ts for policy in self.policy_list})

        if (
            len(policy_time_steps) > 0
            and min(policy_time_steps) <= MIN_POSSIBLE_POLICY_TS
        ):
            raise ValueError(
                f"Policy ts exceeds minimum allowable value ({MIN_POSSIBLE_POLICY_TS}): {min(policy_time_steps)}"
            )

        # TODO(#6727): inheritance -> composition for SparkCompartment so this reused logic is cleaned up

        policy_time_steps.append(MIN_POSSIBLE_POLICY_TS)
        policy_time_steps.sort()

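        # the unmodified outflows data is the baseline from which each
        # policy time step's transformed copy is derived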
        self.policy_data[MIN_POSSIBLE_POLICY_TS] = self.outflows_data

        # first pass transforms outflows data according to policies
        for ts_idx in range(1, len(policy_time_steps)):
            # start with a copy of the data from the previous policy data
            ts_data = self.policy_data[policy_time_steps[ts_idx - 1]].copy()

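            # apply every policy that takes effect at this time step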
            for policy in SparkPolicy.get_ts_policies(
                self.policy_list, policy_time_steps[ts_idx]
            ):
                ts_data = policy.policy_fn(ts_data)

            self.policy_data[policy_time_steps[ts_idx]] = ts_data

        # second pass creates admissions predictors from transformed outflows data
        for ts, ts_data in self.policy_data.items():
            self.admissions_predictors[ts] = PredictedAdmissions(
                ts_data, constant_admissions, projection_type
            )

    def test_get_ts_policies(self) -> None:
        expected_list = self.policy_list[1:]
        result_list = SparkPolicy.get_ts_policies(self.policy_list, time_step=1)
        self.assertEqual(expected_list, result_list)