import functools

import numpy as np


def _create_param_specific_objects(
    complex_,
    choice_set,
    optim_paras,
    options,
    dense_key_to_dense_covariates,
    transit_keys=None,
):
    """Create parameter-specific objects.

    This function creates objects that are not fixed for a given model. Depending on
    their size, they are either kept in working memory, such as wages, or dumped on
    disk, such as transition probabilities. In the medium run we could also allow for
    fixed parameters here by saving their values on disk directly.

    For objects that we store on disk we only return the prefix of the location.

    """
    states = load_objects("states", complex_, options)
    wages, nonpecs = _create_choice_rewards(states, choice_set, optim_paras)
    if optim_paras["exogenous_processes"]:
        transition_probabilities = compute_transition_probabilities(
            states, transit_keys, optim_paras, dense_key_to_dense_covariates
        )
        dump_objects(transition_probabilities, "transition", complex_, options)
    return wages, nonpecs
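
# Hedged usage sketch for the function above. The shapes are an assumption: the
# rewards appear to be computed per state and admissible choice, while the
# transition probabilities are persisted on disk rather than returned.
#
#     wages, nonpecs = _create_param_specific_objects(
#         complex_, choice_set, optim_paras, options,
#         dense_key_to_dense_covariates, transit_keys=transit_keys,
#     )
#     assert wages.shape == nonpecs.shape  # aligned (n_states, n_choices) arrays
#     # If exogenous processes exist, retrieve the dumped probabilities with
#     # load_objects("transition", complex_, options).
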
def draw_dense_key_next_period(complex_tuple, core_index, options):
    """For exogenous processes, draw the dense key of the next period.

    Parameters
    ----------
    complex_tuple : tuple
        See :ref:`complex`.
    core_index : pandas.Series
        Series of core indices for which next-period dense keys are drawn.
    options : dict
        Contains model options.

    Returns
    -------
    dense_key_next_period : pandas.Series
        A pandas Series containing the dense key in the next period for each entry
        of ``core_index``.

    """
    dense_key_next_period = core_index.copy(deep=True)
    transition_mat = load_objects("transition", complex_tuple, options)
    core_index_counts = core_index.value_counts()
    for index, count in core_index_counts.items():
        draws = np.random.choice(
            transition_mat.columns.values,
            size=count,
            p=transition_mat.iloc[index].to_numpy(),
        )
        dense_key_next_period.loc[core_index == index] = draws.astype(int)
    return dense_key_next_period
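
# Self-contained toy illustration of the sampling step above (all numbers
# invented). Each row of the transition matrix is a probability distribution
# over next-period dense keys, and each row is sampled once per occurrence of
# the corresponding core index.
#
#     import numpy as np
#     import pandas as pd
#
#     transition_mat = pd.DataFrame([[0.9, 0.1], [0.2, 0.8]], columns=[3, 4])
#     core_index = pd.Series([0, 0, 1, 1, 1])
#     out = core_index.copy(deep=True)
#     for index, count in core_index.value_counts().items():
#         draws = np.random.choice(
#             transition_mat.columns.values,
#             size=count,
#             p=transition_mat.iloc[index].to_numpy(),
#         )
#         out.loc[core_index == index] = draws.astype(int)
#     # ``out`` now holds a dense key in {3, 4} for every state.
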
def _collect_child_indices(complex_, choice_set, indexer, optim_paras, options):
    """Collect child indices for states.

    The function takes the states of one dense key, applies the law of motion for
    each available choice, and maps the resulting states to core keys and core
    indices.

    Parameters
    ----------
    complex_ : tuple
        See :ref:`complex`.
    choice_set : tuple
        Tuple representing admissible choices.
    indexer : numba.typed.Dict
        A dictionary with core states as keys and the core key and core index as
        values.
    optim_paras : dict
        Contains model parameters.
    options : dict
        Contains model options.

    Returns
    -------
    indices : numpy.ndarray
        Array with shape ``(n_states, n_choices, 2)``. Represents the mapping
        ``(core_index, choice) -> (core_key, core_index)``.

    """
    core_columns = create_core_state_space_columns(optim_paras)
    states = load_objects("states", complex_, options)

    n_choices = sum(choice_set)
    indices = np.full((states.shape[0], n_choices, 2), -1, dtype=np.int64)

    indices_valid_choices = [i for i, is_valid in enumerate(choice_set) if is_valid]
    for i, choice in enumerate(indices_valid_choices):
        states_ = states.copy(deep=True)
        states_["choice"] = choice
        states_ = apply_law_of_motion_for_core(states_, optim_paras)
        states_ = states_[["period"] + core_columns]
        indices[:, i, 0], indices[:, i, 1] = map_states_to_core_key_and_core_index(
            states_.to_numpy(), indexer
        )

    return indices
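
# Hedged sketch of how to read the result: for state row ``s`` and the ``i``-th
# admissible choice, ``indices[s, i]`` holds the pair ``(core_key, core_index)``
# of the child state; the ``-1`` fill value marks entries that were never
# written.
#
#     indices = _collect_child_indices(
#         complex_, choice_set, indexer, optim_paras, options
#     )
#     core_key, core_idx = indices[0, 0]  # child of state 0 under its first choice
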
def weight_continuation_values(
    complex_, options, continuation_values, transit_key_to_choice_set
):
    """Weight continuation values by their transition probability.

    We weight the continuation values of a dense key by the probability that the
    agent transitions into each of the reachable dense keys next period. Exogenous
    processes only depend on the state in this period and not on the choice, so we
    can calculate the continuation values symmetrically across choices. Caution has
    to be exercised when choice sets are restricted. Another important point are
    states that can only be reached with a change of the exogenous process.

    Returns
    -------
    continuation_values : numpy.ndarray
        Array with shape ``(n_states, n_choices)`` containing the weighted
        continuation values.

    """
    transition_df = load_objects("transition", complex_, options)
    choice_set = complex_[1]

    # Reconsider this setup.
    choice_positions = {
        key: _get_representation_cols(value, choice_set)
        for key, value in transit_key_to_choice_set.items()
    }

    continuation_values_adjusted = {
        future_key: continuation_values[int(future_key)][
            :, list(choice_positions[int(future_key)])
        ]
        for future_key in transition_df.columns
    }

    weighted_columns = [
        transition_df[future_key].values.reshape((transition_df.shape[0], 1))
        * continuation_values_adjusted[future_key]
        for future_key in transition_df.columns
    ]

    continuation_values = functools.reduce(np.add, weighted_columns)
    return continuation_values
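
# The reduction above computes E[V'] = sum_k p_k * CV_k over future dense keys
# k. Toy numpy illustration with invented numbers (two future keys, three
# states, two choices):
#
#     import numpy as np
#
#     probs = np.array([[0.7, 0.3], [0.5, 0.5], [0.1, 0.9]])  # rows sum to 1
#     cv = {0: np.ones((3, 2)), 1: 2 * np.ones((3, 2))}
#     weighted = sum(probs[:, [k]] * cv[k] for k in (0, 1))
#     # weighted[0] == [1.3, 1.3] because 0.7 * 1 + 0.3 * 2 = 1.3
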