def wrapper_distribute_and_combine_likelihood(df, base_draws_est, *args, optim_paras, options):
    """Split data and shocks across dense dimensions, apply ``func``, recombine.

    When the model has dense state-space dimensions, the DataFrame is duplicated
    once per type, partitioned by the dense columns together with the matching
    shock draws, and the per-partition results of ``func`` are concatenated back
    into a single object. Without dense dimensions, ``func`` receives the inputs
    unchanged.

    """
    dense_columns = create_dense_state_space_columns(optim_paras)

    if dense_columns:
        n_obs = df.shape[0]
        n_types = optim_paras["n_types"]

        # Number each state to split the shocks later. This is necessary to keep
        # the regression tests from failing.
        df["__id"] = np.arange(n_obs)

        # Rows of ``indices`` correspond to states; columns refer to the types.
        indices = np.arange(n_obs * n_types).reshape(n_obs, n_types)

        duplicated = pd.concat(
            [df.copy().assign(type=type_) for type_ in range(n_types)]
        )
        splitted_df = _split_dataframe(duplicated, dense_columns)
        splitted_shocks = _split_shocks(
            base_draws_est, splitted_df, indices, optim_paras
        )
    else:
        splitted_df = df
        splitted_shocks = base_draws_est

    out = func(splitted_df, splitted_shocks, *args, optim_paras, options)

    # ``func`` may return one object per dense partition; glue them back together.
    if isinstance(out, dict):
        out = pd.concat(out.values()).sort_index()

    return out
def _create_conversion_dictionaries(self):
    """Create mappings between state space location indices and properties.

    See :ref:`state space location indices <state_space_location_indices>`.

    """
    # Enumerate the complex keys of ``dense_period_cores``; the enumeration
    # index becomes the dense key used everywhere below.
    self.dense_key_to_complex = {
        i: k for i, k in enumerate(self.dense_period_cores)
    }
    # Dense key -> core key, looked up via the complex key.
    self.dense_key_to_core_key = {
        i: self.dense_period_cores[self.dense_key_to_complex[i]]
        for i in self.dense_key_to_complex
    }
    # Dense key -> choice set; the choice set is the second element of the
    # complex key tuple.
    self.dense_key_to_choice_set = {
        i: self.dense_key_to_complex[i][1] for i in self.dense_key_to_complex
    }
    # Dense key -> array of core state indices belonging to its core key.
    self.dense_key_to_core_indices = {
        i: np.array(
            self.core_key_to_core_indices[self.dense_key_to_core_key[i]])
        for i in self.dense_key_to_complex
    }
    # Numba-typed dict so the mapping can be used inside jitted code:
    # (core key, dense index) -> dense key.
    self.core_key_and_dense_index_to_dense_key = Dict.empty(
        key_type=nb.types.UniTuple(nb.types.int64, 2),
        value_type=nb.types.int64,
    )
    for i in self.dense_key_to_complex:
        self.core_key_and_dense_index_to_dense_key[return_core_dense_key(
            self.dense_key_to_core_key[i],
            *self.dense_key_to_complex[i][2:],
        )] = i

    if self.dense is False:
        # No dense dimensions: empty covariate mappings.
        self.dense_covariates_to_dense_index = {}
        self.dense_key_to_dense_covariates = {
            i: {} for i in self.dense_key_to_complex
        }
    else:
        # Numba-typed dict keyed by the tuple of dense covariate values.
        n_dense = len(create_dense_state_space_columns(self.optim_paras))
        self.dense_covariates_to_dense_index = Dict.empty(
            key_type=nb.types.UniTuple(nb.types.int64, n_dense),
            value_type=nb.types.int64,
        )
        for i, k in enumerate(self.dense):
            self.dense_covariates_to_dense_index[k] = i

        # Dense key -> dense covariate values; the third element of the
        # complex key indexes into the ordered keys of ``self.dense``.
        # NOTE(review): relies on dict insertion order of ``self.dense``
        # being stable — holds on Python 3.7+.
        self.dense_key_to_dense_covariates = {
            i: list(self.dense.keys())[self.dense_key_to_complex[i][2]]
            for i in self.dense_key_to_complex
        }
def wrapper_distribute_and_combine_df(df, *args, optim_paras, **kwargs):
    """Partition the DataFrame by dense dimensions, apply ``func``, recombine.

    If the model has dense state-space columns, the DataFrame is split along
    them before calling ``func``; a dict result is concatenated back into one
    DataFrame. Otherwise the DataFrame is passed through unchanged.

    """
    dense_columns = create_dense_state_space_columns(optim_paras)
    if remove_type:
        dense_columns.remove("type")

    if dense_columns:
        splitted_df = _split_dataframe(df, dense_columns)
    else:
        splitted_df = df

    out = func(splitted_df, *args, optim_paras, **kwargs)

    # Recombine per-partition results into a single sorted DataFrame.
    if isinstance(out, dict):
        return pd.concat(out.values()).sort_index()
    return out
def _create_dense_state_space_covariates(dense_grid, optim_paras, options):
    """Compute covariates for each point of the dense state-space grid.

    Returns a dict mapping dense indices to covariate dictionaries, or
    ``False`` when the model has no dense dimensions (sentinel kept for
    downstream checks).

    """
    if not dense_grid:
        return False

    columns = create_dense_state_space_columns(optim_paras)
    grid = pd.DataFrame(data=dense_grid, columns=columns).set_index(
        columns, drop=False
    )

    covariates = compute_covariates(grid, options["covariates_dense"])
    covariates = covariates.apply(downcast_to_smallest_dtype)

    # One covariate dict per grid point, keyed by the dense index.
    per_state = covariates.to_dict(orient="index")
    return convert_dictionary_keys_to_dense_indices(per_state)