def pyth_criterion(
    x,
    is_interpolated,
    num_points_interp,
    is_debug,
    data,
    tau,
    periods_draws_emax,
    periods_draws_prob,
    state_space,
):
    """Criterion function for the likelihood maximization."""
    optim_paras = distribute_parameters(x, is_debug)

    # Calculate all systematic rewards.
    state_space.update_systematic_rewards(optim_paras)

    state_space = pyth_backward_induction(
        periods_draws_emax,
        state_space,
        is_debug,
        is_interpolated,
        num_points_interp,
        optim_paras,
        "",
        False,
    )

    contribs = pyth_contributions(
        state_space, data, periods_draws_prob, tau, optim_paras
    )

    crit_val = get_log_likl(contribs)

    return crit_val
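# Usage sketch (illustrative only): how `pyth_criterion` might be handed to a
# generic optimizer. `x0` stands for the start parameter vector and
# `crit_kwargs` for a dict with the remaining keyword arguments of
# `pyth_criterion`; both are hypothetical placeholders, and whether the
# returned value has to be negated depends on the sign convention of
# `get_log_likl`.
import functools

from scipy.optimize import minimize


def fit_model(x0, crit_kwargs):
    """Optimize the criterion function starting from ``x0``."""
    # Fix all arguments except the parameter vector `x`.
    objective = functools.partial(pyth_criterion, **crit_kwargs)
    return minimize(objective, x0, method="Nelder-Mead")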
def pyth_solve(
    is_interpolated,
    num_points_interp,
    num_periods,
    is_debug,
    periods_draws_emax,
    edu_spec,
    optim_paras,
    file_sim,
    num_types,
):
    """Solve the model.

    This function is a wrapper around state space creation and determining the
    optimal decision in each state by backward induction.

    Parameters
    ----------
    is_interpolated : bool
        Indicator for whether the expected maximum utility should be interpolated.
    num_points_interp : int
        Number of points used for the interpolation.
    num_periods : int
        Number of periods.
    is_debug : bool
        Flag for debugging.
    periods_draws_emax : np.ndarray
        Array with shape (num_periods, num_draws, num_choices) containing draws for
        the Monte Carlo simulation of expected maximum utility.
    edu_spec : dict
        Information on education.
    optim_paras : dict
        Parameters affected by optimization.
    file_sim : str
        File to which the solution progress is recorded via
        ``record_solution_progress``.
    num_types : int
        Number of types.

    """
    record_solution_progress(1, file_sim)

    # Create the state space.
    state_space = StateSpace(
        num_periods, num_types, edu_spec["start"], edu_spec["max"], optim_paras
    )

    record_solution_progress(-1, file_sim)

    record_solution_progress(2, file_sim)

    record_solution_progress(-1, file_sim)

    # Backward iteration procedure. There is a PYTHON and FORTRAN implementation
    # available. If agents are myopic, the backward induction procedure is not
    # called upon.
    record_solution_progress(3, file_sim)

    state_space = pyth_backward_induction(
        periods_draws_emax,
        state_space,
        is_debug,
        is_interpolated,
        num_points_interp,
        optim_paras,
        file_sim,
        True,
    )

    if optim_paras["delta"]:
        record_solution_progress(-1, file_sim)

    return state_space
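# Usage sketch (illustrative only): invoking the solution wrapper above. The
# concrete values are hypothetical placeholders; in the package they are taken
# from the model specification. `edu_spec` only needs the "start" and "max"
# entries consumed by `StateSpace`, and `optim_paras` the entries used during
# backward induction (e.g. "delta").
state_space = pyth_solve(
    is_interpolated=True,
    num_points_interp=200,
    num_periods=40,
    is_debug=False,
    periods_draws_emax=periods_draws_emax,  # (num_periods, num_draws, num_choices)
    edu_spec={"start": [10], "max": 20},    # hypothetical education specification
    optim_paras=optim_paras,                # parameter dict from the estimation setup
    file_sim="example",                     # hypothetical log-file name
    num_types=1,
)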
def pyth_solve(coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky,
        is_interpolated, num_draws_emax, num_periods, num_points_interp,
        is_myopic, edu_start, is_debug, edu_max, min_idx, delta,
        periods_draws_emax):
    """ Solving the model using pure PYTHON code.
    """
    # Create the state space of the model and collect the results.
    record_solution_progress(1)

    # Create state space
    states_all, states_number_period, mapping_state_idx, max_states_period = \
        pyth_create_state_space(num_periods, edu_start, edu_max, min_idx)

    # Cutting to size
    states_all = states_all[:, :max(states_number_period), :]

    record_solution_progress(-1)

    # Calculate systematic payoffs which are later used in the backward
    # induction procedure. These are calculated without any reference to the
    # alternative shock distributions.
    record_solution_progress(2)

    # Calculate all systematic payoffs
    periods_payoffs_systematic = pyth_calculate_payoffs_systematic(
        num_periods, states_number_period, states_all, edu_start, coeffs_a,
        coeffs_b, coeffs_edu, coeffs_home, max_states_period)

    record_solution_progress(-1)

    # Backward iteration procedure. There is a PYTHON and FORTRAN
    # implementation available. If agents are myopic, the backward induction
    # procedure is not called upon.
    record_solution_progress(3)

    # Initialize containers, which contain a lot of missing values as we
    # capture the tree structure in arrays of fixed dimension.
    i, j = num_periods, max_states_period
    periods_emax = np.tile(MISSING_FLOAT, (i, j))

    if is_myopic:
        record_solution_progress(-2)

        # All other objects remain set to MISSING_FLOAT. This aligns the
        # treatment of the two special cases: (1) is_myopic and (2)
        # is_interpolated.
        for period, num_states in enumerate(states_number_period):
            periods_emax[period, :num_states] = 0.0
    else:
        periods_emax = pyth_backward_induction(
            num_periods, max_states_period, periods_draws_emax,
            num_draws_emax, states_number_period, periods_payoffs_systematic,
            edu_max, edu_start, mapping_state_idx, states_all, delta,
            is_debug, is_interpolated, num_points_interp, shocks_cholesky)

    record_solution_progress(-1)

    # Collect return arguments in tuple
    args = (periods_payoffs_systematic, states_number_period,
        mapping_state_idx, periods_emax, states_all)

    # Finishing
    return args
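# Usage sketch (illustrative only): the legacy solver above returns its results
# as a plain tuple, so callers unpack it positionally. The variable names simply
# mirror the names used inside the function; all inputs are assumed to be
# provided by the surrounding estimation code.
(periods_payoffs_systematic, states_number_period, mapping_state_idx,
    periods_emax, states_all) = pyth_solve(
        coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky,
        is_interpolated, num_draws_emax, num_periods, num_points_interp,
        is_myopic, edu_start, is_debug, edu_max, min_idx, delta,
        periods_draws_emax)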
def test_4(self):
    """ Testing the core functions of the solution step for the equality of
    results between the PYTHON and FORTRAN implementations.
    """
    params_spec, options_spec = generate_random_model()
    respy_obj = RespyCls(params_spec, options_spec)

    # Ensure that backward induction routines use the same grid for the
    # interpolation.
    write_interpolation_grid(respy_obj)

    # Extract class attributes
    (
        num_periods,
        edu_spec,
        optim_paras,
        num_draws_emax,
        seed_emax,
        is_debug,
        is_interpolated,
        num_points_interp,
        optimizer_options,
        file_sim,
        num_types,
    ) = dist_class_attributes(
        respy_obj,
        "num_periods",
        "edu_spec",
        "optim_paras",
        "num_draws_emax",
        "seed_emax",
        "is_debug",
        "is_interpolated",
        "num_points_interp",
        "optimizer_options",
        "file_sim",
        "num_types",
    )

    shocks_cholesky = optim_paras["shocks_cholesky"]
    coeffs_common = optim_paras["coeffs_common"]
    coeffs_home = optim_paras["coeffs_home"]
    coeffs_edu = optim_paras["coeffs_edu"]
    coeffs_a = optim_paras["coeffs_a"]
    coeffs_b = optim_paras["coeffs_b"]
    delta = optim_paras["delta"]

    type_spec_shifts = optim_paras["type_shifts"]
    type_spec_shares = optim_paras["type_shares"]

    min_idx = edu_spec["max"] + 1

    # Check the state space creation.
    state_space = StateSpace(
        num_periods, num_types, edu_spec["start"], edu_spec["max"], optim_paras
    )

    states_all, mapping_state_idx, _, _ = state_space._get_fortran_counterparts()

    pyth = (
        states_all,
        state_space.states_per_period,
        mapping_state_idx,
        state_space.states_per_period.max(),
    )

    f2py = fort_debug.wrapper_create_state_space(
        num_periods, num_types, edu_spec["start"], edu_spec["max"], min_idx
    )
    for i in range(4):
        # Slice Fortran output to shape of Python output.
        if isinstance(f2py[i], np.ndarray):
            f2py_reduced = f2py[i][tuple(map(slice, pyth[i].shape))]
        else:
            f2py_reduced = f2py[i]

        assert_allclose(pyth[i], f2py_reduced)

    _, _, pyth, _ = state_space._get_fortran_counterparts()

    f2py = fort_debug.wrapper_calculate_rewards_systematic(
        num_periods,
        state_space.states_per_period,
        states_all,
        state_space.states_per_period.max(),
        coeffs_common,
        coeffs_a,
        coeffs_b,
        coeffs_edu,
        coeffs_home,
        type_spec_shares,
        type_spec_shifts,
    )

    assert_allclose(pyth, f2py)

    # Carry some results from the systematic rewards calculation for future use
    # and create the required set of disturbances.
    periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)

    # Save result for next test.
    periods_rewards_systematic = pyth.copy()

    # Fix for hardcoded myopic agents.
    optim_paras["delta"] = 0.00000000000000001

    # Check backward induction procedure.
    state_space = pyth_backward_induction(
        periods_draws_emax,
        state_space,
        is_debug,
        is_interpolated,
        num_points_interp,
        optim_paras,
        file_sim,
        False,
    )
    _, _, _, pyth = state_space._get_fortran_counterparts()

    f2py = fort_debug.wrapper_backward_induction(
        num_periods,
        False,
        state_space.states_per_period.max(),
        periods_draws_emax,
        num_draws_emax,
        state_space.states_per_period,
        periods_rewards_systematic,
        mapping_state_idx,
        states_all,
        is_debug,
        is_interpolated,
        num_points_interp,
        edu_spec["start"],
        edu_spec["max"],
        shocks_cholesky,
        delta,
        coeffs_common,
        coeffs_a,
        coeffs_b,
        file_sim,
        False,
    )

    assert_allclose(pyth, f2py)
def test_4(self):
    """ Testing the core functions of the solution step for the equality of
    results between the PYTHON and FORTRAN implementations.
    """
    # Generate random initialization file
    generate_init()

    # Perform toolbox actions
    respy_obj = RespyCls('test.respy.ini')

    # Ensure that backward induction routines use the same grid for the
    # interpolation.
    write_interpolation_grid('test.respy.ini')

    # Extract class attributes
    num_periods, edu_start, edu_max, min_idx, model_paras, num_draws_emax, \
        seed_emax, is_debug, delta, is_interpolated, num_points_interp = \
        dist_class_attributes(respy_obj, 'num_periods', 'edu_start',
            'edu_max', 'min_idx', 'model_paras', 'num_draws_emax',
            'seed_emax', 'is_debug', 'delta', 'is_interpolated',
            'num_points_interp')

    # Auxiliary objects
    coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
        dist_model_paras(model_paras, is_debug)

    # Check the state space creation.
    args = (num_periods, edu_start, edu_max, min_idx)
    pyth = pyth_create_state_space(*args)
    f2py = fort_debug.f2py_create_state_space(*args)
    for i in range(4):
        np.testing.assert_allclose(pyth[i], f2py[i])

    # Carry some results from the state space creation for future use.
    states_all, states_number_period = pyth[:2]
    mapping_state_idx, max_states_period = pyth[2:]

    # Cutting to size
    states_all = states_all[:, :max(states_number_period), :]

    # Check calculation of systematic components of payoffs.
    args = (num_periods, states_number_period, states_all, edu_start,
        coeffs_a, coeffs_b, coeffs_edu, coeffs_home, max_states_period)
    pyth = pyth_calculate_payoffs_systematic(*args)
    f2py = fort_debug.f2py_calculate_payoffs_systematic(*args)
    np.testing.assert_allclose(pyth, f2py)

    # Carry some results from the systematic payoff calculation for future
    # use and create the required set of disturbances.
    periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax,
        is_debug)
    periods_payoffs_systematic = pyth

    # Check backward induction procedure.
    args = (num_periods, max_states_period, periods_draws_emax,
        num_draws_emax, states_number_period, periods_payoffs_systematic,
        edu_max, edu_start, mapping_state_idx, states_all, delta, is_debug,
        is_interpolated, num_points_interp, shocks_cholesky)
    pyth = pyth_backward_induction(*args)
    f2py = fort_debug.f2py_backward_induction(*args)
    np.testing.assert_allclose(pyth, f2py)