Example 1
def pyth_backward_induction(num_periods, max_states_period, periods_draws_emax,
                            num_draws_emax, states_number_period,
                            periods_payoffs_systematic, edu_max, edu_start,
                            mapping_state_idx, states_all, delta, is_debug,
                            is_interpolated, num_points_interp,
                            shocks_cholesky):
    """ Backward induction procedure. There are two main threads to this
    function depending on whether interpolation is requested or not.
    """
    # Construct auxiliary objects
    shocks_cov = np.matmul(shocks_cholesky, shocks_cholesky.T)

    # Auxiliary objects. These shifts are used to determine the expected
    # values of the two labor market alternatives. These are log normally
    # distributed, so the draws cannot simply be set to zero.
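    # For a shock eps ~ N(0, sigma^2), E[exp(eps)] = exp(sigma^2 / 2), which is
    # exactly the shift computed below for the two wage equations.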
    shifts = [0.00, 0.00, 0.00, 0.00]
    shifts[0] = np.clip(np.exp(shocks_cov[0, 0] / 2.0), 0.0, HUGE_FLOAT)
    shifts[1] = np.clip(np.exp(shocks_cov[1, 1] / 2.0), 0.0, HUGE_FLOAT)

    # Initialize containers with missing values
    periods_emax = np.tile(MISSING_FLOAT, (num_periods, max_states_period))

    # Iterate backward through all periods
    for period in range(num_periods - 1, -1, -1):

        # Extract auxiliary objects
        draws_emax = periods_draws_emax[period, :, :]
        num_states = states_number_period[period]

        draws_emax_transformed = transform_disturbances(
            draws_emax, shocks_cholesky)

        record_solution_progress(4, period, num_states)

        # The number of interpolation points is the same for all periods.
        # Thus, for some periods the number of interpolation points is
        # larger than the actual number of states. In that case no
        # interpolation is needed.
        any_interpolated = (num_points_interp <= num_states) and is_interpolated

        # Case distinction
        if any_interpolated:

            # Get indicator for interpolation and simulation of states
            is_simulated = get_simulated_indicator(num_points_interp,
                                                   num_states, period,
                                                   is_debug)

            # Constructing the exogenous variable for all states, including the
            # ones where simulation will take place. All information will be
            # used in either the construction of the prediction model or the
            # prediction step.
            exogenous, maxe = get_exogenous_variables(
                period, num_periods, num_states, delta,
                periods_payoffs_systematic, shifts, edu_max, edu_start,
                mapping_state_idx, periods_emax, states_all)

            # Constructing the dependent variable for the random subset of
            # points where the EMAX is actually calculated.
            endogenous = get_endogenous_variable(
                period, num_periods, num_states, delta,
                periods_payoffs_systematic, edu_max, edu_start,
                mapping_state_idx, periods_emax, states_all, is_simulated,
                num_draws_emax, maxe, draws_emax_transformed)

            # Create prediction model based on the random subset of points where
            # the EMAX is actually simulated and thus dependent and
            # independent variables are available. For the interpolation
            # points, the actual values are used.
            predictions = get_predictions(endogenous, exogenous, maxe,
                                          is_simulated, num_points_interp,
                                          num_states, is_debug)

            # Store results
            periods_emax[period, :num_states] = predictions

        else:

            # Loop over all possible states
            for k in range(states_number_period[period]):

                # Extract payoffs
                payoffs_systematic = periods_payoffs_systematic[period, k, :]

                # Simulate the expected future value.
                emax = get_future_value(num_periods, num_draws_emax, period, k,
                                        draws_emax_transformed,
                                        payoffs_systematic, edu_max, edu_start,
                                        periods_emax, states_all,
                                        mapping_state_idx, delta)

                # Store results
                periods_emax[period, k] = emax

    # Finishing
    return periods_emax
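The shifts above rest on the log-normal moment formula: for eps ~ N(0, sigma^2), E[exp(eps)] = exp(sigma^2 / 2). A minimal standalone sketch (variable names are illustrative, not part of respy) that checks this numerically:

import numpy as np

# Monte Carlo check of the log-normal shift used in the backward induction:
# for eps ~ N(0, sigma^2), E[exp(eps)] equals exp(sigma ** 2 / 2).
sigma = 0.4
rng = np.random.default_rng(0)
draws = rng.normal(0.0, sigma, size=1_000_000)

analytical = np.exp(sigma ** 2 / 2.0)
simulated = np.exp(draws).mean()

print(analytical, simulated)  # the two values agree to roughly three decimals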
Example 2
def pyth_simulate(periods_payoffs_systematic, mapping_state_idx, periods_emax,
                  states_all, shocks_cholesky, num_periods, edu_start, edu_max,
                  delta, num_agents_sim, periods_draws_sims, seed_sim):
    """ Wrapper for PYTHON and F2PY implementation of sample simulation.
    """

    record_simulation_start(num_agents_sim, seed_sim)

    # Standard deviates are transformed to the distributions relevant for
    # the agents' actual decision making as they traverse the tree.
    periods_draws_sims_transformed = np.tile(np.nan,
                                             (num_periods, num_agents_sim, 4))

    for period in range(num_periods):
        periods_draws_sims_transformed[period, :, :] = transform_disturbances(
            periods_draws_sims[period, :, :], shocks_cholesky)

    # Simulate agent experiences
    count = 0

    # Initialize data
    dataset = np.tile(MISSING_FLOAT, (num_agents_sim * num_periods, 8))

    for i in range(num_agents_sim):

        current_state = states_all[0, 0, :].copy()

        dataset[count, 0] = i

        record_simulation_progress(i)

        # Iterate over each period for the agent
        for period in range(num_periods):

            # Distribute state space
            exp_a, exp_b, edu, edu_lagged = current_state

            k = mapping_state_idx[period, exp_a, exp_b, edu, edu_lagged]

            # Write agent identifier and current period to data frame
            dataset[count, :2] = i, period

            # Select relevant subset
            payoffs_systematic = periods_payoffs_systematic[period, k, :]
            draws = periods_draws_sims_transformed[period, i, :]

            # Get total value of admissible states
            total_payoffs = get_total_value(period, num_periods, delta,
                                            payoffs_systematic, draws, edu_max,
                                            edu_start, mapping_state_idx,
                                            periods_emax, k, states_all)

            # Determine optimal choice
            max_idx = np.argmax(total_payoffs)

            # Record agent decision
            dataset[count, 2] = max_idx + 1

            # Record earnings
            dataset[count, 3] = MISSING_FLOAT
            if max_idx in [0, 1]:
                dataset[count, 3] = payoffs_systematic[max_idx] * draws[max_idx]

            # Write relevant state space for period to data frame
            dataset[count, 4:8] = current_state

            # Special treatment for education
            dataset[count, 6] += edu_start

            # Update work experiences and education
            if max_idx == 0:
                current_state[0] += 1
            elif max_idx == 1:
                current_state[1] += 1
            elif max_idx == 2:
                current_state[2] += 1

            # Update lagged education
            current_state[3] = 0

            if max_idx == 2:
                current_state[3] = 1

            # Update row indicator
            count += 1

    record_simulation_stop()

    # Finishing
    return dataset
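The loop above encodes an agent's state as the tuple (exp_a, exp_b, edu, edu_lagged) and updates it after every choice. A self-contained toy replay of those update rules (hypothetical, not the respy API), using the same choice coding 0 = occupation A, 1 = occupation B, 2 = schooling, 3 = home:

import numpy as np

# Toy replay of the state update rules from pyth_simulate above.
state = np.zeros(4, dtype=int)  # (exp_a, exp_b, edu, edu_lagged)

for choice in [2, 2, 0, 1, 3]:
    if choice <= 2:
        state[choice] += 1  # accumulate experience or years of education
    state[3] = 1 if choice == 2 else 0  # lagged-schooling indicator

print(state)  # [1 1 2 0]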
Example 3
def pyth_simulate(
    state_space,
    num_agents_sim,
    periods_draws_sims,
    seed_sim,
    file_sim,
    edu_spec,
    optim_paras,
    is_debug,
):
    """ Wrapper for PYTHON and F2PY implementation of sample simulation.

    At the beginning, agents are initialized with zero experience in occupations and
    random values for years of education, lagged choices and types. Then, each simulated
    agent in each period is paired with its corresponding state in the state space. We
    recalculate utilities for each choice as the agents experience different shocks in
    the simulation. In the end, observed and unobserved information is recorded in the
    simulated dataset.

    Parameters
    ----------
    state_space : StateSpace
        State space object.
    num_agents_sim : int
        Number of simulated agents.
    periods_draws_sims : np.ndarray
        Array with shape (num_periods, num_agents_sim, num_choices).
    seed_sim : int
        Seed for the simulation.
    file_sim : ???
        Undocumented parameter.
    edu_spec : dict
        Information on education.
    optim_paras : dict
        Parameters affected by optimization.
    is_debug : bool
        Flag for debugging modus.

    Returns
    -------
    simulated_data : pd.DataFrame
        Dataset of simulated agents.

    """
    record_simulation_start(num_agents_sim, seed_sim, file_sim)

    # Standard deviates are transformed to the distributions relevant for the agents'
    # actual decision making as they traverse the tree.
    periods_draws_sims_transformed = np.full(
        (state_space.num_periods, num_agents_sim, 4), np.nan)

    for period in range(state_space.num_periods):
        periods_draws_sims_transformed[period] = transform_disturbances(
            periods_draws_sims[period], np.zeros(4),
            optim_paras["shocks_cholesky"])

    # Get initial values of SCHOOLING, lagged choices and types for simulated agents.
    initial_education = get_random_edu_start(edu_spec, num_agents_sim,
                                             is_debug)
    initial_types = get_random_types(state_space.num_types, optim_paras,
                                     num_agents_sim, initial_education,
                                     is_debug)
    initial_choice_lagged = get_random_choice_lagged_start(
        edu_spec, num_agents_sim, initial_education, is_debug)

    # Create a matrix of initial states of simulated agents. OCCUPATION A and OCCUPATION
    # B are set to zero.
    current_states = np.column_stack((
        np.zeros((num_agents_sim, 2)),
        initial_education,
        initial_choice_lagged,
        initial_types,
    )).astype(np.uint8)

    data = []

    for period in range(state_space.num_periods):

        # Get indices which connect states in the state space and simulated agents.
        ks = state_space.indexer[
            np.full(num_agents_sim, period),
            current_states[:, 0],
            current_states[:, 1],
            current_states[:, 2],
            current_states[:, 3] - 1,
            current_states[:, 4],
        ]

        # Select relevant subset of random draws.
        draws = periods_draws_sims_transformed[period]

        # Get total values and ex post rewards.
        total_values, rewards_ex_post = get_continuation_value_and_ex_post_rewards(
            state_space.rewards[ks, -2:],
            state_space.rewards[ks, :4],
            state_space.emaxs[ks, :4],
            draws.reshape(-1, 1, 4),
            optim_paras["delta"],
            state_space.states[ks, 3] >= state_space.edu_max,
        )
        total_values = total_values.reshape(-1, 4)
        rewards_ex_post = rewards_ex_post.reshape(-1, 4)

        # We need to ensure that no individual chooses an inadmissible state. This
        # cannot be done directly in the get_continuation_value function as the
        # penalty would otherwise dominate the interpolation equation. The parameter
        # INADMISSIBILITY_PENALTY is a compromise. It is only relevant in very
        # contrived cases.
        total_values[:, 2] = np.where(current_states[:, 2] >= edu_spec["max"],
                                      -HUGE_FLOAT, total_values[:, 2])

        # Determine optimal choice.
        max_idx = np.argmax(total_values, axis=1)

        # Record wages. Expand the matrix with NaNs for choices 2 and 3 for easier
        # indexing.
        wages = np.column_stack(
            (state_space.rewards[ks, -2:], np.full((num_agents_sim, 2), np.nan))
        ) * draws
        # Do not swap np.arange with : (https://stackoverflow.com/a/46425896/7523785)!
        wage = wages[np.arange(num_agents_sim), max_idx]

        # Record data of all agents in one period.
        rows = np.column_stack((
            np.arange(num_agents_sim),
            np.full(num_agents_sim, period),
            max_idx + 1,
            wage,
            # Write the relevant state space for the period to the data frame. The
            # individual's type is not part of an observed dataset, but it is
            # included in the simulated dataset.
            current_states,
            # As we are working with a simulated dataset, we can also output
            # additional information that is not available in an observed dataset.
            # The discount rate is included as it allows one to construct the EMAX
            # from the information provided in the simulation output.
            total_values,
            state_space.rewards[ks, :4],
            draws,
            np.full(num_agents_sim, optim_paras["delta"][0]),
            # For testing purposes, we also explicitly include the general reward
            # component, the common component, and the immediate ex post rewards.
            state_space.rewards[ks, 4:7],
            rewards_ex_post,
        ))
        data.append(rows)

        # Update work experiences or education and lagged choice for the next period.
        current_states[np.arange(num_agents_sim), max_idx] = np.where(
            max_idx <= 2,
            current_states[np.arange(num_agents_sim), max_idx] + 1,
            current_states[np.arange(num_agents_sim), max_idx],
        )
        current_states[:, 3] = max_idx + 1

    simulated_data = (
        pd.DataFrame(data=np.vstack(data), columns=DATA_LABELS_SIM)
        .astype(DATA_FORMATS_SIM)
        .sort_values(["Identifier", "Period"])
        .reset_index(drop=True)
    )

    for i in range(num_agents_sim):
        record_simulation_progress(i, file_sim)
    record_simulation_stop(file_sim)

    return simulated_data
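The indexing note above ("Do not swap np.arange with :") refers to NumPy advanced indexing: wages[np.arange(n), max_idx] picks one element per row, i.e. one wage per agent, whereas wages[:, max_idx] selects whole columns and returns an (n, n) array. A small standalone demonstration:

import numpy as np

wages = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
max_idx = np.array([1, 0, 1])

# Advanced indexing: one element per row, shape (3,).
print(wages[np.arange(3), max_idx])  # [2. 3. 6.]

# Slicing with the same index array instead returns whole columns.
print(wages[:, max_idx].shape)  # (3, 3)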
Example 4
def pyth_backward_induction(
    periods_draws_emax,
    state_space,
    is_debug,
    is_interpolated,
    num_points_interp,
    optim_paras,
    file_sim,
    is_write,
):
    """ Calculate utilities with backward induction.

    Parameters
    ----------
    periods_draws_emax : np.ndarray
        Array with shape (num_periods, num_draws, num_choices) containing the
        random draws used to simulate the emax.
    state_space : class
        State space object.
    is_debug : bool
        Flag for debug modus.
    is_interpolated : bool
        Flag indicating whether interpolation is used to construct the emax in a
        period.
    num_points_interp : int
        Number of states for which the emax will be interpolated.
    optim_paras : dict
        Parameters affected by optimization.
    file_sim : ???
        Undocumented parameter.
    is_write : bool
        Flag indicating whether solution progress is written to the record file.

    Returns
    -------
    state_space : class
        State space containing the emax of the subsequent period of each choice, columns
        0-3, as well as the maximum emax of the current period for each state, column 4,
        in ``state_space.emaxs``.

    """
    state_space.emaxs = np.zeros((state_space.num_states, 5))

    # For myopic agents, utility of later periods does not play a role.
    if optim_paras["delta"] == 0:
        record_solution_progress(-2, file_sim)
        return state_space

    # Unpack arguments.
    delta = optim_paras["delta"]
    shocks_cholesky = optim_paras["shocks_cholesky"]

    shocks_cov = shocks_cholesky.dot(shocks_cholesky.T)

    # These shifts are used to determine the expected values of the two labor market
    # alternatives. These are log normally distributed, so the draws cannot simply be
    # set to zero.
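    # For eps ~ N(0, sigma^2), E[exp(eps)] = exp(sigma^2 / 2), hence the shifts.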
    shifts = np.zeros(4)
    shifts[:2] = np.clip(np.exp(np.diag(shocks_cov)[:2] / 2.0), 0.0, HUGE_FLOAT)

    for period in reversed(range(state_space.num_periods)):

        # The last period has no subsequent period to take continuation values from.
        if period < state_space.num_periods - 1:
            states_period = state_space.get_attribute_from_period("states", period)

            state_space.emaxs = get_emaxs_of_subsequent_period(
                states_period,
                state_space.indexer,
                state_space.emaxs,
                state_space.edu_max,
            )

        num_states = state_space.states_per_period[period]

        # Treatment of the disturbances for the risk-only case is straightforward. Their
        # distribution is fixed once and for all.
        draws_emax_standard = periods_draws_emax[period]
        draws_emax_risk = transform_disturbances(
            draws_emax_standard, np.zeros(4), shocks_cholesky
        )

        if is_write:
            record_solution_progress(4, file_sim, period, num_states)

        # The number of interpolation points is the same for all periods. Thus, for some
        # periods the number of interpolation points is larger than the actual number of
        # states. In that case no interpolation is needed.
        any_interpolated = (num_points_interp <= num_states) and is_interpolated

        # Unpack necessary attributes of the specific period.
        rewards_period = state_space.get_attribute_from_period("rewards", period)
        emaxs_period = state_space.get_attribute_from_period("emaxs", period)[:, :4]
        max_education = (
            state_space.get_attribute_from_period("states", period)[:, 3]
            >= state_space.edu_max
        )

        if any_interpolated:
            # Get indicator for interpolation and simulation of states
            is_simulated = get_simulated_indicator(
                num_points_interp, num_states, period, is_debug
            )

            # Constructing the exogenous variable for all states, including the ones
            # where simulation will take place. All information will be used in either
            # the construction of the prediction model or the prediction step.
            exogenous, max_emax = get_exogenous_variables(
                rewards_period, emaxs_period, shifts, delta, max_education
            )

            # Constructing the dependent variable for the random subset of points
            # where the EMAX is actually calculated.
            endogenous = get_endogenous_variable(
                rewards_period,
                emaxs_period,
                max_emax,
                is_simulated,
                draws_emax_risk,
                delta,
                max_education,
            )

            # Create prediction model based on the random subset of points where the
            # EMAX is actually simulated and thus dependent and independent variables
            # are available. For the interpolation points, the actual values are used.
            emax = get_predictions(endogenous, exogenous, max_emax, is_simulated)

        else:
            emax = construct_emax_risk(
                rewards_period[:, -2:],
                rewards_period[:, :4],
                emaxs_period,
                draws_emax_risk,
                delta,
                max_education,
            )

        state_space.get_attribute_from_period("emaxs", period)[:, 4] = emax

    return state_space
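Stripped of the respy state space and the interpolation branch, the recursion above computes for every state the simulated expected maximum, EMAX_t(s) = (1/R) * sum_r max_a [u(s, a) + eps_r(a) + delta * EMAX_{t+1}(s'_a)]. A toy self-contained sketch of this backward recursion (a hypothetical two-choice model with additive shocks, not the respy implementation):

import numpy as np

num_periods, num_draws, delta = 5, 1000, 0.95
rewards = np.array([1.0, 0.5])  # flow utility of the two choices
rng = np.random.default_rng(0)

# emax[t] holds the expected value of behaving optimally from period t on.
emax = np.zeros(num_periods + 1)
for period in reversed(range(num_periods)):
    draws = rng.normal(0.0, 0.2, size=(num_draws, 2))
    # Per draw and choice: flow reward + shock + discounted continuation value.
    total = rewards + draws + delta * emax[period + 1]
    emax[period] = total.max(axis=1).mean()

print(emax[:num_periods])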
Example 5
    def test_6(self):
        """ Further tests for the interpolation routines.
        """
        params_spec, options_spec = generate_random_model()
        respy_obj = RespyCls(params_spec, options_spec)
        respy_obj = simulate_observed(respy_obj)

        # Extract class attributes
        (
            periods_rewards_systematic,
            mapping_state_idx,
            seed_prob,
            periods_emax,
            num_periods,
            states_all,
            num_points_interp,
            edu_spec,
            num_draws_emax,
            is_myopic,
            is_debug,
            is_interpolated,
            optim_paras,
            optimizer_options,
            file_sim,
            num_types,
        ) = dist_class_attributes(
            respy_obj,
            "periods_rewards_systematic",
            "mapping_state_idx",
            "seed_prob",
            "periods_emax",
            "num_periods",
            "states_all",
            "num_points_interp",
            "edu_spec",
            "num_draws_emax",
            "is_myopic",
            "is_debug",
            "is_interpolated",
            "optim_paras",
            "optimizer_options",
            "file_sim",
            "num_types",
        )

        shocks_cholesky = optim_paras["shocks_cholesky"]
        shocks_cov = shocks_cholesky.dot(shocks_cholesky.T)
        coeffs_common = optim_paras["coeffs_common"]
        coeffs_a = optim_paras["coeffs_a"]
        coeffs_b = optim_paras["coeffs_b"]
        delta = optim_paras["delta"]

        # Add some additional objects required for the interfaces to the functions.
        period = np.random.choice(num_periods)

        periods_draws_emax = create_draws(num_periods, num_draws_emax,
                                          seed_prob, is_debug)

        draws_emax_standard = periods_draws_emax[period, :, :]

        draws_emax_risk = transform_disturbances(draws_emax_standard,
                                                 np.zeros(4), shocks_cholesky)

        # Initialize Python version and solve.
        state_space = StateSpace(num_periods, num_types, edu_spec["start"],
                                 edu_spec["max"], optim_paras)

        # Integrate periods_emax in state_space
        state_space.emaxs = np.column_stack((
            np.zeros((state_space.num_states, 4)),
            periods_emax[~np.isnan(periods_emax)
                         & (periods_emax != MISSING_FLOAT)],
        ))

        # Fill emaxs_a - emaxs_home in the requested period
        states_period = state_space.get_attribute_from_period("states", period)

        # Do not get the emaxs of the subsequent period if we are in the last one.
        if period != state_space.num_periods - 1:
            state_space.emaxs = get_emaxs_of_subsequent_period(
                states_period, state_space.indexer, state_space.emaxs,
                edu_spec["max"])

        num_states = state_space.states_per_period[period]

        shifts = np.random.randn(4)

        # Slight modification of request which assures that the interpolation code is
        # working.
        num_points_interp = min(num_points_interp, num_states)

        # Get the IS_SIMULATED indicator for the subset of points which are used for
        # the prediction model.
        is_simulated = get_simulated_indicator(num_points_interp, num_states,
                                               period, is_debug)

        # Unpack necessary attributes
        rewards_period = state_space.get_attribute_from_period("rewards", period)
        emaxs_period = state_space.get_attribute_from_period("emaxs", period)[:, :4]
        max_education = (
            state_space.get_attribute_from_period("states", period)[:, 3]
            >= edu_spec["max"]
        )

        # Construct the exogenous variables for all points of the state space.
        exogenous, max_emax = get_exogenous_variables(rewards_period,
                                                      emaxs_period, shifts,
                                                      optim_paras["delta"],
                                                      max_education)

        # Align output between Python and Fortran version.
        py = (exogenous, max_emax)

        f90 = fort_debug.wrapper_get_exogenous_variables(
            period,
            num_periods,
            num_states,
            periods_rewards_systematic,
            shifts,
            mapping_state_idx,
            periods_emax,
            states_all,
            edu_spec["start"],
            edu_spec["max"],
            delta,
            coeffs_common,
            coeffs_a,
            coeffs_b,
            num_types,
        )

        assert_almost_equal(py[0], f90[0])
        assert_almost_equal(py[1], f90[1])

        # Construct endogenous variable so that the prediction model can be fitted.
        endogenous = get_endogenous_variable(
            rewards_period,
            emaxs_period,
            max_emax,
            is_simulated,
            draws_emax_risk,
            optim_paras["delta"],
            max_education,
        )

        f90 = fort_debug.wrapper_get_endogenous_variable(
            period,
            num_periods,
            num_states,
            periods_rewards_systematic,
            mapping_state_idx,
            periods_emax,
            states_all,
            is_simulated,
            num_draws_emax,
            max_emax,
            draws_emax_risk,
            edu_spec["start"],
            edu_spec["max"],
            shocks_cov,
            delta,
            coeffs_common,
            coeffs_a,
            coeffs_b,
        )
        assert_almost_equal(endogenous, replace_missing_values(f90))

        py = get_predictions(endogenous, exogenous, max_emax, is_simulated)

        f90 = fort_debug.wrapper_get_predictions(
            endogenous,
            exogenous,
            max_emax,
            is_simulated,
            num_points_interp,
            num_states,
            file_sim,
            False,
        )

        # The assertion fails if a regressor column is all zeros, so only compare
        # when every column of the exogenous matrix has a nonzero entry.
        if exogenous.any(axis=0).all():
            assert_array_almost_equal(py, f90)
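The guard above skips the comparison when a column of the exogenous matrix is identically zero, presumably because such a column makes the interpolation regression rank deficient, so the PYTHON and FORTRAN least-squares routines can return different coefficients. A plain NumPy illustration of the check itself:

import numpy as np

exogenous = np.array([[1.0, 0.0], [2.0, 0.0]])

# True only if every column contains at least one nonzero entry.
print(exogenous.any(axis=0).all())  # False, since column 1 is all zeros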
Example 6
    def test_1(self):
        """ Compare the evaluation of the criterion function for the ambiguity
        optimization and the simulated expected future value between the FORTRAN and
        PYTHON implementations. These tests are set up a separate test case due to the
        large setup cost to construct the ingredients for the interface.
        """
        # Generate constraint periods
        constr = {"program": {"version": "python"}}
        # Generate random initialization file
        params_spec, options_spec = generate_random_model(point_constr=constr)
        respy_obj = RespyCls(params_spec, options_spec)
        respy_obj = simulate_observed(respy_obj)

        # Extract class attributes
        (
            state_space,
            states_all,
            mapping_state_idx,
            periods_rewards_systematic,
            periods_emax,
            num_periods,
            num_draws_emax,
            edu_spec,
            optim_paras,
            num_types,
        ) = dist_class_attributes(
            respy_obj,
            "state_space",
            "states_all",
            "mapping_state_idx",
            "periods_rewards_systematic",
            "periods_emax",
            "num_periods",
            "num_draws_emax",
            "edu_spec",
            "optim_paras",
            "num_types",
        )

        # Sample draws
        draws_emax_standard = np.random.multivariate_normal(
            np.zeros(4), np.identity(4), num_draws_emax)
        draws_emax_risk = transform_disturbances(
            draws_emax_standard, np.zeros(4), optim_paras["shocks_cholesky"])

        # Sampling of random period and admissible state index
        period = np.random.choice(range(num_periods))
        k = np.random.choice(range(state_space.states_per_period[period]))

        # Select systematic rewards
        rewards_systematic = periods_rewards_systematic[period, k, :]

        # Evaluation of simulated expected future values. Limit to one individual,
        # as in the Fortran version.
        rewards_period = state_space.get_attribute_from_period("rewards", period)[k]
        emaxs_period = state_space.get_attribute_from_period("emaxs", period)[k, :4]
        max_education_period = (
            state_space.get_attribute_from_period("states", period)[k, 3]
            >= edu_spec["max"]
        )

        py = construct_emax_risk(
            rewards_period[-2:],
            rewards_period[:4],
            emaxs_period,
            draws_emax_risk,
            optim_paras["delta"],
            max_education_period,
        )

        f90 = fort_debug.wrapper_construct_emax_risk(
            num_periods,
            num_draws_emax,
            period,
            k,
            draws_emax_risk,
            rewards_systematic,
            periods_emax,
            states_all,
            mapping_state_idx,
            edu_spec["start"],
            edu_spec["max"],
            optim_paras["delta"],
            optim_paras["coeffs_common"],
            optim_paras["coeffs_a"],
            optim_paras["coeffs_b"],
            num_types,
        )

        assert_allclose(py, f90)
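Both implementations compute the simulated expected maximum for a single state: the average over draws of the best total value across the four choices. A hedged sketch of the risk-only core (a hypothetical helper, assuming multiplicative wage shocks for the two occupations and additive shocks for schooling and home, consistent with how wages are recorded in the simulation examples above):

import numpy as np

def emax_risk_sketch(wages, nonpec, emaxs_next, draws, delta):
    """Simulated EMAX of one state (illustrative helper, not the respy API).

    wages      : (2,) systematic wages of the two occupations.
    nonpec     : (4,) systematic non-pecuniary rewards of all four choices.
    emaxs_next : (4,) continuation values of the four successor states.
    draws      : (num_draws, 4) transformed disturbances.
    """
    num_draws = draws.shape[0]
    total = np.empty((num_draws, 4))
    # Occupations: multiplicative wage shock plus non-pecuniary reward.
    total[:, :2] = wages * draws[:, :2] + nonpec[:2]
    # Schooling and home: additive shock.
    total[:, 2:] = nonpec[2:] + draws[:, 2:]
    # Add discounted continuation values, then average the per-draw maximum.
    total += delta * emaxs_next
    return total.max(axis=1).mean()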