Code example #1
def test_equality_for_myopic_agents_and_tiny_delta():
    """Test equality of simulated data and likelihood with myopia and tiny delta."""
    # Get simulated data and likelihood for myopic model.
    params, options = generate_random_model(myopic=True)

    simulate = rp.get_simulate_func(params, options)
    df = simulate(params)

    log_like = rp.get_log_like_func(params, options, df)
    likelihood = log_like(params)

    # Get simulated data and likelihood for model with tiny delta.
    params.loc["delta", "value"] = 1e-12

    df_ = simulate(params)

    log_like = rp.get_log_like_func(params, options, df_)
    likelihood_ = log_like(params)

    # The continuation values differ because for delta = 0 the backward induction
    # is skipped entirely and all continuation values are set to zero, whereas for a
    # tiny delta the backward induction runs but the continuation values have no impact.
    columns = df.filter(like="Continu").columns.tolist()
    pd.testing.assert_frame_equal(df.drop(columns=columns),
                                  df_.drop(columns=columns))

    np.testing.assert_almost_equal(likelihood, likelihood_, decimal=12)
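The test above omits its imports. A minimal sketch of the assumed setup follows; the module path of generate_random_model is an assumption based on respy's test utilities and may differ across versions.

# Assumed setup for the test above (not part of the original snippet).
import numpy as np
import pandas as pd

import respy as rp
from respy.tests.random_model import generate_random_model  # assumption: path may vary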
Code example #2
import datetime as dt
import json
import os
import sys


def main():
    """Evaluate the criterion function multiple times for a scalability report.

    The criterion function is evaluated ``maxfun`` times. The number of threads is
    limited via environment variables. **respy** has to be imported after the environment
    variables are set because NumPy, Numba, and others read them at import time.

    """
    model = sys.argv[1]
    maxfun = int(sys.argv[2])
    n_threads = int(sys.argv[3])

    # Validate input.
    assert maxfun >= 0, "Maximum number of function evaluations cannot be negative."
    assert n_threads >= 1 or n_threads == -1, (
        "Use -1 to impose no limit on the number of threads or choose a number "
        "greater than zero."
    )

    # Set number of threads
    os.environ["NUMBA_NUM_THREADS"] = f"{n_threads}"
    os.environ["MKL_NUM_THREADS"] = f"{n_threads}"
    os.environ["OMP_NUM_THREADS"] = f"{n_threads}"
    os.environ["NUMEXPR_NUM_THREADS"] = f"{n_threads}"

    # Import respy late so that the environment variables are read by NumPy etc.
    import respy as rp

    # Get model
    params, options = rp.get_example_model(model, with_data=False)

    # Simulate the data
    simulate = rp.get_simulate_func(params, options)
    df = simulate(params)

    # Get the criterion function and the parameter vector.
    crit_func = rp.get_log_like_func(params, options, df)

    # Run the estimation
    start = dt.datetime.now()

    for _ in range(maxfun):
        crit_func(params)

    end = dt.datetime.now()

    # Aggregate information
    output = {
        "model": model,
        "maxfun": maxfun,
        "n_threads": n_threads,
        "start": str(start),
        "end": str(end),
        "duration": str(end - start),
    }

    # Save time to file
    with open("scalability_results.txt", "a+") as file:
        file.write(json.dumps(output))
        file.write("\n")
Code example #3
def run_bootstrap(df, params, options, constr, num_boots, is_perturb=False):
    """Run bootstrap."""
    boot_params = pd.DataFrame(index=params.index)

    for iter_ in range(num_boots):

        np.random.seed(iter_)

        boot_df = get_bootstrap_sample(df, seed=iter_)

        # Set up starting values
        params_start = params.copy()

        if is_perturb:
            for index in params.index:
                lower, upper = params_start.loc[index, ["lower", "upper"]]
                params_start.loc[index, "value"] = np.random.uniform(lower, upper)

            # Re-impose the constrained values on the perturbed starting values.
            for dict_ in constr:
                try:
                    stat = params.loc[dict_["loc"], "value"].values
                except AttributeError:
                    # ``.loc`` returned a scalar instead of a Series.
                    stat = params.loc[dict_["loc"], "value"]
                params_start.loc[dict_["loc"], "value"] = stat

        crit_func = rp.get_log_like_func(params, options, boot_df)

        results, params_rslt = maximize(
            crit_func,
            params_start,
            "nlopt_bobyqa",
            algo_options={"maxeval": 100},
            constraints=constr,
        )

        boot_params[f"bootstrap_{iter_}"] = params_rslt["value"]

    return boot_params
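A minimal usage sketch for run_bootstrap. The model name and the constraint list are assumptions, and run_bootstrap's own dependencies (numpy, pandas, respy, estimagic's maximize, and a get_bootstrap_sample helper) are presumed to be importable in the same module.

# Hypothetical driver for run_bootstrap (all concrete values are illustrative).
import respy as rp

params, options = rp.get_example_model("kw_94_one", with_data=False)
constr = [{"loc": "shocks_sdcorr", "type": "sdcorr"}]

simulate = rp.get_simulate_func(params, options)
df = simulate(params)

boot_params = run_bootstrap(df, params, options, constr, num_boots=10)

# Bootstrap standard errors: spread of each parameter across replications.
boot_se = boot_params.std(axis=1)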
Code example #4
for num_agents in GRID_AGENTS:

    options = options_base.copy()
    options["simulation_agents"] = num_agents

    for num_draws in GRID_DRAWS:

        options["estimation_draws"] = num_draws
        options["solution_draws"] = num_draws

        simulate = rp.get_simulate_func(params_base, options)
        df = simulate(params_base)

        for tau in GRID_TAU:

            options["estimation_tau"] = tau

            crit_func = rp.get_log_like_func(params_base, options, df)
            grid = np.concatenate((np.linspace(0.948, 0.952, 40), [delta_true]))

            fvals = []
            for value in grid:
                params = params_base.copy()
                params.loc[("delta", "delta"), "value"] = value
                fvals.append(crit_func(params))

            best = grid[fvals.index(max(fvals))]
            rslts.loc[(num_agents, num_draws, tau), "delta"] = best
            rslts.to_pickle("tuning.delta.pkl")
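The loop above is a fragment and relies on objects defined elsewhere. A minimal sketch of the assumed setup is given below; the grid values, the model name, and the layout of rslts are illustrative assumptions.

# Assumed setup for the tuning loop above (all concrete values are illustrative).
import numpy as np
import pandas as pd
import respy as rp

params_base, options_base = rp.get_example_model("kw_94_one", with_data=False)
delta_true = params_base.loc[("delta", "delta"), "value"]

GRID_AGENTS = [1000, 2000]
GRID_DRAWS = [200, 400]
GRID_TAU = [10, 100, 500]

index = pd.MultiIndex.from_product(
    [GRID_AGENTS, GRID_DRAWS, GRID_TAU], names=["agents", "draws", "tau"]
)
rslts = pd.DataFrame(index=index, columns=["delta"])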
Code example #5
File: workflow.py Project: jkoenig97/ekw-pres
import respy as rp
from estimagic import maximize

# obtain model input
params, options, df = rp.get_example_model("kw_97_extended_respy")

# process model specification
log_like = rp.get_log_like_func(params, options, df)
simulate = rp.get_simulate_func(params, options)

# perform calibration
results, params_rslt = maximize(log_like, params, "nlopt_bobyqa")

# conduct analysis
df_rslt = simulate(params_rslt)
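As a quick check of the calibration, one might compare observed and simulated choice frequencies; the sketch assumes both data sets carry a Choice column, as respy data sets typically do.

# Hypothetical sanity check: compare observed vs. simulated choice frequencies
# (assumes a "Choice" column in both data frames).
import pandas as pd

shares = pd.concat(
    [
        df["Choice"].value_counts(normalize=True),
        df_rslt["Choice"].value_counts(normalize=True),
    ],
    axis=1,
    keys=["observed", "simulated"],
)
print(shares)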