Example #1
def pyth_criterion(
    x,
    is_interpolated,
    num_points_interp,
    is_debug,
    data,
    tau,
    periods_draws_emax,
    periods_draws_prob,
    state_space,
):
    """Criterion function for the likelihood maximization."""
    optim_paras = distribute_parameters(x, is_debug)

    # Calculate all systematic rewards
    state_space.update_systematic_rewards(optim_paras)

    state_space = pyth_backward_induction(
        periods_draws_emax,
        state_space,
        is_debug,
        is_interpolated,
        num_points_interp,
        optim_paras,
        "",
        False,
    )

    contribs = pyth_contributions(
        state_space, data, periods_draws_prob, tau, optim_paras
    )

    crit_val = get_log_likl(contribs)

    return crit_val
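The definition of get_log_likl is not shown in this example. As a rough, hypothetical stand-in, the individual likelihood contributions returned by pyth_contributions can be aggregated into a scalar criterion by averaging their clipped negative logs; the clipping floor, the sign convention, and the function name below are assumptions, not the respy implementation.

import numpy as np


def aggregate_contributions(contribs, floor=1e-250):
    """Hypothetical stand-in for get_log_likl: mean negative log-likelihood.

    The clipping floor guards against log(0); both the floor and the sign
    convention are assumptions rather than the actual respy implementation.
    """
    contribs = np.clip(np.asarray(contribs, dtype=float), floor, None)
    return -np.mean(np.log(contribs))


# Toy usage with made-up contributions.
print(aggregate_contributions([0.2, 0.05, 0.6]))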
Example #2
    def update_optim_paras(self, x_econ):
        """Update model parameters."""
        x_econ = copy.deepcopy(x_econ)

        self.reset()

        new_paras_dict = distribute_parameters(
            paras_vec=x_econ, is_debug=True, paras_type="econ"
        )
        self.attr["optim_paras"].update(new_paras_dict)
Example #3
def scripts_update(init_file):
    """ Update model parametrization in initialization file.
    """
    # Collect baseline update
    init_dict = read_init_file(init_file)

    paras_steps = get_est_info()["paras_step"]

    # We cannot use this script if there are missing values in the parameters,
    # which occurs when values were too large to be recorded.
    if "---" in paras_steps.tolist():
        raise UserError("Missing values in est.respy.info")

    # We need to make sure that the size of the parameter vector fits the
    # initialization file. For example, this might not be the case when the number of
    # types is changed in the initialization file and an update is requested with an
    # earlier logfile.
    num_types = len(init_dict["TYPE SHARES"]["coeffs"]) // 2 + 1
    num_paras = len(paras_steps)
    if num_paras != 53 + (num_types - 1) * 6:
        raise UserError("Info does not fit the current model specification")

    optim_paras = distribute_parameters(paras_steps, True)
    shocks_coeffs = paras_steps[43:53]

    # Update initialization dictionary
    init_dict["COMMON"]["coeffs"] = optim_paras["coeffs_common"]
    init_dict["OCCUPATION A"]["coeffs"] = optim_paras["coeffs_a"]
    init_dict["OCCUPATION B"]["coeffs"] = optim_paras["coeffs_b"]
    init_dict["EDUCATION"]["coeffs"] = optim_paras["coeffs_edu"]
    init_dict["HOME"]["coeffs"] = optim_paras["coeffs_home"]
    init_dict["BASICS"]["coeffs"] = optim_paras["delta"]
    init_dict["SHOCKS"]["coeffs"] = shocks_coeffs
    init_dict["TYPE SHARES"]["coeffs"] = optim_paras["type_shares"][2:]
    init_dict["TYPE SHIFTS"]["coeffs"] = optim_paras["type_shifts"].flatten(
    )[4:]

    # We first write to an intermediate file so that the original file is not
    # lost if a problem occurs during writing.
    write_init_file(init_dict, ".model.respy.ini")
    shutil.move(".model.respy.ini", init_file)
Example #4
    def test_1(self):
        """ Testing whether back-and-forth transformation have no effect.
        """
        for _ in range(10):
            num_types = np.random.randint(1, 5)
            num_paras = 53 + (num_types - 1) * 6

            # Create random parameter vector
            base = np.random.uniform(size=num_paras)

            x = base.copy()

            # Apply numerous transformations
            for _ in range(10):
                optim_paras = distribute_parameters(x, is_debug=True)
                args = (optim_paras, num_paras, "all", True)
                x = get_optim_paras(*args)

            np.testing.assert_allclose(base, x)
Example #5
def record_estimation_eval(opt_obj, fval, x_optim_all_unscaled, start):
    """Log the progress of an estimation.

    The function consists of two parts because the progress is recorded in two
    files.

    """
    # Distribute class attributes
    paras_fixed = opt_obj.paras_fixed
    num_paras = opt_obj.num_paras
    num_types = opt_obj.num_types

    shocks_cholesky, _ = extract_cholesky(x_optim_all_unscaled, 0)
    shocks_coeffs = cholesky_to_coeffs(shocks_cholesky)

    # Identify events
    is_start = opt_obj.num_eval == 0
    is_step = opt_obj.crit_vals[1] > fval
    x_optim_shares = x_optim_all_unscaled[53:53 + (num_types - 1) * 2]

    for i in range(3):
        if i == 0 and not is_start:
            continue

        if i == 1:
            if not is_step:
                continue
            else:
                opt_obj.num_step += 1

        if i == 2:
            opt_obj.num_eval += 1

        opt_obj.crit_vals[i] = fval
        opt_obj.x_optim_container[:, i] = x_optim_all_unscaled
        opt_obj.x_econ_container[:43, i] = x_optim_all_unscaled[:43]
        opt_obj.x_econ_container[43:53, i] = shocks_coeffs
        opt_obj.x_econ_container[53:53 + (num_types - 1) * 2, i] = x_optim_shares
        opt_obj.x_econ_container[53 + (num_types - 1) * 2:num_paras, i] = (
            x_optim_all_unscaled[53 + (num_types - 1) * 2:]
        )

    x_optim_container = opt_obj.x_optim_container
    x_econ_container = opt_obj.x_econ_container

    # Now we turn to est.respy.log
    with open("est.respy.log", "a") as out_file:
        fmt_ = " {0:>4}{1:>13}" + " " * 10 + "{2:>4}{3:>10}\n\n"
        line = ["EVAL", opt_obj.num_eval, "STEP", opt_obj.num_step]
        out_file.write(fmt_.format(*line))
        fmt_ = "   {0:<9}     {1:>25}\n"
        out_file.write(fmt_.format(*["Date", time.strftime("%d/%m/%Y")]))
        fmt_ = "   {0:<9}     {1:>25}\n"
        out_file.write(fmt_.format(*["Time", time.strftime("%H:%M:%S")]))

        fmt_ = "   {:<9} " + "    {:>25}\n\n"
        duration = int((datetime.now() - start).total_seconds())
        out_file.write(fmt_.format(*["Duration", duration]))

        fmt_ = "   {0:>9}     {1:>25}\n"
        out_file.write(fmt_.format(*["Criterion", char_floats(fval)[0]]))

        out_file.write("\n")
        fmt_ = "{:>13}    " + "{:>25}    " * 3
        out_file.write(fmt_.format(*["Identifier", "Start", "Step", "Current"]))
        out_file.write("\n\n")

        # Formatting for the file
        fmt_ = "   {:>10}" + "    {:>25}" * 3
        for i in range(num_paras):
            if paras_fixed[i]:
                continue
            line = [i] + char_floats(x_optim_container[i, :])
            out_file.write(fmt_.format(*line).rstrip(" ") + "\n")
        out_file.write("\n")

        # Get information on the spectral condition number of the covariance
        # matrix of the shock distribution.
        cond = []
        for i in range(3):
            shocks_cholesky = distribute_parameters(
                x_econ_container[:, i], paras_type="econ"
            )["shocks_cholesky"]
            shocks_cov = shocks_cholesky.dot(shocks_cholesky.T)
            cond += [np.log(_spectral_condition_number(shocks_cov))]
        fmt_ = "   {:>9} " + "    {:25.15f}" * 3 + "\n"
        out_file.write(fmt_.format(*["Condition"] + cond))

        out_file.write("\n")

        # Record warnings
        value_current = opt_obj.crit_vals[2]
        value_start = opt_obj.crit_vals[0]

        is_large = [False, False, False]
        is_large[0] = abs(value_start) > LARGE_FLOAT
        is_large[1] = abs(opt_obj.crit_vals[1]) > LARGE_FLOAT
        is_large[2] = abs(value_current) > LARGE_FLOAT

        for i in range(3):
            if is_large[i]:
                record_warning(i + 1)

    write_est_info(
        opt_obj.crit_vals[0],
        x_econ_container[:, 0],
        opt_obj.num_step,
        opt_obj.crit_vals[1],
        x_econ_container[:, 1],
        opt_obj.num_eval,
        opt_obj.crit_vals[2],
        x_econ_container[:, 2],
        num_paras,
    )
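The slicing into x_econ_container above implies a fixed layout of the economic parameter vector: entries [:43] hold the coefficients, [43:53] the ten shock coefficients, the next 2 * (num_types - 1) entries the type shares, and the remainder the type shifts. A self-contained sketch of that layout; the function name and dictionary keys are illustrative, not respy API:

import numpy as np


def split_econ_vector(x_econ, num_types):
    """Illustrative split of an economic parameter vector following the
    slices used to fill x_econ_container above."""
    n_shares = 2 * (num_types - 1)
    return {
        "coeffs": x_econ[:43],
        "shocks": x_econ[43:53],
        "type_shares": x_econ[53:53 + n_shares],
        "type_shifts": x_econ[53 + n_shares:],
    }


parts = split_econ_vector(np.arange(53 + 2 * 6), num_types=3)
assert len(parts["shocks"]) == 10
assert len(parts["type_shares"]) == 4
assert len(parts["type_shifts"]) == 8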
Example #6
def _create_attribute_dictionary(params_spec, options_spec):
    attr = {
        "edu_max":
        int(options_spec["edu_spec"]["max"]),
        "file_est":
        str(options_spec["estimation"]["file"]),
        "file_sim":
        str(options_spec["simulation"]["file"]),
        "is_debug":
        bool(options_spec["program"]["debug"]),
        "is_interpolated":
        bool(options_spec["interpolation"]["flag"]),
        "is_store":
        bool(options_spec["solution"]["store"]),
        "maxfun":
        int(options_spec["estimation"]["maxfun"]),
        "num_agents_est":
        int(options_spec["estimation"]["agents"]),
        "num_agents_sim":
        int(options_spec["simulation"]["agents"]),
        "num_draws_emax":
        int(options_spec["solution"]["draws"]),
        "num_draws_prob":
        int(options_spec["estimation"]["draws"]),
        "num_points_interp":
        int(options_spec["interpolation"]["points"]),
        "num_procs":
        int(options_spec["program"]["procs"]),
        "num_threads":
        int(options_spec["program"]["threads"]),
        "num_types":
        int(_get_num_types(params_spec)),
        "optim_paras":
        distribute_parameters(params_spec["para"].to_numpy(), is_debug=True),
        "optimizer_used":
        str(options_spec["estimation"]["optimizer"]),
        # make type conversions here
        "precond_spec":
        options_spec["preconditioning"],
        "seed_emax":
        int(options_spec["solution"]["seed"]),
        "seed_prob":
        int(options_spec["estimation"]["seed"]),
        "seed_sim":
        int(options_spec["simulation"]["seed"]),
        "tau":
        float(options_spec["estimation"]["tau"]),
        "version":
        str(options_spec["program"]["version"]),
        "derivatives":
        str(options_spec["derivatives"]),
        # to-do: add type conversions and checks for edu spec
        "edu_spec":
        options_spec["edu_spec"],
        "num_periods":
        int(options_spec["num_periods"]),
        "num_paras":
        len(params_spec),
    }

    # todo: add assert statements for bounds
    bounds = []
    for coeff in params_spec.index:
        bound = []
        for bounds_type in ["lower", "upper"]:
            if pd.isnull(params_spec.loc[coeff, bounds_type]):
                bound.append(None)
            else:
                bound.append(float(params_spec.loc[coeff, bounds_type]))
        bounds.append(bound)

    attr["optim_paras"]["paras_bounds"] = bounds
    attr["optim_paras"]["paras_fixed"] = (
        params_spec["fixed"].astype(bool).to_numpy().tolist())

    optimizers = [
        "FORT-NEWUOA",
        "FORT-BFGS",
        "FORT-BOBYQA",
        "SCIPY-BFGS",
        "SCIPY-POWELL",
        "SCIPY-LBFGSB",
    ]
    attr["optimizer_options"] = {}
    for opt in optimizers:
        attr["optimizer_options"][opt] = options_spec[opt]

    attr["is_myopic"] = params_spec.loc[("delta", "delta"), "para"] == 0.0

    return attr
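The bounds loop above turns missing entries in the "lower" and "upper" columns into None and everything else into floats. A minimal, self-contained reproduction with a toy params_spec (the index labels and values are made up):

import numpy as np
import pandas as pd

# Toy specification; only the "lower" and "upper" columns matter here.
params_spec = pd.DataFrame(
    {"lower": [np.nan, 0.0], "upper": [1.0, np.nan]},
    index=["delta", "coeffs_a_0"],
)

bounds = []
for coeff in params_spec.index:
    bound = []
    for bounds_type in ["lower", "upper"]:
        value = params_spec.loc[coeff, bounds_type]
        bound.append(None if pd.isnull(value) else float(value))
    bounds.append(bound)

assert bounds == [[None, 1.0], [0.0, None]]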