Code example #1
def test_robustness_1():
    np.random.seed(5470)
    true_params = get_random_params(3)
    start_params = get_random_params(3)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    exog, endog = _simulate_sample(NUM_AGENTS, true_params, 0.5)
    objective = functools.partial(_nonlinear_criterion, endog, exog)
    results = minimize_pounders_np(objective, start_params["value"].to_numpy(), bounds)

    # decimal=0.1 is unusual but works: assert_array_almost_equal checks
    # abs(desired - actual) < 1.5 * 10**(-decimal), so this is a deliberately
    # loose tolerance for a noisy problem.
    np.testing.assert_array_almost_equal(
        true_params["value"], results["x"], decimal=0.1
    )
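
The helpers used above (get_random_params, _simulate_sample, _nonlinear_criterion) are defined elsewhere in the test module. Since minimize_pounders_np wraps TAO's POUNDERs, a least-squares solver, the bound criterion presumably returns a vector of residuals rather than a scalar. A minimal sketch of that assumed shape, with a hypothetical functional form and stand-in data:

import functools

import numpy as np

def _nonlinear_criterion(endog, exog, x):
    # residual vector of a hypothetical nonlinear model
    return endog - np.exp(-x[0] * exog) / (x[1] + x[2] * exog)

exog = np.linspace(1.0, 2.0, 100)                   # stand-in data
endog = np.exp(-0.5 * exog) / (1.0 + 0.5 * exog)    # generated without noise
objective = functools.partial(_nonlinear_criterion, endog, exog)
print(objective(np.array([0.5, 1.0, 0.5])))         # all residuals ~0 at the true params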
Code example #2
def test_box_constr():
    np.random.seed(5472)
    true_params = get_random_params(2, 0.3, 0.4, 0, 0.3)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    start_params = true_params.copy()
    start_params["value"] = get_random_params(2, 0.1, 0.2)["value"]

    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)
    objective = functools.partial(_ols_criterion, endog, exog)
    calculated = minimize_pounders_np(
        objective, start_params["value"].to_numpy(), bounds
    )
    assert 0 <= calculated["x"][0] <= 0.3
    assert 0 <= calculated["x"][1] <= 0.3
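
A note on the bounds construction used throughout these tests: selecting the "lower" and "upper" columns, transposing, and wrapping in tuple yields a pair (lower_bounds, upper_bounds). A standalone illustration with made-up values:

import numpy as np
import pandas as pd

params = pd.DataFrame({"lower": [0.0, 0.0], "upper": [0.3, 0.3]})
bounds = tuple(params[["lower", "upper"]].to_numpy().T)
# bounds == (array([0., 0.]), array([0.3, 0.3])): all lower bounds, then all upper bounds
assert np.array_equal(bounds[0], [0.0, 0.0]) and np.array_equal(bounds[1], [0.3, 0.3])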
Code example #3
def test_robustness_2():
    np.random.seed(5471)
    true_params = get_random_params(2)
    start_params = get_random_params(2)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)
    objective = functools.partial(_ols_criterion, endog, exog)
    results = minimize_pounders_np(objective, start_params["value"].to_numpy(), bounds)
    calculated = results["x"]

    x = np.column_stack([np.ones_like(exog), exog])
    y = endog.reshape(len(endog), 1)
    expected = np.linalg.lstsq(x, y, rcond=None)[0].flatten()

    np.testing.assert_almost_equal(calculated, expected, decimal=6)
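
The lstsq call above is the closed-form OLS benchmark the optimizer is checked against. To make that benchmark explicit: the same coefficients also solve the normal equations X'Xb = X'y. A self-contained comparison with synthetic data (lstsq remains the numerically safer choice):

import numpy as np

rng = np.random.default_rng(0)
exog = rng.normal(size=100)
endog = 0.2 + 0.35 * exog + rng.normal(scale=0.1, size=100)

x = np.column_stack([np.ones_like(exog), exog])
y = endog.reshape(-1, 1)
via_lstsq = np.linalg.lstsq(x, y, rcond=None)[0].flatten()
via_normal_eq = np.linalg.solve(x.T @ x, x.T @ y).flatten()
np.testing.assert_almost_equal(via_lstsq, via_normal_eq)  # agree up to rounding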
Code example #4
def test_max_iters():
    np.random.seed(5473)
    true_params = get_random_params(2, 0.3, 0.4, 0, 0.3)
    start_params = get_random_params(2, 0.1, 0.2)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)
    objective = functools.partial(_ols_criterion, endog, exog)
    calculated = minimize_pounders_np(
        objective, start_params["value"].to_numpy(), bounds, max_iterations=25
    )

    assert (
        calculated["conv"] == "user defined" or calculated["conv"] == "step size small"
    )
    # the assert above guarantees "conv" is a string here, so compare the string
    # form; the original numeric comparison (conv == 8, TAO's "user defined"
    # code) could never match it.
    if calculated["conv"] == "user defined":
        assert calculated["sol"][0] == 25
Code example #5
def test_tol():
    np.random.seed(5477)
    true_params = get_random_params(2, 0.3, 0.4, 0, 0.3)
    start_params = get_random_params(2, 0.1, 0.2)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)
    objective = functools.partial(_ols_criterion, endog, exog)
    calculated = minimize_pounders_np(
        objective,
        start_params["value"].to_numpy(),
        bounds=bounds,
        gatol=1e-7,
        grtol=1e-7,
        gttol=1e-9,
    )

    # TAO convergence codes: 3 = gatol (absolute gradient norm) triggered,
    # 4 = grtol (gradient norm relative to the criterion value) triggered.
    # Accordingly, "sol" appears to hold the criterion value at index 1 and the
    # gradient norm at index 2. Note that the other tests compare "conv" against
    # strings; if the wrapper reports strings here too, neither branch triggers.
    if calculated["conv"] == 3:
        assert calculated["sol"][2] < 1e-8
    elif calculated["conv"] == 4:
        assert calculated["sol"][2] / calculated["sol"][1] < 1e-8
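
For background (PETSc/TAO semantics, not estimagic API): gatol bounds the absolute gradient norm, grtol the gradient norm relative to the criterion value, and gttol the gradient norm relative to its value at the start. A rough sketch of these stopping rules:

def tao_converged(g_norm, f_val, g0_norm, gatol=1e-7, grtol=1e-7, gttol=1e-9):
    # simplified mirror of TAO's three gradient-based stopping rules
    return (
        g_norm < gatol                  # absolute gradient norm
        or g_norm / abs(f_val) < grtol  # relative to the criterion value
        or g_norm / g0_norm < gttol     # relative to the initial gradient norm
    )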
Code example #6
def test_gatol():
    np.random.seed(5475)
    true_params = get_random_params(2, 0.3, 0.4, 0, 0.3)
    start_params = get_random_params(2, 0.1, 0.2)
    bounds = tuple(true_params[["lower", "upper"]].to_numpy().T)

    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)
    objective = functools.partial(_ols_criterion, endog, exog)
    calculated = minimize_pounders_np(
        objective,
        start_params["value"].to_numpy(),
        bounds=bounds,
        grtol=False,
        gttol=False,
    )

    assert (
        calculated["conv"] == "gatol below critical value"
        or calculated["conv"] == "step size small"
    )
    # as above, the assert guarantees "conv" is a string at this point, so
    # match the string form of TAO code 3 rather than the integer:
    if calculated["conv"] == "gatol below critical value":
        assert calculated["sol"][2] < 1e-4
Code example #7
def _internal_minimize(
    criterion,
    criterion_kwargs,
    params,
    internal_params,
    constraints,
    algorithm,
    algo_options,
    gradient,
    gradient_options,
    general_options,
    database,
    queue,
    fitness_factor,
):
    """Create the internal criterion function and minimize it.

    Args:
        criterion (function):
            Python function that takes a pandas DataFrame with parameters as the first
            argument and returns a scalar floating point value.

        criterion_kwargs (dict):
            additional keyword arguments for criterion

        params (pd.DataFrame):
            See :ref:`params`.

        internal_params (DataFrame):
            See :ref:`params`.

        constraints (list):
            List with constraint dictionaries. See the documentation on
            constraints for details.

        algorithm (str):
            specifies the optimization algorithm. See :ref:`list_of_algorithms`.

        algo_options (dict):
            algorithm specific configurations for the optimization

        gradient (callable or None):
            Gradient function.

        gradient_options (dict):
            Options for the gradient function.

        general_options (dict):
            additional configurations for the optimization

        database (sqlalchemy.sql.schema.MetaData):
            The engine that connects to the database can be accessed via
            ``database.bind``.

        queue (Queue):
            queue to which the fitness evaluations and params DataFrames are supplied.

        fitness_factor (float):
            multiplicative factor for the fitness displayed in the dashboard.
            Set to -1 for maximizations to plot the fitness that is being maximized.

    """
    logging_decorator = functools.partial(
        log_evaluation,
        database=database,
        tables=["params_history", "criterion_history", "comparison_plot"],
    )

    exception_decorator = functools.partial(
        handle_exceptions,
        database=database,
        params=params,
        constraints=constraints,
        start_params=internal_params,
        general_options=general_options,
    )

    internal_criterion = create_internal_criterion(
        criterion=criterion,
        params=params,
        constraints=constraints,
        criterion_kwargs=criterion_kwargs,
        logging_decorator=logging_decorator,
        exception_decorator=exception_decorator,
        queue=queue,
        fitness_factor=fitness_factor,
    )

    internal_gradient = create_internal_gradient(
        gradient=gradient,
        gradient_options=gradient_options,
        criterion=criterion,
        params=params,
        internal_params=internal_params,
        constraints=constraints,
        criterion_kwargs=criterion_kwargs,
        database=database,
        exception_decorator=exception_decorator,
        fitness_factor=fitness_factor,
        algorithm=algorithm,
        general_options=general_options,
    )

    current_dir_path = Path(__file__).resolve().parent
    with open(current_dir_path / "algo_dict.json") as j:
        algos = json.load(j)
    origin, algo_name = algorithm.split("_", 1)

    try:
        assert algo_name in algos[origin], f"Invalid algorithm requested: {algorithm}"
    except (AssertionError, KeyError):
        proposals = propose_algorithms(algorithm, algos)
        raise NotImplementedError(
            f"{algorithm} is not a valid choice. Did you mean one of {proposals}?"
        )

    bounds = tuple(params.query("_internal_free")[["lower", "upper"]].to_numpy().T)

    if database:
        update_scalar_field(database, "optimization_status", "running")

    if origin in ["nlopt", "pygmo"]:
        results = minimize_pygmo_np(
            internal_criterion,
            internal_params,
            bounds,
            origin,
            algo_name,
            algo_options,
            internal_gradient,
        )

    elif origin == "scipy":
        results = minimize_scipy_np(
            internal_criterion,
            internal_params,
            bounds=bounds,
            algo_name=algo_name,
            algo_options=algo_options,
            gradient=internal_gradient,
        )
    elif origin == "tao":
        crit_val = general_options["start_criterion_value"]
        len_criterion_value = 1 if np.isscalar(crit_val) else len(crit_val)
        results = minimize_pounders_np(
            internal_criterion,
            internal_params,
            bounds,
            n_errors=len_criterion_value,
            **algo_options,
        )
    else:
        raise NotImplementedError("Invalid algorithm requested.")

    if database:
        update_scalar_field(database, "optimization_status", results["status"])

    params = reparametrize_from_internal(
        internal=results["x"],
        fixed_values=params["_internal_fixed_value"].to_numpy(),
        pre_replacements=params["_pre_replacements"].to_numpy().astype(int),
        processed_constraints=constraints,
        post_replacements=params["_post_replacements"].to_numpy().astype(int),
        processed_params=params,
    )

    return results, params
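
The dispatch above relies on the convention that the algorithm string has the form "<origin>_<algo_name>" and is split at the first underscore. A quick illustration with a hypothetical algorithm name:

# "nlopt_bobyqa" is a hypothetical example string of the form "<origin>_<algo_name>"
origin, algo_name = "nlopt_bobyqa".split("_", 1)
assert (origin, algo_name) == ("nlopt", "bobyqa")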
Code example #8
def test_exception():
    np.random.seed(5478)
    with pytest.raises(Exception):
        minimize_pounders_np(_return_exception, 0)
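
_return_exception is not shown on this page; given how the test uses it, it is presumably a criterion that always raises, so the test checks that minimize_pounders_np propagates the error instead of swallowing it. A hypothetical reconstruction:

def _return_exception(x):
    # hypothetical helper: fail on every evaluation, regardless of the input
    raise Exception("criterion evaluation failed")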
Code example #9
File: optimize.py  Project: SofiaBadini/estimagic
def _internal_minimize(
    internal_criterion,
    internal_params,
    bounds,
    origin,
    algo_name,
    algo_options,
    internal_gradient,
    database,
    general_options,
):
    """Run one optimization of the transformed optimization problem.

    The transformed optimization problem is derived from the original problem,
    which consists of the user-supplied criterion, the params DataFrame,
    criterion_kwargs, constraints, and the gradient (if supplied). In addition,
    the transformed optimization problem provides sophisticated logging tools
    if activated by the user.

    The transformed problem can be solved by almost any optimizer package because:
        1. The only constraints are bounds on the parameters.
        2. The internal_criterion function takes a one-dimensional np.array as
            input.
        3. The internal criterion function returns a scalar value
            (except for the tao_pounders algorithm).

    Note that, because of the reparametrizations estimagic performs to implement
    constraints on behalf of the user, the internal params cannot be interpreted
    without reparametrizing them back to the full params DataFrame.

    Args:
        internal_criterion (func): The transformed criterion function.
            It takes the internal_params numpy array as its only argument and
            automatically enforces the constraints specified by the user. It
            calls the original criterion function after the necessary
            reparametrizations. If logging is activated, it automatically
            records every call to the specified database.
        internal_params (np.array): One-dimensional array with the values of
            the free parameters.
        bounds (tuple): Tuple of two arrays holding the lower and the upper
            bounds of the free internal parameters (see the construction in
            example #7).
        origin (str): Name of the package to which the algorithm belongs.
        algo_name (str): Name of the algorithm.
        algo_options (dict): Algorithm specific configurations.
        internal_gradient (func): The internal gradient.
        database (sqlalchemy.MetaData or False): The engine that connects to the
            database can be accessed via ``database.bind``. This is only used to
            record the start and end of the optimization.
        general_options (dict): Only used to pass the start_criterion_value in
            case the tao_pounders algorithm is used.

    Returns:
        results (dict): The harmonized result info dictionary of the optimizer
            backend, containing at least the solution vector ``x`` and a
            ``status`` entry.
    """
    if database:
        update_scalar_field(database, "optimization_status", "running")

    if origin in ["nlopt", "pygmo"]:
        results = minimize_pygmo_np(
            internal_criterion,
            internal_params,
            bounds,
            origin,
            algo_name,
            algo_options,
            internal_gradient,
        )
    elif origin == "scipy":
        results = minimize_scipy_np(
            internal_criterion,
            internal_params,
            bounds=bounds,
            algo_name=algo_name,
            algo_options=algo_options,
            gradient=internal_gradient,
        )
    elif origin == "tao":
        crit_val = general_options["start_criterion_value"]
        len_criterion_value = 1 if np.isscalar(crit_val) else len(crit_val)
        results = minimize_pounders_np(
            internal_criterion,
            internal_params,
            bounds,
            n_errors=len_criterion_value,
            **algo_options,
        )
    else:
        raise NotImplementedError("Invalid algorithm requested.")

    if database:
        update_scalar_field(database, "optimization_status", results["status"])

    return results
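
Inferred from how results is used here and in example #7: every minimize_*_np backend returns a harmonized dict carrying at least the solution vector and a status string. A schematic of that contract (real dicts carry more, backend-specific keys):

import numpy as np

results = {
    "x": np.array([0.12, 0.34]),  # minimizing internal parameter vector
    "status": "success",          # written to the database after the run
}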