Example #1
        def wrapper_log_gradient_status(params, *args, **kwargs):
            criterion_value, _ = func(params, *args, **kwargs)

            if database:
                c = next(counter)
                if n_gradient_evaluations is None:
                    # Without a fixed number of evaluations per gradient,
                    # report the raw evaluation count.
                    status = c
                else:
                    # Report the completed share of the current gradient
                    # evaluation; map a finished pass (share 0) to 1.
                    status = (c % n_gradient_evaluations) / n_gradient_evaluations
                    status = 1 if status == 0 else status
                update_scalar_field(database, "gradient_status", status)

            return criterion_value
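
The wrapper above closes over func, counter, database, and n_gradient_evaluations, so it is evidently the inner function of a decorator factory. Below is a minimal sketch of such an enclosing factory; the factory name, its signature, and the itertools.count counter are assumptions, not part of the original snippet.

import functools
import itertools


def log_gradient_status(func, database, n_gradient_evaluations=None):
    # Hypothetical factory wrapping a gradient evaluation with status logging.
    counter = itertools.count(1)  # supplies the free variable ``counter``

    @functools.wraps(func)
    def wrapper_log_gradient_status(params, *args, **kwargs):
        ...  # body exactly as shown above

    return wrapper_log_gradient_status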
Example #2
def test_handle_exception(database, monkeypatch):
    def mock_execute_write_statements(statements, database):
        # Reroute every write into the exception handler so that
        # update_scalar_field must fall back to emitting a warning.
        if not isinstance(statements, (list, tuple)):
            statements = [statements]
        exception_info = "Mocked"
        upd_db._handle_exception(statements, database, exception_info)

    monkeypatch.setattr(upd_db, "_execute_write_statements",
                        mock_execute_write_statements)

    with pytest.warns(Warning):
        upd_db.update_scalar_field(database=database,
                                   table="optimization_status",
                                   value="failure")
Example #3
def _internal_minimize(
    criterion,
    criterion_kwargs,
    params,
    internal_params,
    constraints,
    algorithm,
    algo_options,
    gradient,
    gradient_options,
    general_options,
    database,
    queue,
    fitness_factor,
):
    """Create the internal criterion function and minimize it.

    Args:
        criterion (function): Python function that takes a pandas DataFrame with
            parameters as the first argument and returns a scalar floating point
            value.
        criterion_kwargs (dict): Additional keyword arguments for criterion.
        params (pd.DataFrame): See :ref:`params`.
        internal_params (DataFrame): See :ref:`params`.
        constraints (list): List with constraint dictionaries. See
            :ref:`constraints` for details.
        algorithm (str): Specifies the optimization algorithm. See
            :ref:`list_of_algorithms`.
        algo_options (dict): Algorithm specific configurations for the
            optimization.
        gradient (callable or None): Gradient function.
        gradient_options (dict): Options for the gradient function.
        general_options (dict): Additional configurations for the optimization.
        database (sqlalchemy.sql.schema.MetaData): The engine that connects to the
            database can be accessed via ``database.bind``.
        queue (Queue): Queue to which the fitness evaluations and params
            DataFrames are supplied.
        fitness_factor (float): Multiplicative factor for the fitness displayed
            in the dashboard. Set to -1 for maximizations to plot the fitness
            that is being maximized.

    """
    logging_decorator = functools.partial(
        log_evaluation,
        database=database,
        tables=["params_history", "criterion_history", "comparison_plot"],
    )

    exception_decorator = functools.partial(
        handle_exceptions,
        database=database,
        params=params,
        constraints=constraints,
        start_params=internal_params,
        general_options=general_options,
    )

    internal_criterion = create_internal_criterion(
        criterion=criterion,
        params=params,
        constraints=constraints,
        criterion_kwargs=criterion_kwargs,
        logging_decorator=logging_decorator,
        exception_decorator=exception_decorator,
        queue=queue,
        fitness_factor=fitness_factor,
    )

    internal_gradient = create_internal_gradient(
        gradient=gradient,
        gradient_options=gradient_options,
        criterion=criterion,
        params=params,
        internal_params=internal_params,
        constraints=constraints,
        criterion_kwargs=criterion_kwargs,
        database=database,
        exception_decorator=exception_decorator,
        fitness_factor=fitness_factor,
        algorithm=algorithm,
        general_options=general_options,
    )

    # Load the mapping from package of origin to supported algorithm names.
    current_dir_path = Path(__file__).resolve().parent
    with open(current_dir_path / "algo_dict.json") as j:
        algos = json.load(j)
    origin, algo_name = algorithm.split("_", 1)

    try:
        assert algo_name in algos[origin], f"Invalid algorithm requested: {algorithm}"
    except (AssertionError, KeyError):
        proposals = propose_algorithms(algorithm, algos)
        raise NotImplementedError(
            f"{algorithm} is not a valid choice. Did you mean one of {proposals}?"
        )

    # Lower and upper bounds of the free internal parameters as two arrays.
    bounds = tuple(params.query("_internal_free")[["lower", "upper"]].to_numpy().T)

    if database:
        update_scalar_field(database, "optimization_status", "running")

    if origin in ["nlopt", "pygmo"]:
        results = minimize_pygmo_np(
            internal_criterion,
            internal_params,
            bounds,
            origin,
            algo_name,
            algo_options,
            internal_gradient,
        )

    elif origin == "scipy":
        results = minimize_scipy_np(
            internal_criterion,
            internal_params,
            bounds=bounds,
            algo_name=algo_name,
            algo_options=algo_options,
            gradient=internal_gradient,
        )
    elif origin == "tao":
        crit_val = general_options["start_criterion_value"]
        len_criterion_value = 1 if np.isscalar(crit_val) else len(crit_val)
        results = minimize_pounders_np(
            internal_criterion,
            internal_params,
            bounds,
            n_errors=len_criterion_value,
            **algo_options,
        )
    else:
        raise NotImplementedError("Invalid algorithm requested.")

    if database:
        update_scalar_field(database, "optimization_status", results["status"])

    params = reparametrize_from_internal(
        internal=results["x"],
        fixed_values=params["_internal_fixed_value"].to_numpy(),
        pre_replacements=params["_pre_replacements"].to_numpy().astype(int),
        processed_constraints=constraints,
        post_replacements=params["_post_replacements"].to_numpy().astype(int),
        processed_params=params,
    )

    return results, params
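
The availability check above implies that algo_dict.json maps every origin to a list of algorithm names, and that full algorithm names have the form ``<origin>_<algo_name>``. The sketch below shows the implied structure; the concrete entries are illustrative assumptions, not the file's actual contents.

# Implied structure of algo_dict.json, shown as the Python dict it loads into.
algos = {
    "scipy": ["lbfgsb", "slsqp"],  # names are illustrative assumptions
    "nlopt": ["bobyqa"],
    "pygmo": ["de"],
    "tao": ["pounders"],
}

# "scipy_lbfgsb" splits into origin "scipy" and algo_name "lbfgsb".
origin, algo_name = "scipy_lbfgsb".split("_", 1)
assert algo_name in algos[origin]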
Example #4
def test_update_scalar_field(database):
    upd_db.update_scalar_field(database=database,
                               table="optimization_status",
                               value="failure")
    assert read_scalar_field(database, "optimization_status") == "failure"
Example #5
def _internal_minimize(
    internal_criterion,
    internal_params,
    bounds,
    origin,
    algo_name,
    algo_options,
    internal_gradient,
    database,
    general_options,
):
    """Run one optimization of the transformed optimization problem.

    The transformed optimization problem is derived from the original problem,
    which consists of the user-supplied criterion, the params DataFrame,
    criterion_kwargs, constraints and gradient (if supplied). In addition, the
    transformed optimization problem provides sophisticated logging tools if
    activated by the user.

    The transformed problem can be solved by almost any optimizer package because:
        1. The only constraints are bounds on the parameters.
        2. The internal_criterion function takes a one-dimensional np.array as
            input.
        3. The internal criterion function returns a scalar value
            (except for the case of the tao_pounders algorithm).
    (A toy criterion satisfying these properties is sketched after this example.)

    Note that because of the reparametrizations estimagic performs to implement
    constraints on behalf of the user, the internal params cannot be interpreted
    without reparametrizing them back to the full params DataFrame.

    Args:
        internal_criterion (func): The transformed criterion function.
            It takes the internal_params numpy array as its only argument,
            automatically enforcing constraints specified by the user. It calls
            the original criterion function after the necessary
            reparametrizations. If logging is activated, it automatically records
            every call to the specified database.
        internal_params (np.array): One-dimensional array with the values of
            the free parameters.
        bounds (tuple): Tuple ``(lower, upper)`` of two arrays with the bounds of
            the free internal parameters.
        origin (str): Name of the package to which the algorithm belongs.
        algo_name (str): Name of the algorithm.
        algo_options (dict): Algorithm specific configurations.
        internal_gradient (func): The internal gradient.
        database (sqlalchemy.MetaData or False): The engine that connects to the
            database can be accessed via ``database.bind``. This is only used to
            record the start and end of the optimization.
        general_options (dict): Only used to pass the start_criterion_value in
            case the tao_pounders algorithm is used.

    Returns:
        results (dict): Harmonized result info dictionary with, among other
            entries, the ``"status"`` of the optimization.

    """
    if database:
        update_scalar_field(database, "optimization_status", "running")

    if origin in ["nlopt", "pygmo"]:
        results = minimize_pygmo_np(
            internal_criterion,
            internal_params,
            bounds,
            origin,
            algo_name,
            algo_options,
            internal_gradient,
        )
    elif origin == "scipy":
        results = minimize_scipy_np(
            internal_criterion,
            internal_params,
            bounds=bounds,
            algo_name=algo_name,
            algo_options=algo_options,
            gradient=internal_gradient,
        )
    elif origin == "tao":
        crit_val = general_options["start_criterion_value"]
        len_criterion_value = 1 if np.isscalar(crit_val) else len(crit_val)
        results = minimize_pounders_np(
            internal_criterion,
            internal_params,
            bounds,
            n_errors=len_criterion_value,
            **algo_options,
        )
    else:
        raise NotImplementedError("Invalid algorithm requested.")

    if database:
        update_scalar_field(database, "optimization_status", results["status"])

    return results
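
As a concrete illustration of the three properties listed in the docstring above, here is a toy internal criterion: it takes a one-dimensional np.array, returns a scalar, and is constrained only through bounds handled by the optimizer. Everything in this sketch is an illustrative assumption except the interface it demonstrates.

import numpy as np


def toy_internal_criterion(x):
    # One 1-d array in, one scalar out; no constraints beyond bounds.
    return float(np.sum(x ** 2))


internal_params = np.array([0.5, -1.0, 2.0])  # free parameters only
bounds = (np.full(3, -5.0), np.full(3, 5.0))  # (lower, upper) arrays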