Example #1
def scipy_trust_constr(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    *,
    convergence_absolute_gradient_tolerance=1e-08,
    convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS,
    trustregion_initial_radius=None,
):
    """Minimize a scalar function of one or more variables subject to constraints.

    For details see :ref:`list_of_scipy_algorithms`.

    """
    algo_info = DEFAULT_ALGO_INFO.copy()
    algo_info["name"] = "scipy_trust_constr"
    func = functools.partial(
        criterion_and_derivative,
        task="criterion",
        algorithm_info=algo_info,
    )
    gradient = functools.partial(
        criterion_and_derivative,
        task="derivative",
        algorithm_info=algo_info,
    )

    if trustregion_initial_radius is None:
        trustregion_initial_radius = calculate_trustregion_initial_radius(x)

    options = {
        "gtol": convergence_absolute_gradient_tolerance,
        "maxiter": stopping_max_iterations,
        "xtol": convergence_relative_params_tolerance,
        "initial_tr_radius": trustregion_initial_radius,
        # "grad" is not set here because the gradient is already supplied via the
        # "jac" argument passed directly to scipy.optimize.minimize.
    }

    res = scipy.optimize.minimize(
        fun=func,
        jac=gradient,
        x0=x,
        method="trust-constr",
        bounds=get_scipy_bounds(lower_bounds, upper_bounds),
        options=options,
    )

    return process_scipy_result(res)
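
A minimal, standalone sketch of the scipy call this wrapper ultimately issues, using a toy quadratic criterion, illustrative bounds, and hand-picked tolerances (none of these are part of the library; the option names gtol, xtol, maxiter, and initial_tr_radius are the ones scipy documents for trust-constr):

import numpy as np
import scipy.optimize


def sphere(x):
    # Toy criterion: squared Euclidean norm.
    return float(x @ x)


def sphere_gradient(x):
    return 2.0 * x


res = scipy.optimize.minimize(
    fun=sphere,
    jac=sphere_gradient,
    x0=np.array([1.5, -0.5]),
    method="trust-constr",
    bounds=scipy.optimize.Bounds(np.full(2, -2.0), np.full(2, 2.0)),
    options={"gtol": 1e-8, "xtol": 1e-8, "maxiter": 100, "initial_tr_radius": 0.15},
)
print(res.x)  # close to the unconstrained minimum at the origin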
Example #2
def scipy_cobyla(
    criterion_and_derivative,
    x,
    *,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS,
    convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,
    trustregion_initial_radius=None,
):
    """Minimize a scalar function of one or more variables using the COBYLA algorithm.

    For details see :ref:`list_of_scipy_algorithms`.

    """
    algo_info = DEFAULT_ALGO_INFO.copy()
    algo_info["name"] = "scipy_cobyla"

    func = functools.partial(
        criterion_and_derivative,
        task="criterion",
        algorithm_info=algo_info,
    )

    if trustregion_initial_radius is None:
        trustregion_initial_radius = calculate_trustregion_initial_radius(x)

    options = {
        "maxiter": stopping_max_iterations,
        "rhobeg": trustregion_initial_radius,
    }

    res = scipy.optimize.minimize(
        fun=func,
        x0=x,
        method="COBYLA",
        options=options,
        tol=convergence_relative_params_tolerance,
    )

    return process_scipy_result(res)
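
For comparison, a toy sketch of the underlying COBYLA call: the method is derivative-free, so only the criterion is passed, "rhobeg" sets the initial trust region radius, and "tol" plays the role of the relative parameter tolerance used above (the criterion and values below are illustrative only):

import numpy as np
import scipy.optimize


def sphere(x):
    return float(x @ x)


res = scipy.optimize.minimize(
    fun=sphere,
    x0=np.array([1.5, -0.5]),
    method="COBYLA",
    options={"maxiter": 200, "rhobeg": 0.15},
    tol=1e-8,
)
print(res.x)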
Example #3
def tao_pounders(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    *,
    convergence_absolute_gradient_tolerance=CONVERGENCE_ABSOLUTE_GRADIENT_TOLERANCE,
    convergence_relative_gradient_tolerance=CONVERGENCE_RELATIVE_GRADIENT_TOLERANCE,
    convergence_scaled_gradient_tolerance=CONVERGENCE_SCALED_GRADIENT_TOLERANCE,
    trustregion_initial_radius=None,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS,
):
    r"""Minimize a function using the POUNDERs algorithm.

    For details see :ref:`tao_algorithm`.
    """
    if not IS_PETSC4PY_INSTALLED:
        raise NotImplementedError(
            "The petsc4py package is not installed and required for 'tao_pounders'. If "
            "you are using Linux or MacOS, install the package with 'conda install -c "
            "conda-forge petsc4py'. The package is not available on Windows."
        )

    func = functools.partial(
        criterion_and_derivative,
        task="criterion",
        algorithm_info=POUNDERS_ALGO_INFO,
    )

    x = _initialise_petsc_array(x)
    # We need to know the number of contributions of the criterion value to allocate the
    # array.
    n_errors = len(
        criterion_and_derivative.keywords["first_criterion_evaluation"]
        ["output"]["root_contributions"])
    residuals_out = _initialise_petsc_array(n_errors)

    # Create the solver object.
    tao = PETSc.TAO().create(PETSc.COMM_WORLD)

    # Set the solver type.
    tao.setType("pounders")

    tao.setFromOptions()

    def func_tao(tao, x, resid_out):
        """Evaluate the criterion and write the residuals into a PETSc object.

        This interface is required by the pounders solver from TAO.

        Args:
            tao: The TAO object created for the optimization task.
            x (PETSc.Vec): Current parameter values.
            resid_out (PETSc.Vec): PETSc object in which the residuals are stored.

        """
        resid_out.array = func(x.array)

    # Set the procedure for calculating the residuals. This part has to be adapted if
    # solvers other than pounders should be supported.
    tao.setResidual(func_tao, residuals_out)

    if trustregion_initial_radius is None:
        # x is already a PETSc Vec at this point, so compute the radius from its
        # underlying numpy array.
        trustregion_initial_radius = calculate_trustregion_initial_radius(x.array)
    elif trustregion_initial_radius <= 0:
        raise ValueError("The initial trust region radius must be > 0.")
    tao.setInitialTrustRegionRadius(trustregion_initial_radius)

    # Add bounds.
    lower_bounds = _initialise_petsc_array(lower_bounds)
    upper_bounds = _initialise_petsc_array(upper_bounds)
    tao.setVariableBounds(lower_bounds, upper_bounds)

    # Put the starting values into the container and pass them to the optimizer.
    tao.setInitial(x)

    # Obtain tolerances for the convergence criteria. Tolerances that are turned off
    # (i.e. falsy) are passed to PETSc as -1, so the corresponding default test can
    # never trigger.
    default_gatol = (
        convergence_absolute_gradient_tolerance
        if convergence_absolute_gradient_tolerance
        else -1
    )
    default_gttol = (
        convergence_scaled_gradient_tolerance
        if convergence_scaled_gradient_tolerance
        else -1
    )
    default_grtol = (
        convergence_relative_gradient_tolerance
        if convergence_relative_gradient_tolerance
        else -1
    )
    # Set tolerances for the default convergence tests.
    tao.setTolerances(
        gatol=default_gatol,
        grtol=default_grtol,
        gttol=default_gttol,
    )

    # Set user-defined convergence tests. Beware that specifying multiple tests can
    # overwrite others or lead to unclear behavior.
    if stopping_max_iterations is not None:
        tao.setConvergenceTest(
            functools.partial(_max_iters, stopping_max_iterations))
    elif (convergence_scaled_gradient_tolerance is False
          and convergence_absolute_gradient_tolerance is False):
        tao.setConvergenceTest(
            functools.partial(_grtol_conv,
                              convergence_relative_gradient_tolerance))
    elif (convergence_relative_gradient_tolerance is False
          and convergence_scaled_gradient_tolerance is False):
        tao.setConvergenceTest(
            functools.partial(_gatol_conv,
                              convergence_absolute_gradient_tolerance))
    elif convergence_scaled_gradient_tolerance is False:
        tao.setConvergenceTest(
            functools.partial(
                _grtol_gatol_conv,
                convergence_relative_gradient_tolerance,
                convergence_absolute_gradient_tolerance,
            ))

    # Run the problem.
    tao.solve()

    results = _process_pounders_results(residuals_out, tao)

    # Destroy petsc objects for memory reasons.
    for obj in [tao, x, residuals_out, lower_bounds, upper_bounds]:
        obj.destroy()

    return results
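
A self-contained sketch of the petsc4py/TAO residual pattern used above, applied to a toy least-squares problem. It assumes petsc4py is installed and that the version in use exposes setResidual (older releases name this differently); the residual function and starting values are illustrative only:

import numpy as np
from petsc4py import PETSc

n_params, n_residuals = 2, 3

x = PETSc.Vec().createSeq(n_params)
x.array = np.zeros(n_params)
residuals = PETSc.Vec().createSeq(n_residuals)


def residual(tao, x, f):
    # Write the residual vector into the PETSc object f, mirroring func_tao above.
    a, b = x.array
    f.array = np.array([a - 1.0, b + 0.5, 0.5 * (a - b) - 1.0])


tao = PETSc.TAO().create(PETSc.COMM_WORLD)
tao.setType("pounders")
tao.setFromOptions()
tao.setResidual(residual, residuals)
tao.setInitial(x)
tao.solve()
print(tao.getSolution().array)

# Destroy petsc objects once the solution has been read off.
for obj in [tao, x, residuals]:
    obj.destroy()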
Example #4
def test_initial_trust_radius_large_x():
    x = np.array([20.5, 10])
    expected = 2.05
    res = calculate_trustregion_initial_radius(x)
    assert expected == pytest.approx(res, abs=1e-8)
Example #5
def test_initial_trust_radius_small_x():
    x = np.array([0.01, 0.01])
    expected = 0.1
    res = calculate_trustregion_initial_radius(x)
    assert expected == pytest.approx(res, abs=1e-8)
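
Together these two tests pin down the rule the examples above rely on for the default initial radius: roughly one tenth of the largest parameter magnitude, with a floor of 0.1. A hedged reconstruction that is consistent with the expected values (the library's actual implementation may differ in detail):

import numpy as np


def calculate_trustregion_initial_radius(x):
    # 20.5 -> 2.05, while 0.01 -> 0.1 because the floor applies, matching the tests.
    return max(0.1 * float(np.max(np.abs(x))), 0.1)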
Example #6
def _create_nag_advanced_options(
    x,
    noise_multiplicative_level,
    noise_additive_level,
    trustregion_initial_radius,
    noise_n_evals_per_point,
    convergence_noise_corrected_criterion_tolerance,
    trustregion_reset_options,
    convergence_slow_progress,
    interpolation_rounding_error,
    threshold_for_safety_step,
    clip_criterion_if_overflowing,
    initial_directions,
    random_directions_orthogonal,
    trustregion_precondition_interpolation,
    trustregion_threshold_successful,
    trustregion_threshold_very_successful,
    trustregion_shrinking_factor_not_successful,
    trustregion_expansion_factor_successful,
    trustregion_expansion_factor_very_successful,
    trustregion_shrinking_factor_lower_radius,
    trustregion_shrinking_factor_upper_radius,
):
    if noise_multiplicative_level is not None and noise_additive_level is not None:
        raise ValueError(
            "You cannot specify both multiplicative and additive noise.")
    if trustregion_initial_radius is None:
        trustregion_initial_radius = calculate_trustregion_initial_radius(x)
    # -np.inf as a default leads to errors when building the documentation with sphinx.
    noise_n_evals_per_point = _change_evals_per_point_interface(
        noise_n_evals_per_point)
    trustregion_reset_options = _build_options_dict(
        user_input=trustregion_reset_options,
        default_options=RESET_OPTIONS,
    )
    if trustregion_reset_options["reset_type"] not in ["soft", "hard"]:
        raise ValueError(
            "reset_type in the trustregion_reset_options must be soft or hard."
        )
    if initial_directions not in ["coordinate", "random"]:
        raise ValueError("initial_directions must be either 'coordinate' or 'random'.")
    convergence_slow_progress = _build_options_dict(
        user_input=convergence_slow_progress,
        default_options=CONVERGENCE_SLOW_PROGRESS,
    )

    is_noisy = bool(noise_additive_level or noise_multiplicative_level)

    advanced_options = {
        "general.rounding_error_constant": interpolation_rounding_error,
        "general.safety_step_thresh": threshold_for_safety_step,
        "general.check_objfun_for_overflow": clip_criterion_if_overflowing,
        "tr_radius.eta1": trustregion_threshold_successful,
        "tr_radius.eta2": trustregion_threshold_very_successful,
        "tr_radius.gamma_dec": trustregion_shrinking_factor_not_successful,
        "tr_radius.gamma_inc": trustregion_expansion_factor_successful,
        "tr_radius.gamma_inc_overline": trustregion_expansion_factor_very_successful,
        "tr_radius.alpha1": trustregion_shrinking_factor_lower_radius,
        "tr_radius.alpha2": trustregion_shrinking_factor_upper_radius,
        "init.random_initial_directions": initial_directions == "random",
        "init.random_directions_make_orthogonal": random_directions_orthogonal,
        "slow.thresh_for_slow": convergence_slow_progress[
            "threshold_to_characterize_as_slow"
        ],
        "slow.max_slow_iters": convergence_slow_progress[
            "max_insufficient_improvements"
        ],
        "slow.history_for_slow": convergence_slow_progress["comparison_period"],
        "noise.multiplicative_noise_level": noise_multiplicative_level,
        "noise.additive_noise_level": noise_additive_level,
        "noise.quit_on_noise_level": (
            convergence_noise_corrected_criterion_tolerance > 0 and is_noisy
        ),
        "noise.scale_factor_for_quit": convergence_noise_corrected_criterion_tolerance,
        "interpolation.precondition": trustregion_precondition_interpolation,
        "restarts.use_restarts": trustregion_reset_options["use_resets"],
        "restarts.max_unsuccessful_restarts": trustregion_reset_options[
            "max_consecutive_unsuccessful_resets"
        ],
        "restarts.rhoend_scale": trustregion_reset_options[
            "minimal_trustregion_radius_tolerance_scaling_at_reset"
        ],
        "restarts.use_soft_restarts": trustregion_reset_options["reset_type"] == "soft",
        "restarts.soft.move_xk": trustregion_reset_options["move_center_at_soft_reset"],
        "restarts.soft.max_fake_successful_steps": trustregion_reset_options[
            "max_iterations_without_new_best_after_soft_reset"
        ],
        "restarts.auto_detect": trustregion_reset_options["auto_detect"],
        "restarts.auto_detect.history": trustregion_reset_options[
            "auto_detect_history"
        ],
        "restarts.auto_detect.min_correl": trustregion_reset_options[
            "auto_detect_min_correlations"
        ],
        "restarts.soft.num_geom_steps": trustregion_reset_options[
            "points_to_replace_at_soft_reset"
        ],
    }

    return advanced_options, trustregion_reset_options
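
The function above leans on _build_options_dict to merge user-supplied reset and slow-progress options with library defaults. A hypothetical sketch of how such a merge helper could look (not the library's actual implementation):

def _build_options_dict(user_input, default_options):
    # Start from the defaults and overwrite them with user-supplied entries,
    # rejecting keys that have no default.
    options = default_options.copy()
    user_input = {} if user_input is None else user_input
    invalid = set(user_input) - set(default_options)
    if invalid:
        raise ValueError(f"Invalid options passed: {sorted(invalid)}")
    options.update(user_input)
    return options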