Example #1
def test_wrong_options():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosenboth

    with pytest.raises(ValueError):
        Optimizer(
            fun, ub=ub, lb=lb, verbose=logging.INFO,
            options={'option_doesnt_exist': 1}
        )
    with pytest.raises(TypeError):
        Optimizer(
            fun, ub=ub, lb=lb, verbose=logging.INFO,
            options={Options.FATOL: 'not a number'}
        )

    # check we can pass floats for ints
    Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={Options.MAXITER: 1e4}
    )
    with pytest.raises(ValueError):
        Optimizer(
            fun, ub=ub, lb=lb, verbose=logging.INFO,
            options={Options.SUBSPACE_DIM: 'invalid_subspace'}
        )

    # check we can pass strings for enums
    Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={Options.SUBSPACE_DIM: '2D'}
    )
Example #2
def test_history():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = fletcher

    h5file = 'history.h5'

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0,
                 fides.Options.HISTORY_FILE: h5file},
        hessian_update=GNSBFGS(),
        resfun=True
    )
    opt.minimize(x0)
    opt.minimize(x0)
    with h5py.File(h5file, 'r') as f:
        assert len(f.keys()) == 2  # one group per optimization

    # create new optimizer to check we are appending
    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0,
                 fides.Options.HISTORY_FILE: h5file},
        hessian_update=GNSBFGS(),
        resfun=True
    )
    opt.minimize(x0)
    opt.minimize(x0)
    opt.minimize(x0)
    with h5py.File(h5file, 'r') as f:
        assert len(f.keys()) == 5
    os.remove(h5file)
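
The test above only counts the top-level groups in the HDF5 history file. Below is a minimal inspection sketch using plain h5py, not assuming any particular dataset layout inside the groups; the file name 'history.h5' is simply the one used above.

import h5py

# fides writes one group per call to Optimizer.minimize; list what each
# group contains instead of hard-coding dataset names.
with h5py.File('history.h5', 'r') as f:
    for run_name, run_group in f.items():
        print(run_name, list(run_group.keys()))
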
Example #3
def test_multistart(subspace_dim, stepback):
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosenboth

    opt = Optimizer(fun,
                    ub=ub,
                    lb=lb,
                    verbose=logging.INFO,
                    options={
                        fides.Options.FATOL: 0,
                        fides.Options.SUBSPACE_DIM: subspace_dim,
                        fides.Options.STEPBACK_STRAT: stepback,
                        fides.Options.REFINE_STEPBACK: False,
                        fides.Options.MAXITER: 1e3
                    })
    for _ in range(int(1e2)):
        x0 = np.random.random(x0.shape) * (ub - lb) + lb
        opt.minimize(x0)
        assert opt.fval >= opt.fval_min
        if opt.fval == opt.fval_min:
            assert np.isclose(opt.grad, opt.grad_min).all()
            assert np.isclose(opt.x, opt.x_min).all()
        if np.all(ub > 1):
            assert np.isclose(opt.x, [1, 1]).all()
            assert np.isclose(opt.grad, np.zeros(opt.x.shape), atol=1e-6).all()
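
For reference, a minimal multistart driver in the same spirit, assuming fun, lb and ub as defined in the test above; it tracks the best result explicitly instead of relying on the optimizer's internal bookkeeping.

import numpy as np
import fides
from fides import Optimizer

opt = Optimizer(fun, ub=ub, lb=lb,
                options={fides.Options.MAXITER: 1e3})
best_fval, best_x = np.inf, None
for _ in range(10):
    # draw a uniform random start within the box [lb, ub]
    x0 = np.random.random(lb.shape) * (ub - lb) + lb
    opt.minimize(x0)
    if opt.fval < best_fval:
        best_fval, best_x = opt.fval, opt.x.copy()
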
Example #4
def test_wrong_x():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosen

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0},
        hessian_update=DFP()
    )

    with pytest.raises(ValueError):
        opt.minimize(np.expand_dims(x0, 1))
Example #5
def test_wrong_dim(fun):
    lb, ub, x0 = finite_bounds_exlude_optimum()

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0,
                 fides.Options.MAXITER: 1e3}
    )

    with pytest.raises(ValueError):
        x0 = np.random.random(x0.shape) * (ub - lb) + lb
        opt.minimize(x0)
Example #6
def test_maxiter_maxtime():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosengrad

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0},
        hessian_update=DFP()
    )
    tstart = time.time()
    opt.minimize(x0)
    t_elapsed = time.time() - tstart

    maxiter = opt.iteration - 1
    maxtime = t_elapsed/10

    opt.options[fides.Options.MAXITER] = maxiter
    opt.minimize(x0)
    assert opt.exitflag == fides.ExitFlag.MAXITER
    del opt.options[fides.Options.MAXITER]

    opt.options[fides.Options.MAXTIME] = maxtime
    opt.minimize(x0)
    assert opt.exitflag == fides.ExitFlag.MAXTIME
    del opt.options[fides.Options.MAXTIME]
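
The iteration and time limits can also be set when the Optimizer is constructed, instead of mutating opt.options afterwards. A small sketch, reusing fun, lb, ub and x0 from the test above:

import fides
from fides import Optimizer

opt = Optimizer(fun, ub=ub, lb=lb,
                options={fides.Options.MAXITER: 100,
                         fides.Options.MAXTIME: 10.0})
opt.minimize(x0)
# exitflag records why the run stopped, e.g. iteration or time budget.
if opt.exitflag in (fides.ExitFlag.MAXITER, fides.ExitFlag.MAXTIME):
    print('stopped early:', opt.exitflag)
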
Example #7
def test_multistart_randomfail():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosenrandomfail

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        options={fides.Options.FATOL: 0,
                 fides.Options.MAXITER: 1e3}
    )

    for _ in range(int(1e2)):
        with pytest.raises(RuntimeError):
            x0 = np.random.random(x0.shape) * (ub - lb) + lb
            opt.minimize(x0)
Example #8
def test_wrong_options():
    lb, ub, x0 = finite_bounds_exlude_optimum()
    fun = rosenboth

    with pytest.raises(ValueError):
        Optimizer(
            fun, ub=ub, lb=lb, verbose=logging.INFO,
            options={'option_doesnt_exist': 1}
        )
Example #9
def test_minimize_hess_approx(bounds_and_init, fun, happ, subspace_dim,
                              stepback, refine):
    lb, ub, x0 = bounds_and_init

    opt = Optimizer(
        fun, ub=ub, lb=lb, verbose=logging.INFO,
        hessian_update=happ if happ is not None else None,
        options={fides.Options.FATOL: 0,
                 fides.Options.SUBSPACE_DIM: subspace_dim,
                 fides.Options.STEPBACK_STRAT: stepback,
                 fides.Options.MAXITER: 1e3,
                 fides.Options.REFINE_STEPBACK: refine}
    )
    opt.minimize(x0)
    assert opt.fval >= opt.fval_min
    if opt.fval == opt.fval_min:
        assert np.isclose(opt.grad, opt.grad_min).all()
        assert np.isclose(opt.x, opt.x_min).all()
    if np.all(ub > 1):
        assert np.isclose(opt.x, [1, 1]).all()
        assert np.isclose(opt.grad, np.zeros(opt.x.shape), atol=1e-6).all()
Example #10
def test_minimize_hess_approx(bounds_and_init, fun, happ, subspace_dim,
                              stepback):
    lb, ub, x0 = bounds_and_init

    if (x0 == 0).all() and fun is fletcher:
        x0 += 1

    kwargs = dict(
        fun=fun, ub=ub, lb=lb, verbose=logging.WARNING,
        hessian_update=happ if happ is not None else None,
        options={fides.Options.FATOL: 0,
                 fides.Options.FRTOL: 1e-12 if fun is fletcher else 1e-8,
                 fides.Options.SUBSPACE_DIM: subspace_dim,
                 fides.Options.STEPBACK_STRAT: stepback,
                 fides.Options.MAXITER: 2e2},
        resfun=happ.requires_resfun if happ is not None else False
    )
    if not (subspace_dim == fides.SubSpaceDim.STEIHAUG and
            stepback == fides.StepBackStrategy.REFINE):
        opt = Optimizer(**kwargs)
    else:
        with pytest.raises(ValueError):
            Optimizer(**kwargs)
        return
    opt.minimize(x0)
    assert opt.fval >= opt.fval_min

    if fun is fletcher:
        xsol = [0, 0]
    else:
        xsol = [1, 1]

    if opt.fval == opt.fval_min:
        assert np.isclose(opt.grad, opt.grad_min).all()
        assert np.isclose(opt.x, opt.x_min).all()
    if np.all(ub > 1) and not isinstance(happ, BB):  # bad broyden is bad
        assert np.isclose(opt.x, xsol,
                          atol=1e-4 if fun is fletcher else 1e-6).all()
        assert np.isclose(opt.grad, np.zeros(opt.x.shape),
                          atol=1e-4 if fun is fletcher else 1e-6).all()
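
Example #11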
def fides(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    *,
    hessian_update_strategy="bfgs",
    convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,
    convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,
    convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,
    convergence_absolute_gradient_tolerance=CONVERGENCE_ABSOLUTE_GRADIENT_TOLERANCE,
    convergence_relative_gradient_tolerance=CONVERGENCE_RELATIVE_GRADIENT_TOLERANCE,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS,
    stopping_max_seconds=np.inf,
    trustregion_initial_radius=1.0,
    trustregion_stepback_strategy="truncate",
    trustregion_subspace_dimension="full",
    trustregion_max_stepback_fraction=0.95,
    trustregion_decrease_threshold=0.25,
    trustregion_increase_threshold=0.75,
    trustregion_decrease_factor=0.25,
    trustregion_increase_factor=2.0,
):
    """Minimize a scalar function using the Fides Optimizer.

    For details see :ref:`fides_algorithm`.

    """
    if not IS_FIDES_INSTALLED:
        raise NotInstalledError(
            "The 'fides' algorithm requires the fides package to be installed. "
            "You can install it with `pip install fides>=0.7.4`.")

    fides_options = {
        "delta_init": trustregion_initial_radius,
        "eta": trustregion_increase_threshold,
        "fatol": convergence_absolute_criterion_tolerance,
        "frtol": convergence_relative_criterion_tolerance,
        "gamma1": trustregion_decrease_factor,
        "gamma2": trustregion_increase_factor,
        "gatol": convergence_absolute_gradient_tolerance,
        "grtol": convergence_relative_gradient_tolerance,
        "maxiter": stopping_max_iterations,
        "maxtime": stopping_max_seconds,
        "mu": trustregion_decrease_threshold,
        "stepback_strategy": trustregion_stepback_strategy,
        "subspace_solver": trustregion_subspace_dimension,
        "theta_max": trustregion_max_stepback_fraction,
        "xtol": convergence_absolute_params_tolerance,
    }

    hessian_instance = _create_hessian_updater_from_user_input(
        hessian_update_strategy)

    opt = Optimizer(
        fun=criterion_and_derivative,
        lb=lower_bounds,
        ub=upper_bounds,
        verbose=logging.ERROR,
        options=fides_options,
        funargs=None,
        hessian_update=hessian_instance,
        resfun=False,
    )
    raw_res = opt.minimize(x)
    res = _process_fides_res(raw_res, opt)
    return res
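
A hypothetical call to this wrapper, assuming that criterion_and_derivative(x) returns a (value, gradient) tuple, which is what fides' Optimizer expects here given resfun=False and a quasi-Newton Hessian update; the structure of the returned res depends on _process_fides_res.

import numpy as np

def rosenbrock_and_grad(x):
    # 2-D Rosenbrock function and its analytic gradient
    fval = 100.0 * (x[1] - x[0] ** 2) ** 2 + (1.0 - x[0]) ** 2
    grad = np.array([
        -400.0 * x[0] * (x[1] - x[0] ** 2) - 2.0 * (1.0 - x[0]),
        200.0 * (x[1] - x[0] ** 2),
    ])
    return fval, grad

res = fides(
    rosenbrock_and_grad,
    x=np.array([-1.0, 1.5]),
    lower_bounds=np.array([-2.0, -2.0]),
    upper_bounds=np.array([2.0, 2.0]),
)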