def fmin_cma(objective_function, x0, xL, xU, sigma0=0.01, maxfun=1000):
    """ Minimize objective function in hypercube using CMA-ES.

    This function optimizes an objective function in a search space bounded by
    a hypercube. One corner of the hypercube is given by xL and the opposite by
    xU. The initial mean of the search distribution is given by x0. The search
    space is scaled internally to the unit hypercube to accommodate CMA-ES.

    Parameters
    ----------
    objective_function : callable
        The objective function to be minimized. Must return a scalar value.

    x0 : array-like
        Initial mean of the search distribution.

    xL : array-like
        Lower, left corner of the bounding hypercube.

    xU : array-like
        Upper, right corner of the bounding hypercube.

    sigma0 : float, default=0.01
        Initial variance of search distribution of CMA-ES.

    maxfun : int, default=1000
        Maximum number of evaluations of the objective function after which
        the optimization is stopped.

    Returns
    -------
    x_opt : array-like
        The minimum of objective function identified by CMA-ES.

    Raises
    ------
    ImportError
        If the optional dependency ``bolero`` is not installed.
    """
    try:
        from bolero.optimizer import fmin
    except ImportError as err:
        # Raise the conventional exception type (instead of a bare Exception)
        # so callers can handle a missing optional dependency specifically,
        # and chain the original cause for debuggability.
        raise ImportError(
            "'cmaes' optimizer requires the package bolero.") from err

    x0 = np.asarray(x0)
    xL = np.asarray(xL)
    xU = np.asarray(xU)
    # Scale parameters such that search space is a unit-hypercube. Suppress
    # divide-by-zero / invalid-value warnings for degenerate dimensions where
    # xU == xL; the resulting non-finite entries are reset to 0 right after.
    with np.errstate(divide="ignore", invalid="ignore"):
        x0 = (x0 - xL) / (xU - xL)
    x0[~np.isfinite(x0)] = 0  # Deal with situations where xU == xL
    bounds = np.array([np.zeros_like(x0), np.ones_like(x0)]).T

    # Rescale back to the original hypercube inside the objective function.
    def scaled_objective_function(x):
        return objective_function(x * (xU - xL) + xL)

    # Minimize objective function using CMA-ES. Restart if no valid (finite)
    # solution is found.
    res = (None, np.inf)
    while not np.isfinite(res[1]):
        res = fmin(scaled_objective_function, x0=x0, variance=sigma0,
                   bounds=bounds, maxfun=maxfun)
        x_opt = res[0]
    # Return rescaled solution
    return x_opt * (xU - xL) + xL
# Example no. 2
# 0
def test_cmaes_maximize_eval_initial_x_best():
    """The initial point is the optimum; eval_initial_x must report it."""
    def objective(params):
        # Only the origin scores 0.0; every other point scores -1.0.
        return 0.0 if np.all(params == 0.0) else -1.0

    _, f_best = fmin(objective, cma_type="standard", x0=np.zeros(2),
                     random_state=0, maxfun=300, maximize=True,
                     eval_initial_x=True)
    assert_equal(f_best, 0.0)
# Example no. 3
# 0
def fmin_cma(objective_function, x0, xL, xU, sigma0=0.01, maxfun=1000):
    """ Minimize objective function in hypercube using CMA-ES.

    This function optimizes an objective function in a search space bounded by
    a hypercube. One corner of the hypercube is given by xL and the opposite by
    xU. The initial mean of the search distribution is given by x0. The search
    space is scaled internally to the unit hypercube to accommodate CMA-ES.

    Parameters
    ----------
    objective_function : callable
        The objective function to be minimized. Must return a scalar value.

    x0 : array-like
        Initial mean of the search distribution.

    xL : array-like
        Lower, left corner of the bounding hypercube.

    xU : array-like
        Upper, right corner of the bounding hypercube.

    sigma0 : float, default=0.01
        Initial variance of search distribution of CMA-ES.

    maxfun : int, default=1000
        Maximum number of evaluations of the objective function after which
        the optimization is stopped.

    Returns
    -------
    x_opt : array-like
        The minimum of objective function identified by CMA-ES.

    Raises
    ------
    ImportError
        If the optional dependency ``bolero`` is not installed.
    """
    try:
        from bolero.optimizer import fmin
    except ImportError as err:
        # Raise the conventional exception type (instead of a bare Exception)
        # so callers can handle a missing optional dependency specifically,
        # and chain the original cause for debuggability.
        raise ImportError(
            "'cmaes' optimizer requires the package bolero.") from err

    x0 = np.asarray(x0)
    xL = np.asarray(xL)
    xU = np.asarray(xU)
    # Scale parameters such that search space is a unit-hypercube. Suppress
    # divide-by-zero / invalid-value warnings for degenerate dimensions where
    # xU == xL; the resulting non-finite entries are reset to 0 right after.
    with np.errstate(divide="ignore", invalid="ignore"):
        x0 = (x0 - xL) / (xU - xL)
    x0[~np.isfinite(x0)] = 0  # Deal with situations where xU == xL
    bounds = np.array([np.zeros_like(x0), np.ones_like(x0)]).T

    # Rescale back to the original hypercube inside the objective function.
    def scaled_objective_function(x):
        return objective_function(x * (xU - xL) + xL)

    # Minimize objective function using CMA-ES. Restart if no valid (finite)
    # solution is found.
    res = (None, np.inf)
    while not np.isfinite(res[1]):
        res = fmin(scaled_objective_function,
                   x0=x0,
                   variance=sigma0,
                   bounds=bounds,
                   maxfun=maxfun)
        x_opt = res[0]
    # Return rescaled solution
    return x_opt * (xU - xL) + xL
# Example no. 4
# 0
def test_cmaes_maximize_eval_initial_x():
    """Maximization started at ones converges near the optimum at zero."""
    def neg_norm(params):
        return -np.linalg.norm(params)

    _, f_best = fmin(neg_norm, cma_type="standard", x0=np.ones(2),
                     random_state=0, maxfun=300, maximize=True,
                     eval_initial_x=True)
    assert_greater(f_best, -1e-5)
# Example no. 5
# 0
def test_cmaes_maximize():
    """Maximizing the negated norm should drive f close to 0."""
    def neg_norm(params):
        return -np.linalg.norm(params)

    _, f_best = fmin(neg_norm, cma_type="standard", x0=np.zeros(2),
                     random_state=0, maxfun=300, maximize=True)
    assert_greater(f_best, -1e-5)
# Example no. 6
# 0
def test_cmaes_minimize():
    """Standard CMA-ES minimizes the Euclidean norm to near zero."""
    def norm(params):
        return np.linalg.norm(params)

    _, f_best = fmin(norm, cma_type="standard", x0=np.zeros(2),
                     random_state=0, maxfun=300)
    assert_less(f_best, 1e-5)
# Example no. 7
# 0
def test_bipop_cmaes():
    """BIPOP restart variant also minimizes the norm to near zero."""
    def norm(params):
        return np.linalg.norm(params)

    _, f_best = fmin(norm, cma_type="bipop", x0=np.zeros(2),
                     random_state=0, maxfun=300)
    assert_less(f_best, 1e-5)
# Example no. 8
# 0
def test_cmaes_minimize_many_params():
    """In 30 dimensions with a small budget, only coarse convergence is expected."""
    def norm(params):
        return np.linalg.norm(params)

    _, f_best = fmin(norm, cma_type="standard", x0=np.zeros(30),
                     random_state=0, maxfun=500)
    assert_less(f_best, 1.0)