Example #1
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """
    Tests whether the optimizer runs properly during the random
    initialization phase and beyond

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.

    * `acq_func` [string]:
        Name of the acquisition function to use.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_func=acq_func,
        acq_optimizer='sampling',
        random_state=0
    )

    # invalid arguments to ask() should raise ValueError
    assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
    assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
    assert_raises(ValueError, optimizer.ask, {"n_points": 0})

    for i in range(n_steps):
        x = optimizer.ask(n_points=n_points, strategy=strategy)
        # check that exactly n_points candidates were generated
        assert_equal(len(x), n_points)

        if "ps" in acq_func:
            optimizer.tell(x, [[branin(v), 1.1] for v in x])
        else:
            optimizer.tell(x, [branin(v) for v in x])
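This test relies on module-level names (n_steps, n_points) and on pytest parametrization that supplies strategy, surrogate, and acq_func. A minimal sketch of what that surrounding setup might look like; the concrete constants and parameter lists here are assumptions for illustration, not the original test module:

import pytest
from numpy.testing import assert_equal, assert_raises

from skopt import Optimizer
from skopt.benchmarks import branin
from skopt.learning import ExtraTreesRegressor, GaussianProcessRegressor
from skopt.space import Real

# assumed module-level constants driving the ask/tell loop
n_steps = 5    # number of ask/tell rounds
n_points = 4   # batch size requested from each ask()

# assumed parametrization supplying (strategy, surrogate, acq_func)
@pytest.mark.parametrize("strategy", ["cl_min", "cl_mean", "cl_max"])
@pytest.mark.parametrize("surrogate", [GaussianProcessRegressor, ExtraTreesRegressor])
@pytest.mark.parametrize("acq_func", ["EI", "EIps"])
def test_constant_liar_runs(strategy, surrogate, acq_func):
    ...  # body as shown above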
Example #2
def test_reproducible_runs(strategy, surrogate):
    # two runs of the optimizer should yield exactly the same results

    optimizer = Optimizer(base_estimator=surrogate(random_state=1),
                          dimensions=[Real(-5.0, 10.0),
                                      Real(0.0, 15.0)],
                          acq_optimizer='sampling',
                          random_state=1)

    points = []
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        points.append(x)
        optimizer.tell(x, [branin(v) for v in x])

    # the x's should be exactly the same as those in `points`
    optimizer = Optimizer(base_estimator=surrogate(random_state=1),
                          dimensions=[Real(-5.0, 10.0),
                                      Real(0.0, 15.0)],
                          acq_optimizer='sampling',
                          random_state=1)
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)

        assert points[i] == x

        optimizer.tell(x, [branin(v) for v in x])
Example #3
def check_minimizer_api(result, n_calls, n_models=None):
    # assumes the result was produced on branin
    assert(isinstance(result.space, Space))

    if n_models is not None:
        assert_equal(len(result.models), n_models)

    assert_equal(len(result.x_iters), n_calls)
    assert_array_equal(result.func_vals.shape, (n_calls,))

    assert(isinstance(result.x, list))
    assert_equal(len(result.x), 2)

    assert(isinstance(result.x_iters, list))
    for n in range(n_calls):
        assert(isinstance(result.x_iters[n], list))
        assert_equal(len(result.x_iters[n]), 2)

        assert(isinstance(result.func_vals[n], float))
        assert_almost_equal(result.func_vals[n], branin(result.x_iters[n]))

    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))

    assert(isinstance(result.specs, dict))
    assert("args" in result.specs)
    assert("function" in result.specs)
Example #4
def test_reproducible_runs(strategy, surrogate):
    # two runs of the optimizer should yield exactly the same results

    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    points = []
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        points.append(x)
        optimizer.tell(x, [branin(v) for v in x])

    # the x's should be exactly the same as those in `points`
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)

        assert points[i] == x

        optimizer.tell(x, [branin(v) for v in x])
Example #5
def check_minimizer_api(result, n_calls, n_models=None):
    # assumes the result was produced on branin
    assert(isinstance(result.space, Space))

    if n_models is not None:
        assert_equal(len(result.models), n_models)

    assert_equal(len(result.x_iters), n_calls)
    assert_array_equal(result.func_vals.shape, (n_calls,))

    assert(isinstance(result.x, list))
    assert_equal(len(result.x), 2)

    assert(isinstance(result.x_iters, list))
    for n in range(n_calls):
        assert(isinstance(result.x_iters[n], list))
        assert_equal(len(result.x_iters[n]), 2)

        assert(isinstance(result.func_vals[n], float))
        assert_almost_equal(result.func_vals[n], branin(result.x_iters[n]))

    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))

    assert(isinstance(result.specs, dict))
    assert("args" in result.specs)
    assert("function" in result.specs)
Example #6
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """
    Tests whether the optimizer runs properly during the random
    initialization phase and beyond

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.

    * `acq_func` [string]:
        Name of the acquisition function to use.
    """
    optimizer = Optimizer(base_estimator=surrogate(),
                          dimensions=[Real(-5.0, 10.0),
                                      Real(0.0, 15.0)],
                          acq_func=acq_func,
                          acq_optimizer='sampling',
                          random_state=0)

    # invalid arguments to ask() should raise ValueError
    assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
    assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
    assert_raises(ValueError, optimizer.ask, {"n_points": 0})

    for i in range(n_steps):
        x = optimizer.ask(n_points=n_points, strategy=strategy)
        # check that exactly n_points candidates were generated
        assert_equal(len(x), n_points)

        if "ps" in acq_func:
            optimizer.tell(x, [[branin(v), 1.1] for v in x])
        else:
            optimizer.tell(x, [branin(v) for v in x])
Example #7
def test_same_set_of_points_ask(strategy, surrogate):
    """
    For n_points not None, tests whether two consecutive calls to ask
    return the same sets of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """

    optimizer = Optimizer(base_estimator=surrogate(),
                          dimensions=[Real(-5.0, 10.0),
                                      Real(0.0, 15.0)],
                          acq_optimizer='sampling',
                          random_state=2)

    for i in range(n_steps):
        xa = optimizer.ask(n_points, strategy)
        xb = optimizer.ask(n_points, strategy)
        optimizer.tell(xa, [branin(v) for v in xa])
        assert_equal(xa, xb)  # check if the sets of points generated are equal
Example #8
def test_all_points_different(strategy, surrogate):
    """
    Tests whether the parallel optimizer always generates
    different points to evaluate.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(base_estimator=surrogate(),
                          dimensions=[Real(-5.0, 10.0),
                                      Real(0.0, 15.0)],
                          acq_optimizer='sampling',
                          random_state=1)

    tolerance = 1e-3  # points closer than this are considered identical
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        optimizer.tell(x, [branin(v) for v in x])
        distances = pdist(x)
        assert all(distances > tolerance)
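Here pdist is scipy.spatial.distance.pdist, which returns the condensed vector of pairwise Euclidean distances, so the assertion checks that every pair of proposed points is farther apart than the tolerance. A tiny self-contained illustration of the same check:

import numpy as np
from scipy.spatial.distance import pdist

pts = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(pdist(pts))              # the three pairwise distances
print(all(pdist(pts) > 1e-3))  # True: all points are mutually distinct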
Example #9
def test_all_points_different(strategy, surrogate):
    """
    Tests whether the parallel optimizer always generates
    different points to evaluate.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    tolerance = 1e-3  # points closer than this are considered identical
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        optimizer.tell(x, [branin(v) for v in x])
        distances = pdist(x)
        assert all(distances > tolerance)
Example #10
def test_same_set_of_points_ask(strategy, surrogate):
    """
    For n_points not None, tests whether two consecutive calls to ask
    return the same sets of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """

    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=2
    )

    for i in range(n_steps):
        xa = optimizer.ask(n_points, strategy)
        xb = optimizer.ask(n_points, strategy)
        optimizer.tell(xa, [branin(v) for v in xa])
        assert_equal(xa, xb)  # check if the sets of points generated are equal
Example #11
def plot_branin():
    fig, ax = plt.subplots()

    x1_values = np.linspace(-5, 10, 100)
    x2_values = np.linspace(0, 15, 100)
    x_ax, y_ax = np.meshgrid(x1_values, x2_values)
    vals = np.c_[x_ax.ravel(), y_ax.ravel()]
    fx = np.reshape([branin(val) for val in vals], (100, 100))

    cm = ax.pcolormesh(x_ax, y_ax, fx,
                       norm=LogNorm(vmin=fx.min(),
                                    vmax=fx.max()),
                       cmap='viridis_r')

    minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
    ax.plot(minima[:, 0], minima[:, 1], "r.", markersize=14,
            lw=0, label="Minima")

    cb = fig.colorbar(cm)
    cb.set_label("f(x)")

    ax.legend(loc="best", numpoints=1)

    ax.set_xlabel("$X_0$")
    ax.set_xlim([-5, 10])
    ax.set_ylabel("$X_1$")
    ax.set_ylim([0, 15])
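plot_branin assumes that matplotlib, NumPy, LogNorm, and branin are already imported at module level. A minimal sketch of that setup and a usage call, assuming nothing beyond those imports:

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm

from skopt.benchmarks import branin

plot_branin()
plt.show()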
Example #12
def test_seven_iterations():
    for minimizer in MINIMIZERS:
        result = minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)],
                           n_start=3, maxiter=7, random_state=1)

        assert_equal(len(result.models), 4)
        assert_array_equal(result.x_iters.shape, (7, 2))
        assert_array_equal(result.func_vals.shape, (7,))
        assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
        assert_almost_equal(result.fun, branin(result.x))
Example #13
def test_one_iteration():
    result = gbrt_minimize(branin, [[-5, 10], [0, 15]],
                           maxiter=1,
                           random_state=1)

    assert_equal(len(result.models), 0)
    assert_array_equal(result.x_iters.shape, (1, 2))
    assert_array_equal(result.func_vals.shape, (1, ))
    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))
Example #14
def test_seven_iterations():
    result = gbrt_minimize(branin, [[-5, 10], [0, 15]],
                           n_start=3,
                           maxiter=7,
                           random_state=1)

    assert_equal(len(result.models), 4)
    assert_array_equal(result.x_iters.shape, (7, 2))
    assert_array_equal(result.func_vals.shape, (7, ))
    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))
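The gbrt_minimize snippets above use the older n_start/maxiter keywords. In more recent scikit-optimize releases the analogous call would look roughly as below; the keyword mapping (maxiter to n_calls, n_start to n_random_starts) is an assumption for illustration, not an exact equivalence:

from skopt import gbrt_minimize
from skopt.benchmarks import branin

# assumed mapping: maxiter -> n_calls, n_start -> n_random_starts
result = gbrt_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)],
                       n_calls=7, n_random_starts=3, random_state=1)
print(result.x, result.fun)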
Example #15
def test_defaults_are_equivalent():
    # check that the defaults of Optimizer reproduce the defaults of
    # gp_minimize
    space = [(-5., 10.), (0., 15.)]
    opt = Optimizer(space, random_state=1)

    for n in range(15):
        x = opt.ask()
        res_opt = opt.tell(x, branin(x))

    res_min = gp_minimize(branin, space, n_calls=15, random_state=1)

    assert res_min.space == res_opt.space
    # tolerate small differences in the points sampled
    assert np.allclose(res_min.x_iters, res_opt.x_iters, atol=1e-5)
    assert np.allclose(res_min.x, res_opt.x, atol=1e-5)
Example #16
def test_defaults_are_equivalent():
    # check that the defaults of Optimizer reproduce the defaults of
    # gp_minimize
    space = [(-5., 10.), (0., 15.)]
    opt = Optimizer(space, random_state=1)

    for n in range(15):
        x = opt.ask()
        res_opt = opt.tell(x, branin(x))

    res_min = gp_minimize(branin, space, n_calls=15, random_state=1)

    assert res_min.space == res_opt.space
    # tolerate small differences in the points sampled
    assert np.allclose(res_min.x_iters, res_opt.x_iters, atol=1e-5)
    assert np.allclose(res_min.x, res_opt.x, atol=1e-5)
Example #17
def check_minimizer_api(result, n_models):
    assert(isinstance(result.space, Space))
    assert_equal(len(result.models), n_models)
    assert_equal(len(result.x_iters), 7)
    assert_array_equal(result.func_vals.shape, (7,))

    assert(isinstance(result.x, list))
    assert_equal(len(result.x), 2)

    assert(isinstance(result.x_iters, list))
    for n in range(7):
        assert(isinstance(result.x_iters[n], list))
        assert_equal(len(result.x_iters[n]), 2)

        assert(isinstance(result.func_vals[n], float))

    assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)])
    assert_almost_equal(result.fun, branin(result.x))

    assert(isinstance(result.specs, dict))
    assert("args" in result.specs)
    assert("function" in result.specs)
Example #18
def test_defaults_are_equivalent():
    # check that the defaults of Optimizer reproduce the defaults of
    # gp_minimize
    space = [(-5., 10.), (0., 15.)]
    opt = Optimizer(space, random_state=1)

    for n in range(12):
        x = opt.ask()
        res_opt = opt.tell(x, branin(x))

    res_min = gp_minimize(branin, space, n_calls=12, random_state=1)

    assert res_min.space == res_opt.space
    # tolerate small differences in the points sampled
    assert np.allclose(res_min.x_iters, res_opt.x_iters)
    assert np.allclose(res_min.x, res_opt.x)

    res_opt2 = opt.get_result()
    assert np.allclose(res_min.x_iters, res_opt2.x_iters)
    assert np.allclose(res_min.x, res_opt2.x)
Example #19
def test_branin():
    xstars = np.asarray([(-np.pi, 12.275), (+np.pi, 2.275), (9.42478, 2.475)])
    f_at_xstars = np.asarray([branin(xstar) for xstar in xstars])
    branin_min = np.array([0.397887] * xstars.shape[0])
    assert_array_almost_equal(f_at_xstars, branin_min)
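For reference, skopt.benchmarks.branin implements the standard Branin-Hoo function, whose global minimum of roughly 0.397887 is attained at the three points checked above. A self-contained sketch of that definition:

import numpy as np

def branin(x, a=1, b=5.1 / (4 * np.pi ** 2), c=5.0 / np.pi,
           r=6, s=10, t=1.0 / (8 * np.pi)):
    """Branin-Hoo function on the domain x0 in [-5, 10], x1 in [0, 15]."""
    return (a * (x[1] - b * x[0] ** 2 + c * x[0] - r) ** 2
            + s * (1 - t) * np.cos(x[0]) + s)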
Example #21
File: tasks.py, Project: yrath/law
 def run(self):
     from skopt.benchmarks import branin
     with self.output().localize("w") as tmp:
         tmp.dump({"x": self.branch_data, "y": branin(self.branch_data)})
Example #22
subplot_no += 1

plt.subplot(subplot_no)
plt.pcolormesh(x_ax, y_ax, acquis_values)
plt.plot(opt_points[:, 0], opt_points[:, 1], 'wo', markersize=5)
plt.plot(best_min[0], best_min[1], 'ro', markersize=5)
plt.plot(min_x, min_y, 'go', markersize=5)
plt.colorbar()
plt.xlabel('X1')
plt.xlim([-5, 10])
plt.ylabel('X2')
plt.ylim([0, 15])
plt.title("LCB after 20 iterations.")
subplot_no += 1

plt.subplot(subplot_no)
func_values = np.reshape([branin(val) for val in vals], (100, 100))
plt.plot(opt_points[:, 0], opt_points[:, 1], 'wo', markersize=5)
plt.plot(best_min[0], best_min[1], 'ro', markersize=5)
plt.plot(min_x, min_y, 'go', markersize=5)
plt.pcolormesh(x_ax, y_ax, func_values)
plt.colorbar()
plt.xlabel('X1')
plt.xlim([-5, 10])
plt.ylabel('X2')
plt.ylim([0, 15])
plt.title("Function values")

plt.suptitle("2-D acquisition values on the branin function")
plt.show()
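The fragment above refers to names prepared earlier in the script (x_ax, y_ax, vals, acquis_values, opt_points, best_min, min_x, min_y, subplot_no). A hedged sketch of one way that setup could look, assuming a fitted surrogate named model and an OptimizeResult named result from a scikit-optimize run; the LCB computed from model.predict is illustrative, not the original code:

import numpy as np

# evaluation grid over the branin domain
x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]

# lower confidence bound from a fitted surrogate (assumed name: model)
mu, std = model.predict(vals, return_std=True)
acquis_values = np.reshape(mu - 1.96 * std, (100, 100))

# points visited by the optimizer and the best point found (assumed name: result)
opt_points = np.array(result.x_iters)
best_min = result.x
min_x, min_y = -np.pi, 12.275   # one of the known branin minima, shown for reference

subplot_no = 221                # assumed 2x2 subplot grid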