Code Example #1
def test_ei():
    np.random.seed(0)
    ackley = Ackley(dim=1)
    X = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    fX = np.array([ackley.eval(x) for x in X])

    gp = GPRegressor(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub)
    gp.add_points(X, fX)

    # Find the global optimizer of EI
    x_true = -1.7558
    x_next = ei_ga(X=X,
                   Xpend=None,
                   dtol=0.0,
                   ei_tol=0,
                   fX=fX,
                   num_pts=1,
                   opt_prob=ackley,
                   surrogate=gp)
    assert np.isclose(x_next, x_true, atol=1e-2)

    # Find the optimizer at least distance 5 from other points
    x_true = 10.6656
    x_next = ei_ga(X=X,
                   Xpend=None,
                   dtol=5.0,
                   ei_tol=0,
                   fX=fX,
                   num_pts=1,
                   opt_prob=ackley,
                   surrogate=gp)
    assert np.isclose(x_next, x_true, atol=1e-2)
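This test is shown without its imports. A minimal preamble that should make it runnable is sketched below; the module paths follow the usual pySOT package layout (Ackley in pySOT.optimization_problems, GPRegressor in pySOT.surrogate, ei_ga in pySOT.auxiliary_problems), which is an assumption that may need adjusting for the pySOT release you have installed.

# Assumed imports for the EI test above; module paths may differ
# between pySOT releases.
import numpy as np
from pySOT.optimization_problems import Ackley
from pySOT.surrogate import GPRegressor
from pySOT.auxiliary_problems import ei_ga

With these in place, calling test_ei() runs both sub-checks: the unconstrained maximizer of expected improvement, and the maximizer subject to a minimum distance of 5 from the already evaluated points (dtol=5.0).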
Code Example #2
def test_unit_box():
    ackley = Ackley(dim=1)
    np.random.seed(0)
    x = np.random.rand(30, 1)
    fX = np.expand_dims([ackley.eval(y) for y in x], axis=1)

    xx = np.expand_dims(np.linspace(0, 1, 100), axis=1)

    # RBF with internal scaling to unit hypercube
    rbf1 = RBFInterpolant(dim=1, lb=np.zeros(1), ub=100 * np.ones(1), eta=1e-6)
    rbf1.add_points(x, fX)

    # Normal RBF
    rbf2 = RBFInterpolant(dim=1, lb=np.zeros(1), ub=100 * np.ones(1), eta=1e-6)
    rbf2.add_points(x, fX)

    assert np.max(np.abs(rbf1.predict(xx) - rbf2.predict(xx))) < 1e-10
    assert np.max(
        np.abs(rbf1.predict_deriv(x[0, :]) -
               rbf2.predict_deriv(x[0, :]))) < 1e-10
    assert np.max(np.abs(rbf1.X - rbf2.X)) < 1e-10
    assert np.max(np.abs(rbf1.fX - rbf2.fX)) < 1e-10

    rbf1.reset()
    assert rbf1.num_pts == 0 and rbf1.dim == 1
    assert rbf1.X.size == 0 and rbf1.fX.size == 0
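As above, the imports are omitted. Under the same assumption about the pySOT module layout, the test needs roughly the following preamble:

# Assumed imports for the RBF interpolant test above; verify the
# paths against your installed pySOT version.
import numpy as np
from pySOT.optimization_problems import Ackley
from pySOT.surrogate import RBFInterpolant

Calling test_unit_box() then fits both interpolants on 30 random points in [0, 1], compares their predictions on a 100-point grid and their derivatives at one data point, and checks that reset() empties the model while keeping its dimension.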
Code Example #3
def test_srbf():
    np.random.seed(0)
    ackley = Ackley(dim=1)
    X = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    fX = np.array([ackley.eval(x) for x in X])

    gp = GPRegressor(dim=1)
    gp.add_points(X, fX)

    # Find the next point with w = 0.25
    x_true = 10.50
    x_next = candidate_uniform(
        num_pts=1, X=X, Xpend=None, fX=fX, num_cand=10000,
        surrogate=gp, opt_prob=ackley, weights=[0.25])
    assert np.isclose(x_next, x_true, atol=1e-2)

    x_next = candidate_srbf(
        num_pts=1, X=X, Xpend=None, fX=fX, num_cand=10000,
        surrogate=gp, opt_prob=ackley, weights=[0.25],
        sampling_radius=0.5)
    assert np.isclose(x_next, x_true, atol=1e-2)

    # Find the next point with w = 0.75
    x_true = -3.395
    x_next = candidate_uniform(
        num_pts=1, X=X, Xpend=None, fX=fX, num_cand=10000,
        surrogate=gp, opt_prob=ackley, weights=[0.75])
    assert np.isclose(x_next, x_true, atol=1e-2)

    x_next = candidate_srbf(
        num_pts=1, X=X, Xpend=None, fX=fX, num_cand=10000,
        surrogate=gp, opt_prob=ackley, weights=[0.75],
        sampling_radius=0.5)
    assert np.isclose(x_next, x_true, atol=1e-2)
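The candidate-point routines used here, candidate_uniform and candidate_srbf, are assumed to be importable from pySOT.auxiliary_problems, the module where pySOT keeps its candidate-selection helpers; as before, this is a sketch of the preamble rather than a guaranteed match for every release.

# Assumed imports for the SRBF candidate-sampling test above.
import numpy as np
from pySOT.optimization_problems import Ackley
from pySOT.surrogate import GPRegressor
from pySOT.auxiliary_problems import candidate_srbf, candidate_uniform

The weights argument controls the trade-off in the merit function between the surrogate prediction and the distance to already evaluated points, which is why w = 0.25 and w = 0.75 are expected to select different next points.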
Code Example #4
def test_ei():
    np.random.seed(0)
    ackley = Ackley(dim=1)
    X = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    fX = np.array([ackley.eval(x) for x in X])

    gp = GPRegressor(dim=1)
    gp.add_points(X, fX)

    # Find the global optimizer of EI
    x_true = -3.0556
    x_next = expected_improvement_ga(
        X=X, Xpend=None, dtol=0.0, ei_tol=0,
        fX=fX, num_pts=1, opt_prob=ackley, surrogate=gp)
    assert np.isclose(x_next, x_true, atol=1e-2)

    x_next = expected_improvement_uniform(
        X=X, Xpend=None, dtol=0.0, ei_tol=0,
        fX=fX, num_pts=1, opt_prob=ackley, surrogate=gp,
        num_cand=10000)
    assert np.isclose(x_next, x_true, atol=1e-2)

    # Find the optimizer at least distance 5 from other points
    x_true = 11.14
    x_next = expected_improvement_ga(
        X=X, Xpend=None, dtol=5.0, ei_tol=0,
        fX=fX, num_pts=1, opt_prob=ackley, surrogate=gp)
    assert np.isclose(x_next, x_true, atol=1e-2)

    x_next = expected_improvement_uniform(
        X=X, Xpend=None, dtol=5.0, ei_tol=0,
        fX=fX, num_pts=1, opt_prob=ackley, surrogate=gp,
        num_cand=10000)
    assert np.isclose(x_next, x_true, atol=1e-2)
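This variant of the EI test calls expected_improvement_ga and expected_improvement_uniform rather than the shorter ei_ga used in Code Example #1, which suggests the two snippets target different pySOT versions. Assuming both helpers live in pySOT.auxiliary_problems, the preamble would look like this:

# Assumed imports; the EI helpers are named expected_improvement_*
# here to match the calls in the test body, and their location in
# pySOT.auxiliary_problems is an assumption about the release used.
import numpy as np
from pySOT.optimization_problems import Ackley
from pySOT.surrogate import GPRegressor
from pySOT.auxiliary_problems import (expected_improvement_ga,
                                      expected_improvement_uniform)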
Code Example #5
def example_extra_vals():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_extra_vals.log"):
        os.remove("./logfiles/example_extra_vals.log")
    logging.basicConfig(filename="./logfiles/example_extra_vals.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    num_extra = 10
    extra = np.random.uniform(ackley.lb, ackley.ub, (num_extra, ackley.dim))
    extra_vals = np.nan * np.ones((num_extra, 1))
    for i in range(num_extra):  # Evaluate every second point
        if i % 2 == 0:
            extra_vals[i] = ackley.eval(extra[i, :])

    rbf = RBFInterpolant(dim=ackley.dim, kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=num_threads,
        extra_points=extra, extra_vals=extra_vals)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Append the known function values to the POAP database since
    # POAP won't evaluate these points
    for i in range(len(extra_vals)):
        if not np.isnan(extra_vals[i]):
            record = EvalRecord(
                params=(np.ravel(extra[i, :]),), status='completed')
            record.value = extra_vals[i]
            record.feasible = True
            controller.fevals.append(record)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
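Unlike the test snippets above, this is a full driver function that runs SRBFStrategy through POAP's ThreadController, seeding the run with pre-evaluated extra points. A preamble that should cover its dependencies is sketched below: EvalRecord is defined in poap.strategy, the pySOT module paths are assumed as in the earlier snippets, and a __main__ guard is added so the script can be run directly.

# Assumed imports for example_extra_vals; adjust module paths to the
# pySOT/POAP versions you have installed.
import logging
import os
import numpy as np

from poap.controller import ThreadController, BasicWorkerThread
from poap.strategy import EvalRecord

from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.optimization_problems import Ackley
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import RBFInterpolant, CubicKernel, LinearTail


if __name__ == "__main__":
    example_extra_vals()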