def test_noise_equals_gaussian():
    gpr1 = GaussianProcessRegressor(rbf + wk).fit(X, y)

    # gpr2 sets the noise component to zero at predict time.
    gpr2 = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
    assert_false(gpr1.noise_)
    assert_true(gpr2.noise_)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_array_almost_equal(mean1, mean2, 4)
    assert_false(np.any(std1 == std2))
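These tests rely on module-level fixtures (X, y, rbf, wk) defined elsewhere in the test file; a minimal sketch of plausible definitions, with shapes and values chosen purely for illustration:

import numpy as np
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.RandomState(0)
X = rng.randn(20, 5)   # toy training inputs
y = rng.randn(20)      # toy training targets
rbf = RBF()            # signal kernel
wk = WhiteKernel()     # explicit noise kernel, the analogue of noise="gaussian"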
Example #3
def interpolate(thetas,
                z_thetas,
                xx,
                yy,
                method='linear',
                z_uncertainties_thetas=None,
                matern_exponent=0.5,
                length_scale_min=0.001,
                length_scale_default=1.,
                length_scale_max=1000.,
                noise_level=0.001,
                subtract_min=False):
    if method == 'cubic':

        interpolator = CloughTocher2DInterpolator(thetas[:], z_thetas)

        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    elif method == 'gp':

        if z_uncertainties_thetas is not None:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=[length_scale_default],
                    length_scale_bounds=[(length_scale_min, length_scale_max)],
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10,
                alpha=z_uncertainties_thetas)
        else:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=length_scale_default,
                    length_scale_bounds=(length_scale_min, length_scale_max),
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10)

        gp.fit(thetas[:], z_thetas[:])

        zz, _ = gp.predict(np.c_[xx.ravel(), yy.ravel()], return_std=True)
        zi = zz.reshape(xx.shape)

    elif method == 'linear':
        interpolator = LinearNDInterpolator(thetas[:], z_thetas)
        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    else:
        raise ValueError("Unknown interpolation method: {}".format(method))

    mle = np.unravel_index(zi.argmin(), zi.shape)

    if subtract_min:
        zi -= zi[mle]

    return zi, mle
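A minimal usage sketch of interpolate on synthetic data; the toy objective, point set, and grid below are illustrative, not from the original:

import numpy as np

rng = np.random.RandomState(0)
thetas = rng.uniform(-1., 1., size=(30, 2))   # hypothetical 2-D parameter points
z_thetas = np.sum(thetas ** 2, axis=1)        # hypothetical objective values

# Regular evaluation grid covering the parameter square.
xx, yy = np.meshgrid(np.linspace(-1., 1., 50), np.linspace(-1., 1., 50))

zi, mle = interpolate(thetas, z_thetas, xx, yy, method='gp', subtract_min=True)
print("interpolated grid:", zi.shape, "argmin index:", mle)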
Example #4
def test_white_kernel_as_noise():
    # first .fit()
    gpr1 = GaussianProcessRegressor(rbf + wk).fit(X, y)
    gpr2 = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    assert not np.any(std1 == std2)
    assert _param_for_white_kernel_in_Sum(gpr1.kernel_)[1] == 'k2'
    assert _param_for_white_kernel_in_Sum(gpr2.kernel_)[1] == 'k2'
    # second .fit()
    gpr1 = gpr1.fit(X, y)
    gpr2 = gpr2.fit(X, y)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    assert _param_for_white_kernel_in_Sum(gpr1.kernel_)[1] == 'k2'
    assert _param_for_white_kernel_in_Sum(gpr2.kernel_)[1] == 'k2'
    assert not np.any(std1 == std2)
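_param_for_white_kernel_in_Sum is a skopt-internal helper; judging from how the test indexes its result, it returns a (found, param_name) pair locating the WhiteKernel inside a Sum kernel. A small usage sketch, with the import path assumed from skopt's GP module:

from skopt.learning.gaussian_process.gpr import _param_for_white_kernel_in_Sum
from skopt.learning.gaussian_process.kernels import RBF, WhiteKernel

found, param = _param_for_white_kernel_in_Sum(RBF() + WhiteKernel())
print(found, param)  # expected: True 'k2' -- the WhiteKernel is the second term of the Sum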
Example #5
def test_gpr_uses_noise():
    """ Test that gpr is using WhiteKernel by default"""
    X = np.random.normal(size=[100, 2])
    Y = np.random.normal(size=[100])

    g_gaussian = GaussianProcessRegressor()
    g_gaussian.fit(X, Y)
    m, sigma = g_gaussian.predict(X[0:1], return_cov=True)

    assert sigma > 0
Example #6
def test_gpr_uses_noise():
    """ Test that gpr is using WhiteKernel"""

    X = np.random.normal(size=[100, 2])
    Y = np.random.normal(size=[100])

    g_gaussian = GaussianProcessRegressor(noise='gaussian')
    g_gaussian.fit(X, Y)
    m, sigma = g_gaussian.predict(X[0:1], return_cov=True)
    assert sigma > 0
Example #7
def test_mean_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    mean, std, mean_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[0], 1e-4)
    assert_array_almost_equal(mean_grad, num_grad, decimal=3)
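predict_wrapper is another helper not shown in this snippet; given its use with optimize.approx_fprime, it presumably reshapes the flat 1-D point and forwards it to gpr.predict, roughly like this (an assumption, not the original helper):

def predict_wrapper(X, gpr):
    # Wrap gpr.predict so approx_fprime can pass a flat 1-D point.
    return gpr.predict(np.expand_dims(X, axis=0), return_std=True)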
Example #9
from skopt.learning.gaussian_process.kernels import RBF

all_x = np.reshape(np.linspace(0, 6, 100), (-1, 1))
all_f = [black_box(xi) for xi in all_x]

# Plot all points.
plt.plot(all_x, all_f)

# Train on points from only one third of the domain (x in [4, 6]).
X = np.reshape(np.linspace(4, 6, 10), (-1, 1))
y = [black_box(xi) for xi in X]

# Use RBF kernel.
rbf = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=rbf, alpha=1e-12)
gpr.fit(X, y)
plt.plot(np.ravel(X), y, "ro", label="Fit points")

# Predict on all data.
y_pred, y_std = gpr.predict(all_x, return_std=True)
all_x_plot = np.ravel(all_x)
upper_bound = y_pred + 1.96 * y_std
lower_bound = y_pred - 1.96 * y_std

plt.plot(all_x_plot, y_pred, "r--", label="Predictions")
plt.plot(all_x_plot, lower_bound, color="red")
plt.plot(all_x_plot, upper_bound, color="red")
plt.fill_between(all_x_plot, lower_bound, upper_bound, facecolor="lightcoral")
plt.legend()
plt.show()
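The example above assumes a one-dimensional black_box objective defined earlier; any smooth scalar function of a length-1 array reproduces the plot, for instance (purely illustrative, not the original definition):

def black_box(x):
    # Illustrative stand-in for the objective evaluated above.
    return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))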
# Compute the Kriging (GP) surrogate
krigingModel = GaussianProcessRegressor(random_state=0)
krigingModel.fit(XSobol, YSobol)

# Compute the XGBoost surrogate
print("Fitting the XGBoost Model")
XGBoostModel = fitXGBoost(XSobol, YSobol)

# At this point we have the XGBoost surrogate model. What we still need is the
# step that returns the parameterisations for positive calibrations.

nbModels = 2
predictions = [None] * nbModels

# Not elegant, but predict with each surrogate model explicitly.
print("Predicting on the two models")
predictions[0] = krigingModel.predict(XOSS).flatten()
predictions[1] = XGBoostModel.predict(XOSS).flatten()

print("Evaluating MSE Performance")
MSEperf = np.zeros((nbModels, monteCarlos))
for modelIndex in range(nbModels):
    for i in range(monteCarlos):
        MSEperf[modelIndex, i] = mean_squared_error(
            YOSS[i * testSize:(i + 1) * testSize],
            predictions[modelIndex][i * testSize:(i + 1) * testSize])


print("Plotting")

experiment_labels = ["Kriging", "XGBoost (Batch)"]

MSEperf = pd.DataFrame(MSEperf, index=experiment_labels)
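The script stops after assembling the MSE table; one simple way to finish the "Plotting" step would be a boxplot of the per-model MSE distributions (a sketch assuming matplotlib, not the original plotting code):

import matplotlib.pyplot as plt

MSEperf.T.boxplot()                      # one box per surrogate model
plt.ylabel("MSE per Monte Carlo batch")
plt.show()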