Example #1
import numpy as np
from scipy.interpolate import CloughTocher2DInterpolator, LinearNDInterpolator
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, Matern, WhiteKernel


def interpolate(thetas,
                z_thetas,
                xx,
                yy,
                method='linear',
                z_uncertainties_thetas=None,
                matern_exponent=0.5,
                length_scale_min=0.001,
                length_scale_default=1.,
                length_scale_max=1000.,
                noise_level=0.001,
                subtract_min=False):
    if method == 'cubic':

        interpolator = CloughTocher2DInterpolator(thetas[:], z_thetas)

        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    elif method == 'gp':

        if z_uncertainties_thetas is not None:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=[length_scale_default],
                    length_scale_bounds=[(length_scale_min, length_scale_max)],
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10,
                alpha=z_uncertainties_thetas)
        else:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=length_scale_default,
                    length_scale_bounds=(length_scale_min, length_scale_max),
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10)

        gp.fit(thetas[:], z_thetas[:])

        zz, _ = gp.predict(np.c_[xx.ravel(), yy.ravel()], return_std=True)
        zi = zz.reshape(xx.shape)

    elif method == 'linear':
        interpolator = LinearNDInterpolator(thetas[:], z_thetas)
        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    else:
        raise ValueError("Unknown interpolation method: {}".format(method))

    mle = np.unravel_index(zi.argmin(), zi.shape)

    if subtract_min:
        zi -= zi[mle]

    return zi, mle
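
A minimal usage sketch (the quadratic toy surface and grid are illustrative, not from the original project):

thetas = np.random.uniform(-1., 1., size=(30, 2))
z_thetas = np.sum(thetas ** 2, axis=1)
xx, yy = np.meshgrid(np.linspace(-1., 1., 50), np.linspace(-1., 1., 50))
zi, mle = interpolate(thetas, z_thetas, xx, yy, method='gp',
                      subtract_min=True)
# zi has shape (50, 50); mle indexes the grid point with the smallest value.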
Example #2
def test_param_for_white_kernel_in_Sum(kernel):
    # wk, X and kernel5 are module-level fixtures (see Example #10).
    kernel_with_noise = kernel + wk
    wk_present, wk_param = _param_for_white_kernel_in_Sum(kernel_with_noise)
    assert_true(wk_present)
    kernel_with_noise.set_params(**{wk_param: WhiteKernel(noise_level=0.0)})
    assert_array_equal(kernel_with_noise(X), kernel(X))

    assert_false(_param_for_white_kernel_in_Sum(kernel5)[0])
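
A hedged sketch of what the helper returns (the exact parameter path, e.g. 'k2', depends on where the WhiteKernel sits in the Sum):

present, param = _param_for_white_kernel_in_Sum(Matern() + WhiteKernel())
# present is True and param names the WhiteKernel term (here 'k2'), so
# set_params(**{param: ...}) can replace it in place, as the test does.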
Example #3
File: bo.py Project: mborisyak/abo
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from tqdm import tqdm_notebook
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Matern, WhiteKernel


# `ei`, `transform` and `reverse_transform` are helpers defined elsewhere
# in the abo project.
def gpbo_cycle(ndim,
               space,
               target_f,
               n_iters=10,
               acq_function=ei,
               model=None,
               n_multi_start=100,
               show_progress=True):
    xrange = (lambda title, n: tqdm_notebook(range(n), postfix=title)
              ) if show_progress else (lambda title, n: range(n))

    space = np.array(space)

    if model is None:
        kernel = WhiteKernel(0.001, noise_level_bounds=[1.0e-5, 1.0e-3]) + \
                 Matern(1.0, nu=1.5, length_scale_bounds=[1.0e-3, 1.0e+3])

        model = GaussianProcessRegressor(kernel=kernel,
                                         normalize_y=False,
                                         noise=None,
                                         n_restarts_optimizer=2)

    known_points = []
    known_values = []
    cost = []

    for i in xrange('BO iteration', n_iters):
        acq = acq_function(model, known_points, known_values)

        candidates = []
        for _ in xrange('acquisition', n_multi_start):
            x0 = np.random.uniform(size=(ndim, ))

            x, f, _ = fmin_l_bfgs_b(maxiter=1000,
                                    func=acq,
                                    x0=x0,
                                    approx_grad=False,
                                    bounds=[(0, 1)] * ndim)

            candidates.append((x, f))

        best = np.argmin([f for x, f in candidates])
        suggestion, _ = candidates[best]
        suggestion = reverse_transform(suggestion.reshape(1, -1), space)[0, :]

        point_cost, observed = target_f(suggestion)

        known_points.append(suggestion)
        known_values.append(observed)
        cost.append(point_cost)

        model.fit(transform(np.array(known_points), space),
                  np.array(known_values))

        yield model, acq, space, known_points, known_values, cost
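
A hedged usage sketch, assuming the project's `ei`, `transform` and `reverse_transform` helpers are importable (toy target and budget; target_f must return a (cost, observation) pair):

def target(x):
    return 1.0, np.sum((x - 0.5) ** 2)

for model, acq, space, points, values, cost in gpbo_cycle(
        ndim=2, space=[(0., 1.), (0., 1.)], target_f=target,
        n_iters=5, show_progress=False):
    pass  # each iteration yields the surrogate and the evaluation history

best = min(values)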
Example #5
def test_acquisition_gradient():
    # check_gradient_correctness is a helper defined in the same test module;
    # the imports match Example #10.
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    X_new = rng.randn(5)
    mat = Matern()
    wk = WhiteKernel()
    gpr = GaussianProcessRegressor(kernel=mat + wk)
    gpr.fit(X, y)

    for acq_func in ["LCB", "PI", "EI"]:
        check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
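
A hedged sketch of the kind of check such a helper performs, here via skopt's public gaussian_ei acquisition (the step size and tolerance are illustrative):

from skopt.acquisition import gaussian_ei

x = X_new.reshape(1, -1)
_, grad = gaussian_ei(x, gpr, y_opt=np.max(y), return_grad=True)
grad = np.asarray(grad).ravel()
eps = 1e-6
for i in range(x.shape[1]):
    x_step = x.copy()
    x_step[0, i] += eps
    fd = (gaussian_ei(x_step, gpr, y_opt=np.max(y)) -
          gaussian_ei(x, gpr, y_opt=np.max(y))) / eps
    assert abs(fd[0] - grad[i]) < 1e-3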
Example #6
    # Requires `from contextlib import contextmanager`; without the decorator
    # the `yield` below would make this a plain generator method.
    @contextmanager
    def noise_set_to_zero(self):
        """Context manager in which the noise of the Gaussian process is 0.

        This is useful when you want to predict the epistemic uncertainty of the
        Gaussian process without the noise.
        """
        current_theta = self.theta
        try:
            # Now we set the noise to 0, but do NOT recalculate the alphas!:
            white_present, white_param = _param_for_white_kernel_in_Sum(self.kernel_)
            self.kernel_.set_params(**{white_param: WhiteKernel(noise_level=0.0)})
            yield self
        finally:
            self.kernel_.theta = current_theta
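
A hedged usage sketch on a fitted model (gpr and X_test are illustrative):

with gpr.noise_set_to_zero():
    # the predictive std now reflects only epistemic (model) uncertainty
    mean, std = gpr.predict(X_test, return_std=True)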
Example #7
def test_guess_priors():
    """Construct a complicated kernel and check if priors are constructed
    correctly."""
    kernel = Exponentiation(
        ConstantKernel(constant_value_bounds="fixed") * Matern() +
        WhiteKernel() + CompoundKernel([RBF(), Matern()]),
        2.0,
    )

    priors = guess_priors(kernel)

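    # The fixed-bounds ConstantKernel contributes no tunable hyperparameter;
    # the four priors cover the Matern length scale, the WhiteKernel noise
    # level, and the RBF and Matern inside the CompoundKernel.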
    assert len(priors) == 4
    expected = [
        -1.737085713764618,
        -4.107091211892862,
        -1.737085713764618,
        -1.737085713764618,
    ]
    for p, v in zip(priors, expected):
        assert_almost_equal(p(0.0), v)
Example #8
def test_guess_priors():
    """Construct a complicated kernel and check if priors are constructed
    correctly."""
    kernel = Exponentiation(
        ConstantKernel(constant_value_bounds="fixed") * Matern() +
        WhiteKernel() + RBF(length_scale=(1.0, 1.0)),
        2.0,
    )

    priors = guess_priors(kernel)

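    # As in Example #7, the fixed ConstantKernel is skipped: the four priors
    # cover Matern's length scale, WhiteKernel's noise level, and the two
    # dimensions of the anisotropic RBF length scale.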
    assert len(priors) == 4
    expected = [
        -0.02116327824572739,
        -2.112906921232193,
        -0.02116327824572739,
        -0.02116327824572739,
    ]
    for p, v in zip(priors, expected):
        assert_almost_equal(p(-0.9), v)
Example #9
import numpy as np

from skopt.learning.gaussian_process.kernels import ConstantKernel
from skopt.learning.gaussian_process.kernels import DotProduct
from skopt.learning.gaussian_process.kernels import ExpSineSquared
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning.gaussian_process.kernels import RationalQuadratic
from skopt.learning.gaussian_process.kernels import RBF
from skopt.learning.gaussian_process.kernels import WhiteKernel

KERNELS = []

for length_scale in [np.arange(1, 6), [0.2, 0.3, 0.5, 0.6, 0.1]]:
    KERNELS.extend([
        RBF(length_scale=length_scale),
        Matern(length_scale=length_scale, nu=0.5),
        Matern(length_scale=length_scale, nu=1.5),
        Matern(length_scale=length_scale, nu=2.5),
        RationalQuadratic(alpha=2.0, length_scale=2.0),
        ExpSineSquared(length_scale=2.0, periodicity=3.0),
        ConstantKernel(constant_value=1.0),
        WhiteKernel(noise_level=2.0),
        Matern(length_scale=length_scale, nu=2.5)**3.0,
        RBF(length_scale=length_scale) +
        Matern(length_scale=length_scale, nu=1.5),
        RBF(length_scale=length_scale) *
        Matern(length_scale=length_scale, nu=1.5),
        DotProduct(sigma_0=2.0)
    ])


# Copied (shamelessly) from sklearn.gaussian_process.kernels
def _approx_fprime(xk, f, epsilon, args=()):
    f0 = f(*((xk, ) + args))
    grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
    ei = np.zeros((len(xk), ), float)
    for k in range(len(xk)):
        ei[k] = 1.0
        d = epsilon * ei
        grad[:, :, k] = (f(*((xk + d, ) + args)) - f0) / d[k]
        ei[k] = 0.0
    return grad
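
A hedged sketch of how KERNELS and _approx_fprime combine in a gradient test (the epsilon and data are illustrative):

def check_kernel_gradient(kernel, X):
    def eval_kernel(theta):
        return kernel.clone_with_theta(theta)(X)

    numeric = _approx_fprime(kernel.theta, eval_kernel, 1e-10)
    _, analytic = kernel(X, eval_gradient=True)
    np.testing.assert_array_almost_equal(numeric, analytic, decimal=4)

check_kernel_gradient(KERNELS[0], np.random.RandomState(0).randn(4, 5))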
Example #10
import numpy as np

from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
import pytest

from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import RBF
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning.gaussian_process.kernels import WhiteKernel
from skopt.learning.gaussian_process.gpr import _param_for_white_kernel_in_Sum

rng = np.random.RandomState(0)
X = rng.randn(5, 5)
y = rng.randn(5)

rbf = RBF()
wk = WhiteKernel()
mat = Matern()
kernel1 = rbf
kernel2 = mat + rbf
kernel3 = mat * rbf
kernel4 = wk * rbf
kernel5 = mat + rbf * wk


def predict_wrapper(X, gpr):
    """Predict that can handle 1-D input"""
    X = np.expand_dims(X, axis=0)
    return gpr.predict(X, return_std=True)


@pytest.mark.parametrize("kernel", [kernel1, kernel2, kernel3, kernel4])
def test_param_for_white_kernel_in_Sum(kernel):
    ...  # snippet truncated here; the full test body appears in Example #2
Example #11
import numpy as np
from skopt import Optimizer
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import ConstantKernel
from skopt.learning.gaussian_process.kernels import Matern, WhiteKernel



#cov_amplitude = ConstantKernel(1.0, (0.01, 5.0))
cov_amplitude = ConstantKernel(1.0, "fixed")

other_kernel = Matern(
    length_scale=np.ones(1),
    length_scale_bounds=[(0.3, 10)],
    nu=2.5)

white_kernel = WhiteKernel()

gp = GaussianProcessRegressor(
    kernel=cov_amplitude * other_kernel + white_kernel,
    normalize_y=True, alpha=0.0, noise=10e-7,
    n_restarts_optimizer=2)


def get_optimizer(value_range, nrandom):
    # Renamed from `range` to avoid shadowing the built-in.
    return Optimizer(dimensions=[value_range],
                     base_estimator=gp,
                     n_random_starts=nrandom)
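
A hedged usage sketch of the ask/tell loop this optimizer serves (the search range and objective are illustrative):

opt = get_optimizer((-5.0, 5.0), 5)
for _ in range(20):
    x = opt.ask()
    opt.tell(x, (x[0] - 1.0) ** 2)
print(min(opt.yi))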
Example #12
    def fit(self, X, y):
        """Fit Gaussian process regression model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data

        y : array-like, shape = (n_samples, [n_output_dims])
            Target values

        Returns
        -------
        self
            Returns an instance of self.
        """

        if self.kernel is None:
            self.kernel = ConstantKernel(1.0, constant_value_bounds="fixed") \
                          * RBF(1.0, length_scale_bounds="fixed")
        if self.noise and not _param_for_white_kernel_in_Sum(self.kernel)[0]:
            if self.noise == "gaussian":
                self.kernel = self.kernel + WhiteKernel()
            else:
                self.kernel = self.kernel + WhiteKernel(
                    noise_level=self.noise, noise_level_bounds="fixed")
        super(GaussianProcessRegressor, self).fit(X, y)

        self.noise_ = None

        if self.noise:
            # The noise component of this kernel should be set to zero
            # while estimating K(X_test, X_test)
            # Note that the term K(X, X) should include the noise but
            # this (K(X, X))^-1y is precomputed as the attribute `alpha_`.
            # (Notice the underscore).
            # This has been described in Eq 2.24 of
            # http://www.gaussianprocess.org/gpml/chapters/RW2.pdf
            # Hence this hack
            if isinstance(self.kernel_, WhiteKernel):
                self.kernel_.set_params(noise_level=0.0)

            else:
                white_present, white_param = _param_for_white_kernel_in_Sum(
                    self.kernel_)

                # This should always be true. Just in case.
                if white_present:
                    noise_kernel = self.kernel_.get_params()[white_param]
                    self.noise_ = noise_kernel.noise_level
                    self.kernel_.set_params(
                        **{white_param: WhiteKernel(noise_level=0.0)})

        # Precompute arrays needed at prediction
        L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
        self.K_inv_ = L_inv.dot(L_inv.T)

        # Fix deprecation warning #462
        if int(sklearn.__version__[2:4]) >= 23:
            self.y_train_std_ = self._y_train_std
            self.y_train_mean_ = self._y_train_mean
        elif int(sklearn.__version__[2:4]) >= 19:
            self.y_train_mean_ = self._y_train_mean
            self.y_train_std_ = 1
        else:
            self.y_train_mean_ = self.y_train_mean
            self.y_train_std_ = 1

        return self
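
A hedged usage sketch of the noise handling this fit implements (toy data; noise="gaussian" appends a tunable WhiteKernel whose fitted level is exposed as noise_):

import numpy as np
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Matern

rng = np.random.RandomState(0)
X = rng.rand(20, 2)
y = np.sin(3 * X[:, 0]) + 0.1 * rng.randn(20)

gpr = GaussianProcessRegressor(kernel=Matern(), noise="gaussian")
gpr.fit(X, y)
print(gpr.noise_)  # estimated noise level, stripped out of kernel_ after fit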