示例#1
0
    def test_sum(self):
        """Summing two of our RBF kernels must match sklearn's sum kernel."""
        ls_a, ls_b = 5., 10.
        data = np.random.normal(size=(10, 20))

        reference = sklearn_RBF(ls_a) + sklearn_RBF(ls_b)
        candidate = RBF(ls_a) + RBF(ls_b)
        assert np.allclose(reference(data), candidate(data))
def test_kernel_operator_commutative():
    """Adding/multiplying a kernel with a scalar must commute."""
    # Addition: kernel + const == const + kernel
    lhs = (RBF(2.0) + 1.0)(X)
    rhs = (1.0 + RBF(2.0))(X)
    assert_array_almost_equal(lhs, rhs)

    # Multiplication: const * kernel == kernel * const
    assert_array_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X))
示例#3
0
def test_iris_example():
    """A GP classifier with a shifted RBF kernel should fit the iris data."""
    iris = datasets.load_iris()
    features = jnp.asarray(iris.data)
    labels = jnp.array(iris.target, dtype=int)

    shifted_rbf = 1. + RBF(length_scale=1.0)
    gpc = GaussianProcessClassifier(kernel=shifted_rbf).fit(features, labels)
示例#4
0
    def test_exponentiation(self):
        """Raising a kernel to a power must agree with sklearn."""
        ls, power = 5., 2.
        data = np.random.normal(size=(10, 20))

        expected = sklearn_RBF(ls) ** power
        actual = RBF(ls) ** power
        assert np.allclose(expected(data), actual(data))
示例#5
0
 def test_distance_from_end_spectrum_kernel(self):
     """1-gram spectrum kernel weighted by distance from the string end."""
     base_kernel = RBF(1.0)
     encoded = AsciiBytesTransformer().transform(['abc', 'cba'])
     spectrum = DistanceFromEndSpectrumKernel(base_kernel, 1)
     result = spectrum(encoded)
     # Hand-computed reference values for this pair of strings.
     expected = np.array([[3., 3.], [3., 3.]])
     assert np.allclose(result, expected)
示例#6
0
 def test_distance_spectrum_kernel(self):
     """2-gram spectrum kernel with RBF-weighted match positions."""
     base_kernel = RBF(1.0)
     encoded = AsciiBytesTransformer().transform(['aabbcc', 'aaabac'])
     spectrum = DistanceSpectrumKernel(base_kernel, 2)
     result = spectrum(encoded)
     # Hand-computed reference values for this pair of strings.
     expected = np.array([[5., 2.2130613], [2.2130613, 6.2130613]])
     assert np.allclose(result, expected)
示例#7
0
    def test_value(self, save_memory):
        """RBF kernel values must match sklearn in either memory mode."""
        config.SAVE_MEMORY = save_memory

        ls = 15.
        data = np.random.normal(size=(10, 20))

        reference = sklearn_RBF(ls)
        candidate = RBF(ls)
        assert np.allclose(reference(data), candidate(data))
def test_kernel_anisotropic():
    """Anisotropic RBF must agree with isotropic RBF on rescaled inputs."""
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)

    # Rescaling one column maps the anisotropic kernel onto an isotropic
    # one with the other length scale (both factors are exact powers of 2,
    # so the float results are bit-identical to explicit *4 and /4).
    for column, factor, iso_scale in ((0, 4.0, 2.0), (1, 0.25, 0.5)):
        rescaled = np.array(X)
        rescaled[:, column] *= factor
        assert_array_almost_equal(K, 3.0 * RBF(iso_scale)(rescaled))

    # Getting and setting hyperparameters via theta (log-space): adding
    # log(2) doubles every hyperparameter.
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
示例#9
0
    def test_gradient(self, save_memory):
        """Analytic kernel gradients must match sklearn in either memory mode."""
        config.SAVE_MEMORY = save_memory

        ls = 1.
        data = np.random.normal(size=(5, 2))

        _, expected_grad = sklearn_RBF(ls)(data, eval_gradient=True)
        _, actual_grad = RBF(ls)(data, eval_gradient=True)
        assert np.allclose(expected_grad, actual_grad)
示例#10
0
    def test_RBF_grad_same_XY(self):
        """Batched gradient of the normalized kernel matches per-pair autodiff."""
        data = np.random.normal(size=(3, 20))
        kernel = NormalizedKernel(RBF(1.))
        _, batched_grad = kernel(data, data, eval_gradient=True)

        # Re-derive the gradient one pair at a time with jax.grad + vmap.
        from jax import grad, vmap
        pair_grad = partial(grad(kernel.pure_kernel_fn), kernel.theta)
        pairwise_grad = \
            vmap(lambda a: vmap(lambda b: pair_grad(a, b))(data))(data)

        assert np.allclose(batched_grad, pairwise_grad)
示例#11
0
    def test_RBF_value_same(self):
        """Batched normalized-kernel values match the per-pair formulation."""
        data = np.random.normal(size=(10, 20))
        kernel = NormalizedKernel(RBF(1.))
        batched = kernel(data)

        # Evaluate the pure kernel function on every pair via nested vmap.
        from jax import vmap
        pair_fn = partial(kernel.pure_kernel_fn, kernel.theta)
        pairwise = \
            vmap(lambda a: vmap(lambda b: pair_fn(a, b))(data))(data)

        assert np.allclose(batched, pairwise)
示例#12
0
    def test_distance_spectrum_kernel_ngram_transform(self):
        """Kernel on raw strings equals kernel on precomputed n-grams."""
        n = 2
        base_kernel = RBF(1.0)
        encoded = AsciiBytesTransformer().transform(['aabbcc', 'aaabac'])
        precomputed = NGramTransformer(n).transform(encoded)

        K_from_strings = DistanceSpectrumKernel(base_kernel, n)(encoded)
        # n_gram_length=None signals that the input is already n-grams.
        K_from_ngrams = DistanceSpectrumKernel(base_kernel, None)(precomputed)
        assert np.allclose(K_from_strings, K_from_ngrams)
示例#13
0
 def test_rev_comp_distance_spectrum_kernel_string_mismatch(self):
     """Reverse-complement spectrum kernel on two mismatched DNA strings."""
     base_kernel = RBF(1.)
     transformer = CompressAlphabetTransformer()
     encoded = transformer.fit_transform(['ATGC', 'CGAT'])
     # Translation table mapping each base onto its complement in the
     # compressed alphabet.
     table = get_translation_table(['A', 'T', 'G', 'C'],
                                   ['T', 'A', 'C', 'G'],
                                   transformer._mapping)
     spectrum = DistanceRevComplementSpectrumKernel(base_kernel, 2, table)
     result = spectrum(encoded)
     # Hand-computed reference values for this pair of strings.
     expected = np.array([[3.2706707, 1.1353353], [1.1353353, 3.2706707]])
     assert np.allclose(result, expected)
示例#14
0
def test_random_starts():
    """More optimizer restarts can only improve the chosen log likelihood.

    Fits a GP classifier with 0..4 random restarts and checks the selected
    theta's log marginal likelihood is monotonically non-decreasing.
    """
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    features = rng.randn(n_samples, n_features) * 2 - 1
    targets = (np.sin(features).sum(axis=1)
               + np.sin(3 * features).sum(axis=1)) > 0

    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1e-3] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features)

    best_so_far = -np.inf
    for restarts in range(5):
        gp = GaussianProcessClassifier(
            kernel=kernel,
            n_restarts_optimizer=restarts,
            random_state=0).fit(features, targets)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Allow float32-epsilon slack for numerically identical optima.
        assert lml > best_so_far - np.finfo(np.float32).eps
        best_so_far = lml
示例#15
0

def f(x):
    """Latent ground-truth function: element-wise sine of *x*."""
    return np.sin(x)


# Shared fixtures for the classifier tests below.
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
# Binary labels: sign of the latent function.
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
# Multi-class labels by thresholding the latent values at +/-0.35.
# np.select covers the full range: the previous mask-based fill into
# np.empty left fX == 0.35 unassigned (masks used `fX < 0.35` then
# `fX > 0.35`), which would read uninitialized memory at that boundary.
y_mc = np.select([fX < -0.35, fX < 0.35], [0, 1], default=2).astype(int)

fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=0.1), fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0,
      (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
]
# Kernels whose hyperparameters actually get optimized during fitting.
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]


@pytest.mark.parametrize('kernel', kernels)
def test_predict_consistent(kernel):
    """Binary predictions must equal thresholding predict_proba at 0.5."""
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    proba_above_half = gpc.predict_proba(X)[:, 1] >= 0.5
    assert_array_equal(gpc.predict(X), proba_above_half)
from sklearn.base import clone
from sklearn.utils._testing import (assert_almost_equal, assert_array_equal,
                                    assert_array_almost_equal)

from sklearn_jax_kernels import (RBF, Kernel, KernelOperator, ConstantKernel,
                                 Exponentiation)

from jax.config import config
config.update("jax_enable_x64",
              True)  # Required for numerical gradients checks

# Fixed-seed toy data; X and Y have different sample counts so that
# cross-evaluations k(X, Y) with rectangular Gram matrices are exercised.
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))

# Kernel configurations under test: isotropic and anisotropic length
# scales, fixed bounds, a constant kernel, and scaled products.
kernels = [
    RBF(length_scale=2.0),
    RBF(length_scale_bounds=(0.5, 2.0)),
    ConstantKernel(constant_value=10.0),
    2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
    2.0 * RBF(length_scale=0.5), 2.0 * RBF(length_scale=[0.5, 2.0]),
    RBF(length_scale=[2.0])
]


@pytest.mark.parametrize('kernel', kernels)
def test_kernel_gradient(kernel):
    # Compare analytic and numeric gradient of kernels.
    K, K_gradient = kernel(X, eval_gradient=True)

    assert K_gradient.shape[0] == X.shape[0]
    assert K_gradient.shape[1] == X.shape[0]