def test_combine():
    x1 = B.linspace(0, 2, 10)
    x2 = B.linspace(2, 4, 10)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Matern12(), measure=m)
    y1 = p1(x1).sample()
    y2 = p2(x2).sample()

    # Check the one-argument case.
    assert_equal_normals(combine(p1(x1, 1)), p1(x1, 1))
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)))
    assert_equal_normals(fdd_combined, p1(x1, 1))
    approx(y_combined, y1)

    # Check the two-argument case.
    fdd_combined = combine(p1(x1, 1), p2(x2, 2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)),
                                       (p2(x2, 2), y2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    approx(y_combined, B.concat(y1, y2, axis=0))
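For intuition, the covariance of the combined FDD is block diagonal because `p1` and `p2` are independent GPs under the prior measure. A minimal NumPy sketch of the layout that `B.block_diag` produces here, using `scipy.linalg.block_diag` as a stand-in:

import numpy as np
from scipy.linalg import block_diag

var1 = np.array([[1.0, 0.5], [0.5, 1.0]])  # Covariance of the first FDD.
var2 = np.array([[2.0]])                   # Covariance of the second FDD.

# The individual covariances land on the diagonal; the off-diagonal
# blocks are zero, reflecting independence.
print(block_diag(var1, var2))
# [[1.  0.5 0. ]
#  [0.5 1.  0. ]
#  [0.  0.  2. ]]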
Example #2
def test_take_x():
    m = Measure()
    f1 = GP(EQ())
    f2 = GP(EQ())
    k = MultiOutputKernel(m, f1)
    with pytest.raises(ValueError):
        _take_x(k, f2(B.linspace(0, 1, 10)), B.randn(10) > 0)
Example #3
def test_logpdf_missing_data():
    # Setup model.
    m = 3
    noise = 1e-2
    latent_noises = 2e-2 * B.ones(m)
    kernels = [0.5 * EQ().stretch(0.75) for _ in range(m)]
    x = B.linspace(0, 10, 20)

    # Concatenate two orthogonal matrices, to make the missing data
    # approximation exact.
    u1 = B.svd(B.randn(m, m))[0]
    u2 = B.svd(B.randn(m, m))[0]
    u = Dense(B.concat(u1, u2, axis=0) / B.sqrt(2))

    s_sqrt = Diagonal(B.rand(m))

    # Construct a reference model.
    oilmm_pp = ILMMPP(kernels, u @ s_sqrt, noise, latent_noises)

    # Sample to generate test data.
    y = oilmm_pp.sample(x, latent=False)

    # Throw away data, but retain orthogonality.
    y[5:10, 3:] = np.nan
    y[10:, :3] = np.nan

    # Construct OILMM to test.
    oilmm = OILMM(kernels, u, s_sqrt, noise, latent_noises)

    # Check that evidence is still exact.
    approx(oilmm_pp.logpdf(x, y), oilmm.logpdf(x, y), atol=1e-7)
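The orthogonality trick works because vertically stacking two orthogonal matrices and dividing by sqrt(2) keeps the columns orthonormal: u^T u = (u1^T u1 + u2^T u2) / 2 = I, and each half on its own stays orthogonal up to the 1/sqrt(2) scale, so discarding all of one half's outputs preserves the structure. A quick NumPy check of the stacking step, a sketch independent of the OILMM code:

import numpy as np

m = 3
u1 = np.linalg.svd(np.random.randn(m, m))[0]
u2 = np.linalg.svd(np.random.randn(m, m))[0]
u = np.concatenate([u1, u2], axis=0) / np.sqrt(2)

print(np.allclose(u.T @ u, np.eye(m)))  # True: columns are orthonormal.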
Example #4
File: test_net.py Project: wesselb/wbml
def test_ff():
    vs = Vars(np.float32)

    nn = ff(10, (20, 30), normalise=True)
    nn.initialise(5, vs)
    x = B.randn(2, 3, 5)

    # Check number of weights and width.
    assert B.length(vs.get_vector()) == nn.num_weights(5)
    assert nn.width == 10

    # Test batch consistency.
    check_batch_consistency(nn, x)

    # Check composition.
    assert len(nn.layers) == 7
    assert type(nn.layers[0]) == Linear
    assert nn.layers[0].A.shape[0] == 5
    assert nn.layers[0].width == 20
    assert type(nn.layers[1]) == Activation
    assert nn.layers[1].width == 20
    assert type(nn.layers[2]) == Normalise
    assert nn.layers[2].width == 20
    assert type(nn.layers[3]) == Linear
    assert nn.layers[3].width == 30
    assert type(nn.layers[4]) == Activation
    assert nn.layers[4].width == 30
    assert type(nn.layers[5]) == Normalise
    assert nn.layers[5].width == 30
    assert type(nn.layers[6]) == Linear
    assert nn.layers[6].width == 10

    # Check that one-dimensional calls are okay.
    vs = Vars(np.float32)
    nn.initialise(1, vs)
    approx(nn(B.linspace(0, 1, 10)), nn(B.linspace(0, 1, 10)[:, None]))

    # Check that zero-dimensional calls fail.
    with pytest.raises(ValueError):
        nn(0)

    # Check normalisation layers disappear.
    assert len(ff(10, (20, 30), normalise=False).layers) == 5
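In other words, `ff(10, (20, 30), normalise=True)` stacks Linear → Activation → Normalise for each hidden width (20, then 30) and finishes with a Linear output layer of width 10, the first argument. With `normalise=False` the two Normalise layers are dropped, which is why five layers remain.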
Example #5
def test_infer_size():
    x = B.linspace(0, 2, 5)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)
    k = MultiOutputKernel(m, p1, p2)

    assert infer_size(k, x) == 10
    assert infer_size(k, p1(x)) == 5
    assert infer_size(k, (x, p1(x))) == 15
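The sizes follow from the kernel having two outputs: a plain input of 5 points is evaluated at both outputs (2 × 5 = 10), `p1(x)` pins the input to a single output (5), and a tuple of inputs sums the two cases (10 + 5 = 15).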
Example #6
def test_compare_ilmm():
    # Setup models.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    u, s_sqrt = B.svd(B.randn(3, 2))[:2]
    u = Dense(u)
    s_sqrt = Diagonal(s_sqrt)

    # Construct models.
    ilmm = ILMMPP(kernels, u @ s_sqrt, noise_obs, noises_latent)
    oilmm = OILMM(kernels, u, s_sqrt, noise_obs, noises_latent)

    # Construct data.
    x = B.linspace(0, 3, 5)
    y = ilmm.sample(x, latent=False)
    x2 = B.linspace(4, 7, 5)
    y2 = ilmm.sample(x2, latent=False)

    # Check LML before conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    ilmm = ilmm.condition(x, y)
    oilmm = oilmm.condition(x, y)

    # Check LML after conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    # Predict.
    means_pp, lowers_pp, uppers_pp = ilmm.predict(x2)
    means, lowers, uppers = oilmm.predict(x2)

    # Check predictions.
    approx(means_pp, means)
    approx(lowers_pp, lowers)
    approx(uppers_pp, uppers)
Example #7
def test_linspace(check_lazy_shapes):
    # Check correctness.
    approx(B.linspace(0, 1, 10), np.linspace(0, 1, 10, dtype=B.default_dtype))

    # Check consistency.
    check_function(
        B.linspace,
        (
            Value(np.float32, tf.float32, torch.float32, jnp.float32),
            Value(0),
            Value(10),
            Value(20),
        ),
    )
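Note the argument convention exercised here: `B.linspace` accepts an optional leading data type, so `B.linspace(0, 1, 10)` falls back to `B.default_dtype`, while the consistency check calls `B.linspace(dtype, start, stop, num)`. A minimal usage sketch:

import numpy as np
import lab as B

B.linspace(0, 1, 10)               # Falls back to `B.default_dtype`.
B.linspace(np.float32, 0, 10, 20)  # Data type leads the argument list.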
Example #8
def test_fdd_properties():
    p = GP(1, EQ())

    # Sample observations.
    x = B.linspace(0, 5, 5)
    y = p(x, 0.1).sample()

    # Compute posterior.
    p = p | (p(x, 0.1), y)

    fdd = p(B.linspace(0, 5, 10), 0.2)
    mean, var = fdd.mean, fdd.var

    # Check `var_diag`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.var_diag, B.diag(var))

    # Check `mean_var`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.mean_var, (mean, var))

    # Check `marginals()`.
    fdd = p(B.linspace(0, 5, 10), 0.2)
    approx(fdd.marginals(), (B.flatten(mean), B.diag(var)))
Example #9
def test_infer_size():
    x = B.linspace(0, 2, 5)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)
    k = MultiOutputKernel(m, p1, p2)

    assert infer_size(k, x) == 10
    assert infer_size(k, p1(x)) == 5
    assert infer_size(k, (x, p1(x))) == 15

    # Check that the dimensionality must be inferrable.
    assert infer_size(EQ(), x) == 5
    with pytest.raises(RuntimeError):
        infer_size(ADK(EQ()), x)
Example #10
def test_mom():
    x = B.linspace(0, 1, 10)

    prior = Measure()
    p1 = GP(lambda x: 2 * x, 1 * EQ(), measure=prior)
    p2 = GP(1, 2 * EQ().stretch(2), measure=prior)

    m = MultiOutputMean(prior, p1, p2)
    ms = prior.means

    # Check dimensionality.
    assert dimensionality(m) == 2

    # Check representation.
    assert str(m) == "MultiOutputMean(<lambda>, 1)"

    # Check computation.
    approx(m(x), B.concat(ms[p1](x), ms[p2](x), axis=0))
    approx(m(p1(x)), ms[p1](x))
    approx(m(p2(x)), ms[p2](x))
    approx(m((p2(x), p1(x))), B.concat(ms[p2](x), ms[p1](x), axis=0))
Example #11
def test_adk():
    # Test properties.
    assert ADK(EQ()).stationary
    assert not ADK(Linear()).stationary

    # Test printing.
    assert str(ADK(EQ())) == "EQ()"
    assert str(ADK(EQ()) + ADK(Linear())) == "EQ() + Linear()"
    assert str(ADK(EQ() + Linear())) == "EQ() + Linear()"

    # Test equality.
    assert ADK(EQ()) == ADK(EQ())
    assert ADK(EQ()) != ADK(Linear())

    # Test computation.
    x = B.linspace(0, 5, 10)
    approx(pairwise(ADK(EQ()), x), pairwise(EQ(), x))
    approx(elwise(ADK(EQ()), x), elwise(EQ(), x))

    # Check that the dimensionality resolves to `None`.
    assert dimensionality(EQ()) == 1
    assert dimensionality(ADK(EQ())) is None
Example #12
def test_fdd_take():
    with Measure():
        f1 = GP(1, EQ())
        f2 = GP(2, Exp())
        f = cross(f1, f2)

    x = B.linspace(0, 3, 5)
    # Build an FDD with a very complicated input specification.
    fdd = f((x, (f2(x), x), f1(x), (f2(x), (f1(x), x))))
    n = infer_size(fdd.p.kernel, fdd.x)
    fdd = f(fdd.x, matrix.Diagonal(B.rand(n)))

    # Flip a coin for every element.
    mask = B.randn(n) > 0
    taken_fdd = B.take(fdd, mask)

    approx(taken_fdd.mean, B.take(fdd.mean, mask))
    approx(taken_fdd.var, B.submatrix(fdd.var, mask))
    approx(taken_fdd.noise, B.submatrix(fdd.noise, mask))
    assert isinstance(taken_fdd.noise, matrix.Diagonal)

    # Test that only masks are supported, for now.
    with pytest.raises(AssertionError):
        B.take(fdd, np.array([1, 2]))
Example #13
import lab as B
import pytest

from stheno.input import Input, MultiInput
from stheno.kernel import EQ
from stheno.measure import Measure, GP
from stheno.mokernel import MultiOutputKernel
from .util import approx


@pytest.mark.parametrize(
    "x1",
    [B.linspace(0, 1, 10), Input(B.linspace(0, 1, 10))])
@pytest.mark.parametrize(
    "x2",
    [B.linspace(1, 2, 5), Input(B.linspace(1, 2, 5))])
@pytest.mark.parametrize(
    "x3",
    [B.linspace(1, 2, 10), Input(B.linspace(1, 2, 10))])
def test_mokernel(x1, x2, x3):
    m = Measure()
    p1 = GP(1 * EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k = MultiOutputKernel(m, p1, p2)
    ks = m.kernels

    # Check representation.
    assert str(k) == "MultiOutputKernel(EQ(), 2 * (EQ() > 2))"

    # Input versus input:
Example #14
        assert "cpu" in str(B.device(a)).lower()
        approx(B.to_active_device(a), a)

    # Check that numbers remain unchanged.
    a = 1
    assert B.to_active_device(a) is a


@pytest.mark.parametrize("t", [tf.float32, torch.float32, jnp.float32])
@pytest.mark.parametrize(
    "f",
    [
        lambda t: B.zeros(t, 2, 2),
        lambda t: B.ones(t, 2, 2),
        lambda t: B.eye(t, 2),
        lambda t: B.linspace(t, 0, 5, 10),
        lambda t: B.range(t, 10),
        lambda t: B.rand(t, 10),
        lambda t: B.randn(t, 10),
    ],
)
def test_on_device(f, t, check_lazy_shapes):
    f_t = f(t)  # Construct on the current and existing device.

    # Set the active device to something else.
    B.ActiveDevice.active_name = "previous"

    # Check that explicit allocation on CPU works.
    with B.on_device("cpu"):
        assert B.device(f(t)) == B.device(f_t)
Example #15
File: smk.py Project: wesselb/gpcm
import lab as B
from stheno import EQ, GP, Delta, Measure

from experiments.experiment import run, setup

args, wd = setup("smk")

# Setup experiment.
n = 881  # Add last one for `linspace`.
noise = 1.0
t = B.linspace(-44, 44, n)
t_plot = B.linspace(0, 10, 500)

# Setup true model and GPCM models.
kernel = EQ().stretch(1) * (lambda x: B.cos(2 * B.pi * x))
kernel = kernel + EQ().stretch(1) * (lambda x: B.sin(2 * B.pi * x))
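# Note: since cos(a) cos(b) + sin(a) sin(b) = cos(a - b), the sum of the two
# modulated kernels above collapses to EQ().stretch(1) times cos(2 pi (x - x')),
# i.e. a single spectral-mixture component at frequency 1 (hence "smk").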
window = 4
scale = 0.5
n_u = 40
n_z = 88

# Sample data.
m = Measure()
gp_f = GP(kernel, measure=m)
gp_y = gp_f + GP(noise * Delta(), measure=m)
truth, y = map(B.flatten, m.sample(gp_f(t_plot), gp_y(t)))

# Remove region [-8.8, 8.8].
inds = ~((t >= -8.8) & (t <= 8.8))
t = t[inds]
y = y[inds]
Example #16
def x():
    return B.linspace(0, 3, 5)
Example #17
    def __init__(
        self,
        scheme="structured",
        noise=5e-2,
        fix_noise=False,
        alpha=None,
        alpha_t=None,
        window=None,
        fix_window=False,
        lam=None,
        gamma=None,
        gamma_t=None,
        a=None,
        b=None,
        m_max=None,
        m_max_cap=150,
        n_z=None,
        scale=None,
        fix_scale=False,
        ms=None,
        n_u=None,
        n_u_cap=300,
        t_u=None,
        extend_t_z=None,
        t=None,
    ):
        AbstractGPCM.__init__(self, scheme)

        # Ensure that `t` is a vector.
        if t is not None:
            t = np.array(t)

        # Store whether to fix the length scale, window length, and noise.
        self.fix_scale = fix_scale
        self.fix_window = fix_window
        self.fix_noise = fix_noise

        # First initialise optimisable model parameters.
        if alpha is None:
            alpha = 1 / window

        if alpha_t is None:
            alpha_t = B.sqrt(2 * alpha)

        if lam is None:
            lam = 1 / scale

        self.noise = noise
        self.alpha = alpha
        self.alpha_t = alpha_t
        self.lam = lam

        # For convenience, also store the extent of the filter.
        self.extent = 4 / self.alpha

        # Then initialise fixed variables.
        if t_u is None:
            # Place inducing points until the filter is `exp(-pi) = 4.32%`.
            t_u_max = B.pi / self.alpha

            # `n_u` is required to initialise `t_u`.
            if n_u is None:
                # Set it to two inducing points per wiggle, multiplied by two to account
                # for the longer range.
                n_u = int(np.ceil(2 * 2 * window / scale))
                if n_u > n_u_cap:
                    warnings.warn(
                        f"Using {n_u} inducing points for the filter, which is too "
                        f"many. It is capped to {n_u_cap}.",
                        category=UserWarning,
                    )
                    n_u = n_u_cap

            t_u = B.linspace(0, t_u_max, n_u)

        if n_u is None:
            n_u = B.shape(t_u)[0]

        if a is None:
            a = B.min(t) - B.max(t_u)

        if b is None:
            b = B.max(t)

        # First, try to determine `m_max` from `n_z`.
        if m_max is None and n_z is not None:
            m_max = int(np.ceil(n_z / 2))

        if m_max is None:
            freq = 1 / scale
            m_max = int(np.ceil(freq * (b - a)))
            if m_max > m_max_cap:
                warnings.warn(
                    f"Using {m_max} inducing features, which is too "
                    f"many. It is capped to {m_max_cap}.",
                    category=UserWarning,
                )
                m_max = m_max_cap

        if ms is None:
            ms = B.range(2 * m_max + 1)

        self.a = a
        self.b = b
        self.m_max = m_max
        self.ms = ms
        self.n_z = len(ms)
        self.n_u = n_u
        self.t_u = t_u

        # Initialise dependent model parameters.
        if gamma is None:
            gamma = 1 / (2 * (self.t_u[1] - self.t_u[0]))

        if gamma_t is None:
            gamma_t = B.sqrt(2 * gamma)

        # Must ensure that `gamma < alpha`.
        self.gamma = min(gamma, self.alpha / 1.5)
        self.gamma_t = gamma_t
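To make these defaults concrete, a worked example with the illustrative choices window=2 and scale=0.5 (the numbers simply follow the formulas above):

# alpha   = 1 / window                   = 0.5
# alpha_t = sqrt(2 * alpha)              = 1.0
# lam     = 1 / scale                    = 2.0
# extent  = 4 / alpha                    = 8.0
# t_u_max = pi / alpha                   ≈ 6.28
# n_u     = ceil(2 * 2 * window / scale) = 16 inducing points for the filter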
Example #18
File: eq.py Project: wesselb/gpcm
import lab as B
from stheno import EQ, GP, Delta, Measure

from experiments.experiment import run, setup

args, wd = setup("eq")

# Setup experiment.
n = 881  # Add last one for `linspace`.
noise = 0.1
t = B.linspace(-44, 44, n)
t_plot = B.linspace(-44, 44, 500)

# Setup true model and GPCM models.
kernel = EQ()
window = 2
scale = 1
n_u = 40
n_z = 88

# Sample data.
m = Measure()
gp_f = GP(kernel, measure=m)
gp_y = gp_f + GP(noise * Delta(), measure=m)
truth, y = map(B.flatten, m.sample(gp_f(t_plot), gp_y(t)))

# Remove region [-8.8, 8.8].
inds = ~((t >= -8.8) & (t <= 8.8))
t = t[inds]
y = y[inds]
Example #19
    def __init__(
        self,
        scheme="structured",
        causal=False,
        noise=5e-2,
        fix_noise=False,
        alpha=None,
        alpha_t=None,
        window=None,
        fix_window=False,
        gamma=None,
        scale=None,
        fix_scale=False,
        omega=None,
        n_u=None,
        n_u_cap=300,
        t_u=None,
        n_z=None,
        n_z_cap=300,
        t_z=None,
        extend_t_z=False,
        t=None,
    ):
        AbstractGPCM.__init__(self, scheme)

        # Store whether this is the CGPCM instead of the GPCM.
        self.causal = causal

        # Store whether to fix the length scale, window length, and noise.
        self.fix_scale = fix_scale
        self.fix_window = fix_window
        self.fix_noise = fix_noise

        # Ensure that `t` is a vector.
        if t is not None:
            t = np.array(t)

        # First initialise optimisable model parameters.
        if alpha is None:
            alpha = scale_to_factor(window)

        if alpha_t is None:
            if causal:
                alpha_t = (8 * alpha / B.pi) ** 0.25
            else:
                alpha_t = (2 * alpha / B.pi) ** 0.25

        if gamma is None:
            gamma = scale_to_factor(scale) - 0.5 * alpha

        self.noise = noise
        self.alpha = alpha
        self.alpha_t = alpha_t
        self.gamma = gamma

        # For convenience, also store the extent of the filter.
        self.extent = 4 * factor_to_scale(self.alpha)

        # Then initialise fixed variables.
        if t_u is None:
            # Place inducing points until the filter is `exp(-pi) = 4.32%`.
            t_u_max = 2 * factor_to_scale(self.alpha)

            # `n_u` is required to initialise `t_u`.
            if n_u is None:
                # Set it to two inducing points per wiggle, multiplied by two to account
                # for both sides (acausal model) or the longer range (causal model).
                n_u = int(np.ceil(2 * 2 * window / scale))
                if n_u > n_u_cap:
                    warnings.warn(
                        f"Using {n_u} inducing points for the filter, which is too "
                        f"many. It is capped to {n_u_cap}.",
                        category=UserWarning,
                    )
                    n_u = n_u_cap

            # For the causal case, only need inducing points on the right side.
            if causal:
                d_t_u = t_u_max / (n_u - 1)
                n_u += 2
                t_u = B.linspace(-2 * d_t_u, t_u_max, n_u)
            else:
                if n_u % 2 == 0:
                    n_u += 1
                t_u = B.linspace(-t_u_max, t_u_max, n_u)

        if n_u is None:
            n_u = B.shape(t_u)[0]

        if t_z is None:
            if n_z is None:
                # Use two inducing points per wiggle.
                n_z = int(np.ceil(2 * (max(t) - min(t)) / scale))
                if n_z > n_z_cap:
                    warnings.warn(
                        f"Using {n_z} inducing points, which is too "
                        f"many. It is capped to {n_z_cap}.",
                        category=UserWarning,
                    )
                    n_z = n_z_cap

            t_z = B.linspace(min(t), max(t), n_z)

        if n_z is None:
            n_z = B.shape(t_z)[0]

        if extend_t_z:
            # See above.
            t_z_extra = 2 * factor_to_scale(self.alpha)

            d_t_u = (max(t) - min(t)) / (n_z - 1)
            n_z_extra = int(np.ceil(t_z_extra / d_t_u))
            t_z_extra = n_z_extra * d_t_u  # Make it align exactly.

            # For the causal case, only need inducing points on the left side.
            if causal:
                n_z += n_z_extra
                t_z = B.linspace(min(t) - t_z_extra, max(t), n_z)
            else:
                n_z += 2 * n_z_extra
                t_z = B.linspace(min(t) - t_z_extra, max(t) + t_z_extra, n_z)

        self.n_u = n_u
        self.t_u = t_u
        self.n_z = n_z
        self.t_z = t_z

        # Initialise dependent optimisable model parameters.
        if omega is None:
            omega = scale_to_factor(0.5 * (self.t_z[1] - self.t_z[0]))

        # Fix variance of inter-domain process to one.
        omega_t = (2 * omega / B.pi) ** 0.25

        self.omega = omega
        self.omega_t = omega_t
Example #20
import pytest
import lab as B

from stheno.measure import Measure, GP
from stheno.input import Input, MultiInput
from stheno.kernel import EQ
from stheno.momean import MultiOutputMean
from .util import approx


@pytest.mark.parametrize("x", [B.linspace(0, 1, 10), Input(B.linspace(0, 1, 10))])
def test_momean(x):
    prior = Measure()
    p1 = GP(lambda x: 2 * x, 1 * EQ(), measure=prior)
    p2 = GP(1, 2 * EQ().stretch(2), measure=prior)

    m = MultiOutputMean(prior, p1, p2)
    ms = prior.means

    # Check representation.
    assert str(m) == "MultiOutputMean(<lambda>, 1)"

    # Check computation.
    approx(m(x), B.concat(ms[p1](x), ms[p2](x), axis=0))
    approx(m(p1(x)), ms[p1](x))
    approx(m(p2(x)), ms[p2](x))
    approx(m(MultiInput(p2(x), p1(x))), B.concat(ms[p2](x), ms[p1](x), axis=0))
Example #21
import lab as B
import wbml.out as out
from slugify import slugify
from stheno import EQ, CEQ, Exp, GP, Delta
from wbml.experiment import WorkingDirectory

from gpcm import GPCM, CGPCM, RGPCM

# Setup script.
out.report_time = True
B.epsilon = 1e-8
wd = WorkingDirectory("_experiments", "comparison")

# Setup experiment.
noise = 1.0
t = B.linspace(0, 40, 400)
t_k = B.linspace(0, 4, 200)

# Setup GPCM models.
window = 2
scale = 0.5
n_u = 30
n_z = 80

for kernel, model_constructor in [
    (
        EQ(),
        lambda scheme: GPCM(
            scheme=scheme,
            window=window,
            scale=scale,
Example #22
File: smk.py Project: wesselb/gpcm
from gpcm.util import estimate_psd

# Setup script.
out.report_time = True
B.epsilon = 1e-8
tex()
wd = WorkingDirectory("_experiments", "smk")

# Parse arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--train", action="store_true")
args = parser.parse_args()

# Setup experiment.
noise = 0.1
t = B.linspace(0, 40, 200)
t_k = B.linspace(0, 4, 200)

# Setup GPCM models.
window = 2
scale = 0.25
n_u = 80
n_z = 80

# Sample data.
kernel = (lambda x: B.sin(B.pi * x)) * EQ() + (lambda x: B.cos(B.pi * x)) * EQ()
y = B.flatten(GP(kernel)(t, noise).sample())
k = B.flatten(kernel(t_k, 0))

Example #23
def test_mok():
    x1 = B.linspace(0, 1, 10)
    x2 = B.linspace(1, 2, 5)
    x3 = B.linspace(1, 2, 10)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k = MultiOutputKernel(m, p1, p2)
    ks = m.kernels

    # Check dimensionality.
    assert dimensionality(k) == 2

    # Check representation.
    assert str(k) == "MultiOutputKernel(EQ(), 2 * (EQ() > 2))"

    # Input versus input:
    approx(
        k(x1, x2),
        B.concat2d(
            [ks[p1, p1](x1, x2), ks[p1, p2](x1, x2)],
            [ks[p2, p1](x1, x2), ks[p2, p2](x1, x2)],
        ),
    )
    approx(
        k.elwise(x1, x3),
        B.concat(ks[p1, p1].elwise(x1, x3), ks[p2, p2].elwise(x1, x3), axis=0),
    )

    # Input versus `FDD`:
    approx(k(p1(x1), x2), B.concat(ks[p1, p1](x1, x2), ks[p1, p2](x1, x2), axis=1))
    approx(k(p2(x1), x2), B.concat(ks[p2, p1](x1, x2), ks[p2, p2](x1, x2), axis=1))
    approx(k(x1, p1(x2)), B.concat(ks[p1, p1](x1, x2), ks[p2, p1](x1, x2), axis=0))
    approx(k(x1, p2(x2)), B.concat(ks[p1, p2](x1, x2), ks[p2, p2](x1, x2), axis=0))
    with pytest.raises(ValueError):
        k.elwise(x1, p2(x3))
    with pytest.raises(ValueError):
        k.elwise(p1(x1), x3)

    # `FDD` versus `FDD`:
    approx(k(p1(x1), p1(x2)), ks[p1](x1, x2))
    approx(k(p1(x1), p2(x2)), ks[p1, p2](x1, x2))
    approx(k.elwise(p1(x1), p1(x3)), ks[p1].elwise(x1, x3))
    approx(k.elwise(p1(x1), p2(x3)), ks[p1, p2].elwise(x1, x3))

    # Multiple inputs versus input:
    approx(
        k((p2(x1), p1(x2)), x1),
        B.concat2d(
            [ks[p2, p1](x1, x1), ks[p2, p2](x1, x1)],
            [ks[p1, p1](x2, x1), ks[p1, p2](x2, x1)],
        ),
    )
    approx(
        k(x1, (p2(x1), p1(x2))),
        B.concat2d(
            [ks[p1, p2](x1, x1), ks[p1, p1](x1, x2)],
            [ks[p2, p2](x1, x1), ks[p2, p1](x1, x2)],
        ),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), (p2(x1), p1(x3)))

    # Multiple inputs versus `FDD`:
    approx(
        k((p2(x1), p1(x2)), p2(x1)),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    approx(
        k(p2(x1), (p2(x1), p1(x2))),
        B.concat(ks[p2, p2](x1, x1), ks[p2, p1](x1, x2), axis=1),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), (p2(x1), p1(x3)))

    # Multiple inputs versus multiple inputs:
    approx(
        k((p2(x1), p1(x2)), (p2(x1),)),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), (p2(x1),))
    approx(
        k.elwise((p2(x1), p1(x3)), (p2(x1), p1(x3))),
        B.concat(ks[p2, p2].elwise(x1, x1), ks[p1, p1].elwise(x3, x3), axis=0),
    )
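The `B.concat2d` calls above assemble a grid of blocks, row by row, into a single matrix; NumPy's `np.block` produces the same layout and serves as a stand-in sketch:

import numpy as np

a = np.ones((2, 2))
b = np.zeros((2, 1))
c = np.zeros((1, 2))
d = np.ones((1, 1))

# Each row list is concatenated horizontally; the rows are then stacked
# vertically, matching the row-list arguments of `B.concat2d`.
print(np.block([[a, b], [c, d]]).shape)  # (3, 3)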
Example #24
    def objective_wrapped(vs_, *args):
        return objective_vectorised(vs_.get_latent_vector(), *args)

    return objective_wrapped


# Setup script.
out.report_time = True
B.epsilon = 1e-8
tex()
wd = WorkingDirectory("_experiments", "compare_inference")

# Setup experiment.
noise = 0.5
t = B.linspace(0, 20, 500)

# Setup GPCM models.
window = 2
scale = 1
n_u = 40
n_z = 40

# Sample data.
kernel = EQ()
y = B.flatten(GP(kernel)(t, noise).sample())
gp_logpdf = GP(kernel)(t, noise).logpdf(y)


# Run the original mean-field scheme.
Example #25
# Year 2014 needs extra stability.
if args.year == 2014:
    B.epsilon = 1e-6
else:
    B.epsilon = 1e-8
tex()
wd = WorkingDirectory("_experiments", "crude_oil", str(args.year))

# Load and process data.
data = load()
lower = datetime(args.year, 1, 1)
upper = datetime(args.year + 1, 1, 1)
data = data[(lower <= data.index) & (data.index < upper)]
t = np.array([(ti - lower).days for ti in data.index], dtype=float)
y = np.array(data.open)
t_pred = B.linspace(min(t), max(t), 500)

# Split data.
test_inds = np.empty(t.shape, dtype=bool)
test_inds.fill(False)
for lower, upper in [
    (
        datetime(args.year, 1, 1) + i * timedelta(weeks=1),
        datetime(args.year, 1, 1) + (i + 1) * timedelta(weeks=1),
    )
    for i in range(26, 53)
    if i % 2 == 1
]:
    lower_mask = lower <= data.index
    upper_mask = upper > data.index
    test_inds = test_inds | (lower_mask & upper_mask)
t_train = t[~test_inds]
y_train = y[~test_inds]
t_test = t[test_inds]
y_test = y[test_inds]
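The comprehension above marks the odd-numbered weeks 27, 29, ..., 51, i.e. alternating weeks in the second half of the year, as the test set; the remaining observations form the training set.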
Example #26
    def orthogonal(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        method="svd",
    ):
        init, shape = _check_init_shape(init, shape)

        if method == "svd":
            _check_matrix_shape(shape, square=False)
            n, m = shape
            shape_latent = (n, m)

            # Fix singular values.
            sing_vals = B.linspace(self._resolve_dtype(dtype), 1, 2, min(n, m))

            def transform(x):
                u, s, v = B.svd(x)
                # u * v' is the closest orthogonal matrix to x in Frobenius norm.
                return B.matmul(u, v, tr_b=True)

            def inverse_transform(x):
                if n >= m:
                    return x * sing_vals[None, :]
                else:
                    return x * sing_vals[:, None]

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(mat)

        elif method == "expm":
            _check_matrix_shape(shape)
            side = shape[0]
            shape_latent = (int(side * (side + 1) / 2 - side), )

            def transform(x):
                tril = B.vec_to_tril(x, offset=-1)
                skew = tril - B.transpose(tril)
                return B.expm(skew)

            def inverse_transform(x):
                return B.tril_to_vec(B.logm(x), offset=-1)

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(B.tril_to_vec(mat, offset=-1))

        elif method == "cayley":
            _check_matrix_shape(shape)
            side = shape[0]
            shape_latent = (int(side * (side + 1) / 2 - side), )

            def transform(x):
                tril = B.vec_to_tril(x, offset=-1)
                skew = tril - B.transpose(tril)
                eye = B.eye(skew)
                return B.solve(eye + skew, eye - skew)

            def inverse_transform(x):
                eye = B.eye(x)
                skew = B.solve(eye + x, eye - x)
                return B.tril_to_vec(skew, offset=-1)

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(B.tril_to_vec(mat, offset=-1))

        else:
            raise ValueError(f'Unknown parametrisation "{method}".')

        return self._get_var(
            transform=transform,
            inverse_transform=inverse_transform,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape_latent,
            dtype=dtype,
            name=name,
        )
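As a sanity check on the `cayley` branch: the Cayley transform (I + S)^{-1} (I - S) of any skew-symmetric matrix S is orthogonal, because I - S and I + S commute. A small NumPy sketch, independent of varz:

import numpy as np

x = np.random.randn(4, 4)
tril = np.tril(x, k=-1)
skew = tril - tril.T  # Skew-symmetric: skew.T == -skew.
eye = np.eye(4)

# The Cayley transform, mirroring `transform` above.
q = np.linalg.solve(eye + skew, eye - skew)
print(np.allclose(q.T @ q, eye))  # True: `q` is orthogonal.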