Example 1
def test_natural_normal():
    chol = B.randn(2, 2)
    dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-1))
    nat = NaturalNormal.from_normal(dist)

    # Test properties.
    assert dist.dtype == nat.dtype
    for name in ["dim", "mean", "var", "m2"]:
        approx(getattr(dist, name), getattr(nat, name))

    # Test sampling.
    state = B.create_random_state(dist.dtype, seed=0)
    state, sample = nat.sample(state, num=1_000_000)
    emp_mean = B.mean(B.dense(sample), axis=1, squeeze=False)
    emp_var = (sample - emp_mean) @ (sample - emp_mean).T / 1_000_000
    approx(dist.mean, emp_mean, rtol=5e-2)
    approx(dist.var, emp_var, rtol=5e-2)

    # Test KL.
    chol = B.randn(2, 2)
    other_dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-2))
    other_nat = NaturalNormal.from_normal(other_dist)
    approx(dist.kl(other_dist), nat.kl(other_nat))

    # Test log-pdf.
    x = B.randn(2, 1)
    approx(dist.logpdf(x), nat.logpdf(x))
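For reference, the natural (information-form) parameterization that `NaturalNormal` presumably uses rewrites N(mu, Sigma) in terms of the precision matrix:

    \lambda = \Sigma^{-1}\mu, \qquad \Lambda = \Sigma^{-1}, \qquad \text{so} \quad \mu = \Lambda^{-1}\lambda, \quad \Sigma = \Lambda^{-1}.

The test then checks that moments, samples, KL divergences, and log-densities are invariant under this reparameterization.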
Example 2
def test_logpdf_missing_data():
    # Setup model.
    m = 3
    noise = 1e-2
    latent_noises = 2e-2 * B.ones(m)
    kernels = [0.5 * EQ().stretch(0.75) for _ in range(m)]
    x = B.linspace(0, 10, 20)

    # Concatenate two orthogonal matrices to make the missing-data
    # approximation exact.
    u1 = B.svd(B.randn(m, m))[0]
    u2 = B.svd(B.randn(m, m))[0]
    u = Dense(B.concat(u1, u2, axis=0) / B.sqrt(2))

    s_sqrt = Diagonal(B.rand(m))

    # Construct a reference model.
    oilmm_pp = ILMMPP(kernels, u @ s_sqrt, noise, latent_noises)

    # Sample to generate test data.
    y = oilmm_pp.sample(x, latent=False)

    # Throw away data, but retain orthogonality.
    y[5:10, 3:] = np.nan
    y[10:, :3] = np.nan

    # Construct OILMM to test.
    oilmm = OILMM(kernels, u, s_sqrt, noise, latent_noises)

    # Check that evidence is still exact.
    approx(oilmm_pp.logpdf(x, y), oilmm.logpdf(x, y), atol=1e-7)
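Why this construction keeps the evidence exact: stacking two orthogonal m-by-m blocks and scaling by 1/sqrt(2) gives a matrix with orthonormal columns, and the missing-data pattern above only ever removes one complete block per time chunk. A minimal NumPy sketch of the orthogonality property (illustrative, not part of the test suite):

import numpy as np

m = 3
u1, _ = np.linalg.qr(np.random.randn(m, m))  # orthogonal m x m block
u2, _ = np.linalg.qr(np.random.randn(m, m))  # another orthogonal block
u = np.concatenate([u1, u2], axis=0) / np.sqrt(2)
# The stacked 2m x m matrix still has orthonormal columns.
assert np.allclose(u.T @ u, np.eye(m))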
Example 3
def test_normal1d_logpdf():
    means = B.randn(3, 3)
    covs = B.randn(3, 3) ** 2
    x = B.randn(3, 3)
    logpdfs = normal1d_logpdf(x, covs, means)
    for i in range(3):
        for j in range(3):
            dist = Normal(means[i : i + 1, j : j + 1], covs[i : i + 1, j : j + 1])
            approx(logpdfs[i, j], dist.logpdf(x[i, j]))
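The elementwise reference here is the standard univariate Gaussian log-density:

    \log \mathcal{N}(x; \mu, \sigma^2) = -\tfrac{1}{2}\log(2\pi\sigma^2) - \frac{(x - \mu)^2}{2\sigma^2},

which `normal1d_logpdf` is checked to evaluate independently for every entry of the input arrays.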
Example 4
def test_packer():
    a, b, c = B.randn(5, 10), B.randn(20), B.randn(5, 1, 15)

    for packer, args in zip(
        [Packer(a, b, c), Packer([a, b, c])],
        [(a, b, c), ((a, b, c),)],
    ):
        # Test packing.
        packed = packer.pack(*args)
        assert B.rank(packed) == 1

        # Test unpacking.
        a_, b_, c_ = packer.unpack(packed)
        allclose(a, a_)
        allclose(b, b_)
        allclose(c, c_)
Example 5
def test_zero_one(f, check_lazy_shapes):
    # Check consistency.
    check_function(
        f,
        (Value(np.float32, tf.float32, torch.float32, jnp.float32),),
    )

    # Check reference calls.
    for t32, t64 in [
        (np.float32, np.float64),
        (tf.float32, tf.float64),
        (torch.float32, torch.float64),
        (jnp.float32, jnp.float64),
    ]:
        assert B.dtype(f(B.randn(t32))) is t32
        assert B.dtype(f(B.randn(t64))) is t64
        assert B.dtype(f(B.randn(t32), B.randn(t64))) is t64
Example 6
def test_device(t, FWDevice, check_lazy_shapes):
    a = B.randn(t, 2, 2)
    assert isinstance(B.device(a), FWDevice)
    assert isinstance(B.device(a), B.Device)

    # Test conversion to string.
    assert isinstance(convert(B.device(a), str), str)
Example 7
def test_pack_unpack():
    a, b, c = B.randn(5, 10), B.randn(20), B.randn(5, 1, 15)

    # Test packing.
    package = pack(a, b, c)
    assert B.rank(package) == 1

    # Test unpacking.
    a2, b2, c2 = unpack(package, B.shape(a), B.shape(b), B.shape(c))
    approx(a, a2)
    approx(b, b2)
    approx(c, c2)

    # Check that the package must be a vector.
    with pytest.raises(ValueError):
        unpack(B.randn(2, 2), (2, 2))
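A minimal sketch of the packing semantics pinned down by this test, assuming `pack` flattens and concatenates and `unpack` splits and reshapes (illustrative NumPy, not the library implementation):

import numpy as np

def pack_sketch(*tensors):
    # Flatten every tensor and concatenate into one rank-1 vector.
    return np.concatenate([np.ravel(t) for t in tensors])

def unpack_sketch(vector, *shapes):
    # Split by element counts, then restore the original shapes.
    sizes = [int(np.prod(shape)) for shape in shapes]
    chunks = np.split(vector, np.cumsum(sizes)[:-1])
    return [np.reshape(chunk, shape) for chunk, shape in zip(chunks, shapes)]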
Example 8
def test_take_x():
    m = Measure()
    f1 = GP(EQ())
    f2 = GP(EQ())
    k = MultiOutputKernel(m, f1)
    with pytest.raises(ValueError):
        _take_x(k, f2(B.linspace(0, 1, 10)), B.randn(10) > 0)
Example 9
def test_resolve_axis(check_lazy_shapes):
    a = B.randn(2, 2, 2)

    # `None`s should just pass through.
    assert resolve_axis(a, None) is None

    # Test `negative = False`.
    with pytest.raises(ValueError):
        resolve_axis(a, -4)
    assert resolve_axis(a, -3) == 0
    assert resolve_axis(a, -2) == 1
    assert resolve_axis(a, -1) == 2
    assert resolve_axis(a, 0) == 0
    assert resolve_axis(a, 1) == 1
    assert resolve_axis(a, 2) == 2
    with pytest.raises(ValueError):
        resolve_axis(a, 3)

    # Test `negative = True`.
    with pytest.raises(ValueError):
        resolve_axis(a, -4, negative=True)
    assert resolve_axis(a, -3, negative=True) == -3
    assert resolve_axis(a, -2, negative=True) == -2
    assert resolve_axis(a, -1, negative=True) == -1
    assert resolve_axis(a, 0, negative=True) == -3
    assert resolve_axis(a, 1, negative=True) == -2
    assert resolve_axis(a, 2, negative=True) == -1
    with pytest.raises(ValueError):
        resolve_axis(a, 3, negative=True)
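A sketch of the semantics these assertions pin down, assuming `resolve_axis` normalises an axis index against the tensor's rank (the helper below is illustrative, not the library code):

def resolve_axis_sketch(rank, axis, negative=False):
    # `None` passes through untouched.
    if axis is None:
        return None
    # Reject indices outside [-rank, rank).
    if not -rank <= axis < rank:
        raise ValueError(f"Axis {axis} is invalid for a rank-{rank} tensor.")
    positive = axis % rank  # map into [0, rank)
    return positive - rank if negative else positive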
Example 10
def test_normal_logpdf(normal1):
    normal1_sp = multivariate_normal(normal1.mean[:, 0], B.dense(normal1.var))
    x = B.randn(3, 10)
    approx(normal1.logpdf(x), normal1_sp.logpdf(x.T))

    # Test that the output of `logpdf` is flattened appropriately.
    assert B.shape(normal1.logpdf(B.ones(3, 1))) == ()
    assert B.shape(normal1.logpdf(B.ones(3, 2))) == (2,)
Example 11
def test_jit_to_numpy(check_lazy_shapes):
    @B.jit
    def f(x):
        available = B.jit_to_numpy(~B.isnan(x))
        return B.sum(x[available])

    x = B.sqrt(B.randn(100))
    approx(f(x), f(jnp.array(x)))
Example 12
def test_normal_logpdf_missing_data(normal1):
    x = B.randn(3, 1)
    x[1] = B.nan
    approx(
        normal1.logpdf(x),
        Normal(
            normal1.mean[[0, 2]],
            normal1.var[[0, 2], :][:, [0, 2]],
        ).logpdf(x[[0, 2]]),
    )
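This works because marginals of a multivariate Gaussian are read off by slicing: for jointly Gaussian observed and missing parts (x_o, x_m),

    \begin{pmatrix} x_o \\ x_m \end{pmatrix} \sim \mathcal{N}\!\left( \begin{pmatrix} \mu_o \\ \mu_m \end{pmatrix}, \begin{pmatrix} \Sigma_{oo} & \Sigma_{om} \\ \Sigma_{mo} & \Sigma_{mm} \end{pmatrix} \right) \implies x_o \sim \mathcal{N}(\mu_o, \Sigma_{oo}),

so indexing the mean and covariance with the observed indices, as above, gives the exact log-density of the observed entries.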
Example 13
def sample(a, num=1):  # pragma: no cover
    """Sample from covariance matrices.

    Args:
        a (tensor): Covariance matrix to sample from.
        num (int): Number of samples.

    Returns:
        tensor: Samples as the columns of a rank-2 tensor.
    """
    chol = B.cholesky(a)
    return B.matmul(chol, B.randn(B.dtype_float(a), B.shape(chol)[1], num))
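The helper relies on the standard Cholesky sampling identity: if \Sigma = LL^\top and z has independent standard-normal entries, then

    z \sim \mathcal{N}(0, I) \implies \operatorname{Cov}(Lz) = L \operatorname{Cov}(z) L^\top = LL^\top = \Sigma,

so multiplying the Cholesky factor by white noise yields samples with the requested covariance.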
Example 14
def test_normal_mean_is_zero():
    # Check zero case.
    dist = Normal(B.eye(3))
    assert dist.mean_is_zero
    approx(dist.mean, B.zeros(3, 1))

    # Check another zero case.
    dist = Normal(Zero(np.float32, 3, 1), B.eye(3))
    assert dist.mean_is_zero
    approx(dist.mean, B.zeros(3, 1))

    # Check nonzero case.
    assert not Normal(B.randn(3, 1), B.eye(3)).mean_is_zero
Example 15
def test_zeros_ones_eye(f, check_lazy_shapes):
    # Check consistency.
    check_function(
        f,
        (
            Value(np.float32, tf.float32, torch.float32, jnp.float32),
            Value(2),
            Value(3),
        ),
    )

    # Check shape of calls.
    assert B.shape(f(2)) == ((2, 2) if f is B.eye else (2,))
    assert B.shape(f(2, 3)) == (2, 3)
    assert B.shape(f(2, 3, 4)) == (2, 3, 4)

    # Check type of calls.
    assert B.dtype(f(2)) == B.default_dtype
    assert B.dtype(f(2, 3)) == B.default_dtype
    assert B.dtype(f(2, 3, 4)) == B.default_dtype

    # Specify a data type:
    for t1, t2 in [
        (np.float32, np.int64),
        (tf.float32, tf.int64),
        (torch.float32, torch.int64),
        (jnp.float32, jnp.int64),
    ]:
        # Check shape of calls.
        assert B.shape(f(t2, 2)) == ((2, 2) if f is B.eye else (2,))
        assert B.shape(f(t2, 2, 3)) == (2, 3)
        assert B.shape(f(t2, 2, 3, 4)) == (2, 3, 4)

        # Check type of calls.
        assert B.dtype(f(t2, 2)) is t2
        assert B.dtype(f(t2, 2, 3)) is t2
        assert B.dtype(f(t2, 2, 3, 4)) is t2

        # Check reference calls.
        for ref in [B.randn(t1, 4, 5), B.randn(t1, 3, 4, 5)]:
            assert B.shape(f(ref)) == B.shape(ref)
            assert B.dtype(f(ref)) is t1
Example 16
def construct_oilmm():
    # Setup model.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    u, s_sqrt = B.svd(B.randn(3, 2))[:2]
    u = Dense(u)
    s_sqrt = Diagonal(s_sqrt)

    def construct(noise_amplification=1):
        noise_obs = noise_amplification
        noises_latent = np.array([0.1, 0.2]) * noise_amplification
        return OILMM(kernels, u, s_sqrt, noise_obs, noises_latent)

    return construct
Example 17
def test_recurrent():
    vs = Vars(np.float32)

    # Test setting the initial hidden state.
    layer = Recurrent(GRU(10), B.zeros(1, 10))
    layer.initialise(5, vs)
    approx(layer.h0, B.zeros(1, 10))

    layer = Recurrent(GRU(10))
    layer.initialise(5, vs)
    assert layer.h0 is not None

    # Check batch consistency.
    check_batch_consistency(layer, B.randn(30, 20, 5))

    # Test preservation of rank upon calls.
    assert B.shape(layer(B.randn(20, 5))) == (20, 10)
    assert B.shape(layer(B.randn(30, 20, 5))) == (30, 20, 10)

    # Check that zero-dimensional calls fail.
    with pytest.raises(ValueError):
        layer(0)
Example 18
def test_activation():
    layer = Activation()
    x = B.randn(10, 5, 3)

    # Check number of weights and width.
    assert layer.num_weights(10) == 0
    assert layer.width == 10

    # Check initialisation and width.
    layer.initialise(3, None)
    assert layer.width == 3

    # Check correctness.
    approx(layer(x), B.relu(x))
Example 19
def test_isabstract_true(t, check_lazy_shapes):
    tracked = []

    @B.jit
    def f(x):
        tracked.append(B.isabstract(x))
        return B.sum(x)

    f(B.randn(t, 2, 2))

    # The first call should run concretely.
    assert not tracked[0]
    # In subsequent runs, at least one call should be abstract.
    assert any(tracked[1:])
Example 20
def test_rnn():
    for final_dense, gru, nn in [
        (True, False,
         rnn(10, (20, 30), normalise=True, gru=False, final_dense=True)),
        (False, True,
         rnn(10, (20, 30), normalise=True, gru=True, final_dense=False)),
    ]:
        vs = Vars(np.float32)
        nn.initialise(5, vs)
        x = B.randn(2, 3, 5)

        # Check number of weights and width.
        assert B.length(vs.get_vector()) == nn.num_weights(5)
        assert nn.width == 10

        # Test batch consistency.
        check_batch_consistency(nn, x)

        # Check composition.
        assert len(nn.layers) == (9 if final_dense else 7)
        assert type(nn.layers[0]) == Recurrent
        assert type(nn.layers[0].cell) == (GRU if gru else Elman)
        assert nn.layers[0].width == 20
        assert type(nn.layers[1]) == Activation
        assert nn.layers[1].width == 20
        assert type(nn.layers[2]) == Normalise
        assert nn.layers[2].width == 20
        assert type(nn.layers[3]) == Recurrent
        assert type(nn.layers[3].cell) == (GRU if gru else Elman)
        assert nn.layers[3].width == 30
        assert type(nn.layers[4]) == Activation
        assert nn.layers[4].width == 30
        assert type(nn.layers[5]) == Normalise
        assert nn.layers[5].width == 30
        if final_dense:
            assert type(nn.layers[6]) == Linear
            assert nn.layers[6].width == 10
            assert type(nn.layers[7]) == Activation
            assert nn.layers[7].width == 10
            assert type(nn.layers[8]) == Linear
            assert nn.layers[8].width == 10
        else:
            assert type(nn.layers[6]) == Linear
            assert nn.layers[6].width == 10

    # Check that normalisation layers disappear.
    nn = rnn(10, (20, 30), normalise=False, gru=True, final_dense=False)
    assert len(nn.layers) == 5
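The layer counts asserted above follow from the composition rule: each of the two hidden widths contributes Recurrent + Activation, plus Normalise when `normalise=True`, and the head is Linear + Activation + Linear when `final_dense=True` versus a single Linear otherwise:

# Illustrative arithmetic behind the assertions above.
assert 2 * 3 + 3 == 9  # normalise=True, final_dense=True
assert 2 * 3 + 1 == 7  # normalise=True, final_dense=False
assert 2 * 2 + 1 == 5  # normalise=False, final_dense=False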
Example 21
def test_noise_as_matrix():
    def check(noise, dtype, n, asserted_type):
        noise = _noise_as_matrix(noise, dtype, n)
        assert isinstance(noise, asserted_type)
        assert B.dtype(noise) == dtype
        assert B.shape(noise) == (n, n)

    check(None, int, 5, matrix.Zero)
    check(None, float, 5, matrix.Zero)
    check(1, np.int64, 5, matrix.Diagonal)
    check(1.0, np.float64, 5, matrix.Diagonal)
    check(B.ones(int, 5), np.int64, 5, matrix.Diagonal)
    check(B.ones(float, 5), np.float64, 5, matrix.Diagonal)
    check(matrix.Dense(B.ones(int, 5, 5)), np.int64, 5, matrix.Dense)
    check(matrix.Dense(B.randn(float, 5, 5)), np.float64, 5, matrix.Dense)
Example 22
def test_normalise():
    layer = Normalise(epsilon=0)
    x = B.randn(10, 5, 3)

    # Check number of weights and width.
    assert layer.num_weights(10) == 0
    assert layer.width == 10

    # Check initialisation and width.
    layer.initialise(3, None)
    assert layer.width == 3

    # Check correctness.
    out = layer(x)
    approx(B.std(out, axis=2), B.ones(10, 5), rtol=1e-4)
    approx(B.mean(out, axis=2), B.zeros(10, 5), atol=1e-4)
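These checks pin down the usual normalisation transform, which `Normalise` presumably applies along the last axis:

    y = \frac{x - \operatorname{mean}(x)}{\operatorname{std}(x) + \epsilon},

so with \epsilon = 0 the output has zero mean and unit standard deviation along that axis, exactly as asserted.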
Example 23
    def sample(self, x, latent=False):
        """Sample from the model.

        Args:
            x (matrix): Locations to sample at.
            latent (bool, optional): Sample noiseless processes. Defaults
                to `False`.

        Returns:
            matrix: Sample.
        """
        sample = B.dense(
            B.matmul(self.model.sample(x, latent=latent), self.h, tr_b=True)
        )
        if not latent:
            sample = sample + B.sqrt(self.noise_obs) * B.randn(sample)
        return sample
Example 24
def test_two_arg(name):
    metric = getattr(wbml.metric, name)

    # Test scalar usage.
    assert isinstance(metric(1, B.randn(10)), B.Number)

    # Test series usage.
    assert isinstance(metric(B.randn(10), B.randn(10)), B.Number)

    # Test matrix usage.
    assert isinstance(metric(B.randn(10, 10), B.randn(10, 10)), pd.Series)

    # Check that higher-order tensors fail.
    with pytest.raises(ValueError):
        metric(B.randn(10, 10, 10), B.randn(10, 10, 10))
Example 25
def test_boxing_autograd():
    k = EQ()
    objs = []

    def objective(x):
        x = x + 2
        x = x * 2
        objs.append(x + k)
        objs.append(x + k(x))
        objs.append(x * k)
        objs.append(x * k(x))
        return B.sum(x)

    grad(objective)(B.randn(10))

    for obj in objs:
        assert isinstance(obj, Element)
Example 26
def test_take_consistency(check_lazy_shapes):
    # Check consistency between indices and mask.
    check_function(
        B.take,
        (Matrix(3, 3), Value([0, 1], [True, True, False])),
        {"axis": Value(0, 1, -1)},
    )

    # Test PyTorch separately, because it has a separate implementation for framework
    # masks or indices.
    for indices_or_mask in [
        torch.tensor([True, True, False], dtype=torch.bool),
        torch.tensor([0, 1], dtype=torch.int32),
        torch.tensor([0, 1], dtype=torch.int64),
    ]:
        a = B.randn(torch.float32, 3, 3)
        approx(B.take(a, indices_or_mask), a[[0, 1]])
Example 27
def test_noise_as_matrix():
    def check(noise, dtype, n, asserted_type):
        noise = _noise_as_matrix(noise, dtype, n)
        assert isinstance(noise, asserted_type)
        assert B.dtype(noise) == dtype
        assert B.shape(noise) == (n, n)

    # Check that both a plain integer and a `Dimension` work for `n`.
    for d in [5, Dimension(5)]:
        check(None, int, d, matrix.Zero)
        check(None, float, d, matrix.Zero)
        check(1, np.int64, d, matrix.Diagonal)
        check(1.0, np.float64, d, matrix.Diagonal)
        check(B.ones(int, 5), np.int64, d, matrix.Diagonal)
        check(B.ones(float, 5), np.float64, d, matrix.Diagonal)
        check(matrix.Dense(B.ones(int, 5, 5)), np.int64, d, matrix.Dense)
        check(matrix.Dense(B.randn(float, 5, 5)), np.float64, d, matrix.Dense)
Example 28
def test_boxing_objective(grad):
    k = EQ()
    objs = []

    def objective(x):
        x = x + 2
        x = x * 2
        objs.append(x[0] + k)
        objs.append(x[0] + Dense(k(x)))
        objs.append(x[0] * k)
        objs.append(x[0] * Dense(k(x)))
        return B.sum(x)

    grad(objective)(B.randn(10))

    for obj in objs:
        assert isinstance(obj, (Element, AbstractMatrix))
Example 29
def test_normal_arithmetic(normal1, normal2):
    a = Dense(B.randn(3, 3))
    b = 5.0

    # Test matrix multiplication.
    approx(normal1.lmatmul(a).mean, a @ normal1.mean)
    approx(normal1.lmatmul(a).var, a @ normal1.var @ a.T)
    approx(normal1.rmatmul(a).mean, a.T @ normal1.mean)
    approx(normal1.rmatmul(a).var, a.T @ normal1.var @ a)

    # Test multiplication.
    approx((normal1 * b).mean, normal1.mean * b)
    approx((normal1 * b).var, normal1.var * b**2)
    approx((b * normal1).mean, normal1.mean * b)
    approx((b * normal1).var, normal1.var * b**2)
    with pytest.raises(NotFoundLookupError):
        normal1.__mul__(normal1)
    with pytest.raises(NotFoundLookupError):
        normal1.__rmul__(normal1)

    # Test addition.
    approx((normal1 + normal2).mean, normal1.mean + normal2.mean)
    approx((normal1 + normal2).var, normal1.var + normal2.var)
    approx(normal1.__radd__(b).mean, normal1.mean + b)
    approx(normal1.__radd__(b).var, normal1.var)
    with pytest.raises(NotFoundLookupError):
        normal1.__add__(RandomVector())
    with pytest.raises(NotFoundLookupError):
        normal1.__radd__(RandomVector())

    # Test negation.
    approx((-normal1).mean, -normal1.mean)
    approx((-normal1).var, normal1.var)

    # Test subtraction.
    approx((normal1 - normal2).mean, normal1.mean - normal2.mean)
    approx((normal1 - normal2).var, normal1.var + normal2.var)
    approx(normal1.__rsub__(normal2).mean, normal2.mean - normal1.mean)
    approx(normal1.__rsub__(normal2).var, normal1.var + normal2.var)

    # Test division.
    approx(normal1.__div__(b).mean, normal1.mean / b)
    approx(normal1.__div__(b).var, normal1.var / b**2)
    approx(normal1.__truediv__(b).mean, normal1.mean / b)
    approx(normal1.__truediv__(b).var, normal1.var / b**2)
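All of these assertions follow from the closure of Gaussians under affine maps and independent sums:

    x \sim \mathcal{N}(\mu, \Sigma) \implies Ax \sim \mathcal{N}(A\mu, A\Sigma A^\top), \qquad bx \sim \mathcal{N}(b\mu, b^2\Sigma), \qquad x_1 + x_2 \sim \mathcal{N}(\mu_1 + \mu_2, \Sigma_1 + \Sigma_2),

where the last identity assumes `normal1` and `normal2` are independent, which the variance-addition checks above rely on.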
Example 30
def test_linear():
    layer = Linear(20)
    x = B.randn(10, 5, 3)

    # Check number of weights and width.
    assert layer.num_weights(3) == 3 * 20 + 20
    assert layer.width == 20

    # Check initialisation and width.
    vs = Vars(np.float64)
    layer.initialise(3, vs)
    assert layer.width == 20

    # Check batch consistency.
    check_batch_consistency(layer, x)

    # Check correctness.
    approx(layer(x), B.matmul(x, layer.A[None, :, :]) + layer.b[None, :, :])
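The weight count and the correctness check together encode the usual dense-layer map, broadcast over the leading batch dimension:

    y = xA + b, \qquad A \in \mathbb{R}^{3 \times 20}, \; b \in \mathbb{R}^{1 \times 20},

which accounts for the 3 * 20 + 20 = 80 weights asserted by `num_weights(3)`.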