def test_logpdf_missing_data():
    # Setup model.
    m = 3
    noise = 1e-2
    latent_noises = 2e-2 * B.ones(m)
    kernels = [0.5 * EQ().stretch(0.75) for _ in range(m)]
    x = B.linspace(0, 10, 20)

    # Concatenate two orthogonal matrices to make the missing-data
    # approximation exact.
    u1 = B.svd(B.randn(m, m))[0]
    u2 = B.svd(B.randn(m, m))[0]
    u = Dense(B.concat(u1, u2, axis=0) / B.sqrt(2))

    s_sqrt = Diagonal(B.rand(m))

    # Construct a reference model.
    oilmm_pp = ILMMPP(kernels, u @ s_sqrt, noise, latent_noises)

    # Sample to generate test data.
    y = oilmm_pp.sample(x, latent=False)

    # Throw away data, but retain orthogonality.
    y[5:10, 3:] = np.nan
    y[10:, :3] = np.nan

    # Construct OILMM to test.
    oilmm = OILMM(kernels, u, s_sqrt, noise, latent_noises)

    # Check that the evidence is still exact.
    approx(oilmm_pp.logpdf(x, y), oilmm.logpdf(x, y), atol=1e-7)

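# For intuition, a minimal NumPy-only sketch (separate from the test above) of
# why stacking two orthogonal matrices and scaling by 1 / sqrt(2) keeps the
# columns orthonormal, which is what makes the missing-data approximation
# exact. The names below are illustrative, not part of the test suite.
import numpy as np

m = 3
u1 = np.linalg.svd(np.random.randn(m, m))[0]  # Orthogonal `m x m` matrix.
u2 = np.linalg.svd(np.random.randn(m, m))[0]  # Orthogonal `m x m` matrix.
u = np.concatenate([u1, u2], axis=0) / np.sqrt(2)

# `U^T U = (U1^T U1 + U2^T U2) / 2 = (I + I) / 2 = I`, so the columns of the
# stacked matrix are orthonormal.
assert np.allclose(u.T @ u, np.eye(m))
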
def test_three_arg(name):
    metric = getattr(wbml.metric, name)

    # Test scalar usage.
    assert isinstance(metric(1, 1, B.randn(10)), B.Number)

    # Test series usage.
    assert isinstance(metric(B.randn(10), B.rand(10), B.randn(10)), B.Number)

    # Test matrix usage.
    assert isinstance(
        metric(B.randn(10, 10), B.rand(10, 10), B.randn(10, 10)), pd.Series
    )

    # Check that higher-order tensors fail.
    with pytest.raises(ValueError):
        metric(B.randn(10, 10, 10), B.rand(10, 10, 10), B.randn(10, 10, 10))

def test_create_random_state(dtype):
    # Test specification without a seed argument.
    B.create_random_state(dtype)

    # Check that it does the right thing.
    state = B.create_random_state(dtype, seed=0)
    state, x1 = B.rand(state, dtype)
    state, x2 = B.rand(state, dtype)
    x1, x2 = to_np(x1), to_np(x2)

    state = B.create_random_state(dtype, seed=0)
    state, y1 = B.rand(state, dtype)
    state, y2 = B.rand(state, dtype)
    y1, y2 = to_np(y1), to_np(y2)

    assert x1 != x2
    assert x1 == y1
    assert x2 == y2

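# The explicit state threading above mirrors the functional PRNG style that
# JAX uses: every sampler consumes a state and returns the advanced state
# alongside the sample. For reference, a minimal JAX sketch of the same idea
# (assuming JAX is available; this is not part of the test):
import jax

key = jax.random.PRNGKey(0)  # Same seed always gives the same key.
key, subkey = jax.random.split(key)  # Advance the state explicitly.
x = jax.random.uniform(subkey)  # Same `subkey` always gives the same draw.
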
def test_set_seed_set_global_random_state(dtype, f_plain, check_lazy_shapes):
    B.set_random_seed(0)
    x1 = to_np(B.rand(dtype))
    x2 = to_np(f_plain())
    B.set_random_seed(0)
    y1 = to_np(B.rand(dtype))
    y2 = to_np(f_plain())
    assert x1 == y1
    assert x2 == y2

    B.set_global_random_state(B.create_random_state(dtype, seed=0))
    x1 = to_np(B.rand(dtype))
    x2 = to_np(f_plain())
    B.set_global_random_state(B.create_random_state(dtype, seed=0))
    y1 = to_np(B.rand(dtype))
    y2 = to_np(f_plain())
    assert x1 == y1
    # TODO: Make this work with TF!
    if not isinstance(dtype, B.TFDType):
        assert x2 == y2

def test_fdd_take():
    with Measure():
        f1 = GP(1, EQ())
        f2 = GP(2, Exp())
        f = cross(f1, f2)

    x = B.linspace(0, 3, 5)

    # Build an FDD with a very complicated input specification.
    fdd = f((x, (f2(x), x), f1(x), (f2(x), (f1(x), x))))
    n = infer_size(fdd.p.kernel, fdd.x)
    fdd = f(fdd.x, matrix.Diagonal(B.rand(n)))

    # Flip a coin for every element.
    mask = B.randn(n) > 0
    taken_fdd = B.take(fdd, mask)

    approx(taken_fdd.mean, B.take(fdd.mean, mask))
    approx(taken_fdd.var, B.submatrix(fdd.var, mask))
    approx(taken_fdd.noise, B.submatrix(fdd.noise, mask))
    assert isinstance(taken_fdd.noise, matrix.Diagonal)

    # Test that only masks are supported, for now.
    with pytest.raises(AssertionError):
        B.take(fdd, np.array([1, 2]))

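# To spell out what taking a boolean mask means for a Gaussian: entries of the
# mean are selected, and the corresponding rows *and* columns of the
# covariance are kept, which is presumably what `B.submatrix` computes above.
# A minimal NumPy sketch, not part of the test:
import numpy as np

mean, var = np.random.randn(5), np.eye(5)
mask = np.random.randn(5) > 0

mean_taken = mean[mask]  # Select entries of the mean.
var_taken = var[np.ix_(mask, mask)]  # Select rows and columns of the covariance.
assert var_taken.shape == (mask.sum(), mask.sum())
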
def test_bvn_cdf(check_lazy_shapes):
    check_sensitivity(bvn_cdf, s_bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
    check_grad(bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))

    # Check that the function runs on both `float32`s and `float64`s.
    a, b, c = B.rand(3), B.rand(3), B.rand(3)
    approx(
        B.bvn_cdf(a, b, c),
        B.bvn_cdf(B.cast(np.float32, a), B.cast(np.float32, b), B.cast(np.float32, c)),
    )

    # Check that, in JAX, the function checks the shapes of the inputs.
    with pytest.raises(ValueError):
        B.bvn_cdf(
            B.rand(jnp.float32, 2), B.rand(jnp.float32, 3), B.rand(jnp.float32, 3)
        )
    with pytest.raises(ValueError):
        B.bvn_cdf(
            B.rand(jnp.float32, 3), B.rand(jnp.float32, 2), B.rand(jnp.float32, 3)
        )
    with pytest.raises(ValueError):
        B.bvn_cdf(
            B.rand(jnp.float32, 3), B.rand(jnp.float32, 3), B.rand(jnp.float32, 2)
        )

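# For reference, `bvn_cdf(a, b, rho)` computes the standard bivariate normal
# CDF `P(X <= a, Y <= b)` under correlation `rho`. A minimal sketch of an
# independent check against SciPy, assuming SciPy is available and that this
# is indeed the convention used:
import numpy as np
from scipy.stats import multivariate_normal

a, b, rho = 0.3, -0.2, 0.5
cov = np.array([[1.0, rho], [rho, 1.0]])
ref = multivariate_normal(mean=[0.0, 0.0], cov=cov).cdf([a, b])
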
    # Trigger the warning!
    f(int, 5)
    assert len(w) == 1


@pytest.mark.parametrize(
    "x", Tensor(2).forms() + Tensor(2, 3).forms() + Tensor(2, 3, 4).forms()
)
@pytest.mark.parametrize(
    "p",
    [
        None,
        # Give unnormalised probabilities.
        B.rand(2),
    ],
)
def test_choice(x, p, check_lazy_shapes):
    state = B.create_random_state(B.dtype(x))

    # Make `p` a dictionary so that we can optionally give it.
    if p is None:
        p = {}
    else:
        # Cast the weights to the right framework.
        p = {"p": B.cast(B.dtype(x), p)}

    # Check shape.
    assert B.shape(B.choice(x, **p)) == B.shape(x)[1:]
    assert B.shape(B.choice(x, 5, **p)) == (5,) + B.shape(x)[1:]

    # Check that numbers remain unchanged.
    a = 1
    assert B.to_active_device(a) is a


@pytest.mark.parametrize("t", [tf.float32, torch.float32, jnp.float32])
@pytest.mark.parametrize(
    "f",
    [
        lambda t: B.zeros(t, 2, 2),
        lambda t: B.ones(t, 2, 2),
        lambda t: B.eye(t, 2),
        lambda t: B.linspace(t, 0, 5, 10),
        lambda t: B.range(t, 10),
        lambda t: B.rand(t, 10),
        lambda t: B.randn(t, 10),
    ],
)
def test_on_device(f, t, check_lazy_shapes):
    # Construct on the currently active device.
    f_t = f(t)

    # Set the active device to something else.
    B.ActiveDevice.active_name = "previous"

    # Check that explicit allocation on the CPU works.
    with B.on_device("cpu"):
        assert B.device(f(t)) == B.device(f_t)

    # Also test inferring the device from a tensor.
    with B.on_device(f_t):
        assert B.device(f(t)) == B.device(f_t)

def generate_init(shape, dtype):
    # Sample uniformly from `[lower, upper)`; `lower` and `upper` are closed
    # over from the enclosing scope.
    return lower + B.rand(dtype, *shape) * (upper - lower)

def generate_init(shape, dtype):
    # Sample uniformly from `[0, 1)`.
    return B.rand(dtype, *shape)

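# A minimal usage sketch of the two initialisers above (not part of the
# source), with hypothetical `lower`, `upper`, and shape, and `lab` imported
# as `B` as elsewhere: the first samples uniformly from `[lower, upper)`, the
# second from `[0, 1)`.
import numpy as np
import lab as B

lower, upper = -1.0, 2.0
x = lower + B.rand(np.float64, 3, 2) * (upper - lower)
assert np.all((x >= lower) & (x < upper))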