Example 1
def test_minimise_disconnected_gradient(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)
    vs.ubnd(name="x")

    # Check that optimiser runs for objective that returns the constant zero.
    minimise(lambda v: B.cast(v.dtype, 0), vs, **kw_args)
Example 2
def test_copy_torch():
    vs = Vars(torch.float64)

    # Create a variable.
    vs.pos(1, name="a")

    # Make a normal and detached copy.
    vs_copy = vs.copy()
    vs_detached = vs.copy(detach=True)

    # Require gradients for both.
    vs.requires_grad(True)
    vs_detached.requires_grad(True)

    # Do a backward pass.
    (vs_detached["a"]**2).backward()

    # Check that values are equal, but gradients only computed for one.
    assert vs["a"] == 1
    assert vs.get_latent_vars("a")[0].grad is None
    assert vs_detached["a"] == 1
    assert vs_detached.get_latent_vars("a")[0].grad == 2

    # Check that copied fields are, in fact, copies: clearing the originals
    # must not affect the copies.
    del vs.transforms[:]
    del vs.inverse_transforms[:]
    del vs.vars[:]
    vs.name_to_index.clear()

    for vs_copied in [vs_copy, vs_detached]:
        assert len(vs_copied.transforms) > 0
        assert len(vs_copied.inverse_transforms) > 0
        assert len(vs_copied.vars) > 0
        assert len(vs_copied.name_to_index) > 0
Example 3
def test_rnn():
    for final_dense, gru, nn in [
        (True, False,
         rnn(10, (20, 30), normalise=True, gru=False, final_dense=True)),
        (False, True,
         rnn(10, (20, 30), normalise=True, gru=True, final_dense=False)),
    ]:
        vs = Vars(np.float32)
        nn.initialise(5, vs)
        x = B.randn(2, 3, 5)

        # Check number of weights and width.
        assert B.length(vs.get_vector()) == nn.num_weights(5)
        assert nn.width == 10

        # Test batch consistency.
        check_batch_consistency(nn, x)

        # Check composition.
        assert len(nn.layers) == (9 if final_dense else 7)
        assert type(nn.layers[0]) == Recurrent
        assert type(nn.layers[0].cell) == (GRU if gru else Elman)
        assert nn.layers[0].width == 20
        assert type(nn.layers[1]) == Activation
        assert nn.layers[1].width == 20
        assert type(nn.layers[2]) == Normalise
        assert nn.layers[2].width == 20
        assert type(nn.layers[3]) == Recurrent
        assert type(nn.layers[3].cell) == (GRU if gru else Elman)
        assert nn.layers[3].width == 30
        assert type(nn.layers[4]) == Activation
        assert nn.layers[4].width == 30
        assert type(nn.layers[5]) == Normalise
        assert nn.layers[5].width == 30
        if final_dense:
            assert type(nn.layers[6]) == Linear
            assert nn.layers[6].width == 10
            assert type(nn.layers[7]) == Activation
            assert nn.layers[7].width == 10
            assert type(nn.layers[8]) == Linear
            assert nn.layers[8].width == 10
        else:
            assert type(nn.layers[6]) == Linear
            assert nn.layers[6].width == 10

    # Check that normalisation layers disappear.
    assert (len(
        rnn(10, (20, 30), normalise=False, gru=True,
            final_dense=False).layers) == 5)
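The helper `check_batch_consistency` appears throughout these examples but is never shown. A minimal sketch of what such a helper could look like, assuming it compares a batched call against per-element calls (this implementation is an assumption, not the library's own utility):

import lab as B
import numpy as np


def check_batch_consistency(layer, x):
    # Assumed behaviour: applying the layer to the whole batch should agree
    # with applying it to every batch element separately.
    y_batch = B.to_numpy(layer(x))
    for i in range(B.shape(x)[0]):
        np.testing.assert_allclose(
            y_batch[i], B.to_numpy(layer(x[i])), rtol=1e-6
        )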
Example 4
def test_minimise_trace(minimise_method):
    dtype, minimise, kw_args = minimise_method

    def f(vs_):
        return vs_.ubnd(name="x")**2

    # Test that `trace=False` prints nothing.
    with OutStream() as stream:
        minimise(f, Vars(dtype=dtype), trace=False, **kw_args)
        assert stream.output == ""

    # Test that `trace=True` prints something.
    with OutStream() as stream:
        minimise(f, Vars(dtype=dtype), trace=True, **kw_args)
        assert stream.output != ""
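`OutStream` is the output-capturing helper this test relies on. A minimal sketch of such a helper, assuming it simply redirects `sys.stdout` for the duration of the `with` block (an assumption, not the actual test utility):

import io
import sys


class OutStream:
    """Capture stdout inside a `with` block; `output` is what was printed so far."""

    def __enter__(self):
        self._original = sys.stdout
        self._buffer = io.StringIO()
        sys.stdout = self._buffer
        return self

    def __exit__(self, *exc_info):
        sys.stdout = self._original

    @property
    def output(self):
        return self._buffer.getvalue()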
Example 5
    def __init__(self,
                 replace=False,
                 impute=True,
                 scale=1.0,
                 scale_tie=False,
                 per=False,
                 per_period=1.0,
                 per_scale=1.0,
                 per_decay=10.0,
                 input_linear=False,
                 input_linear_scale=100.0,
                 linear=True,
                 linear_scale=100.0,
                 nonlinear=False,
                 nonlinear_scale=1.0,
                 rq=False,
                 markov=None,
                 noise=0.1,
                 x_ind=None,
                 normalise_y=True,
                 transform_y=(lambda x: x, lambda x: x)):
        # Model configuration.
        self.replace = replace
        self.impute = impute
        self.sparse = x_ind is not None
        if x_ind is None:
            self.x_ind = None
        else:
            self.x_ind = B.uprank(_to_torch(x_ind))
        self.model_config = {
            'scale': scale,
            'scale_tie': scale_tie,
            'per': per,
            'per_period': per_period,
            'per_scale': per_scale,
            'per_decay': per_decay,
            'input_linear': input_linear,
            'input_linear_scale': input_linear_scale,
            'linear': linear,
            'linear_scale': linear_scale,
            'nonlinear': nonlinear,
            'nonlinear_scale': nonlinear_scale,
            'rq': rq,
            'markov': markov,
            'noise': noise
        }

        # Model fitting.
        self.vs = Vars(dtype=torch.float64)
        self.is_conditioned = False
        self.x = None  # Inputs of training data
        self.y = None  # Outputs of training data
        self.n = None  # Number of data points
        self.m = None  # Number of input features
        self.p = None  # Number of outputs

        # Output normalisation and transformation.
        self.normalise_y = normalise_y
        self._unnormalise_y, self._normalise_y = lambda x: x, lambda x: x
        self._transform_y, self._untransform_y = transform_y
Example 6
def test_minimise(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)

    # Initialise a variable that is not used.
    vs.ubnd(name="other")

    # Define some objective.
    def f(vs_):
        return (-3 - vs_.pos(name="x", init=1.0))**2

    # Minimise it, until convergence.
    val_opt = minimise(f, vs, iters=2000, **kw_args)

    # Check for equality up to two digits: since `x` is constrained to be
    # positive, the minimum of `(-3 - x)**2` is attained as `x -> 0`, giving 9.
    approx(val_opt, 9, atol=1e-2)
    approx(vs["x"], 0, atol=1e-2)
Example 7
def test_minimise(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)

    # Initialise a variable that is not used.
    vs.get(name='other')

    # Define some objective.
    def f(vs_):
        return (-3 - vs_.pos(name='x', init=5.)) ** 2

    # Minimise it, until convergence.
    val_opt = minimise(f, vs, iters=10000, **kw_args)

    # Check for equality up to three digits.
    approx(val_opt, 9, digits=3)
    approx(vs['x'], 0, digits=3)
Example 8
def maximum_a_posteriori(f, x_init, iters=2000):
    """Compute the MAP estimate.

    Args:
        f (function): Possibly unnormalised log-density.
        x_init (column vector): Starting point for the optimisation.
        iters (int, optional): Number of optimisation iterations. Defaults to `2000`.

    Returns:
        tensor: MAP estimate.
    """
    def objective(vs_):
        return -f(vs_["x"])

    vs = Vars(B.dtype(x_init))
    vs.unbounded(init=x_init, name="x")
    minimise_l_bfgs_b(objective, vs, iters=iters, jit=True, trace=True)
    return vs["x"]
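A minimal usage sketch for `maximum_a_posteriori`, assuming the JAX backend (the log-density and starting point below are illustrative and not from the source):

import jax.numpy as jnp
import lab.jax as B

# MAP of a standard-normal log-density (up to a constant); the estimate should
# end up close to the zero vector.
x_init = B.randn(jnp.float64, 3, 1)
x_map = maximum_a_posteriori(lambda x: -0.5 * B.sum(x ** 2), x_init)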
Example 9
def test_requires_grad_detach_vars_torch():
    vs = Vars(torch.float64)
    vs.pos(1, name='a')

    # Test that gradients need to first be required.
    with pytest.raises(RuntimeError):
        (2 * vs['a']).backward()

    # Test that gradients can be required and are then computed.
    vs.requires_grad(True)
    (2 * vs['a']).backward()
    assert vs.vars[0].grad is not None

    # Test that variables can be detached.
    vs.pos(1, name='b')
    result = 2 * vs['b']
    vs.detach()
    with pytest.raises(RuntimeError):
        result.backward()
Example 10
def test_no_jit_autograd():
    vs = Vars(dtype=np.float64)

    def f(vs_):
        return vs_.ubnd(name="x")

    with pytest.raises(ValueError):
        varz.autograd.minimise_l_bfgs_b(f, vs, jit=True)
    with pytest.raises(ValueError):
        varz.autograd.minimise_adam(f, vs, jit=True)
Example 11
def test_requires_grad_detach_vars_torch():
    vs = Vars(torch.float64)
    vs.pos(1, name="a")

    # Test that gradients need to first be required.
    with pytest.raises(RuntimeError):
        (2 * vs["a"]).backward()

    # Test that gradients can be required and are then computed.
    vs.requires_grad(True)
    (2 * vs["a"]).backward()
    assert vs.vars[0].grad is not None

    # Test that variables can be detached.
    vs.pos(1, name="b")
    result = 2 * vs["b"]
    vs.detach()
    with pytest.raises(RuntimeError):
        result.backward()
Example 12
def test_ff():
    vs = Vars(np.float32)

    nn = ff(10, (20, 30), normalise=True)
    nn.initialise(5, vs)
    x = B.randn(2, 3, 5)

    # Check number of weights and width.
    assert B.length(vs.get_vector()) == nn.num_weights(5)
    assert nn.width == 10

    # Test batch consistency.
    check_batch_consistency(nn, x)

    # Check composition.
    assert len(nn.layers) == 7
    assert type(nn.layers[0]) == Linear
    assert nn.layers[0].A.shape[0] == 5
    assert nn.layers[0].width == 20
    assert type(nn.layers[1]) == Activation
    assert nn.layers[1].width == 20
    assert type(nn.layers[2]) == Normalise
    assert nn.layers[2].width == 20
    assert type(nn.layers[3]) == Linear
    assert nn.layers[3].width == 30
    assert type(nn.layers[4]) == Activation
    assert nn.layers[4].width == 30
    assert type(nn.layers[5]) == Normalise
    assert nn.layers[5].width == 30
    assert type(nn.layers[6]) == Linear
    assert nn.layers[6].width == 10

    # Check that one-dimensional calls are okay.
    vs = Vars(np.float32)
    nn.initialise(1, vs)
    approx(nn(B.linspace(0, 1, 10)), nn(B.linspace(0, 1, 10)[:, None]))

    # Check that zero-dimensional calls fail.
    with pytest.raises(ValueError):
        nn(0)

    # Check normalisation layers disappear.
    assert len(ff(10, (20, 30), normalise=False).layers) == 5
Example 13
def test_minimise_zero_calls(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)

    calls = Value(0)

    def f(vs_):
        calls.val += 1
        return vs_.ubnd(name="x", init=5.0)**2

    # Check that running the optimiser for zero iterations only incurs a
    # single call.
    minimise(f, vs, iters=0, **kw_args)
    assert calls.val == 1
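`Value` is a small mutable wrapper used here so that the objective can count its own calls. A possible minimal implementation (an assumption, not the actual test utility):

class Value:
    """Mutable box around a single value, exposed as `.val`."""

    def __init__(self, val):
        self.val = val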
Example 14
def test_minimise_exception(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)

    first_call = Value(True)

    # Define an objective that sometimes fails after the first call.
    def f(vs_):
        if first_call.val:
            first_call.val = False
        else:
            if np.random.rand() > .5:
                raise Exception('Fail!')
        return vs_.get(name='x', init=5.) ** 2

    # Check that the optimiser runs.
    minimise(f, vs, **kw_args)
Example 15
def test_linear():
    layer = Linear(20)
    x = B.randn(10, 5, 3)

    # Check number of weights and width.
    assert layer.num_weights(3) == 3 * 20 + 20
    assert layer.width == 20

    # Check initialisation and width.
    vs = Vars(np.float64)
    layer.initialise(3, vs)
    assert layer.width == 20

    # Check batch consistency.
    check_batch_consistency(layer, x)

    # Check correctness.
    approx(layer(x), B.matmul(x, layer.A[None, :, :]) + layer.b[None, :, :])
Example 16
def analyse_elbos(models,
                  t,
                  y,
                  true_noisy_kernel=None,
                  comparative_kernel=None):
    """Compare ELBOs of models.

    Args:
        models (list): Models to analyse.
        t (vector): Time points of data.
        y (vector): Observations.
        true_noisy_kernel (:class:`stheno.Kernel`, optional): True kernel that
            generated the data, including noise.
        comparative_kernel (function, optional): A function that takes in a
            variable container and gives back a kernel. A GP with this
            kernel will be trained on the data to compute a likelihood that
            will be compared to the ELBOs.
    """

    # Print LML under true GP if the true kernel is given.
    if true_noisy_kernel:
        wbml.out.kv("LML under true GP", GP(true_noisy_kernel)(t).logpdf(y))

    # Print LML under a trained GP if a comparative kernel is given.
    if comparative_kernel:

        def objective(vs_):
            gp = GP(sequential(comparative_kernel)(vs_))
            return -gp(t).logpdf(y)

        # Fit the GP.
        vs = Vars(jnp.float64)
        lml_gp_opt = -minimise_l_bfgs_b(objective, vs, jit=True, iters=1000)

        # Print likelihood.
        wbml.out.kv("LML under optimised GP", lml_gp_opt)

    # Estimate ELBOs.
    with wbml.out.Section("ELBOs"):
        for model in models:
            state, elbo = model.elbo(B.global_random_state(model.dtype), t, y)
            B.set_global_random_state(state)
            wbml.out.kv(model.name, elbo)
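A hedged sketch of how `comparative_kernel` could be supplied: a function from a variable container to a kernel, here an EQ kernel with a learnable variance and length scale. The kernel choice is illustrative, and `models`, `t`, and `y` are assumed to exist as in the docstring; the unnamed `pos` variables rely on the `sequential` wrapper inside `analyse_elbos` to name them.

from stheno import EQ


def make_comparative_kernel(vs_):
    # Learnable variance times an EQ kernel with a learnable length scale.
    return vs_.pos(1.0) * EQ().stretch(vs_.pos(1.0))


analyse_elbos(models, t, y, comparative_kernel=make_comparative_kernel)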
Example 17
def test_minimise_exception(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)

    # Skip this test when the JIT is used, because tracing may then fail.
    if "jit" in kw_args and kw_args["jit"]:
        return

    first_call = Value(True)

    # Define an objective that sometimes fails after the first call.
    def f(vs_):
        if first_call.val:
            first_call.val = False
        else:
            if np.random.rand() > 0.5:
                raise Exception("Fail!")
        return vs_.ubnd(name="x", init=5.0)**2

    # Check that the optimiser runs.
    minimise(f, vs, **kw_args)
Example 18
def test_recurrent():
    vs = Vars(np.float32)

    # Test setting the initial hidden state.
    layer = Recurrent(GRU(10), B.zeros(1, 10))
    layer.initialise(5, vs)
    approx(layer.h0, B.zeros(1, 10))

    layer = Recurrent(GRU(10))
    layer.initialise(5, vs)
    assert layer.h0 is not None

    # Check batch consistency.
    check_batch_consistency(layer, B.randn(30, 20, 5))

    # Test preservation of rank upon calls.
    assert B.shape(layer(B.randn(20, 5))) == (20, 10)
    assert B.shape(layer(B.randn(30, 20, 5))) == (30, 20, 10)

    # Check that zero-dimensional calls fail.
    with pytest.raises(ValueError):
        layer(0)
Example 19
def test_get_set_vector(dtype):
    vs = Vars(dtype=dtype)

    # Test stacking a matrix and a vector.
    vs.get(shape=(2, ), name='a', init=np.array([1, 2]))
    vs.get(shape=(2, 2), name='b', init=np.array([[3, 4], [5, 6]]))
    allclose(vs.get_vector('a', 'b'), [1, 2, 3, 4, 5, 6])

    # Test setting elements.
    vs.set_vector(np.array([6, 5, 4, 3, 2, 1]), 'a', 'b')
    allclose(vs['a'], np.array([6, 5]))
    allclose(vs['b'], np.array([[4, 3], [2, 1]]))

    # Test setting elements in a differentiable way. This should allow for
    # any values.
    vs.set_vector(np.array(['1', '2', '3', '4', '5', '6']),
                  'a',
                  'b',
                  differentiable=True)
    assert np.all(vs['a'] == ['1', '2'])
    assert np.all(vs['b'] == np.array([['3', '4'], ['5', '6']]))
Example 20
def vs_source(request):
    if request.param:
        source = B.randn(np.float64, 1000)
        return Vars(np.float64, source=source)
    else:
        return Vars(np.float64)
Example 21
                  random_state=seed).fit(x).cluster_centers_


X = np.random.rand(N, input_dim)
y = np.random.rand(N, 1)

X_test = np.random.rand(N * 2, input_dim)

# GPy

m = GPy.models.GPRegression(X, y, GPy.kern.RBF(input_dim, ARD=True))
GPy_pred_y, GPy_pred_var = m.predict(X_test, full_cov=True)

# Stheno model

vs = Vars(tf.float64)
# Local params
vs.positive(init=local_gp_std, shape=(input_dim, ), name='local_gp_std')
vs.positive(init=local_gp_ls, shape=(input_dim, ), name='local_gp_ls')
vs.positive(init=local_ls,
            shape=(input_dim, num_inducing_points),
            name='local_ls')
vs.positive(init=local_gp_noise_std,
            shape=(input_dim, ),
            name='local_gp_noise_std')

# Global params
vs.positive(init=global_gp_std, name='global_gp_std')
vs.positive(init=global_gp_noise_std, name='global_gp_noise_std')

model = NSS(X, y, vs, num_inducing_points, f_indu, seed=seed)
Example 22
def test_assignment(dtype):
    vs = Vars(dtype=dtype)

    # Generate some variables.
    vs.get(1., name='unbounded')
    vs.pos(2., name='positive')
    vs.bnd(3., lower=0, upper=10, name='bounded')

    # Check that they have the right values.
    allclose(1., vs['unbounded'])
    allclose(2., vs['positive'])
    allclose(3., vs['bounded'])

    # Assign some new values.
    vs.assign('unbounded', 4.)
    vs.assign('positive', 5.)
    vs.assign('bounded', 6.)

    # Again check that they have the right values.
    allclose(4., vs['unbounded'])
    allclose(5., vs['positive'])
    allclose(6., vs['bounded'])

    # Differentiably assign new values. This should allow for anything.
    vs.assign('unbounded', 'value', differentiable=True)
    assert vs['unbounded'] == 'value'
Example 23
    def __init__(
            self,
            replace=False,
            impute=True,
            scale=1.0,
            scale_tie=False,
            per=False,
            per_period=1.0,
            per_scale=1.0,
            per_decay=10.0,
            input_linear=False,
            input_linear_scale=100.0,
            linear=True,
            linear_scale=100.0,
            nonlinear=False,
            nonlinear_scale=1.0,
            rq=False,
            markov=None,
            noise=0.1,
            x_ind=None,
            normalise_y=True,
            transform_y=(lambda x: x, lambda x: x),
    ):
        # Model configuration.
        self.replace = replace
        self.impute = impute
        self.sparse = x_ind is not None
        if x_ind is None:
            self.x_ind = None
        else:
            self.x_ind = B.uprank(_to_torch(x_ind))
        self.model_config = {
            "scale": scale,
            "scale_tie": scale_tie,
            "per": per,
            "per_period": per_period,
            "per_scale": per_scale,
            "per_decay": per_decay,
            "input_linear": input_linear,
            "input_linear_scale": input_linear_scale,
            "linear": linear,
            "linear_scale": linear_scale,
            "nonlinear": nonlinear,
            "nonlinear_scale": nonlinear_scale,
            "rq": rq,
            "markov": markov,
            "noise": noise,
        }

        # Model fitting.
        self.vs = Vars(dtype=torch.float64)
        self.is_conditioned = False
        self.x = None  # Inputs of training data
        self.y = None  # Outputs of training data
        self.w = None  # Weights for every time stamp
        self.n = None  # Number of data points
        self.m = None  # Number of input features
        self.p = None  # Number of outputs

        # Output normalisation and transformation.
        self.normalise_y = normalise_y
        self._unnormalise_y, self._normalise_y = lambda x: x, lambda x: x
        self._transform_y, self._untransform_y = transform_y
Example 24
    # Determine number of outputs.
    p_s = len(sims)
    p_r = loc.shape[0]
    p = p_s * p_r

    # Compute inducing point locations, assuming that inputs are time.
    n_ind = int(args.n / 60)  # One inducing point per two months.
    x_ind_init = B.linspace(x_data.min(), x_data.max(), n_ind)

    # Determine initialisation for covariance between sims.
    rho = 0.5
    u, s, _ = B.svd((1 - rho) * B.eye(p_s) + rho * B.ones(p_s, p_s))
    u_s_init = u[:, :m_s]
    s_sqrt_s_init = B.sqrt(s[:m_s])

    vs = Vars(torch.float64)

    def construct_model(vs):
        if args.separable:
            # Copy same kernel `m` times.
            kernel = [
                Mat52().stretch(vs.bnd(6 * 30, lower=60, name="k_scale"))
            ]
            kernels = kernel * m
        else:
            # Parametrise different kernels.
            kernels = [
                Mat52().stretch(vs.bnd(6 * 30, lower=60, name=f"{i}/k_scale"))
                for i in range(m)
            ]
        noise = vs.bnd(1e-2, name="noise")
Example 25
def model(vs):
    """Construct a model with learnable parameters."""
    p = vs.struct  # Varz handles positivity (and other) constraints.
    kernel = p.variance.positive() * EQ().stretch(p.scale.positive())
    return GP(kernel), p.noise.positive()


@parametrised
def model_alternative(vs, scale: Positive, variance: Positive,
                      noise: Positive):
    """Equivalent to :func:`model`, but with `@parametrised`."""
    kernel = variance * EQ().stretch(scale)
    return GP(kernel), noise


vs = Vars(torch.float32)
f, noise = model(vs)

# Condition on observations and make predictions before optimisation.
f_post = f | (f(x_obs, noise), y_obs)
prior_before = f, noise
pred_before = f_post(x, noise).marginal_credible_bounds()


def objective(vs):
    f, noise = model(vs)
    evidence = f(x_obs, noise).logpdf(y_obs)
    return -evidence


# Learn hyperparameters.
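The snippet stops at the `# Learn hyperparameters.` comment. What typically follows is a call to one of varz's optimisers and a second round of predictions; a hedged sketch, assuming varz's PyTorch L-BFGS-B wrapper and the `x_obs`, `y_obs`, and `x` data already assumed above:

from varz.torch import minimise_l_bfgs_b

# Optimise the latent variables in `vs` by minimising the negative evidence.
minimise_l_bfgs_b(objective, vs, trace=True)

# Condition and predict again with the optimised hyperparameters.
f, noise = model(vs)
f_post = f | (f(x_obs, noise), y_obs)
prior_after = f, noise
pred_after = f_post(x, noise).marginal_credible_bounds()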
Example 26
def __init__(self, scheme: str = "structured"):
    self.vs = Vars(jnp.float64)
    self.scheme = scheme.lower()
Example 27
def test_source():
    vs = Vars(np.float32, source=np.ones(10))

    assert vs.get() == 1.
    approx(vs.pos(shape=(5, )), np.exp(np.ones(5)))
    approx(vs.pos(), np.exp(1.))
    with pytest.raises(ValueError):
        vs.pos(shape=(5, ))

    # Test that the source variables are cast to the right data type.

    vs = Vars(np.float32, source=np.array([1]))
    assert vs.get().dtype == np.float32

    vs = Vars(np.float64, source=np.array([1]))
    assert vs.get().dtype == np.float64
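A short illustration of the source mechanism assumed by this test: variables are read from the front of the source vector in order, and constrained variables transform the raw slice they are given (e.g. `exp` for positive variables). The concrete values below follow directly from that reading and are shown for illustration only.

import numpy as np
from varz import Vars

vs = Vars(np.float64, source=np.array([0.0, 1.0, 2.0]))
print(vs.get())            # 0.0: the first source element, unbounded.
print(vs.pos(shape=(2,)))  # exp([1.0, 2.0]): the next two elements, made positive.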
Example 28
def test_get_vars():
    vs = Vars(np.int64)

    # This test also tests that `Vars.get_vars` always returns the collection
    # of variables in the right order. This is important for optimisation.

    # Initialise some variables.
    vs.get(1, name='a')
    vs.get(2, name='1/b')
    vs.get(3, name='2/c')
    vs.unbounded(4, name='2/d')  # Test alias.

    # Test getting all.
    assert vs.get_vars() == [1, 2, 3, 4]
    assert vs.get_vars(indices=True) == [0, 1, 2, 3]

    # Test that names must exist.
    with pytest.raises(ValueError):
        vs.get_vars('e')

    # Test some queries.
    assert vs.get_vars('a') == [1]
    assert vs.get_vars('a', '*/b') == [1, 2]
    assert vs.get_vars('*/c', 'a') == [1, 3]
    assert vs.get_vars('*/c', '*/b', 'a') == [1, 2, 3]

    assert vs.get_vars('a', indices=True) == [0]
    assert vs.get_vars('a', '*/b', indices=True) == [0, 1]
    assert vs.get_vars('*/c', 'a', indices=True) == [0, 2]
    assert vs.get_vars('*/c', '*/b', 'a', indices=True) == [0, 1, 2]

    # Test some more queries.
    assert vs.get_vars('1/*') == [2]
    assert vs.get_vars('2/*') == [3, 4]
    assert vs.get_vars('1/*', '2/*') == [2, 3, 4]

    assert vs.get_vars('1/*', indices=True) == [1]
    assert vs.get_vars('2/*', indices=True) == [2, 3]
    assert vs.get_vars('1/*', '2/*', indices=True) == [1, 2, 3]

    # Test even more queries.
    assert vs.get_vars('*/b', '1/*') == [2]
    assert vs.get_vars('a', '2/*') == [1, 3, 4]
    assert vs.get_vars('a', '2/d', '2/*') == [1, 3, 4]
    assert vs.get_vars('2/d', '2/c', 'a', '1/*') == [1, 2, 3, 4]
    assert vs.get_vars('1/*') == [2]
    assert vs.get_vars('2/*') == [3, 4]
    assert vs.get_vars('1/*', '2/*') == [2, 3, 4]

    assert vs.get_vars('*/b', '1/*', indices=True) == [1]
    assert vs.get_vars('a', '2/*', indices=True) == [0, 2, 3]
    assert vs.get_vars('a', '2/d', '2/*', indices=True) == [0, 2, 3]
    assert vs.get_vars('2/d', '2/c', 'a', '1/*', indices=True) == [0, 1, 2, 3]
    assert vs.get_vars('1/*', indices=True) == [1]
    assert vs.get_vars('2/*', indices=True) == [2, 3]
    assert vs.get_vars('1/*', '2/*', indices=True) == [1, 2, 3]
Example 29
    wd = WorkingDirectory("_experiments", "exchange_ilmm")

    B.epsilon = 1e-8

    _, train, test = load()

    x = np.array(train.index)
    y = np.array(train)

    # Normalise data.
    normaliser = Normaliser(y)
    y_norm = normaliser.normalise(y)

    p = B.shape(y)[1]
    m = 3
    vs = Vars(torch.float64)

    def construct_model(vs):
        kernels = [
            vs.pos(1, name=f"{i}/var") *
            Matern12().stretch(vs.pos(0.1, name=f"{i}/scale"))
            for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        h = Dense(vs.get(shape=(p, m), name="h"))

        return ILMMPP(kernels, h, noise, latent_noises)

    def objective(vs):
        return -construct_model(vs).logpdf(torch.tensor(x),
Example 30
class AbstractGPCM(Model):
    """GPCM model.

    Args:
        scheme (str, optional): Approximation scheme. Must be one of `structured`,
            `mean-field-ca`, `mean-field-gradient`, `mean-field-collapsed-gradient`,
            `mean-field-ca-gradient`, or `mean-field-ca-collapsed-gradient`.
            Defaults to `structured`.
    """
    @_dispatch
    def __init__(self, scheme: str = "structured"):
        self.vs = Vars(jnp.float64)
        self.scheme = scheme.lower()

    def __prior__(self):
        # Construct kernel matrices.
        self.K_z = self.compute_K_z()
        self.K_z_inv = B.pd_inv(self.K_z)
        self.K_u = self.compute_K_u()
        self.K_u_inv = B.pd_inv(self.K_u)

        # Construct priors.
        self.p_u = Normal(self.K_u_inv)
        self.p_z = Normal(self.K_z_inv)

        # Construct approximation scheme.
        if self.scheme == "structured":
            self.approximation = Structured(self)
        elif self.scheme == "mean-field":
            # Use the best mean-field scheme.
            self.approximation = MeanField(self, fit="ca-collapsed-bfgs")
        elif self.scheme == "mean-field-ca":
            self.approximation = MeanField(self, fit="ca")
        elif self.scheme == "mean-field-gradient":
            self.approximation = MeanField(self, fit="bfgs")
        elif self.scheme == "mean-field-collapsed-gradient":
            self.approximation = MeanField(self, fit="collapsed-bfgs")
        elif self.scheme == "mean-field-ca-gradient":
            self.approximation = MeanField(self, fit="ca-bfgs")
        elif self.scheme == "mean-field-ca-collapsed-gradient":
            self.approximation = MeanField(self, fit="ca-collapsed-bfgs")
        else:
            raise ValueError(
                f'Invalid value "{self.scheme}" for the approximation scheme.')

    def __condition__(self, t, y):
        self.approximation.condition(t, y)

    @instancemethod
    @cast
    def elbo(self, *args, **kw_args):
        return self.approximation.elbo(*args, **kw_args)

    @instancemethod
    @cast
    def predict(self, *args, **kw_args):
        return self.approximation.predict(*args, **kw_args)

    @instancemethod
    def predict_kernel(self, t_k=None, num_samples=1000):
        """Predict kernel and normalise prediction.

        Args:
            t_k (vector, optional): Inputs to sample kernel at. Will be automatically
                determined if not given.
            num_samples (int, optional): Number of samples to use. Defaults to `1000`.

        Returns:
            :class:`collections.namedtuple`: The prediction.
        """
        return summarise_samples(
            *self.sample_kernel(t_k=t_k, num_samples=num_samples))

    @instancemethod
    def sample_kernel(self, t_k=None, num_samples=1000):
        """Predict kernel and normalise prediction.

        Args:
            t_k (vector, optional): Inputs to sample kernel at. Will be automatically
                determined if not given.
            num_samples (int, optional): Number of samples to use. Defaults to `1000`.

        Returns:
            tuple[vector, tensor]: Tuple containing the inputs of the samples and the
                samples.
        """
        if t_k is None:
            t_k = B.linspace(self.dtype, 0, self.extent, 300)

        ks = self.approximation.sample_kernel(t_k, num_samples=num_samples)

        # Report the mean variance of the kernel samples.
        var_mean = B.mean(ks[:, 0])
        wbml.out.kv("Mean variance of kernel samples", var_mean)

        return t_k, ks

    @instancemethod
    def predict_psd(self, t_k=None, num_samples=1000):
        """Predict the PSD in dB.

        Args:
            t_k (vector, optional): Inputs to sample kernel at. Will be automatically
                determined if not given.
            num_samples (int, optional): Number of samples to use. Defaults to `1000`.

        Returns:
            :class:`collections.namedtuple`: Predictions.
        """
        if t_k is None:
            t_k = B.linspace(self.dtype, 0, 2 * self.extent, 1000)
        t_k, ks = self.sample_kernel(t_k, num_samples=num_samples)

        # Estimate PSDs.
        freqs, psds = zip(*[estimate_psd(t_k, k, db=False) for k in ks])
        freqs = freqs[0]
        psds = B.stack(*psds, axis=0)

        return summarise_samples(freqs, psds, db=True)

    @instancemethod
    def predict_fourier(self, num_samples=1000):
        """Predict Fourier features.

        Args:
            num_samples (int, optional): Number of samples to use. Defaults to `1000`.

        Returns:
            tuple: Marginals of the predictions.
        """
        return self.approximation.predict_z(num_samples=num_samples)

    @instancemethod
    def predict_filter(self, t_h=None, num_samples=1000, min_phase=True):
        """Predict the learned filter.

        Args:
            t_h (vector, optional): Inputs to sample filter at.
            num_samples (int, optional): Number of samples to use. Defaults to `1000`.
            min_phase (bool, optional): Predict a minimum-phase version of the filter.
                Defaults to `True`.

        Returns:
            :class:`collections.namedtuple`: Predictions.
        """
        if t_h is None:
            t_h = B.linspace(self.dtype, -self.extent, self.extent, 601)

        @B.jit
        def sample_h(state):
            state, u = self.approximation.p_u.sample(state)
            u = B.mm(self.K_u, u)  # Transform :math:`\hat u` into :math:`u`.
            h = GP(self.k_h())
            h = h | (h(self.t_u), u)  # Condition on sample.
            state, h = h(t_h).sample(state)  # Sample at desired points.
            return state, B.flatten(h)

        # Perform sampling.
        state = B.global_random_state(self.dtype)
        samples = []
        for _ in range(num_samples):
            state, h = sample_h(state)

            # Transform sample according to specification.
            if min_phase:
                h = transform_min_phase(h)

            samples.append(h)
        B.set_global_random_state(state)

        if min_phase:
            # Start at zero.
            t_h = t_h - t_h[0]
        return summarise_samples(t_h, B.stack(*samples, axis=0))

    @instancemethod
    @cast
    def kernel_approx(self, t1, t2, u):
        """Kernel approximation using inducing variables :math:`u` for the
        impulse response :math:`h`.

        Args:
            t1 (vector): First time input.
            t2 (vector): Second time input.
            u (vector): Values of the inducing variables.

        Returns:
            tensor: Approximation of the kernel matrix broadcasted over `t1` and `t2`.
        """
        # Construct the first part.
        part1 = self.compute_i_hx(t1[:, None], t2[None, :])

        # Construct the second part.
        L_u = B.cholesky(self.K_u)
        inv_L_u = B.trisolve(L_u, B.eye(L_u))
        prod = B.mm(inv_L_u, B.uprank(u, rank=2))
        I_ux = self.compute_I_ux(t1, t2)
        trisolved = B.mm(inv_L_u, I_ux, inv_L_u, tr_c=True)
        part2 = B.trace(trisolved) - B.trace(
            B.mm(prod, trisolved, prod, tr_a=True))

        return part1 - part2

    @priormethod
    @cast
    def sample(self, t, normalise=False):
        """Sample the kernel then the function.

        Args:
            t (vector): Time points to sample the function at.
            normalise (bool, optional): Normalise the sample of the kernel.
                Defaults to `False`.

        Returns:
            tuple: Tuple containing the kernel matrix and the function.
        """
        u = B.sample(self.compute_K_u())[:, 0]
        K = self.kernel_approx(t, t, u)
        if normalise:
            K = K / K[0, 0]
        f = B.sample(closest_psd(K))[:, 0]
        y = f + B.sqrt(self.noise) * B.randn(f)
        return K, y

    def save(self, path):
        """Save model and inference results to a file.

        Args:
            path (str): Path to save to.
        """
        data = {
            name: B.to_numpy(B.dense(self.vs[name]))
            for name in self.vs.names
        }
        with open(path, "wb") as f:
            pickle.dump(data, f)

    def load(self, path):
        """Load model from a file.

        Args:
            path (str): Path to load from.
        """
        with open(path, "rb") as f:
            data = pickle.load(f)
        for name, value in data.items():
            if name in self.vs:
                # Overwrite existing values.
                self.vs.assign(name, value)
            else:
                # Assign an invisible unbounded variable: we lost the information
                # about the constraints.
                self.vs.unbounded(init=value, visible=False, name=name)
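A brief usage sketch of the persistence methods above. `ConcreteGPCM` is a hypothetical stand-in for a concrete subclass of `AbstractGPCM` and the path is illustrative; neither comes from the source.

# `ConcreteGPCM` is a placeholder for a concrete subclass of `AbstractGPCM`.
model = ConcreteGPCM(scheme="mean-field-ca")
# ... condition the model and fit the approximation ...
model.save("results/model.pickle")  # Writes a pickle of NumPy copies of the variables.
model.load("results/model.pickle")  # Restores them, assigning to existing names where possible.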