Code example #1
    def logpdf(self, x):
        """Compute the log-pdf of some data.

        Args:
            x (column vector): Data to compute log-pdf of.

        Returns:
            scalar: Log-pdf of `x`.
        """
        diff = B.subtract(x, self.mean)
        return -0.5 * (
            -B.logdet(self.prec)
            + B.cast(self.dtype, self.dim) * B.cast(self.dtype, B.log_2_pi)
            + B.sum(B.mm(self.prec, diff) * diff)
        )
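
This is the multivariate normal log-density written in terms of the precision matrix Λ = Σ⁻¹, i.e. log N(x; μ, Λ⁻¹) = ½(log|Λ| − d log 2π − (x − μ)ᵀΛ(x − μ)). A minimal NumPy check of the same algebra against scipy (a sketch; the matrix and vectors are made up):

import numpy as np
from scipy.stats import multivariate_normal

prec = np.array([[2.0, 0.5], [0.5, 1.0]])  # precision matrix (SPD)
mean = np.array([0.3, -0.2])
x = np.array([1.0, 2.0])

diff = x - mean
logpdf = -0.5 * (
    -np.linalg.slogdet(prec)[1]
    + len(x) * np.log(2 * np.pi)
    + diff @ prec @ diff
)
assert np.allclose(logpdf, multivariate_normal(mean, np.linalg.inv(prec)).logpdf(x))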
Code example #2
File: test_minimise.py, Project: wesselb/varz
def test_minimise_disconnected_gradient(minimise_method):
    dtype, minimise, kw_args = minimise_method
    vs = Vars(dtype=dtype)
    vs.ubnd(name="x")

    # Check that the optimiser runs for an objective that returns the constant zero.
    minimise(lambda v: B.cast(v.dtype, 0), vs, **kw_args)
Code example #3
def integral_abcd(a, b, c, d):
    """Compute the a-b-c-d integral from the paper.

    Args:
        a (tensor): First upper integration bound.
        b (tensor): Second upper integration bound.
        c (tensor): Decay for sum.
        d (tensor): Decay for absolute difference.

    Returns:
        tensor: Value of the integral.
    """
    # Compute the conditional and signs.
    sign = B.sign(a)
    condition = a * b >= 0

    # Compute the two parts.
    part1 = sign * d / c * (1 - B.exp(2 * c * sign * B.minimum(B.abs(a), B.abs(b))))
    part2 = (
        1
        - B.exp(c * a - d * B.abs(a))
        - B.exp(c * b - d * B.abs(b))
        + B.exp(c * (a + b) - d * B.abs(a - b))
    )

    # Combine and return.
    condition = B.cast(B.dtype(part1), condition)
    return (condition * part1 + part2) / (c**2 - d**2)
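
Because every operation goes through lab's generic B interface, the function runs unchanged on any supported backend. A usage sketch with plain NumPy scalars (the values are made up; note the final division by c**2 - d**2, so c = ±d is a singular case):

import numpy as np

a, b = np.float64(1.0), np.float64(-0.5)
c, d = np.float64(0.3), np.float64(0.8)
print(integral_abcd(a, b, c, d))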
Code example #4
def sum(a: Zero, axis=None):
    # Summing a zero matrix gives zero without materialising it: a scalar for
    # a full sum, or a zero vector over the remaining dimension otherwise.
    if axis is None:
        return B.cast(a.dtype, 0)
    elif axis == 0:
        return B.zeros(a.dtype, a.cols)
    elif axis == 1:
        return B.zeros(a.dtype, a.rows)
    else:
        _raise(axis)
Code example #5
def safe_sqrt(x):
    """Perform a square root that is safe to use in AD.

    Args:
        x (tensor): Tensor to take square root of.

    Returns:
        tensor: Square root of `x`.
    """
    return B.sqrt(B.maximum(x, B.cast(B.dtype(x), 1e-30)))
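
The clamp only matters under automatic differentiation: √x itself is fine at x = 0, but its derivative 1/(2√x) diverges there. A JAX re-statement of the same trick (a sketch; the original dispatches through lab's B instead of calling jnp directly):

import jax
import jax.numpy as jnp

def safe_sqrt(x):
    return jnp.sqrt(jnp.maximum(x, 1e-30))

print(jax.grad(jnp.sqrt)(0.0))   # inf: the derivative 1 / (2 sqrt(x)) blows up
print(jax.grad(safe_sqrt)(0.0))  # 0.0: the clamp cuts the gradient path at x = 0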
Code example #6
    def k_h(self):
        """Get the kernel function of the filter.

        Returns:
            :class:`mlkernels.Kernel`: Kernel for :math:`h`.
        """
        k_h = Exp().stretch(1 / self.lam)  # Kernel of filter before window
        k_h *= lambda t: B.exp(-self.alpha * B.abs(t))  # Window
        k_h *= lambda t: B.cast(self.dtype, t >= 0)  # Causality constraint
        return k_h
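
Assuming mlkernels follows the usual convention that multiplying a kernel by a function f yields f(t) k(t, t') f(t'), the three lines compose to (a sketch of the algebra, not taken from the source):

    k_h(t, t') = \exp(-\alpha |t|) \, \exp(-\lambda |t - t'|) \, \exp(-\alpha |t'|) \, \mathbf{1}\{t \ge 0\} \, \mathbf{1}\{t' \ge 0\}

since Exp().stretch(1 / lam) is the exponential kernel \exp(-\lambda |t - t'|).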
Code example #7
def compute_K_z(model):
    """Covariance matrix :math:`K_z` of :math:`z_m` for :math:`m=0,\\ldots,2M`.

    Args:
        model (:class:`.gprv.GPRV`): Model.

    Returns:
        matrix: :math:`K_z`.
    """
    # Compute harmonic frequencies.
    m = model.ms - B.cast(model.dtype, model.ms > model.m_max) * model.m_max
    omega = 2 * B.pi * m / (model.b - model.a)

    # Compute the parameters of the kernel matrix.
    lam_t = 1
    alpha = 0.5 * (model.b - model.a) / psd_matern_12(omega, model.lam, lam_t)
    alpha = alpha + alpha * B.cast(model.dtype, model.ms == 0)
    beta = 1 / (lam_t**0.5) * B.cast(model.dtype, model.ms <= model.m_max)

    return Diagonal(alpha) + LowRank(left=beta[:, None])
Code example #8
    def k_h(self):
        """Get the kernel function of the filter.

        Returns:
            :class:`mlkernels.Kernel`: Kernel for :math:`h`.
        """
        # Convert `self.gamma` to a regular length scale.
        gamma_scale = B.sqrt(1 / (2 * self.gamma))
        k_h = EQ().stretch(gamma_scale)  # Kernel of filter before window
        k_h *= lambda t: B.exp(-self.alpha * t**2)  # Window
        if self.causal:
            k_h *= lambda t: B.cast(self.dtype, t >= 0)  # Causality constraint
        return k_h
Code example #9
File: test_custom.py, Project: wesselb/lab
def test_bvn_cdf(check_lazy_shapes):
    check_sensitivity(bvn_cdf, s_bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
    check_grad(bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))

    # Check that function runs on both `float32`s and `float64`s.
    a, b, c = B.rand(3), B.rand(3), B.rand(3)
    approx(
        B.bvn_cdf(a, b, c),
        B.bvn_cdf(B.cast(np.float32, a), B.cast(np.float32, b),
                  B.cast(np.float32, c)),
    )

    # Check that, in JAX, the function checks the shapes of the inputs.
    with pytest.raises(ValueError):
        B.bvn_cdf(B.rand(jnp.float32, 2), B.rand(jnp.float32, 3),
                  B.rand(jnp.float32, 3))
    with pytest.raises(ValueError):
        B.bvn_cdf(B.rand(jnp.float32, 3), B.rand(jnp.float32, 2),
                  B.rand(jnp.float32, 3))
    with pytest.raises(ValueError):
        B.bvn_cdf(B.rand(jnp.float32, 3), B.rand(jnp.float32, 3),
                  B.rand(jnp.float32, 2))
Code example #10
    def f_wrapped(x):
        x = B.cast(vs.dtype, x)

        # Compute objective function value and gradient.
        try:
            obj_value, grad = f_value_and_grad(x)
        except Exception as e:
            return exception(x, e)

        # Perform requested conversion.
        obj_value, grad = _convert(obj_value, grad)

        f_evals.append(obj_value)
        return obj_value, grad
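
A wrapper of this shape is exactly what scipy.optimize.minimize consumes when jac=True: the objective returns a (value, gradient) pair. A minimal sketch with a made-up quadratic objective:

import numpy as np
from scipy.optimize import minimize

def f_wrapped(x):
    # Value and gradient of a toy quadratic objective.
    return np.sum(x**2), 2 * x

res = minimize(f_wrapped, x0=np.ones(3), jac=True, method="L-BFGS-B")
print(res.x)  # approximately zero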
Code example #11
    def kl(self, other: "NaturalNormal"):
        """Compute the Kullback-Leibler divergence with respect to another normal
        parametrised by its natural parameters.

        Args:
            other (:class:`.NaturalNormal`): Other.

        Returns:
            scalar: KL divergence with respect to `other`.
        """
        ratio = B.solve(B.chol(self.prec), B.chol(other.prec))
        diff = self.mean - other.mean
        return 0.5 * (
            B.sum(ratio**2)
            - B.logdet(B.mm(ratio, ratio, tr_a=True))
            + B.sum(B.mm(other.prec, diff) * diff)
            - B.cast(self.dtype, self.dim)
        )
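
With Λ_p = self.prec, Λ_q = other.prec, and Cholesky factors L_p, L_q, we have ratio = L_p^{-1} L_q, so B.sum(ratio**2) equals tr(Λ_q Λ_p^{-1}) and -B.logdet(B.mm(ratio, ratio, tr_a=True)) equals log(|Λ_p| / |Λ_q|). The return value is thus the standard Gaussian KL divergence in precision form (a worked check of the algebra):

    \mathrm{KL}(p \,\|\, q) = \frac{1}{2} \left( \operatorname{tr}(\Lambda_q \Lambda_p^{-1}) + (\mu_p - \mu_q)^\top \Lambda_q (\mu_p - \mu_q) - d + \log \frac{|\Lambda_p|}{|\Lambda_q|} \right)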
Code example #12
File: test_random.py, Project: wesselb/lab
def test_choice(x, p, check_lazy_shapes):
    state = B.create_random_state(B.dtype(x))

    # Make `p` a dictionary so that we can optionally give it.
    if p is None:
        p = {}
    else:
        # Cast weights to the right framework.
        p = {"p": B.cast(B.dtype(x), p)}

    # Check shape.
    assert B.shape(B.choice(x, **p)) == B.shape(x)[1:]
    assert B.shape(B.choice(x, 5, **p)) == (5,) + B.shape(x)[1:]
    assert B.shape(B.choice(x, 5, 5, **p)) == (5, 5) + B.shape(x)[1:]

    assert isinstance(B.choice(state, x, **p)[0], B.RandomState)
    assert B.shape(B.choice(state, x, **p)[1]) == B.shape(x)[1:]
    assert B.shape(B.choice(state, x, 5, **p)[1]) == (5,) + B.shape(x)[1:]
    assert B.shape(B.choice(state, x, 5, 5, **p)[1]) == (5, 5) + B.shape(x)[1:]

    # Check correctness.
    dtype = B.dtype(x)
    choices = set(to_np(B.choice(B.range(dtype, 5), 1000)))
    assert choices == set(to_np(B.range(dtype, 5)))
Code example #13
def test_normal_cast(normal1):
    assert B.dtype(normal1) == np.float64
    assert B.dtype(B.cast(np.float32, normal1)) == np.float32
Code example #14
File: minimise.py, Project: wesselb/varz
def _convert(*xs):
    return [B.cast(np.float64, B.to_numpy(x)) for x in xs]
Code example #15
File: test_generic.py, Project: wesselb/lab
def test_cast(check_lazy_shapes):
    # Test casting to a given data type.
    assert B.dtype(B.cast(np.float64, 1)) is np.float64
    assert B.dtype(B.cast(np.float64, np.array(1))) is np.float64
    assert B.dtype(B.cast(np.float64, autograd_box(np.float32(1)))) is np.float64

    assert B.dtype(B.cast(tf.float64, 1)) is tf.float64
    assert B.dtype(B.cast(tf.float64, np.array(1))) is tf.float64
    assert B.dtype(B.cast(tf.float64, tf.constant(1))) is tf.float64

    assert B.dtype(B.cast(torch.float64, 1)) is torch.float64
    assert B.dtype(B.cast(torch.float64, np.array(1))) is torch.float64
    assert B.dtype(B.cast(torch.float64, torch.tensor(1))) is torch.float64

    assert B.dtype(B.cast(jnp.float64, 1)) is jnp.float64
    assert B.dtype(B.cast(jnp.float64, np.array(1))) is jnp.float64
    assert B.dtype(B.cast(jnp.float64, jnp.array(1))) is jnp.float64
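
The pattern that this test pins down, and that all of the snippets above rely on, is that B.cast(dtype, x) converts x to dtype inside whatever framework dtype belongs to. A minimal NumPy-only usage sketch:

import numpy as np
import lab as B

x = B.ones(np.float32, 3)  # float32 vector
y = B.cast(np.float64, x)  # cast within NumPy
assert B.dtype(y) is np.float64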
Code example #16
File: test_generic.py, Project: wesselb/lab
def test_cast_own_dtype(x, check_lazy_shapes):
    # Test that casting to its own data type does nothing.
    assert x is B.cast(B.dtype(x), x)
Code example #17
File: vars.py, Project: gergely-flamich/varz
    def _get_var(self,
                 transform,
                 inverse_transform,
                 init,
                 generate_init,
                 shape,
                 dtype,
                 name):
        # If the name already exists, return that variable.
        try:
            return self[name]
        except KeyError:
            pass

        # A new variable will be added. Clear lookup cache.
        self._get_vars_cache.clear()

        # Resolve data type.
        dtype = self.dtype if dtype is None else dtype

        # If no source is provided, get the latent from the provided
        # initialiser.
        if self.source is None:
            # Resolve initialisation and inverse transform.
            if init is None:
                init = generate_init(shape=shape, dtype=dtype)
            else:
                init = B.cast(dtype, init)

            # Construct optimisable variable.
            latent = inverse_transform(init)
            if isinstance(self.dtype, B.TFDType):
                latent = tf.Variable(latent)
            elif isinstance(self.dtype, B.TorchDType):
                pass  # All is good in this case.
            else:
                # Must be a NumPy data type.
                assert isinstance(self.dtype, B.NPDType)
                latent = np.array(latent)
        else:
            # Get the latent variable from the source.
            length = reduce(mul, shape, 1)
            latent_flat = self.source[
                self.source_index : self.source_index + length
            ]
            self.source_index += length

            # Cast to the right data type.
            latent = B.cast(dtype, B.reshape(latent_flat, *shape))

        # Store transforms.
        self.vars.append(latent)
        self.transforms.append(transform)
        self.inverse_transforms.append(inverse_transform)

        # Get index of the variable.
        index = len(self.vars) - 1

        # Store name if given.
        if name is not None:
            self.name_to_index[name] = index

        # Generate the variable and return.
        return transform(latent)
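
This is the internal workhorse behind the public varz getters; as code example #2 shows, callers reach it through methods such as vs.ubnd. A usage sketch (assuming the getter forwards an init keyword, as the signature here suggests):

import numpy as np
from varz import Vars

vs = Vars(dtype=np.float64)
x = vs.ubnd(init=0.5, name="x")  # first call: initialises and stores the variable
y = vs.ubnd(name="x")            # later calls hit the return-self[name] fast path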
Code example #18
File: test_generic.py, Project: wesselb/lab
def test_cast_shape_element(dtype, check_lazy_shapes):
    assert B.dtype(B.cast(dtype, B.shape(B.ones(dtype, 1))[0])) is dtype
Code example #19
File: benchmark.py, Project: wesselb/lab
from time import time

import autograd.numpy as np

import lab as B

n = 20
m = 1

t = np.float64
eps = B.cast(t, B.epsilon)


def f1(x):
    dists2 = (x - B.transpose(x))**2
    K = B.exp(-0.5 * dists2)
    K = K + B.epsilon * B.eye(t, n)
    L = B.cholesky(K)
    return B.matmul(L, B.ones(t, n, m))


def f2(x):
    dists2 = (x - np.transpose(x))**2
    K = np.exp(-0.5 * dists2)
    K = K + B.epsilon * np.eye(n, dtype=t)
    L = np.linalg.cholesky(K)
    return np.matmul(L, np.ones((n, m)))


# Perform computation once.
x = np.linspace(0, 1, n, dtype=t)[:, None]
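
The snippet cuts off after the setup. A hypothetical continuation in the same style (not from the source) would warm both functions up once and then time repeated calls:

f1(x)
f2(x)

start = time()
for _ in range(1000):
    f1(x)
print("lab:  ", time() - start)

start = time()
for _ in range(1000):
    f2(x)
print("numpy:", time() - start)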
Code example #20
    def _get_var(
        self,
        transform,
        inverse_transform,
        init,
        generate_init,
        shape,
        shape_latent,
        dtype,
        name,
    ):
        # If the name already exists, return that variable.
        try:
            return self[name]
        except KeyError:
            pass

        # A new variable will be added. Clear lookup cache.
        self._get_latent_vars_cache.clear()

        # Resolve data type.
        dtype = self._resolve_dtype(dtype)

        # If no source is provided, get the latent from the provided
        # initialiser.
        if self.source is None:
            # Resolve initialisation.
            if init is None:
                init = generate_init(shape=shape, dtype=dtype)
            else:
                init = B.cast(dtype, init)

            # Ensure that the initialisation is on the right device.
            init = B.to_active_device(init)

            # Allow broadcasting in the initialisation.
            if shape is not None:
                init = init * B.ones(B.dtype(init), *shape)

            # Double check the shape of the initialisation.
            if shape is not None and Shape(*shape) != Shape(*B.shape(init)):
                raise ValueError(
                    f"Shape of initial value {B.shape(init)} is not equal to the "
                    f"desired shape {shape}.")

            # Construct optimisable variable.
            latent = inverse_transform(init)
            if isinstance(self.dtype, B.TFDType):
                latent = tf.Variable(latent)
            elif isinstance(self.dtype, B.TorchDType):
                pass  # All is good in this case.
            elif isinstance(self.dtype, B.JAXDType):
                latent = jnp.array(latent)
            else:
                # Must be a NumPy data type.
                assert isinstance(self.dtype, B.NPDType)
                latent = np.array(latent)
        else:
            # Get the latent variable from the source.
            length = reduce(mul, shape_latent, 1)
            latent_flat = self.source[
                self.source_index : self.source_index + length
            ]
            self.source_index += length

            # Cast to the right data type.
            latent = B.cast(dtype, B.reshape(latent_flat, *shape_latent))

        # Store transforms.
        self.vars.append(latent)
        self.transforms.append(transform)
        self.inverse_transforms.append(inverse_transform)

        # Get index of the variable.
        index = len(self.vars) - 1

        # Store name if given.
        if name is not None:
            self.name_to_index[name] = index

        # Generate the variable and return.
        return transform(latent)
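
Compared to code example #17, this version adds JAX support, device placement via B.to_active_device, and broadcasting of scalar initialisations against the requested shape. A sketch of the broadcasting path (assuming the public getter forwards shape and init, as the signature suggests):

import numpy as np
from varz import Vars

vs = Vars(dtype=np.float64)
x = vs.ubnd(init=1.0, shape=(3,), name="x")  # scalar init broadcast to shape (3,)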