def test_types_of_iterables():
    assert _types_of_iterable([1]) == Type(int)
    assert _types_of_iterable(['1']) == Type(str)
    assert _types_of_iterable([1, '1']) == Union(int, str)
    assert _types_of_iterable((1, )) == Type(int)
    assert _types_of_iterable(('1', )) == Type(str)
    assert _types_of_iterable((1, '1')) == Union(int, str)

def test_tupletype():
    # Standard type tests.
    assert hash(Tuple(int)) == hash(Tuple(int))
    assert hash(Tuple(int)) != hash(Tuple(str))
    assert hash(Tuple(Tuple(int))) == hash(Tuple(Tuple(int)))
    assert hash(Tuple(Tuple(int))) != hash(Tuple(Tuple(str)))
    assert repr(Tuple(int)) == 'TupleType({})'.format(repr(Type(int)))
    assert issubclass(Tuple(int).get_types()[0], tuple)
    assert not issubclass(Tuple(int).get_types()[0], int)
    assert not issubclass(Tuple(int).get_types()[0], list)

    # Test instance check.
    assert isinstance((), Tuple(Union()))
    assert isinstance((1, 2), Tuple(Union(int)))

    # Check tracking of parametric.
    assert Tuple(int).parametric
    assert as_type([Tuple(int)]).parametric
    assert as_type({Tuple(int)}).parametric
    promise = PromisedType()
    promise.deliver(Tuple(int))
    assert promise.resolve().parametric

    # Test correctness.
    dispatch = Dispatcher()

    @dispatch(object)
    def f(x):
        return 'fallback'

    @dispatch(tuple)
    def f(x):
        return 'tup'

    @dispatch(Tuple(int))
    def f(x):
        return 'tup of int'

    @dispatch(Tuple(Tuple(int)))
    def f(x):
        return 'tup of tup of int'

    assert f((1, )) == 'tup of int'
    assert f(1) == 'fallback'
    assert f((1, 2)) == 'tup of int'
    assert f((1, 2, '3')) == 'tup'
    assert f(((1, ), )) == 'tup of tup of int'
    assert f(((1, ), (1, ))) == 'tup of tup of int'
    assert f(((1, ), (1, 2))) == 'tup of tup of int'
    assert f(((1, ), (1, 2, '3'))) == 'tup'

def test_listtype():
    # Standard type tests.
    assert hash(List(int)) == hash(List(int))
    assert hash(List(int)) != hash(List(str))
    assert hash(List(List(int))) == hash(List(List(int)))
    assert hash(List(List(int))) != hash(List(List(str)))
    assert repr(List(int)) == 'ListType({})'.format(repr(Type(int)))
    assert issubclass(List(int).get_types()[0], list)
    assert not issubclass(List(int).get_types()[0], int)
    assert not issubclass(List(int).get_types()[0], tuple)

    # Test instance check.
    assert isinstance([], List(Union()))
    assert isinstance([1, 2], List(Union(int)))

    # Check tracking of parametric.
    assert List(int).parametric
    assert as_type([List(int)]).parametric
    assert as_type({List(int)}).parametric
    promise = PromisedType()
    promise.deliver(List(int))
    assert promise.resolve().parametric

    # Test correctness.
    dispatch = Dispatcher()

    @dispatch(object)
    def f(x):
        return 'fallback'

    @dispatch(list)
    def f(x):
        return 'list'

    @dispatch(List(int))
    def f(x):
        return 'list of int'

    @dispatch(List(List(int)))
    def f(x):
        return 'list of list of int'

    assert f([1]) == 'list of int'
    assert f(1) == 'fallback'
    assert f([1, 2]) == 'list of int'
    assert f([1, 2, '3']) == 'list'
    assert f([[1]]) == 'list of list of int'
    assert f([[1], [1]]) == 'list of list of int'
    assert f([[1], [1, 2]]) == 'list of list of int'
    assert f([[1], [1, 2, '3']]) == 'list'
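
The dispatch behaviour in both tests follows from a per-element instance
check. A minimal sketch of that check (an illustration with a hypothetical
helper, not plum's actual implementation):

def _is_iterable_of(value, container_type, element_type):
    # A value matches when it is the right container and every element
    # matches the parameter type, so `f((1, 2, '3'))` falls back to the
    # plain `tuple` method above.
    return isinstance(value, container_type) and all(
        isinstance(v, element_type) for v in value
    )

assert _is_iterable_of((1, 2), tuple, int)
assert not _is_iterable_of((1, 2, '3'), tuple, int)
assert _is_iterable_of([1, 2], list, int)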

Example #4
class AbstractObservations(metaclass=Referentiable):
    """Abstract base class for observations."""

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch({B.Numeric, Input}, B.Numeric, [PromisedGP])
    def __init__(self, x, y, ref=None):
        self._ref = ref
        self.x = ensure_at(x, self._ref)
        self.y = y
        self.graph = type_parameter(self.x).graph

    @_dispatch([Union(tuple, list, PromisedGP)])
    def __init__(self, *pairs, **kw_args):
        # Check whether there's a reference.
        self._ref = kw_args.get('ref')

        # Ensure `At` for all pairs.
        pairs = [(ensure_at(x, self._ref), y) for x, y in pairs]

        # Get the graph from the first pair.
        self.graph = type_parameter(pairs[0][0]).graph

        # Extend the graph by the Cartesian product `p` of all processes.
        p = self.graph.cross(*self.graph.ps)

        # Condition on the newly created vector-valued GP.
        xs, ys = zip(*pairs)
        self.x = p(MultiInput(*xs))
        self.y = B.concat(*[uprank(y) for y in ys], axis=0)

    @_dispatch({tuple, list})
    def __ror__(self, ps):
        return self.graph.condition(ps, self)

    def posterior_kernel(self, p_i, p_j):  # pragma: no cover
        """Get the posterior kernel between two processes.

        Args:
            p_i (:class:`.graph.GP`): First process.
            p_j (:class:`.graph.GP`): Second process.

        Returns:
            :class:`.kernel.Kernel`: Posterior kernel between the first and
                second process.
        """
        raise NotImplementedError('Posterior kernel construction not '
                                  'implemented.')

    def posterior_mean(self, p):  # pragma: no cover
        """Get the posterior kernel of a process.

        Args:
            p (:class:`.graph.GP`): Process.

        Returns:
            :class:`.mean.Mean`: Posterior mean of `p`.
        """
        raise NotImplementedError('Posterior mean construction not '
                                  'implemented.')
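
A hedged sketch of how the two constructors above are meant to be called on a
concrete subclass such as `Observations` below; `f1`, `f2`, `x1`, `x2`, `y1`,
and `y2` are hypothetical GPs on one graph and their data:

# Single set of observations at one process:
#     obs = Observations(f1(x1), y1)
# Several (location, data) pairs, conditioning the stacked vector-valued GP:
#     obs = Observations((f1(x1), y1), (f2(x2), y2))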

Example #5
class Mean(algebra.Function):
    """Mean function.

    Means can be added and multiplied.
    """

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch(object)
    def __call__(self, x):
        """Construct the mean for a design matrix.

        Args:
            x (input): Points to construct the mean at.

        Returns:
            tensor: Mean vector as a rank 2 column vector.
        """
        raise RuntimeError(
            f'For mean {self}, could not resolve argument "{x}".')

    @_dispatch(Union(Input, FDD))
    def __call__(self, x):
        return self(unwrap(x))

    @_dispatch(MultiInput)
    def __call__(self, x):
        return B.concat(*[self(xi) for xi in x.get()], axis=0)
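
The `MultiInput` case above stacks the per-input mean vectors into a single
column vector. A plain-NumPy sketch of that step, with hypothetical stand-ins
for the per-input means `self(xi)`:

import numpy as np

means = [np.ones((3, 1)), 2 * np.ones((2, 1))]  # stand-ins for self(xi)
stacked = np.concatenate(means, axis=0)         # cf. B.concat(..., axis=0)
assert stacked.shape == (5, 1)                  # rank-2 column vector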

def test_astype():
    # Use a bare `assert` here: printing the result would resolve `Self`.
    assert isinstance(as_type(Self), Self)
    assert isinstance(as_type([]), VarArgs)
    assert isinstance(as_type([int]), VarArgs)
    with pytest.raises(TypeError):
        as_type([int, str])
    assert as_type({int, str}) == Union(int, str)
    assert as_type(Type(int)) == Type(int)
    assert as_type(int) == Type(int)
    with pytest.raises(RuntimeError):
        as_type(1)

def test_union():
    assert hash(Union(int, str)) == hash(Union(str, int))
    assert repr(Union(int, str)) == repr(Union(int, str))
    assert set(Union(int, str).get_types()) == {str, int}
    assert not Union(int).parametric

    # Test equivalence between `Union` and `Type`.
    assert hash(Union(int)) == hash(Type(int))
    assert hash(Union(int, str)) != hash(Type(int))
    assert repr(Union(int)) == repr(Type(int))
    assert repr(Union(int, str)) != repr(Type(int))

    # Test lazy conversion to set.
    t = Union(int, int, str)
    assert isinstance(t._types, tuple)
    t.get_types()
    assert isinstance(t._types, set)

    # Test aliases.
    assert repr(Union(int, alias='MyUnion')) == 'tests.test_type.MyUnion'
    assert repr(Union(int, str, alias='MyUnion')) == 'tests.test_type.MyUnion'

def test_comparabletype():
    assert isinstance(1, Union(int))
    assert not isinstance('1', Union(int))
    assert isinstance('1', Union(int, str))
    assert issubclass(Union(int), Union(int))
    assert issubclass(Union(int), Union(int, str))
    assert not issubclass(Union(int, str), Union(int))
    assert Union(int).mro() == int.mro()
    with pytest.raises(RuntimeError):
        Union(int, str).mro()

Example #9
class SparseObservations(AbstractObservations):
    """Observations through inducing points.

    Takes further arguments according to the constructor of
    :class:`.measure.Observations`.

    Args:
        u (:class:`.measure.FDD`): Inducing points.
        e (:class:`.measure.GP`): Additive, independent noise process.
    """

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch(Union(tuple, PromisedFDD), [tuple])
    def __init__(self, u, *pairs):
        es, fdds, ys = zip(*pairs)

        # Copy the noises to a measure under which they are independent.
        measure = Measure()
        e = cross(*[GP(e.mean, e.kernel, measure=measure) for e in es])

        fdd = cross(*[fdd.p for fdd in fdds])(MultiInput(*fdds))
        y = B.concat(*[uprank(y) for y in ys], axis=0)
        SparseObservations.__init__(self, u, e, fdd, y)

    @_dispatch(tuple, PromisedGP, PromisedFDD, B.Numeric)
    def __init__(self, us, e, fdd, y):
        u = cross(*[u.p for u in us])(MultiInput(*us))
        SparseObservations.__init__(self, u, e, fdd, y)

    @_dispatch(PromisedFDD, PromisedGP, PromisedFDD, B.Numeric)
    def __init__(self, u, e, fdd, y):
        AbstractObservations.__init__(self, fdd, y)
        self.u = u
        self.e = e
        self._K_z_store = {}
        self._elbo_store = {}
        self._mu_store = {}
        self._A_store = {}

    def K_z(self, measure):
        """Kernel matrix of the data.

        Args:
            measure (:class:`.measure.Measure`): Measure.

        Returns:
            matrix: Kernel matrix.
        """
        try:
            return self._K_z_store[id(measure)]
        except KeyError:
            self._compute(measure)
            return self._K_z_store[id(measure)]

    def elbo(self, measure):
        """ELBO.

        Args:
            measure (:class:`.measure.Measure`): Measure.

        Returns:
            scalar: ELBO.
        """
        try:
            return self._elbo_store[id(measure)]
        except KeyError:
            self._compute(measure)
            return self._elbo_store[id(measure)]

    def mu(self, measure):
        """Mean of optimal approximating distribution.

        Args:
            measure (:class:`.measure.Measure`): Measure.

        Returns:
            matrix: Mean.
        """
        try:
            return self._mu_store[id(measure)]
        except KeyError:
            self._compute(measure)
            return self._mu_store[id(measure)]

    def A(self, measure):
        """Parameter of the corrective variance of the kernel of the optimal
        approximating distribution.

        Args:
            measure (:class:`.measure.Measure`): Measure.

        Returns:
            matrix: Corrective variance.
        """
        try:
            return self._A_store[id(measure)]
        except KeyError:
            self._compute(measure)
            return self._A_store[id(measure)]
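
    # The four accessors above share one memoisation pattern: look up the
    # result by `id(measure)` and, on a miss, run `_compute` (which fills all
    # four stores as a side effect) before looking up again:
    #
    #     try:
    #         return store[id(measure)]
    #     except KeyError:
    #         self._compute(measure)
    #         return store[id(measure)]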

    def _compute(self, measure):
        # Extract processes and inputs.
        p_x, x = self.fdd.p, self.fdd.x
        p_z, z = self.u.p, self.u.x

        # Construct the necessary kernel matrices.
        K_zx = measure.kernels[p_z, p_x](z, x)
        K_z = convert(measure.kernels[p_z](z), AbstractMatrix)
        self._K_z_store[id(measure)] = K_z

        # Evaluating `e.kernel(x)` will yield incorrect results if `x` is a
        # `MultiInput`, because `x` then still designates the particular components
        # of `f`. Fix that by instead designating the elements of `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(e(fdd.x)
                               for e, fdd in zip(self.e.kernel.ps, x.get())))
        else:
            x_n = x

        # Construct the noise kernel matrix.
        K_n = self.e.kernel(x_n)

        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError("Kernel matrix of noise must be diagonal.")

        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(K_z)
        A = B.add(B.eye(K_z), B.iqf(K_n, B.transpose(B.solve(L_z, K_zx))))
        self._A_store[id(measure)] = A
        y_bar = uprank(self.y) - self.e.mean(x_n) - measure.means[p_x](x)
        prod_y_bar = B.solve(L_z, B.iqf(K_n, B.transpose(K_zx), y_bar))

        # Compute the optimal mean.
        mu = B.add(
            measure.means[p_z](z),
            B.iqf(A, B.solve(L_z, K_z), prod_y_bar),
        )
        self._mu_store[id(measure)] = mu

        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` asserts that `K_n` is diagonal.
        # The rest, however, is completely generic.
        trace_part = B.ratio(
            Diagonal(measure.kernels[p_x].elwise(x)[:, 0]) -
            Diagonal(B.iqf_diag(K_z, K_zx)),
            K_n,
        )
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(A)
        iqf_part = B.iqf(K_n, y_bar)[0, 0] - B.iqf(A, prod_y_bar)[0, 0]
        self._elbo_store[id(measure)] = -0.5 * (trace_part + det_part +
                                                iqf_part)

    def posterior_kernel(self, measure, p_i, p_j):
        return PosteriorKernel(
            measure.kernels[p_i, p_j],
            measure.kernels[self.u.p, p_i],
            measure.kernels[self.u.p, p_j],
            self.u.x,
            self.K_z(measure),
        ) + CorrectiveKernel(
            measure.kernels[self.u.p, p_i],
            measure.kernels[self.u.p, p_j],
            self.u.x,
            self.A(measure),
            self.K_z(measure),
        )

    def posterior_mean(self, measure, p):
        return PosteriorMean(
            measure.means[p],
            measure.means[self.u.p],
            measure.kernels[self.u.p, p],
            self.u.x,
            self.K_z(measure),
            self.mu(measure),
        )
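
A hedged reading of `_compute` above: write Q = K_zx^T K_z^{-1} K_zx for the
Nyström approximation at the data. Via the matrix determinant lemma and the
Woodbury identity applied to `A`, the three parts stored above combine into
the collapsed sparse-GP evidence lower bound (cf. Titsias, 2009):

    \mathrm{ELBO} = \log \mathcal{N}\big(\bar{y} \mid 0,\, Q + K_n\big)
        - \tfrac{1}{2} \operatorname{tr}\big(K_n^{-1}(K_{xx} - Q)\big),

where K_xx is the prior kernel matrix at the data, K_n the (diagonal) noise
matrix, and \bar{y} the centred observations (`y_bar`). The same identity
underlies the older `_compute` in the `SparseObservations` variant further
down this page.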

Example #10
class MultiOutputKernel(Kernel):
    """A generic multi-output kernel.

    Args:
        measure (:class:`.measure.Measure`): Measure to take the kernels from.
        *ps (:class:`.measure.GP`): Processes that make up the multi-valued process.

    Attributes:
        measure (:class:`.measure.Measure`): Measure to take the kernels from.
        ps (tuple[:class:`.measure.GP`]): Processes that make up the multi-valued
            process.
    """

    _dispatch = Dispatcher(in_class=Self)

    def __init__(self, measure, *ps):
        self.measure = measure
        self.ps = ps

    # No `FDD` nor `MultiInput`.

    @_dispatch({B.Numeric, Input}, {B.Numeric, Input})
    def __call__(self, x, y):
        return self(
            MultiInput(*(p(x) for p in self.ps)), MultiInput(*(p(y) for p in self.ps))
        )

    # One `FDD`.

    @_dispatch(FDD, {B.Numeric, Input})
    def __call__(self, x, y):
        return self(MultiInput(x), MultiInput(*(p(y) for p in self.ps)))

    @_dispatch({B.Numeric, Input}, FDD)
    def __call__(self, x, y):
        return self(MultiInput(*(p(x) for p in self.ps)), MultiInput(y))

    # Two `FDD`s.

    @_dispatch(FDD, FDD)
    def __call__(self, x, y):
        return self.measure.kernels[x.p, y.p](x.x, y.x)

    # One `MultiInput`.

    @_dispatch(MultiInput, FDD)
    def __call__(self, x, y):
        return self(x, MultiInput(y))

    @_dispatch(MultiInput, {B.Numeric, Input})
    def __call__(self, x, y):
        return self(x, MultiInput(*(p(y) for p in self.ps)))

    @_dispatch(FDD, MultiInput)
    def __call__(self, x, y):
        return self(MultiInput(x), y)

    @_dispatch({B.Numeric, Input}, MultiInput)
    def __call__(self, x, y):
        return self(MultiInput(*(p(x) for p in self.ps)), y)

    # Two `MultiInput`s.

    @_dispatch(MultiInput, MultiInput)
    def __call__(self, x, y):
        return B.block(*[[self(xi, yi) for yi in y.get()] for xi in x.get()])

    # No `FDD` nor `MultiInput`.

    @_dispatch({B.Numeric, Input}, {B.Numeric, Input})
    def elwise(self, x, y):
        return self.elwise(
            MultiInput(*(p(x) for p in self.ps)), MultiInput(*(p(y) for p in self.ps))
        )

    # One `FDD`.

    @_dispatch(FDD, {B.Numeric, Input})
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to MultiOutputKernel.elwise."
        )

    @_dispatch({B.Numeric, Input}, FDD)
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to MultiOutputKernel.elwise."
        )

    # Two `FDD`s.

    @_dispatch(FDD, FDD)
    def elwise(self, x, y):
        return self.measure.kernels[x.p, y.p].elwise(x.x, y.x)

    # One `MultiInput`.

    @_dispatch(MultiInput, Union(B.Numeric, Input, FDD), precedence=1)
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to MultiOutputKernel.elwise."
        )

    @_dispatch(Union(B.Numeric, Input, FDD), MultiInput, precedence=1)
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to MultiOutputKernel.elwise."
        )

    # Two `MultiInput`s.

    @_dispatch(MultiInput, MultiInput)
    def elwise(self, x, y):
        if len(x.get()) != len(y.get()):
            raise ValueError(
                "MultiOutputKernel.elwise must be called with similarly sized "
                "MultiInputs."
            )
        return B.concat(
            *[self.elwise(xi, yi) for xi, yi in zip(x.get(), y.get())], axis=0
        )

    def render(self, formatter):
        ks = [str(self.measure.kernels[p]) for p in self.ps]
        return "MultiOutputKernel({})".format(", ".join(ks))

Example #11
class SparseObservations(AbstractObservations):
    """Observations through inducing points. Takes further arguments
    according to the constructor of :class:`.graph.Observations`.

    Attributes:
        elbo (scalar): ELBO.

    Args:
        z (input): Locations of the inducing points.
        e (:class:`.graph.GP`): Additive, independent noise process.
    """

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch({B.Numeric, Input, tuple, list},
               [Union(tuple, list, PromisedGP)])
    def __init__(self, z, *pairs, **kw_args):
        es, xs, ys = zip(*pairs)
        AbstractObservations.__init__(self, *zip(xs, ys), **kw_args)
        SparseObservations.__init__(self,
                                    z,
                                    self.graph.cross(*es),
                                    self.x,
                                    self.y,
                                    **kw_args)

    @_dispatch({list, tuple},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, zs, e, x, y, ref=None):
        # Ensure `At` everywhere.
        zs = [ensure_at(z, ref=ref) for z in zs]

        # Extract graph.
        graph = type_parameter(zs[0]).graph

        # Create a representative multi-output process.
        p_z = graph.cross(*(type_parameter(z) for z in zs))

        SparseObservations.__init__(self,
                                    p_z(MultiInput(*zs)),
                                    e, x, y, ref=ref)

    @_dispatch({B.Numeric, Input},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, z, e, x, y, ref=None):
        AbstractObservations.__init__(self, x, y, ref=ref)
        self.z = ensure_at(z, self._ref)
        self.e = e

        self._K_z = None
        self._elbo = None
        self._mu = None
        self._A = None

    @property
    def K_z(self):
        """Kernel matrix of the data."""
        if self._K_z is None:  # Cache computation.
            self._compute()
        return self._K_z

    @property
    def elbo(self):
        """ELBO."""
        if self._elbo is None:  # Cache computation.
            self._compute()
        return self._elbo

    @property
    def mu(self):
        """Mean of optimal approximating distribution."""
        if self._mu is None:  # Cache computation.
            self._compute()
        return self._mu

    @property
    def A(self):
        """Parameter of the corrective variance of the kernel of the optimal
        approximating distribution."""
        if self._A is None:  # Cache computation.
            self._compute()
        return self._A

    def _compute(self):
        # Extract processes.
        p_x, x = type_parameter(self.x), self.x.get()
        p_z, z = type_parameter(self.z), self.z.get()

        # Construct the necessary kernel matrices.
        K_zx = self.graph.kernels[p_z, p_x](z, x)
        self._K_z = matrix(self.graph.kernels[p_z](z))

        # Evaluating `e.kernel(x)` will yield incorrect results if `x` is a
        # `MultiInput`, because `x` then still designates the particular
        # components of `f`. Fix that by instead designating the elements of
        # `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(p(xi.get())
                               for p, xi in zip(self.e.kernel.ps, x.get())))
        else:
            x_n = x

        # Construct the noise kernel matrix.
        K_n = self.e.kernel(x_n)

        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError('Kernel matrix of noise must be diagonal.')

        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(self._K_z)
        self._A = B.eye(self._K_z) + \
                  B.qf(K_n, B.transpose(B.trisolve(L_z, K_zx)))
        y_bar = uprank(self.y) - self.e.mean(x_n) - self.graph.means[p_x](x)
        prod_y_bar = B.trisolve(L_z, B.qf(K_n, B.transpose(K_zx), y_bar))

        # Compute the optimal mean.
        self._mu = self.graph.means[p_z](z) + \
                   B.qf(self._A, B.trisolve(L_z, self._K_z), prod_y_bar)

        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` asserts that `K_n` is diagonal.
        #       The rest, however, is completely generic.
        trace_part = B.ratio(Diagonal(self.graph.kernels[p_x].elwise(x)[:, 0]) -
                             Diagonal(B.qf_diag(self._K_z, K_zx)), K_n)
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(self._A)
        qf_part = B.qf(K_n, y_bar)[0, 0] - B.qf(self._A, prod_y_bar)[0, 0]
        self._elbo = -0.5 * (trace_part + det_part + qf_part)

    def posterior_kernel(self, p_i, p_j):
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_z, p_i],
                               self.graph.kernels[p_z, p_j],
                               z, self.K_z) + \
               CorrectiveKernel(self.graph.kernels[p_z, p_i],
                                self.graph.kernels[p_z, p_j],
                                z, self.A, self.K_z)

    def posterior_mean(self, p):
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorMean(self.graph.means[p],
                             self.graph.means[p_z],
                             self.graph.kernels[p_z, p],
                             z, self.K_z, self.mu)

Example #12
class Observations(Referentiable):
    """Observations.

    An instance can alternatively be constructed from tuples or lists of
    `(x, y)` pairs; see the second constructor below.

    Args:
        x (input): Locations of points to condition on.
        y (tensor): Observations to condition on.
        ref (:class:`.graph.GP`, optional): Reference process. See
            :func:`.graph.ensure_at`.
    """

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch({B.Numeric, Input}, B.Numeric, [PromisedGP])
    def __init__(self, x, y, ref=None):
        self._ref = ref
        self.x = ensure_at(x, self._ref)
        self.y = y
        self.graph = type_parameter(self.x).graph
        self._K_x = None

    @_dispatch([Union(tuple, list, PromisedGP)])
    def __init__(self, *pairs, **kw_args):
        # Check whether there's a reference.
        self._ref = kw_args.get('ref')

        # Ensure `At` for all pairs.
        pairs = [(ensure_at(x, self._ref), y) for x, y in pairs]

        # Get the graph from the first pair.
        self.graph = type_parameter(pairs[0][0]).graph

        # Extend the graph by the Cartesian product `p` of all processes.
        p = self.graph.cross(*self.graph.ps)

        # Condition on the newly created vector-valued GP.
        xs, ys = zip(*pairs)
        self.x = p(MultiInput(*xs))
        self.y = B.concat(*[uprank(y) for y in ys], axis=0)

        self._K_x = None

    @property
    def K_x(self):
        """Kernel matrix of the data."""
        # Cache computation of the kernel matrix.
        if self._K_x is None:
            p_x, x = type_parameter(self.x), self.x.get()
            self._K_x = matrix(self.graph.kernels[p_x](x))
        return self._K_x

    def posterior_kernel(self, p_i, p_j):
        """Get the posterior kernel between two processes.

        Args:
            p_i (:class:`.graph.GP`): First process.
            p_j (:class:`.graph.GP`): Second process.

        Returns:
            :class:`.kernel.Kernel`: Posterior kernel between the first and
                second process.
        """
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_x, p_i],
                               self.graph.kernels[p_x, p_j], x, self.K_x)

    def posterior_mean(self, p):
        """Get the posterior kernel of a process.

        Args:
            p (:class:`.graph.GP`): Process.

        Returns:
            :class:`.mean.Mean`: Posterior mean of `p`.
        """
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorMean(self.graph.means[p], self.graph.means[p_x],
                             self.graph.kernels[p_x, p], x, self.K_x, self.y)

    @_dispatch({tuple, list})
    def __ror__(self, ps):
        return self.graph.condition(ps, self)
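
The `__ror__` overload above is what enables conditioning with the `|`
operator on a tuple or list of processes. A hedged sketch, with hypothetical
GPs `f1` and `f2` on the same graph and observations `obs`:

# (f1, f2) | obs   # calls obs.__ror__((f1, f2)), i.e. graph.condition((f1, f2), obs)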

Example #13
    class B(A):
        _dispatch = Dispatcher(in_class=Self)

        @_dispatch(Union(int, Self, str), return_type=Union(int, Self))
        def do(self, x):
            return x
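
A hedged note on `return_type` above, assuming plum's documented behaviour:
after dispatch, the value returned by `do` is checked against (and, where
possible, converted to) `Union(int, Self)`. Roughly:

# b = B()
# b.do(1)    # fine: `int` is in the return type
# b.do('1')  # expected to raise TypeError: `str` is not in `Union(int, B)`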

Example #14
class Kernel(algebra.Function):
    """Kernel function.

    Kernels can be added and multiplied.
    """

    _dispatch = Dispatcher(in_class=Self)

    @_dispatch(object, object)
    def __call__(self, x, y):
        """Construct the kernel matrix between all `x` and `y`.

        Args:
            x (input): First argument.
            y (input, optional): Second argument. Defaults to first
                argument.

        Returns:
            matrix: Kernel matrix.
        """
        raise RuntimeError(
            f'For kernel "{self}", could not resolve arguments "{x}" and "{y}".'
        )

    @_dispatch(object)
    def __call__(self, x):
        return self(x, x)

    @_dispatch(Union(Input, FDD), Union(Input, FDD))
    def __call__(self, x, y):
        return self(unwrap(x), unwrap(y))

    @_dispatch(Union(Input, FDD), object)
    def __call__(self, x, y):
        return self(unwrap(x), y)

    @_dispatch(object, Union(Input, FDD))
    def __call__(self, x, y):
        return self(x, unwrap(y))

    @_dispatch(MultiInput, object, precedence=1)
    def __call__(self, x, y):
        return self(x, MultiInput(y))

    @_dispatch(object, MultiInput, precedence=1)
    def __call__(self, x, y):
        return self(MultiInput(x), y)

    @_dispatch(MultiInput, MultiInput)
    def __call__(self, x, y):
        return B.block(*[[self(xi, yi) for yi in y.get()] for xi in x.get()])

    @_dispatch(object, object)
    def elwise(self, x, y):
        """Construct the kernel vector `x` and `y` element-wise.

        Args:
            x (input): First argument.
            y (input, optional): Second argument. Defaults to first
                argument.

        Returns:
            tensor: Kernel vector as a rank 2 column vector.
        """
        # TODO: Throw warning.
        return B.expand_dims(B.diag(self(x, y)), axis=1)

    @_dispatch(object)
    def elwise(self, x):
        return self.elwise(x, x)

    @_dispatch(Union(Input, FDD), Union(Input, FDD))
    def elwise(self, x, y):
        return self.elwise(unwrap(x), unwrap(y))

    @_dispatch(Union(Input, FDD), object)
    def elwise(self, x, y):
        return self.elwise(unwrap(x), y)

    @_dispatch(object, Union(Input, FDD))
    def elwise(self, x, y):
        return self.elwise(x, unwrap(y))

    @_dispatch(MultiInput, object, precedence=1)
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to Kernel.elwise.")

    @_dispatch(object, MultiInput, precedence=1)
    def elwise(self, x, y):
        raise ValueError(
            "Unclear combination of arguments given to Kernel.elwise.")

    @_dispatch(MultiInput, MultiInput)
    def elwise(self, x, y):
        if len(x.get()) != len(y.get()):
            raise ValueError(
                "Kernel.elwise must be called with similarly sized MultiInputs."
            )
        return B.concat(
            *[self.elwise(xi, yi) for xi, yi in zip(x.get(), y.get())], axis=0)

    def periodic(self, period=1):
        """Map to a periodic space.

        Args:
            period (tensor, optional): Period. Defaults to `1`.

        Returns:
            :class:`.kernel.Kernel`: Periodic version of the kernel.
        """
        return periodicise(self, period)

    @property
    def stationary(self):
        """Stationarity of the kernel."""
        try:
            return self._stationary_cache
        except AttributeError:
            self._stationary_cache = self._stationary
            return self._stationary_cache

    @property
    def _stationary(self):
        return False
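
The generic `elwise` fallback above builds the full kernel matrix and keeps
only its diagonal as a rank-2 column vector, which is presumably what the TODO
wants to warn about, since it costs a full matrix evaluation. A NumPy sketch
of that step, with a stand-in for `self(x, y)`:

import numpy as np

K = np.arange(16.0).reshape(4, 4)            # stand-in for self(x, y)
elwise = np.expand_dims(np.diag(K), axis=1)  # cf. B.expand_dims(B.diag(...), axis=1)
assert elwise.shape == (4, 1)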

Example #15
# NOTE: this snippet starts mid-module; the imports and module-type
# definitions below are assumed (reconstructed from the names used later in
# the snippet), not part of the original excerpt.
from plum import ModuleType, Union, add_promotion_rule

import numpy as np

# `Dimension` is assumed to come from the host library's shape utilities.

# Define TensorFlow module types (assumed).
_tf_tensor = ModuleType("tensorflow", "Tensor")
_tf_variable = ModuleType("tensorflow", "Variable")
_tf_indexedslices = ModuleType("tensorflow", "IndexedSlices")

# Define PyTorch module types (assumed).
_torch_tensor = ModuleType("torch", "Tensor")
_torch_dtype = ModuleType("torch", "dtype")
_torch_device = ModuleType("torch", "device")
_torch_randomstate = ModuleType("torch", "Generator")
_torch_retrievables = [_torch_tensor, _torch_dtype, _torch_device, _torch_randomstate]

# Define AutoGrad module types.
_ag_tensor = ModuleType("autograd.tracer", "Box")
_ag_retrievables = [_ag_tensor]

# Define JAX module types.
_jax_tensor = ModuleType("jax.interpreters.xla", "DeviceArray")
_jax_tracer = ModuleType("jax.core", "Tracer")
_jax_dtype = ModuleType("jax._src.numpy.lax_numpy", "_ScalarMeta")
_jax_device = ModuleType("jaxlib.xla_extension", "Device")
_jax_retrievables = [_jax_tensor, _jax_tracer, _jax_dtype, _jax_device]

# Numeric types:
Int = Union(*([int, Dimension] + np.sctypes["int"] + np.sctypes["uint"]), alias="Int")
Float = Union(*([float] + np.sctypes["float"]), alias="Float")
Complex = Union(*([complex] + np.sctypes["complex"]), alias="Complex")
Bool = Union(bool, np.bool_, alias="Bool")
Number = Union(Int, Bool, Float, Complex, alias="Number")
NPNumeric = Union(np.ndarray, alias="NPNumeric")
AGNumeric = Union(_ag_tensor, alias="AGNumeric")
TFNumeric = Union(_tf_tensor, _tf_variable, _tf_indexedslices, alias="TFNumeric")
TorchNumeric = Union(_torch_tensor, alias="TorchNumeric")
JAXNumeric = Union(_jax_tensor, _jax_tracer, alias="JAXNumeric")
Numeric = Union(
    Number, NPNumeric, AGNumeric, TFNumeric, JAXNumeric, TorchNumeric, alias="Numeric"
)

# Define corresponding promotion rules and conversion methods.
add_promotion_rule(NPNumeric, TFNumeric, TFNumeric)
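
Because the aliases above are plum `Union`s, they support the same instance
checks exercised in `test_comparabletype` earlier on this page, e.g.:

assert isinstance(1, Int)
assert isinstance(1.0, Float)
assert not isinstance(1, Float)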