import numpy as np
import tensorflow as tf

from typing import Any, List
from gpflow import kernels
from gpflow.config import default_float
from gpflow.utilities import Dispatcher

# ---- Exports
__all__ = ('bias_initializer', 'weight_initializer')

# ==============================================
#                                   initializers
# ==============================================
MaternKernel = (kernels.Matern12, kernels.Matern32, kernels.Matern52)
bias_initializer = Dispatcher("bias_initializer")
weight_initializer = Dispatcher("weight_initializer")


@bias_initializer.register(kernels.Stationary, int)
def _bias_initializer_fallback(kern: kernels.Stationary,
                               ndims: int,
                               *,
                               batch_shape: List = None,
                               dtype: Any = None,
                               maxval: float = 2 * np.pi) -> tf.Tensor:
    if dtype is None:
        dtype = default_float()

    shape = [ndims] if batch_shape is None else list(batch_shape) + [ndims]
    return tf.random.uniform(shape=shape, maxval=maxval, dtype=dtype)
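
A minimal usage sketch (the kernel choice and shapes are illustrative): the
dispatcher resolves on the argument types, so any `Stationary` kernel reaches
the fallback above.

kern = kernels.Matern32(lengthscales=0.5)
phases = bias_initializer(kern, 3, batch_shape=[2])  # uniform on [0, 2*pi)
print(phases.shape)  # (2, 3)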
Example #2
import numpy as np
import tensorflow as tf
from gpflow.kernels import (Kernel,
                            MultioutputKernel,
                            SeparateIndependent,
                            SharedIndependent,
                            LinearCoregionalization,
                            Matern52 as GPflowMatern52)

from gpflow.inducing_variables import (InducingVariables,
                                       MultioutputInducingVariables,
                                       SharedIndependentInducingVariables,
                                       SeparateIndependentInducingVariables)

from gpflow.utilities import Dispatcher
slice_multioutput_kernel = Dispatcher("slice_multioutput_kernel")
slice_multioutput_inducing = Dispatcher("slice_multioutput_inducing")

# ---- Exports
__all__ = (
  'Matern52',
  'slice_multioutput_kernel',
  'slice_multioutput_inducing',
)


# ==============================================
#                                  gpflow_extras
# ==============================================
class Matern52(GPflowMatern52):
  def K_r2(self, r2, eps=None):  # faster than <gpflow.kernels.Matern52.K_r2>
    # Assumed completion: clamp r2 away from zero so the square root has
    # stable gradients, then evaluate the Matern-5/2 form directly.
    if eps is None:
      eps = 1e-12  # hypothetical numerical floor
    r = tf.sqrt(tf.maximum(r2, eps))
    sqrt5 = np.sqrt(5.0)
    return self.variance * (1 + sqrt5 * r + (5 / 3) * r2) * tf.exp(-sqrt5 * r)

Example #3
# ---- Imports
import tensorflow as tf

from typing import List, Callable
from gpflow import inducing_variables
from gpflow.base import TensorLike
from gpflow.utilities import Dispatcher
from gpflow.kernels import Kernel, MultioutputKernel, LinearCoregionalization
from gpflow_sampling.sampling.updates import exact as exact_update
from gpflow_sampling.sampling.core import AbstractSampler, CompositeSampler
from gpflow_sampling.kernels import Conv2d
from gpflow_sampling.inducing_variables import InducingImages

# ---- Exports
__all__ = ('decoupled', )
decoupled = Dispatcher("decoupled")


# ==============================================
#                             decoupled_samplers
# ==============================================
@decoupled.register(Kernel, AbstractSampler, TensorLike, TensorLike)
def _decoupled_fallback(kern: Kernel,
                        prior: AbstractSampler,
                        Z: TensorLike,
                        u: TensorLike,
                        *,
                        mean_function: Callable = None,
                        update_rule: Callable = exact_update,
                        join_rule: Callable = sum,
                        **kwargs):
  # Assumed completion of the truncated body: draw the prior at the inducing
  # locations, build a pathwise update conditioned on u, and join the two
  # sample paths (CompositeSampler's constructor arguments are assumptions).
  f = prior(Z)
  update = update_rule(kern, Z, u, f, **kwargs)
  return CompositeSampler(join_rule=join_rule,
                          samplers=[prior, update],
                          mean_function=mean_function)
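
The update-then-join pattern above is pathwise (Matheron's rule) sampling as in
Wilson et al. (2020): a posterior draw is represented as
$f(\cdot) = f_{prior}(\cdot) + K(\cdot, Z) K(Z, Z)^{-1} (u - f_{prior}(Z))$,
which is why the default `join_rule` is simply `sum`.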
Example #4
# ==============================================
#                                       Preamble
# ==============================================
# ---- Imports
import tensorflow as tf

from typing import Any, List, Callable
from gpflow.config import default_float
from gpflow.kernels import Kernel, MultioutputKernel
from gpflow.utilities import Dispatcher
from gpflow_sampling.bases import fourier as fourier_basis
from gpflow_sampling.sampling.core import DenseSampler, MultioutputDenseSampler
from gpflow_sampling.kernels import Conv2d, DepthwiseConv2d


# ---- Exports
__all__ = ('random_fourier',)
random_fourier = Dispatcher("random_fourier")


# ==============================================
#                                 fourier_priors
# ==============================================
@random_fourier.register(Kernel)
def _random_fourier(kernel: Kernel,
                    sample_shape: List,
                    num_bases: int,
                    basis: Callable = None,
                    dtype: Any = None,
                    name: str = None,
                    **kwargs):

  if dtype is None:
    dtype = default_float()

  # Assumed completion: default to a Fourier feature basis for this kernel,
  # then pair i.i.d. standard-normal weights with that basis in a
  # DenseSampler (constructor arguments here are assumptions).
  if basis is None:
    basis = fourier_basis(kernel, num_bases=num_bases)

  weights = tf.random.normal(list(sample_shape) + [num_bases], dtype=dtype)
  return DenseSampler(weights=weights, basis=basis, name=name, **kwargs)
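
This is the standard random Fourier feature construction: the prior is
approximated as $f(x) \approx \sum_i w_i \phi_i(x)$ with $w_i \sim N(0, 1)$, so
`num_bases` trades approximation quality of the stationary prior against cost.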
Example #5
# The following code is adapted from https://github.com/GPflow/GPflow/tree/develop/gpflow/conditionals

from gpflow.utilities import Dispatcher
conditional_train = Dispatcher("conditional_train")

import tensorflow as tf

from gpflow.covariances import Kuf, Kuu
from gpflow.inducing_variables import InducingVariables
from gpflow.kernels import Kernel
from gpflow.utilities.ops import eye
from gpflow.config import default_jitter
from gpflow.conditionals.util import base_conditional, expand_independent_outputs


@conditional_train.register(object, InducingVariables, Kernel, object)
def _conditional_train(
    Xnew: tf.Tensor,
    inducing_variable: InducingVariables,
    kernel: Kernel,
    f: tf.Tensor,
    *,
    full_cov=False,
    full_output_cov=False,
    q_sqrt=None,
    white=False,
):
    """
    Single-output GP conditional.

    The covariance matrices used to calculate the conditional have the
    following shapes, with M inducing points and N new inputs:

    - Kuu: [M, M]
    - Kuf: [M, N]
    - Kff: [N, N] if full_cov else [N]
    """
    # Assumed completion, mirroring the GPflow conditional this file adapts:
    Kmm = Kuu(inducing_variable, kernel, jitter=default_jitter())  # [M, M]
    Kmn = Kuf(inducing_variable, kernel, Xnew)  # [M, N]
    Knn = kernel(Xnew, full_cov=full_cov)
    fmean, fvar = base_conditional(
        Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white)
    return fmean, expand_independent_outputs(fvar, full_output_cov, full_cov)
Example #6
import abc

import tensorflow as tf

from gpflow.base import TensorType
from gpflow.conditionals import conditional
from gpflow.config import default_float, default_jitter
from gpflow.covariances import Kuf, Kuu
from gpflow.inducing_variables import InducingVariables
from gpflow.kernels import Kernel
from gpflow.utilities import Dispatcher

from gpflux.math import compute_A_inv_b
from gpflux.sampling.kernel_with_feature_decomposition import KernelWithFeatureDecomposition
from gpflux.sampling.utils import draw_conditional_sample

efficient_sample = Dispatcher("efficient_sample")
""" A function that returns a :class:`Sample` of a GP posterior. """


class Sample(abc.ABC):
    """
    This class represents a sample from a GP that you can evaluate via its
    ``__call__`` at new locations within the support of the GP.

    Importantly, the same function draw (sample) is evaluated when calling it multiple
    times. This property is called consistency. Achieving consistency for vanilla GPs is costly
    because it scales cubically with the number of evaluation points,
    but works with any kernel. It is implemented in
    :meth:`_efficient_sample_conditional_gaussian`.
    For :class:`KernelWithFeatureDecomposition`, the more efficient approach
    following :cite:t:`wilson2020efficiently` is implemented in a separate
    :func:`efficient_sample` dispatch.
    """

    @abc.abstractmethod
    def __call__(self, X: TensorType) -> tf.Tensor:
        """ Return the sample evaluated at the new input locations ``X``. """
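
Consistency in practice looks like the following sketch (variable names are
illustrative, and the exact `efficient_sample` signature is assumed):

# f_sample = efficient_sample(inducing_variable, kernel, q_mu, q_sqrt=q_sqrt)
# y1 = f_sample(X_new)  # evaluates one fixed function draw
# y2 = f_sample(X_new)  # the same draw again, so y1 == y2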
Example #7
"""
# ---- Imports
import tensorflow as tf

from gpflow import inducing_variables
from gpflow.base import TensorLike
from gpflow.config import default_jitter
from gpflow.utilities import Dispatcher
from gpflow_sampling.utils import swap_axes, move_axis, inducing_to_tensor
from gpflow_sampling.bases.core import AbstractBasis
from gpflow_sampling.sampling.core import DenseSampler, MultioutputDenseSampler

# ==============================================
#                                  linear_update
# ==============================================
linear = Dispatcher("linear_updates")


@linear.register(TensorLike, TensorLike, TensorLike)
def _linear_fallback(Z: TensorLike,
                     u: TensorLike,
                     f: TensorLike,
                     *,
                     L: TensorLike = None,
                     diag: TensorLike = None,
                     basis: AbstractBasis = None,
                     **kwargs):

    u_shape = tuple(u.shape)
    f_shape = tuple(f.shape)
    assert u_shape[-1] == 1, "Received multiple output features"
Example #8
from gpflow import kernels, inducing_variables
from gpflow.base import TensorLike
from gpflow.config import default_jitter
from gpflow.utilities import Dispatcher
from gpflow_sampling import covariances, kernels as kernels_ext
from gpflow_sampling.utils import swap_axes, move_axis
from gpflow_sampling.bases import kernel as kernel_basis
from gpflow_sampling.bases.core import AbstractBasis
from gpflow_sampling.sampling.core import DenseSampler, MultioutputDenseSampler
from gpflow_sampling.inducing_variables import InducingImages

# ==============================================
#                                  exact_updates
# ==============================================
exact = Dispatcher("exact_updates")


@exact.register(kernels.Kernel, TensorLike, TensorLike, TensorLike)
def _exact_fallback(kern: kernels.Kernel,
                    Z: TensorLike,
                    u: TensorLike,
                    f: TensorLike,
                    *,
                    L: TensorLike = None,
                    diag: TensorLike = None,
                    basis: AbstractBasis = None,
                    **kwargs):
    """
  Return pathwise updates of a prior processes $f$ subject to the
  condition $p(f | u) = N(f | u, diag)$ on $f = f(Z)$.
from typing import Union

from gpflow import kernels as gpflow_kernels
from gpflow.base import TensorType
from gpflow.utilities import Dispatcher
from gpflow.inducing_variables import InducingVariables
from gpflow_sampling import kernels
from gpflow_sampling.bases import fourier_bases
from gpflow_sampling.bases.core import KernelBasis


# ---- Exports
__all__ = (
  'kernel_basis',
  'fourier_basis',
)

kernel_basis = Dispatcher("kernel_basis")
fourier_basis = Dispatcher("fourier_basis")


# ==============================================
#                                       dispatch
# ==============================================
@kernel_basis.register(gpflow_kernels.Kernel)
def _kernel_fallback(kern: gpflow_kernels.Kernel,
                     centers: Union[TensorType, InducingVariables],
                     **kwargs):
  return KernelBasis(kernel=kern, centers=centers, **kwargs)


@fourier_basis.register(gpflow_kernels.Stationary)
def _fourier_stationary(kern: gpflow_kernels.Stationary, **kwargs):
  # Assumed completion: stationary kernels get a random Fourier basis; the
  # exact entry point inside fourier_bases is an assumption.
  return fourier_bases.Dense(kern, **kwargs)
Example #10
# ==============================================
#                                       Preamble
# ==============================================
# ---- Imports
import numpy as np
import tensorflow as tf
import torch
import gpflow
import gpytorch

from gpflow.utilities import Dispatcher
from typing import *

# ---- Exports
convert_kernel = Dispatcher("convert_kernel")
convert_likelihood = Dispatcher("convert_likelihood")
__all__ = (
    'convert_kernel',
    'convert_likelihood',
    'GridGPR',
    'GPyTorchSampler',
)


# ==============================================
#                                gpytorch_extras
# ==============================================
@convert_kernel.register(gpflow.kernels.SquaredExponential)
def convert_kernel_squaredExp(kernel, **kwargs):
    base_kernel = gpytorch.kernels.RBFKernel(**kwargs)
    # Assumed completion: copy hyperparameters across and wrap the kernel so
    # GPflow's variance maps onto gpytorch's outputscale.
    base_kernel.lengthscale = kernel.lengthscales.numpy()
    scaled_kernel = gpytorch.kernels.ScaleKernel(base_kernel)
    scaled_kernel.outputscale = kernel.variance.numpy()
    return scaled_kernel

Example #11

import tensorflow as tf

from gpflow import kernels
from gpflow.base import TensorLike
from gpflow.utilities import Dispatcher
from gpflow.inducing_variables import (InducingVariables,
                                       SharedIndependentInducingVariables)
from gpflow.covariances.dispatch import Kuf as Kuf_dispatch
from gpflow_sampling.kernels import Conv2d, DepthwiseConv2d
from gpflow_sampling.utils import move_axis, get_inducing_shape
from gpflow_sampling.inducing_variables import (InducingImages,
                                                DepthwiseInducingImages)

# ==============================================
#                                           Kfus
# ==============================================
Kfu = Dispatcher("Kfu")


@Kfu.register(InducingVariables, kernels.Kernel, TensorLike)
def _Kfu_fallback(Z, kern, X, **kwargs):
    Kuf = Kuf_dispatch(Z, kern, X, **kwargs)

    # Assume features of X and Z occupy a single trailing axis, so all
    # remaining axes are batch axes.
    ndims_x = X.shape.ndims - 1
    ndims_z = len(get_inducing_shape(Z)) - 1
    assert ndims_x + ndims_z == Kuf.shape.ndims

    # Swap the batch axes of x and z
    axes = list(range(ndims_x + ndims_z))
    perm = axes[ndims_z:ndims_z + ndims_x] + axes[:ndims_z]
    return tf.transpose(Kuf, perm)
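
For the base case of plain inducing points, Z: [M, D] and X: [N, D] give
ndims_z = ndims_x = 1, so Kuf: [M, N], axes = [0, 1] and perm = [1, 0]; the
transpose then returns the familiar Kfu: [N, M].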
Example #12
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from gpflow.utilities import Dispatcher

__all__ = ('location_scale', 'finite_fourier', 'decoupled')

location_scale = Dispatcher("location_scale")
finite_fourier = Dispatcher("finite_fourier")
decoupled = Dispatcher("decoupled")
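
A sketch of how these dispatchers are meant to be consumed (the handler below
is hypothetical; real registrations live elsewhere in the package):

from gpflow.kernels import Stationary

@location_scale.register(Stationary, object)
def _location_scale_stationary(kern, X, **kwargs):
    ...  # hypothetical handler; dispatch selects it from the argument types

# location_scale(kern, X) now routes to the handler above.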

Example #13
import tensorflow as tf

from gpflow import kernels
from gpflow.base import TensorLike
from gpflow.config import default_jitter
from gpflow.utilities import Dispatcher
from gpflow_sampling import covariances, kernels as kernels_ext
from gpflow_sampling.utils import batch_tensordot
from gpflow_sampling.inducing_variables import *

SupportedBaseKernels = (kernels.Matern12, kernels.Matern32, kernels.Matern52,
                        kernels.SquaredExponential)

# ---- Export
__all__ = ('sample_joint', 'avg_spatial_inner_product', 'test_update_sparse',
           'test_update_sparse_shared', 'test_update_sparse_separate',
           'test_update_conv2d')

# ==============================================
#                                         common
# ==============================================
sample_joint = Dispatcher("sample_joint")


@sample_joint.register(kernels.Kernel, TensorLike, TensorLike)
def _sample_joint_fallback(kern,
                           X,
                           Xnew,
                           num_samples: int,
                           L: TensorLike = None,
                           diag: TensorLike = None):
    """
  Sample from the joint distribution of $f(X), g(Z)$ via a
  location-scale transform.
  """
    if diag is None:
        diag = default_jitter()

    # Assumed completion: build the joint covariance over [X; Xnew], take a
    # jittered Cholesky factor, and scale i.i.d. standard-normal draws by it.
    if L is None:
        K = kern(tf.concat([X, Xnew], axis=0), full_cov=True)
        K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + diag)
        L = tf.linalg.cholesky(K)

    rvs = tf.random.normal([num_samples, L.shape[-1], 1], dtype=L.dtype)
    draws = tf.squeeze(L @ rvs, axis=-1)  # [num_samples, |X| + |Xnew|]
    return tf.split(draws, [X.shape[0], Xnew.shape[0]], axis=-1)