Example #1
    def __init__(self, input_dim, output_dim, active_dims=None):
        Kernel.__init__(self, active_dims)
        self.input_dim = input_dim
        self.output_dim = output_dim

        noise = np.random.random((output_dim))
        self.noise = gpflow.Parameter(noise,
                                      transform=gpflow.utilities.positive(),
                                      name="noise")
Example #2
 def __init__(self, base_kern, branchPtTensor, b, fDebug=False):
     ''' branchPtTensor is a tensor of branch points of size F x F x B, where F is the number
     of functions and B is the number of branching points '''
     Kernel.__init__(self, input_dim=base_kern.input_dim + 1)
     self.kern = base_kern
     self.fm = branchPtTensor
     self.fDebug = fDebug
     assert isinstance(b, np.ndarray)
     assert self.fm.shape[0] == self.fm.shape[1]
     assert self.fm.shape[2] > 0
     self.Bv = DataHolder(b)
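The F x F x B layout described in the docstring can be sketched as follows (the values and the single branch point below are purely illustrative, not taken from the original source):

import numpy as np

F, B = 3, 1                    # F functions, B branching points
branchPtTensor = np.zeros((F, F, B))
branchPtTensor[:, :, 0] = 0.5  # illustrative: every pair of functions branches at t = 0.5
b = np.asarray([0.5])          # branch-point locations passed in as `b` (ndarray, as the assert requires)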
Example #3
def Kuf_kernel_GPinducingvariables(inducing_variable: InducingVariables,
                                   kernel: Kernel, X: tf.Tensor):
    GP_IV = inducing_variable.GP_IV

    Kuf = kernel.K(GP_IV, X)

    return Kuf
Example #4
 def __init__(self,
              input_dim,
              variance=1.0,
              frequency=np.array([1.0, 1.0]),
              lengthscale=1.0,
              correlation=0.0,
              max_freq=1.0,
              active_dims=None):
     assert input_dim == 1  # the derivations are valid only for one-dimensional input
     Kernel.__init__(self, input_dim=input_dim, active_dims=active_dims)
     self.variance = Param(variance, transforms.positive)
     self.frequency = Param(frequency, transforms.Logistic(0.0, max_freq))
     self.lengthscale = Param(lengthscale, transforms.positive)
     correlation = np.clip(correlation, 1e-4,
                           1 - 1e-4)  # clip for numerical reasons
     self.correlation = Param(correlation, transforms.Logistic())
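The Logistic transform above constrains frequency to (0, max_freq); below is a small numpy sketch of the standard sigmoid-based parameterisation this is assumed to use (illustrative, not the library code):

import numpy as np

def logistic(unconstrained, low, high):
    # Maps an unconstrained optimiser value into the open interval (low, high).
    return low + (high - low) / (1.0 + np.exp(-unconstrained))

print(logistic(np.array([-10.0, 0.0, 10.0]), 0.0, 1.0))  # values squeezed into (0, 1)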
Example #5
def Kuu_kernel_GPinducingvariables(inducing_variable: InducingVariables,
                                   kernel: Kernel,
                                   jitter=0.0):
    GP_IV = inducing_variable.GP_IV

    Kuu = kernel.K(GP_IV)
    Kuu += jitter * tf.eye(tf.shape(Kuu)[0], dtype=Kuu.dtype)

    return Kuu
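A small sketch of why the jitter term is added: a tiny diagonal boost keeps the Cholesky factorisation of a nearly singular Kuu numerically stable (illustrative numpy, not the original code):

import numpy as np

Kuu = np.array([[1.0, 1.0],
                [1.0, 1.0]])                       # rank-deficient Gram matrix
jitter = 1e-6
L = np.linalg.cholesky(Kuu + jitter * np.eye(2))   # succeeds with jitter added
# np.linalg.cholesky(Kuu) alone would typically raise LinAlgError here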
Example #6
    def __init__(self,
                 base_kern,
                 len_seqs,
                 len_windows,
                 num_features,
                 normalized=True):
        Kernel.__init__(self, len_seqs * num_features)
        self.len_seqs = len_seqs
        self.len_windows = len_windows
        self.base_kern = base_kern
        self.num_features = num_features

        self.normalized = normalized

        self.variance = Parameter(1.0,
                                  transform=transforms.positive,
                                  dtype=settings.float_type)

        if self.base_kern.input_dim != len_windows * num_features:
            raise ValueError(
                "Base_kern input dimensions must be consistent with window length."
            )
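A hedged sketch of the shape bookkeeping this constructor enforces: each sequence of len_seqs steps with num_features features per step is flattened into len_seqs * num_features inputs, and the base kernel must accept windows of len_windows * num_features values (the numbers below are illustrative):

import numpy as np

len_seqs, len_windows, num_features = 5, 3, 2
X = np.random.randn(10, len_seqs * num_features)          # 10 flattened sequences
seqs = X.reshape(-1, len_seqs, num_features)              # [10, 5, 2]
window = seqs[:, :len_windows, :].reshape(-1, len_windows * num_features)
assert window.shape[1] == len_windows * num_features      # what base_kern.input_dim must equal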
Example #7
def _conditional_train(
    Xnew: tf.Tensor,
    inducing_variable: InducingVariables,
    kernel: Kernel,
    f: tf.Tensor,
    *,
    full_cov=False,
    full_output_cov=False,
    q_sqrt=None,
    white=False,
):
    """
    Single-output GP conditional.

    The covariance matrices used to calculate the conditional have the following shape:
    - Kuu: [M, M]
    - Kuf: [M, N]
    - Kff: [N, N]

    Further reference
    -----------------
    - See `gpflow.conditionals._conditional` (below) for a detailed explanation of
      the conditional in the single-output case.
    - See the multioutput notebook for more information about the multioutput framework.

    Parameters
    ----------
    :param Xnew: data matrix, size [N, D].
    :param f: data matrix, [M, R]
    :param full_cov: return the covariance between the datapoints
    :param full_output_cov: return the covariance between the outputs.
           NOTE: as we are using a single-output kernel with repetitions
                 these covariances will be zero.
    :param q_sqrt: matrix of standard-deviations or Cholesky matrices,
        size [M, R] or [R, M, M].
    :param white: boolean of whether to use the whitened representation
    :return:
        - mean:     [N, R]
        - variance: [N, R], [R, N, N], [N, R, R] or [N, R, N, R]
        Please see `gpflow.conditional._expand_independent_outputs` for more information
        about the shape of the variance, depending on `full_cov` and `full_output_cov`.
    """
    Kmm = Kuu(inducing_variable, kernel, jitter=default_jitter())  # [M, M]
    Kmn = Kuf(inducing_variable, kernel, Xnew)  # [M, N]
    Knn = kernel.diag_tr()  # uses an optimized function to calculate the covariances
    fmean, fvar = base_conditional(
        Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white
    )  # [N, R],  [R, N, N] or [N, R]
    return fmean, expand_independent_outputs(fvar, full_cov, full_output_cov)
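For reference, a minimal numpy sketch (not the GPflow implementation) of the whitened, q_sqrt=None case of the base_conditional call above, following the shapes listed in the docstring: A = Lm^{-1} Kmn, mean = A^T f, var = diag(Knn) - sum(A**2, axis=0).

import numpy as np

def base_conditional_sketch(Kmn, Kmm, Knn_diag, f, jitter=1e-6):
    # Whitened (white=True), q_sqrt=None case only.
    M = Kmm.shape[0]
    Lm = np.linalg.cholesky(Kmm + jitter * np.eye(M))   # [M, M]
    A = np.linalg.solve(Lm, Kmn)                        # [M, N], triangular solve in spirit
    fmean = A.T @ f                                     # [N, R]
    fvar = Knn_diag - np.sum(A ** 2, axis=0)            # [N], marginal variances
    return fmean, fvar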
Example #8
 def __init__(self, base_kern):
     Kernel.__init__(self, input_dim=base_kern.input_dim + 1)
     self.kern = base_kern
Example #9
 def __init__(self, input_dim, output_dim, active_dims=None, name=None):
     Kernel.__init__(self, active_dims, name)
     self.input_dim = input_dim
     self.output_dim = output_dim
Example #10
def independent_multisample_sample_conditional(Xnew: tf.Tensor, feat: InducingPoints, kern: Kernel, f: tf.Tensor, *,
                                               full_cov=False, full_output_cov=False, q_sqrt=None, white=False):
    """
    Multisample, single-output GP conditional.

    NB: if full_cov=False is required, this functionality can be achieved by reshaping Xnew to SN x D
    and using conditional. The purpose of this function is to compute full covariances in batch over S samples.

    The covariance matrices used to calculate the conditional have the following shape:
    - Kuu: M x M
    - Kuf: S x M x N
    - Kff: S x N or S x N x N
    ----------
    :param Xnew: data matrix, size S x N x D.
    :param f: data matrix, M x R
    :param full_cov: return the covariance between the datapoints
    :param full_output_cov: return the covariance between the outputs. Must be False
    :param q_sqrt: matrix of standard-deviations or Cholesky matrices,
        size M x R or R x M x M.
    :param white: boolean of whether to use the whitened representation
    :return:
        - mean:     S x N x R
        - variance: S x N x R, S x R x N x N
    """
    if full_output_cov:
        raise NotImplementedError

    Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level)  # M x M

    S, N, D = tf.shape(Xnew)[0], tf.shape(Xnew)[1], tf.shape(Xnew)[2]
    M = tf.shape(Kmm)[0]

    Kmn_M_SN = Kuf(feat, kern, tf.reshape(Xnew, [S * N, D]))  # M x SN
    Knn = kern.K(Xnew) if full_cov else kern.Kdiag(Xnew)  # S x N x N or S x N

    num_func = tf.shape(f)[1]  # (=R)
    Lm = tf.cholesky(Kmm)  # M x M

    # Compute the projection matrix A
    A_M_SN = tf.matrix_triangular_solve(Lm, Kmn_M_SN, lower=True)
    A = tf.transpose(tf.reshape(A_M_SN, [M, S, N]), [1, 0, 2])  # S x M x N

    # compute the covariance due to the conditioning
    if full_cov:
        fvar = Knn - tf.matmul(A, A, transpose_a=True)  # S x N x N
        fvar = tf.tile(fvar[:, None, :, :], [1, num_func, 1, 1])  # S x R x N x N
    else:
        fvar = Knn - tf.reduce_sum(tf.square(A), -2)  # S x N
        fvar = tf.tile(fvar[:, None, :], [1, num_func, 1])  # S x R x N

    # another backsubstitution in the unwhitened case
    if not white:
        A_M_SN = tf.matrix_triangular_solve(tf.transpose(Lm), A_M_SN, lower=False)
        A = tf.transpose(tf.reshape(A_M_SN, [M, S, N]), [1, 0, 2])  # S x M x N

    # construct the conditional mean
    fmean = tf.matmul(A, tf.tile(f[None, :, :], [S, 1, 1]), transpose_a=True)  # S x N x R
    # fmean = tf.einsum('snm,nr->smr', A, f)  # S x N x R

    if q_sqrt is not None:
        if q_sqrt.get_shape().ndims == 2:
            LTA = A[:, None, :, :] * tf.transpose(q_sqrt)[None, :, :, None]  # S x R x M x N
        elif q_sqrt.get_shape().ndims == 3:
            # L = tf.tile(tf.matrix_band_part(q_sqrt, -1, 0)[None, :, :, :], [S, 1, 1, 1])  # S x R x M x M
            # A_tiled = tf.tile(tf.expand_dims(A, 1), tf.stack([1, num_func, 1, 1]))  # S x R x M x N
            # LTA = tf.matmul(L, A_tiled, transpose_a=True)  # S x R x M x N
            
            LTA = tf.einsum('rMm,sMn->srmn', tf.matrix_band_part(q_sqrt, -1, 0), A)
        else:  # pragma: no cover
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
        if full_cov:
            fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True)  # S x R x N x N
        else:
            fvar = fvar + tf.reduce_sum(tf.square(LTA), 2)  # S x R x N


    if not full_cov:
        z = tf.random_normal(tf.shape(fmean), dtype=settings.float_type)
        fvar = tf.matrix_transpose(fvar)  # S x N x R
        sample = fmean + z * fvar**0.5
    else:
        fmean_SRN1 = tf.transpose(fmean, [0, 2, 1])[:, :, :, None]
        z = tf.random_normal(tf.shape(fmean_SRN1), dtype=settings.float_type)
        sample_SRN1 = fmean_SRN1 + tf.matmul(tf.cholesky(fvar), z)
        sample = tf.transpose(sample_SRN1[:, :, :, 0], [0, 2, 1])

    return sample, fmean, fvar  # fmean is S x N x R, fvar is S x R x N x N or S x N x R
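A small numpy sketch of the reshape/transpose bookkeeping used above: the S x N x D batch is flattened to SN x D for a single Kuf evaluation, and the resulting M x SN matrix is folded back into an S x M x N batch (shapes below are illustrative):

import numpy as np

S, N, D, M = 4, 5, 2, 3
Xnew = np.random.randn(S, N, D)
X_flat = Xnew.reshape(S * N, D)                    # SN x D, as fed to Kuf
A_M_SN = np.random.randn(M, S * N)                 # stands in for the triangular solve result
A = A_M_SN.reshape(M, S, N).transpose(1, 0, 2)     # S x M x N, matching the code above
assert A.shape == (S, M, N)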