Example #1
0
def pruneConvInput(tensorNP: "np.ndarray", rindexes: list = None) -> "np.ndarray":
    """Prune the input channels of a conv-layer weight array.

    Keeps only the channels listed in ``rindexes`` along axis 2 (the
    input-channel axis of an HWIO-layout conv kernel) and returns a copy,
    so the caller can mutate the result without touching the original.

    :param tensorNP: 4-D weight array. Despite the original ``tf.Tensor``
        annotation, the body uses NumPy fancy indexing and ``.copy()``,
        so a ``numpy.ndarray`` is expected (as the name ``tensorNP`` suggests);
        the annotation is quoted so it is never evaluated at def time.
    :param rindexes: indices of the input channels to KEEP. ``None``
        means keep everything (a plain copy is returned).
    :return: copy of the array containing only the selected input channels.
    """
    # `x is not None` is the idiomatic form of the original `not x is None`.
    if rindexes is not None:
        return tensorNP[:, :, rindexes, :].copy()
    return tensorNP.copy()
Example #2
0
    def __init__(self,
                 data: tf.Tensor,
                 x_data_mean: tf.Tensor,
                 x_data_var: tf.Tensor,
                 kernel: Kernel,
                 num_inducing_variables: Optional[int] = None,
                 inducing_variable=None,
                 x_prior_mean=None,
                 x_prior_var=None):
        """
        Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.

        :param data: data matrix, size N (number of points) x D (dimensions)
        :param x_data_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
        :param x_data_var: variance of latent positions ([N, Q]), for the initialisation of the latent space.
        :param kernel: kernel specification, by default Squared Exponential
        :param num_inducing_variables: number of inducing points, M
        :param inducing_variable: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
            random permutation of x_data_mean.
        :param x_prior_mean: prior mean used in KL term of bound. By default 0. Same size as x_data_mean.
        :param x_prior_var: prior variance used in KL term of bound. By default 1.
        :raises ValueError: if both or neither of `inducing_variable` and
            `num_inducing_variables` are supplied.
        """
        super().__init__(kernel, likelihoods.Gaussian())
        self.data = data
        assert x_data_var.ndim == 2

        # Variational parameters of q(X): the mean is unconstrained, the
        # variance is kept positive via the transform.
        self.x_data_mean = Parameter(x_data_mean)
        self.x_data_var = Parameter(x_data_var, transform=positive())

        self.num_data, self.num_latent = x_data_mean.shape  # N, Q
        self.output_dim = data.shape[-1]  # D

        # Shape sanity checks: mean/var agree with each other and with the data rows.
        assert np.all(x_data_mean.shape == x_data_var.shape)
        assert x_data_mean.shape[0] == data.shape[0], 'X mean and Y must be same size.'
        assert x_data_var.shape[0] == data.shape[0], 'X var and Y must be same size.'

        # Exactly one of the two ways of specifying inducing points must be used.
        if (inducing_variable is None) == (num_inducing_variables is None):
            raise ValueError("BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`")

        if inducing_variable is None:
            # By default we initialize by subset of initial latent points.
            # NOTE(review): assumes x_data_mean supports `.copy()` and row
            # shuffling (i.e. is a NumPy array here) — confirm against callers.
            inducing_variable = np.random.permutation(x_data_mean.copy())[:num_inducing_variables]

        self.inducing_variable = inducingpoint_wrapper(inducing_variable)

        assert x_data_mean.shape[1] == self.num_latent

        # Deal with parameters for the prior mean/variance of X.
        if x_prior_mean is None:
            x_prior_mean = tf.zeros((self.num_data, self.num_latent), dtype=default_float())
        if x_prior_var is None:
            # dtype added for consistency with x_prior_mean above; the
            # convert_to_tensor below casts anyway, so behavior is unchanged.
            x_prior_var = tf.ones((self.num_data, self.num_latent), dtype=default_float())

        self.x_prior_mean = tf.convert_to_tensor(np.atleast_1d(x_prior_mean), dtype=default_float())
        self.x_prior_var = tf.convert_to_tensor(np.atleast_1d(x_prior_var), dtype=default_float())

        # The prior over X must match the latent layout [N, Q].
        assert self.x_prior_mean.shape[0] == self.num_data
        assert self.x_prior_mean.shape[1] == self.num_latent
        assert self.x_prior_var.shape[0] == self.num_data
        assert self.x_prior_var.shape[1] == self.num_latent