Example #1
    def __init__(self, X, Y, kern, mean_function=Zero()):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        kern, mean_function are appropriate GPflow objects

        This is a vanilla implementation of GP regression with a Gaussian
        likelihood.  Multiple columns of Y are treated independently.
        """
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
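A minimal usage sketch, assuming this __init__ belongs to a GPR(GPModel) subclass and that kernels.RBF(input_dim) is available from the same GPflow-style package (names and shapes below are illustrative):

import numpy as np

N, D, R = 100, 2, 1
X = np.random.randn(N, D)                 # N x D input matrix
Y = np.random.randn(N, R)                 # N x R output matrix (columns treated independently)

model = GPR(X, Y, kern=kernels.RBF(D))    # mean_function defaults to Zero()
print(model.num_data, model.num_latent)   # -> 100, 1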
Example #2
    def __init__(self, X, Y, kern, mean_function=Zero()):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R

        kern, mean_function are appropriate GPflow objects

        This is a vanilla implementation of GP regression with a Gaussian
        likelihood.
        """
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
Example #3
def evaluation(landmarks, print_less=True):
    landmarks = np.reshape(landmarks, [1, -1])
    landmarks = DataSet(landmarks, landmarks)

    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    data, test = import_dataset(FLAGS.dataset, FLAGS.k_fold)

    error_rate = losses.RootMeanSqError(test.Dout)
    like = likelihoods.Gaussian()
    optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)

    ## Main dgp object
    dgp = DgpRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1],
                 FLAGS.nl, FLAGS.n_rff, FLAGS.df, FLAGS.kernel_type,
                 FLAGS.kernel_arccosine_degree, FLAGS.is_ard,
                 FLAGS.feed_forward, FLAGS.q_Omega_fixed, FLAGS.theta_fixed,
                 FLAGS.learn_Omega)

    error_result = dgp.test(landmarks, FLAGS.mc_test, error_rate, print_less)

    return error_result
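A hedged call sketch for evaluation(); FLAGS is read as a module-level global inside the function, so it must be populated first (e.g. via utils.get_flags() as in Example #4), and the landmark vector length here is illustrative:

import numpy as np

FLAGS = utils.get_flags()          # command-line flags, as in Example #4
landmarks = np.zeros(20)           # illustrative flat landmark vector; reshaped to 1 x 20 inside evaluation()
rmse = evaluation(landmarks, print_less=True)
print(rmse)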
Example #4
    return data, test


if __name__ == '__main__':
    FLAGS = utils.get_flags()

    ## Set random seed for tensorflow and numpy operations
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    data, test = import_dataset(FLAGS.dataset, FLAGS.fold)

    ## Here we define a custom loss for dgp to show
    error_rate = losses.RootMeanSqError(data.Dout)
    # error_rate = losses.NegLogLikelihood(data.Dout)

    ## Likelihood
    like = likelihoods.Gaussian()

    ## Optimizer
    optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)

    ## Main dgp object
    dgp = DgpRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1],
                 FLAGS.nl, FLAGS.n_rff, FLAGS.df, FLAGS.kernel_type,
                 FLAGS.kernel_arccosine_degree, FLAGS.is_ard, FLAGS.feed_forward,
                 FLAGS.q_Omega_fixed, FLAGS.theta_fixed, FLAGS.learn_Omega)

    # Learning
    dgp.learn(data, FLAGS.learning_rate, FLAGS.mc_train, FLAGS.batch_size,
              FLAGS.n_iterations, optimizer, FLAGS.display_step, test,
              FLAGS.mc_test, error_rate, FLAGS.duration, FLAGS.less_prints,
              FLAGS.kernel_type, FLAGS.dataset)
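A hedged follow-up sketch, not part of the original script: after learning, the held-out error can be recomputed with the same dgp.test() call pattern used in Example #3 (it is assumed here that dgp.test returns the evaluated error):

    # Re-evaluate the trained model on the held-out fold (same call pattern as Example #3).
    test_error = dgp.test(test, FLAGS.mc_test, error_rate, FLAGS.less_prints)
    print(test_error)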
Example #5
y_test = y[ind_test]

var_y = .1
var_f = 1.  # GP variance
len_f = 1.  # GP lengthscale
period = 1.  # period of quasi-periodic component
len_p = 5.  # lengthscale of quasi-periodic component
var_f_mat = 1.
len_f_mat = 1.

prior1 = priors.Matern32(variance=var_f_mat, lengthscale=len_f_mat)
prior2 = priors.QuasiPeriodicMatern12(variance=var_f, lengthscale_periodic=len_p,
                                      period=period, lengthscale_matern=len_f)
prior = priors.Sum([prior1, prior2])

lik = likelihoods.Gaussian(variance=var_y)

if method == 0:
    inf_method = approx_inf.EKS(damping=.1)
elif method == 1:
    inf_method = approx_inf.UKS(damping=.1)
elif method == 2:
    inf_method = approx_inf.GHKS(damping=.1)
elif method == 3:
    inf_method = approx_inf.EP(power=1, intmethod='GH', damping=.1)
elif method == 4:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=.1)
elif method == 5:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=.1)
elif method == 6:
    inf_method = approx_inf.VI(intmethod='GH', damping=.1)
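As a design note, the if/elif chain above just maps an integer method index to an approximate-inference object; the same mapping can be written as a lookup table using only the constructors already shown (a sketch, not part of the original script):

# Same mapping as the if/elif chain, expressed as a dictionary of constructors.
inf_methods = {
    0: lambda: approx_inf.EKS(damping=.1),
    1: lambda: approx_inf.UKS(damping=.1),
    2: lambda: approx_inf.GHKS(damping=.1),
    3: lambda: approx_inf.EP(power=1, intmethod='GH', damping=.1),
    4: lambda: approx_inf.EP(power=0.5, intmethod='GH', damping=.1),
    5: lambda: approx_inf.EP(power=0.01, intmethod='GH', damping=.1),
    6: lambda: approx_inf.VI(intmethod='GH', damping=.1),
}
inf_method = inf_methods[method]()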
Example #6
    def fit(self, X, Y):
        # Initialise the Gaussian likelihood with the empirical per-column variance of Y.
        lik = llh.Gaussian(np.var(Y, 0))
        return self._fit(X, Y, lik)
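A hedged usage sketch; the estimator class name is hypothetical, and only the fit()/_fit() pattern from the snippet above is assumed:

import numpy as np

X = np.random.randn(50, 3)    # 50 inputs, 3 features
Y = np.random.randn(50, 1)    # 50 targets
model = SomeGPRegressor()     # hypothetical class defining fit() and _fit() as above
model.fit(X, Y)               # Gaussian likelihood variance initialised from np.var(Y, 0)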