def evaluation(landmarks, print_less=True):
    """Evaluate a DGP-RFF model configuration described by a flat landmark vector.

    The landmark vector is reshaped to a single-row design matrix, wrapped in a
    DataSet (used as both X and Y), and scored via ``dgp.test`` against the
    dataset selected by the module-level ``FLAGS``.

    Args:
        landmarks: array-like, flattened to shape (1, -1) before use.
            # assumes it is numeric and reshapeable -- TODO confirm caller contract
        print_less: passed through to ``dgp.test`` to reduce logging.

    Returns:
        Whatever ``dgp.test`` returns (the error metric for the landmarks).
    """
    landmarks = np.reshape(landmarks, [1, -1])
    landmarks = DataSet(landmarks, landmarks)

    # Fix seeds so repeated evaluations of the same landmarks are reproducible.
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    # NOTE(review): the __main__ block calls import_dataset with FLAGS.fold,
    # while this uses FLAGS.k_fold -- confirm which flag name is correct.
    data, test = import_dataset(FLAGS.dataset, FLAGS.k_fold)

    # RMSE on the held-out split's output dimensionality.
    error_rate = losses.RootMeanSqError(test.Dout)
    like = likelihoods.Gaussian()

    # (Removed an unused `optimizer = utils.get_optimizer(...)` local:
    # this function only tests, it never runs the training loop.)

    ## Main dgp object
    dgp = DgpRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1],
                 FLAGS.nl, FLAGS.n_rff, FLAGS.df, FLAGS.kernel_type,
                 FLAGS.kernel_arccosine_degree, FLAGS.is_ard,
                 FLAGS.feed_forward, FLAGS.q_Omega_fixed, FLAGS.theta_fixed,
                 FLAGS.learn_Omega)

    error_result = dgp.test(landmarks, FLAGS.mc_test, error_rate, print_less)
    return error_result
    # NOTE(review): tail of a dataset-loading function whose `def` line is
    # above this chunk -- it wraps the test split in a DataSet and returns
    # both splits. Kept verbatim; do not move without the enclosing function.
    test = DataSet(test_X, test_Y)
    return data, test


if __name__ == '__main__':
    # Parse command-line / default experiment flags.
    FLAGS = utils.get_flags()

    ## Set random seed for tensorflow and numpy operations
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    # NOTE(review): evaluation() above passes FLAGS.k_fold here instead of
    # FLAGS.fold -- confirm which flag name actually exists.
    data, test = import_dataset(FLAGS.dataset, FLAGS.fold)

    ## Here we define a custom loss for dgp to show
    error_rate = losses.RootMeanSqError(data.Dout)
    # error_rate = losses.NegLogLikelihood(data.Dout)

    ## Likelihood
    like = likelihoods.Gaussian()

    ## Optimizer
    optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)

    ## Main dgp object
    # Model hyperparameters (layers, random features, kernel, etc.) all come
    # from FLAGS; input/output widths come from the loaded training split.
    dgp = DgpRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1],
                 FLAGS.nl, FLAGS.n_rff, FLAGS.df, FLAGS.kernel_type,
                 FLAGS.kernel_arccosine_degree, FLAGS.is_ard,
                 FLAGS.feed_forward, FLAGS.q_Omega_fixed, FLAGS.theta_fixed,
                 FLAGS.learn_Omega)

    # Learning
    # Runs the full training loop, periodically evaluating on `test` with
    # `error_rate` every FLAGS.display_step iterations.
    dgp.learn(data, FLAGS.learning_rate, FLAGS.mc_train, FLAGS.batch_size,
              FLAGS.n_iterations, optimizer, FLAGS.display_step, test,
              FLAGS.mc_test, error_rate, FLAGS.duration, FLAGS.less_prints,
              FLAGS.kernel_type, FLAGS.dataset)