def test_grad_log_likelihood_pings(self):
    """Ping the analytic log likelihood gradient wrt hyperparameters against a central finite difference."""
    numpy.random.seed(2014)
    step = 2.0e-4
    tolerance = 5.0e-6
    for num_sampled in self.num_sampled_list:
        self.gp_test_environment_input.num_sampled = num_sampled
        _, gaussian_process = self._build_gaussian_process_test_data(self.gp_test_environment_input)
        python_cov, historical_data = gaussian_process.get_core_data_copy()

        lml = GaussianProcessLogMarginalLikelihood(python_cov, historical_data)
        analytic_grad = lml.compute_grad_log_likelihood()
        for k in xrange(lml.num_hyperparameters):
            base_hyperparameters = lml.hyperparameters
            # Evaluate the log likelihood with hyperparameter k shifted by
            # +step and then -step, restoring the original values after each.
            shifted_likelihoods = []
            for sign in (1.0, -1.0):
                shifted = numpy.copy(base_hyperparameters)
                shifted[k] += sign * step
                lml.hyperparameters = shifted
                shifted_likelihoods.append(lml.compute_log_likelihood())
                lml.hyperparameters = base_hyperparameters

            # Central finite difference estimate of the k-th gradient component.
            fd_grad = (shifted_likelihoods[0] - shifted_likelihoods[1]) / (2.0 * step)
            self.assert_scalar_within_relative(fd_grad, analytic_grad[k], tolerance)
def test_evaluate_log_likelihood_at_points(self):
    """Check that ``evaluate_log_likelihood_at_hyperparameter_list`` computes and orders results correctly."""
    num_sampled = 5
    self.gp_test_environment_input.num_sampled = num_sampled
    _, gaussian_process = self._build_gaussian_process_test_data(self.gp_test_environment_input)
    python_cov, historical_data = gaussian_process.get_core_data_copy()

    lml = GaussianProcessLogMarginalLikelihood(python_cov, historical_data)

    # Draw random hyperparameter points from a box domain spanning the
    # test environment's hyperparameter interval in every dimension.
    num_to_eval = 10
    interval = self.gp_test_environment_input.hyperparameter_interval
    num_hyperparameters = self.gp_test_environment_input.num_hyperparameters
    domain = TensorProductDomain([interval] * num_hyperparameters)
    hyperparameters_to_evaluate = domain.generate_uniform_random_points_in_domain(num_to_eval)

    test_values = evaluate_log_likelihood_at_hyperparameter_list(lml, hyperparameters_to_evaluate)

    # Each batch result must exactly equal a pointwise recomputation, in order.
    for point, batch_value in zip(hyperparameters_to_evaluate, test_values):
        lml.hyperparameters = point
        assert batch_value == lml.compute_log_likelihood()
# NOTE(review): this fragment is not self-contained -- it uses names defined
# elsewhere in the file (prior_mean, prior_sig, prior, obj_func_max,
# separateIS0, hyper_bounds, historical_data, numpy, pythonTensorProductDomain,
# ClosedInterval, MixedSquareExponential, PdfPages).
import matplotlib.pyplot as plt
from moe.optimal_learning.python.python_version.log_likelihood import GaussianProcessLogMarginalLikelihood

# Report the hyperprior (mean and covariance diagonal) and the number of
# information sources the covariance kernel will be built with.
print "prior mean\n{0}\nprior sig diag\n{1}".format(prior_mean, numpy.diag(prior_sig))
print "num_is {0}".format(obj_func_max.getNumIS() - 1 + separateIS0)
hyperparam_search_domain = pythonTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in hyper_bounds])
print "hyper bounds\n{0}".format(hyper_bounds)

# Build the GP marginal log likelihood with the kernel hyperparameters
# initialized at the prior mean.
cov = MixedSquareExponential(hyperparameters=prior_mean, total_dim=obj_func_max.getDim() + 1, num_is=obj_func_max.getNumIS() - 1 + separateIS0)
gp_likelihood = GaussianProcessLogMarginalLikelihood(cov, historical_data)

# For each hyperparameter dimension d, plot the unnormalized log posterior
# (log likelihood + log prior) along a 1-D sweep of that coordinate,
# holding all other coordinates at the prior mean; one page per dimension.
with PdfPages('posterior_plot_1.pdf') as pdf:
    for d in range(len(prior_mean)):
        x_list = numpy.linspace(hyper_bounds[d][0], hyper_bounds[d][1], 100)
        y_list = numpy.zeros(len(x_list))
        x = numpy.copy(prior_mean)  # sweep one coordinate, keep the rest fixed
        for i, e in enumerate(x_list):
            print "plot {0}, {1}th pt".format(d, i)
            x[d] = e
            gp_likelihood.set_hyperparameters(x)
            y_list[i] = gp_likelihood.compute_log_likelihood() + prior.compute_log_likelihood(x)
        plt.figure()
        plt.plot(x_list, y_list, 'r-o')
        plt.title(str(d))
        pdf.savefig()
        plt.close()