def test_likelihood_hessian(self):
    """Check the analytic reduced-likelihood Hessian against finite differences.

    For every kernel/trend combination, a surrogate is trained and each
    entry of the analytic Hessian is compared with a forward difference of
    the analytic gradient. Entries are normalized by the Hessian's overall
    norm before comparison.
    """
    for corr_str in [
        "abs_exp",
        "squar_exp",
        "act_exp",
        "matern32",
        "matern52",
    ]:  # For every kernel
        for poly_str in ["constant", "linear", "quadratic"]:  # For every method
            # act_exp is only supported by MGP; all other kernels use KRG.
            if corr_str == "act_exp":
                model = MGP(print_global=False)
                theta = self.random.rand(4)
            else:
                model = KRG(print_global=False)
                theta = self.theta
            model.options["poly"] = poly_str
            model.options["corr"] = corr_str
            model.set_training_values(self.X, self.y)
            model.train()

            grad_red, _dpar = model._reduced_likelihood_gradient(theta)
            hess, hess_ij, _ = model._reduced_likelihood_hessian(theta)

            # Scatter the packed (value, index-pair) Hessian into a dense
            # symmetric matrix.
            n_theta = theta.shape[0]
            dense_hess = np.zeros((n_theta, n_theta))
            dense_hess[hess_ij[:, 0], hess_ij[:, 1]] = hess[:, 0]
            dense_hess[hess_ij[:, 1], hess_ij[:, 0]] = hess[:, 0]

            grad_norm_all = []
            diff_norm_all = []
            ind_theta = []
            for j in range(n_theta):
                theta_pert = theta.copy()
                theta_pert[j] += self.eps
                grad_red_eps, _ = model._reduced_likelihood_gradient(theta_pert)
                for i in range(n_theta):
                    # Forward difference of gradient component i along
                    # direction j approximates H[i, j].
                    fd_hess = (grad_red_eps[i] - grad_red[i]) / self.eps
                    grad_norm_all.append(
                        np.linalg.norm(dense_hess[i, j]) / np.linalg.norm(dense_hess)
                    )
                    diff_norm_all.append(
                        np.linalg.norm(fd_hess) / np.linalg.norm(dense_hess)
                    )
                    ind_theta.append(r"$x_%d,x_%d$" % (j, i))
            self.assert_error(
                np.array(grad_norm_all),
                np.array(diff_norm_all),
                atol=1e-5,
                rtol=1e-3,
            )  # from utils/smt_test_case.py
def test_likelihood_derivatives(self):
    """Check the analytic reduced-likelihood gradient against finite differences.

    For every kernel/trend combination, a surrogate is trained and each
    component of the analytic gradient is compared with a forward
    difference of the reduced-likelihood value itself.
    """
    for corr_str in [
        "abs_exp",
        "squar_exp",
        "act_exp",
        "matern32",
        "matern52",
    ]:  # For every kernel
        for poly_str in ["constant", "linear", "quadratic"]:  # For every method
            # act_exp is only supported by MGP; all other kernels use KRG.
            if corr_str == "act_exp":
                model = MGP(print_global=False)
                theta = self.random.rand(4)
            else:
                model = KRG(print_global=False)
                theta = self.theta
            model.options["poly"] = poly_str
            model.options["corr"] = corr_str
            model.set_training_values(self.X, self.y)
            model.train()

            grad_red, _dpar = model._reduced_likelihood_gradient(theta)
            red, _par = model._reduced_likelihood_function(theta)

            grad_norm_all = []
            diff_norm_all = []
            ind_theta = []
            for i in range(len(theta)):
                theta_pert = theta.copy()
                theta_pert[i] += self.eps
                red_pert, _ = model._reduced_likelihood_function(theta_pert)
                # Forward difference of the likelihood approximates dL/dθ_i.
                fd_grad = (red_pert - red) / self.eps
                grad_norm_all.append(grad_red[i])
                diff_norm_all.append(float(fd_grad))
                ind_theta.append(r"$x_%d$" % i)
            # NOTE(review): only the finite-difference side is transposed —
            # presumably grad_red components are already column-shaped;
            # preserved as-is.
            grad_norm_all = np.atleast_2d(grad_norm_all)
            diff_norm_all = np.atleast_2d(diff_norm_all).T
            self.assert_error(
                grad_norm_all, diff_norm_all, atol=1e-5, rtol=1e-3
            )  # from utils/smt_test_case.py