def test_rff_feature_map_hessian_theano_execute():
    """Smoke test: the per-component Theano RFF Hessian evaluates without error.

    Samples a random basis and evaluates the component-wise Hessian for every
    feature; no return values are checked, only that execution succeeds.
    """
    if not theano_available:
        raise SkipTest("Theano not available.")

    input_dim = 2
    num_features = 10
    bandwidth = 1.

    point = np.random.randn(input_dim)
    omega, u = rff_sample_basis(input_dim, num_features, bandwidth)

    for component in range(num_features):
        rff_feature_map_comp_hessian_theano(point, omega[:, component], u[component])
def hessian(self, x):
    """Compute the Hessian of the learned log-density at a single point.

    WARNING: this implementation is slow (one Theano call per feature
    component), so avoid calling it repeatedly.

    Parameters
    ----------
    x : 1-D array of length self.D
        Evaluation point.

    Returns
    -------
    (self.D, self.D) ndarray
        Weighted sum of per-component Hessians, normalised by sqrt(m)
        because the RFF embedding is a Monte Carlo average.
    """
    assert_array_shape(x, ndim=1, dims={0: self.D})

    result = np.zeros((self.D, self.D))
    for idx in range(len(self.theta)):
        weight = self.theta[idx]
        result += weight * rff_feature_map_comp_hessian_theano(
            x, self.omega[:, idx], self.u[idx])

    # RFF is a Monte Carlo average, hence the sqrt(m) normalisation.
    return result / np.sqrt(self.m)
def hessian(self, x):
    """Compute the Hessian of the learned log-density at point ``x``.

    WARNING: this implementation is slow (one Theano evaluation per RFF
    component); do not call it in a tight loop.

    Parameters
    ----------
    x : 1-D array of length self.D
        Evaluation point.

    Returns
    -------
    (self.D, self.D) ndarray
        Sum over components of theta_i times the component Hessian,
        divided by sqrt(m) since the RFF map is a Monte Carlo average.
    """
    assert_array_shape(x, ndim=1, dims={0: self.D})

    accumulator = np.zeros((self.D, self.D))
    for component, coefficient in enumerate(self.theta):
        component_hessian = rff_feature_map_comp_hessian_theano(
            x,
            self.omega[:, component],
            self.u[component],
        )
        accumulator += coefficient * component_hessian

    # Normalise by sqrt(m): the RFF embedding is a Monte Carlo average.
    return accumulator / np.sqrt(self.m)