Code Example #1
 def test_linear_iso(self):
     # homogeneous linear kernel is poorly conditioned with random data
     linear = LinearKernel(1.) + WhiteKernel()
     self.assertTrue(np.allclose(linear(self.X, self.X), 2 * self.X))
     res = np.array([[2, 1, 1], [1, 3, 2], [1, 2, 4]], dtype=float)
     self.assertTrue(np.allclose(linear(self.Z, self.Z), res))
     self._gpr_grad(linear)
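The fixtures self.X and self.Z are not shown in this listing. A minimal NumPy sketch that reproduces both expected matrices, assuming self.X = np.eye(3), self.Z = np.tril(np.ones((3, 3))), and the convention k(x, z) = x @ z for the homogeneous linear kernel, with a unit white-noise term added when the two inputs coincide:

 import numpy as np

 X = np.eye(3)                 # assumed stand-in for self.X
 Z = np.tril(np.ones((3, 3)))  # assumed stand-in for self.Z

 def linear_plus_white(A, B):
     # homogeneous linear part A @ B.T; the white term adds I on identical inputs
     K = A @ B.T
     if A is B:
         K = K + np.eye(len(A))
     return K

 assert np.allclose(linear_plus_white(X, X), 2 * X)
 assert np.allclose(linear_plus_white(Z, Z),
                    np.array([[2, 1, 1], [1, 3, 2], [1, 2, 4]], dtype=float))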
Code Example #2
 def test_constant(self):
     const = ConstantKernel(1.)
     white = WhiteKernel()
     res = np.ones((3, 3))
     self.assertTrue(np.allclose(const(self.X, self.X), res))
     self.assertTrue(np.allclose(const(self.X, self.Z), res))
     # the constant kernel's Gram matrix is singular, so its Jacobian
     # cannot be checked alone (see the sketch below)
     self._gpr_grad(const * white)
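The singularity remark can be made concrete: a constant Gram matrix has rank 1, so the Cholesky factorization (and hence the log-determinant term in the GPR objective) fails on it alone, while multiplying by the white kernel yields a full-rank diagonal matrix. A quick NumPy illustration:

 import numpy as np

 K_const = np.ones((3, 3))               # Gram matrix of ConstantKernel(1.)
 print(np.linalg.matrix_rank(K_const))   # 1: singular, Cholesky would fail
 K_prod = K_const * np.eye(3)            # const * white is an elementwise product
 print(np.linalg.matrix_rank(K_prod))    # 3: full rank
 np.linalg.cholesky(K_prod)              # succeeds, so the objective is defined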
Code Example #3
 def test_linear_ard(self):
     linear = WhiteKernel() + LinearKernel(
         np.ones((3,)),
         l_bounds=(np.ones((3,)) * 1e-5, np.ones((3,)) * 1e5))
     self.assertTrue(np.allclose(linear(self.X, self.X), 2 * self.X))
     res = np.array([[2, 1, 1], [1, 3, 2], [1, 2, 4]], dtype=float)
     self.assertTrue(np.allclose(linear(self.Z, self.Z), res))
     self._gpr_grad(linear)
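With every length scale fixed at 1, the ARD kernel should coincide with the isotropic case above, which is why the same expected matrix appears. A sketch under the assumed parameterization k(x, z) = sum_i x_i * z_i / l_i**2 (the library's exact convention may differ):

 import numpy as np

 def linear_ard(A, B, l):
     # per-dimension length scales; l = 1 reduces to the plain dot product
     return (A / l ** 2) @ B.T

 Z = np.tril(np.ones((3, 3)))   # assumed stand-in for self.Z
 res = np.array([[2, 1, 1], [1, 3, 2], [1, 2, 4]], dtype=float)
 assert np.allclose(linear_ard(Z, Z, np.ones((3,))) + np.eye(3), res)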
Code Example #4
 def test_white(self):
     white = WhiteKernel()
     res = np.eye(3)
     self.assertTrue(np.allclose(white(self.X, self.X), res))
     res = np.zeros((3, 3))
     res[0, 0] += 1.
     self.assertTrue(np.allclose(white(self.X, self.Z), res))
     # jacobian
     gpr = GaussianProcessRegressor(kernel=white)
     f = gpr._obj(self.X, self.y)
     fun = lambda x: f(x)[0]
     jac = lambda x: f(x)[1]
     for p in (np.ones((len(gpr.thetas), )), gpr.thetas.values,
               np.random.uniform(0., 1., (len(gpr.thetas), ))):
         self.assertTrue(jac(p).shape == (0, ))  # no learnable parameter
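A white-noise kernel acts as a Kronecker delta on the inputs, so white(X, X) is the identity and white(X, Z) is nonzero only where a row of X equals a row of Z; the empty Jacobian shape (0,) reflects that it has no trainable parameter. Under the assumed fixtures X = np.eye(3) and Z = np.tril(np.ones((3, 3))), only the first rows match:

 import numpy as np

 def white(A, B):
     # 1 where the rows coincide exactly, 0 elsewhere
     return np.array([[float(np.array_equal(a, b)) for b in B] for a in A])

 X, Z = np.eye(3), np.tril(np.ones((3, 3)))
 assert np.allclose(white(X, X), np.eye(3))
 assert white(X, Z)[0, 0] == 1. and white(X, Z).sum() == 1.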
Code Example #5
 def test_gpr(self):
     k = self.X.shape[1]
     ker = (0.5 * RBFKernel() +
            0.4 * RationalQuadraticKernel() +
            0.3 * WhiteKernel())
     gpr = GaussianProcessRegressor(kernel=ker)
     # gradient
     f = gpr._obj(self.X, self.y)
     fun = lambda x: f(x)[0]
     jac = lambda x: f(x)[1]
     for p in (gpr.thetas.values, np.ones((len(gpr.thetas), ))):
         ag = jac(p)
         ng = approx_fprime(p, fun, 1e-8)
         err = np.abs(ag - ng).max()
         try:
             assert err < 1e-6
         except AssertionError:
             print('analytical gradient {}'.format(ag))
             print('numerical  gradient {}'.format(ng))
             self.fail('gpr gradient fails.')
     # fitted
     self.assertFalse(gpr.fitted())
     # fit
     try:
         gpr.fit(self.X, self.y)
     except Exception:
         self.fail('gpr fit fails.')
     # precompute
     err = np.abs(gpr.kernel(self.X, self.X) - gpr.L @ gpr.L.T).max()
     self.assertTrue(err < 1e-6)
     # point prediction
     try:
         gpr.predict(self.Z)
     except Exception:
         self.fail('gpr predict fails.')
     # distributional prediction
     try:
         gpr.posterior_predictive(self.Z)
         gpr.posterior_predictive(self.Z)
     except Exception:
         self.fail('gpr predictive fails.')
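The gradient check above is a generic finite-difference pattern, independent of this library: evaluate scipy.optimize.approx_fprime at a few points and compare against the analytical gradient. A standalone version on a toy objective:

 import numpy as np
 from scipy.optimize import approx_fprime

 fun = lambda x: float(np.sum(x ** 2))  # toy objective
 jac = lambda x: 2 * x                  # its analytical gradient

 for p in (np.ones(3), np.random.uniform(0., 1., 3)):
     ng = approx_fprime(p, fun, 1e-8)   # numerical gradient
     assert np.abs(jac(p) - ng).max() < 1e-6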
Code Example #6
 def test_prod(self):
     ker = RBFKernel(1.) * WhiteKernel()
     res = np.eye(3)
     self.assertTrue(np.allclose(ker(self.X, self.X), res))
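A product kernel multiplies Gram matrices elementwise. Any RBF kernel is 1 on the diagonal, and the white kernel contributes the identity, so the product collapses to the identity whatever the RBF's off-diagonal values are:

 import numpy as np

 rbf_gram = np.full((3, 3), np.exp(-1.))  # arbitrary off-diagonal values
 np.fill_diagonal(rbf_gram, 1.)           # RBF(x, x) = 1 always
 assert np.allclose(rbf_gram * np.eye(3), np.eye(3))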
Code Example #7
 def test_sum(self):
     ker = RBFKernel(1.) + WhiteKernel()
     res = np.ones((3, 3)) * exp(-1)
     res[np.diag_indices_from(res)] = 2.
     self.assertTrue(np.allclose(ker(self.X, self.X), res))
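The expected matrix here pins down the RBF convention: assuming the fixture X = np.eye(3) and k(x, z) = exp(-||x - z||**2 / (2 * l**2)), distinct rows of the identity are at squared distance 2, giving exp(-1) off the diagonal, and the white term adds 1 on the diagonal. A NumPy check:

 import numpy as np
 from math import exp

 X = np.eye(3)                                        # assumed stand-in for self.X
 sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)  # pairwise squared distances
 K = np.exp(-sq / 2.) + np.eye(3)                     # RBF(l=1) + white
 res = np.ones((3, 3)) * exp(-1)
 res[np.diag_indices_from(res)] = 2.
 assert np.allclose(K, res)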