Example 1
0
    def test_lnprob_and_grad_scalar(self):
        """Spot-check lnprob/gradient at a legal scalar and at the 0 boundary.

        Uses a fixed RNG seed so the prior is constructed deterministically.
        """
        prior = HorseshoePrior(scale=1, rng=np.random.RandomState(1))

        # Legal scalar: compare floats with a tolerance (7 decimal places)
        # instead of exact equality — bitwise-equal float results are fragile
        # across platforms, compilers, and numpy versions.
        self.assertAlmostEqual(prior.lnprob(-1), 1.1450937952919953)
        self.assertAlmostEqual(prior.gradient(-1), -0.6089187456211098)

        # Boundary: the horseshoe density diverges at 0, so both the
        # log-probability and its gradient are expected to be infinite.
        self.assertTrue(np.isinf(prior._lnprob(0)))
        self.assertTrue(np.isinf(prior._gradient(0)))
Example 2
0
 def test_gradient(self):
     """Verify the analytic gradient of lnprob against a finite-difference
     estimate over a grid of scales and evaluation points."""
     for s in (0.1, 0.5, 1., 2.):
         hs_prior = HorseshoePrior(scale=s, rng=np.random.RandomState(1))
         # The function appears to be unstable above 15, so the grid stops
         # there; 0 is skipped because the density diverges at that point.
         points = [t for t in range(-20, 15) if t != 0]
         for point in points:
             grad_error = scipy.optimize.check_grad(
                 lambda x: hs_prior.lnprob(x[0]),
                 lambda x: hs_prior.gradient(x[0]),
                 np.array([point]),
                 epsilon=1e-5,
             )
             self.assertAlmostEqual(grad_error, 0, delta=5)