Example No. 1
    def test_sample_from_prior(self):
        prior = TophatPrior(lower_bound=np.exp(-10), upper_bound=np.exp(2), rng=np.random.RandomState(1))
        samples = prior.sample_from_prior(10)
        # Bounds are given on the original scale; the prior samples in log space,
        # so all values should lie in [-10, 2]
        np.testing.assert_array_equal(samples >= -10, True)
        np.testing.assert_array_equal(samples <= 2, True)
        # Test that the rng is set
        self.assertAlmostEqual(samples[0], -4.995735943569112)
Example No. 2
class DefaultPrior(BasePrior):
    def __init__(self, n_dims: int, rng: np.random.RandomState = None):
        """
        This class is a verbatim copy of the implementation of RoBO:

        Klein, A. and Falkner, S. and Mansur, N. and Hutter, F.
        RoBO: A Flexible and Robust Bayesian Optimization Framework in Python
        In: NIPS 2017 Bayesian Optimization Workshop
        """
        if rng is None:
            self.rng = np.random.RandomState(np.random.randint(0, 10000))
        else:
            self.rng = rng

        # The number of hyperparameters
        self.n_dims = n_dims

        # Prior for the Matern52 lengthscales
        self.tophat = TophatPrior(-10, 2, rng=self.rng)

        # Prior for the covariance amplitude
        self.ln_prior = LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng)

        # Prior for the noise
        self.horseshoe = HorseshoePrior(scale=0.1, rng=self.rng)

    def lnprob(self, theta: np.ndarray):
        lp = 0
        # Covariance amplitude
        lp += self.ln_prior.lnprob(theta[0])
        # Lengthscales
        lp += self.tophat.lnprob(theta[1:-1])
        # Noise
        lp += self.horseshoe.lnprob(theta[-1])

        return lp

    def sample_from_prior(self, n_samples: int):
        p0 = np.zeros([n_samples, self.n_dims])
        # Covariance amplitude
        p0[:, 0] = self.ln_prior.sample_from_prior(n_samples)[:, 0]
        # Lengthscales
        ls_sample = np.array([
            self.tophat.sample_from_prior(n_samples)[:, 0]
            for _ in range(1, (self.n_dims - 1))
        ]).T
        p0[:, 1:(self.n_dims - 1)] = ls_sample
        # Noise
        p0[:, -1] = self.horseshoe.sample_from_prior(n_samples)[:, 0]
        return p0

    def gradient(self, theta: np.ndarray):
        # TODO: Implement real gradient here
        return np.zeros([theta.shape[0]])
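
A minimal usage sketch of this class, assuming DefaultPrior and the underlying priors are importable from the surrounding module; the import path and the dimensionality used here are assumptions for illustration, not part of the example above.

import numpy as np

# Assumed import path; adjust to wherever DefaultPrior lives in your code base.
# from mypackage.priors import DefaultPrior

rng = np.random.RandomState(1)
# n_dims = 1 covariance amplitude + 2 lengthscales + 1 noise term
prior = DefaultPrior(n_dims=4, rng=rng)

theta = prior.sample_from_prior(5)   # shape (5, 4): [amplitude, ls_1, ls_2, noise] per row
lp = prior.lnprob(theta[0])          # joint log-probability of one sampled parameter vector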
Example No. 3
    def test_sample_from_prior_shapes(self):
        rng = np.random.RandomState(1)
        lower_bound = 2 + rng.random_sample() * 50
        upper_bound = lower_bound + rng.random_sample() * 50
        prior = TophatPrior(lower_bound=lower_bound, upper_bound=upper_bound)
        sample = prior.sample_from_prior(1)
        self.assertEqual(sample.shape, (1, ))
        sample = prior.sample_from_prior(2)
        self.assertEqual(sample.shape, (2, ))
        sample = prior.sample_from_prior(10)
        self.assertEqual(sample.shape, (10, ))
        with self.assertRaises(ValueError):
            prior.sample_from_prior(0)
        with self.assertRaises(ValueError):
            prior.sample_from_prior((2, ))
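
The shape checks and ValueError cases above could be satisfied by a sample_from_prior along these lines. This is only a sketch under the assumption that the prior samples uniformly in log space; the attribute names (_log_lower, _log_upper) are placeholders, not the library's own.

def sample_from_prior(self, n_samples):
    # Sketch: reject anything that is not a positive scalar count, which is
    # what the test above expects to raise ValueError (0 and a tuple).
    if np.ndim(n_samples) != 0 or n_samples <= 0:
        raise ValueError("n_samples must be a positive integer")
    # Uniform samples between the log-transformed bounds, shape (n_samples,).
    return self.rng.uniform(self._log_lower, self._log_upper, size=n_samples)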
Example No. 4
    def __init__(self, n_dims: int, rng: np.random.RandomState = None):
        """
        This class is a verbatim copy of the implementation of RoBO:

        Klein, A. and Falkner, S. and Mansur, N. and Hutter, F.
        RoBO: A Flexible and Robust Bayesian Optimization Framework in Python
        In: NIPS 2017 Bayesian Optimization Workshop
        """
        if rng is None:
            self.rng = np.random.RandomState(np.random.randint(0, 10000))
        else:
            self.rng = rng

        # The number of hyperparameters
        self.n_dims = n_dims

        # Prior for the Matern52 lengthscales
        self.tophat = TophatPrior(-10, 2, rng=self.rng)

        # Prior for the covariance amplitude
        self.ln_prior = LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng)

        # Prior for the noise
        self.horseshoe = HorseshoePrior(scale=0.1, rng=self.rng)
Example No. 5
    def test_lnprob_and_grad_scalar(self):
        prior = TophatPrior(lower_bound=np.exp(-10), upper_bound=np.exp(2))

        # Legal scalar
        for val in (-1, 0, 1):
            self.assertEqual(prior.lnprob(val), 0, msg=str(val))
            self.assertEqual(prior.gradient(val), 0, msg=str(val))

        # Boundary
        for val in (-10, 2):
            self.assertEqual(prior.lnprob(val), 0)
            self.assertEqual(prior.gradient(val), 0)

        # Values outside the boundary
        for val in (-10 - VERY_SMALL_NUMBER, 2 + VERY_SMALL_NUMBER, -50, 50):
            self.assertTrue(np.isinf(prior.lnprob(val)))
            self.assertEqual(prior.gradient(val), 0)
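
A minimal lnprob/gradient pair consistent with these assertions (log-probability 0 on and inside the log-space bounds, -inf outside, gradient 0 everywhere) might look as follows. Again, the attribute names are assumed placeholders and this is a sketch, not the library's actual implementation.

def lnprob(self, theta):
    # theta is interpreted in log space; the density is flat inside the
    # bounds (unnormalised log-probability 0) and zero outside (-inf).
    if np.any(theta < self._log_lower) or np.any(theta > self._log_upper):
        return -np.inf
    return 0

def gradient(self, theta):
    # Piecewise-constant density: the gradient is 0 wherever it is defined,
    # and the test above also expects 0 outside the bounds.
    return 0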