Example #1
 def test_scalar_normal_prior_log_transform(self):
     prior = NormalPrior(0, 1, log_transform=True)
     self.assertTrue(prior.log_transform)
     self.assertAlmostEqual(prior.log_prob(prior.loc.new([0.0])).item(),
                            math.log(1 / math.sqrt(2 * math.pi) *
                                     math.exp(-0.5)),
                            places=5)
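With log_transform=True, log_prob(t) evaluates the underlying Normal density at exp(t) rather than at t (compare Example #8, which uses the newer transform=torch.exp API), so the expected value above is the N(0, 1) log-density at exp(0) = 1. A minimal sanity check of that constant using only torch.distributions, independent of gpytorch:

import math
import torch
from torch.distributions import Normal

# N(0, 1) log-density evaluated at exp(0) = 1
expected = math.log(1 / math.sqrt(2 * math.pi) * math.exp(-0.5))
actual = Normal(0.0, 1.0).log_prob(torch.tensor(0.0).exp()).item()
assert abs(actual - expected) < 1e-5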
Example #2
    def __init__(self, hypers=None):
        super(LogRBFMean, self).__init__()
        if hypers is not None:
            self.register_parameter(
                name="constant",
                parameter=torch.nn.Parameter(hypers[-5] +
                                             softplus(hypers[-3]).log()))
            self.register_parameter(name="lengthscale",
                                    parameter=torch.nn.Parameter(hypers[-4]))
        else:
            self.register_parameter(name="constant",
                                    parameter=torch.nn.Parameter(
                                        0. * torch.ones(1)))
            self.register_parameter(name="lengthscale",
                                    parameter=torch.nn.Parameter(
                                        -0.3 * torch.ones(1)))

        # register prior
        self.register_prior(name='constant_prior',
                            prior=NormalPrior(torch.zeros(1),
                                              100. * torch.ones(1),
                                              transform=None),
                            param_or_closure='constant')
        self.register_prior(name='lengthscale_prior',
                            prior=NormalPrior(
                                torch.zeros(1),
                                100. * torch.ones(1),
                                transform=torch.nn.functional.softplus),
                            param_or_closure='lengthscale')
Example #3
def extract_prior(cfg):

    if cfg.lengthscales.which == "box":
        lengthscale_prior = SmoothedBoxPrior(cfg.lengthscales.prior_box.lb,
                                             cfg.lengthscales.prior_box.ub,
                                             sigma=0.001)
    elif cfg.lengthscales.which == "gamma":
        lengthscale_prior = GammaPrior(
            concentration=cfg.lengthscales.prior_gamma.concentration,
            rate=cfg.lengthscales.prior_gamma.rate)
    elif cfg.lengthscales.which == "gaussian":
        lengthscale_prior = NormalPrior(
            loc=cfg.lengthscales.prior_gaussian.loc,
            scale=cfg.lengthscales.prior_gaussian.scale)
    else:
        lengthscale_prior = None
        print("Using no prior for the lengthscale")

    if cfg.outputscale.which == "box":
        outputscale_prior = SmoothedBoxPrior(cfg.outputscale.prior_box.lb,
                                             cfg.outputscale.prior_box.ub,
                                             sigma=0.001)
    elif cfg.outputscale.which == "gamma":
        outputscale_prior = GammaPrior(
            concentration=cfg.outputscale.prior_gamma.concentration,
            rate=cfg.outputscale.prior_gamma.rate)
    elif cfg.outputscale.which == "gaussian":
        outputscale_prior = NormalPrior(
            loc=cfg.outputscale.prior_gaussian.loc,
            scale=cfg.outputscale.prior_gaussian.scale)
    else:
        outputscale_prior = None
        print("Using no prior for the outputscale")

    return lengthscale_prior, outputscale_prior
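A minimal usage sketch for this config-driven dispatch, assuming the same gpytorch.priors imports as the example; the attribute-style config below is hypothetical, built with types.SimpleNamespace to mirror the fields the function reads:

from types import SimpleNamespace

# Hypothetical config: Gamma prior on the lengthscale, no outputscale prior
cfg = SimpleNamespace(
    lengthscales=SimpleNamespace(
        which="gamma",
        prior_gamma=SimpleNamespace(concentration=2.0, rate=0.5)),
    outputscale=SimpleNamespace(which="none"))

lengthscale_prior, outputscale_prior = extract_prior(cfg)
# -> GammaPrior(2.0, 0.5), None (prints "Using no prior for the outputscale")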
Example #4
def extract_prior(cfg_node: dict, which_type: str):

    assert which_type in ["obj", "cons"]

    if cfg_node["lengthscale_prior_type"] == "box":  # par1: low, par2: high
        lengthscale_prior = SmoothedBoxPrior(
            cfg_node["lengthscale_prior_par1_{0:s}".format(which_type)],
            cfg_node["lengthscale_prior_par2_{0:s}".format(which_type)],
            sigma=0.001)
    elif cfg_node["lengthscale_prior_type"] == "gamma":  # par1: alpha (concentration), par2: beta (rate)
        lengthscale_prior = GammaPrior(
            concentration=cfg_node["lengthscale_prior_par1_{0:s}".format(which_type)],
            rate=cfg_node["lengthscale_prior_par2_{0:s}".format(which_type)])
    elif cfg_node["lengthscale_prior_type"] == "gaussian":
        lengthscale_prior = NormalPrior(
            loc=cfg_node["lengthscale_prior_par1_{0:s}".format(which_type)],
            scale=cfg_node["lengthscale_prior_par2_{0:s}".format(which_type)])
    else:
        lengthscale_prior = None
        print("Using no prior for the length scale")

    if cfg_node["outputscale_prior_type"] == "box":  # par1: low, par2: high
        outputscale_prior = SmoothedBoxPrior(
            cfg_node["outputscale_prior_par1_{0:s}".format(which_type)],
            cfg_node["outputscale_prior_par2_{0:s}".format(which_type)],
            sigma=0.001)
    elif cfg_node["outputscale_prior_type"] == "gamma":  # par1: alpha (concentration), par2: beta (rate)
        outputscale_prior = GammaPrior(
            concentration=cfg_node["outputscale_prior_par1_{0:s}".format(which_type)],
            rate=cfg_node["outputscale_prior_par2_{0:s}".format(which_type)])
    elif cfg_node["outputscale_prior_type"] == "gaussian":
        outputscale_prior = NormalPrior(
            loc=cfg_node["outputscale_prior_par1_{0:s}".format(which_type)],
            scale=cfg_node["outputscale_prior_par2_{0:s}".format(which_type)])
    else:
        outputscale_prior = None
        print("Using no prior for the output scale")

    return lengthscale_prior, outputscale_prior
Example #5
 def test_prior_type(self):
     """
     Raising TypeError if prior type is other than gpytorch.priors.Prior
     """
     self.create_kernel_with_prior(None, None)
     self.create_kernel_with_prior(NormalPrior(0, 1), NormalPrior(0, 1))
     self.assertRaises(TypeError, self.create_kernel_with_prior, None, 1)
     self.assertRaises(TypeError, self.create_kernel_with_prior, 1, None)
Example #6
 def test_scalar_normal_prior(self):
     prior = NormalPrior(0, 1)
     self.assertFalse(prior.log_transform)
     self.assertTrue(prior.is_in_support(torch.rand(1)))
     self.assertEqual(prior.shape, torch.Size([1]))
     self.assertEqual(prior.loc.item(), 0.0)
     self.assertEqual(prior.scale.item(), 1.0)
     self.assertAlmostEqual(prior.log_prob(prior.loc.new([0.0])).item(),
                            math.log(1 / math.sqrt(2 * math.pi)),
                            places=5)
Example #7
 def test_vector_normal_prior_size(self):
     prior = NormalPrior(0, 1, size=2)
     self.assertFalse(prior.log_transform)
     self.assertTrue(prior.is_in_support(torch.zeros(1)))
     self.assertEqual(prior.shape, torch.Size([2]))
     self.assertTrue(torch.equal(prior.loc, torch.tensor([0.0, 0.0])))
     self.assertTrue(torch.equal(prior.scale, torch.tensor([1.0, 1.0])))
     parameter = torch.tensor([1.0, 2.0])
     self.assertAlmostEqual(
         prior.log_prob(parameter).item(),
         2 * math.log(1 / math.sqrt(2 * math.pi)) - 0.5 * (parameter ** 2).sum().item(),
         places=5,
     )
Example #8
    def test_normal_prior_log_prob_log_transform(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        mean = torch.tensor(0.0, device=device)
        scale = torch.tensor(1.0, device=device)
        prior = NormalPrior(mean, scale, transform=torch.exp)
        dist = Normal(mean, scale)

        t = torch.tensor(0.0, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
        t = torch.tensor([-1, 0.5], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
        t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
Example #9
 def test_prior(self):
     if self.batch_shape is None:
         prior = NormalPrior(0.0, 1.0)
     else:
         prior = NormalPrior(torch.zeros(self.batch_shape),
                             torch.ones(self.batch_shape))
     mean = self.create_mean(prior=prior)
     self.assertEqual(mean.mean_prior, prior)
     pickle.loads(pickle.dumps(
         mean))  # Should be able to pickle and unpickle with a prior
     value = prior.sample()
     mean._constant_closure(mean, value)
     self.assertTrue(
         torch.equal(mean.constant.data,
                     value.reshape(mean.constant.data.shape)))
Example #10
    def __init__(self, train_x, train_y, likelihood, Z_init):

        # Locations Z corresponding to u, they can be randomly initialized or
        # regularly placed.
        self.inducing_inputs = Z_init
        self.num_inducing = len(Z_init)
        self.n = len(train_y)
        self.data_dim = train_x.shape[1]
        # Sparse Variational Formulation
        q_u = CholeskyVariationalDistribution(self.num_inducing)
        q_f = VariationalStrategy(self,
                                  self.inducing_inputs,
                                  q_u,
                                  learn_inducing_locations=True)
        super(BayesianStochasticVariationalGP, self).__init__(q_f)
        self.likelihood = likelihood
        self.train_x = train_x
        self.train_y = train_y

        self.mean_module = ZeroMean()
        self.base_covar_module = ScaleKernel(RBFKernel())
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel())

        # Hyperparameter Variational distribution
        hyper_prior_mean = torch.Tensor([0])
        hyper_dim = len(hyper_prior_mean)

        log_hyper_prior = NormalPrior(hyper_prior_mean,
                                      torch.ones_like(hyper_prior_mean))
        self.log_theta = LogHyperVariationalDist(hyper_dim, log_hyper_prior,
                                                 self.n, self.data_dim)
Example #11
 def test_prior_type(self):
     """
     Raising TypeError if prior type is other than gpytorch.priors.Prior
     """
     kernel_fn = lambda prior: self.create_kernel_with_prior(prior)
     kernel_fn(None)
     kernel_fn(NormalPrior(0, 1))
     self.assertRaises(TypeError, kernel_fn, 1)
Example #12
 def test_vector_normal_prior(self):
     prior = NormalPrior(torch.tensor([-0.5, 0.5]), torch.tensor([0.5, 1.0]))
     self.assertFalse(prior.log_transform)
     self.assertTrue(prior.is_in_support(torch.rand(1)))
     self.assertEqual(prior.shape, torch.Size([2]))
     self.assertTrue(torch.equal(prior.loc, prior.loc.new([-0.5, 0.5])))
     self.assertTrue(torch.equal(prior.scale, prior.scale.new([0.5, 1.0])))
     parameter = prior.loc.new([1.0, 2.0])
     expected_log_prob = (
         ((1 / math.sqrt(2 * math.pi) / prior.scale).log() -
          0.5 / prior.scale**2 *
          (prior.loc.new_tensor(parameter) - prior.loc)**2).sum().item())
     self.assertAlmostEqual(prior.log_prob(
         prior.loc.new_tensor(parameter)).item(),
                            expected_log_prob,
                            places=5)
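A short note on the expected value: it is just the independent-Normal log-density summed over dimensions,

    \log p(x) = \sum_i \left( \log \frac{1}{\sigma_i \sqrt{2\pi}} - \frac{(x_i - \mu_i)^2}{2\sigma_i^2} \right),

which is exactly what expected_log_prob above computes term by term.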
Example #13
def select_next_points_botorch(observed_X: List[List[float]],
                               observed_y: List[float]) -> np.ndarray:
    """Generate the next sample to evaluate with XTB

    Uses BoTorch to pick the next sample using an Upper Confidence Bound
    acquisition function

    Args:
        observed_X: Observed coordinates
        observed_y: Observed energies
    Returns:
        Next coordinates to try
    """

    # Clip the energies if needed
    observed_y = np.clip(observed_y, -np.inf,
                         2 + np.log10(np.clip(observed_y, 1, np.inf)))

    # Convert inputs to torch arrays
    train_X = torch.tensor(observed_X, dtype=torch.float)
    train_y = torch.tensor(observed_y, dtype=torch.float)
    train_y = train_y[:, None]
    train_y = standardize(-1 * train_y)

    # Make the GP
    gp = SingleTaskGP(train_X,
                      train_y,
                      covar_module=gpykernels.ScaleKernel(
                          gpykernels.ProductStructureKernel(
                              num_dims=train_X.shape[1],
                              base_kernel=gpykernels.PeriodicKernel(
                                  period_length_prior=NormalPrior(360, 0.1)))))
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)

    # Solve the optimization problem
    #  Following boss, we use Eq. 5 of https://arxiv.org/pdf/1012.2599.pdf with delta=0.1
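    #  kappa = sqrt(2 * log(n^(d/2 + 2) * pi^2 / (3 * delta))) with delta = 0.1,
    #  where n is the number of points sampled so far and d the input dimension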
    n_sampled, n_dim = train_X.shape
    kappa = np.sqrt(
        2 *
        np.log(np.power(n_sampled, n_dim / 2 + 2) * np.pi**2 /
               (3.0 * 0.1)))  # Results in more exploration over time
    ucb = UpperConfidenceBound(gp, kappa)
    bounds = torch.zeros(2, train_X.shape[1])
    bounds[1, :] = 360
    candidate, acq_value = optimize_acqf(ucb,
                                         bounds=bounds,
                                         q=1,
                                         num_restarts=64,
                                         raw_samples=64)
    return candidate.detach().numpy()[0, :]
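A hypothetical call, just to show the expected shapes (coordinates in degrees, matching the [0, 360] bounds used above):

observed_X = [[10.0, 20.0], [200.0, 340.0], [90.0, 180.0]]
observed_y = [0.5, 1.2, 0.8]
next_point = select_next_points_botorch(observed_X, observed_y)  # ndarray of shape (2,)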
Example #14
File: demo.py  Project: vr308/GPLVM
    def __init__(self, n, data_dim, latent_dim, n_inducing, pca=False):

        self.n = n
        self.batch_shape = torch.Size([data_dim])

        # Locations Z_{d} corresponding to u_{d}, they can be randomly initialized or
        # regularly placed with shape (D x n_inducing x latent_dim).
        self.inducing_inputs = torch.randn(data_dim, n_inducing, latent_dim)

        # Sparse Variational Formulation

        q_u = CholeskyVariationalDistribution(n_inducing,
                                              batch_shape=self.batch_shape)
        q_f = VariationalStrategy(self,
                                  self.inducing_inputs,
                                  q_u,
                                  learn_inducing_locations=True)

        # Define prior for X
        X_prior_mean = torch.zeros(n, latent_dim)  # shape: N x Q
        prior_x = NormalPrior(X_prior_mean, torch.ones_like(X_prior_mean))

        # Initialise X with PCA or zeros. Note: `Y` (the data matrix) is
        # assumed to be available in the enclosing scope here.
        if pca:
            X_init = _init_pca(Y, latent_dim)  # Initialise X to PCA
        else:
            X_init = torch.nn.Parameter(torch.zeros(n, latent_dim))

        # LatentVariable (X)
        X = VariationalLatentVariable(n, data_dim, latent_dim, X_init, prior_x)
        #X = PointLatentVariable(n, latent_dim, X_init)
        #X = MAPLatentVariable(n, latent_dim, X_init, prior_x)

        super(My_GPLVM_Model, self).__init__(X, q_f)

        # Kernel
        self.mean_module = ConstantMean()  # ConstantMean has no ARD lengthscales
        self.covar_module = ScaleKernel(RBFKernel(ard_num_dims=latent_dim))
Example #15
 def test_vector_normal_prior_invalid_params(self):
     with self.assertRaises(ValueError):
         NormalPrior(torch.tensor([-0.5, 0.5]), torch.tensor([-0.1, 1.0]))
Example #16
    def test_normal_prior_batch_log_prob(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")

        mean = torch.tensor([0.0, 1.0], device=device)
        scale = torch.tensor([1.0, 2.0], device=device)
        prior = NormalPrior(mean, scale)
        dist = Normal(mean, scale)
        t = torch.zeros(2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.zeros(2, 2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        with self.assertRaises(RuntimeError):
            prior.log_prob(torch.zeros(3, device=device))

        mean = torch.tensor([[0.0, 1.0], [-1.0, 2.0]], device=device)
        scale = torch.tensor([[1.0, 2.0], [0.5, 1.0]], device=device)
        prior = NormalPrior(mean, scale)
        dist = Normal(mean, scale)
        t = torch.zeros(2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        t = torch.zeros(2, 2, device=device)
        self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
        with self.assertRaises(RuntimeError):
            prior.log_prob(torch.zeros(3, device=device))
        with self.assertRaises(RuntimeError):
            prior.log_prob(torch.zeros(2, 3, device=device))
Example #17
 def test_normal_prior_validate_args(self):
     with self.assertRaises(ValueError):
         NormalPrior(0, -1, validate_args=True)
Example #18
 def test_normal_prior_to_gpu(self):
     if torch.cuda.is_available():
         prior = NormalPrior(0, 1).cuda()
         self.assertEqual(prior.loc.device.type, "cuda")
         self.assertEqual(prior.scale.device.type, "cuda")
Example #19
File: demo.py  Project: gpleiss/GPLVM
if __name__ == '__main__':

    # Load some data

    Y, n, d, labels = load_unsupervised_data('oilflow')

    # Setting shapes

    n_data_dims = Y.shape[1]
    n_latent_dims = 2
    n_inducing = 32
    X_prior_mean = torch.zeros(Y.shape[0], n_latent_dims)  # shape: N x Q

    # Declaring model with initial inducing inputs and latent prior

    latent_prior = NormalPrior(X_prior_mean, torch.ones_like(X_prior_mean))
    model = GPLVM(Y=Y.T,
                  latent_dim=n_latent_dims,
                  n_inducing=n_inducing,
                  X_init=None,
                  pca=True,
                  latent_prior=latent_prior,
                  kernel=None,
                  likelihood=None)

    # Declaring objective to be optimised along with optimiser

    mll = VariationalELBO(model.likelihood, model, num_data=len(Y.T))

    optimizer = torch.optim.Adam([
        {
Example #20
 def test_pickle_with_prior(self):
     kernel = self.create_kernel_with_prior(NormalPrior(0, 1))
     pickle.loads(pickle.dumps(
         kernel))  # Should be able to pickle and unpickle with a prior
Example #21
 def test_scalar_normal_prior_invalid_params(self):
     with self.assertRaises(ValueError):
         NormalPrior(0, -1)