Example #1
def test_deepgplayer_checks_constructor_args():
    with pytest.raises(ValueError):
        deepgp.Layer(kernels.RBFKernel(), input_dim=0)
    with pytest.raises(ValueError):
        deepgp.Layer(kernels.RBFKernel(), output_dim=0)
    with pytest.raises(ValueError):
        deepgp.Layer(kernels.RBFKernel(), grid_bound=0)
    with pytest.raises(ValueError):
        deepgp.Layer(kernels.RBFKernel(), grid_num=0)
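These checks pin down that non-positive constructor arguments are rejected with a ValueError. For illustration, a minimal sketch of the kind of guard they imply (a hypothetical helper, not the library's actual code):

def _require_positive(name, value):
    # Hypothetical guard matching the ValueError the tests above expect.
    if value <= 0:
        raise ValueError(f"{name} must be positive, got {value!r}")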
Example #2
def test_deepgp_can_compute_kl_regularization():
    dgp = deepgp.DeepGP(
        layers=(deepgp.Layer(kernels.RBFKernel()),
                deepgp.Layer(kernels.RBFKernel())),
        likelihood=deepgp.ExpPoisson(),
    )
    dgp(torch.randn(10, 1))
    assert torch.is_tensor(dgp.kl_regularization)
    assert dgp.kl_regularization > 0.0
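The KL term accumulated here is the regularizer of the standard stochastic variational ELBO. A rough sketch of how such a negative ELBO is typically assembled, assuming the dgp(inputs) call populates dgp.kl_regularization as the test above suggests (illustrative, not necessarily this library's exact implementation):

def negative_elbo_sketch(dgp, inputs, targets, num_data):
    output = dgp(inputs)  # forward pass populates dgp.kl_regularization
    # Expected log-likelihood, rescaled from the minibatch to the full dataset.
    exp_log_lik = output.log_prob(targets).sum() * (num_data / inputs.size(0))
    return -exp_log_lik + dgp.kl_regularization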
Example #3
def test_deepgp_can_compute_outputs():
    dgp = deepgp.DeepGP(
        layers=(deepgp.Layer(kernels.RBFKernel()),
                deepgp.Layer(kernels.RBFKernel())),
        likelihood=deepgp.ExpPoisson(),
    )
    loss = dgp(torch.randn(10, 1)).log_prob(torch.ones(10, 1)).sum()
    loss.backward()
    loss = dgp(torch.randn(10, 1)).log_prob(torch.ones(10, 1)).sum()
    loss.backward()
    dgp.eval()
    dgp(torch.randn(10, 1))
Example #4
def test_deepgp_can_compute_negative_elbo_loss():
    dgp = deepgp.DeepGP(
        layers=(deepgp.Layer(kernels.RBFKernel()),
                deepgp.Layer(kernels.RBFKernel())),
        likelihood=deepgp.ExpPoisson(),
    )
    elbo = dgp.negative_elbo(torch.randn(10, 1),
                             torch.ones((10, 1)),
                             num_data=10)
    elbo.backward()
    elbo = dgp.negative_elbo(torch.randn(10, 1),
                             torch.ones((10, 1)),
                             num_data=10)
    elbo.backward()
    assert torch.is_tensor(elbo)
Example #5
def test_dgplayer_can_compute_predictions():
    dgp = deepgp.Layer(kernels.RBFKernel(),
                       input_dim=2,
                       output_dim=3,
                       grid_num=4)
    output = dgp(torch.rand(10, 2))
    assert output.size() == (10, 3)
Example #6
def test_deepgplayer_checks_input_dims():
    dgp = deepgp.Layer(kernels.RBFKernel(),
                       input_dim=2,
                       output_dim=3,
                       grid_num=4)
    with pytest.raises(ValueError):
        dgp(torch.rand(10, 10))
Example #7
def test_deepgplayer_can_turn_off_kl_regularization():
    dgp = deepgp.Layer(kernels.RBFKernel(),
                       input_dim=2,
                       output_dim=3,
                       grid_num=4)
    dgp(torch.rand(10, 2), compute_kl=False)
    assert dgp.kl_regularization.item() == 0.0
Example #8
def test_deepgp_can_backward_with_more_than_one_dim():
    dgp = deepgp.DeepGP(
        layers=(deepgp.Layer(kernels.RBFKernel(),
                             input_dim=2,
                             output_dim=1,
                             grid_num=4), ),
        likelihood=deepgp.ExpPoisson(),
    )
    dgp.forward(torch.randn(10, 2)).mean.sum().backward()
Example #9
    def __init__(self, x_train, y_train, likelihood):
        super().__init__(x_train, y_train, likelihood)
        self.mean_module = means.ConstantMean(prior=priors.UniformPrior(0, 5))
        # periodic_kernel = kernels.PeriodicKernel(lengthscale_prior=priors.UniformPrior(0.01, 0.5),
        #                                          period_length_prior=priors.UniformPrior(0.05, 2.5))

        rbf_kernel = kernels.RBFKernel(
            lengthscale_prior=priors.UniformPrior(1, 30))
        self.covar_module = kernels.ScaleKernel(
            rbf_kernel, outputscale_prior=priors.UniformPrior(0, 1))
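Worth noting: in gpytorch, attaching a prior such as UniformPrior(1, 30) adds a log-density term used during MAP or MCMC inference; it does not change how the hyperparameter is initialized. A quick check:

import torch
from gpytorch import kernels, priors

k = kernels.RBFKernel(lengthscale_prior=priors.UniformPrior(1, 30))
print(k.lengthscale)  # default init (softplus(0) ≈ 0.69), not a draw from the prior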
Example #10
    def __init__(self, x_train, y_train, likelihood):
        super().__init__(x_train, y_train, likelihood)
        # SKI requires a grid size hyperparameter. This util can help with that.
        # Here we use a grid with the same number of points as the training data (a ratio of 1.0).
        # Performance can be sensitive to this parameter, so you may want to tune it for your own problem on a validation set.
        grid_size = choose_grid_size(x_train, ratio=1.0)
        self.mean_module = means.ConstantMean()
        self.covar_module = kernels.ScaleKernel(
            kernels.GridInterpolationKernel(kernels.RBFKernel(),
                                            grid_size=grid_size,
                                            num_dims=1))
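For reference, choose_grid_size here is presumably gpytorch's gpytorch.utils.grid.choose_grid_size; with ratio=1.0 on 1-D inputs it picks a grid on the order of the number of training points:

import torch
from gpytorch.utils.grid import choose_grid_size

x_train = torch.linspace(0, 1, 100).unsqueeze(-1)
print(choose_grid_size(x_train, ratio=1.0))  # roughly 100 grid points for 1-D data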
Example #11
    def set_variational_model(self, init_inducing: torch.Tensor):
        self.num_latents = init_inducing.size(0)
        variational_distribution = CholeskyVariationalDistribution(
            init_inducing.size(-2), batch_shape=torch.Size([self.num_latents]))
        self.variational_strategy = variational.LMCVariationalStrategy(
            gpytorch.variational.VariationalStrategy(
                self,
                init_inducing,
                variational_distribution,
                learn_inducing_locations=True),
            num_tasks=self.num_tasks,
            num_latents=self.num_latents,
            latent_dim=self.latent_dim)
        batch_shape = torch.Size([self.num_latents])
        self.mean_module = means.ConstantMean(batch_shape=batch_shape)
        self.covar_module = kernels.ScaleKernel(
            kernels.RBFKernel(batch_shape=batch_shape),
            batch_shape=batch_shape)
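A quick way to see what the batch_shape buys here: the variational distribution holds one q(u) per latent GP, which the LMC strategy then mixes into num_tasks correlated outputs. A sketch of the shapes involved (gpytorch API, illustrative values):

import torch
from gpytorch.variational import CholeskyVariationalDistribution

num_latents, num_inducing = 3, 16
q = CholeskyVariationalDistribution(num_inducing,
                                    batch_shape=torch.Size([num_latents]))
print(q().mean.shape)  # torch.Size([3, 16]): one q(u) per latent GP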
Example #12
    def gp_mapping_pred(self, inputs, outputs, omegas, sigma_ard):
        # theta = theta.view(theta.shape[0])
        # omegas, sigma_ard = theta[:theta.shape[0]-1], theta[theta.shape[0]-1]
        Kss = self.gp_mapping_prior(inputs, omegas, sigma_ard)
        dim = inputs.shape
        Kss = Kss.view(Kss.shape[1], -1)
        batch_size = omegas.shape[0]
        # draw \xi values
        xi = torch.randn(batch_size, 1, dim[1])
        ard_kernel = kernels.RBFKernel(ard_num_dims=dim[1])
        Kes = ard_kernel.forward(xi.mul(omegas.float()),
                                 inputs.mul(omegas.float())).mul(sigma_ard)
        Kss_inv = torch.inverse(Kss)
        Kee = ard_kernel.forward(xi.mul(omegas.float()),
                                 xi.mul(omegas.float())).mul(sigma_ard)
        mean = torch.matmul(torch.matmul(Kes, Kss_inv), outputs)
        mean = mean.view(mean.shape[0], -1)
        # posterior covariance: Kee - Kes Kss^{-1} Kse (Schur complement)
        cov = Kee - torch.matmul(torch.matmul(Kes, Kss_inv),
                                 Kes.view(Kes.shape[0], Kes.shape[2], -1))
        cov = cov.view(cov.shape[0], -1)
        cov = torch.ones(mean.shape).mul(cov).view(mean.shape[0], -1)
        return mean, cov
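For reference, gp_mapping_pred computes the standard GP conditional, mean = K_es K_ss^{-1} y and cov = K_ee - K_es K_ss^{-1} K_se. A sketch of the same computation using a Cholesky solve, which is numerically safer than torch.inverse on kernel matrices (the jitter value is an assumption):

import torch

def gp_conditional(K_es, K_ss, K_ee, y, jitter=1e-6):
    # Factor K_ss once and reuse the factor for both solves.
    L = torch.linalg.cholesky(K_ss + jitter * torch.eye(K_ss.size(-1)))
    mean = K_es @ torch.cholesky_solve(y, L)
    cov = K_ee - K_es @ torch.cholesky_solve(K_es.transpose(-2, -1), L)
    return mean, cov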
Example #13
def run(num_epochs=1024, log_interval: int = 1) -> None:
    train_loader, val_loader = get_data_loaders()
    num_data = len(train_loader) * train_loader.batch_size
    model = dsvi.DeepGP(
        layers=(
            dsvi.Layer(
                kernels.ScaleKernel(kernels.RBFKernel()),
                input_dim=2,
                output_dim=1,
                grid_num=128,
                grid_bound=2.0,
            ),
            dsvi.Layer(
                kernels.ScaleKernel(kernels.RBFKernel()),
                input_dim=1,
                output_dim=1,
                grid_num=8,
                grid_bound=2.0,
            ),
        ),
        likelihood=dsvi.LogisticBernoulli(),
    )

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"

    optimizer = optim.Adam(model.parameters(), lr=0.01)

    def train_and_store_loss(
            engine: ignite.engine.Engine,
            batch: Tuple[torch.Tensor, torch.Tensor]) -> Dict[str, Any]:
        model.train()
        inputs, targets = batch
        optimizer.zero_grad()
        loss = model.negative_elbo(inputs,
                                   targets.unsqueeze(-1),
                                   num_data=num_data,
                                   num_samples=3)
        loss.backward()
        optimizer.step()
        return {"loss": loss.item()}

    trainer = ignite.engine.Engine(train_and_store_loss)

    evaluator = ignite.engine.create_supervised_evaluator(
        model,
        metrics={"accuracy": ignite.metrics.Accuracy()},
        output_transform=lambda x, y, y_pred: ((y_pred.mean > 0.5).float(), y),
    )

    desc = "loss: {:.2f}"
    pbar = tqdm.tqdm(initial=0,
                     leave=False,
                     total=num_epochs,
                     desc=desc.format(0))

    @trainer.on(ignite.engine.Events.EPOCH_COMPLETED)
    def log_training_loss(engine):
        pbar.desc = desc.format(engine.state.output["loss"])
        pbar.update()

    @trainer.on(ignite.engine.Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        model.eval()
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        tqdm.tqdm.write(
            "Validation Results - Epoch: {}  Avg accuracy: {:.2f}".format(
                engine.state.epoch, avg_accuracy))

    trainer.run(train_loader, max_epochs=num_epochs)
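The output_transform passed to the evaluator thresholds the predictive mean at 0.5 because ignite.metrics.Accuracy expects (y_pred, y) pairs with binary predictions already in {0, 1}. A minimal check of that contract:

import torch
from ignite.metrics import Accuracy

metric = Accuracy()
metric.update(((torch.tensor([0.7, 0.2]) > 0.5).long(), torch.tensor([1, 0])))
print(metric.compute())  # 1.0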
Example #14
    def gp_mapping_prior(self, inputs, omegas, sigma_ard):
        # omegas, sigma_ard = parameters.split(list(parameters.shape)[0] - 2)
        ard_kernel = kernels.RBFKernel(ard_num_dims=omegas.shape[0])
        return ard_kernel.forward(inputs.mul(omegas.float()),
                                  inputs.mul(omegas.float())).mul(sigma_ard)
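Scaling the inputs by omegas before an RBF kernel with unit lengthscales is the usual ARD reparameterization: it is equivalent to using lengthscales of 1/omegas. A quick check (gpytorch API; forward is called directly, as in the snippets above):

import torch
from gpytorch import kernels

x = torch.randn(5, 3)
omegas = torch.tensor([0.5, 1.0, 2.0])

k1 = kernels.RBFKernel(ard_num_dims=3)
k1.lengthscale = torch.ones(3)
k2 = kernels.RBFKernel(ard_num_dims=3)
k2.lengthscale = 1.0 / omegas

K1 = k1.forward(x * omegas, x * omegas)
K2 = k2.forward(x, x)
print(torch.allclose(K1, K2, atol=1e-5))  # True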