def test_kissgp_gp_mean_abs_error(self):
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x.data, train_y.data, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.2)
        optimizer.n_iter = 0
        for _ in range(15):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        for param in gp_model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        for param in likelihood.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        optimizer.step()

        # Test the model
        gp_model.eval()
        likelihood.eval()

        with gpytorch.fast_pred_var():
            test_preds = likelihood(gp_model(test_x)).mean()
        mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
        self.assertLess(mean_abs_error.data.squeeze().item(), 0.15)
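A note on the fixtures: `GPRegressionModel`, `make_data`, `train_x`, and `train_y` are defined elsewhere in the test modules these snippets come from. A minimal sketch of the kind of KISS-GP model they assume, written against the current gpytorch API (the grid size and the toy data below are assumptions, not the original fixtures):

import math
import torch
import gpytorch

class GPRegressionModel(gpytorch.models.ExactGP):
    """KISS-GP: an RBF kernel interpolated onto a regular grid (SKI)."""
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.GridInterpolationKernel(
            gpytorch.kernels.RBFKernel(), grid_size=50, num_dims=1
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

def make_data():
    # Illustrative noisy-sine data on [0, 1]; not the original fixture.
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(100) * 0.1
    test_x = torch.linspace(0, 1, 51)
    test_y = torch.sin(test_x * (2 * math.pi))
    return train_x, train_y, test_x, test_y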
def test_kissgp_gp_fast_pred_var():
    with gpytorch.fast_pred_var():
        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x.data, train_y.data, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
        optimizer.n_iter = 0
        for i in range(25):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Test the model
        gp_model.eval()
        likelihood.eval()
        # Set the cache
        test_function_predictions = likelihood(gp_model(train_x))

        # Now bump up the likelihood to something huge
        # This will make it easy to calculate the variance
        likelihood.log_noise.data.fill_(3)
        test_function_predictions = likelihood(gp_model(train_x))

        noise = likelihood.log_noise.exp()
        var_diff = (test_function_predictions.var() - noise).abs()
        assert torch.max(var_diff.data / noise.data) < 0.05
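These snippets call the context manager as a top-level `gpytorch.fast_pred_var()`, which is how early gpytorch releases exposed LOVE-based fast predictive variances. In current gpytorch the same switch lives in the settings module; a minimal usage sketch (assuming a trained `model`/`likelihood` pair and a `test_x` tensor):

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    preds = likelihood(model(test_x))
    mean, var = preds.mean, preds.variance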
Example #3
def test_kissgp_gp_mean_abs_error():
    likelihood = GaussianLikelihood()
    gp_model = GPRegressionModel(train_x.data, train_y.data, likelihood)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

    # Optimize the model
    gp_model.train()
    likelihood.train()

    optimizer = optim.Adam(list(gp_model.parameters()) +
                           list(likelihood.parameters()),
                           lr=0.2)
    optimizer.n_iter = 0
    for i in range(15):
        optimizer.zero_grad()
        output = gp_model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Test the model
    gp_model.eval()
    likelihood.eval()

    with gpytorch.fast_pred_var():
        test_preds = likelihood(gp_model(test_x)).mean()
    mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
    assert mean_abs_error.data.squeeze().item() < 0.15
Example #4
def test_kissgp_classification_fast_pred_var():
    with gpytorch.fast_pred_var():
        train_x, train_y = train_data()
        likelihood = BernoulliLikelihood()
        model = GPClassificationModel(train_x.data)
        mll = gpytorch.mlls.VariationalMarginalLogLikelihood(
            likelihood, model, n_data=len(train_y))

        # Find optimal model hyperparameters
        model.train()
        likelihood.train()
        optimizer = optim.Adam(model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        for i in range(50):
            optimizer.zero_grad()
            output = model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Set back to eval mode
        model.eval()
        likelihood.eval()
        test_preds = likelihood(
            model(train_x)).mean().ge(0.5).float().mul(2).sub(1).squeeze()

        mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
    assert mean_abs_error.data.squeeze().item() < 1e-5
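`GPClassificationModel` and `train_data` are likewise defined outside this excerpt. A rough sketch of an equivalent variational classification model in the current gpytorch API (the class layout below is an assumption; the originals were written against the older `AbstractVariationalGP` / `VariationalMarginalLogLikelihood` interface):

import gpytorch

class GPClassificationModel(gpytorch.models.ApproximateGP):
    def __init__(self, train_x):
        variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(train_x.size(0))
        variational_strategy = gpytorch.variational.VariationalStrategy(
            self, train_x, variational_distribution, learn_inducing_locations=False
        )
        super().__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

# With this model, the variational objective is the ELBO:
# mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.numel())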
    def test_classification_fast_pred_var(self):
        with gpytorch.fast_pred_var():
            train_x, train_y = train_data()
            likelihood = BernoulliLikelihood()
            model = GPClassificationModel(train_x)
            mll = gpytorch.mlls.VariationalMarginalLogLikelihood(likelihood, model, num_data=len(train_y))

            # Find optimal model hyperparameters
            model.train()
            likelihood.train()
            optimizer = optim.Adam(model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(50):
                optimizer.zero_grad()
                output = model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            optimizer.step()

            # Set back to eval mode
            model.eval()
            likelihood.eval()
            test_preds = likelihood(model(train_x)).mean.ge(0.5).float().mul(2).sub(1).squeeze()

            mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
            self.assertLess(mean_abs_error.item(), 1e-5)
Example #6
File: mk_tester.py  Project: g-benton/mkgp
def mk_tester(train_x, train_y, test_x):

    class MultitaskModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(MultitaskModel, self).__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.MultitaskMean(
                gpytorch.means.ConstantMean(), num_tasks=2
            )
            # self.mean_module = gpytorch.means.ConstantMean()
            # self.covar_module = mk_kernel.MultitaskRBFKernel(num_tasks=2, log_task_lengthscales=torch.Tensor([math.log(2.5), math.log(0.3)]))
            self.covar_module = mk_kernel.MultitaskRBFKernel(num_tasks=2)
            # self.covar_module = gpytorch.kernels.ScaleKernel(mk_kernel.multi_kernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)

    # test_data = torch.linspace(0, 10, 100)
    # mod1, mod2 = data_gen(test_data, num_samples=1)

    # dat = torch.stack([mod1, mod2,], -1)[0]
    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskModel(train_x, train_y, likelihood)

    model.train()
    likelihood.train()

    optimizer = torch.optim.Adam([{'params': model.parameters()}, ], lr=0.1)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
        # print('Iter %d/%d - Loss: %.3f   log_length1: %.3f log_length2: %.3f log_noise1: %.3f  log_noise2: %.3f' % (
        #     i + 1, n_iter, loss.item(),
        #     model.covar_module.in_task1.lengthscale.item(),
        #     model.covar_module.in_task2.lengthscale.item(),
        #     model.likelihood.log_task_noises.data[0][0],
        #     model.likelihood.log_task_noises.data[0][1]
        # ))

        optimizer.step()

    model.eval()
    likelihood.eval()

    with torch.no_grad(), gpytorch.fast_pred_var():
        predictions = likelihood(model(test_x))

    return predictions
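Since `mk_tester` returns the multitask predictive distribution at `test_x`, a typical call site looks roughly like this (variable names are illustrative):

preds = mk_tester(train_x, train_y, test_x)  # MultitaskMultivariateNormal
mean = preds.mean                            # shape [n_test, 2]
lower, upper = preds.confidence_region()     # mean +/- 2 standard deviations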
Example #7
    def test_posterior_latent_gp_and_likelihood_fast_pred_var(
            self, cuda=False):
        train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
        with gpytorch.fast_pred_var(), gpytorch.settings.debug(False):
            # We're manually going to set the hyperparameters to
            # something they shouldn't be
            likelihood = GaussianLikelihood(
                noise_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
            gp_model = ExactGPModel(train_x, train_y, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)
            gp_model.covar_module.base_kernel.initialize(log_lengthscale=1)
            gp_model.mean_module.initialize(constant=0)
            likelihood.initialize(log_noise=1)

            if cuda:
                gp_model.cuda()
                likelihood.cuda()

            # Find optimal model hyperparameters
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(list(gp_model.parameters()) +
                                   list(likelihood.parameters()),
                                   lr=0.1)
            optimizer.n_iter = 0
            for _ in range(50):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            optimizer.step()

            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))

            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.noise_covar.raw_noise.data.fill_(3)
            test_function_predictions = likelihood(gp_model(train_x))

            noise = likelihood.noise_covar.noise
            var_diff = (test_function_predictions.variance - noise).abs()

            self.assertLess(torch.max(var_diff / noise), 0.05)
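The final check works because, for an exact GP, the predictive variance of the observed values at a training input x is k(x, x) - k_*^T (K + sigma^2 I)^{-1} k_* + sigma^2. Once the raw noise is inflated so that sigma^2 dwarfs the kernel's output scale, the data-dependent correction term is negligible, and the predictive variance should match the likelihood noise to within a few percent.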
Example #8
    def test_sgpr_fast_pred_var(self):
        train_x, train_y, test_x, test_y = make_data()
        likelihood = GaussianLikelihood()
        gp_model = GPRegressionModel(train_x.data, train_y.data, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

        for param in gp_model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        for param in likelihood.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)

        # Test the model
        gp_model.eval()
        likelihood.eval()

        with gpytorch.settings.max_preconditioner_size(5), \
                gpytorch.settings.max_cg_iterations(50):
            with gpytorch.fast_pred_var(True):
                fast_var = gp_model(test_x).var()
                fast_var_cache = gp_model(test_x).var()
                self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3)

            with gpytorch.fast_pred_var(False):
                slow_var = gp_model(test_x).var()

        self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3)
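The `GPRegressionModel` in this SGPR test is again defined elsewhere; SGPR models in gpytorch are usually built from `gpytorch.kernels.InducingPointKernel`. A minimal sketch under that assumption (inducing-point count and base kernel are illustrative):

import gpytorch

class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base_kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        self.covar_module = gpytorch.kernels.InducingPointKernel(
            base_kernel,
            inducing_points=train_x[:25].clone(),  # 25 inducing points, illustrative
            likelihood=likelihood,
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)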
Example #9
def main():

    ## set up data ##
    # train_x = torch.Tensor([i for i in range(1, 11)])
    train_x = torch.linspace(0, 1, 10)
    test_x = torch.linspace(0, 1, 1000)

    # train_x = torch.linspace(0, 3.14)
    # test_x = torch.linspace(0, 3.14, 1000)
    # train_y = torch.stack([torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,], -1)
    train_y = torch.stack([
        torch.sin(train_x * (2 * math.pi)),
        torch.cos(train_x * (2 * math.pi)),
    ], -1)
    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskModel(train_x, train_y, likelihood)

    temp_mat = model.covar_module(train_x, train_x)
    # print(temp_mat.evaluate())

    model.eval()
    likelihood.eval()

    with torch.no_grad(), gpytorch.fast_pred_var():
        predictions = likelihood(model(test_x))
        mean = predictions.mean
        # lower, upper = predictions.confidence_region()

    f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))
    y1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y1_ax.plot(test_x.numpy(), mean[:, 0].detach().numpy(), 'b')
    # Shade in confidence
    # y1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
    y1_ax.set_ylim([-3, 3])
    y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y1_ax.set_title('Observed Values (Likelihood)')

    # Plot training data as black stars
    y2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y2_ax.plot(test_x.numpy(), mean[:, 1].detach().numpy(), 'b')
    # Shade in confidence
    # y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
    y2_ax.set_ylim([-3, 3])
    y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y2_ax.set_title('Observed Values (Likelihood)')
    plt.show()
Example #10
    def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
        with gpytorch.fast_pred_var():
            # We're manually going to set the hyperparameters to
            # something they shouldn't be
            likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
            gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)
            gp_model.rbf_covar_module.initialize(log_lengthscale=1)
            gp_model.mean_module.initialize(constant=0)
            likelihood.initialize(log_noise=1)

            # Find optimal model hyperparameters
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(list(gp_model.parameters()) +
                                   list(likelihood.parameters()),
                                   lr=0.1)
            optimizer.n_iter = 0
            for _ in range(50):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            optimizer.step()

            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))

            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.log_noise.data.fill_(3)
            test_function_predictions = likelihood(gp_model(train_x))

            noise = likelihood.log_noise.exp()
            var_diff = (test_function_predictions.var() - noise).abs()

            self.assertLess(torch.max(var_diff.data / noise.data), 0.05)
Example #11
File: test_kron.py  Project: g-benton/mkgp
def main():
    num_pts = 100
    test_x = torch.linspace(0, 10, num_pts)
    dat1, mean1, dat2, mean2 = data_gen(test_x)
    test_y = torch.stack([dat1, dat2], -1)[0]
    num_train = 20
    indices = random.sample(range(num_pts), num_train)
    train_x = test_x[indices]
    train_y = test_y[indices, :]

    class KronMultitaskModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(KronMultitaskModel, self).__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.MultitaskMean(
                gpytorch.means.ConstantMean(), num_tasks=2
            )
            self.covar_module = gpytorch.kernels.MultitaskKernel(
                gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1
            )
        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)


    kronlikelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    kronmodel = KronMultitaskModel(train_x, train_y, kronlikelihood)
    kronmodel.train()
    kronlikelihood.train()
    optimizer = torch.optim.Adam([{'params': kronmodel.parameters()}], lr=0.1)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(kronlikelihood, kronmodel)
    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = kronmodel(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.step()
    kronmodel.eval()
    kronlikelihood.eval()
    with torch.no_grad(), gpytorch.fast_pred_var():
        eval_x = torch.linspace(0, 10, 1000)
        kronpredictions = kronlikelihood(kronmodel(eval_x))
        kronmean = kronpredictions.mean
        kronlower, kronupper = kronpredictions.confidence_region()

    print("lower = ", kronlower)
    return 1
Example #12
    def test_kissgp_gp_fast_pred_var(self):
        with gpytorch.fast_pred_var(), gpytorch.settings.debug(False):
            train_x, train_y, test_x, test_y = make_data()
            likelihood = GaussianLikelihood()
            gp_model = GPRegressionModel(train_x, train_y, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(list(gp_model.parameters()) +
                                   list(likelihood.parameters()),
                                   lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))

            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.log_noise.data.fill_(3)
            test_function_predictions = likelihood(gp_model(train_x))

            noise = likelihood.log_noise.exp()
            var_diff = (test_function_predictions.variance - noise).abs()
            self.assertLess(torch.max(var_diff / noise), 0.05)
Example #13
File: models.py  Project: zphilip/MOGP-AL
    def predict(self, x, y_ind, return_var=False, return_ent=False):
        # in absence of any training data
        if self.model is None:
            if return_ent:
                return np.full(len(x), 0.0)
            elif return_var:
                # assuming rbf kernel with scale = 1
                return np.full(len(x), 0.0), np.full(len(x), 1.0)
            else:
                raise NotImplementedError(
                    'Predictive distribution can not be estimated in absence of training data'
                )

        self.model.eval()
        self.likelihood.eval()
        ind_ = to_torch(y_ind).long()

        # TODO: for fast variance computation, add all the relevant flags
        # fast_pred_var uses LOVE
        with torch.no_grad():
            x_ = to_torch(x)
            if len(self._train_x) > 10:
                with gpytorch.fast_pred_var():
                    pred_grv = self.likelihood(self.model(x_, ind_))
            else:
                pred_grv = self.likelihood(self.model(x_, ind_))

            if return_ent:
                return entropy_from_cov(
                    pred_grv.covariance_matrix.cpu().numpy())

            # single mean
            mu = pred_grv.mean + self._train_y_mean

            # category-wise mean
            # mu = pred_grv.mean + torch.gather(self._train_y_mean, 0, ind_)

            mu = mu.cpu().numpy()
            if return_var:
                var = pred_grv.variance.cpu().numpy()
                return mu, var
        return mu
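`entropy_from_cov` and `to_torch` are helpers from the same project and are not shown here. Assuming `entropy_from_cov` computes the differential entropy of a Gaussian with the given covariance, a small sketch of such a helper:

import numpy as np

def entropy_from_cov(cov):
    # Differential entropy of N(mu, cov): 0.5 * log det(2 * pi * e * cov).
    # An assumed re-implementation, not the project's own helper.
    n = cov.shape[0]
    _, logdet = np.linalg.slogdet(cov)
    return 0.5 * (n * np.log(2 * np.pi * np.e) + logdet)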
Example #14
def indep_rbf(train_x, train_y, test_x):
    class ExactGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

    # TODO: edit this so it reads in stored data (necessary for good comparison)

    # test_data = torch.linspace(0, 10, 100)
    # mod1, mod2 = data_gen(test_data, num_samples=1)
    # mod1_mean = mod1.mean()
    # mod2_mean = mod2.mean()
    # mod1 = mod1[0] - mod1_mean
    # mod2 = mod2[0] - mod2_mean
    mod1 = train_y[:, 0]
    mod2 = train_y[:, 1]
    l1 = gpytorch.likelihoods.GaussianLikelihood()
    model1 = ExactGPModel(train_x, mod1, l1)

    l2 = gpytorch.likelihoods.GaussianLikelihood()
    model2 = ExactGPModel(train_x, mod2, l2)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(l1, model1)

    model1.train()
    l1.train()
    optimizer = torch.optim.Adam([
        {
            'params': model1.parameters()
        },
    ], lr=0.1)

    training_iter = 50
    for i in range(training_iter):
        # Zero gradients from previous iteration
        optimizer.zero_grad()
        # Output from model
        output = model1(train_x)

        # Calc loss and backprop gradients
        loss = -mll(output, mod1)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   log_lengthscale: %.3f   log_noise: %.3f' % (
        #     i + 1, training_iter, loss.item(),
        #     model1.covar_module.base_kernel.log_lengthscale.item(),
        #     model1.likelihood.log_noise.item()
        # ))
        optimizer.step()

    mll2 = gpytorch.mlls.ExactMarginalLogLikelihood(l2, model2)
    model2.train()
    l2.train()
    optimizer = torch.optim.Adam([
        {
            'params': model2.parameters()
        },
    ], lr=0.1)

    training_iter = 100
    for i in range(training_iter):
        # Zero gradients from previous iteration
        optimizer.zero_grad()
        # Output from model
        output = model2(train_x)

        # Calc loss and backprop gradients
        loss = -mll2(output, mod2)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   log_lengthscale: %.3f   log_noise: %.3f' % (
        #     i + 1, training_iter, loss.item(),
        #     model2.covar_module.base_kernel.log_lengthscale.item(),
        #     model2.likelihood.log_noise.item()
        # ))
        optimizer.step()

    model1.eval()
    model2.eval()
    l1.eval()
    l2.eval()
    with torch.no_grad(), gpytorch.fast_pred_var():
        # test_x = torch.linspace(0, 1, 51)
        predictions = l1(model1(test_x))
        mean1 = predictions.mean

        predictions = l2(model2(test_x))
        mean2 = predictions.mean

    return torch.stack([mean1, mean2], -1)
Example #15
        #    i + 1,
        #    n_iter,
        #    loss.item(),
        #    model.covar_module.in_task1.lengthscale,
        #    model.covar_module.in_task2.lengthscale,
        #    model.covar_module.output_scale_kernel.covar_matrix[0,0,0],
        #    model.covar_module.output_scale_kernel.covar_matrix[0,1,1],
        #    model.covar_module.output_scale_kernel.covar_matrix[0,1,0],
        #    likelihood.log_task_noises[0,0].exp(),
        #    likelihood.log_task_noises[0,1].exp()
        #))
        optimizer.step()

    model.eval()
    likelihood.eval()
    with torch.no_grad(), gpytorch.fast_pred_var():
        eval_x = torch.linspace(0, 10, 1000)
        predictions = likelihood(model(eval_x))
        mean = predictions.mean
        #lower,upper = predictions.confidence_region()

    multi_mse_task1 = np.append(
        multi_mse_task1,
        np.sum(np.power(mean[test_indices, 0].numpy() - test_y[:, 0], 2)) /
        np.shape(test_y)[0])
    multi_mse_task2 = np.append(
        multi_mse_task2,
        np.sum(np.power(mean[test_indices, 1].numpy() - test_y[:, 1], 2)) /
        np.shape(test_y)[0])

    ## FIT KRONECKER MULTITASK METHOD
Example #16
def main():
    train_x = torch.linspace(0, 1, 100)

    train_y = torch.stack([
        torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
        torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
    ], -1)

    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskModel(train_x, train_y, likelihood)

    # Set into eval mode
    # Find optimal model hyperparameters
    model.train()
    likelihood.train()

    # Use the Adam optimizer

    optimizer = torch.optim.Adam([
        {
            'params': model.parameters()
        },
    ], lr=0.1)

    # "Loss" for GPs - the marginal log likelihood
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
        optimizer.step()

    model.eval()
    likelihood.eval()

    f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))

    # # Make predictions
    with torch.no_grad(), gpytorch.fast_pred_var():
        test_x = torch.linspace(0, 1, 51)
        predictions = likelihood(model(test_x))
        mean = predictions.mean
        # lower, upper = predictions.confidence_region()

    # This contains predictions for both tasks, flattened out
    # The first half of the predictions is for the first task
    # The second half is for the second task

    # Plot training data as black stars
    y1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y1_ax.plot(test_x.numpy(), mean[:, 0].numpy(), 'b')
    # Shade in confidence
    # y1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
    y1_ax.set_ylim([-3, 3])
    y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y1_ax.set_title('Observed Values (Likelihood)')

    # Plot training data as black stars
    y2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y2_ax.plot(test_x.numpy(), mean[:, 1].numpy(), 'b')
    # Shade in confidence
    # y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
    y2_ax.set_ylim([-3, 3])
    y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y2_ax.set_title('Observed Values (Likelihood)')
    plt.show()

    plt.figure()
    plt.plot(mean[:, 0].numpy(), 'b')
    plt.plot(mean[:, 1].numpy(), 'k')
    plt.show()
Example #17
def main():

    ## set up data ##
    train_x = torch.linspace(0, 1, 100)
    test_x = torch.linspace(0.1, 1.1, 52)
    train_y = torch.stack([
        torch.sin(train_x *
                  (4 * math.pi)) + torch.randn(train_x.size()) * 0.2 + 1,
        torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
    ], -1)
    # train_y1 = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
    # train_y2 = torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2
    # train_y = torch.cat((train_y1, train_y2), 0)

    ## set up model ##
    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskModel(train_x, train_y, likelihood)
    # model.covar_module.log_task_lengthscales = torch.Tensor([math.log(2.5), math.log(0.3)])

    model.train()
    likelihood.train()
    for i in model.named_parameters():
        print(i)
    optimizer = torch.optim.Adam([
        {
            'params': model.parameters()
        },
    ], lr=0.1)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
        print(
            'Iter %d/%d - Loss: %.3f   logscale1: %.3f  logscale2: %.3f  log_noise1: %.3f  log_noise2: %.3f'
            % (i + 1, n_iter, loss.item(),
               model.covar_module.log_task_lengthscales.data[0][0],
               model.covar_module.log_task_lengthscales.data[0][1],
               model.likelihood.log_task_noises.data[0][0],
               model.likelihood.log_task_noises.data[0][1]))

        for ind, ii in enumerate(model.named_parameters()):
            print(ii[1].grad)
        optimizer.step()

    model.eval()
    likelihood.eval()

    # print(model.covar_module.log_task_lengthscales)

    with torch.no_grad(), gpytorch.fast_pred_var():
        # test_x = torch.linspace(0, 1, 51)
        predictions = likelihood(model(test_x))
        mean = predictions.mean

    f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))
    y1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y1_ax.plot(test_x.numpy(), mean[:, 0].numpy(), 'b')
    # Shade in confidence
    # y1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
    y1_ax.set_ylim([-3, 3])
    y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y1_ax.set_title('Observed Values (Likelihood)')

    # Plot training data as black stars
    y2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')
    # Predictive mean as blue line
    y2_ax.plot(test_x.numpy(), mean[:, 1].numpy(), 'b')
    # Shade in confidence
    # y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
    y2_ax.set_ylim([-3, 3])
    y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    y2_ax.set_title('Observed Values (Likelihood)')
    plt.show()
Example #18
def multitask(test_data, test_y):
    class MultitaskGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(MultitaskGPModel, self).__init__(train_x, train_y,
                                                   likelihood)
            self.mean_module = gpytorch.means.MultitaskMean(
                gpytorch.means.ConstantMean(), num_tasks=2)
            self.covar_module = gpytorch.kernels.MultitaskKernel(
                gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1)

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultitaskMultivariateNormal(
                mean_x, covar_x)

    # test_data = torch.linspace(0, 10, 100)
    # mod1, mod2 = data_gen(test_data, num_samples=1)
    # dat = torch.stack([mod1, mod2,], -1)[0]
    dat = test_y
    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskGPModel(test_data, dat, likelihood)

    model.train()
    likelihood.train()

    # Use the adam optimizer
    optimizer = torch.optim.Adam(
        [
            {
                'params': model.parameters()
            },  # Includes GaussianLikelihood parameters
        ],
        lr=0.1)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = model(test_data)
        loss = -mll(output, dat)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   logscale: %.3f  log_noise: %.3f' % (
        #     i + 1, n_iter, loss.item(),
        #     model.covar_module.data_covar_module.log_lengthscale.data.item(),
        #     model.likelihood.log_noise.item()
        # ))
        # print(model.covar_module.task_covar_module.covar_matrix.evaluate())
        optimizer.step()

    model.eval()
    likelihood.eval()

    # f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))

    # # Make predictions
    with torch.no_grad(), gpytorch.fast_pred_var():
        # test_x = torch.linspace(0, 1, 51)
        predictions = likelihood(model(test_data))
        mean = predictions.mean
        lower, upper = predictions.confidence_region()

    # y1_ax.plot(test_data.detach().numpy(), dat[:, 0].detach().numpy(), 'k*')
    # # Predictive mean as blue line
    # y1_ax.plot(test_data.numpy(), mean[:, 0].numpy(), 'b')
    # # Shade in confidence
    # y1_ax.fill_between(test_data.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
    # # y1_ax.set_ylim([-3, 3])
    # y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    # y1_ax.set_title('Observed Values (Likelihood)')
    #
    # # Plot training data as black stars
    # y2_ax.plot(test_data.detach().numpy(), dat[:, 1].detach().numpy(), 'k*')
    # # Predictive mean as blue line
    # y2_ax.plot(test_data.numpy(), mean[:, 1].numpy(), 'b')
    # # Shade in confidence
    # y2_ax.fill_between(test_data.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
    # # y2_ax.set_ylim([-3, 3])
    # y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    # y2_ax.set_title('Observed Values (Likelihood)')
    # plt.show()
    return mean
Example #19
def indep_rbf(test_data, test_y):

    class ExactGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
    # TODO: edit this so it reads in stored data (necessary for good comparison)

    # test_data = torch.linspace(0, 10, 100)
    # mod1, mod2 = data_gen(test_data, num_samples=1)
    # mod1_mean = mod1.mean()
    # mod2_mean = mod2.mean()
    # mod1 = mod1[0] - mod1_mean
    # mod2 = mod2[0] - mod2_mean
    mod1 = test_y[:, 0]
    mod2 = test_y[:, 1]
    l1 = gpytorch.likelihoods.GaussianLikelihood()
    model1 =  ExactGPModel(test_data, mod1, l1)

    l2 = gpytorch.likelihoods.GaussianLikelihood()
    model2 = ExactGPModel(test_data, mod2, l2)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(l1, model1)

    model1.train()
    l1.train()
    optimizer = torch.optim.Adam([{'params': model1.parameters()}], lr=0.1)

    training_iter = 100
    for i in range(training_iter):
        # Zero gradients from previous iteration
        optimizer.zero_grad()
        # Output from model
        output = model1(test_data)

        # Calc loss and backprop gradients
        loss = -mll(output, mod1)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   log_lengthscale: %.3f   log_noise: %.3f' % (
        #     i + 1, training_iter, loss.item(),
        #     model1.covar_module.base_kernel.log_lengthscale.item(),
        #     model1.likelihood.log_noise.item()
        # ))
        optimizer.step()

    mll2 = gpytorch.mlls.ExactMarginalLogLikelihood(l2, model2)
    model2.train()
    l2.train()
    optimizer = torch.optim.Adam([{'params': model2.parameters()}], lr=0.1)

    training_iter = 100
    for i in range(training_iter):
        # Zero gradients from previous iteration
        optimizer.zero_grad()
        # Output from model
        output = model2(test_data)

        # Calc loss and backprop gradients
        loss = -mll2(output, mod2)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   log_lengthscale: %.3f   log_noise: %.3f' % (
        #     i + 1, training_iter, loss.item(),
        #     model2.covar_module.base_kernel.log_lengthscale.item(),
        #     model2.likelihood.log_noise.item()
        # ))
        optimizer.step()


    model1.eval()
    model2.eval()
    l1.eval()
    l2.eval()
    with torch.no_grad(), gpytorch.fast_pred_var():
        # test_x = torch.linspace(0, 1, 51)
        predictions = l1(model1(test_data))
        mean1 = predictions.mean

        predictions = l2(model2(test_data))
        mean2 = predictions.mean

    # f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))
    # y1_ax.plot(test_data.detach().numpy(), mod1.numpy(), 'k*')
    # # Predictive mean as blue line
    # y1_ax.plot(test_data.numpy(), mean1.detach().numpy(), 'b')
    # # Shade in confidence
    # # y1_ax.fill_between(test_data.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
    # # y1_ax.set_ylim([-3, 3])
    # y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    # y1_ax.set_title('Observed Values (Likelihood)')
    #
    # # Plot training data as black stars
    # y2_ax.plot(test_data.detach().numpy(), mod2.detach().numpy(), 'k*')
    # # Predictive mean adetach().s blue line
    # y2_ax.plot(test_data.numpy(), mean2.detach().numpy(), 'b')
    # # Shade in confidence
    # # y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
    # # y2_ax.set_ylim([-3, 3])
    # y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
    # y2_ax.set_title('Observed Values (Likelihood)')
    # plt.show()

    return torch.stack([mean1, mean2], -1)
Example #20
def multitask(train_x, train_y, test_x):
    class MultitaskGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super(MultitaskGPModel, self).__init__(train_x, train_y,
                                                   likelihood)
            self.mean_module = gpytorch.means.MultitaskMean(
                gpytorch.means.ConstantMean(), num_tasks=2)
            self.covar_module = gpytorch.kernels.MultitaskKernel(
                gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1)

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultitaskMultivariateNormal(
                mean_x, covar_x)

    # test_data = torch.linspace(0, 10, 100)
    # mod1, mod2 = data_gen(test_data, num_samples=1)
    # dat = torch.stack([mod1, mod2,], -1)[0]
    likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
    model = MultitaskGPModel(train_x, train_y, likelihood)

    model.train()
    likelihood.train()

    # Use the adam optimizer
    optimizer = torch.optim.Adam(
        [
            {
                'params': model.parameters()
            },  # Includes GaussianLikelihood parameters
        ],
        lr=0.1)

    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

    n_iter = 50
    for i in range(n_iter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        # print('Iter %d/%d - Loss: %.3f   logscale: %.3f  log_noise: %.3f' % (
        #     i + 1, n_iter, loss.item(),
        #     model.covar_module.data_covar_module.log_lengthscale.data.item(),
        #     model.likelihood.log_noise.item()
        # ))
        # print(model.covar_module.task_covar_module.covar_matrix.evaluate())
        optimizer.step()

    model.eval()
    likelihood.eval()

    # f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))

    # # Make predictions
    with torch.no_grad(), gpytorch.fast_pred_var():
        # test_x = torch.linspace(0, 1, 51)
        predictions = likelihood(model(test_x))
        mean = predictions.mean
        lower, upper = predictions.confidence_region()

    return mean
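Both `multitask` and `indep_rbf` (the three-argument variants above) return an [n_test, 2] tensor of predictive means, so the two approaches can be compared with a per-task MSE in the same spirit as the `multi_mse_task*` bookkeeping in Example #15. A sketch, assuming a held-out `test_y` of matching shape:

multi_mean = multitask(train_x, train_y, test_x)
indep_mean = indep_rbf(train_x, train_y, test_x)
for task in range(2):
    mse_multi = torch.mean((multi_mean[:, task] - test_y[:, task]) ** 2).item()
    mse_indep = torch.mean((indep_mean[:, task] - test_y[:, task]) ** 2).item()
    print('task %d: multitask MSE = %.4f, independent MSE = %.4f' % (task, mse_multi, mse_indep))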