Example 1
    def test_batch_separate(self):
        a = torch.tensor([[[4, 1], [2, 2], [8, 0]], [[2, 5], [6, 1], [0, 1]]], dtype=torch.float)
        b = torch.tensor([[[0, 0], [2, 1], [1, 0]], [[1, 1], [2, 3], [1, 0]]], dtype=torch.float)
        period = torch.tensor([1, 2], dtype=torch.float).view(2, 1, 1)
        kernel = CosineKernel(batch_shape=torch.Size([2])).initialize(period_length=period)
        kernel.eval()

        actual = torch.zeros(2, 3, 3)
        for k in range(2):
            for i in range(3):
                for j in range(3):
                    actual[k, i, j] = torch.cos(math.pi * ((a[k, i] - b[k, j]) / period[k]).norm(2, dim=-1))

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # diag
        res = kernel(a, b).diag()
        actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims
        actual = torch.zeros(2, 2, 3, 3)
        for k in range(2):
            for i in range(3):
                for j in range(3):
                    for l in range(2):
                        actual[k, l, i, j] = torch.cos(math.pi * ((a[k, i, l] - b[k, j, l]) / period[k]))
        res = kernel(a, b, last_dim_is_batch=True).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims + diag
        res = kernel(a, b, last_dim_is_batch=True).diag()
        actual = actual.diagonal(dim1=-2, dim2=-1)
        self.assertLess(torch.norm(res - actual), 1e-5)
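The reference values above follow the closed-form expression the kernel is expected to produce, k(x1, x2) = cos(pi * ||(x1 - x2) / p||_2). As a minimal standalone sketch (assuming the same, older GPyTorch API with .evaluate() that these tests use), the nested loops can be replaced by broadcasting:

import math
import torch
from gpytorch.kernels import CosineKernel

# Same batched reference computation as the loops above, vectorized with broadcasting.
a = torch.tensor([[[4, 1], [2, 2], [8, 0]], [[2, 5], [6, 1], [0, 1]]], dtype=torch.float)
b = torch.tensor([[[0, 0], [2, 1], [1, 0]], [[1, 1], [2, 3], [1, 0]]], dtype=torch.float)
period = torch.tensor([1, 2], dtype=torch.float).view(2, 1, 1)

diff = (a.unsqueeze(-2) - b.unsqueeze(-3)) / period.unsqueeze(-1)  # (2, 3, 3, 2)
expected = torch.cos(math.pi * diff.norm(2, dim=-1))               # (2, 3, 3)

kernel = CosineKernel(batch_shape=torch.Size([2])).initialize(period_length=period)
kernel.eval()
assert torch.allclose(kernel(a, b).evaluate(), expected, atol=1e-5)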
Example 2
    def test_computes_periodic_function(self):
        a = torch.tensor([[4, 1], [2, 2], [8, 0]], dtype=torch.float)
        b = torch.tensor([[0, 0], [2, 1], [1, 0]], dtype=torch.float)
        period = 1
        kernel = CosineKernel().initialize(period_length=period)
        kernel.eval()

        actual = torch.zeros(3, 3)
        for i in range(3):
            for j in range(3):
                actual[i, j] = torch.cos(math.pi * ((a[i] - b[j]) / period).norm(2, dim=-1))

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # diag
        res = kernel(a, b).diag()
        actual = actual.diag()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims
        actual = torch.zeros(2, 3, 3)
        for i in range(3):
            for j in range(3):
                for l in range(2):
                    actual[l, i, j] = torch.cos(math.pi * ((a[i, l] - b[j, l]) / period))
        res = kernel(a, b, last_dim_is_batch=True).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims + diag
        res = kernel(a, b, last_dim_is_batch=True).diag()
        actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
        self.assertLess(torch.norm(res - actual), 1e-5)
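The last_dim_is_batch=True branches rely on the convention that the last input dimension is treated as an extra batch dimension, so d-dimensional inputs yield d separate one-dimensional kernel matrices. A small shape sketch under that assumption (random inputs, same older GPyTorch API as above):

import torch
from gpytorch.kernels import CosineKernel

a = torch.randn(3, 2)  # n = 3 points, d = 2 dimensions
b = torch.randn(4, 2)  # m = 4 points, d = 2 dimensions
kernel = CosineKernel()

print(kernel(a, b).evaluate().shape)                          # torch.Size([3, 4])
print(kernel(a, b, last_dim_is_batch=True).evaluate().shape)  # torch.Size([2, 3, 4])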
Example 3
    def __init__(self,
                 mean_name='constant',
                 kernel_name='RBF',
                 grid_bounds=[(-1, 1), (-1, 1)],
                 grid_size=100,
                 num_samples=1000):

        self.mean = mean_name
        self.kernel = kernel_name
        self.num_samples = num_samples
        self.grid_bounds = grid_bounds
        self.grid_size = grid_size
        self.x_dim = 2  # x and y dim are fixed for this dataset.
        self.y_dim = 1

        self.data = []

        # create grid
        grid = torch.zeros(grid_size, len(grid_bounds))
        for i in range(len(grid_bounds)):
            grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
            grid[:, i] = torch.linspace(
                grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_size)

        x = gpytorch.utils.grid.create_data_from_grid(grid)

        # initialize likelihood and model
        likelihood = gpytorch.likelihoods.GaussianLikelihood()

        mean_dict = {'constant': ConstantMean()}
        kernel_dict = {
            'RBF': RBFKernel(),
            'cosine': CosineKernel(),
            'linear': LinearKernel(),
            'periodic': PeriodicKernel(),
            'LCM': LCMKernel(base_kernels=[CosineKernel()], num_tasks=1),
            'polynomial': PolynomialKernel(power=3),
            'matern': MaternKernel()
        }

        # evaluate GP on prior distribution
        with gpytorch.settings.prior_mode(True):
            model = ExactGPModel(x,
                                 None,
                                 likelihood,
                                 mean_module=mean_dict[self.mean],
                                 kernel_module=gpytorch.kernels.GridKernel(
                                     kernel_dict[self.kernel], grid=grid))
            gp = model(x)
            for i in range(num_samples):
                y = gp.sample()
                # optionally, observation noise could be added: y + torch.randn(y.size()) * 0.2
                self.data.append((x, y.unsqueeze(1)))
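ExactGPModel here is not a GPyTorch built-in; it is a user-defined wrapper that this constructor (and Example 5 below) assumes will accept a mean module and a kernel module. A minimal sketch of such a wrapper, under that assumption:

import gpytorch

class ExactGPModel(gpytorch.models.ExactGP):
    # Minimal exact-GP wrapper of the kind these examples appear to assume (hypothetical).
    def __init__(self, train_x, train_y, likelihood, mean_module, kernel_module):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = mean_module
        self.covar_module = kernel_module

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)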
Example 4
    def test_computes_periodic_function(self):
        a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
        b = torch.tensor([0, 2], dtype=torch.float).view(2, 1)
        period = 1
        kernel = CosineKernel().initialize(log_period_length=math.log(period))
        kernel.eval()

        actual = torch.zeros(3, 2)
        for i in range(3):
            for j in range(2):
                actual[i, j] = torch.cos(math.pi * (a[i] - b[j]) / period)

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)
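Unlike the other tests on this page, this one sets the period through the older log-scale parameter log_period_length. More recent GPyTorch versions expose the same hyperparameter directly as period_length (a version-dependent detail); a short sketch using the direct form:

from gpytorch.kernels import CosineKernel

period = 2.5  # made-up value
kernel = CosineKernel().initialize(period_length=period)
print(kernel.period_length)  # a (1, 1)-shaped tensor holding 2.5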
Example 5
    def __init__(self,
                 mean_list,
                 kernel_list,
                 num_points=100,
                 num_samples=1000,
                 amplitude_range=(-5., 5.)):
        self.mean_list = mean_list
        self.kernel_list = kernel_list
        self.num_config = len(mean_list) * len(kernel_list)
        self.num_samples = num_samples
        self.num_points = num_points
        self.x_dim = 1  # x and y dim are fixed for this dataset.
        self.y_dim = 1
        self.amplitude_range = amplitude_range
        self.data = []

        # initialize likelihood and model
        x = torch.linspace(self.amplitude_range[0], self.amplitude_range[1],
                           num_points).unsqueeze(1)

        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        mean_dict = {'constant': ConstantMean(), 'linear': LinearMean(1)}
        kernel_dict = {
            'RBF': RBFKernel(),
            'cosine': CosineKernel(),
            'linear': LinearKernel(),
            'periodic': PeriodicKernel(period_length=0.5),
            'LCM': LCMKernel(base_kernels=[CosineKernel()], num_tasks=1),
            'polynomial': PolynomialKernel(power=2),
            'matern': MaternKernel()
        }

        # create a different GP from each possible configuration
        for mean in self.mean_list:
            for kernel in self.kernel_list:
                # evaluate GP on prior distribution
                with gpytorch.settings.prior_mode(True):
                    model = ExactGPModel(x,
                                         None,
                                         likelihood,
                                         mean_module=mean_dict[mean],
                                         kernel_module=kernel_dict[kernel])

                    gp = model(x)
                    # sample from current configuration
                    for i in range(num_samples // self.num_config + 1):
                        y = gp.sample()
                        # optionally, observation noise could be added to y here
                        self.data.append((x, y.unsqueeze(1)))
Example 6
    def test_batch(self):
        a = torch.tensor([[4, 2, 8], [1, 2, 3]], dtype=torch.float).view(2, 3, 1)
        b = torch.tensor([[0, 2, 1], [-1, 2, 0]], dtype=torch.float).view(2, 3, 1)
        period = torch.tensor(1, dtype=torch.float).view(1, 1, 1)
        kernel = CosineKernel().initialize(period_length=period)
        kernel.eval()

        actual = torch.zeros(2, 3, 3)
        for k in range(2):
            for i in range(3):
                for j in range(3):
                    actual[k, i, j] = torch.cos(math.pi * ((a[k, i] - b[k, j]) / period))

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)
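As the reference computations in these tests show, the kernel value depends only on the scaled distance between inputs, k(x, x') = cos(pi * ||x - x'|| / period_length), so the covariance repeats every 2 * period_length in distance. A quick one-dimensional check of that periodicity (same older .evaluate() API as above):

import torch
from gpytorch.kernels import CosineKernel

kernel = CosineKernel().initialize(period_length=1.0)
x0 = torch.tensor([[0.0]])
xs = torch.tensor([[0.0], [0.5], [1.0], [2.0]])
print(kernel(x0, xs).evaluate())  # approximately [[1., 0., -1., 1.]]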
Example 7
    def test_kernel_learning_COS(self):

        for learning_mode in ['learn_kernel', 'both']:

            gpr_model_vanilla = GPRegressionLearned(
                self.x_train,
                self.y_train_sin,
                learning_mode='vanilla',
                num_iter_fit=1,
                mean_module='constant',
                covar_module=CosineKernel())
            gpr_model_vanilla.fit()

            gpr_model_learn_kernel = GPRegressionLearned(
                self.x_train,
                self.y_train_sin,
                learning_mode='learn_kernel',
                num_iter_fit=500,
                mean_module='constant',
                covar_module=CosineKernel())

            print(gpr_model_learn_kernel.model.covar_module.lengthscale)
            gpr_model_learn_kernel.fit(valid_x=self.x_train,
                                       valid_t=self.y_train_sin)
            print(gpr_model_learn_kernel.model.covar_module.lengthscale)

            ll_vanilla, rmse_vanilla, _ = gpr_model_vanilla.eval(
                self.x_train, self.y_train_sin)
            ll_kernel, rmse_kernel, _ = gpr_model_learn_kernel.eval(
                self.x_train, self.y_train_sin)

            print('learning_mode', learning_mode)
            print(ll_kernel, ll_vanilla)
            print(rmse_kernel, rmse_vanilla)
            self.assertGreater(ll_kernel, ll_vanilla)
            self.assertLess(rmse_kernel, rmse_vanilla)
Example 8
    def create_kernel_with_prior(self, period_length_prior):
        return CosineKernel(period_length_prior=period_length_prior)
Example 9
def create_cosine(sigma2, period_length):
    cosine = CosineKernel()
    cosine.period_length = period_length
    kernel = ScaleKernel(cosine)
    kernel.outputscale = sigma2
    return kernel
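A hypothetical usage sketch for create_cosine: build the scaled kernel and evaluate it on a small one-dimensional input set (the argument values are made up; .evaluate() follows the older GPyTorch API used elsewhere on this page):

import torch

kernel = create_cosine(sigma2=2.0, period_length=0.5)
x = torch.linspace(0, 1, 5).unsqueeze(-1)
K = kernel(x, x).evaluate()  # 5 x 5 matrix: 2.0 * cos(pi * |x_i - x_j| / 0.5)
print(K.shape)               # torch.Size([5, 5])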