def __init__(self, num_dim, grid_bounds=(-10., 10.), grid_size=64):
    """Build a vector-valued (one GP per output dimension) variational GP layer.

    Args:
        num_dim: number of independent output dimensions (tasks).
        grid_bounds: (low, high) interval on which the inducing-point grid lives.
        grid_size: number of inducing points per dimension, i.e. grid resolution.
    """
    # One Cholesky-parameterized variational distribution per output dimension,
    # each over `grid_size` inducing points (hence the batch shape).
    var_dist = variational.CholeskyVariationalDistribution(
        num_inducing_points=grid_size,
        batch_shape=torch.Size([num_dim]),
    )

    # Inducing points sit on a fixed 1-D grid (GridInterpolationVariationalStrategy);
    # wrapping in MultitaskVariationalStrategy turns the batch of independent GPs
    # into a single vector-valued GP output.
    grid_strategy = variational.GridInterpolationVariationalStrategy(
        self,
        grid_size=grid_size,
        grid_bounds=[grid_bounds],
        variational_distribution=var_dist,
    )
    multitask_strategy = variational.MultitaskVariationalStrategy(
        grid_strategy,
        num_tasks=num_dim,
    )
    super().__init__(multitask_strategy)

    # RBF kernel with a smoothed-box prior on the lengthscale (in log space,
    # roughly [e^-1, e^1]), scaled by an outputscale.
    self.covar_module = kernels.ScaleKernel(
        kernels.RBFKernel(
            lengthscale_prior=priors.SmoothedBoxPrior(
                math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp
            )
        )
    )
    self.mean_module = means.ConstantMean()
    self.grid_bounds = grid_bounds
def __init__(self, num_dim, grid_bounds=(-100., 100.), grid_size=100):
    """Build a vector-valued (one GP per output dimension) variational GP layer.

    Args:
        num_dim: number of independent output dimensions (tasks).
        grid_bounds: (low, high) interval on which the inducing-point grid lives.
        grid_size: number of inducing points per dimension, i.e. grid resolution.
    """
    # One Cholesky-parameterized variational distribution per output dimension,
    # each over `grid_size` inducing points (hence the batch shape).
    variational_distribution = variational.CholeskyVariationalDistribution(
        num_inducing_points=grid_size,
        batch_shape=torch.Size([num_dim]),
    )

    # Inducing points live on a fixed grid; MultitaskVariationalStrategy
    # combines the batch of independent GPs into a vector-valued output.
    variational_strategy = variational.MultitaskVariationalStrategy(
        variational.GridInterpolationVariationalStrategy(
            self,
            grid_size=grid_size,
            grid_bounds=[grid_bounds],
            variational_distribution=variational_distribution,
        ),
        num_tasks=num_dim,
    )
    super().__init__(variational_strategy)

    # BUG FIX: the keyword is `lengthscale_prior` (singular). The original
    # `lengthscale_priors` is not a Kernel parameter, so the SmoothedBoxPrior
    # was either rejected (TypeError) or silently dropped, never constraining
    # the lengthscale.
    self.covar_module = kernels.ScaleKernel(
        kernels.RBFKernel(
            lengthscale_prior=priors.SmoothedBoxPrior(
                math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp
            )
        )
    )
    self.mean_module = means.ConstantMean()
    self.grid_bounds = grid_bounds