def query_covar(covar_name: str, scale: bool, outputscale: float, lscales: Tensor, **kwargs) -> Kernel:
    """Build a kernel from its name and pre-set its hyperparameters.

    Args:
        covar_name: Kernel identifier. Supported: ``'rbf'`` and ``'matern52'`` /
            ``'matern32'`` / ``'matern12'`` (suffix selects ``nu``); matching of
            the ``matern`` prefix is case-insensitive.
        scale: If True, wrap the base kernel in a ``ScaleKernel`` and set its
            ``outputscale``.
        outputscale: Output scale assigned when ``scale`` is True.
        lscales: Lengthscale tensor; its last dimension fixes ``ard_num_dims``.
        **kwargs: Extra keyword arguments forwarded to the kernel constructor
            (they override the defaults built here).

    Returns:
        The configured kernel (a ``ScaleKernel`` when ``scale`` is True).

    Raises:
        ValueError: If ``covar_name`` is not one of the supported names.
    """
    name = covar_name.lower()
    kernel_kwargs = dict(
        lengthscale_prior=GammaPrior(3.0, 6.0),
        ard_num_dims=lscales.shape[-1],
    )
    if name.startswith('matern'):
        # The trailing two characters pick the Matern smoothness parameter.
        nu_by_suffix = {'52': 2.5, '32': 1.5, '12': 0.5}
        suffix = covar_name[-2:]
        if suffix not in nu_by_suffix:
            raise ValueError(covar_name)
        kernel_kwargs['nu'] = nu_by_suffix[suffix]
        kernel_class = MaternKernel
    elif name == 'rbf':
        kernel_class = RBFKernel
    else:
        raise ValueError(covar_name)
    kernel_kwargs.update(**kwargs)
    kernel = kernel_class(**kernel_kwargs)
    kernel.lengthscale = lscales
    if scale:
        kernel = ScaleKernel(kernel, outputscale_prior=GammaPrior(2.0, 0.15))
        kernel.outputscale = outputscale
    return kernel
def test_get_deterministic_model(self):
    """Check ``get_deterministic_model`` against a manual RFF computation.

    For each dtype and output count ``m``, builds per-output RFF bases with
    random weights, then verifies the model output equals the stacked
    ``basis(X) @ w`` products and has shape ``(*batch_shape, 1, m)``.
    """
    tkwargs = {"device": self.device}
    for dtype, m in product((torch.float, torch.double), (1, 2)):
        tkwargs["dtype"] = dtype
        weights, bases = [], []
        for idx in range(m):
            num_rff = 2 * (idx + 2)
            weights.append(torch.rand(num_rff, **tkwargs))
            # Randomize the kernel hyperparameters away from their defaults.
            kernel = ScaleKernel(RBFKernel(ard_num_dims=2)).to(**tkwargs)
            kernel.outputscale = 0.3 + torch.rand(
                1, **tkwargs).view(kernel.outputscale.shape)
            kernel.base_kernel.lengthscale = 0.3 + torch.rand(
                2, **tkwargs).view(kernel.base_kernel.lengthscale.shape)
            bases.append(
                RandomFourierFeatures(
                    kernel=kernel,
                    input_dim=2,
                    num_rff_features=num_rff,
                ))
        model = get_deterministic_model(weights=weights, bases=bases)
        self.assertIsInstance(model, DeterministicModel)
        self.assertEqual(model.num_outputs, m)
        for batch_shape in (torch.Size([]), torch.Size([3])):
            X = torch.rand(*batch_shape, 1, 2, **tkwargs)
            Y = model(X)
            # Expected output: one linear combination per output, stacked
            # along a new trailing output dimension.
            expected_Y = torch.stack(
                [basis(X) @ w for w, basis in zip(weights, bases)], dim=-1)
            self.assertTrue(torch.equal(Y, expected_Y))
            self.assertEqual(Y.shape, torch.Size([*batch_shape, 1, m]))
def create_rq(sigma2, lengthscale, alpha):
    """Return a scaled rational-quadratic kernel.

    Args:
        sigma2: Output scale assigned to the wrapping ``ScaleKernel``.
        lengthscale: Lengthscale of the RQ base kernel.
        alpha: Relative-weighting (alpha) parameter of the RQ kernel.

    Returns:
        A ``ScaleKernel`` wrapping the configured ``RQKernel``.
    """
    base = RQKernel()
    base.lengthscale = lengthscale
    base.alpha = alpha
    scaled = ScaleKernel(base)
    scaled.outputscale = sigma2
    return scaled
def create_per(sigma2, lengthscale, period_length):
    """Return a scaled periodic kernel.

    Args:
        sigma2: Output scale assigned to the wrapping ``ScaleKernel``.
        lengthscale: Lengthscale of the periodic base kernel.
        period_length: Period of the periodic base kernel.

    Returns:
        A ``ScaleKernel`` wrapping the configured ``PeriodicKernel``.
    """
    base = PeriodicKernel()
    base.lengthscale = lengthscale
    base.period_length = period_length
    scaled = ScaleKernel(base)
    scaled.outputscale = sigma2
    return scaled
def _parse_kernel(input_size: int,
                  dim_outputs: int = 1,
                  shared: bool = False,
                  kind: str = 'rbf',
                  ard_num_dims: int = None,
                  outputscale: float = None,
                  lengthscale: float = None,
                  learn_outputscale: bool = True,
                  learn_lengthscale: bool = True) -> Kernel:
    """Build a ``ScaleKernel``-wrapped kernel from a string specification.

    Fix over the previous version: the five branches each duplicated the
    ``ScaleKernel(..., batch_size=batch_size)`` wrapping; the base kernel is
    now selected first and wrapped exactly once, removing the copy/paste
    drift risk. Behavior and signature are unchanged.

    Args:
        input_size: Dimensionality of the kernel inputs.
        dim_outputs: Number of outputs; sets the batch size unless ``shared``.
        shared: If True, a single kernel (batch size 1) is shared across
            outputs.
        kind: One of ``'rbf'``, ``'matern12'``, ``'matern32'``, ``'matern52'``,
            ``'linear'`` (case-insensitive).
        ard_num_dims: ARD dimensionality; defaults to ``input_size``.
        outputscale: If given, initial output scale (replicated per batch).
        lengthscale: If given, initial lengthscale (replicated per batch and
            ARD dimension).
        learn_outputscale: Whether the output scale stays trainable when
            ``outputscale`` is set.
        learn_lengthscale: Whether the lengthscale stays trainable when
            ``lengthscale`` is set.

    Returns:
        The configured ``ScaleKernel``.

    Raises:
        NotImplementedError: If ``kind`` is not a supported kernel name.
    """
    batch_size = 1 if shared else dim_outputs
    ard_num_dims = input_size if ard_num_dims is None else ard_num_dims

    kind_key = kind.lower()
    base_kwargs = dict(ard_num_dims=ard_num_dims, batch_size=batch_size)
    if kind_key == 'rbf':
        base_kernel = RBFKernel(**base_kwargs)
    elif kind_key in ('matern12', 'matern32', 'matern52'):
        # Suffix encodes the Matern smoothness: 12 -> 0.5, 32 -> 1.5, 52 -> 2.5.
        nu = {'matern12': 0.5, 'matern32': 1.5, 'matern52': 2.5}[kind_key]
        base_kernel = MaternKernel(nu=nu, **base_kwargs)
    elif kind_key == 'linear':
        base_kernel = LinearKernel(input_size=input_size, **base_kwargs)
    else:
        raise NotImplementedError(
            'Kernel function {} not implemented.'.format(kind))

    kernel = ScaleKernel(base_kernel, batch_size=batch_size)

    if outputscale is not None:
        kernel.outputscale = outputscale * torch.ones(batch_size, 1)
        # Freeze (or keep trainable) the raw parameter behind the constraint.
        kernel.raw_outputscale.requires_grad = learn_outputscale
    if lengthscale is not None:
        kernel.base_kernel.lengthscale = lengthscale * torch.ones(
            batch_size, ard_num_dims)
        kernel.base_kernel.raw_lengthscale.requires_grad = learn_lengthscale
    return kernel
def test_get_deterministic_model_multi_samples(self):
    """Check ``get_deterministic_model_multi_samples`` against a manual
    per-output RFF computation, sweeping dtype, output count ``m``, and
    independent weight/input batch shapes.
    """
    tkwargs = {"device": self.device}
    n_samples = 5
    for dtype, m in product((torch.float, torch.double), (1, 2)):
        tkwargs["dtype"] = dtype
        # Sweep every combination of weight batch shape and input batch shape.
        for batch_shape_w, batch_shape_x in product(
                [torch.Size([]), torch.Size([3])], repeat=2):
            weights = []
            bases = []
            for i in range(m):
                num_rff = 2 * (i + 2)
                # Weights carry both the weight batch shape and the sample
                # dimension: `(*batch_shape_w) x n_samples x num_rff`.
                # NOTE(review): the model presumably requires the sample
                # dimension here — confirm against the model's docs.
                weights.append(
                    torch.rand(*batch_shape_w, n_samples, num_rff,
                               **tkwargs))
                # Randomize kernel hyperparameters away from their defaults.
                kernel = ScaleKernel(
                    RBFKernel(ard_num_dims=2)).to(**tkwargs)
                kernel.outputscale = 0.3 + torch.rand(1, **tkwargs).view(
                    kernel.outputscale.shape)
                kernel.base_kernel.lengthscale = 0.3 + torch.rand(
                    2, **tkwargs).view(
                        kernel.base_kernel.lengthscale.shape)
                bases.append(
                    RandomFourierFeatures(
                        kernel=kernel,
                        input_dim=2,
                        num_rff_features=num_rff,
                        sample_shape=torch.Size([n_samples]),
                    ))
            model = get_deterministic_model_multi_samples(weights=weights,
                                                          bases=bases)
            self.assertIsInstance(model, DeterministicModel)
            self.assertEqual(model.num_outputs, m)
            X = torch.rand(*batch_shape_x, n_samples, 1, 2, **tkwargs)
            Y = model(X)
            for i in range(m):
                wi = weights[i]
                # Insert one singleton dim per input-batch dim so the weights
                # broadcast against the basis output, then expand to the full
                # joint batch shape `(*batch_shape_w, *batch_shape_x, ...)`.
                for _ in range(len(batch_shape_x)):
                    wi = wi.unsqueeze(-3)
                wi = wi.expand(*batch_shape_w, *batch_shape_x,
                               *wi.shape[-2:])
                # Batched matrix-vector product via a trailing unit dim.
                expected_Yi = (bases[i](X) @ wi.unsqueeze(-1)).squeeze(-1)
                self.assertTrue(torch.allclose(Y[..., i], expected_Yi))
            self.assertEqual(
                Y.shape,
                torch.Size(
                    [*batch_shape_w, *batch_shape_x, n_samples, 1, m]),
            )
def create_bayesian_quadrature_iso_gauss():
    """Build a ``QuadratureRBFGaussPrior`` fixture over a unit-scale RBF kernel.

    Returns:
        Tuple ``(bqkernel, x1, x2, M1, M2, D)`` where ``x1``/``x2`` are fixed
        test inputs, ``M1``/``M2`` their row counts, and ``D`` the input
        dimension.
    """
    x1 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1]]))
    x2 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1], [-3, 3]]))
    M1, D = x1.shape
    M2 = x2.shape[0]
    # Gaussian prior: mean (0, 1, ..., D-1) with isotropic variance 2.
    prior_mean = torch.from_numpy(np.arange(D))[None, :]
    prior_variance = 2.
    # Unit-hyperparameter RBF wrapped in a unit output scale.
    base = RBFKernel()
    base.lengthscale = 1.
    scaled = ScaleKernel(base)
    scaled.outputscale = 1.
    bqkernel = QuadratureRBFGaussPrior(scaled, prior_mean, prior_variance)
    return bqkernel, x1, x2, M1, M2, D
def create_rbf(sigma2, lengthscale):
    """Return a scaled RBF kernel.

    Args:
        sigma2: Output scale assigned to the wrapping ``ScaleKernel``.
        lengthscale: Lengthscale of the RBF base kernel.

    Returns:
        A ``ScaleKernel`` wrapping the configured ``RBFKernel``.
    """
    base = RBFKernel()
    base.lengthscale = lengthscale
    scaled = ScaleKernel(base)
    scaled.outputscale = sigma2
    return scaled
def create_cosine(sigma2, period_length):
    """Return a scaled cosine kernel.

    Args:
        sigma2: Output scale assigned to the wrapping ``ScaleKernel``.
        period_length: Period of the cosine base kernel.

    Returns:
        A ``ScaleKernel`` wrapping the configured ``CosineKernel``.
    """
    base = CosineKernel()
    base.period_length = period_length
    scaled = ScaleKernel(base)
    scaled.outputscale = sigma2
    return scaled