def test_active_dims_range(self):
    active_dims = list(range(3, 9))
    kernel = LinearTruncatedFidelityKernel(dimension=10, active_dims=active_dims)
    x = self.create_data_no_batch()
    covar_mat = kernel(x).evaluate_kernel().evaluate()
    kernel_basic = LinearTruncatedFidelityKernel(dimension=6)
    covar_mat_actual = kernel_basic(x[:, active_dims]).evaluate_kernel().evaluate()
    self.assertLess(
        torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(), 1e-4
    )
def test_initialize_lengthscale_prior(self):
    kernel = LinearTruncatedFidelityKernel()
    self.assertTrue(isinstance(kernel.covar_module_1.lengthscale_prior, GammaPrior))
    self.assertTrue(isinstance(kernel.covar_module_2.lengthscale_prior, GammaPrior))
    kernel2 = LinearTruncatedFidelityKernel(lengthscale_prior=NormalPrior(1, 1))
    self.assertTrue(isinstance(kernel2.covar_module_1.lengthscale_prior, NormalPrior))
    kernel2 = LinearTruncatedFidelityKernel(lengthscale_2_prior=NormalPrior(1, 1))
    self.assertTrue(isinstance(kernel2.covar_module_2.lengthscale_prior, NormalPrior))
def test_compute_linear_truncated_kernel_with_batch(self):
    x1 = torch.tensor(
        [1, 0.1, 0.2, 3, 0.3, 0.4, 5, 0.5, 0.6, 7, 0.7, 0.8], dtype=torch.float
    ).view(2, 2, 3)
    x2 = torch.tensor(
        [2, 0.8, 0.7, 4, 0.6, 0.5, 6, 0.4, 0.3, 8, 0.2, 0.1], dtype=torch.float
    ).view(2, 2, 3)
    # Precomputed bias term contributed by the fidelity columns.
    t_1 = torch.tensor(
        [0.2736, 0.44, 0.2304, 0.36, 0.3304, 0.3816, 0.1736, 0.1944],
        dtype=torch.float,
    ).view(2, 2, 2)
    batch_shape = torch.Size([2])
    dimension = 3
    for nu in {0.5, 1.5, 2.5}:
        for train_data_fidelity in {False, True}:
            kernel = LinearTruncatedFidelityKernel(
                nu=nu,
                dimension=dimension,
                train_data_fidelity=train_data_fidelity,
                batch_shape=batch_shape,
            )
            kernel.power = 1
            kernel.train_data_fidelity = train_data_fidelity
            if train_data_fidelity:
                active_dimsM = [0]
                t_2 = torch.tensor(
                    [0.0527, 0.167, 0.0383, 0.1159, 0.1159, 0.167, 0.0383, 0.0527],
                    dtype=torch.float,
                ).view(2, 2, 2)
                t_3 = torch.tensor(
                    [0.1944, 0.3816, 0.1736, 0.3304, 0.36, 0.44, 0.2304, 0.2736],
                    dtype=torch.float,
                ).view(2, 2, 2)
                t = 1 + t_1 + t_2 + t_3
            else:
                active_dimsM = [0, 1]
                t = 1 + t_1
            # Expected covariance: Matern over the non-fidelity columns,
            # scaled by the fidelity-dependent factor t.
            matern_ker = MaternKernel(
                nu=nu, active_dims=active_dimsM, batch_shape=batch_shape
            )
            matern_term = matern_ker(x1, x2).evaluate()
            actual = t * matern_term
            res = kernel(x1, x2).evaluate()
            self.assertLess(torch.norm(res - actual), 1e-4)
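# Reference sketch only: the expected value in the test above encodes the kernel's
# structure as a Matern kernel over the non-fidelity columns, scaled by a factor
# built from the fidelity columns. Below, the single-fidelity branch of that check
# is spelled out on its own, reusing the x1, x2, and t_1 tensors defined above.
# The import path is an assumption and varies across BoTorch versions.
from botorch.models.kernels.linear_truncated_fidelity import (
    LinearTruncatedFidelityKernel,
)
from gpytorch.kernels import MaternKernel

kernel = LinearTruncatedFidelityKernel(
    nu=2.5, dimension=3, train_data_fidelity=False, batch_shape=torch.Size([2])
)
kernel.power = 1
matern = MaternKernel(nu=2.5, active_dims=[0, 1], batch_shape=torch.Size([2]))
# Expected: Matern over columns [0, 1] times the fidelity factor (1 + t_1).
expected = (1 + t_1) * matern(x1, x2).evaluate()
assert torch.norm(kernel(x1, x2).evaluate() - expected) < 1e-4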
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    nu: float = 2.5,
    train_iteration_fidelity: bool = True,
    train_data_fidelity: bool = True,
    likelihood: Optional[Likelihood] = None,
) -> None:
    if not train_iteration_fidelity and not train_data_fidelity:
        raise UnsupportedError("You should have at least one fidelity parameter.")
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    kernel = LinearTruncatedFidelityKernel(
        nu=nu,
        dimension=train_X.shape[-1],
        train_iteration_fidelity=train_iteration_fidelity,
        train_data_fidelity=train_data_fidelity,
        batch_shape=self._aug_batch_shape,
        power_prior=GammaPrior(3.0, 3.0),
    )
    covar_module = ScaleKernel(
        kernel,
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    super().__init__(
        train_X=train_X,
        train_Y=train_Y,
        likelihood=likelihood,
        covar_module=covar_module,
    )
    self.to(train_X)
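# Usage sketch (assumptions, not part of the code above): the __init__ above is
# assumed to belong to a multi-fidelity single-task GP model, referred to here as
# SingleTaskMultiFidelityGP; the toy data, shapes, and fitting calls below are
# illustrative only. The trailing columns of train_X are the fidelity parameters
# consumed by LinearTruncatedFidelityKernel.
import torch

from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood

# Toy data: 3 design dimensions plus 2 trailing fidelity columns in [0, 1].
# (train_Y shape conventions differ across library versions.)
train_X = torch.rand(20, 5)
train_Y = torch.sin(train_X[:, :3].sum(dim=-1, keepdim=True))

model = SingleTaskMultiFidelityGP(train_X=train_X, train_Y=train_Y, nu=2.5)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)

# Predictions use the same column layout (design dimensions, then fidelities).
posterior = model.posterior(torch.rand(5, 5))
print(posterior.mean.shape)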
def create_kernel_no_ard(self, **kwargs):
    return LinearTruncatedFidelityKernel(**kwargs)
def test_raise_matern_error(self):
    with self.assertRaises(ValueError):
        LinearTruncatedFidelityKernel(nu=1)
def test_initialize_power_batch(self):
    kernel = LinearTruncatedFidelityKernel(batch_shape=torch.Size([2]))
    power_init = torch.tensor([1, 2], dtype=torch.float)
    kernel.initialize(power=power_init)
    actual_value = power_init.view_as(kernel.power)
    self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power(self):
    kernel = LinearTruncatedFidelityKernel()
    kernel.initialize(power=1)
    actual_value = torch.tensor(1, dtype=torch.float).view_as(kernel.power)
    self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)
def test_initialize_power_prior(self):
    kernel = LinearTruncatedFidelityKernel(power_prior=NormalPrior(1, 1))
    self.assertTrue(isinstance(kernel.power_prior, NormalPrior))